Merge git://git.kernel.org/pub/scm/linux/kernel/git/wim/linux-2.6-watchdog
* git://git.kernel.org/pub/scm/linux/kernel/git/wim/linux-2.6-watchdog:
[WATCHDOG] wdt_pci.c: remove #ifdef CONFIG_WDT_501_PCI
[WATCHDOG] hpwdt: Add NMI priority option
[WATCHDOG] OMAP fixes: enable clock in probe, trigger timer reload
[WATCHDOG] add bcm47xx watchdog driver
[WATCHDOG] Freescale STMP: watchdog driver
[WATCHDOG] twl4030 watchdog driver
[WATCHDOG] U300 COH 901 327 watchdog driver
[WATCHDOG] Add pnx833x_wdt
diff --git a/Documentation/ABI/testing/sysfs-bus-pci b/Documentation/ABI/testing/sysfs-bus-pci
index 97ad190..6bf6805 100644
--- a/Documentation/ABI/testing/sysfs-bus-pci
+++ b/Documentation/ABI/testing/sysfs-bus-pci
@@ -122,3 +122,10 @@
This symbolic link appears when a device is a Virtual Function.
The symbolic link points to the PCI device sysfs entry of the
Physical Function this device associates with.
+
+What: /sys/bus/pci/slots/.../module
+Date: June 2009
+Contact: linux-pci@vger.kernel.org
+Description:
+ This symbolic link points to the PCI hotplug controller driver
+ module that manages the hotplug slot.
diff --git a/Documentation/ABI/testing/sysfs-class-mtd b/Documentation/ABI/testing/sysfs-class-mtd
new file mode 100644
index 0000000..4d55a18
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-class-mtd
@@ -0,0 +1,125 @@
+What: /sys/class/mtd/
+Date: April 2009
+KernelVersion: 2.6.29
+Contact: linux-mtd@lists.infradead.org
+Description:
+ The mtd/ class subdirectory belongs to the MTD subsystem
+ (MTD core).
+
+What: /sys/class/mtd/mtdX/
+Date: April 2009
+KernelVersion: 2.6.29
+Contact: linux-mtd@lists.infradead.org
+Description:
+ The /sys/class/mtd/mtd{0,1,2,3,...} directories correspond
+ to each /dev/mtdX character device. These may represent
+ physical/simulated flash devices, partitions on a flash
+ device, or concatenated flash devices. They exist regardless
+ of whether CONFIG_MTD_CHAR is actually enabled.
+
+What: /sys/class/mtd/mtdXro/
+Date: April 2009
+KernelVersion: 2.6.29
+Contact: linux-mtd@lists.infradead.org
+Description:
+ These directories provide the corresponding read-only device
+ nodes for /sys/class/mtd/mtdX/ . They are only created
+ (for the benefit of udev) if CONFIG_MTD_CHAR is enabled.
+
+What: /sys/class/mtd/mtdX/dev
+Date: April 2009
+KernelVersion: 2.6.29
+Contact: linux-mtd@lists.infradead.org
+Description:
+ Major and minor numbers of the character device corresponding
+ to this MTD device (in <major>:<minor> format). This is the
+ read-write device so <minor> will be even.
+
+What: /sys/class/mtd/mtdXro/dev
+Date: April 2009
+KernelVersion: 2.6.29
+Contact: linux-mtd@lists.infradead.org
+Description:
+ Major and minor numbers of the character device corresponding
+ to the read-only variant of this MTD device (in
+ <major>:<minor> format). In this case <minor> will be odd.
+
+What: /sys/class/mtd/mtdX/erasesize
+Date: April 2009
+KernelVersion: 2.6.29
+Contact: linux-mtd@lists.infradead.org
+Description:
+ "Major" erase size for the device. If numeraseregions is
+ zero, this is the eraseblock size for the entire device.
+ Otherwise, the MEMGETREGIONCOUNT/MEMGETREGIONINFO ioctls
+ can be used to determine the actual eraseblock layout.
+
+What: /sys/class/mtd/mtdX/flags
+Date: April 2009
+KernelVersion: 2.6.29
+Contact: linux-mtd@lists.infradead.org
+Description:
+ A hexadecimal value representing the device flags, ORed
+ together:
+
+ 0x0400: MTD_WRITEABLE - device is writable
+ 0x0800: MTD_BIT_WRITEABLE - single bits can be flipped
+ 0x1000: MTD_NO_ERASE - no erase necessary
+ 0x2000: MTD_POWERUP_LOCK - always locked after reset
+
+What: /sys/class/mtd/mtdX/name
+Date: April 2009
+KernelVersion: 2.6.29
+Contact: linux-mtd@lists.infradead.org
+Description:
+ A human-readable ASCII name for the device or partition.
+ This will match the name in /proc/mtd .
+
+What: /sys/class/mtd/mtdX/numeraseregions
+Date: April 2009
+KernelVersion: 2.6.29
+Contact: linux-mtd@lists.infradead.org
+Description:
+ For devices that have variable eraseblock sizes, this
+ provides the total number of erase regions. Otherwise,
+ it will read back as zero.
+
+What: /sys/class/mtd/mtdX/oobsize
+Date: April 2009
+KernelVersion: 2.6.29
+Contact: linux-mtd@lists.infradead.org
+Description:
+ Number of OOB bytes per page.
+
+What: /sys/class/mtd/mtdX/size
+Date: April 2009
+KernelVersion: 2.6.29
+Contact: linux-mtd@lists.infradead.org
+Description:
+ Total size of the device/partition, in bytes.
+
+What: /sys/class/mtd/mtdX/type
+Date: April 2009
+KernelVersion: 2.6.29
+Contact: linux-mtd@lists.infradead.org
+Description:
+ One of the following ASCII strings, representing the device
+ type:
+
+ absent, ram, rom, nor, nand, dataflash, ubi, unknown
+
+What: /sys/class/mtd/mtdX/writesize
+Date: April 2009
+KernelVersion: 2.6.29
+Contact: linux-mtd@lists.infradead.org
+Description:
+ Minimal writable flash unit size. This will always be
+ a positive integer.
+
+ In the case of NOR flash it is 1 (even though individual
+ bits can be cleared).
+
+ In the case of NAND flash it is one NAND page (or a
+ half page, or a quarter page).
+
+ In the case of ECC NOR, it is the ECC block size.
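As a rough userspace illustration (not part of the patch above), the new
attributes can be read like any other sysfs file; the mtd0 index, and parsing
"flags" as hex and "erasesize" as decimal, are assumptions of this sketch:

#include <stdio.h>

static int read_ulong(const char *path, const char *fmt, unsigned long *val)
{
	FILE *f = fopen(path, "r");

	if (!f)
		return -1;
	if (fscanf(f, fmt, val) != 1)
		*val = 0;
	fclose(f);
	return 0;
}

int main(void)
{
	unsigned long flags = 0, erasesize = 0;

	/* "flags" is documented above as a hexadecimal bitmask, "erasesize" in bytes */
	read_ulong("/sys/class/mtd/mtd0/flags", "%lx", &flags);
	read_ulong("/sys/class/mtd/mtd0/erasesize", "%lu", &erasesize);

	printf("erasesize:    %lu bytes\n", erasesize);
	printf("writable:     %s\n", (flags & 0x0400) ? "yes" : "no"); /* MTD_WRITEABLE */
	printf("bit-writable: %s\n", (flags & 0x0800) ? "yes" : "no"); /* MTD_BIT_WRITEABLE */
	printf("no erase:     %s\n", (flags & 0x1000) ? "yes" : "no"); /* MTD_NO_ERASE */
	printf("powerup lock: %s\n", (flags & 0x2000) ? "yes" : "no"); /* MTD_POWERUP_LOCK */
	return 0;
}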
diff --git a/Documentation/PCI/pcieaer-howto.txt b/Documentation/PCI/pcieaer-howto.txt
index ddeb14b..be21001 100644
--- a/Documentation/PCI/pcieaer-howto.txt
+++ b/Documentation/PCI/pcieaer-howto.txt
@@ -61,6 +61,10 @@
walkaround, pls. add aerdriver.forceload=y to kernel boot parameter line
when booting kernel. Note that forceload=n by default.
+nosourceid, another parameter of type bool, can be used when broken
+hardware (mostly chipsets) has root ports that cannot obtain the reporting
+source ID. nosourceid=n by default.
+
2.3 AER error output
When a PCI-E AER error is captured, an error message will be outputed to
console. If it's a correctable error, it is outputed as a warning.
@@ -246,3 +250,24 @@
A: It could call the helper functions to enable AER in devices and
cleanup uncorrectable status register. Pls. refer to section 3.3.
+
+4. Software error injection
+
+Debugging PCIE AER error recovery code is quite difficult because it
+is hard to trigger real hardware errors. Software based error
+injection can be used to fake various kinds of PCIE errors.
+
+First you should enable PCIE AER software error injection in the kernel
+configuration; that is, the following item should be in your .config:
+
+CONFIG_PCIEAER_INJECT=y or CONFIG_PCIEAER_INJECT=m
+
+After rebooting with the new kernel or inserting the module, a device file
+named /dev/aer_inject should be created.
+
+Then you need a user space tool named aer-inject, which can be obtained
+from:
+ http://www.kernel.org/pub/linux/utils/pci/aer-inject/
+
+More information about aer-inject can be found in the documentation that
+comes with its source code.
diff --git a/Documentation/filesystems/00-INDEX b/Documentation/filesystems/00-INDEX
index 8dd6db7..f15621e 100644
--- a/Documentation/filesystems/00-INDEX
+++ b/Documentation/filesystems/00-INDEX
@@ -66,6 +66,10 @@
- info on the Linux implementation of Sys V mandatory file locking.
ncpfs.txt
- info on Novell Netware(tm) filesystem using NCP protocol.
+nfs41-server.txt
+ - info on the Linux server implementation of NFSv4 minor version 1.
+nfs-rdma.txt
+ - how to install and setup the Linux NFS/RDMA client and server software.
nfsroot.txt
- short guide on setting up a diskless box with NFS root filesystem.
nilfs2.txt
diff --git a/Documentation/isdn/00-INDEX b/Documentation/isdn/00-INDEX
index f6010a5..e87e336 100644
--- a/Documentation/isdn/00-INDEX
+++ b/Documentation/isdn/00-INDEX
@@ -14,25 +14,14 @@
- general info on what you need and what to do for Linux ISDN.
README.FAQ
- general info for FAQ.
-README.audio
- - info for running audio over ISDN.
-README.fax
- - info for using Fax over ISDN.
-README.gigaset
- - info on the drivers for Siemens Gigaset ISDN adapters.
-README.icn
- - info on the ICN-ISDN-card and its driver.
README.HiSax
- info on the HiSax driver which replaces the old teles.
+README.act2000
+ - info on driver for IBM ACT-2000 card.
README.audio
- info for running audio over ISDN.
README.avmb1
- info on driver for AVM-B1 ISDN card.
-README.act2000
- - info on driver for IBM ACT-2000 card.
-README.eicon
- - info on driver for Eicon active cards.
README.concap
- info on "CONCAP" encapsulation protocol interface used for X.25.
README.diversion
@@ -59,7 +48,3 @@
- info for running X.25 over ISDN.
syncPPP.FAQ
- frequently asked questions about running PPP over ISDN.
-README.hysdn
- - info on driver for Hypercope active HYSDN cards
-README.mISDN
- - info on the Modular ISDN subsystem (mISDN).
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 08def8d..92e1ab8 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -1006,6 +1006,7 @@
nomerge
forcesac
soft
+ pt [x86, IA64]
io7= [HW] IO7 for Marvel based alpha systems
See comment before marvel_specify_io7 in
@@ -1369,6 +1370,27 @@
min_addr=nn[KMG] [KNL,BOOT,ia64] All physical memory below this
physical address is ignored.
+ mini2440= [ARM,HW,KNL]
+ Format:[0..2][b][c][t]
+ Default: "0tb"
+ MINI2440 configuration specification:
+ 0 - The attached screen is the 3.5" TFT
+ 1 - The attached screen is the 7" TFT
+ 2 - The VGA Shield is attached (1024x768)
+ Leaving out the screen size parameter will not load
+ the TFT driver, and the framebuffer will be left
+ unconfigured.
+ b - Enable backlight. The TFT backlight pin will be
+ linked to the kernel VESA blanking code and a GPIO
+ LED. This parameter is not necessary when using the
+ VGA shield.
+ c - Enable the s3c camera interface.
+ t - Reserved for enabling touchscreen support. The
+ touchscreen support is not enabled in the mainstream
+ kernel as of 2.6.30; a preliminary port can be found
+ in the "bleeding edge" mini2440 support kernel at
+ http://repo.or.cz/w/linux-2.6/mini2440.git
+
mminit_loglevel=
[KNL] When CONFIG_DEBUG_MEMORY_INIT is set, this
parameter allows control of the logging verbosity for
@@ -1410,6 +1432,16 @@
mtdparts= [MTD]
See drivers/mtd/cmdlinepart.c.
+ onenand.bdry= [HW,MTD] Flex-OneNAND Boundary Configuration
+
+ Format: [die0_boundary][,die0_lock][,die1_boundary][,die1_lock]
+
+ boundary - index of last SLC block on Flex-OneNAND.
+ The remaining blocks are configured as MLC blocks.
+ lock - Configure whether the Flex-OneNAND boundary should be locked.
+ Once locked, the boundary cannot be changed.
+ 1 means the boundary is locked; 0 means it is unlocked.
+
mtdset= [ARM]
ARM/S3C2412 JIVE boot control
@@ -1776,6 +1808,9 @@
root domains (aka PCI segments, in ACPI-speak).
nommconf [X86] Disable use of MMCONFIG for PCI
Configuration
+ check_enable_amd_mmconf [X86] check for and enable
+ properly configured MMIO access to PCI
+ config space on AMD family 10h CPU
nomsi [MSI] If the PCI_MSI kernel config parameter is
enabled, this kernel boot option can be used to
disable the use of MSI interrupts system-wide.
@@ -1828,7 +1863,7 @@
IRQ routing is enabled.
noacpi [X86] Do not use ACPI for IRQ routing
or for PCI scanning.
- use_crs [X86] Use _CRS for PCI resource
+ nocrs [X86] Don't use _CRS for PCI resource
allocation.
routeirq Do IRQ routing for all PCI devices.
This is normally done in pci_enable_device(),
@@ -1865,6 +1900,12 @@
PAGE_SIZE is used as alignment.
PCI-PCI bridge can be specified, if resource
windows need to be expanded.
+ ecrc= Enable/disable PCIe ECRC (transaction layer
+ end-to-end CRC checking).
+ bios: Use BIOS/firmware settings. This is
+ the default.
+ off: Turn ECRC off.
+ on: Turn ECRC on.
pcie_aspm= [PCIE] Forcibly enable or disable PCIe Active State Power
Management.
diff --git a/Documentation/powerpc/dts-bindings/fsl/esdhc.txt b/Documentation/powerpc/dts-bindings/fsl/esdhc.txt
index 5093ddf..3ed3797 100644
--- a/Documentation/powerpc/dts-bindings/fsl/esdhc.txt
+++ b/Documentation/powerpc/dts-bindings/fsl/esdhc.txt
@@ -10,6 +10,8 @@
- interrupts : should contain eSDHC interrupt.
- interrupt-parent : interrupt source phandle.
- clock-frequency : specifies eSDHC base clock frequency.
+ - sdhci,1-bit-only : (optional) specifies that a controller can
+ only handle 1-bit data transfers.
Example:
diff --git a/Documentation/rfkill.txt b/Documentation/rfkill.txt
index c8acd86..b486050 100644
--- a/Documentation/rfkill.txt
+++ b/Documentation/rfkill.txt
@@ -111,6 +111,8 @@
name: Name assigned by driver to this key (interface or driver name).
type: Driver type string ("wlan", "bluetooth", etc).
+ persistent: Whether the soft blocked state is initialised from
+ non-volatile storage at startup.
state: Current state of the transmitter
0: RFKILL_STATE_SOFT_BLOCKED
transmitter is turned off by software
diff --git a/Documentation/sound/alsa/HD-Audio-Models.txt b/Documentation/sound/alsa/HD-Audio-Models.txt
index de8e10a..0d8d235 100644
--- a/Documentation/sound/alsa/HD-Audio-Models.txt
+++ b/Documentation/sound/alsa/HD-Audio-Models.txt
@@ -139,6 +139,7 @@
acer Acer laptops (Travelmate 3012WTMi, Aspire 5600, etc)
acer-aspire Acer Aspire 9810
acer-aspire-4930g Acer Aspire 4930G
+ acer-aspire-6530g Acer Aspire 6530G
acer-aspire-8930g Acer Aspire 8930G
medion Medion Laptops
medion-md2 Medion MD2
diff --git a/Documentation/video4linux/CARDLIST.cx88 b/Documentation/video4linux/CARDLIST.cx88
index 89093f5..0736518 100644
--- a/Documentation/video4linux/CARDLIST.cx88
+++ b/Documentation/video4linux/CARDLIST.cx88
@@ -6,8 +6,8 @@
5 -> Leadtek Winfast 2000XP Expert [107d:6611,107d:6613]
6 -> AverTV Studio 303 (M126) [1461:000b]
7 -> MSI TV-@nywhere Master [1462:8606]
- 8 -> Leadtek Winfast DV2000 [107d:6620]
- 9 -> Leadtek PVR 2000 [107d:663b,107d:663c,107d:6632]
+ 8 -> Leadtek Winfast DV2000 [107d:6620,107d:6621]
+ 9 -> Leadtek PVR 2000 [107d:663b,107d:663c,107d:6632,107d:6630,107d:6638,107d:6631,107d:6637,107d:663d]
10 -> IODATA GV-VCP3/PCI [10fc:d003]
11 -> Prolink PlayTV PVR
12 -> ASUS PVR-416 [1043:4823,1461:c111]
@@ -59,7 +59,7 @@
58 -> Pinnacle PCTV HD 800i [11bd:0051]
59 -> DViCO FusionHDTV 5 PCI nano [18ac:d530]
60 -> Pinnacle Hybrid PCTV [12ab:1788]
- 61 -> Winfast TV2000 XP Global [107d:6f18]
+ 61 -> Leadtek TV2000 XP Global [107d:6f18,107d:6618]
62 -> PowerColor RA330 [14f1:ea3d]
63 -> Geniatech X8000-MT DVBT [14f1:8852]
64 -> DViCO FusionHDTV DVB-T PRO [18ac:db30]
diff --git a/Documentation/video4linux/CARDLIST.em28xx b/Documentation/video4linux/CARDLIST.em28xx
index a98a688..873630e 100644
--- a/Documentation/video4linux/CARDLIST.em28xx
+++ b/Documentation/video4linux/CARDLIST.em28xx
@@ -65,3 +65,4 @@
67 -> Terratec Grabby (em2860) [0ccd:0096]
68 -> Terratec AV350 (em2860) [0ccd:0084]
69 -> KWorld ATSC 315U HDTV TV Box (em2882) [eb1a:a313]
+ 70 -> Evga inDtube (em2882)
diff --git a/Documentation/video4linux/v4l2-framework.txt b/Documentation/video4linux/v4l2-framework.txt
index d54c1e4..ba4706a 100644
--- a/Documentation/video4linux/v4l2-framework.txt
+++ b/Documentation/video4linux/v4l2-framework.txt
@@ -390,6 +390,30 @@
To see which chip variants are supported you can look in the i2c driver code
for the i2c_device_id table. This lists all the possibilities.
+There are two more helper functions:
+
+v4l2_i2c_new_subdev_cfg: this function adds new irq and platform_data
+arguments and has both 'addr' and 'probed_addrs' arguments: if addr is not
+0 then that will be used (non-probing variant), otherwise the probed_addrs
+are probed.
+
+For example: this will probe for address 0x10:
+
+struct v4l2_subdev *sd = v4l2_i2c_new_subdev_cfg(v4l2_dev, adapter,
+ "module_foo", "chipid", 0, NULL, 0, I2C_ADDRS(0x10));
+
+v4l2_i2c_new_subdev_board uses an i2c_board_info struct which is passed
+to the i2c driver and replaces the irq, platform_data and addr arguments.
+
+If the subdev supports the s_config core op, then that op is called with
+the irq and platform_data arguments after the subdev has been set up. The older
+v4l2_i2c_new_(probed_)subdev functions will call s_config as well, but with
+irq set to 0 and platform_data set to NULL.
+
+Note that in the next kernel release the functions v4l2_i2c_new_subdev,
+v4l2_i2c_new_probed_subdev and v4l2_i2c_new_probed_subdev_addr will all be
+replaced by a single v4l2_i2c_new_subdev that is identical to
+v4l2_i2c_new_subdev_cfg but without the irq and platform_data arguments.
struct video_device
-------------------
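As a hedged sketch of the v4l2_i2c_new_subdev_board variant described above
(check include/media/v4l2-common.h for the exact prototype in your tree; the
chip name "chipid", module name "module_foo", address 0x10, irq_number and
my_pdata below are placeholders):

struct i2c_board_info info;

memset(&info, 0, sizeof(info));
strlcpy(info.type, "chipid", sizeof(info.type));
info.addr = 0x10;
info.irq = irq_number;           /* handed to s_config if the subdev has it */
info.platform_data = &my_pdata;  /* likewise */

sd = v4l2_i2c_new_subdev_board(v4l2_dev, adapter, "module_foo", &info, NULL);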
diff --git a/MAINTAINERS b/MAINTAINERS
index dc226e7..5ef3a16 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -653,6 +653,8 @@
L: openezx-devel@lists.openezx.org (subscribers-only)
W: http://www.openezx.org/
S: Maintained
+T: topgit git://git.openezx.org/openezx.git
+F: arch/arm/mach-pxa/ezx.c
ARM/FARADAY FA526 PORT
P: Paulius Zaleckas
@@ -774,11 +776,25 @@
M: philipp.zabel@gmail.com
S: Maintained
+ARM/MIOA701 MACHINE SUPPORT
+P: Robert Jarzmik
+M: robert.jarzmik@free.fr
+L: linux-arm-kernel@lists.arm.linux.org.uk (subscribers-only)
+F: arch/arm/mach-pxa/mioa701.c
+S: Maintained
+
ARM/NEC MOBILEPRO 900/c MACHINE SUPPORT
P: Michael Petchkovsky
M: mkpetch@internode.on.net
S: Maintained
+ARM/OPENMOKO NEO FREERUNNER (GTA02) MACHINE SUPPORT
+P: Nelson Castillo
+M: arhuaco@freaks-unidos.net
+L: openmoko-kernel@lists.openmoko.org (subscribers-only)
+W: http://wiki.openmoko.org/wiki/Neo_FreeRunner
+S: Supported
+
ARM/TOSA MACHINE SUPPORT
P: Dmitry Eremin-Solenikov
M: dbaryshkov@gmail.com
@@ -792,6 +808,12 @@
W: http://hackndev.com
S: Maintained
+ARM/PALM TREO 680 SUPPORT
+P: Tomas Cech
+M: sleep_walker@suse.cz
+W: http://hackndev.com
+S: Maintained
+
ARM/PALMZ72 SUPPORT
P: Sergey Lapin
M: slapin@ossfans.org
@@ -1010,6 +1032,13 @@
S: Maintained
F: drivers/mmc/host/at91_mci.c
+ATMEL AT91 / AT32 MCI DRIVER
+P: Nicolas Ferre
+M: nicolas.ferre@atmel.com
+S: Maintained
+F: drivers/mmc/host/atmel-mci.c
+F: drivers/mmc/host/atmel-mci-regs.h
+
ATMEL AT91 / AT32 SERIAL DRIVER
P: Haavard Skinnemoen
M: hskinnemoen@atmel.com
@@ -2795,10 +2824,10 @@
F: drivers/scsi/ips.*
IDE SUBSYSTEM
-P: Bartlomiej Zolnierkiewicz
-M: bzolnier@gmail.com
+P: David S. Miller
+M: davem@davemloft.net
L: linux-ide@vger.kernel.org
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/bart/ide-2.6.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/davem/ide-2.6.git
S: Maintained
F: Documentation/ide/
F: drivers/ide/
@@ -2846,7 +2875,7 @@
M: slapin@ossfans.org
L: linux-zigbee-devel@lists.sourceforge.net
W: http://apps.sourceforge.net/trac/linux-zigbee
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/lumag/lowpan.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/lowpan/lowpan.git
S: Maintained
F: net/ieee802154/
F: drivers/ieee802154/
@@ -3223,7 +3252,6 @@
S: Maintained
F: fs/jffs2/
F: include/linux/jffs2.h
-F: include/mtd/jffs2-user.h
JOURNALLING LAYER FOR BLOCK DEVICES (JBD)
P: Stephen Tweedie
@@ -5094,6 +5122,13 @@
S: Maintained
F: drivers/mmc/host/sdhci.*
+SECURE DIGITAL HOST CONTROLLER INTERFACE (SDHCI) SAMSUNG DRIVER
+P: Ben Dooks
+M: ben-linux@fluff.org
+L: sdhci-devel@lists.ossman.eu
+S: Maintained
+F: drivers/mmc/host/sdhci-s3c.c
+
SECURITY SUBSYSTEM
P: James Morris
M: jmorris@namei.org
@@ -6216,6 +6251,14 @@
F: Documentation/i2c/busses/i2c-viapro
F: drivers/i2c/busses/i2c-viapro.c
+VIA SD/MMC CARD CONTROLLER DRIVER
+P: Joseph Chan
+M: JosephChan@via.com.tw
+P: Harald Welte
+M: HaraldWelte@viatech.com
+S: Maintained
+F: drivers/mmc/host/via-sdmmc.c
+
VIA UNICHROME(PRO)/CHROME9 FRAMEBUFFER DRIVER
P: Joseph Chan
M: JosephChan@via.com.tw
diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c
index 4829f96..00a31de 100644
--- a/arch/alpha/mm/fault.c
+++ b/arch/alpha/mm/fault.c
@@ -146,7 +146,7 @@
/* If for any reason at all we couldn't handle the fault,
make sure we exit gracefully rather than endlessly redo
the fault. */
- fault = handle_mm_fault(mm, vma, address, cause > 0);
+ fault = handle_mm_fault(mm, vma, address, cause > 0 ? FAULT_FLAG_WRITE : 0);
up_read(&mm->mmap_sem);
if (unlikely(fault & VM_FAULT_ERROR)) {
if (fault & VM_FAULT_OOM)
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 2947510..aef63c8 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -1241,7 +1241,7 @@
menu "CPU Power Management"
-if (ARCH_SA1100 || ARCH_INTEGRATOR || ARCH_OMAP || ARCH_PXA)
+if (ARCH_SA1100 || ARCH_INTEGRATOR || ARCH_OMAP || ARCH_PXA || ARCH_S3C64XX)
source "drivers/cpufreq/Kconfig"
@@ -1272,6 +1272,10 @@
default y
select CPU_FREQ_DEFAULT_GOV_USERSPACE
+config CPU_FREQ_S3C64XX
+ bool "CPUfreq support for Samsung S3C64XX CPUs"
+ depends on CPU_FREQ && CPU_S3C6410
+
endif
source "drivers/cpuidle/Kconfig"
diff --git a/arch/arm/boot/compressed/head.S b/arch/arm/boot/compressed/head.S
index 01d49be..4515728 100644
--- a/arch/arm/boot/compressed/head.S
+++ b/arch/arm/boot/compressed/head.S
@@ -674,6 +674,15 @@
b __armv4_mmu_cache_off
b __armv5tej_mmu_cache_flush
+#ifdef CONFIG_CPU_FEROCEON_OLD_ID
+ /* this conflicts with the standard ARMv5TE entry */
+ .long 0x41009260 @ Old Feroceon
+ .long 0xff00fff0
+ b __armv4_mmu_cache_on
+ b __armv4_mmu_cache_off
+ b __armv5tej_mmu_cache_flush
+#endif
+
.word 0x66015261 @ FA526
.word 0xff01fff1
b __fa526_cache_on
diff --git a/arch/arm/common/gic.c b/arch/arm/common/gic.c
index 664c7b8..337741f 100644
--- a/arch/arm/common/gic.c
+++ b/arch/arm/common/gic.c
@@ -117,7 +117,7 @@
u32 val;
spin_lock(&irq_controller_lock);
- irq_desc[irq].cpu = cpu;
+ irq_desc[irq].node = cpu;
val = readl(reg) & ~(0xff << shift);
val |= 1 << (cpu + shift);
writel(val, reg);
diff --git a/arch/arm/common/vic.c b/arch/arm/common/vic.c
index 887c6eb..6ed8983 100644
--- a/arch/arm/common/vic.c
+++ b/arch/arm/common/vic.c
@@ -229,14 +229,18 @@
{
struct vic_device *v = vic_from_irq(irq);
unsigned int off = irq & 31;
+ u32 bit = 1 << off;
if (!v)
return -EINVAL;
+ if (!(bit & v->resume_sources))
+ return -EINVAL;
+
if (on)
- v->resume_irqs |= 1 << off;
+ v->resume_irqs |= bit;
else
- v->resume_irqs &= ~(1 << off);
+ v->resume_irqs &= ~bit;
return 0;
}
diff --git a/arch/arm/configs/mini2440_defconfig b/arch/arm/configs/mini2440_defconfig
new file mode 100644
index 0000000..e49ed40
--- /dev/null
+++ b/arch/arm/configs/mini2440_defconfig
@@ -0,0 +1,2097 @@
+#
+# Automatically generated make config: don't edit
+# Linux kernel version: 2.6.30-rc6
+# Wed May 20 12:29:51 2009
+#
+CONFIG_ARM=y
+CONFIG_HAVE_PWM=y
+CONFIG_SYS_SUPPORTS_APM_EMULATION=y
+CONFIG_GENERIC_GPIO=y
+# CONFIG_GENERIC_TIME is not set
+# CONFIG_GENERIC_CLOCKEVENTS is not set
+CONFIG_MMU=y
+CONFIG_NO_IOPORT=y
+CONFIG_GENERIC_HARDIRQS=y
+CONFIG_STACKTRACE_SUPPORT=y
+CONFIG_HAVE_LATENCYTOP_SUPPORT=y
+CONFIG_LOCKDEP_SUPPORT=y
+CONFIG_TRACE_IRQFLAGS_SUPPORT=y
+CONFIG_HARDIRQS_SW_RESEND=y
+CONFIG_GENERIC_IRQ_PROBE=y
+CONFIG_RWSEM_GENERIC_SPINLOCK=y
+# CONFIG_ARCH_HAS_ILOG2_U32 is not set
+# CONFIG_ARCH_HAS_ILOG2_U64 is not set
+CONFIG_GENERIC_HWEIGHT=y
+CONFIG_GENERIC_CALIBRATE_DELAY=y
+CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y
+CONFIG_VECTORS_BASE=0xffff0000
+CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
+
+#
+# General setup
+#
+CONFIG_EXPERIMENTAL=y
+CONFIG_BROKEN_ON_SMP=y
+CONFIG_INIT_ENV_ARG_LIMIT=32
+CONFIG_LOCALVERSION=""
+# CONFIG_LOCALVERSION_AUTO is not set
+CONFIG_SWAP=y
+CONFIG_SYSVIPC=y
+CONFIG_SYSVIPC_SYSCTL=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_POSIX_MQUEUE_SYSCTL=y
+# CONFIG_BSD_PROCESS_ACCT is not set
+# CONFIG_TASKSTATS is not set
+# CONFIG_AUDIT is not set
+
+#
+# RCU Subsystem
+#
+CONFIG_CLASSIC_RCU=y
+# CONFIG_TREE_RCU is not set
+# CONFIG_PREEMPT_RCU is not set
+# CONFIG_TREE_RCU_TRACE is not set
+# CONFIG_PREEMPT_RCU_TRACE is not set
+# CONFIG_IKCONFIG is not set
+CONFIG_LOG_BUF_SHIFT=17
+# CONFIG_GROUP_SCHED is not set
+# CONFIG_CGROUPS is not set
+# CONFIG_SYSFS_DEPRECATED_V2 is not set
+CONFIG_RELAY=y
+CONFIG_NAMESPACES=y
+CONFIG_UTS_NS=y
+CONFIG_IPC_NS=y
+# CONFIG_USER_NS is not set
+# CONFIG_PID_NS is not set
+# CONFIG_NET_NS is not set
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_INITRAMFS_SOURCE=""
+CONFIG_RD_GZIP=y
+CONFIG_RD_BZIP2=y
+CONFIG_RD_LZMA=y
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
+CONFIG_SYSCTL=y
+CONFIG_ANON_INODES=y
+# CONFIG_EMBEDDED is not set
+CONFIG_UID16=y
+CONFIG_SYSCTL_SYSCALL=y
+CONFIG_KALLSYMS=y
+# CONFIG_KALLSYMS_ALL is not set
+# CONFIG_KALLSYMS_EXTRA_PASS is not set
+CONFIG_STRIP_ASM_SYMS=y
+CONFIG_HOTPLUG=y
+CONFIG_PRINTK=y
+CONFIG_BUG=y
+CONFIG_ELF_CORE=y
+CONFIG_BASE_FULL=y
+CONFIG_FUTEX=y
+CONFIG_EPOLL=y
+CONFIG_SIGNALFD=y
+CONFIG_TIMERFD=y
+CONFIG_EVENTFD=y
+CONFIG_SHMEM=y
+CONFIG_AIO=y
+CONFIG_VM_EVENT_COUNTERS=y
+CONFIG_SLUB_DEBUG=y
+# CONFIG_COMPAT_BRK is not set
+# CONFIG_SLAB is not set
+CONFIG_SLUB=y
+# CONFIG_SLOB is not set
+# CONFIG_PROFILING is not set
+# CONFIG_MARKERS is not set
+CONFIG_HAVE_OPROFILE=y
+# CONFIG_KPROBES is not set
+CONFIG_HAVE_KPROBES=y
+CONFIG_HAVE_KRETPROBES=y
+CONFIG_HAVE_CLK=y
+# CONFIG_SLOW_WORK is not set
+CONFIG_HAVE_GENERIC_DMA_COHERENT=y
+CONFIG_SLABINFO=y
+CONFIG_RT_MUTEXES=y
+CONFIG_BASE_SMALL=0
+CONFIG_MODULES=y
+CONFIG_MODULE_FORCE_LOAD=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODULE_FORCE_UNLOAD=y
+# CONFIG_MODVERSIONS is not set
+# CONFIG_MODULE_SRCVERSION_ALL is not set
+CONFIG_BLOCK=y
+CONFIG_LBD=y
+# CONFIG_BLK_DEV_BSG is not set
+CONFIG_BLK_DEV_INTEGRITY=y
+
+#
+# IO Schedulers
+#
+CONFIG_IOSCHED_NOOP=y
+CONFIG_IOSCHED_AS=y
+CONFIG_IOSCHED_DEADLINE=y
+CONFIG_IOSCHED_CFQ=y
+CONFIG_DEFAULT_AS=y
+# CONFIG_DEFAULT_DEADLINE is not set
+# CONFIG_DEFAULT_CFQ is not set
+# CONFIG_DEFAULT_NOOP is not set
+CONFIG_DEFAULT_IOSCHED="anticipatory"
+CONFIG_FREEZER=y
+
+#
+# System Type
+#
+# CONFIG_ARCH_AAEC2000 is not set
+# CONFIG_ARCH_INTEGRATOR is not set
+# CONFIG_ARCH_REALVIEW is not set
+# CONFIG_ARCH_VERSATILE is not set
+# CONFIG_ARCH_AT91 is not set
+# CONFIG_ARCH_CLPS711X is not set
+# CONFIG_ARCH_EBSA110 is not set
+# CONFIG_ARCH_EP93XX is not set
+# CONFIG_ARCH_GEMINI is not set
+# CONFIG_ARCH_FOOTBRIDGE is not set
+# CONFIG_ARCH_NETX is not set
+# CONFIG_ARCH_H720X is not set
+# CONFIG_ARCH_IMX is not set
+# CONFIG_ARCH_IOP13XX is not set
+# CONFIG_ARCH_IOP32X is not set
+# CONFIG_ARCH_IOP33X is not set
+# CONFIG_ARCH_IXP23XX is not set
+# CONFIG_ARCH_IXP2000 is not set
+# CONFIG_ARCH_IXP4XX is not set
+# CONFIG_ARCH_L7200 is not set
+# CONFIG_ARCH_KIRKWOOD is not set
+# CONFIG_ARCH_KS8695 is not set
+# CONFIG_ARCH_NS9XXX is not set
+# CONFIG_ARCH_LOKI is not set
+# CONFIG_ARCH_MV78XX0 is not set
+# CONFIG_ARCH_MXC is not set
+# CONFIG_ARCH_ORION5X is not set
+# CONFIG_ARCH_PNX4008 is not set
+# CONFIG_ARCH_PXA is not set
+# CONFIG_ARCH_MMP is not set
+# CONFIG_ARCH_RPC is not set
+# CONFIG_ARCH_SA1100 is not set
+CONFIG_ARCH_S3C2410=y
+# CONFIG_ARCH_S3C64XX is not set
+# CONFIG_ARCH_SHARK is not set
+# CONFIG_ARCH_LH7A40X is not set
+# CONFIG_ARCH_DAVINCI is not set
+# CONFIG_ARCH_OMAP is not set
+# CONFIG_ARCH_MSM is not set
+# CONFIG_ARCH_W90X900 is not set
+CONFIG_PLAT_S3C24XX=y
+CONFIG_S3C2410_CLOCK=y
+CONFIG_CPU_S3C244X=y
+CONFIG_S3C24XX_PWM=y
+CONFIG_S3C24XX_GPIO_EXTRA=0
+CONFIG_S3C2410_DMA=y
+# CONFIG_S3C2410_DMA_DEBUG is not set
+CONFIG_S3C24XX_ADC=y
+CONFIG_PLAT_S3C=y
+CONFIG_CPU_LLSERIAL_S3C2440_ONLY=y
+CONFIG_CPU_LLSERIAL_S3C2440=y
+
+#
+# Boot options
+#
+# CONFIG_S3C_BOOT_WATCHDOG is not set
+# CONFIG_S3C_BOOT_ERROR_RESET is not set
+CONFIG_S3C_BOOT_UART_FORCE_FIFO=y
+
+#
+# Power management
+#
+# CONFIG_S3C2410_PM_DEBUG is not set
+# CONFIG_S3C2410_PM_CHECK is not set
+CONFIG_S3C_LOWLEVEL_UART_PORT=0
+CONFIG_S3C_GPIO_SPACE=0
+
+#
+# S3C2400 Machines
+#
+CONFIG_S3C2410_PM=y
+CONFIG_S3C2410_GPIO=y
+
+#
+# S3C2410 Machines
+#
+# CONFIG_ARCH_SMDK2410 is not set
+# CONFIG_ARCH_H1940 is not set
+# CONFIG_MACH_N30 is not set
+# CONFIG_ARCH_BAST is not set
+# CONFIG_MACH_OTOM is not set
+# CONFIG_MACH_AML_M5900 is not set
+# CONFIG_MACH_TCT_HAMMER is not set
+# CONFIG_MACH_VR1000 is not set
+# CONFIG_MACH_QT2410 is not set
+
+#
+# S3C2412 Machines
+#
+# CONFIG_MACH_JIVE is not set
+# CONFIG_MACH_SMDK2413 is not set
+# CONFIG_MACH_SMDK2412 is not set
+# CONFIG_MACH_VSTMS is not set
+CONFIG_CPU_S3C2440=y
+CONFIG_S3C2440_DMA=y
+
+#
+# S3C2440 Machines
+#
+# CONFIG_MACH_ANUBIS is not set
+# CONFIG_MACH_OSIRIS is not set
+# CONFIG_MACH_RX3715 is not set
+# CONFIG_ARCH_S3C2440 is not set
+# CONFIG_MACH_NEXCODER_2440 is not set
+# CONFIG_MACH_AT2440EVB is not set
+CONFIG_MACH_MINI2440=y
+
+#
+# S3C2442 Machines
+#
+
+#
+# S3C2443 Machines
+#
+# CONFIG_MACH_SMDK2443 is not set
+
+#
+# Processor Type
+#
+CONFIG_CPU_32=y
+CONFIG_CPU_ARM920T=y
+CONFIG_CPU_32v4T=y
+CONFIG_CPU_ABRT_EV4T=y
+CONFIG_CPU_PABRT_NOIFAR=y
+CONFIG_CPU_CACHE_V4WT=y
+CONFIG_CPU_CACHE_VIVT=y
+CONFIG_CPU_COPY_V4WB=y
+CONFIG_CPU_TLB_V4WBI=y
+CONFIG_CPU_CP15=y
+CONFIG_CPU_CP15_MMU=y
+
+#
+# Processor Features
+#
+CONFIG_ARM_THUMB=y
+# CONFIG_CPU_ICACHE_DISABLE is not set
+# CONFIG_CPU_DCACHE_DISABLE is not set
+# CONFIG_CPU_DCACHE_WRITETHROUGH is not set
+# CONFIG_OUTER_CACHE is not set
+
+#
+# Bus support
+#
+# CONFIG_PCI_SYSCALL is not set
+# CONFIG_ARCH_SUPPORTS_MSI is not set
+# CONFIG_PCCARD is not set
+
+#
+# Kernel Features
+#
+CONFIG_VMSPLIT_3G=y
+# CONFIG_VMSPLIT_2G is not set
+# CONFIG_VMSPLIT_1G is not set
+CONFIG_PAGE_OFFSET=0xC0000000
+# CONFIG_PREEMPT is not set
+CONFIG_HZ=200
+CONFIG_AEABI=y
+# CONFIG_OABI_COMPAT is not set
+CONFIG_ARCH_FLATMEM_HAS_HOLES=y
+# CONFIG_ARCH_SPARSEMEM_DEFAULT is not set
+# CONFIG_ARCH_SELECT_MEMORY_MODEL is not set
+# CONFIG_HIGHMEM is not set
+CONFIG_SELECT_MEMORY_MODEL=y
+CONFIG_FLATMEM_MANUAL=y
+# CONFIG_DISCONTIGMEM_MANUAL is not set
+# CONFIG_SPARSEMEM_MANUAL is not set
+CONFIG_FLATMEM=y
+CONFIG_FLAT_NODE_MEM_MAP=y
+CONFIG_PAGEFLAGS_EXTENDED=y
+CONFIG_SPLIT_PTLOCK_CPUS=4096
+# CONFIG_PHYS_ADDR_T_64BIT is not set
+CONFIG_ZONE_DMA_FLAG=0
+CONFIG_VIRT_TO_BUS=y
+CONFIG_UNEVICTABLE_LRU=y
+CONFIG_HAVE_MLOCK=y
+CONFIG_HAVE_MLOCKED_PAGE_BIT=y
+CONFIG_ALIGNMENT_TRAP=y
+
+#
+# Boot options
+#
+CONFIG_ZBOOT_ROM_TEXT=0
+CONFIG_ZBOOT_ROM_BSS=0
+CONFIG_CMDLINE=""
+# CONFIG_XIP_KERNEL is not set
+CONFIG_KEXEC=y
+CONFIG_ATAGS_PROC=y
+
+#
+# CPU Power Management
+#
+CONFIG_CPU_IDLE=y
+CONFIG_CPU_IDLE_GOV_LADDER=y
+
+#
+# Floating point emulation
+#
+
+#
+# At least one emulation must be selected
+#
+
+#
+# Userspace binary formats
+#
+CONFIG_BINFMT_ELF=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_HAVE_AOUT=y
+CONFIG_BINFMT_AOUT=m
+CONFIG_BINFMT_MISC=m
+
+#
+# Power management options
+#
+CONFIG_PM=y
+# CONFIG_PM_DEBUG is not set
+CONFIG_PM_SLEEP=y
+CONFIG_SUSPEND=y
+CONFIG_SUSPEND_FREEZER=y
+CONFIG_APM_EMULATION=y
+CONFIG_ARCH_SUSPEND_POSSIBLE=y
+CONFIG_NET=y
+
+#
+# Networking options
+#
+CONFIG_PACKET=y
+CONFIG_PACKET_MMAP=y
+CONFIG_UNIX=y
+CONFIG_XFRM=y
+CONFIG_XFRM_USER=m
+# CONFIG_XFRM_SUB_POLICY is not set
+# CONFIG_XFRM_MIGRATE is not set
+# CONFIG_XFRM_STATISTICS is not set
+CONFIG_NET_KEY=m
+# CONFIG_NET_KEY_MIGRATE is not set
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_ASK_IP_FIB_HASH=y
+# CONFIG_IP_FIB_TRIE is not set
+CONFIG_IP_FIB_HASH=y
+CONFIG_IP_MULTIPLE_TABLES=y
+CONFIG_IP_ROUTE_MULTIPATH=y
+CONFIG_IP_ROUTE_VERBOSE=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_IP_PNP_RARP=y
+# CONFIG_NET_IPIP is not set
+# CONFIG_NET_IPGRE is not set
+CONFIG_IP_MROUTE=y
+CONFIG_IP_PIMSM_V1=y
+CONFIG_IP_PIMSM_V2=y
+# CONFIG_ARPD is not set
+CONFIG_SYN_COOKIES=y
+# CONFIG_INET_AH is not set
+# CONFIG_INET_ESP is not set
+# CONFIG_INET_IPCOMP is not set
+# CONFIG_INET_XFRM_TUNNEL is not set
+# CONFIG_INET_TUNNEL is not set
+# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
+# CONFIG_INET_XFRM_MODE_TUNNEL is not set
+# CONFIG_INET_XFRM_MODE_BEET is not set
+# CONFIG_INET_LRO is not set
+CONFIG_INET_DIAG=m
+CONFIG_INET_TCP_DIAG=m
+# CONFIG_TCP_CONG_ADVANCED is not set
+CONFIG_TCP_CONG_CUBIC=y
+CONFIG_DEFAULT_TCP_CONG="cubic"
+# CONFIG_TCP_MD5SIG is not set
+# CONFIG_IPV6 is not set
+# CONFIG_NETWORK_SECMARK is not set
+CONFIG_NETFILTER=y
+# CONFIG_NETFILTER_DEBUG is not set
+CONFIG_NETFILTER_ADVANCED=y
+CONFIG_BRIDGE_NETFILTER=y
+
+#
+# Core Netfilter Configuration
+#
+# CONFIG_NETFILTER_NETLINK_QUEUE is not set
+# CONFIG_NETFILTER_NETLINK_LOG is not set
+# CONFIG_NF_CONNTRACK is not set
+# CONFIG_NETFILTER_XTABLES is not set
+# CONFIG_IP_VS is not set
+
+#
+# IP: Netfilter Configuration
+#
+# CONFIG_NF_DEFRAG_IPV4 is not set
+# CONFIG_IP_NF_QUEUE is not set
+# CONFIG_IP_NF_IPTABLES is not set
+# CONFIG_IP_NF_ARPTABLES is not set
+# CONFIG_BRIDGE_NF_EBTABLES is not set
+# CONFIG_IP_DCCP is not set
+# CONFIG_IP_SCTP is not set
+# CONFIG_TIPC is not set
+# CONFIG_ATM is not set
+CONFIG_STP=m
+CONFIG_GARP=m
+CONFIG_BRIDGE=m
+# CONFIG_NET_DSA is not set
+CONFIG_VLAN_8021Q=m
+CONFIG_VLAN_8021Q_GVRP=y
+# CONFIG_DECNET is not set
+CONFIG_LLC=m
+# CONFIG_LLC2 is not set
+# CONFIG_IPX is not set
+# CONFIG_ATALK is not set
+# CONFIG_X25 is not set
+# CONFIG_LAPB is not set
+# CONFIG_ECONET is not set
+# CONFIG_WAN_ROUTER is not set
+# CONFIG_PHONET is not set
+# CONFIG_NET_SCHED is not set
+# CONFIG_DCB is not set
+
+#
+# Network testing
+#
+CONFIG_NET_PKTGEN=m
+# CONFIG_HAMRADIO is not set
+# CONFIG_CAN is not set
+# CONFIG_IRDA is not set
+CONFIG_BT=m
+CONFIG_BT_L2CAP=m
+CONFIG_BT_SCO=m
+CONFIG_BT_RFCOMM=m
+CONFIG_BT_RFCOMM_TTY=y
+CONFIG_BT_BNEP=m
+CONFIG_BT_BNEP_MC_FILTER=y
+CONFIG_BT_BNEP_PROTO_FILTER=y
+CONFIG_BT_HIDP=m
+
+#
+# Bluetooth device drivers
+#
+CONFIG_BT_HCIBTUSB=m
+CONFIG_BT_HCIBTSDIO=m
+CONFIG_BT_HCIUART=m
+CONFIG_BT_HCIUART_H4=y
+CONFIG_BT_HCIUART_BCSP=y
+CONFIG_BT_HCIUART_LL=y
+CONFIG_BT_HCIBCM203X=m
+CONFIG_BT_HCIBPA10X=m
+CONFIG_BT_HCIBFUSB=m
+CONFIG_BT_HCIVHCI=m
+# CONFIG_AF_RXRPC is not set
+CONFIG_FIB_RULES=y
+CONFIG_WIRELESS=y
+CONFIG_CFG80211=m
+CONFIG_CFG80211_REG_DEBUG=y
+CONFIG_WIRELESS_OLD_REGULATORY=y
+CONFIG_WIRELESS_EXT=y
+CONFIG_WIRELESS_EXT_SYSFS=y
+CONFIG_LIB80211=m
+CONFIG_LIB80211_CRYPT_WEP=m
+CONFIG_LIB80211_CRYPT_CCMP=m
+CONFIG_LIB80211_CRYPT_TKIP=m
+# CONFIG_LIB80211_DEBUG is not set
+CONFIG_MAC80211=m
+
+#
+# Rate control algorithm selection
+#
+CONFIG_MAC80211_RC_MINSTREL=y
+# CONFIG_MAC80211_RC_DEFAULT_PID is not set
+CONFIG_MAC80211_RC_DEFAULT_MINSTREL=y
+CONFIG_MAC80211_RC_DEFAULT="minstrel"
+CONFIG_MAC80211_MESH=y
+CONFIG_MAC80211_LEDS=y
+# CONFIG_MAC80211_DEBUGFS is not set
+# CONFIG_MAC80211_DEBUG_MENU is not set
+# CONFIG_WIMAX is not set
+# CONFIG_RFKILL is not set
+# CONFIG_NET_9P is not set
+
+#
+# Device Drivers
+#
+
+#
+# Generic Driver Options
+#
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_STANDALONE=y
+CONFIG_PREVENT_FIRMWARE_BUILD=y
+CONFIG_FW_LOADER=y
+# CONFIG_FIRMWARE_IN_KERNEL is not set
+CONFIG_EXTRA_FIRMWARE=""
+# CONFIG_DEBUG_DRIVER is not set
+# CONFIG_DEBUG_DEVRES is not set
+# CONFIG_SYS_HYPERVISOR is not set
+CONFIG_CONNECTOR=m
+CONFIG_MTD=y
+# CONFIG_MTD_DEBUG is not set
+CONFIG_MTD_CONCAT=y
+CONFIG_MTD_PARTITIONS=y
+# CONFIG_MTD_TESTS is not set
+# CONFIG_MTD_REDBOOT_PARTS is not set
+CONFIG_MTD_CMDLINE_PARTS=y
+# CONFIG_MTD_AFS_PARTS is not set
+# CONFIG_MTD_AR7_PARTS is not set
+
+#
+# User Modules And Translation Layers
+#
+CONFIG_MTD_CHAR=y
+CONFIG_MTD_BLKDEVS=y
+CONFIG_MTD_BLOCK=y
+CONFIG_FTL=y
+CONFIG_NFTL=y
+CONFIG_NFTL_RW=y
+CONFIG_INFTL=y
+CONFIG_RFD_FTL=y
+# CONFIG_SSFDC is not set
+# CONFIG_MTD_OOPS is not set
+
+#
+# RAM/ROM/Flash chip drivers
+#
+CONFIG_MTD_CFI=y
+CONFIG_MTD_JEDECPROBE=y
+CONFIG_MTD_GEN_PROBE=y
+# CONFIG_MTD_CFI_ADV_OPTIONS is not set
+CONFIG_MTD_MAP_BANK_WIDTH_1=y
+CONFIG_MTD_MAP_BANK_WIDTH_2=y
+CONFIG_MTD_MAP_BANK_WIDTH_4=y
+# CONFIG_MTD_MAP_BANK_WIDTH_8 is not set
+# CONFIG_MTD_MAP_BANK_WIDTH_16 is not set
+# CONFIG_MTD_MAP_BANK_WIDTH_32 is not set
+CONFIG_MTD_CFI_I1=y
+CONFIG_MTD_CFI_I2=y
+# CONFIG_MTD_CFI_I4 is not set
+# CONFIG_MTD_CFI_I8 is not set
+# CONFIG_MTD_CFI_INTELEXT is not set
+CONFIG_MTD_CFI_AMDSTD=y
+CONFIG_MTD_CFI_STAA=y
+CONFIG_MTD_CFI_UTIL=y
+CONFIG_MTD_RAM=y
+CONFIG_MTD_ROM=y
+# CONFIG_MTD_ABSENT is not set
+
+#
+# Mapping drivers for chip access
+#
+# CONFIG_MTD_COMPLEX_MAPPINGS is not set
+# CONFIG_MTD_PHYSMAP is not set
+# CONFIG_MTD_ARM_INTEGRATOR is not set
+# CONFIG_MTD_IMPA7 is not set
+# CONFIG_MTD_PLATRAM is not set
+
+#
+# Self-contained MTD device drivers
+#
+# CONFIG_MTD_DATAFLASH is not set
+# CONFIG_MTD_M25P80 is not set
+# CONFIG_MTD_SLRAM is not set
+# CONFIG_MTD_PHRAM is not set
+# CONFIG_MTD_MTDRAM is not set
+# CONFIG_MTD_BLOCK2MTD is not set
+
+#
+# Disk-On-Chip Device Drivers
+#
+# CONFIG_MTD_DOC2000 is not set
+# CONFIG_MTD_DOC2001 is not set
+# CONFIG_MTD_DOC2001PLUS is not set
+CONFIG_MTD_NAND=y
+CONFIG_MTD_NAND_VERIFY_WRITE=y
+# CONFIG_MTD_NAND_ECC_SMC is not set
+# CONFIG_MTD_NAND_MUSEUM_IDS is not set
+# CONFIG_MTD_NAND_GPIO is not set
+CONFIG_MTD_NAND_IDS=y
+CONFIG_MTD_NAND_S3C2410=y
+# CONFIG_MTD_NAND_S3C2410_DEBUG is not set
+# CONFIG_MTD_NAND_S3C2410_HWECC is not set
+# CONFIG_MTD_NAND_S3C2410_CLKSTOP is not set
+# CONFIG_MTD_NAND_DISKONCHIP is not set
+# CONFIG_MTD_NAND_NANDSIM is not set
+CONFIG_MTD_NAND_PLATFORM=y
+# CONFIG_MTD_ALAUDA is not set
+# CONFIG_MTD_ONENAND is not set
+
+#
+# LPDDR flash memory drivers
+#
+CONFIG_MTD_LPDDR=y
+CONFIG_MTD_QINFO_PROBE=y
+
+#
+# UBI - Unsorted block images
+#
+# CONFIG_MTD_UBI is not set
+# CONFIG_PARPORT is not set
+CONFIG_BLK_DEV=y
+# CONFIG_BLK_DEV_COW_COMMON is not set
+CONFIG_BLK_DEV_LOOP=m
+# CONFIG_BLK_DEV_CRYPTOLOOP is not set
+CONFIG_BLK_DEV_NBD=m
+# CONFIG_BLK_DEV_UB is not set
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_COUNT=16
+CONFIG_BLK_DEV_RAM_SIZE=65536
+# CONFIG_BLK_DEV_XIP is not set
+CONFIG_CDROM_PKTCDVD=m
+CONFIG_CDROM_PKTCDVD_BUFFERS=8
+# CONFIG_CDROM_PKTCDVD_WCACHE is not set
+# CONFIG_ATA_OVER_ETH is not set
+CONFIG_MISC_DEVICES=y
+# CONFIG_ICS932S401 is not set
+# CONFIG_ENCLOSURE_SERVICES is not set
+# CONFIG_ISL29003 is not set
+# CONFIG_C2PORT is not set
+
+#
+# EEPROM support
+#
+CONFIG_EEPROM_AT24=y
+# CONFIG_EEPROM_AT25 is not set
+# CONFIG_EEPROM_LEGACY is not set
+# CONFIG_EEPROM_93CX6 is not set
+CONFIG_HAVE_IDE=y
+# CONFIG_IDE is not set
+
+#
+# SCSI device support
+#
+# CONFIG_RAID_ATTRS is not set
+CONFIG_SCSI=m
+CONFIG_SCSI_DMA=y
+# CONFIG_SCSI_TGT is not set
+# CONFIG_SCSI_NETLINK is not set
+# CONFIG_SCSI_PROC_FS is not set
+
+#
+# SCSI support type (disk, tape, CD-ROM)
+#
+CONFIG_BLK_DEV_SD=m
+# CONFIG_CHR_DEV_ST is not set
+# CONFIG_CHR_DEV_OSST is not set
+# CONFIG_BLK_DEV_SR is not set
+CONFIG_CHR_DEV_SG=m
+# CONFIG_CHR_DEV_SCH is not set
+
+#
+# Some SCSI devices (e.g. CD jukebox) support multiple LUNs
+#
+# CONFIG_SCSI_MULTI_LUN is not set
+# CONFIG_SCSI_CONSTANTS is not set
+# CONFIG_SCSI_LOGGING is not set
+# CONFIG_SCSI_SCAN_ASYNC is not set
+CONFIG_SCSI_WAIT_SCAN=m
+
+#
+# SCSI Transports
+#
+# CONFIG_SCSI_SPI_ATTRS is not set
+# CONFIG_SCSI_FC_ATTRS is not set
+# CONFIG_SCSI_ISCSI_ATTRS is not set
+# CONFIG_SCSI_SAS_LIBSAS is not set
+# CONFIG_SCSI_SRP_ATTRS is not set
+# CONFIG_SCSI_LOWLEVEL is not set
+# CONFIG_SCSI_DH is not set
+# CONFIG_SCSI_OSD_INITIATOR is not set
+# CONFIG_ATA is not set
+# CONFIG_MD is not set
+CONFIG_NETDEVICES=y
+CONFIG_COMPAT_NET_DEV_OPS=y
+# CONFIG_DUMMY is not set
+# CONFIG_BONDING is not set
+# CONFIG_MACVLAN is not set
+# CONFIG_EQUALIZER is not set
+CONFIG_TUN=m
+# CONFIG_VETH is not set
+# CONFIG_PHYLIB is not set
+CONFIG_NET_ETHERNET=y
+CONFIG_MII=y
+# CONFIG_AX88796 is not set
+# CONFIG_SMC91X is not set
+CONFIG_DM9000=y
+CONFIG_DM9000_DEBUGLEVEL=4
+# CONFIG_DM9000_FORCE_SIMPLE_PHY_POLL is not set
+# CONFIG_ENC28J60 is not set
+# CONFIG_ETHOC is not set
+# CONFIG_SMC911X is not set
+# CONFIG_SMSC911X is not set
+# CONFIG_DNET is not set
+# CONFIG_IBM_NEW_EMAC_ZMII is not set
+# CONFIG_IBM_NEW_EMAC_RGMII is not set
+# CONFIG_IBM_NEW_EMAC_TAH is not set
+# CONFIG_IBM_NEW_EMAC_EMAC4 is not set
+# CONFIG_IBM_NEW_EMAC_NO_FLOW_CTRL is not set
+# CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set
+# CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set
+# CONFIG_B44 is not set
+# CONFIG_NETDEV_1000 is not set
+# CONFIG_NETDEV_10000 is not set
+
+#
+# Wireless LAN
+#
+# CONFIG_WLAN_PRE80211 is not set
+CONFIG_WLAN_80211=y
+CONFIG_LIBERTAS=m
+# CONFIG_LIBERTAS_USB is not set
+CONFIG_LIBERTAS_SDIO=m
+# CONFIG_LIBERTAS_SPI is not set
+# CONFIG_LIBERTAS_DEBUG is not set
+# CONFIG_LIBERTAS_THINFIRM is not set
+# CONFIG_AT76C50X_USB is not set
+# CONFIG_USB_ZD1201 is not set
+# CONFIG_USB_NET_RNDIS_WLAN is not set
+# CONFIG_RTL8187 is not set
+# CONFIG_MAC80211_HWSIM is not set
+# CONFIG_P54_COMMON is not set
+# CONFIG_AR9170_USB is not set
+CONFIG_HOSTAP=m
+CONFIG_HOSTAP_FIRMWARE=y
+CONFIG_HOSTAP_FIRMWARE_NVRAM=y
+# CONFIG_B43 is not set
+# CONFIG_B43LEGACY is not set
+CONFIG_ZD1211RW=m
+CONFIG_ZD1211RW_DEBUG=y
+# CONFIG_RT2X00 is not set
+
+#
+# Enable WiMAX (Networking options) to see the WiMAX drivers
+#
+
+#
+# USB Network Adapters
+#
+# CONFIG_USB_CATC is not set
+# CONFIG_USB_KAWETH is not set
+# CONFIG_USB_PEGASUS is not set
+# CONFIG_USB_RTL8150 is not set
+# CONFIG_USB_USBNET is not set
+# CONFIG_WAN is not set
+CONFIG_PPP=m
+CONFIG_PPP_MULTILINK=y
+CONFIG_PPP_FILTER=y
+CONFIG_PPP_ASYNC=m
+CONFIG_PPP_SYNC_TTY=m
+CONFIG_PPP_DEFLATE=m
+CONFIG_PPP_BSDCOMP=m
+CONFIG_PPP_MPPE=m
+# CONFIG_PPPOE is not set
+# CONFIG_PPPOL2TP is not set
+# CONFIG_SLIP is not set
+CONFIG_SLHC=m
+# CONFIG_NETCONSOLE is not set
+# CONFIG_NETPOLL is not set
+# CONFIG_NET_POLL_CONTROLLER is not set
+# CONFIG_ISDN is not set
+
+#
+# Input device support
+#
+CONFIG_INPUT=y
+CONFIG_INPUT_FF_MEMLESS=y
+# CONFIG_INPUT_POLLDEV is not set
+
+#
+# Userland interfaces
+#
+CONFIG_INPUT_MOUSEDEV=y
+CONFIG_INPUT_MOUSEDEV_PSAUX=y
+CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024
+CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768
+# CONFIG_INPUT_JOYDEV is not set
+CONFIG_INPUT_EVDEV=y
+CONFIG_INPUT_EVBUG=m
+
+#
+# Input Device Drivers
+#
+CONFIG_INPUT_KEYBOARD=y
+# CONFIG_KEYBOARD_ATKBD is not set
+# CONFIG_KEYBOARD_SUNKBD is not set
+# CONFIG_KEYBOARD_LKKBD is not set
+# CONFIG_KEYBOARD_XTKBD is not set
+# CONFIG_KEYBOARD_NEWTON is not set
+# CONFIG_KEYBOARD_STOWAWAY is not set
+CONFIG_KEYBOARD_GPIO=y
+CONFIG_INPUT_MOUSE=y
+CONFIG_MOUSE_PS2=y
+CONFIG_MOUSE_PS2_ALPS=y
+CONFIG_MOUSE_PS2_LOGIPS2PP=y
+CONFIG_MOUSE_PS2_SYNAPTICS=y
+CONFIG_MOUSE_PS2_TRACKPOINT=y
+# CONFIG_MOUSE_PS2_ELANTECH is not set
+# CONFIG_MOUSE_PS2_TOUCHKIT is not set
+# CONFIG_MOUSE_SERIAL is not set
+# CONFIG_MOUSE_APPLETOUCH is not set
+# CONFIG_MOUSE_BCM5974 is not set
+# CONFIG_MOUSE_VSXXXAA is not set
+# CONFIG_MOUSE_GPIO is not set
+# CONFIG_INPUT_JOYSTICK is not set
+# CONFIG_INPUT_TABLET is not set
+CONFIG_INPUT_TOUCHSCREEN=y
+# CONFIG_TOUCHSCREEN_ADS7846 is not set
+# CONFIG_TOUCHSCREEN_AD7877 is not set
+# CONFIG_TOUCHSCREEN_AD7879_I2C is not set
+# CONFIG_TOUCHSCREEN_AD7879_SPI is not set
+# CONFIG_TOUCHSCREEN_AD7879 is not set
+# CONFIG_TOUCHSCREEN_FUJITSU is not set
+# CONFIG_TOUCHSCREEN_GUNZE is not set
+# CONFIG_TOUCHSCREEN_ELO is not set
+# CONFIG_TOUCHSCREEN_WACOM_W8001 is not set
+# CONFIG_TOUCHSCREEN_MTOUCH is not set
+# CONFIG_TOUCHSCREEN_INEXIO is not set
+# CONFIG_TOUCHSCREEN_MK712 is not set
+# CONFIG_TOUCHSCREEN_PENMOUNT is not set
+# CONFIG_TOUCHSCREEN_TOUCHRIGHT is not set
+# CONFIG_TOUCHSCREEN_TOUCHWIN is not set
+# CONFIG_TOUCHSCREEN_USB_COMPOSITE is not set
+# CONFIG_TOUCHSCREEN_TOUCHIT213 is not set
+# CONFIG_TOUCHSCREEN_TSC2007 is not set
+# CONFIG_INPUT_MISC is not set
+
+#
+# Hardware I/O ports
+#
+CONFIG_SERIO=y
+CONFIG_SERIO_SERPORT=y
+CONFIG_SERIO_LIBPS2=y
+CONFIG_SERIO_RAW=y
+# CONFIG_GAMEPORT is not set
+
+#
+# Character devices
+#
+CONFIG_VT=y
+CONFIG_CONSOLE_TRANSLATIONS=y
+CONFIG_VT_CONSOLE=y
+CONFIG_HW_CONSOLE=y
+CONFIG_VT_HW_CONSOLE_BINDING=y
+CONFIG_DEVKMEM=y
+# CONFIG_SERIAL_NONSTANDARD is not set
+
+#
+# Serial drivers
+#
+# CONFIG_SERIAL_8250 is not set
+
+#
+# Non-8250 serial port support
+#
+CONFIG_SERIAL_SAMSUNG=y
+CONFIG_SERIAL_SAMSUNG_UARTS=3
+CONFIG_SERIAL_SAMSUNG_CONSOLE=y
+CONFIG_SERIAL_S3C2440=y
+# CONFIG_SERIAL_MAX3100 is not set
+CONFIG_SERIAL_CORE=y
+CONFIG_SERIAL_CORE_CONSOLE=y
+CONFIG_UNIX98_PTYS=y
+# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
+CONFIG_LEGACY_PTYS=y
+CONFIG_LEGACY_PTY_COUNT=128
+CONFIG_IPMI_HANDLER=m
+# CONFIG_IPMI_PANIC_EVENT is not set
+CONFIG_IPMI_DEVICE_INTERFACE=m
+CONFIG_IPMI_SI=m
+CONFIG_IPMI_WATCHDOG=m
+CONFIG_IPMI_POWEROFF=m
+CONFIG_HW_RANDOM=y
+# CONFIG_HW_RANDOM_TIMERIOMEM is not set
+# CONFIG_R3964 is not set
+# CONFIG_RAW_DRIVER is not set
+# CONFIG_TCG_TPM is not set
+CONFIG_I2C=y
+CONFIG_I2C_BOARDINFO=y
+CONFIG_I2C_CHARDEV=y
+CONFIG_I2C_HELPER_AUTO=y
+CONFIG_I2C_ALGOBIT=y
+
+#
+# I2C Hardware Bus support
+#
+
+#
+# I2C system bus drivers (mostly embedded / system-on-chip)
+#
+# CONFIG_I2C_GPIO is not set
+# CONFIG_I2C_OCORES is not set
+CONFIG_I2C_S3C2410=y
+CONFIG_I2C_SIMTEC=y
+
+#
+# External I2C/SMBus adapter drivers
+#
+# CONFIG_I2C_PARPORT_LIGHT is not set
+# CONFIG_I2C_TAOS_EVM is not set
+# CONFIG_I2C_TINY_USB is not set
+
+#
+# Other I2C/SMBus bus drivers
+#
+# CONFIG_I2C_PCA_PLATFORM is not set
+# CONFIG_I2C_STUB is not set
+
+#
+# Miscellaneous I2C Chip support
+#
+# CONFIG_DS1682 is not set
+# CONFIG_SENSORS_PCF8574 is not set
+# CONFIG_PCF8575 is not set
+# CONFIG_SENSORS_PCA9539 is not set
+# CONFIG_SENSORS_MAX6875 is not set
+CONFIG_SENSORS_TSL2550=m
+# CONFIG_I2C_DEBUG_CORE is not set
+# CONFIG_I2C_DEBUG_ALGO is not set
+# CONFIG_I2C_DEBUG_BUS is not set
+# CONFIG_I2C_DEBUG_CHIP is not set
+CONFIG_SPI=y
+# CONFIG_SPI_DEBUG is not set
+CONFIG_SPI_MASTER=y
+
+#
+# SPI Master Controller Drivers
+#
+CONFIG_SPI_BITBANG=y
+# CONFIG_SPI_GPIO is not set
+CONFIG_SPI_S3C24XX=y
+# CONFIG_SPI_S3C24XX_GPIO is not set
+
+#
+# SPI Protocol Masters
+#
+CONFIG_SPI_SPIDEV=y
+# CONFIG_SPI_TLE62X0 is not set
+CONFIG_ARCH_REQUIRE_GPIOLIB=y
+CONFIG_GPIOLIB=y
+# CONFIG_DEBUG_GPIO is not set
+CONFIG_GPIO_SYSFS=y
+
+#
+# Memory mapped GPIO expanders:
+#
+
+#
+# I2C GPIO expanders:
+#
+# CONFIG_GPIO_MAX732X is not set
+# CONFIG_GPIO_PCA953X is not set
+# CONFIG_GPIO_PCF857X is not set
+
+#
+# PCI GPIO expanders:
+#
+
+#
+# SPI GPIO expanders:
+#
+# CONFIG_GPIO_MAX7301 is not set
+# CONFIG_GPIO_MCP23S08 is not set
+# CONFIG_W1 is not set
+# CONFIG_POWER_SUPPLY is not set
+CONFIG_HWMON=y
+# CONFIG_HWMON_VID is not set
+# CONFIG_SENSORS_AD7414 is not set
+# CONFIG_SENSORS_AD7418 is not set
+# CONFIG_SENSORS_ADCXX is not set
+# CONFIG_SENSORS_ADM1021 is not set
+# CONFIG_SENSORS_ADM1025 is not set
+# CONFIG_SENSORS_ADM1026 is not set
+# CONFIG_SENSORS_ADM1029 is not set
+# CONFIG_SENSORS_ADM1031 is not set
+# CONFIG_SENSORS_ADM9240 is not set
+# CONFIG_SENSORS_ADT7462 is not set
+# CONFIG_SENSORS_ADT7470 is not set
+# CONFIG_SENSORS_ADT7473 is not set
+# CONFIG_SENSORS_ADT7475 is not set
+# CONFIG_SENSORS_ATXP1 is not set
+# CONFIG_SENSORS_DS1621 is not set
+# CONFIG_SENSORS_F71805F is not set
+# CONFIG_SENSORS_F71882FG is not set
+# CONFIG_SENSORS_F75375S is not set
+# CONFIG_SENSORS_G760A is not set
+# CONFIG_SENSORS_GL518SM is not set
+# CONFIG_SENSORS_GL520SM is not set
+# CONFIG_SENSORS_IBMAEM is not set
+# CONFIG_SENSORS_IBMPEX is not set
+# CONFIG_SENSORS_IT87 is not set
+# CONFIG_SENSORS_LM63 is not set
+# CONFIG_SENSORS_LM70 is not set
+CONFIG_SENSORS_LM75=y
+# CONFIG_SENSORS_LM77 is not set
+# CONFIG_SENSORS_LM78 is not set
+# CONFIG_SENSORS_LM80 is not set
+# CONFIG_SENSORS_LM83 is not set
+# CONFIG_SENSORS_LM85 is not set
+# CONFIG_SENSORS_LM87 is not set
+# CONFIG_SENSORS_LM90 is not set
+# CONFIG_SENSORS_LM92 is not set
+# CONFIG_SENSORS_LM93 is not set
+# CONFIG_SENSORS_LTC4215 is not set
+# CONFIG_SENSORS_LTC4245 is not set
+# CONFIG_SENSORS_LM95241 is not set
+# CONFIG_SENSORS_MAX1111 is not set
+# CONFIG_SENSORS_MAX1619 is not set
+# CONFIG_SENSORS_MAX6650 is not set
+# CONFIG_SENSORS_PC87360 is not set
+# CONFIG_SENSORS_PC87427 is not set
+# CONFIG_SENSORS_PCF8591 is not set
+# CONFIG_SENSORS_SHT15 is not set
+# CONFIG_SENSORS_DME1737 is not set
+# CONFIG_SENSORS_SMSC47M1 is not set
+# CONFIG_SENSORS_SMSC47M192 is not set
+# CONFIG_SENSORS_SMSC47B397 is not set
+# CONFIG_SENSORS_ADS7828 is not set
+# CONFIG_SENSORS_THMC50 is not set
+# CONFIG_SENSORS_VT1211 is not set
+# CONFIG_SENSORS_W83781D is not set
+# CONFIG_SENSORS_W83791D is not set
+# CONFIG_SENSORS_W83792D is not set
+# CONFIG_SENSORS_W83793 is not set
+# CONFIG_SENSORS_W83L785TS is not set
+# CONFIG_SENSORS_W83L786NG is not set
+# CONFIG_SENSORS_W83627HF is not set
+# CONFIG_SENSORS_W83627EHF is not set
+# CONFIG_SENSORS_LIS3_SPI is not set
+# CONFIG_HWMON_DEBUG_CHIP is not set
+CONFIG_THERMAL=m
+# CONFIG_THERMAL_HWMON is not set
+CONFIG_WATCHDOG=y
+# CONFIG_WATCHDOG_NOWAYOUT is not set
+
+#
+# Watchdog Device Drivers
+#
+# CONFIG_SOFT_WATCHDOG is not set
+CONFIG_S3C2410_WATCHDOG=y
+
+#
+# USB-based Watchdog Cards
+#
+# CONFIG_USBPCWATCHDOG is not set
+CONFIG_SSB_POSSIBLE=y
+
+#
+# Sonics Silicon Backplane
+#
+# CONFIG_SSB is not set
+
+#
+# Multifunction device drivers
+#
+# CONFIG_MFD_CORE is not set
+# CONFIG_MFD_SM501 is not set
+# CONFIG_MFD_ASIC3 is not set
+# CONFIG_HTC_EGPIO is not set
+# CONFIG_HTC_PASIC3 is not set
+# CONFIG_TPS65010 is not set
+# CONFIG_TWL4030_CORE is not set
+# CONFIG_MFD_TMIO is not set
+# CONFIG_MFD_T7L66XB is not set
+# CONFIG_MFD_TC6387XB is not set
+# CONFIG_MFD_TC6393XB is not set
+# CONFIG_PMIC_DA903X is not set
+# CONFIG_MFD_WM8400 is not set
+# CONFIG_MFD_WM8350_I2C is not set
+# CONFIG_MFD_PCF50633 is not set
+
+#
+# Multimedia devices
+#
+
+#
+# Multimedia core support
+#
+CONFIG_VIDEO_DEV=m
+CONFIG_VIDEO_V4L2_COMMON=m
+CONFIG_VIDEO_ALLOW_V4L1=y
+CONFIG_VIDEO_V4L1_COMPAT=y
+CONFIG_DVB_CORE=m
+CONFIG_VIDEO_MEDIA=m
+
+#
+# Multimedia drivers
+#
+# CONFIG_MEDIA_ATTACH is not set
+CONFIG_MEDIA_TUNER=m
+# CONFIG_MEDIA_TUNER_CUSTOMISE is not set
+CONFIG_MEDIA_TUNER_SIMPLE=m
+CONFIG_MEDIA_TUNER_TDA8290=m
+CONFIG_MEDIA_TUNER_TDA9887=m
+CONFIG_MEDIA_TUNER_TEA5761=m
+CONFIG_MEDIA_TUNER_TEA5767=m
+CONFIG_MEDIA_TUNER_MT20XX=m
+CONFIG_MEDIA_TUNER_XC2028=m
+CONFIG_MEDIA_TUNER_XC5000=m
+CONFIG_MEDIA_TUNER_MC44S803=m
+CONFIG_VIDEO_V4L2=m
+CONFIG_VIDEO_V4L1=m
+CONFIG_VIDEOBUF_GEN=m
+CONFIG_VIDEO_CAPTURE_DRIVERS=y
+# CONFIG_VIDEO_ADV_DEBUG is not set
+# CONFIG_VIDEO_FIXED_MINOR_RANGES is not set
+CONFIG_VIDEO_HELPER_CHIPS_AUTO=y
+# CONFIG_VIDEO_VIVI is not set
+# CONFIG_VIDEO_CPIA is not set
+# CONFIG_VIDEO_CPIA2 is not set
+# CONFIG_VIDEO_SAA5246A is not set
+# CONFIG_VIDEO_SAA5249 is not set
+# CONFIG_VIDEO_AU0828 is not set
+CONFIG_SOC_CAMERA=m
+# CONFIG_SOC_CAMERA_MT9M001 is not set
+# CONFIG_SOC_CAMERA_MT9M111 is not set
+# CONFIG_SOC_CAMERA_MT9T031 is not set
+# CONFIG_SOC_CAMERA_MT9V022 is not set
+# CONFIG_SOC_CAMERA_TW9910 is not set
+CONFIG_SOC_CAMERA_PLATFORM=m
+# CONFIG_SOC_CAMERA_OV772X is not set
+# CONFIG_VIDEO_SH_MOBILE_CEU is not set
+CONFIG_V4L_USB_DRIVERS=y
+# CONFIG_USB_VIDEO_CLASS is not set
+CONFIG_USB_VIDEO_CLASS_INPUT_EVDEV=y
+CONFIG_USB_GSPCA=m
+# CONFIG_USB_M5602 is not set
+# CONFIG_USB_STV06XX is not set
+# CONFIG_USB_GSPCA_CONEX is not set
+# CONFIG_USB_GSPCA_ETOMS is not set
+# CONFIG_USB_GSPCA_FINEPIX is not set
+# CONFIG_USB_GSPCA_MARS is not set
+# CONFIG_USB_GSPCA_MR97310A is not set
+# CONFIG_USB_GSPCA_OV519 is not set
+# CONFIG_USB_GSPCA_OV534 is not set
+# CONFIG_USB_GSPCA_PAC207 is not set
+# CONFIG_USB_GSPCA_PAC7311 is not set
+# CONFIG_USB_GSPCA_SONIXB is not set
+# CONFIG_USB_GSPCA_SONIXJ is not set
+# CONFIG_USB_GSPCA_SPCA500 is not set
+# CONFIG_USB_GSPCA_SPCA501 is not set
+# CONFIG_USB_GSPCA_SPCA505 is not set
+# CONFIG_USB_GSPCA_SPCA506 is not set
+# CONFIG_USB_GSPCA_SPCA508 is not set
+# CONFIG_USB_GSPCA_SPCA561 is not set
+# CONFIG_USB_GSPCA_SQ905 is not set
+# CONFIG_USB_GSPCA_SQ905C is not set
+# CONFIG_USB_GSPCA_STK014 is not set
+# CONFIG_USB_GSPCA_SUNPLUS is not set
+# CONFIG_USB_GSPCA_T613 is not set
+# CONFIG_USB_GSPCA_TV8532 is not set
+# CONFIG_USB_GSPCA_VC032X is not set
+CONFIG_USB_GSPCA_ZC3XX=m
+# CONFIG_VIDEO_PVRUSB2 is not set
+# CONFIG_VIDEO_HDPVR is not set
+# CONFIG_VIDEO_EM28XX is not set
+# CONFIG_VIDEO_CX231XX is not set
+# CONFIG_VIDEO_USBVISION is not set
+# CONFIG_USB_VICAM is not set
+# CONFIG_USB_IBMCAM is not set
+# CONFIG_USB_KONICAWC is not set
+# CONFIG_USB_QUICKCAM_MESSENGER is not set
+# CONFIG_USB_ET61X251 is not set
+# CONFIG_VIDEO_OVCAMCHIP is not set
+# CONFIG_USB_OV511 is not set
+# CONFIG_USB_SE401 is not set
+# CONFIG_USB_SN9C102 is not set
+# CONFIG_USB_STV680 is not set
+# CONFIG_USB_ZC0301 is not set
+# CONFIG_USB_PWC is not set
+# CONFIG_USB_PWC_INPUT_EVDEV is not set
+# CONFIG_USB_ZR364XX is not set
+# CONFIG_USB_STKWEBCAM is not set
+# CONFIG_USB_S2255 is not set
+CONFIG_RADIO_ADAPTERS=y
+# CONFIG_USB_DSBR is not set
+# CONFIG_USB_SI470X is not set
+# CONFIG_USB_MR800 is not set
+# CONFIG_RADIO_TEA5764 is not set
+# CONFIG_DVB_DYNAMIC_MINORS is not set
+CONFIG_DVB_CAPTURE_DRIVERS=y
+# CONFIG_TTPCI_EEPROM is not set
+
+#
+# Supported USB Adapters
+#
+# CONFIG_DVB_USB is not set
+# CONFIG_DVB_SIANO_SMS1XXX is not set
+
+#
+# Supported FlexCopII (B2C2) Adapters
+#
+# CONFIG_DVB_B2C2_FLEXCOP is not set
+
+#
+# Supported DVB Frontends
+#
+# CONFIG_DVB_FE_CUSTOMISE is not set
+# CONFIG_DAB is not set
+
+#
+# Graphics support
+#
+# CONFIG_VGASTATE is not set
+CONFIG_VIDEO_OUTPUT_CONTROL=y
+CONFIG_FB=y
+CONFIG_FIRMWARE_EDID=y
+# CONFIG_FB_DDC is not set
+# CONFIG_FB_BOOT_VESA_SUPPORT is not set
+CONFIG_FB_CFB_FILLRECT=y
+CONFIG_FB_CFB_COPYAREA=y
+CONFIG_FB_CFB_IMAGEBLIT=y
+# CONFIG_FB_CFB_REV_PIXELS_IN_BYTE is not set
+# CONFIG_FB_SYS_FILLRECT is not set
+# CONFIG_FB_SYS_COPYAREA is not set
+# CONFIG_FB_SYS_IMAGEBLIT is not set
+# CONFIG_FB_FOREIGN_ENDIAN is not set
+# CONFIG_FB_SYS_FOPS is not set
+# CONFIG_FB_SVGALIB is not set
+# CONFIG_FB_MACMODES is not set
+# CONFIG_FB_BACKLIGHT is not set
+CONFIG_FB_MODE_HELPERS=y
+CONFIG_FB_TILEBLITTING=y
+
+#
+# Frame buffer hardware drivers
+#
+# CONFIG_FB_UVESA is not set
+# CONFIG_FB_S1D13XXX is not set
+CONFIG_FB_S3C2410=y
+# CONFIG_FB_S3C2410_DEBUG is not set
+# CONFIG_FB_VIRTUAL is not set
+# CONFIG_FB_METRONOME is not set
+# CONFIG_FB_MB862XX is not set
+# CONFIG_FB_BROADSHEET is not set
+CONFIG_BACKLIGHT_LCD_SUPPORT=y
+CONFIG_LCD_CLASS_DEVICE=y
+# CONFIG_LCD_LTV350QV is not set
+# CONFIG_LCD_ILI9320 is not set
+# CONFIG_LCD_TDO24M is not set
+# CONFIG_LCD_VGG2432A4 is not set
+CONFIG_LCD_PLATFORM=y
+CONFIG_BACKLIGHT_CLASS_DEVICE=y
+# CONFIG_BACKLIGHT_GENERIC is not set
+CONFIG_BACKLIGHT_PWM=y
+
+#
+# Display device support
+#
+CONFIG_DISPLAY_SUPPORT=y
+
+#
+# Display hardware drivers
+#
+
+#
+# Console display driver support
+#
+# CONFIG_VGA_CONSOLE is not set
+CONFIG_DUMMY_CONSOLE=y
+CONFIG_FRAMEBUFFER_CONSOLE=y
+CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y
+CONFIG_FRAMEBUFFER_CONSOLE_ROTATION=y
+CONFIG_FONTS=y
+CONFIG_FONT_8x8=y
+# CONFIG_FONT_8x16 is not set
+# CONFIG_FONT_6x11 is not set
+# CONFIG_FONT_7x14 is not set
+# CONFIG_FONT_PEARL_8x8 is not set
+# CONFIG_FONT_ACORN_8x8 is not set
+CONFIG_FONT_MINI_4x6=y
+# CONFIG_FONT_SUN8x16 is not set
+# CONFIG_FONT_SUN12x22 is not set
+# CONFIG_FONT_10x18 is not set
+CONFIG_LOGO=y
+# CONFIG_LOGO_LINUX_MONO is not set
+# CONFIG_LOGO_LINUX_VGA16 is not set
+CONFIG_LOGO_LINUX_CLUT224=y
+CONFIG_SOUND=y
+CONFIG_SOUND_OSS_CORE=y
+CONFIG_SND=y
+CONFIG_SND_TIMER=y
+CONFIG_SND_PCM=y
+CONFIG_SND_HWDEP=m
+CONFIG_SND_RAWMIDI=m
+CONFIG_SND_JACK=y
+CONFIG_SND_SEQUENCER=m
+CONFIG_SND_SEQ_DUMMY=m
+CONFIG_SND_OSSEMUL=y
+CONFIG_SND_MIXER_OSS=m
+CONFIG_SND_PCM_OSS=m
+CONFIG_SND_PCM_OSS_PLUGINS=y
+CONFIG_SND_SEQUENCER_OSS=y
+CONFIG_SND_DYNAMIC_MINORS=y
+CONFIG_SND_SUPPORT_OLD_API=y
+CONFIG_SND_VERBOSE_PROCFS=y
+# CONFIG_SND_VERBOSE_PRINTK is not set
+# CONFIG_SND_DEBUG is not set
+# CONFIG_SND_DRIVERS is not set
+# CONFIG_SND_ARM is not set
+# CONFIG_SND_SPI is not set
+CONFIG_SND_USB=y
+CONFIG_SND_USB_AUDIO=m
+CONFIG_SND_USB_CAIAQ=m
+CONFIG_SND_USB_CAIAQ_INPUT=y
+CONFIG_SND_SOC=y
+CONFIG_SND_S3C24XX_SOC=y
+CONFIG_SND_S3C24XX_SOC_I2S=y
+# CONFIG_SND_S3C24XX_SOC_LN2440SBC_ALC650 is not set
+CONFIG_SND_S3C24XX_SOC_S3C24XX_UDA134X=y
+CONFIG_SND_SOC_I2C_AND_SPI=y
+# CONFIG_SND_SOC_ALL_CODECS is not set
+CONFIG_SND_SOC_L3=y
+CONFIG_SND_SOC_UDA134X=y
+# CONFIG_SOUND_PRIME is not set
+CONFIG_HID_SUPPORT=y
+CONFIG_HID=y
+# CONFIG_HID_DEBUG is not set
+CONFIG_HIDRAW=y
+
+#
+# USB Input Devices
+#
+CONFIG_USB_HID=y
+CONFIG_HID_PID=y
+CONFIG_USB_HIDDEV=y
+
+#
+# Special HID drivers
+#
+CONFIG_HID_A4TECH=y
+CONFIG_HID_APPLE=y
+CONFIG_HID_BELKIN=y
+CONFIG_HID_CHERRY=y
+CONFIG_HID_CHICONY=y
+CONFIG_HID_CYPRESS=y
+# CONFIG_DRAGONRISE_FF is not set
+CONFIG_HID_EZKEY=y
+CONFIG_HID_KYE=y
+CONFIG_HID_GYRATION=y
+CONFIG_HID_KENSINGTON=y
+CONFIG_HID_LOGITECH=y
+# CONFIG_LOGITECH_FF is not set
+# CONFIG_LOGIRUMBLEPAD2_FF is not set
+CONFIG_HID_MICROSOFT=y
+CONFIG_HID_MONTEREY=y
+CONFIG_HID_NTRIG=y
+CONFIG_HID_PANTHERLORD=y
+# CONFIG_PANTHERLORD_FF is not set
+CONFIG_HID_PETALYNX=y
+CONFIG_HID_SAMSUNG=y
+CONFIG_HID_SONY=y
+CONFIG_HID_SUNPLUS=y
+# CONFIG_GREENASIA_FF is not set
+CONFIG_HID_TOPSEED=y
+# CONFIG_THRUSTMASTER_FF is not set
+# CONFIG_ZEROPLUS_FF is not set
+CONFIG_USB_SUPPORT=y
+CONFIG_USB_ARCH_HAS_HCD=y
+CONFIG_USB_ARCH_HAS_OHCI=y
+# CONFIG_USB_ARCH_HAS_EHCI is not set
+CONFIG_USB=y
+# CONFIG_USB_DEBUG is not set
+# CONFIG_USB_ANNOUNCE_NEW_DEVICES is not set
+
+#
+# Miscellaneous USB options
+#
+CONFIG_USB_DEVICEFS=y
+# CONFIG_USB_DEVICE_CLASS is not set
+# CONFIG_USB_DYNAMIC_MINORS is not set
+# CONFIG_USB_SUSPEND is not set
+# CONFIG_USB_OTG is not set
+# CONFIG_USB_MON is not set
+# CONFIG_USB_WUSB is not set
+# CONFIG_USB_WUSB_CBAF is not set
+
+#
+# USB Host Controller Drivers
+#
+# CONFIG_USB_C67X00_HCD is not set
+# CONFIG_USB_OXU210HP_HCD is not set
+# CONFIG_USB_ISP116X_HCD is not set
+# CONFIG_USB_ISP1760_HCD is not set
+CONFIG_USB_OHCI_HCD=y
+# CONFIG_USB_OHCI_BIG_ENDIAN_DESC is not set
+# CONFIG_USB_OHCI_BIG_ENDIAN_MMIO is not set
+CONFIG_USB_OHCI_LITTLE_ENDIAN=y
+# CONFIG_USB_SL811_HCD is not set
+# CONFIG_USB_R8A66597_HCD is not set
+# CONFIG_USB_HWA_HCD is not set
+# CONFIG_USB_MUSB_HDRC is not set
+# CONFIG_USB_GADGET_MUSB_HDRC is not set
+
+#
+# USB Device Class drivers
+#
+CONFIG_USB_ACM=m
+# CONFIG_USB_PRINTER is not set
+CONFIG_USB_WDM=m
+# CONFIG_USB_TMC is not set
+
+#
+# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may
+# also be needed; see USB_STORAGE Help for more info
+#
+CONFIG_USB_STORAGE=m
+# CONFIG_USB_STORAGE_DEBUG is not set
+CONFIG_USB_STORAGE_DATAFAB=m
+# CONFIG_USB_STORAGE_FREECOM is not set
+CONFIG_USB_STORAGE_ISD200=m
+CONFIG_USB_STORAGE_USBAT=m
+CONFIG_USB_STORAGE_SDDR09=m
+CONFIG_USB_STORAGE_SDDR55=m
+CONFIG_USB_STORAGE_JUMPSHOT=m
+CONFIG_USB_STORAGE_ALAUDA=m
+# CONFIG_USB_STORAGE_ONETOUCH is not set
+# CONFIG_USB_STORAGE_KARMA is not set
+# CONFIG_USB_STORAGE_CYPRESS_ATACB is not set
+CONFIG_USB_LIBUSUAL=y
+
+#
+# USB Imaging devices
+#
+# CONFIG_USB_MDC800 is not set
+# CONFIG_USB_MICROTEK is not set
+
+#
+# USB port drivers
+#
+CONFIG_USB_SERIAL=m
+# CONFIG_USB_EZUSB is not set
+# CONFIG_USB_SERIAL_GENERIC is not set
+# CONFIG_USB_SERIAL_AIRCABLE is not set
+# CONFIG_USB_SERIAL_ARK3116 is not set
+# CONFIG_USB_SERIAL_BELKIN is not set
+# CONFIG_USB_SERIAL_CH341 is not set
+# CONFIG_USB_SERIAL_WHITEHEAT is not set
+# CONFIG_USB_SERIAL_DIGI_ACCELEPORT is not set
+CONFIG_USB_SERIAL_CP210X=m
+# CONFIG_USB_SERIAL_CYPRESS_M8 is not set
+# CONFIG_USB_SERIAL_EMPEG is not set
+CONFIG_USB_SERIAL_FTDI_SIO=m
+# CONFIG_USB_SERIAL_FUNSOFT is not set
+# CONFIG_USB_SERIAL_VISOR is not set
+# CONFIG_USB_SERIAL_IPAQ is not set
+# CONFIG_USB_SERIAL_IR is not set
+# CONFIG_USB_SERIAL_EDGEPORT is not set
+# CONFIG_USB_SERIAL_EDGEPORT_TI is not set
+# CONFIG_USB_SERIAL_GARMIN is not set
+# CONFIG_USB_SERIAL_IPW is not set
+# CONFIG_USB_SERIAL_IUU is not set
+# CONFIG_USB_SERIAL_KEYSPAN_PDA is not set
+# CONFIG_USB_SERIAL_KEYSPAN is not set
+# CONFIG_USB_SERIAL_KLSI is not set
+# CONFIG_USB_SERIAL_KOBIL_SCT is not set
+# CONFIG_USB_SERIAL_MCT_U232 is not set
+# CONFIG_USB_SERIAL_MOS7720 is not set
+# CONFIG_USB_SERIAL_MOS7840 is not set
+# CONFIG_USB_SERIAL_MOTOROLA is not set
+# CONFIG_USB_SERIAL_NAVMAN is not set
+# CONFIG_USB_SERIAL_PL2303 is not set
+# CONFIG_USB_SERIAL_OTI6858 is not set
+# CONFIG_USB_SERIAL_QUALCOMM is not set
+CONFIG_USB_SERIAL_SPCP8X5=m
+# CONFIG_USB_SERIAL_HP4X is not set
+# CONFIG_USB_SERIAL_SAFE is not set
+# CONFIG_USB_SERIAL_SIEMENS_MPI is not set
+# CONFIG_USB_SERIAL_SIERRAWIRELESS is not set
+# CONFIG_USB_SERIAL_SYMBOL is not set
+# CONFIG_USB_SERIAL_TI is not set
+# CONFIG_USB_SERIAL_CYBERJACK is not set
+# CONFIG_USB_SERIAL_XIRCOM is not set
+# CONFIG_USB_SERIAL_OPTION is not set
+# CONFIG_USB_SERIAL_OMNINET is not set
+# CONFIG_USB_SERIAL_OPTICON is not set
+# CONFIG_USB_SERIAL_DEBUG is not set
+
+#
+# USB Miscellaneous drivers
+#
+# CONFIG_USB_EMI62 is not set
+# CONFIG_USB_EMI26 is not set
+# CONFIG_USB_ADUTUX is not set
+# CONFIG_USB_SEVSEG is not set
+# CONFIG_USB_RIO500 is not set
+# CONFIG_USB_LEGOTOWER is not set
+# CONFIG_USB_LCD is not set
+# CONFIG_USB_BERRY_CHARGE is not set
+# CONFIG_USB_LED is not set
+# CONFIG_USB_CYPRESS_CY7C63 is not set
+# CONFIG_USB_CYTHERM is not set
+# CONFIG_USB_IDMOUSE is not set
+# CONFIG_USB_FTDI_ELAN is not set
+# CONFIG_USB_APPLEDISPLAY is not set
+# CONFIG_USB_LD is not set
+# CONFIG_USB_TRANCEVIBRATOR is not set
+# CONFIG_USB_IOWARRIOR is not set
+# CONFIG_USB_TEST is not set
+# CONFIG_USB_ISIGHTFW is not set
+# CONFIG_USB_VST is not set
+CONFIG_USB_GADGET=y
+# CONFIG_USB_GADGET_DEBUG is not set
+# CONFIG_USB_GADGET_DEBUG_FILES is not set
+# CONFIG_USB_GADGET_DEBUG_FS is not set
+CONFIG_USB_GADGET_VBUS_DRAW=2
+CONFIG_USB_GADGET_SELECTED=y
+# CONFIG_USB_GADGET_AT91 is not set
+# CONFIG_USB_GADGET_ATMEL_USBA is not set
+# CONFIG_USB_GADGET_FSL_USB2 is not set
+# CONFIG_USB_GADGET_LH7A40X is not set
+# CONFIG_USB_GADGET_OMAP is not set
+# CONFIG_USB_GADGET_PXA25X is not set
+# CONFIG_USB_GADGET_PXA27X is not set
+CONFIG_USB_GADGET_S3C2410=y
+CONFIG_USB_S3C2410=y
+# CONFIG_USB_S3C2410_DEBUG is not set
+# CONFIG_USB_GADGET_IMX is not set
+# CONFIG_USB_GADGET_M66592 is not set
+# CONFIG_USB_GADGET_AMD5536UDC is not set
+# CONFIG_USB_GADGET_FSL_QE is not set
+# CONFIG_USB_GADGET_CI13XXX is not set
+# CONFIG_USB_GADGET_NET2280 is not set
+# CONFIG_USB_GADGET_GOKU is not set
+# CONFIG_USB_GADGET_DUMMY_HCD is not set
+# CONFIG_USB_GADGET_DUALSPEED is not set
+CONFIG_USB_ZERO=m
+CONFIG_USB_ETH=m
+CONFIG_USB_ETH_RNDIS=y
+CONFIG_USB_GADGETFS=m
+CONFIG_USB_FILE_STORAGE=m
+# CONFIG_USB_FILE_STORAGE_TEST is not set
+CONFIG_USB_G_SERIAL=m
+# CONFIG_USB_MIDI_GADGET is not set
+# CONFIG_USB_G_PRINTER is not set
+CONFIG_USB_CDC_COMPOSITE=m
+
+#
+# OTG and related infrastructure
+#
+# CONFIG_USB_GPIO_VBUS is not set
+# CONFIG_NOP_USB_XCEIV is not set
+CONFIG_MMC=y
+# CONFIG_MMC_DEBUG is not set
+# CONFIG_MMC_UNSAFE_RESUME is not set
+
+#
+# MMC/SD/SDIO Card Drivers
+#
+CONFIG_MMC_BLOCK=y
+CONFIG_MMC_BLOCK_BOUNCE=y
+CONFIG_SDIO_UART=y
+# CONFIG_MMC_TEST is not set
+
+#
+# MMC/SD/SDIO Host Controller Drivers
+#
+CONFIG_MMC_SDHCI=y
+CONFIG_MMC_SPI=y
+CONFIG_MMC_S3C=y
+# CONFIG_MEMSTICK is not set
+# CONFIG_ACCESSIBILITY is not set
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=y
+
+#
+# LED drivers
+#
+CONFIG_LEDS_S3C24XX=y
+# CONFIG_LEDS_PCA9532 is not set
+CONFIG_LEDS_GPIO=y
+CONFIG_LEDS_GPIO_PLATFORM=y
+# CONFIG_LEDS_LP5521 is not set
+# CONFIG_LEDS_PCA955X is not set
+# CONFIG_LEDS_DAC124S085 is not set
+# CONFIG_LEDS_PWM is not set
+# CONFIG_LEDS_BD2802 is not set
+
+#
+# LED Triggers
+#
+CONFIG_LEDS_TRIGGERS=y
+CONFIG_LEDS_TRIGGER_TIMER=y
+CONFIG_LEDS_TRIGGER_HEARTBEAT=y
+CONFIG_LEDS_TRIGGER_BACKLIGHT=y
+CONFIG_LEDS_TRIGGER_GPIO=y
+CONFIG_LEDS_TRIGGER_DEFAULT_ON=y
+
+#
+# iptables trigger is under Netfilter config (LED target)
+#
+CONFIG_RTC_LIB=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_HCTOSYS=y
+CONFIG_RTC_HCTOSYS_DEVICE="rtc0"
+# CONFIG_RTC_DEBUG is not set
+
+#
+# RTC interfaces
+#
+CONFIG_RTC_INTF_SYSFS=y
+CONFIG_RTC_INTF_PROC=y
+CONFIG_RTC_INTF_DEV=y
+CONFIG_RTC_INTF_DEV_UIE_EMUL=y
+# CONFIG_RTC_DRV_TEST is not set
+
+#
+# I2C RTC drivers
+#
+# CONFIG_RTC_DRV_DS1307 is not set
+# CONFIG_RTC_DRV_DS1374 is not set
+# CONFIG_RTC_DRV_DS1672 is not set
+# CONFIG_RTC_DRV_MAX6900 is not set
+# CONFIG_RTC_DRV_RS5C372 is not set
+# CONFIG_RTC_DRV_ISL1208 is not set
+# CONFIG_RTC_DRV_X1205 is not set
+# CONFIG_RTC_DRV_PCF8563 is not set
+# CONFIG_RTC_DRV_PCF8583 is not set
+# CONFIG_RTC_DRV_M41T80 is not set
+# CONFIG_RTC_DRV_S35390A is not set
+# CONFIG_RTC_DRV_FM3130 is not set
+# CONFIG_RTC_DRV_RX8581 is not set
+
+#
+# SPI RTC drivers
+#
+# CONFIG_RTC_DRV_M41T94 is not set
+# CONFIG_RTC_DRV_DS1305 is not set
+# CONFIG_RTC_DRV_DS1390 is not set
+# CONFIG_RTC_DRV_MAX6902 is not set
+# CONFIG_RTC_DRV_R9701 is not set
+# CONFIG_RTC_DRV_RS5C348 is not set
+# CONFIG_RTC_DRV_DS3234 is not set
+
+#
+# Platform RTC drivers
+#
+# CONFIG_RTC_DRV_CMOS is not set
+# CONFIG_RTC_DRV_DS1286 is not set
+# CONFIG_RTC_DRV_DS1511 is not set
+# CONFIG_RTC_DRV_DS1553 is not set
+# CONFIG_RTC_DRV_DS1742 is not set
+# CONFIG_RTC_DRV_STK17TA8 is not set
+# CONFIG_RTC_DRV_M48T86 is not set
+# CONFIG_RTC_DRV_M48T35 is not set
+# CONFIG_RTC_DRV_M48T59 is not set
+# CONFIG_RTC_DRV_BQ4802 is not set
+# CONFIG_RTC_DRV_V3020 is not set
+
+#
+# on-CPU RTC drivers
+#
+CONFIG_RTC_DRV_S3C=y
+CONFIG_DMADEVICES=y
+
+#
+# DMA Devices
+#
+# CONFIG_AUXDISPLAY is not set
+# CONFIG_REGULATOR is not set
+# CONFIG_UIO is not set
+# CONFIG_STAGING is not set
+
+#
+# File systems
+#
+CONFIG_EXT2_FS=m
+CONFIG_EXT2_FS_XATTR=y
+CONFIG_EXT2_FS_POSIX_ACL=y
+CONFIG_EXT2_FS_SECURITY=y
+# CONFIG_EXT2_FS_XIP is not set
+CONFIG_EXT3_FS=y
+# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
+CONFIG_EXT3_FS_XATTR=y
+CONFIG_EXT3_FS_POSIX_ACL=y
+CONFIG_EXT3_FS_SECURITY=y
+# CONFIG_EXT4_FS is not set
+CONFIG_JBD=y
+# CONFIG_JBD_DEBUG is not set
+CONFIG_FS_MBCACHE=y
+# CONFIG_REISERFS_FS is not set
+# CONFIG_JFS_FS is not set
+CONFIG_FS_POSIX_ACL=y
+CONFIG_FILE_LOCKING=y
+# CONFIG_XFS_FS is not set
+# CONFIG_GFS2_FS is not set
+# CONFIG_OCFS2_FS is not set
+# CONFIG_BTRFS_FS is not set
+CONFIG_DNOTIFY=y
+CONFIG_INOTIFY=y
+CONFIG_INOTIFY_USER=y
+# CONFIG_QUOTA is not set
+CONFIG_AUTOFS_FS=y
+CONFIG_AUTOFS4_FS=y
+# CONFIG_FUSE_FS is not set
+CONFIG_GENERIC_ACL=y
+
+#
+# Caches
+#
+# CONFIG_FSCACHE is not set
+
+#
+# CD-ROM/DVD Filesystems
+#
+# CONFIG_ISO9660_FS is not set
+# CONFIG_UDF_FS is not set
+
+#
+# DOS/FAT/NT Filesystems
+#
+CONFIG_FAT_FS=y
+CONFIG_MSDOS_FS=y
+CONFIG_VFAT_FS=y
+CONFIG_FAT_DEFAULT_CODEPAGE=437
+CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1"
+# CONFIG_NTFS_FS is not set
+
+#
+# Pseudo filesystems
+#
+CONFIG_PROC_FS=y
+CONFIG_PROC_SYSCTL=y
+CONFIG_PROC_PAGE_MONITOR=y
+CONFIG_SYSFS=y
+CONFIG_TMPFS=y
+CONFIG_TMPFS_POSIX_ACL=y
+# CONFIG_HUGETLB_PAGE is not set
+CONFIG_CONFIGFS_FS=m
+CONFIG_MISC_FILESYSTEMS=y
+# CONFIG_ADFS_FS is not set
+# CONFIG_AFFS_FS is not set
+# CONFIG_ECRYPT_FS is not set
+# CONFIG_HFS_FS is not set
+# CONFIG_HFSPLUS_FS is not set
+# CONFIG_BEFS_FS is not set
+# CONFIG_BFS_FS is not set
+# CONFIG_EFS_FS is not set
+CONFIG_JFFS2_FS=y
+CONFIG_JFFS2_FS_DEBUG=0
+CONFIG_JFFS2_FS_WRITEBUFFER=y
+# CONFIG_JFFS2_FS_WBUF_VERIFY is not set
+# CONFIG_JFFS2_SUMMARY is not set
+# CONFIG_JFFS2_FS_XATTR is not set
+# CONFIG_JFFS2_COMPRESSION_OPTIONS is not set
+CONFIG_JFFS2_ZLIB=y
+# CONFIG_JFFS2_LZO is not set
+CONFIG_JFFS2_RTIME=y
+# CONFIG_JFFS2_RUBIN is not set
+CONFIG_CRAMFS=y
+# CONFIG_SQUASHFS is not set
+# CONFIG_VXFS_FS is not set
+# CONFIG_MINIX_FS is not set
+# CONFIG_OMFS_FS is not set
+# CONFIG_HPFS_FS is not set
+# CONFIG_QNX4FS_FS is not set
+CONFIG_ROMFS_FS=y
+# CONFIG_ROMFS_BACKED_BY_BLOCK is not set
+# CONFIG_ROMFS_BACKED_BY_MTD is not set
+CONFIG_ROMFS_BACKED_BY_BOTH=y
+CONFIG_ROMFS_ON_BLOCK=y
+CONFIG_ROMFS_ON_MTD=y
+# CONFIG_SYSV_FS is not set
+# CONFIG_UFS_FS is not set
+# CONFIG_NILFS2_FS is not set
+CONFIG_NETWORK_FILESYSTEMS=y
+CONFIG_NFS_FS=y
+CONFIG_NFS_V3=y
+CONFIG_NFS_V3_ACL=y
+CONFIG_NFS_V4=y
+CONFIG_ROOT_NFS=y
+# CONFIG_NFSD is not set
+CONFIG_LOCKD=y
+CONFIG_LOCKD_V4=y
+CONFIG_NFS_ACL_SUPPORT=y
+CONFIG_NFS_COMMON=y
+CONFIG_SUNRPC=y
+CONFIG_SUNRPC_GSS=y
+CONFIG_RPCSEC_GSS_KRB5=y
+# CONFIG_RPCSEC_GSS_SPKM3 is not set
+# CONFIG_SMB_FS is not set
+# CONFIG_CIFS is not set
+# CONFIG_NCP_FS is not set
+# CONFIG_CODA_FS is not set
+# CONFIG_AFS_FS is not set
+
+#
+# Partition Types
+#
+CONFIG_PARTITION_ADVANCED=y
+# CONFIG_ACORN_PARTITION is not set
+# CONFIG_OSF_PARTITION is not set
+# CONFIG_AMIGA_PARTITION is not set
+# CONFIG_ATARI_PARTITION is not set
+# CONFIG_MAC_PARTITION is not set
+CONFIG_MSDOS_PARTITION=y
+CONFIG_BSD_DISKLABEL=y
+CONFIG_MINIX_SUBPARTITION=y
+CONFIG_SOLARIS_X86_PARTITION=y
+CONFIG_UNIXWARE_DISKLABEL=y
+CONFIG_LDM_PARTITION=y
+# CONFIG_LDM_DEBUG is not set
+# CONFIG_SGI_PARTITION is not set
+# CONFIG_ULTRIX_PARTITION is not set
+# CONFIG_SUN_PARTITION is not set
+# CONFIG_KARMA_PARTITION is not set
+CONFIG_EFI_PARTITION=y
+# CONFIG_SYSV68_PARTITION is not set
+CONFIG_NLS=y
+CONFIG_NLS_DEFAULT="cp437"
+CONFIG_NLS_CODEPAGE_437=m
+CONFIG_NLS_CODEPAGE_737=m
+CONFIG_NLS_CODEPAGE_775=m
+CONFIG_NLS_CODEPAGE_850=m
+CONFIG_NLS_CODEPAGE_852=m
+CONFIG_NLS_CODEPAGE_855=m
+CONFIG_NLS_CODEPAGE_857=m
+CONFIG_NLS_CODEPAGE_860=m
+CONFIG_NLS_CODEPAGE_861=m
+CONFIG_NLS_CODEPAGE_862=m
+CONFIG_NLS_CODEPAGE_863=m
+CONFIG_NLS_CODEPAGE_864=m
+CONFIG_NLS_CODEPAGE_865=m
+CONFIG_NLS_CODEPAGE_866=m
+CONFIG_NLS_CODEPAGE_869=m
+CONFIG_NLS_CODEPAGE_936=m
+CONFIG_NLS_CODEPAGE_950=m
+CONFIG_NLS_CODEPAGE_932=m
+CONFIG_NLS_CODEPAGE_949=m
+CONFIG_NLS_CODEPAGE_874=m
+CONFIG_NLS_ISO8859_8=m
+CONFIG_NLS_CODEPAGE_1250=m
+CONFIG_NLS_CODEPAGE_1251=m
+CONFIG_NLS_ASCII=m
+CONFIG_NLS_ISO8859_1=m
+CONFIG_NLS_ISO8859_2=m
+CONFIG_NLS_ISO8859_3=m
+CONFIG_NLS_ISO8859_4=m
+CONFIG_NLS_ISO8859_5=m
+CONFIG_NLS_ISO8859_6=m
+CONFIG_NLS_ISO8859_7=m
+CONFIG_NLS_ISO8859_9=m
+CONFIG_NLS_ISO8859_13=m
+CONFIG_NLS_ISO8859_14=m
+CONFIG_NLS_ISO8859_15=m
+CONFIG_NLS_KOI8_R=m
+CONFIG_NLS_KOI8_U=m
+CONFIG_NLS_UTF8=m
+# CONFIG_DLM is not set
+
+#
+# Kernel hacking
+#
+# CONFIG_PRINTK_TIME is not set
+# CONFIG_ENABLE_WARN_DEPRECATED is not set
+# CONFIG_ENABLE_MUST_CHECK is not set
+CONFIG_FRAME_WARN=1024
+# CONFIG_MAGIC_SYSRQ is not set
+# CONFIG_UNUSED_SYMBOLS is not set
+CONFIG_DEBUG_FS=y
+# CONFIG_HEADERS_CHECK is not set
+CONFIG_DEBUG_KERNEL=y
+# CONFIG_DEBUG_SHIRQ is not set
+CONFIG_DETECT_SOFTLOCKUP=y
+# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set
+CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=0
+CONFIG_DETECT_HUNG_TASK=y
+# CONFIG_BOOTPARAM_HUNG_TASK_PANIC is not set
+CONFIG_BOOTPARAM_HUNG_TASK_PANIC_VALUE=0
+# CONFIG_SCHED_DEBUG is not set
+# CONFIG_SCHEDSTATS is not set
+# CONFIG_TIMER_STATS is not set
+# CONFIG_DEBUG_OBJECTS is not set
+# CONFIG_SLUB_DEBUG_ON is not set
+# CONFIG_SLUB_STATS is not set
+# CONFIG_DEBUG_RT_MUTEXES is not set
+# CONFIG_RT_MUTEX_TESTER is not set
+# CONFIG_DEBUG_SPINLOCK is not set
+# CONFIG_DEBUG_MUTEXES is not set
+# CONFIG_DEBUG_LOCK_ALLOC is not set
+# CONFIG_PROVE_LOCKING is not set
+# CONFIG_LOCK_STAT is not set
+# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
+# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
+# CONFIG_DEBUG_KOBJECT is not set
+CONFIG_DEBUG_BUGVERBOSE=y
+CONFIG_DEBUG_INFO=y
+# CONFIG_DEBUG_VM is not set
+# CONFIG_DEBUG_WRITECOUNT is not set
+CONFIG_DEBUG_MEMORY_INIT=y
+# CONFIG_DEBUG_LIST is not set
+# CONFIG_DEBUG_SG is not set
+# CONFIG_DEBUG_NOTIFIERS is not set
+# CONFIG_BOOT_PRINTK_DELAY is not set
+# CONFIG_RCU_TORTURE_TEST is not set
+# CONFIG_RCU_CPU_STALL_DETECTOR is not set
+# CONFIG_BACKTRACE_SELF_TEST is not set
+# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set
+# CONFIG_FAULT_INJECTION is not set
+# CONFIG_LATENCYTOP is not set
+CONFIG_SYSCTL_SYSCALL_CHECK=y
+# CONFIG_PAGE_POISONING is not set
+CONFIG_HAVE_FUNCTION_TRACER=y
+CONFIG_TRACING_SUPPORT=y
+
+#
+# Tracers
+#
+# CONFIG_FUNCTION_TRACER is not set
+# CONFIG_SCHED_TRACER is not set
+# CONFIG_CONTEXT_SWITCH_TRACER is not set
+# CONFIG_EVENT_TRACER is not set
+# CONFIG_BOOT_TRACER is not set
+# CONFIG_TRACE_BRANCH_PROFILING is not set
+# CONFIG_STACK_TRACER is not set
+# CONFIG_KMEMTRACE is not set
+# CONFIG_WORKQUEUE_TRACER is not set
+# CONFIG_BLK_DEV_IO_TRACE is not set
+# CONFIG_DYNAMIC_DEBUG is not set
+# CONFIG_SAMPLES is not set
+CONFIG_HAVE_ARCH_KGDB=y
+# CONFIG_KGDB is not set
+CONFIG_ARM_UNWIND=y
+CONFIG_DEBUG_USER=y
+# CONFIG_DEBUG_ERRORS is not set
+# CONFIG_DEBUG_STACK_USAGE is not set
+# CONFIG_DEBUG_LL is not set
+CONFIG_DEBUG_S3C_UART=0
+
+#
+# Security options
+#
+CONFIG_KEYS=y
+# CONFIG_KEYS_DEBUG_PROC_KEYS is not set
+# CONFIG_SECURITY is not set
+# CONFIG_SECURITYFS is not set
+CONFIG_SECURITY_FILE_CAPABILITIES=y
+CONFIG_CRYPTO=y
+
+#
+# Crypto core or helper
+#
+CONFIG_CRYPTO_FIPS=y
+CONFIG_CRYPTO_ALGAPI=y
+CONFIG_CRYPTO_ALGAPI2=y
+CONFIG_CRYPTO_AEAD=m
+CONFIG_CRYPTO_AEAD2=y
+CONFIG_CRYPTO_BLKCIPHER=y
+CONFIG_CRYPTO_BLKCIPHER2=y
+CONFIG_CRYPTO_HASH=y
+CONFIG_CRYPTO_HASH2=y
+CONFIG_CRYPTO_RNG=m
+CONFIG_CRYPTO_RNG2=y
+CONFIG_CRYPTO_PCOMP=y
+CONFIG_CRYPTO_MANAGER=y
+CONFIG_CRYPTO_MANAGER2=y
+CONFIG_CRYPTO_GF128MUL=m
+CONFIG_CRYPTO_NULL=m
+CONFIG_CRYPTO_WORKQUEUE=y
+CONFIG_CRYPTO_CRYPTD=m
+CONFIG_CRYPTO_AUTHENC=m
+CONFIG_CRYPTO_TEST=m
+
+#
+# Authenticated Encryption with Associated Data
+#
+CONFIG_CRYPTO_CCM=m
+CONFIG_CRYPTO_GCM=m
+CONFIG_CRYPTO_SEQIV=m
+
+#
+# Block modes
+#
+CONFIG_CRYPTO_CBC=y
+CONFIG_CRYPTO_CTR=m
+CONFIG_CRYPTO_CTS=m
+CONFIG_CRYPTO_ECB=y
+CONFIG_CRYPTO_LRW=m
+CONFIG_CRYPTO_PCBC=m
+CONFIG_CRYPTO_XTS=m
+
+#
+# Hash modes
+#
+CONFIG_CRYPTO_HMAC=y
+CONFIG_CRYPTO_XCBC=m
+
+#
+# Digest
+#
+CONFIG_CRYPTO_CRC32C=m
+CONFIG_CRYPTO_MD4=m
+CONFIG_CRYPTO_MD5=y
+CONFIG_CRYPTO_MICHAEL_MIC=y
+CONFIG_CRYPTO_RMD128=m
+CONFIG_CRYPTO_RMD160=m
+CONFIG_CRYPTO_RMD256=m
+CONFIG_CRYPTO_RMD320=m
+CONFIG_CRYPTO_SHA1=m
+CONFIG_CRYPTO_SHA256=m
+CONFIG_CRYPTO_SHA512=m
+CONFIG_CRYPTO_TGR192=m
+CONFIG_CRYPTO_WP512=m
+
+#
+# Ciphers
+#
+CONFIG_CRYPTO_AES=y
+CONFIG_CRYPTO_ANUBIS=m
+CONFIG_CRYPTO_ARC4=y
+CONFIG_CRYPTO_BLOWFISH=m
+CONFIG_CRYPTO_CAMELLIA=m
+CONFIG_CRYPTO_CAST5=m
+CONFIG_CRYPTO_CAST6=m
+CONFIG_CRYPTO_DES=y
+CONFIG_CRYPTO_FCRYPT=m
+CONFIG_CRYPTO_KHAZAD=m
+CONFIG_CRYPTO_SALSA20=m
+CONFIG_CRYPTO_SEED=m
+CONFIG_CRYPTO_SERPENT=m
+CONFIG_CRYPTO_TEA=m
+CONFIG_CRYPTO_TWOFISH=m
+CONFIG_CRYPTO_TWOFISH_COMMON=m
+
+#
+# Compression
+#
+CONFIG_CRYPTO_DEFLATE=m
+CONFIG_CRYPTO_ZLIB=m
+CONFIG_CRYPTO_LZO=m
+
+#
+# Random Number Generation
+#
+CONFIG_CRYPTO_ANSI_CPRNG=m
+CONFIG_CRYPTO_HW=y
+# CONFIG_BINARY_PRINTF is not set
+
+#
+# Library routines
+#
+CONFIG_BITREVERSE=y
+CONFIG_GENERIC_FIND_LAST_BIT=y
+CONFIG_CRC_CCITT=m
+CONFIG_CRC16=m
+CONFIG_CRC_T10DIF=y
+CONFIG_CRC_ITU_T=y
+CONFIG_CRC32=y
+CONFIG_CRC7=y
+CONFIG_LIBCRC32C=m
+CONFIG_ZLIB_INFLATE=y
+CONFIG_ZLIB_DEFLATE=y
+CONFIG_LZO_COMPRESS=m
+CONFIG_LZO_DECOMPRESS=m
+CONFIG_DECOMPRESS_GZIP=y
+CONFIG_DECOMPRESS_BZIP2=y
+CONFIG_DECOMPRESS_LZMA=y
+CONFIG_HAS_IOMEM=y
+CONFIG_HAS_DMA=y
+CONFIG_NLATTR=y
diff --git a/arch/arm/include/asm/unistd.h b/arch/arm/include/asm/unistd.h
index 94cc58e..0e97b8c 100644
--- a/arch/arm/include/asm/unistd.h
+++ b/arch/arm/include/asm/unistd.h
@@ -389,6 +389,8 @@
#define __NR_inotify_init1 (__NR_SYSCALL_BASE+360)
#define __NR_preadv (__NR_SYSCALL_BASE+361)
#define __NR_pwritev (__NR_SYSCALL_BASE+362)
+#define __NR_rt_tgsigqueueinfo (__NR_SYSCALL_BASE+363)
+#define __NR_perf_counter_open (__NR_SYSCALL_BASE+364)
/*
* The following SWIs are ARM private.
diff --git a/arch/arm/kernel/calls.S b/arch/arm/kernel/calls.S
index 1680e9e..f776e72 100644
--- a/arch/arm/kernel/calls.S
+++ b/arch/arm/kernel/calls.S
@@ -372,6 +372,8 @@
/* 360 */ CALL(sys_inotify_init1)
CALL(sys_preadv)
CALL(sys_pwritev)
+ CALL(sys_rt_tgsigqueueinfo)
+ CALL(sys_perf_counter_open)
#ifndef syscalls_counted
.equ syscalls_padding, ((NR_syscalls + 3) & ~3) - NR_syscalls
#define syscalls_counted
diff --git a/arch/arm/kernel/irq.c b/arch/arm/kernel/irq.c
index 6874c7d..096f600 100644
--- a/arch/arm/kernel/irq.c
+++ b/arch/arm/kernel/irq.c
@@ -167,7 +167,7 @@
#ifdef CONFIG_SMP
cpumask_setall(bad_irq_desc.affinity);
- bad_irq_desc.cpu = smp_processor_id();
+ bad_irq_desc.node = smp_processor_id();
#endif
init_arch_irq();
}
@@ -176,7 +176,7 @@
static void route_irq(struct irq_desc *desc, unsigned int irq, unsigned int cpu)
{
- pr_debug("IRQ%u: moving from cpu%u to cpu%u\n", irq, desc->cpu, cpu);
+ pr_debug("IRQ%u: moving from cpu%u to cpu%u\n", irq, desc->node, cpu);
spin_lock_irq(&desc->lock);
desc->chip->set_affinity(irq, cpumask_of(cpu));
@@ -195,7 +195,7 @@
for (i = 0; i < NR_IRQS; i++) {
struct irq_desc *desc = irq_desc + i;
- if (desc->cpu == cpu) {
+ if (desc->node == cpu) {
unsigned int newcpu = cpumask_any_and(desc->affinity,
cpu_online_mask);
if (newcpu >= nr_cpu_ids) {
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
index 1585423..39196df 100644
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
@@ -114,9 +114,6 @@
/*
* Function pointers to optional machine specific functions
*/
-void (*pm_idle)(void);
-EXPORT_SYMBOL(pm_idle);
-
void (*pm_power_off)(void);
EXPORT_SYMBOL(pm_power_off);
@@ -130,20 +127,19 @@
*/
static void default_idle(void)
{
- if (hlt_counter)
- cpu_relax();
- else {
- local_irq_disable();
- if (!need_resched())
- arch_idle();
- local_irq_enable();
- }
+ if (!need_resched())
+ arch_idle();
+ local_irq_enable();
}
+void (*pm_idle)(void) = default_idle;
+EXPORT_SYMBOL(pm_idle);
+
/*
- * The idle thread. We try to conserve power, while trying to keep
- * overall latency low. The architecture specific idle is passed
- * a value to indicate the level of "idleness" of the system.
+ * The idle thread has rather strange semantics for calling pm_idle,
+ * but this is what x86 does and we need to do the same, so that
+ * things like cpuidle get called in the same way. The only difference
+ * is that we always respect 'hlt_counter' to prevent low power idle.
*/
void cpu_idle(void)
{
@@ -151,21 +147,31 @@
/* endless idle loop with no priority at all */
while (1) {
- void (*idle)(void) = pm_idle;
-
+ tick_nohz_stop_sched_tick(1);
+ leds_event(led_idle_start);
+ while (!need_resched()) {
#ifdef CONFIG_HOTPLUG_CPU
- if (cpu_is_offline(smp_processor_id())) {
- leds_event(led_idle_start);
- cpu_die();
- }
+ if (cpu_is_offline(smp_processor_id()))
+ cpu_die();
#endif
- if (!idle)
- idle = default_idle;
- leds_event(led_idle_start);
- tick_nohz_stop_sched_tick(1);
- while (!need_resched())
- idle();
+ local_irq_disable();
+ if (hlt_counter) {
+ local_irq_enable();
+ cpu_relax();
+ } else {
+ stop_critical_timings();
+ pm_idle();
+ start_critical_timings();
+ /*
+ * This will eventually be removed - pm_idle
+ * functions should always return with IRQs
+ * enabled.
+ */
+ WARN_ON(irqs_disabled());
+ local_irq_enable();
+ }
+ }
leds_event(led_idle_end);
tick_nohz_restart_sched_tick();
preempt_enable_no_resched();
@@ -352,6 +358,23 @@
" .size kernel_thread_helper, . - kernel_thread_helper\n"
" .previous");
+#ifdef CONFIG_ARM_UNWIND
+extern void kernel_thread_exit(long code);
+asm( ".section .text\n"
+" .align\n"
+" .type kernel_thread_exit, #function\n"
+"kernel_thread_exit:\n"
+" .fnstart\n"
+" .cantunwind\n"
+" bl do_exit\n"
+" nop\n"
+" .fnend\n"
+" .size kernel_thread_exit, . - kernel_thread_exit\n"
+" .previous");
+#else
+#define kernel_thread_exit do_exit
+#endif
+
/*
* Create a kernel thread.
*/
@@ -363,7 +386,7 @@
regs.ARM_r1 = (unsigned long)arg;
regs.ARM_r2 = (unsigned long)fn;
- regs.ARM_r3 = (unsigned long)do_exit;
+ regs.ARM_r3 = (unsigned long)kernel_thread_exit;
regs.ARM_pc = (unsigned long)kernel_thread_helper;
regs.ARM_cpsr = SVC_MODE | PSR_ENDSTATE;
diff --git a/arch/arm/kernel/unwind.c b/arch/arm/kernel/unwind.c
index 1dedc2c..dd56e11 100644
--- a/arch/arm/kernel/unwind.c
+++ b/arch/arm/kernel/unwind.c
@@ -212,7 +212,8 @@
ctrl->vrs[14] = *vsp++;
ctrl->vrs[SP] = (unsigned long)vsp;
} else if (insn == 0xb0) {
- ctrl->vrs[PC] = ctrl->vrs[LR];
+ if (ctrl->vrs[PC] == 0)
+ ctrl->vrs[PC] = ctrl->vrs[LR];
/* no further processing */
ctrl->entries = 0;
} else if (insn == 0xb1) {
@@ -309,18 +310,20 @@
}
while (ctrl.entries > 0) {
- int urc;
-
- if (ctrl.vrs[SP] < low || ctrl.vrs[SP] >= high)
- return -URC_FAILURE;
- urc = unwind_exec_insn(&ctrl);
+ int urc = unwind_exec_insn(&ctrl);
if (urc < 0)
return urc;
+ if (ctrl.vrs[SP] < low || ctrl.vrs[SP] >= high)
+ return -URC_FAILURE;
}
if (ctrl.vrs[PC] == 0)
ctrl.vrs[PC] = ctrl.vrs[LR];
+ /* check for infinite loop */
+ if (frame->pc == ctrl.vrs[PC])
+ return -URC_FAILURE;
+
frame->fp = ctrl.vrs[FP];
frame->sp = ctrl.vrs[SP];
frame->lr = ctrl.vrs[LR];
@@ -332,7 +335,6 @@
void unwind_backtrace(struct pt_regs *regs, struct task_struct *tsk)
{
struct stackframe frame;
- unsigned long high, low;
register unsigned long current_sp asm ("sp");
pr_debug("%s(regs = %p tsk = %p)\n", __func__, regs, tsk);
@@ -362,9 +364,6 @@
frame.pc = thread_saved_pc(tsk);
}
- low = frame.sp & ~(THREAD_SIZE - 1);
- high = low + THREAD_SIZE;
-
while (1) {
int urc;
unsigned long where = frame.pc;
diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S
index 6c07797..4340bf3 100644
--- a/arch/arm/kernel/vmlinux.lds.S
+++ b/arch/arm/kernel/vmlinux.lds.S
@@ -84,6 +84,14 @@
*(.exitcall.exit)
*(.ARM.exidx.exit.text)
*(.ARM.extab.exit.text)
+#ifndef CONFIG_HOTPLUG_CPU
+ *(.ARM.exidx.cpuexit.text)
+ *(.ARM.extab.cpuexit.text)
+#endif
+#ifndef CONFIG_HOTPLUG
+ *(.ARM.exidx.devexit.text)
+ *(.ARM.extab.devexit.text)
+#endif
#ifndef CONFIG_MMU
*(.fixup)
*(__ex_table)
diff --git a/arch/arm/mach-davinci/include/mach/nand.h b/arch/arm/mach-davinci/include/mach/nand.h
index aa48284..b520c4b 100644
--- a/arch/arm/mach-davinci/include/mach/nand.h
+++ b/arch/arm/mach-davinci/include/mach/nand.h
@@ -68,10 +68,14 @@
/* none == NAND_ECC_NONE (strongly *not* advised!!)
* soft == NAND_ECC_SOFT
- * 1-bit == NAND_ECC_HW
- * 4-bit == NAND_ECC_HW_SYNDROME (not on all chips)
+ * else == NAND_ECC_HW, according to ecc_bits
+ *
+ * All DaVinci-family chips support 1-bit hardware ECC.
+ * Newer ones also support 4-bit ECC, but using it with
+ * large page chips is awkward.
*/
nand_ecc_modes_t ecc_mode;
+ u8 ecc_bits;
/* e.g. NAND_BUSWIDTH_16 or NAND_USE_FLASH_BBT */
unsigned options;
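
As a sketch of how a board file might use the new field (assuming the davinci_nand_pdata platform-data structure declared in this header; the variable name below is illustrative, only the fields shown above are taken from the patch):

	/* hypothetical board setup requesting the 4-bit hardware ECC engine */
	static struct davinci_nand_pdata evm_nand_pdata = {
		.ecc_mode = NAND_ECC_HW,	/* "else == NAND_ECC_HW, according to ecc_bits" */
		.ecc_bits = 4,			/* 4-bit ECC, per the comment above */
		.options  = NAND_USE_FLASH_BBT,	/* one of the options the header suggests */
	};
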
diff --git a/arch/arm/mach-omap2/clock.c b/arch/arm/mach-omap2/clock.c
index ba528f8..b0665f1 100644
--- a/arch/arm/mach-omap2/clock.c
+++ b/arch/arm/mach-omap2/clock.c
@@ -302,7 +302,7 @@
udelay(1);
}
- if (i < MAX_CLOCK_ENABLE_WAIT)
+ if (i <= MAX_CLOCK_ENABLE_WAIT)
pr_debug("Clock %s stable after %d loops\n", name, i);
else
printk(KERN_ERR "Clock %s didn't enable in %d tries\n",
diff --git a/arch/arm/mach-omap2/clock34xx.c b/arch/arm/mach-omap2/clock34xx.c
index 9e43fe5..045da92 100644
--- a/arch/arm/mach-omap2/clock34xx.c
+++ b/arch/arm/mach-omap2/clock34xx.c
@@ -286,6 +286,20 @@
#define MIN_SDRC_DLL_LOCK_FREQ 83000000
+#define CYCLES_PER_MHZ 1000000
+
+/* Scale factor for fixed-point arith in omap3_core_dpll_m2_set_rate() */
+#define SDRC_MPURATE_SCALE 8
+
+/* 2^SDRC_MPURATE_BASE_SHIFT: MPU MHz that SDRC_MPURATE_LOOPS is defined for */
+#define SDRC_MPURATE_BASE_SHIFT 9
+
+/*
+ * SDRC_MPURATE_LOOPS: Number of MPU loops to execute at
+ * 2^MPURATE_BASE_SHIFT MHz for SDRC to stabilize
+ */
+#define SDRC_MPURATE_LOOPS 96
+
/**
* omap3_dpll_recalc - recalculate DPLL rate
* @clk: DPLL struct clk
@@ -709,7 +723,8 @@
{
u32 new_div = 0;
u32 unlock_dll = 0;
- unsigned long validrate, sdrcrate;
+ u32 c;
+ unsigned long validrate, sdrcrate, mpurate;
struct omap_sdrc_params *sp;
if (!clk || !rate)
@@ -718,18 +733,15 @@
if (clk != &dpll3_m2_ck)
return -EINVAL;
- if (rate == clk->rate)
- return 0;
-
validrate = omap2_clksel_round_rate_div(clk, rate, &new_div);
if (validrate != rate)
return -EINVAL;
sdrcrate = sdrc_ick.rate;
if (rate > clk->rate)
- sdrcrate <<= ((rate / clk->rate) - 1);
+ sdrcrate <<= ((rate / clk->rate) >> 1);
else
- sdrcrate >>= ((clk->rate / rate) - 1);
+ sdrcrate >>= ((clk->rate / rate) >> 1);
sp = omap2_sdrc_get_params(sdrcrate);
if (!sp)
@@ -740,17 +752,25 @@
unlock_dll = 1;
}
+ /*
+ * XXX This only needs to be done when the CPU frequency changes
+ */
+ mpurate = arm_fck.rate / CYCLES_PER_MHZ;
+ c = (mpurate << SDRC_MPURATE_SCALE) >> SDRC_MPURATE_BASE_SHIFT;
+ c += 1; /* for safety */
+ c *= SDRC_MPURATE_LOOPS;
+ c >>= SDRC_MPURATE_SCALE;
+ if (c == 0)
+ c = 1;
+
pr_debug("clock: changing CORE DPLL rate from %lu to %lu\n", clk->rate,
validrate);
pr_debug("clock: SDRC timing params used: %08x %08x %08x\n",
sp->rfr_ctrl, sp->actim_ctrla, sp->actim_ctrlb);
- /* REVISIT: SRAM code doesn't support other M2 divisors yet */
- WARN_ON(new_div != 1 && new_div != 2);
-
- /* REVISIT: Add SDRC_MR changing to this code also */
omap3_configure_core_dpll(sp->rfr_ctrl, sp->actim_ctrla,
- sp->actim_ctrlb, new_div, unlock_dll);
+ sp->actim_ctrlb, new_div, unlock_dll, c,
+ sp->mr, rate > clk->rate);
return 0;
}
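
For illustration, assuming an MPU clock of 500 MHz, the fixed-point loop-count calculation above works out as follows (values are only an example, the constants are those defined earlier in this file):

	/* worked example of the SDRC stabilization loop count */
	mpurate = 500000000 / CYCLES_PER_MHZ;				/* = 500 */
	c  = (500 << SDRC_MPURATE_SCALE) >> SDRC_MPURATE_BASE_SHIFT;	/* = 250 */
	c += 1;								/* 251, for safety */
	c *= SDRC_MPURATE_LOOPS;					/* 251 * 96 = 24096 */
	c >>= SDRC_MPURATE_SCALE;					/* 24096 >> 8 = 94 loops */

i.e. roughly SDRC_MPURATE_LOOPS (96) wait loops per 2^SDRC_MPURATE_BASE_SHIFT (512) MHz of MPU clock, which is the count later handed to the SRAM code in r5.
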
diff --git a/arch/arm/mach-omap2/io.c b/arch/arm/mach-omap2/io.c
index 32afd94..3a86b0f 100644
--- a/arch/arm/mach-omap2/io.c
+++ b/arch/arm/mach-omap2/io.c
@@ -21,6 +21,7 @@
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/io.h>
+#include <linux/clk.h>
#include <asm/tlb.h>
@@ -241,6 +242,40 @@
omapfb_reserve_sdram();
}
+/*
+ * omap2_init_reprogram_sdrc - reprogram SDRC timing parameters
+ *
+ * Sets the CORE DPLL3 M2 divider to the same value that it's at
+ * currently. This has the effect of setting the SDRC SDRAM AC timing
+ * registers to the values currently defined by the kernel. Currently
+ * only defined for OMAP3. Returns -EINVAL if the dpll3_m2_ck cannot
+ * be found, 0 if called on OMAP2, or passes along the return value
+ * of clk_set_rate().
+ */
+static int __init _omap2_init_reprogram_sdrc(void)
+{
+ struct clk *dpll3_m2_ck;
+ int v = -EINVAL;
+ long rate;
+
+ if (!cpu_is_omap34xx())
+ return 0;
+
+ dpll3_m2_ck = clk_get(NULL, "dpll3_m2_ck");
+ if (!dpll3_m2_ck)
+ return -EINVAL;
+
+ rate = clk_get_rate(dpll3_m2_ck);
+ pr_info("Reprogramming SDRC clock to %ld Hz\n", rate);
+ v = clk_set_rate(dpll3_m2_ck, rate);
+ if (v)
+ pr_err("dpll3_m2_clk rate change failed: %d\n", v);
+
+ clk_put(dpll3_m2_ck);
+
+ return v;
+}
+
void __init omap2_init_common_hw(struct omap_sdrc_params *sp)
{
omap2_mux_init();
@@ -249,6 +284,7 @@
clkdm_init(clockdomains_omap, clkdm_pwrdm_autodeps);
omap2_clk_init();
omap2_sdrc_init(sp);
+ _omap2_init_reprogram_sdrc();
#endif
gpmc_init();
}
diff --git a/arch/arm/mach-omap2/powerdomain.c b/arch/arm/mach-omap2/powerdomain.c
index 73e2971..983f1cb 100644
--- a/arch/arm/mach-omap2/powerdomain.c
+++ b/arch/arm/mach-omap2/powerdomain.c
@@ -1099,7 +1099,7 @@
(c++ < PWRDM_TRANSITION_BAILOUT))
udelay(1);
- if (c >= PWRDM_TRANSITION_BAILOUT) {
+ if (c > PWRDM_TRANSITION_BAILOUT) {
printk(KERN_ERR "powerdomain: waited too long for "
"powerdomain %s to complete transition\n", pwrdm->name);
return -EAGAIN;
diff --git a/arch/arm/mach-omap2/sram34xx.S b/arch/arm/mach-omap2/sram34xx.S
index c080c82..f41f8d9 100644
--- a/arch/arm/mach-omap2/sram34xx.S
+++ b/arch/arm/mach-omap2/sram34xx.S
@@ -3,13 +3,12 @@
*
* Omap3 specific functions that need to be run in internal SRAM
*
- * (C) Copyright 2007
- * Texas Instruments Inc.
- * Rajendra Nayak <rnayak@ti.com>
+ * Copyright (C) 2004, 2007, 2008 Texas Instruments, Inc.
+ * Copyright (C) 2008 Nokia Corporation
*
- * (C) Copyright 2004
- * Texas Instruments, <www.ti.com>
+ * Rajendra Nayak <rnayak@ti.com>
* Richard Woodruff <r-woodruff2@ti.com>
+ * Paul Walmsley
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
@@ -37,61 +36,112 @@
.text
+/* r4 parameters */
+#define SDRC_NO_UNLOCK_DLL 0x0
+#define SDRC_UNLOCK_DLL 0x1
+
+/* SDRC_DLLA_CTRL bit settings */
+#define FIXEDDELAY_SHIFT 24
+#define FIXEDDELAY_MASK (0xff << FIXEDDELAY_SHIFT)
+#define DLLIDLE_MASK 0x4
+
/*
- * Change frequency of core dpll
- * r0 = sdrc_rfr_ctrl r1 = sdrc_actim_ctrla r2 = sdrc_actim_ctrlb r3 = M2
- * r4 = Unlock SDRC DLL? (1 = yes, 0 = no) -- only unlock DLL for
+ * SDRC_DLLA_CTRL default values: TI hardware team indicates that
+ * FIXEDDELAY should be initialized to 0xf. This apparently was
+ * empirically determined during process testing, so no derivation
+ * was provided.
+ */
+#define FIXEDDELAY_DEFAULT (0x0f << FIXEDDELAY_SHIFT)
+
+/* SDRC_DLLA_STATUS bit settings */
+#define LOCKSTATUS_MASK 0x4
+
+/* SDRC_POWER bit settings */
+#define SRFRONIDLEREQ_MASK 0x40
+#define PWDENA_MASK 0x4
+
+/* CM_IDLEST1_CORE bit settings */
+#define ST_SDRC_MASK 0x2
+
+/* CM_ICLKEN1_CORE bit settings */
+#define EN_SDRC_MASK 0x2
+
+/* CM_CLKSEL1_PLL bit settings */
+#define CORE_DPLL_CLKOUT_DIV_SHIFT 0x1b
+
+/*
+ * omap3_sram_configure_core_dpll - change DPLL3 M2 divider
+ * r0 = new SDRC_RFR_CTRL register contents
+ * r1 = new SDRC_ACTIM_CTRLA register contents
+ * r2 = new SDRC_ACTIM_CTRLB register contents
+ * r3 = new M2 divider setting (only 1 and 2 supported right now)
+ * r4 = unlock SDRC DLL? (1 = yes, 0 = no). Only unlock DLL for
* SDRC rates < 83MHz
+ * r5 = number of MPU cycles to wait for SDRC to stabilize after
+ * reprogramming the SDRC when switching to a slower MPU speed
+ * r6 = new SDRC_MR_0 register value
+ * r7 = increasing SDRC rate? (1 = yes, 0 = no)
+ *
*/
ENTRY(omap3_sram_configure_core_dpll)
stmfd sp!, {r1-r12, lr} @ store regs to stack
ldr r4, [sp, #52] @ pull extra args off the stack
+ ldr r5, [sp, #56] @ load extra args from the stack
+ ldr r6, [sp, #60] @ load extra args from the stack
+ ldr r7, [sp, #64] @ load extra args from the stack
dsb @ flush buffered writes to interconnect
- cmp r3, #0x2
- blne configure_sdrc
- cmp r4, #0x1
+ cmp r7, #1 @ if increasing SDRC clk rate,
+ bleq configure_sdrc @ program the SDRC regs early (for RFR)
+ cmp r4, #SDRC_UNLOCK_DLL @ set the intended DLL state
bleq unlock_dll
blne lock_dll
- bl sdram_in_selfrefresh @ put the SDRAM in self refresh
- bl configure_core_dpll
- bl enable_sdrc
- cmp r4, #0x1
+ bl sdram_in_selfrefresh @ put SDRAM in self refresh, idle SDRC
+ bl configure_core_dpll @ change the DPLL3 M2 divider
+ bl enable_sdrc @ take SDRC out of idle
+ cmp r4, #SDRC_UNLOCK_DLL @ wait for DLL status to change
bleq wait_dll_unlock
blne wait_dll_lock
- cmp r3, #0x1
- blne configure_sdrc
+ cmp r7, #1 @ if increasing SDRC clk rate,
+ beq return_to_sdram @ return to SDRAM code, otherwise,
+ bl configure_sdrc @ reprogram SDRC regs now
+ mov r12, r5
+ bl wait_clk_stable @ wait for SDRC to stabilize
+return_to_sdram:
isb @ prevent speculative exec past here
mov r0, #0 @ return value
ldmfd sp!, {r1-r12, pc} @ restore regs and return
unlock_dll:
ldr r11, omap3_sdrc_dlla_ctrl
ldr r12, [r11]
- orr r12, r12, #0x4
+ and r12, r12, #FIXEDDELAY_MASK
+ orr r12, r12, #FIXEDDELAY_DEFAULT
+ orr r12, r12, #DLLIDLE_MASK
str r12, [r11] @ (no OCP barrier needed)
bx lr
lock_dll:
ldr r11, omap3_sdrc_dlla_ctrl
ldr r12, [r11]
- bic r12, r12, #0x4
+ bic r12, r12, #DLLIDLE_MASK
str r12, [r11] @ (no OCP barrier needed)
bx lr
sdram_in_selfrefresh:
ldr r11, omap3_sdrc_power @ read the SDRC_POWER register
ldr r12, [r11] @ read the contents of SDRC_POWER
mov r9, r12 @ keep a copy of SDRC_POWER bits
- orr r12, r12, #0x40 @ enable self refresh on idle req
- bic r12, r12, #0x4 @ clear PWDENA
+ orr r12, r12, #SRFRONIDLEREQ_MASK @ enable self refresh on idle
+ bic r12, r12, #PWDENA_MASK @ clear PWDENA
str r12, [r11] @ write back to SDRC_POWER register
ldr r12, [r11] @ posted-write barrier for SDRC
+idle_sdrc:
ldr r11, omap3_cm_iclken1_core @ read the CM_ICLKEN1_CORE reg
ldr r12, [r11]
- bic r12, r12, #0x2 @ disable iclk bit for SDRC
+ bic r12, r12, #EN_SDRC_MASK @ disable iclk bit for SDRC
str r12, [r11]
wait_sdrc_idle:
ldr r11, omap3_cm_idlest1_core
ldr r12, [r11]
- and r12, r12, #0x2 @ check for SDRC idle
- cmp r12, #2
+ and r12, r12, #ST_SDRC_MASK @ check for SDRC idle
+ cmp r12, #ST_SDRC_MASK
bne wait_sdrc_idle
bx lr
configure_core_dpll:
@@ -99,36 +149,23 @@
ldr r12, [r11]
ldr r10, core_m2_mask_val @ modify m2 for core dpll
and r12, r12, r10
- orr r12, r12, r3, lsl #0x1B @ r3 contains the M2 val
+ orr r12, r12, r3, lsl #CORE_DPLL_CLKOUT_DIV_SHIFT
str r12, [r11]
ldr r12, [r11] @ posted-write barrier for CM
- mov r12, #0x800 @ wait for the clock to stabilise
- cmp r3, #2
- bne wait_clk_stable
bx lr
wait_clk_stable:
subs r12, r12, #1
bne wait_clk_stable
- nop
- nop
- nop
- nop
- nop
- nop
- nop
- nop
- nop
- nop
bx lr
enable_sdrc:
ldr r11, omap3_cm_iclken1_core
ldr r12, [r11]
- orr r12, r12, #0x2 @ enable iclk bit for SDRC
+ orr r12, r12, #EN_SDRC_MASK @ enable iclk bit for SDRC
str r12, [r11]
wait_sdrc_idle1:
ldr r11, omap3_cm_idlest1_core
ldr r12, [r11]
- and r12, r12, #0x2
+ and r12, r12, #ST_SDRC_MASK
cmp r12, #0
bne wait_sdrc_idle1
restore_sdrc_power_val:
@@ -138,14 +175,14 @@
wait_dll_lock:
ldr r11, omap3_sdrc_dlla_status
ldr r12, [r11]
- and r12, r12, #0x4
- cmp r12, #0x4
+ and r12, r12, #LOCKSTATUS_MASK
+ cmp r12, #LOCKSTATUS_MASK
bne wait_dll_lock
bx lr
wait_dll_unlock:
ldr r11, omap3_sdrc_dlla_status
ldr r12, [r11]
- and r12, r12, #0x4
+ and r12, r12, #LOCKSTATUS_MASK
cmp r12, #0x0
bne wait_dll_unlock
bx lr
@@ -156,7 +193,9 @@
str r1, [r11]
ldr r11, omap3_sdrc_actim_ctrlb
str r2, [r11]
- ldr r2, [r11] @ posted-write barrier for SDRC
+ ldr r11, omap3_sdrc_mr_0
+ str r6, [r11]
+ ldr r6, [r11] @ posted-write barrier for SDRC
bx lr
omap3_sdrc_power:
@@ -173,6 +212,8 @@
.word OMAP34XX_SDRC_REGADDR(SDRC_ACTIM_CTRL_A_0)
omap3_sdrc_actim_ctrlb:
.word OMAP34XX_SDRC_REGADDR(SDRC_ACTIM_CTRL_B_0)
+omap3_sdrc_mr_0:
+ .word OMAP34XX_SDRC_REGADDR(SDRC_MR_0)
omap3_sdrc_dlla_status:
.word OMAP34XX_SDRC_REGADDR(SDRC_DLLA_STATUS)
omap3_sdrc_dlla_ctrl:
diff --git a/arch/arm/mach-orion5x/addr-map.c b/arch/arm/mach-orion5x/addr-map.c
index 6f3f77d..d78731e 100644
--- a/arch/arm/mach-orion5x/addr-map.c
+++ b/arch/arm/mach-orion5x/addr-map.c
@@ -200,6 +200,6 @@
int __init orion5x_setup_sram_win(void)
{
- return setup_cpu_win(win_alloc_count, ORION5X_SRAM_PHYS_BASE,
+ return setup_cpu_win(win_alloc_count++, ORION5X_SRAM_PHYS_BASE,
ORION5X_SRAM_SIZE, TARGET_SRAM, ATTR_SRAM, -1);
}
diff --git a/arch/arm/mach-orion5x/common.c b/arch/arm/mach-orion5x/common.c
index eafcc49..f87fa12 100644
--- a/arch/arm/mach-orion5x/common.c
+++ b/arch/arm/mach-orion5x/common.c
@@ -562,7 +562,7 @@
.resource = orion5x_crypto_res,
};
-int __init orion5x_crypto_init(void)
+static int __init orion5x_crypto_init(void)
{
int ret;
@@ -697,6 +697,14 @@
}
/*
+ * The 5082/5181l/5182/6082/6082l/6183 have crypto
+ * while 5180n/5181/5281 don't have crypto.
+ */
+ if ((dev == MV88F5181_DEV_ID && rev >= MV88F5181L_REV_A0) ||
+ dev == MV88F5182_DEV_ID || dev == MV88F6183_DEV_ID)
+ orion5x_crypto_init();
+
+ /*
* Register watchdog driver
*/
orion5x_wdt_init();
diff --git a/arch/arm/mach-orion5x/common.h b/arch/arm/mach-orion5x/common.h
index de483e8..8f00450 100644
--- a/arch/arm/mach-orion5x/common.h
+++ b/arch/arm/mach-orion5x/common.h
@@ -38,7 +38,6 @@
void orion5x_uart0_init(void);
void orion5x_uart1_init(void);
void orion5x_xor_init(void);
-int orion5x_crypto_init(void);
/*
* PCIe/PCI functions.
diff --git a/arch/arm/mach-pxa/Kconfig b/arch/arm/mach-pxa/Kconfig
index f4533f8..89c992b 100644
--- a/arch/arm/mach-pxa/Kconfig
+++ b/arch/arm/mach-pxa/Kconfig
@@ -401,6 +401,16 @@
Say Y here if you intend to run this kernel on Palm Zire 72
handheld computer.
+config MACH_TREO680
+ bool "Palm Treo 680"
+ default y
+ depends on ARCH_PXA_PALM
+ select PXA27x
+ select IWMMXT
+ help
+ Say Y here if you intend to run this kernel on the Palm Treo 680
+ smartphone.
+
config MACH_PALMLD
bool "Palm LifeDrive"
default y
diff --git a/arch/arm/mach-pxa/Makefile b/arch/arm/mach-pxa/Makefile
index d18ffef..d4c6122 100644
--- a/arch/arm/mach-pxa/Makefile
+++ b/arch/arm/mach-pxa/Makefile
@@ -62,6 +62,7 @@
obj-$(CONFIG_MACH_PALMTX) += palmtx.o
obj-$(CONFIG_MACH_PALMLD) += palmld.o
obj-$(CONFIG_MACH_PALMZ72) += palmz72.o
+obj-$(CONFIG_MACH_TREO680) += treo680.o
obj-$(CONFIG_ARCH_VIPER) += viper.o
ifeq ($(CONFIG_MACH_ZYLONITE),y)
diff --git a/arch/arm/mach-pxa/corgi.c b/arch/arm/mach-pxa/corgi.c
index 962dda2..5363e1a 100644
--- a/arch/arm/mach-pxa/corgi.c
+++ b/arch/arm/mach-pxa/corgi.c
@@ -23,6 +23,7 @@
#include <linux/pm.h>
#include <linux/gpio.h>
#include <linux/backlight.h>
+#include <linux/i2c.h>
#include <linux/io.h>
#include <linux/spi/spi.h>
#include <linux/spi/ads7846.h>
@@ -600,6 +601,10 @@
&sharpsl_rom_device,
};
+static struct i2c_board_info __initdata corgi_i2c_devices[] = {
+ { I2C_BOARD_INFO("wm8731", 0x1b) },
+};
+
static void corgi_poweroff(void)
{
if (!machine_is_corgi())
@@ -634,6 +639,7 @@
pxa_set_mci_info(&corgi_mci_platform_data);
pxa_set_ficp_info(&corgi_ficp_platform_data);
pxa_set_i2c_info(NULL);
+ i2c_register_board_info(0, ARRAY_AND_SIZE(corgi_i2c_devices));
platform_scoop_config = &corgi_pcmcia_config;
diff --git a/arch/arm/mach-pxa/em-x270.c b/arch/arm/mach-pxa/em-x270.c
index 243e080..63b10d9 100644
--- a/arch/arm/mach-pxa/em-x270.c
+++ b/arch/arm/mach-pxa/em-x270.c
@@ -30,6 +30,7 @@
#include <linux/apm-emulation.h>
#include <linux/i2c.h>
#include <linux/i2c/pca953x.h>
+#include <linux/regulator/userspace-consumer.h>
#include <media/soc_camera.h>
@@ -735,6 +736,7 @@
.rx_threshold = 1,
.tx_threshold = 1,
.timeout = 1000,
+ .gpio_cs = 14,
};
static unsigned long em_x270_libertas_pin_config[] = {
@@ -803,7 +805,6 @@
struct libertas_spi_platform_data em_x270_libertas_pdata = {
.use_dummy_writes = 1,
- .gpio_cs = 14,
.setup = em_x270_libertas_setup,
.teardown = em_x270_libertas_teardown,
};
@@ -838,10 +839,14 @@
static inline void em_x270_init_spi(void) {}
#endif
-#if defined(CONFIG_SND_PXA2XX_AC97) || defined(CONFIG_SND_PXA2XX_AC97_MODULE)
+#if defined(CONFIG_SND_PXA2XX_LIB_AC97)
+static pxa2xx_audio_ops_t em_x270_ac97_info = {
+ .reset_gpio = 113,
+};
+
static void __init em_x270_init_ac97(void)
{
- pxa_set_ac97_info(NULL);
+ pxa_set_ac97_info(&em_x270_ac97_info);
}
#else
static inline void em_x270_init_ac97(void) {}
@@ -1038,6 +1043,52 @@
static inline void em_x270_init_camera(void) {}
#endif
+static struct regulator_bulk_data em_x270_gps_consumer_supply = {
+ .supply = "vcc gps",
+};
+
+static struct regulator_userspace_consumer_data em_x270_gps_consumer_data = {
+ .name = "vcc gps",
+ .num_supplies = 1,
+ .supplies = &em_x270_gps_consumer_supply,
+};
+
+static struct platform_device em_x270_gps_userspace_consumer = {
+ .name = "reg-userspace-consumer",
+ .id = 0,
+ .dev = {
+ .platform_data = &em_x270_gps_consumer_data,
+ },
+};
+
+static struct regulator_bulk_data em_x270_gprs_consumer_supply = {
+ .supply = "vcc gprs",
+};
+
+static struct regulator_userspace_consumer_data em_x270_gprs_consumer_data = {
+ .name = "vcc gprs",
+ .num_supplies = 1,
+ .supplies = &em_x270_gprs_consumer_supply
+};
+
+static struct platform_device em_x270_gprs_userspace_consumer = {
+ .name = "reg-userspace-consumer",
+ .id = 1,
+ .dev = {
+ .platform_data = &em_x270_gprs_consumer_data,
+ }
+};
+
+static struct platform_device *em_x270_userspace_consumers[] = {
+ &em_x270_gps_userspace_consumer,
+ &em_x270_gprs_userspace_consumer,
+};
+
+static void __init em_x270_userspace_consumers_init(void)
+{
+ platform_add_devices(ARRAY_AND_SIZE(em_x270_userspace_consumers));
+}
+
/* DA9030 related initializations */
#define REGULATOR_CONSUMER(_name, _dev, _supply) \
static struct regulator_consumer_supply _name##_consumers[] = { \
@@ -1047,11 +1098,11 @@
}, \
}
-REGULATOR_CONSUMER(ldo3, NULL, "vcc gps");
+REGULATOR_CONSUMER(ldo3, &em_x270_gps_userspace_consumer.dev, "vcc gps");
REGULATOR_CONSUMER(ldo5, NULL, "vcc cam");
REGULATOR_CONSUMER(ldo10, &pxa_device_mci.dev, "vcc sdio");
REGULATOR_CONSUMER(ldo12, NULL, "vcc usb");
-REGULATOR_CONSUMER(ldo19, NULL, "vcc gprs");
+REGULATOR_CONSUMER(ldo19, &em_x270_gprs_userspace_consumer.dev, "vcc gprs");
#define REGULATOR_INIT(_ldo, _min_uV, _max_uV, _ops_mask) \
static struct regulator_init_data _ldo##_data = { \
@@ -1062,6 +1113,7 @@
.enabled = 0, \
}, \
.valid_ops_mask = _ops_mask, \
+ .apply_uV = 1, \
}, \
.num_consumer_supplies = ARRAY_SIZE(_ldo##_consumers), \
.consumer_supplies = _ldo##_consumers, \
@@ -1240,6 +1292,7 @@
em_x270_init_spi();
em_x270_init_i2c();
em_x270_init_camera();
+ em_x270_userspace_consumers_init();
}
MACHINE_START(EM_X270, "Compulab EM-X270")
diff --git a/arch/arm/mach-pxa/hx4700.c b/arch/arm/mach-pxa/hx4700.c
index 7fff467..81359d5 100644
--- a/arch/arm/mach-pxa/hx4700.c
+++ b/arch/arm/mach-pxa/hx4700.c
@@ -30,6 +30,7 @@
#include <linux/pwm_backlight.h>
#include <linux/regulator/bq24022.h>
#include <linux/regulator/machine.h>
+#include <linux/regulator/max1586.h>
#include <linux/spi/ads7846.h>
#include <linux/spi/spi.h>
#include <linux/usb/gpio_vbus.h>
@@ -775,6 +776,45 @@
};
/*
+ * Maxim MAX1587A on PI2C
+ */
+
+static struct regulator_consumer_supply max1587a_consumer = {
+ .supply = "vcc_core",
+};
+
+static struct regulator_init_data max1587a_v3_info = {
+ .constraints = {
+ .name = "vcc_core range",
+ .min_uV = 900000,
+ .max_uV = 1705000,
+ .always_on = 1,
+ .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE,
+ },
+ .num_consumer_supplies = 1,
+ .consumer_supplies = &max1587a_consumer,
+};
+
+static struct max1586_subdev_data max1587a_subdev = {
+ .name = "vcc_core",
+ .id = MAX1586_V3,
+ .platform_data = &max1587a_v3_info,
+};
+
+static struct max1586_platform_data max1587a_info = {
+ .num_subdevs = 1,
+ .subdevs = &max1587a_subdev,
+ .v3_gain = MAX1586_GAIN_R24_3k32, /* 730..1550 mV */
+};
+
+static struct i2c_board_info __initdata pi2c_board_info[] = {
+ {
+ I2C_BOARD_INFO("max1586", 0x14),
+ .platform_data = &max1587a_info,
+ },
+};
+
+/*
* PCMCIA
*/
@@ -828,6 +868,7 @@
pxa_set_ficp_info(&ficp_info);
pxa27x_set_i2c_power_info(NULL);
pxa_set_i2c_info(NULL);
+ i2c_register_board_info(1, ARRAY_AND_SIZE(pi2c_board_info));
pxa2xx_set_spi_info(2, &pxa_ssp2_master_info);
spi_register_board_info(ARRAY_AND_SIZE(tsc2046_board_info));
diff --git a/arch/arm/mach-pxa/include/mach/palmz72.h b/arch/arm/mach-pxa/include/mach/palmz72.h
index 5032307..2806ef6 100644
--- a/arch/arm/mach-pxa/include/mach/palmz72.h
+++ b/arch/arm/mach-pxa/include/mach/palmz72.h
@@ -21,7 +21,7 @@
/* SD/MMC */
#define GPIO_NR_PALMZ72_SD_DETECT_N 14
#define GPIO_NR_PALMZ72_SD_POWER_N 98
-#define GPIO_NR_PALMZ72_SD_RO 115
+#define GPIO_NR_PALMZ72_SD_RO 115
/* Touchscreen */
#define GPIO_NR_PALMZ72_WM9712_IRQ 27
@@ -31,8 +31,7 @@
/* USB */
#define GPIO_NR_PALMZ72_USB_DETECT_N 15
-#define GPIO_NR_PALMZ72_USB_POWER 95
-#define GPIO_NR_PALMZ72_USB_PULLUP 12
+#define GPIO_NR_PALMZ72_USB_PULLUP 95
/* LCD/Backlight */
#define GPIO_NR_PALMZ72_BL_POWER 20
diff --git a/arch/arm/mach-pxa/include/mach/treo680.h b/arch/arm/mach-pxa/include/mach/treo680.h
new file mode 100644
index 0000000..af443b2
--- /dev/null
+++ b/arch/arm/mach-pxa/include/mach/treo680.h
@@ -0,0 +1,49 @@
+/*
+ * GPIOs and interrupts for Palm Treo 680 smartphone
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#ifndef _INCLUDE_TREO680_H_
+#define _INCLUDE_TREO680_H_
+
+/* GPIOs */
+#define GPIO_NR_TREO680_POWER_DETECT 0
+#define GPIO_NR_TREO680_AMP_EN 27
+#define GPIO_NR_TREO680_KEYB_BL 24
+#define GPIO_NR_TREO680_VIBRATE_EN 44
+#define GPIO_NR_TREO680_GREEN_LED 20
+#define GPIO_NR_TREO680_RED_LED 79
+#define GPIO_NR_TREO680_SD_DETECT_N 113
+#define GPIO_NR_TREO680_SD_READONLY 33
+#define GPIO_NR_TREO680_EP_DETECT_N 116
+#define GPIO_NR_TREO680_SD_POWER 42
+#define GPIO_NR_TREO680_USB_DETECT 1
+#define GPIO_NR_TREO680_USB_PULLUP 114
+#define GPIO_NR_TREO680_GSM_POWER 40
+#define GPIO_NR_TREO680_GSM_RESET 87
+#define GPIO_NR_TREO680_GSM_WAKE 57
+#define GPIO_NR_TREO680_GSM_HOST_WAKE 14
+#define GPIO_NR_TREO680_GSM_TRIGGER 10
+#define GPIO_NR_TREO680_BT_EN 43
+#define GPIO_NR_TREO680_IR_EN 115
+#define GPIO_NR_TREO680_IR_TXD 47
+#define GPIO_NR_TREO680_BL_POWER 38
+#define GPIO_NR_TREO680_LCD_POWER 25
+
+/* Various addresses */
+#define TREO680_PHYS_RAM_START 0xa0000000
+#define TREO680_PHYS_IO_START 0x40000000
+#define TREO680_STR_BASE 0xa2000000
+
+/* BACKLIGHT */
+#define TREO680_MAX_INTENSITY 254
+#define TREO680_DEFAULT_INTENSITY 160
+#define TREO680_LIMIT_MASK 0x7F
+#define TREO680_PRESCALER 63
+#define TREO680_PERIOD_NS 3500
+
+#endif
diff --git a/arch/arm/mach-pxa/mioa701.c b/arch/arm/mach-pxa/mioa701.c
index 4dc8c2e..2d28132 100644
--- a/arch/arm/mach-pxa/mioa701.c
+++ b/arch/arm/mach-pxa/mioa701.c
@@ -37,6 +37,7 @@
#include <linux/wm97xx_batt.h>
#include <linux/mtd/physmap.h>
#include <linux/usb/gpio_vbus.h>
+#include <linux/regulator/max1586.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
@@ -717,6 +718,38 @@
};
/*
+ * Voltage regulation
+ */
+static struct regulator_consumer_supply max1586_consumers[] = {
+ {
+ .supply = "vcc_core",
+ }
+};
+
+static struct regulator_init_data max1586_v3_info = {
+ .constraints = {
+ .name = "vcc_core range",
+ .min_uV = 1000000,
+ .max_uV = 1705000,
+ .always_on = 1,
+ .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE,
+ },
+ .num_consumer_supplies = ARRAY_SIZE(max1586_consumers),
+ .consumer_supplies = max1586_consumers,
+};
+
+static struct max1586_subdev_data max1586_subdevs[] = {
+ { .name = "vcc_core", .id = MAX1586_V3,
+ .platform_data = &max1586_v3_info },
+};
+
+static struct max1586_platform_data max1586_info = {
+ .subdevs = max1586_subdevs,
+ .num_subdevs = ARRAY_SIZE(max1586_subdevs),
+ .v3_gain = MAX1586_GAIN_NO_R24, /* 700..1475 mV */
+};
+
+/*
* Camera interface
*/
struct pxacamera_platform_data mioa701_pxacamera_platform_data = {
@@ -725,6 +758,13 @@
.mclk_10khz = 5000,
};
+static struct i2c_board_info __initdata mioa701_pi2c_devices[] = {
+ {
+ I2C_BOARD_INFO("max1586", 0x14),
+ .platform_data = &max1586_info,
+ },
+};
+
static struct soc_camera_link iclink = {
.bus_id = 0, /* Must match id in pxa27x_device_camera in device.c */
};
@@ -825,7 +865,9 @@
platform_add_devices(devices, ARRAY_SIZE(devices));
gsm_init();
+ i2c_register_board_info(1, ARRAY_AND_SIZE(mioa701_pi2c_devices));
pxa_set_i2c_info(&i2c_pdata);
+ pxa27x_set_i2c_power_info(NULL);
pxa_set_camera_info(&mioa701_pxacamera_platform_data);
i2c_register_board_info(0, ARRAY_AND_SIZE(mioa701_i2c_devices));
}
diff --git a/arch/arm/mach-pxa/palmz72.c b/arch/arm/mach-pxa/palmz72.c
index b88eb4d..c3645aa 100644
--- a/arch/arm/mach-pxa/palmz72.c
+++ b/arch/arm/mach-pxa/palmz72.c
@@ -27,7 +27,9 @@
#include <linux/pda_power.h>
#include <linux/pwm_backlight.h>
#include <linux/gpio.h>
+#include <linux/wm97xx_batt.h>
#include <linux/power_supply.h>
+#include <linux/usb/gpio_vbus.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
@@ -41,6 +43,8 @@
#include <mach/irda.h>
#include <mach/pxa27x_keypad.h>
#include <mach/udc.h>
+#include <mach/palmasoc.h>
+
#include <mach/pm.h>
#include "generic.h"
@@ -66,6 +70,8 @@
GPIO29_AC97_SDATA_IN_0,
GPIO30_AC97_SDATA_OUT,
GPIO31_AC97_SYNC,
+ GPIO89_AC97_SYSCLK,
+ GPIO113_AC97_nRESET,
/* IrDA */
GPIO49_GPIO, /* ir disable */
@@ -77,8 +83,7 @@
/* USB */
GPIO15_GPIO, /* usb detect */
- GPIO12_GPIO, /* usb pullup */
- GPIO95_GPIO, /* usb power */
+ GPIO95_GPIO, /* usb pullup */
/* Matrix keypad */
GPIO100_KP_MKIN_0 | WAKEUP_ON_LEVEL_HIGH,
@@ -355,6 +360,22 @@
};
/******************************************************************************
+ * UDC
+ ******************************************************************************/
+static struct gpio_vbus_mach_info palmz72_udc_info = {
+ .gpio_vbus = GPIO_NR_PALMZ72_USB_DETECT_N,
+ .gpio_pullup = GPIO_NR_PALMZ72_USB_PULLUP,
+};
+
+static struct platform_device palmz72_gpio_vbus = {
+ .name = "gpio-vbus",
+ .id = -1,
+ .dev = {
+ .platform_data = &palmz72_udc_info,
+ },
+};
+
+/******************************************************************************
* Power supply
******************************************************************************/
static int power_supply_init(struct device *dev)
@@ -422,6 +443,31 @@
};
/******************************************************************************
+ * WM97xx battery
+ ******************************************************************************/
+static struct wm97xx_batt_info wm97xx_batt_pdata = {
+ .batt_aux = WM97XX_AUX_ID3,
+ .temp_aux = WM97XX_AUX_ID2,
+ .charge_gpio = -1,
+ .max_voltage = PALMZ72_BAT_MAX_VOLTAGE,
+ .min_voltage = PALMZ72_BAT_MIN_VOLTAGE,
+ .batt_mult = 1000,
+ .batt_div = 414,
+ .temp_mult = 1,
+ .temp_div = 1,
+ .batt_tech = POWER_SUPPLY_TECHNOLOGY_LIPO,
+ .batt_name = "main-batt",
+};
+
+/******************************************************************************
+ * aSoC audio
+ ******************************************************************************/
+static struct platform_device palmz72_asoc = {
+ .name = "palm27x-asoc",
+ .id = -1,
+};
+
+/******************************************************************************
* Framebuffer
******************************************************************************/
static struct pxafb_mode_info palmz72_lcd_modes[] = {
@@ -527,17 +573,32 @@
static struct platform_device *devices[] __initdata = {
&palmz72_backlight,
&palmz72_leds,
+ &palmz72_asoc,
&power_supply,
+ &palmz72_gpio_vbus,
};
+/* setup udc GPIOs initial state */
+static void __init palmz72_udc_init(void)
+{
+ if (!gpio_request(GPIO_NR_PALMZ72_USB_PULLUP, "USB Pullup")) {
+ gpio_direction_output(GPIO_NR_PALMZ72_USB_PULLUP, 0);
+ gpio_free(GPIO_NR_PALMZ72_USB_PULLUP);
+ }
+}
+
static void __init palmz72_init(void)
{
pxa2xx_mfp_config(ARRAY_AND_SIZE(palmz72_pin_config));
+
set_pxa_fb_info(&palmz72_lcd_screen);
pxa_set_mci_info(&palmz72_mci_platform_data);
+ palmz72_udc_init();
pxa_set_ac97_info(NULL);
pxa_set_ficp_info(&palmz72_ficp_platform_data);
pxa_set_keypad_info(&palmz72_keypad_platform_data);
+ wm97xx_bat_set_pdata(&wm97xx_batt_pdata);
+
platform_add_devices(devices, ARRAY_SIZE(devices));
}
diff --git a/arch/arm/mach-pxa/poodle.c b/arch/arm/mach-pxa/poodle.c
index ac431ed..9352d4a 100644
--- a/arch/arm/mach-pxa/poodle.c
+++ b/arch/arm/mach-pxa/poodle.c
@@ -22,6 +22,7 @@
#include <linux/delay.h>
#include <linux/mtd/physmap.h>
#include <linux/gpio.h>
+#include <linux/i2c.h>
#include <linux/spi/spi.h>
#include <linux/spi/ads7846.h>
#include <linux/mtd/sharpsl.h>
@@ -486,6 +487,10 @@
&sharpsl_rom_device,
};
+static struct i2c_board_info __initdata poodle_i2c_devices[] = {
+ { I2C_BOARD_INFO("wm8731", 0x1b) },
+};
+
static void poodle_poweroff(void)
{
arm_machine_restart('h', NULL);
@@ -519,6 +524,7 @@
pxa_set_mci_info(&poodle_mci_platform_data);
pxa_set_ficp_info(&poodle_ficp_platform_data);
pxa_set_i2c_info(NULL);
+ i2c_register_board_info(0, ARRAY_AND_SIZE(poodle_i2c_devices));
poodle_init_spi();
}
diff --git a/arch/arm/mach-pxa/treo680.c b/arch/arm/mach-pxa/treo680.c
new file mode 100644
index 0000000..a06f19e
--- /dev/null
+++ b/arch/arm/mach-pxa/treo680.c
@@ -0,0 +1,612 @@
+/*
+ * Hardware definitions for Palm Treo 680
+ *
+ * Author: Tomas Cech <sleep_walker@suse.cz>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * (find more info at www.hackndev.com)
+ *
+ */
+
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+#include <linux/irq.h>
+#include <linux/gpio_keys.h>
+#include <linux/input.h>
+#include <linux/pda_power.h>
+#include <linux/pwm_backlight.h>
+#include <linux/gpio.h>
+#include <linux/wm97xx_batt.h>
+#include <linux/power_supply.h>
+#include <linux/sysdev.h>
+#include <linux/w1-gpio.h>
+
+#include <asm/mach-types.h>
+#include <asm/mach/arch.h>
+#include <asm/mach/map.h>
+
+#include <mach/pxa27x.h>
+#include <mach/pxa27x-udc.h>
+#include <mach/audio.h>
+#include <mach/treo680.h>
+#include <mach/mmc.h>
+#include <mach/pxafb.h>
+#include <mach/irda.h>
+#include <mach/pxa27x_keypad.h>
+#include <mach/udc.h>
+#include <mach/ohci.h>
+#include <mach/pxa2xx-regs.h>
+#include <mach/palmasoc.h>
+#include <mach/camera.h>
+
+#include <sound/pxa2xx-lib.h>
+
+#include "generic.h"
+#include "devices.h"
+
+/******************************************************************************
+ * Pin configuration
+ ******************************************************************************/
+static unsigned long treo680_pin_config[] __initdata = {
+ /* MMC */
+ GPIO32_MMC_CLK,
+ GPIO92_MMC_DAT_0,
+ GPIO109_MMC_DAT_1,
+ GPIO110_MMC_DAT_2,
+ GPIO111_MMC_DAT_3,
+ GPIO112_MMC_CMD,
+ GPIO33_GPIO, /* SD read only */
+ GPIO113_GPIO, /* SD detect */
+
+ /* AC97 */
+ GPIO28_AC97_BITCLK,
+ GPIO29_AC97_SDATA_IN_0,
+ GPIO30_AC97_SDATA_OUT,
+ GPIO31_AC97_SYNC,
+ GPIO89_AC97_SYSCLK,
+ GPIO95_AC97_nRESET,
+
+ /* IrDA */
+ GPIO46_FICP_RXD,
+ GPIO47_FICP_TXD,
+
+ /* PWM */
+ GPIO16_PWM0_OUT,
+
+ /* USB */
+ GPIO1_GPIO | WAKEUP_ON_EDGE_BOTH, /* usb detect */
+
+ /* MATRIX KEYPAD */
+ GPIO100_KP_MKIN_0 | WAKEUP_ON_LEVEL_HIGH,
+ GPIO101_KP_MKIN_1,
+ GPIO102_KP_MKIN_2,
+ GPIO97_KP_MKIN_3,
+ GPIO98_KP_MKIN_4,
+ GPIO99_KP_MKIN_5,
+ GPIO91_KP_MKIN_6,
+ GPIO13_KP_MKIN_7,
+ GPIO103_KP_MKOUT_0 | MFP_LPM_DRIVE_HIGH,
+ GPIO104_KP_MKOUT_1,
+ GPIO105_KP_MKOUT_2,
+ GPIO106_KP_MKOUT_3,
+ GPIO107_KP_MKOUT_4,
+ GPIO108_KP_MKOUT_5,
+ GPIO96_KP_MKOUT_6,
+ GPIO93_KP_DKIN_0 | WAKEUP_ON_LEVEL_HIGH, /* Hotsync button */
+
+ /* LCD */
+ GPIO58_LCD_LDD_0,
+ GPIO59_LCD_LDD_1,
+ GPIO60_LCD_LDD_2,
+ GPIO61_LCD_LDD_3,
+ GPIO62_LCD_LDD_4,
+ GPIO63_LCD_LDD_5,
+ GPIO64_LCD_LDD_6,
+ GPIO65_LCD_LDD_7,
+ GPIO66_LCD_LDD_8,
+ GPIO67_LCD_LDD_9,
+ GPIO68_LCD_LDD_10,
+ GPIO69_LCD_LDD_11,
+ GPIO70_LCD_LDD_12,
+ GPIO71_LCD_LDD_13,
+ GPIO72_LCD_LDD_14,
+ GPIO73_LCD_LDD_15,
+ GPIO74_LCD_FCLK,
+ GPIO75_LCD_LCLK,
+ GPIO76_LCD_PCLK,
+
+ /* Quick Capture Interface */
+ GPIO84_CIF_FV,
+ GPIO85_CIF_LV,
+ GPIO53_CIF_MCLK,
+ GPIO54_CIF_PCLK,
+ GPIO81_CIF_DD_0,
+ GPIO55_CIF_DD_1,
+ GPIO51_CIF_DD_2,
+ GPIO50_CIF_DD_3,
+ GPIO52_CIF_DD_4,
+ GPIO48_CIF_DD_5,
+ GPIO17_CIF_DD_6,
+ GPIO12_CIF_DD_7,
+
+ /* I2C */
+ GPIO117_I2C_SCL,
+ GPIO118_I2C_SDA,
+
+ /* GSM */
+ GPIO14_GPIO | WAKEUP_ON_EDGE_BOTH, /* GSM host wake up */
+ GPIO34_FFUART_RXD,
+ GPIO35_FFUART_CTS,
+ GPIO39_FFUART_TXD,
+ GPIO41_FFUART_RTS,
+
+ /* MISC. */
+ GPIO0_GPIO | WAKEUP_ON_EDGE_BOTH, /* external power detect */
+ GPIO15_GPIO | WAKEUP_ON_EDGE_BOTH, /* silent switch */
+ GPIO116_GPIO, /* headphone detect */
+ GPIO11_GPIO | WAKEUP_ON_EDGE_BOTH, /* bluetooth host wake up */
+};
+
+/******************************************************************************
+ * SD/MMC card controller
+ ******************************************************************************/
+static int treo680_mci_init(struct device *dev,
+ irq_handler_t treo680_detect_int, void *data)
+{
+ int err = 0;
+
+ /* Setup an interrupt for detecting card insert/remove events */
+ err = gpio_request(GPIO_NR_TREO680_SD_DETECT_N, "SD IRQ");
+
+ if (err)
+ goto err;
+
+ err = gpio_direction_input(GPIO_NR_TREO680_SD_DETECT_N);
+ if (err)
+ goto err2;
+
+ err = request_irq(gpio_to_irq(GPIO_NR_TREO680_SD_DETECT_N),
+ treo680_detect_int, IRQF_DISABLED | IRQF_SAMPLE_RANDOM |
+ IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING,
+ "SD/MMC card detect", data);
+
+ if (err) {
+ dev_err(dev, "%s: cannot request SD/MMC card detect IRQ\n",
+ __func__);
+ goto err2;
+ }
+
+ err = gpio_request(GPIO_NR_TREO680_SD_POWER, "SD_POWER");
+ if (err)
+ goto err3;
+
+ err = gpio_direction_output(GPIO_NR_TREO680_SD_POWER, 1);
+ if (err)
+ goto err4;
+
+ err = gpio_request(GPIO_NR_TREO680_SD_READONLY, "SD_READONLY");
+ if (err)
+ goto err4;
+
+ err = gpio_direction_input(GPIO_NR_TREO680_SD_READONLY);
+ if (err)
+ goto err5;
+
+ return 0;
+
+err5:
+ gpio_free(GPIO_NR_TREO680_SD_READONLY);
+err4:
+ gpio_free(GPIO_NR_TREO680_SD_POWER);
+err3:
+ free_irq(gpio_to_irq(GPIO_NR_TREO680_SD_DETECT_N), data);
+err2:
+ gpio_free(GPIO_NR_TREO680_SD_DETECT_N);
+err:
+ return err;
+}
+
+static void treo680_mci_exit(struct device *dev, void *data)
+{
+ gpio_free(GPIO_NR_TREO680_SD_READONLY);
+ gpio_free(GPIO_NR_TREO680_SD_POWER);
+ free_irq(gpio_to_irq(GPIO_NR_TREO680_SD_DETECT_N), data);
+ gpio_free(GPIO_NR_TREO680_SD_DETECT_N);
+}
+
+static void treo680_mci_power(struct device *dev, unsigned int vdd)
+{
+ struct pxamci_platform_data *p_d = dev->platform_data;
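+ /* vdd is an MMC_VDD_* bit number: drive the SD power GPIO only for voltages we advertise */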
+ gpio_set_value(GPIO_NR_TREO680_SD_POWER, p_d->ocr_mask & (1 << vdd));
+}
+
+static int treo680_mci_get_ro(struct device *dev)
+{
+ return gpio_get_value(GPIO_NR_TREO680_SD_READONLY);
+}
+
+static struct pxamci_platform_data treo680_mci_platform_data = {
+ .ocr_mask = MMC_VDD_32_33 | MMC_VDD_33_34,
+ .setpower = treo680_mci_power,
+ .get_ro = treo680_mci_get_ro,
+ .init = treo680_mci_init,
+ .exit = treo680_mci_exit,
+};
+
+/******************************************************************************
+ * GPIO keyboard
+ ******************************************************************************/
+static unsigned int treo680_matrix_keys[] = {
+ KEY(0, 0, KEY_F8), /* Red/Off/Power */
+ KEY(0, 1, KEY_LEFT),
+ KEY(0, 2, KEY_LEFTCTRL), /* Alternate */
+ KEY(0, 3, KEY_L),
+ KEY(0, 4, KEY_A),
+ KEY(0, 5, KEY_Q),
+ KEY(0, 6, KEY_P),
+
+ KEY(1, 0, KEY_RIGHTCTRL), /* Menu */
+ KEY(1, 1, KEY_RIGHT),
+ KEY(1, 2, KEY_LEFTSHIFT), /* Left shift */
+ KEY(1, 3, KEY_Z),
+ KEY(1, 4, KEY_S),
+ KEY(1, 5, KEY_W),
+
+ KEY(2, 0, KEY_F1), /* Phone */
+ KEY(2, 1, KEY_UP),
+ KEY(2, 2, KEY_0),
+ KEY(2, 3, KEY_X),
+ KEY(2, 4, KEY_D),
+ KEY(2, 5, KEY_E),
+
+ KEY(3, 0, KEY_F10), /* Calendar */
+ KEY(3, 1, KEY_DOWN),
+ KEY(3, 2, KEY_SPACE),
+ KEY(3, 3, KEY_C),
+ KEY(3, 4, KEY_F),
+ KEY(3, 5, KEY_R),
+
+ KEY(4, 0, KEY_F12), /* Mail */
+ KEY(4, 1, KEY_KPENTER),
+ KEY(4, 2, KEY_RIGHTALT), /* Alt */
+ KEY(4, 3, KEY_V),
+ KEY(4, 4, KEY_G),
+ KEY(4, 5, KEY_T),
+
+ KEY(5, 0, KEY_F9), /* Home */
+ KEY(5, 1, KEY_PAGEUP), /* Side up */
+ KEY(5, 2, KEY_DOT),
+ KEY(5, 3, KEY_B),
+ KEY(5, 4, KEY_H),
+ KEY(5, 5, KEY_Y),
+
+ KEY(6, 0, KEY_TAB), /* Side Activate */
+ KEY(6, 1, KEY_PAGEDOWN), /* Side down */
+ KEY(6, 2, KEY_ENTER),
+ KEY(6, 3, KEY_N),
+ KEY(6, 4, KEY_J),
+ KEY(6, 5, KEY_U),
+
+ KEY(7, 0, KEY_F6), /* Green/Call */
+ KEY(7, 1, KEY_O),
+ KEY(7, 2, KEY_BACKSPACE),
+ KEY(7, 3, KEY_M),
+ KEY(7, 4, KEY_K),
+ KEY(7, 5, KEY_I),
+};
+
+static struct pxa27x_keypad_platform_data treo680_keypad_platform_data = {
+ .matrix_key_rows = 8,
+ .matrix_key_cols = 7,
+ .matrix_key_map = treo680_matrix_keys,
+ .matrix_key_map_size = ARRAY_SIZE(treo680_matrix_keys),
+ .direct_key_map = { KEY_CONNECT },
+ .direct_key_num = 1,
+
+ .debounce_interval = 30,
+};
+
+/******************************************************************************
+ * ASoC audio
+ ******************************************************************************/
+
+static pxa2xx_audio_ops_t treo680_ac97_pdata = {
+ .reset_gpio = 95,
+};
+
+/******************************************************************************
+ * Backlight
+ ******************************************************************************/
+static int treo680_backlight_init(struct device *dev)
+{
+ int ret;
+
+ ret = gpio_request(GPIO_NR_TREO680_BL_POWER, "BL POWER");
+ if (ret)
+ goto err;
+ ret = gpio_direction_output(GPIO_NR_TREO680_BL_POWER, 0);
+ if (ret)
+ goto err2;
+ ret = gpio_request(GPIO_NR_TREO680_LCD_POWER, "LCD POWER");
+ if (ret)
+ goto err2;
+ ret = gpio_direction_output(GPIO_NR_TREO680_LCD_POWER, 0);
+ if (ret)
+ goto err3;
+
+ return 0;
+err3:
+ gpio_free(GPIO_NR_TREO680_LCD_POWER);
+err2:
+ gpio_free(GPIO_NR_TREO680_BL_POWER);
+err:
+ return ret;
+}
+
+static int treo680_backlight_notify(int brightness)
+{
+ gpio_set_value(GPIO_NR_TREO680_BL_POWER, brightness);
+ return TREO680_MAX_INTENSITY - brightness;
+};
+
+static void treo680_backlight_exit(struct device *dev)
+{
+ gpio_free(GPIO_NR_TREO680_BL_POWER);
+ gpio_free(GPIO_NR_TREO680_LCD_POWER);
+}
+
+static struct platform_pwm_backlight_data treo680_backlight_data = {
+ .pwm_id = 0,
+ .max_brightness = TREO680_MAX_INTENSITY,
+ .dft_brightness = TREO680_DEFAULT_INTENSITY,
+ .pwm_period_ns = TREO680_PERIOD_NS,
+ .init = treo680_backlight_init,
+ .notify = treo680_backlight_notify,
+ .exit = treo680_backlight_exit,
+};
+
+static struct platform_device treo680_backlight = {
+ .name = "pwm-backlight",
+ .dev = {
+ .parent = &pxa27x_device_pwm0.dev,
+ .platform_data = &treo680_backlight_data,
+ },
+};
+
+/******************************************************************************
+ * IrDA
+ ******************************************************************************/
+static void treo680_transceiver_mode(struct device *dev, int mode)
+{
+ gpio_set_value(GPIO_NR_TREO680_IR_EN, mode & IR_OFF);
+ pxa2xx_transceiver_mode(dev, mode);
+}
+
+static int treo680_irda_startup(struct device *dev)
+{
+ int err;
+
+ err = gpio_request(GPIO_NR_TREO680_IR_EN, "Ir port disable");
+ if (err)
+ goto err1;
+
+ err = gpio_direction_output(GPIO_NR_TREO680_IR_EN, 1);
+ if (err)
+ goto err2;
+
+ return 0;
+
+err2:
+ dev_err(dev, "treo680_irda: cannot change IR gpio direction\n");
+ gpio_free(GPIO_NR_TREO680_IR_EN);
+err1:
+ dev_err(dev, "treo680_irda: cannot allocate IR gpio\n");
+ return err;
+}
+
+static void treo680_irda_shutdown(struct device *dev)
+{
+ gpio_free(GPIO_NR_TREO680_IR_EN);
+}
+
+static struct pxaficp_platform_data treo680_ficp_info = {
+ .transceiver_cap = IR_FIRMODE | IR_SIRMODE | IR_OFF,
+ .startup = treo680_irda_startup,
+ .shutdown = treo680_irda_shutdown,
+ .transceiver_mode = treo680_transceiver_mode,
+};
+
+/******************************************************************************
+ * UDC
+ ******************************************************************************/
+static struct pxa2xx_udc_mach_info treo680_udc_info __initdata = {
+ .gpio_vbus = GPIO_NR_TREO680_USB_DETECT,
+ .gpio_vbus_inverted = 1,
+ .gpio_pullup = GPIO_NR_TREO680_USB_PULLUP,
+};
+
+
+/******************************************************************************
+ * USB host
+ ******************************************************************************/
+static struct pxaohci_platform_data treo680_ohci_info = {
+ .port_mode = PMM_PERPORT_MODE,
+ .flags = ENABLE_PORT1 | ENABLE_PORT3,
+ .power_budget = 0,
+};
+
+/******************************************************************************
+ * Power supply
+ ******************************************************************************/
+static int power_supply_init(struct device *dev)
+{
+ int ret;
+
+ ret = gpio_request(GPIO_NR_TREO680_POWER_DETECT, "CABLE_STATE_AC");
+ if (ret)
+ goto err1;
+ ret = gpio_direction_input(GPIO_NR_TREO680_POWER_DETECT);
+ if (ret)
+ goto err2;
+
+ return 0;
+
+err2:
+ gpio_free(GPIO_NR_TREO680_POWER_DETECT);
+err1:
+ return ret;
+}
+
+static int treo680_is_ac_online(void)
+{
+ return gpio_get_value(GPIO_NR_TREO680_POWER_DETECT);
+}
+
+static void power_supply_exit(struct device *dev)
+{
+ gpio_free(GPIO_NR_TREO680_POWER_DETECT);
+}
+
+static char *treo680_supplicants[] = {
+ "main-battery",
+};
+
+static struct pda_power_pdata power_supply_info = {
+ .init = power_supply_init,
+ .is_ac_online = treo680_is_ac_online,
+ .exit = power_supply_exit,
+ .supplied_to = treo680_supplicants,
+ .num_supplicants = ARRAY_SIZE(treo680_supplicants),
+};
+
+static struct platform_device power_supply = {
+ .name = "pda-power",
+ .id = -1,
+ .dev = {
+ .platform_data = &power_supply_info,
+ },
+};
+
+/******************************************************************************
+ * Vibra and LEDs
+ ******************************************************************************/
+static struct gpio_led gpio_leds[] = {
+ {
+ .name = "treo680:vibra:vibra",
+ .default_trigger = "none",
+ .gpio = GPIO_NR_TREO680_VIBRATE_EN,
+ },
+ {
+ .name = "treo680:green:led",
+ .default_trigger = "mmc0",
+ .gpio = GPIO_NR_TREO680_GREEN_LED,
+ },
+ {
+ .name = "treo680:keybbl:keybbl",
+ .default_trigger = "none",
+ .gpio = GPIO_NR_TREO680_KEYB_BL,
+ },
+};
+
+static struct gpio_led_platform_data gpio_led_info = {
+ .leds = gpio_leds,
+ .num_leds = ARRAY_SIZE(gpio_leds),
+};
+
+static struct platform_device treo680_leds = {
+ .name = "leds-gpio",
+ .id = -1,
+ .dev = {
+ .platform_data = &gpio_led_info,
+ }
+};
+
+
+/******************************************************************************
+ * Framebuffer
+ ******************************************************************************/
+/* TODO: add support for 324x324 */
+static struct pxafb_mode_info treo680_lcd_modes[] = {
+{
+ .pixclock = 86538,
+ .xres = 320,
+ .yres = 320,
+ .bpp = 16,
+
+ .left_margin = 20,
+ .right_margin = 8,
+ .upper_margin = 8,
+ .lower_margin = 5,
+
+ .hsync_len = 4,
+ .vsync_len = 1,
+},
+};
+
+static struct pxafb_mach_info treo680_lcd_screen = {
+ .modes = treo680_lcd_modes,
+ .num_modes = ARRAY_SIZE(treo680_lcd_modes),
+ .lcd_conn = LCD_COLOR_TFT_16BPP | LCD_PCLK_EDGE_FALL,
+};
+
+/******************************************************************************
+ * Power management - standby
+ ******************************************************************************/
+static void __init treo680_pm_init(void)
+{
+ static u32 resume[] = {
+ 0xe3a00101, /* mov r0, #0x40000000 */
+ 0xe380060f, /* orr r0, r0, #0x00f00000 */
+ 0xe590f008, /* ldr pc, [r0, #0x08] */
+ };
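+ /*
+ * This stub loads the resume address from the PXA27x PSPR scratchpad
+ * register (physical 0x40f00008), where the PXA PM code stores it
+ * before entering sleep.
+ */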
+
+ /* this is where the bootloader jumps */
+ memcpy(phys_to_virt(TREO680_STR_BASE), resume, sizeof(resume));
+}
+
+/******************************************************************************
+ * Machine init
+ ******************************************************************************/
+static struct platform_device *devices[] __initdata = {
+ &treo680_backlight,
+ &treo680_leds,
+ &power_supply,
+};
+
+/* setup udc GPIOs initial state */
+static void __init treo680_udc_init(void)
+{
+ if (!gpio_request(GPIO_NR_TREO680_USB_PULLUP, "UDC Vbus")) {
+ gpio_direction_output(GPIO_NR_TREO680_USB_PULLUP, 1);
+ gpio_free(GPIO_NR_TREO680_USB_PULLUP);
+ }
+}
+
+static void __init treo680_init(void)
+{
+ treo680_pm_init();
+ pxa2xx_mfp_config(ARRAY_AND_SIZE(treo680_pin_config));
+ pxa_set_keypad_info(&treo680_keypad_platform_data);
+ set_pxa_fb_info(&treo680_lcd_screen);
+ pxa_set_mci_info(&treo680_mci_platform_data);
+ treo680_udc_init();
+ pxa_set_udc_info(&treo680_udc_info);
+ pxa_set_ac97_info(&treo680_ac97_pdata);
+ pxa_set_ficp_info(&treo680_ficp_info);
+ pxa_set_ohci_info(&treo680_ohci_info);
+
+ platform_add_devices(devices, ARRAY_SIZE(devices));
+}
+
+MACHINE_START(TREO680, "Palm Treo 680")
+ .phys_io = TREO680_PHYS_IO_START,
+ .io_pg_offst = io_p2v(0x40000000),
+ .boot_params = 0xa0000100,
+ .map_io = pxa_map_io,
+ .init_irq = pxa27x_init_irq,
+ .timer = &pxa_timer,
+ .init_machine = treo680_init,
+MACHINE_END
diff --git a/arch/arm/mach-realview/realview_pbx.c b/arch/arm/mach-realview/realview_pbx.c
index 1fe294d..ede2a57 100644
--- a/arch/arm/mach-realview/realview_pbx.c
+++ b/arch/arm/mach-realview/realview_pbx.c
@@ -27,6 +27,7 @@
#include <asm/irq.h>
#include <asm/leds.h>
#include <asm/mach-types.h>
+#include <asm/smp_twd.h>
#include <asm/hardware/gic.h>
#include <asm/hardware/cache-l2x0.h>
diff --git a/arch/arm/mach-s3c2410/usb-simtec.c b/arch/arm/mach-s3c2410/usb-simtec.c
index 6cd9377..50e25fc 100644
--- a/arch/arm/mach-s3c2410/usb-simtec.c
+++ b/arch/arm/mach-s3c2410/usb-simtec.c
@@ -22,7 +22,6 @@
#include <linux/timer.h>
#include <linux/init.h>
#include <linux/device.h>
-#include <linux/gpio.h>
#include <linux/io.h>
#include <asm/mach/arch.h>
diff --git a/arch/arm/mach-s3c2440/Kconfig b/arch/arm/mach-s3c2440/Kconfig
index 5df73cb..8cfeaec 100644
--- a/arch/arm/mach-s3c2440/Kconfig
+++ b/arch/arm/mach-s3c2440/Kconfig
@@ -84,5 +84,15 @@
help
Say Y here if you are using the AT2440EVB development board
+config MACH_MINI2440
+ bool "MINI2440 development board"
+ select CPU_S3C2440
+ select EEPROM_AT24
+ select LEDS_TRIGGER_BACKLIGHT
+ select SND_S3C24XX_SOC_S3C24XX_UDA134X
+ help
+ Say Y here to select support for the MINI2440. It is a 10cm x 10cm board
+ available from various sources. It can come with a 3.5" or 7" touch LCD.
+
endmenu
diff --git a/arch/arm/mach-s3c2440/Makefile b/arch/arm/mach-s3c2440/Makefile
index 0b4440e..bfadcf6 100644
--- a/arch/arm/mach-s3c2440/Makefile
+++ b/arch/arm/mach-s3c2440/Makefile
@@ -22,3 +22,4 @@
obj-$(CONFIG_ARCH_S3C2440) += mach-smdk2440.o
obj-$(CONFIG_MACH_NEXCODER_2440) += mach-nexcoder.o
obj-$(CONFIG_MACH_AT2440EVB) += mach-at2440evb.o
+obj-$(CONFIG_MACH_MINI2440) += mach-mini2440.o
diff --git a/arch/arm/mach-s3c2440/mach-mini2440.c b/arch/arm/mach-s3c2440/mach-mini2440.c
new file mode 100644
index 0000000..6a5bc30
--- /dev/null
+++ b/arch/arm/mach-s3c2440/mach-mini2440.c
@@ -0,0 +1,703 @@
+/* linux/arch/arm/mach-s3c2440/mach-mini2440.c
+ *
+ * Copyright (c) 2008 Ramax Lo <ramaxlo@gmail.com>
+ * Based on mach-anubis.c by Ben Dooks <ben@simtec.co.uk>
+ * and modifications by SBZ <sbz@spgui.org> and
+ * Weibing <http://weibing.blogbus.com> and
+ * Michel Pollet <buserror@gmail.com>
+ *
+ * For product information, visit http://code.google.com/p/mini2440/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/interrupt.h>
+#include <linux/list.h>
+#include <linux/timer.h>
+#include <linux/init.h>
+#include <linux/gpio.h>
+#include <linux/input.h>
+#include <linux/io.h>
+#include <linux/serial_core.h>
+#include <linux/dm9000.h>
+#include <linux/i2c/at24.h>
+#include <linux/platform_device.h>
+#include <linux/gpio_keys.h>
+#include <linux/i2c.h>
+#include <linux/mmc/host.h>
+
+#include <asm/mach/arch.h>
+#include <asm/mach/map.h>
+
+#include <mach/hardware.h>
+#include <mach/fb.h>
+#include <asm/mach-types.h>
+
+#include <plat/regs-serial.h>
+#include <mach/regs-gpio.h>
+#include <mach/leds-gpio.h>
+#include <mach/regs-mem.h>
+#include <mach/regs-lcd.h>
+#include <mach/irqs.h>
+#include <plat/nand.h>
+#include <plat/iic.h>
+#include <plat/mci.h>
+#include <plat/udc.h>
+
+#include <plat/regs-serial.h>
+
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/nand.h>
+#include <linux/mtd/nand_ecc.h>
+#include <linux/mtd/partitions.h>
+
+#include <plat/clock.h>
+#include <plat/devs.h>
+#include <plat/cpu.h>
+
+#include <sound/s3c24xx_uda134x.h>
+
+#define MACH_MINI2440_DM9K_BASE (S3C2410_CS4 + 0x300)
+
+static struct map_desc mini2440_iodesc[] __initdata = {
+ /* nothing to declare, move along */
+};
+
+#define UCON S3C2410_UCON_DEFAULT
+#define ULCON S3C2410_LCON_CS8 | S3C2410_LCON_PNONE | S3C2410_LCON_STOPB
+#define UFCON S3C2410_UFCON_RXTRIG8 | S3C2410_UFCON_FIFOMODE
+
+
+static struct s3c2410_uartcfg mini2440_uartcfgs[] __initdata = {
+ [0] = {
+ .hwport = 0,
+ .flags = 0,
+ .ucon = UCON,
+ .ulcon = ULCON,
+ .ufcon = UFCON,
+ },
+ [1] = {
+ .hwport = 1,
+ .flags = 0,
+ .ucon = UCON,
+ .ulcon = ULCON,
+ .ufcon = UFCON,
+ },
+ [2] = {
+ .hwport = 2,
+ .flags = 0,
+ .ucon = UCON,
+ .ulcon = ULCON,
+ .ufcon = UFCON,
+ },
+};
+
+/* USB device UDC support */
+
+static void mini2440_udc_pullup(enum s3c2410_udc_cmd_e cmd)
+{
+ pr_debug("udc: pullup(%d)\n", cmd);
+
+ switch (cmd) {
+ case S3C2410_UDC_P_ENABLE :
+ s3c2410_gpio_setpin(S3C2410_GPC(5), 1);
+ break;
+ case S3C2410_UDC_P_DISABLE :
+ s3c2410_gpio_setpin(S3C2410_GPC(5), 0);
+ break;
+ case S3C2410_UDC_P_RESET :
+ break;
+ default:
+ break;
+ }
+}
+
+static struct s3c2410_udc_mach_info mini2440_udc_cfg __initdata = {
+ .udc_command = mini2440_udc_pullup,
+};
+
+
+/* LCD timing and setup */
+
+/*
+ * This macro simplifies the table below
+ */
+#define _LCD_DECLARE(_clock,_xres,margin_left,margin_right,hsync, \
+ _yres,margin_top,margin_bottom,vsync, refresh) \
+ .width = _xres, \
+ .xres = _xres, \
+ .height = _yres, \
+ .yres = _yres, \
+ .left_margin = margin_left, \
+ .right_margin = margin_right, \
+ .upper_margin = margin_top, \
+ .lower_margin = margin_bottom, \
+ .hsync_len = hsync, \
+ .vsync_len = vsync, \
+ .pixclock = ((_clock*100000000000LL) / \
+ ((refresh) * \
+ (hsync + margin_left + _xres + margin_right) * \
+ (vsync + margin_top + _yres + margin_bottom))), \
+ .bpp = 16,\
+ .type = (S3C2410_LCDCON1_TFT16BPP |\
+ S3C2410_LCDCON1_TFT)
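+/*
+ * _clock scales the pixel-clock period in tenths of the nominal
+ * refresh-derived period: 10 gives exactly the period implied by
+ * refresh * htotal * vtotal, smaller values run the clock
+ * proportionally faster.
+ */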
+
+struct s3c2410fb_display mini2440_lcd_cfg[] __initdata = {
+ [0] = { /* mini2440 + 3.5" TFT + touchscreen */
+ _LCD_DECLARE(
+ 7, /* The 3.5 is quite fast */
+ 240, 21, 38, 6, /* x timing */
+ 320, 4, 4, 2, /* y timing */
+ 60), /* refresh rate */
+ .lcdcon5 = (S3C2410_LCDCON5_FRM565 |
+ S3C2410_LCDCON5_INVVLINE |
+ S3C2410_LCDCON5_INVVFRAME |
+ S3C2410_LCDCON5_INVVDEN |
+ S3C2410_LCDCON5_PWREN),
+ },
+ [1] = { /* mini2440 + 7" TFT + touchscreen */
+ _LCD_DECLARE(
+ 10, /* the 7" runs slower */
+ 800, 40, 40, 48, /* x timing */
+ 480, 29, 3, 3, /* y timing */
+ 50), /* refresh rate */
+ .lcdcon5 = (S3C2410_LCDCON5_FRM565 |
+ S3C2410_LCDCON5_INVVLINE |
+ S3C2410_LCDCON5_INVVFRAME |
+ S3C2410_LCDCON5_PWREN),
+ },
+ /* The VGA shield can output at several resolutions. All share
+ * the same timings; however, anything smaller than 1024x768
+ * will only be displayed in the top left corner of a 1024x768
+ * XGA output unless you add the optional DIP switches to the shield.
+ * Therefore, timings for other resolutions have been omitted here.
+ */
+ [2] = {
+ _LCD_DECLARE(
+ 10,
+ 1024, 1, 2, 2, /* x timing */
+ 768, 200, 16, 16, /* y timing */
+ 24), /* refresh rate, maximum stable,
+ tested with the FPGA shield */
+ .lcdcon5 = (S3C2410_LCDCON5_FRM565 |
+ S3C2410_LCDCON5_HWSWP),
+ },
+};
+
+/* todo - put into gpio header */
+
+#define S3C2410_GPCCON_MASK(x) (3 << ((x) * 2))
+#define S3C2410_GPDCON_MASK(x) (3 << ((x) * 2))
+
+struct s3c2410fb_mach_info mini2440_fb_info __initdata = {
+ .displays = &mini2440_lcd_cfg[0], /* not constant! see init */
+ .num_displays = 1,
+ .default_display = 0,
+
+ /* Enable VD[2..7], VD[10..15], VD[18..23] and VCLK, syncs, VDEN
+ * and disable the pull down resistors on pins we are using for LCD
+ * data. */
+
+ .gpcup = (0xf << 1) | (0x3f << 10),
+
+ .gpccon = (S3C2410_GPC1_VCLK | S3C2410_GPC2_VLINE |
+ S3C2410_GPC3_VFRAME | S3C2410_GPC4_VM |
+ S3C2410_GPC10_VD2 | S3C2410_GPC11_VD3 |
+ S3C2410_GPC12_VD4 | S3C2410_GPC13_VD5 |
+ S3C2410_GPC14_VD6 | S3C2410_GPC15_VD7),
+
+ .gpccon_mask = (S3C2410_GPCCON_MASK(1) | S3C2410_GPCCON_MASK(2) |
+ S3C2410_GPCCON_MASK(3) | S3C2410_GPCCON_MASK(4) |
+ S3C2410_GPCCON_MASK(10) | S3C2410_GPCCON_MASK(11) |
+ S3C2410_GPCCON_MASK(12) | S3C2410_GPCCON_MASK(13) |
+ S3C2410_GPCCON_MASK(14) | S3C2410_GPCCON_MASK(15)),
+
+ .gpdup = (0x3f << 2) | (0x3f << 10),
+
+ .gpdcon = (S3C2410_GPD2_VD10 | S3C2410_GPD3_VD11 |
+ S3C2410_GPD4_VD12 | S3C2410_GPD5_VD13 |
+ S3C2410_GPD6_VD14 | S3C2410_GPD7_VD15 |
+ S3C2410_GPD10_VD18 | S3C2410_GPD11_VD19 |
+ S3C2410_GPD12_VD20 | S3C2410_GPD13_VD21 |
+ S3C2410_GPD14_VD22 | S3C2410_GPD15_VD23),
+
+ .gpdcon_mask = (S3C2410_GPDCON_MASK(2) | S3C2410_GPDCON_MASK(3) |
+ S3C2410_GPDCON_MASK(4) | S3C2410_GPDCON_MASK(5) |
+ S3C2410_GPDCON_MASK(6) | S3C2410_GPDCON_MASK(7) |
+ S3C2410_GPDCON_MASK(10) | S3C2410_GPDCON_MASK(11)|
+ S3C2410_GPDCON_MASK(12) | S3C2410_GPDCON_MASK(13)|
+ S3C2410_GPDCON_MASK(14) | S3C2410_GPDCON_MASK(15)),
+};
+
+/* MMC/SD */
+
+static struct s3c24xx_mci_pdata mini2440_mmc_cfg __initdata = {
+ .gpio_detect = S3C2410_GPG(8),
+ .gpio_wprotect = S3C2410_GPH(8),
+ .set_power = NULL,
+ .ocr_avail = MMC_VDD_32_33|MMC_VDD_33_34,
+};
+
+/* NAND Flash on MINI2440 board */
+
+static struct mtd_partition mini2440_default_nand_part[] __initdata = {
+ [0] = {
+ .name = "u-boot",
+ .size = SZ_256K,
+ .offset = 0,
+ },
+ [1] = {
+ .name = "u-boot-env",
+ .size = SZ_128K,
+ .offset = SZ_256K,
+ },
+ [2] = {
+ .name = "kernel",
+ /* 5 megabytes, for a kernel with no modules
+ * or a uImage with a ramdisk attached */
+ .size = 0x00500000,
+ .offset = SZ_256K + SZ_128K,
+ },
+ [3] = {
+ .name = "root",
+ .offset = SZ_256K + SZ_128K + 0x00500000,
+ .size = MTDPART_SIZ_FULL,
+ },
+};
+
+static struct s3c2410_nand_set mini2440_nand_sets[] __initdata = {
+ [0] = {
+ .name = "nand",
+ .nr_chips = 1,
+ .nr_partitions = ARRAY_SIZE(mini2440_default_nand_part),
+ .partitions = mini2440_default_nand_part,
+ },
+};
+
+static struct s3c2410_platform_nand mini2440_nand_info __initdata = {
+ .tacls = 0,
+ .twrph0 = 25,
+ .twrph1 = 15,
+ .nr_sets = ARRAY_SIZE(mini2440_nand_sets),
+ .sets = mini2440_nand_sets,
+ .ignore_unset_ecc = 1,
+};
+
+/* DM9000AEP 10/100 ethernet controller */
+
+static struct resource mini2440_dm9k_resource[] __initdata = {
+ [0] = {
+ .start = MACH_MINI2440_DM9K_BASE,
+ .end = MACH_MINI2440_DM9K_BASE + 3,
+ .flags = IORESOURCE_MEM
+ },
+ [1] = {
+ .start = MACH_MINI2440_DM9K_BASE + 4,
+ .end = MACH_MINI2440_DM9K_BASE + 7,
+ .flags = IORESOURCE_MEM
+ },
+ [2] = {
+ .start = IRQ_EINT7,
+ .end = IRQ_EINT7,
+ .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHEDGE,
+ }
+};
+
+/*
+ * The DM9000 has no eeprom, and its MAC address is set by
+ * the bootloader before starting the kernel.
+ */
+static struct dm9000_plat_data mini2440_dm9k_pdata __initdata = {
+ .flags = (DM9000_PLATF_16BITONLY | DM9000_PLATF_NO_EEPROM),
+};
+
+static struct platform_device mini2440_device_eth __initdata = {
+ .name = "dm9000",
+ .id = -1,
+ .num_resources = ARRAY_SIZE(mini2440_dm9k_resource),
+ .resource = mini2440_dm9k_resource,
+ .dev = {
+ .platform_data = &mini2440_dm9k_pdata,
+ },
+};
+
+/* CON5
+ * +--+ /-----\
+ * | | | |
+ * | | | BAT |
+ * | | \_____/
+ * | |
+ * | | +----+ +----+
+ * | | | K5 | | K1 |
+ * | | +----+ +----+
+ * | | +----+ +----+
+ * | | | K4 | | K2 |
+ * | | +----+ +----+
+ * | | +----+ +----+
+ * | | | K6 | | K3 |
+ * | | +----+ +----+
+ * .....
+ */
+static struct gpio_keys_button mini2440_buttons[] __initdata = {
+ {
+ .gpio = S3C2410_GPG(0), /* K1 */
+ .code = KEY_F1,
+ .desc = "Button 1",
+ .active_low = 1,
+ },
+ {
+ .gpio = S3C2410_GPG(3), /* K2 */
+ .code = KEY_F2,
+ .desc = "Button 2",
+ .active_low = 1,
+ },
+ {
+ .gpio = S3C2410_GPG(5), /* K3 */
+ .code = KEY_F3,
+ .desc = "Button 3",
+ .active_low = 1,
+ },
+ {
+ .gpio = S3C2410_GPG(6), /* K4 */
+ .code = KEY_POWER,
+ .desc = "Power",
+ .active_low = 1,
+ },
+ {
+ .gpio = S3C2410_GPG(7), /* K5 */
+ .code = KEY_F5,
+ .desc = "Button 5",
+ .active_low = 1,
+ },
+#if 0
+ /* this pin is also known as TCLK1 and seems to already be
+ * marked as "in use" somehow in the kernel -- possibly wrongly */
+ {
+ .gpio = S3C2410_GPG(11), /* K6 */
+ .code = KEY_F6,
+ .desc = "Button 6",
+ .active_low = 1,
+ },
+#endif
+};
+
+static struct gpio_keys_platform_data mini2440_button_data __initdata = {
+ .buttons = mini2440_buttons,
+ .nbuttons = ARRAY_SIZE(mini2440_buttons),
+};
+
+static struct platform_device mini2440_button_device __initdata = {
+ .name = "gpio-keys",
+ .id = -1,
+ .dev = {
+ .platform_data = &mini2440_button_data,
+ }
+};
+
+/* LEDS */
+
+static struct s3c24xx_led_platdata mini2440_led1_pdata __initdata = {
+ .name = "led1",
+ .gpio = S3C2410_GPB(5),
+ .flags = S3C24XX_LEDF_ACTLOW | S3C24XX_LEDF_TRISTATE,
+ .def_trigger = "heartbeat",
+};
+
+static struct s3c24xx_led_platdata mini2440_led2_pdata __initdata = {
+ .name = "led2",
+ .gpio = S3C2410_GPB(6),
+ .flags = S3C24XX_LEDF_ACTLOW | S3C24XX_LEDF_TRISTATE,
+ .def_trigger = "nand-disk",
+};
+
+static struct s3c24xx_led_platdata mini2440_led3_pdata __initdata = {
+ .name = "led3",
+ .gpio = S3C2410_GPB(7),
+ .flags = S3C24XX_LEDF_ACTLOW | S3C24XX_LEDF_TRISTATE,
+ .def_trigger = "mmc0",
+};
+
+static struct s3c24xx_led_platdata mini2440_led4_pdata __initdata = {
+ .name = "led4",
+ .gpio = S3C2410_GPB(8),
+ .flags = S3C24XX_LEDF_ACTLOW | S3C24XX_LEDF_TRISTATE,
+ .def_trigger = "",
+};
+
+static struct s3c24xx_led_platdata mini2440_led_backlight_pdata __initdata = {
+ .name = "backlight",
+ .gpio = S3C2410_GPG(4),
+ .def_trigger = "backlight",
+};
+
+static struct platform_device mini2440_led1 __initdata = {
+ .name = "s3c24xx_led",
+ .id = 1,
+ .dev = {
+ .platform_data = &mini2440_led1_pdata,
+ },
+};
+
+static struct platform_device mini2440_led2 __initdata = {
+ .name = "s3c24xx_led",
+ .id = 2,
+ .dev = {
+ .platform_data = &mini2440_led2_pdata,
+ },
+};
+
+static struct platform_device mini2440_led3 __initdata = {
+ .name = "s3c24xx_led",
+ .id = 3,
+ .dev = {
+ .platform_data = &mini2440_led3_pdata,
+ },
+};
+
+static struct platform_device mini2440_led4 __initdata = {
+ .name = "s3c24xx_led",
+ .id = 4,
+ .dev = {
+ .platform_data = &mini2440_led4_pdata,
+ },
+};
+
+static struct platform_device mini2440_led_backlight __initdata = {
+ .name = "s3c24xx_led",
+ .id = 5,
+ .dev = {
+ .platform_data = &mini2440_led_backlight_pdata,
+ },
+};
+
+/* AUDIO */
+
+static struct s3c24xx_uda134x_platform_data mini2440_audio_pins __initdata = {
+ .l3_clk = S3C2410_GPB(4),
+ .l3_mode = S3C2410_GPB(2),
+ .l3_data = S3C2410_GPB(3),
+ .model = UDA134X_UDA1341
+};
+
+static struct platform_device mini2440_audio __initdata = {
+ .name = "s3c24xx_uda134x",
+ .id = 0,
+ .dev = {
+ .platform_data = &mini2440_audio_pins,
+ },
+};
+
+/*
+ * I2C devices
+ */
+static struct at24_platform_data at24c08 = {
+ .byte_len = SZ_8K / 8,
+ .page_size = 16,
+};
+
+static struct i2c_board_info mini2440_i2c_devs[] __initdata = {
+ {
+ I2C_BOARD_INFO("24c08", 0x50),
+ .platform_data = &at24c08,
+ },
+};
+
+static struct platform_device *mini2440_devices[] __initdata = {
+ &s3c_device_usb,
+ &s3c_device_wdt,
+/* &s3c_device_adc,*/ /* ADC doesn't like living with touchscreen ! */
+ &s3c_device_i2c0,
+ &s3c_device_rtc,
+ &s3c_device_usbgadget,
+ &mini2440_device_eth,
+ &mini2440_led1,
+ &mini2440_led2,
+ &mini2440_led3,
+ &mini2440_led4,
+ &mini2440_button_device,
+ &s3c_device_nand,
+ &s3c_device_sdi,
+ &s3c_device_iis,
+ &mini2440_audio,
+/* &s3c_device_timer[0],*/ /* buzzer pwm, no API for it */
+ /* remaining devices are optional */
+};
+
+static void __init mini2440_map_io(void)
+{
+ s3c24xx_init_io(mini2440_iodesc, ARRAY_SIZE(mini2440_iodesc));
+ s3c24xx_init_clocks(12000000);
+ s3c24xx_init_uarts(mini2440_uartcfgs, ARRAY_SIZE(mini2440_uartcfgs));
+
+ s3c_device_nand.dev.platform_data = &mini2440_nand_info;
+ s3c_device_sdi.dev.platform_data = &mini2440_mmc_cfg;
+}
+
+/*
+ * mini2440_features string
+ *
+ * t = Touchscreen present
+ * b = backlight control
+ * c = camera [TODO]
+ * 0-9 LCD configuration
+ *
+ */
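+/*
+ * Example: booting with mini2440=1b selects LCD configuration 1 (the 7"
+ * panel above) and registers the GPIO backlight device; 't' is accepted
+ * but ignored here since the touchscreen driver is not compiled in.
+ */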
+static char mini2440_features_str[12] __initdata = "0tb";
+
+static int __init mini2440_features_setup(char *str)
+{
+ if (str)
+ strlcpy(mini2440_features_str, str, sizeof(mini2440_features_str));
+ return 1;
+}
+
+__setup("mini2440=", mini2440_features_setup);
+
+#define FEATURE_SCREEN (1 << 0)
+#define FEATURE_BACKLIGHT (1 << 1)
+#define FEATURE_TOUCH (1 << 2)
+#define FEATURE_CAMERA (1 << 3)
+
+struct mini2440_features_t {
+ int count;
+ int done;
+ int lcd_index;
+ struct platform_device *optional[8];
+};
+
+static void mini2440_parse_features(
+ struct mini2440_features_t * features,
+ const char * features_str )
+{
+ const char * fp = features_str;
+
+ features->count = 0;
+ features->done = 0;
+ features->lcd_index = -1;
+
+ while (*fp) {
+ char f = *fp++;
+
+ switch (f) {
+ case '0'...'9': /* tft screen */
+ if (features->done & FEATURE_SCREEN) {
+ printk(KERN_INFO "MINI2440: '%c' ignored, "
+ "screen type already set\n", f);
+ } else {
+ int li = f - '0';
+ if (li >= ARRAY_SIZE(mini2440_lcd_cfg))
+ printk(KERN_INFO "MINI2440: "
+ "'%c' out of range LCD mode\n", f);
+ else {
+ features->optional[features->count++] =
+ &s3c_device_lcd;
+ features->lcd_index = li;
+ }
+ }
+ features->done |= FEATURE_SCREEN;
+ break;
+ case 'b':
+ if (features->done & FEATURE_BACKLIGHT)
+ printk(KERN_INFO "MINI2440: '%c' ignored, "
+ "backlight already set\n", f);
+ else {
+ features->optional[features->count++] =
+ &mini2440_led_backlight;
+ }
+ features->done |= FEATURE_BACKLIGHT;
+ break;
+ case 't':
+ printk(KERN_INFO "MINI2440: '%c' ignored, "
+ "touchscreen not compiled in\n", f);
+ break;
+ case 'c':
+ if (features->done & FEATURE_CAMERA)
+ printk(KERN_INFO "MINI2440: '%c' ignored, "
+ "camera already registered\n", f);
+ else
+ features->optional[features->count++] =
+ &s3c_device_camif;
+ features->done |= FEATURE_CAMERA;
+ break;
+ }
+ }
+}
+
+static void __init mini2440_init(void)
+{
+ struct mini2440_features_t features = { 0 };
+ int i;
+
+ printk(KERN_INFO "MINI2440: Option string mini2440=%s\n",
+ mini2440_features_str);
+
+ /* Parse the feature string */
+ mini2440_parse_features(&features, mini2440_features_str);
+
+ /* turn LCD on */
+ s3c2410_gpio_cfgpin(S3C2410_GPC(0), S3C2410_GPC0_LEND);
+
+ /* Turn the backlight early on */
+ s3c2410_gpio_setpin(S3C2410_GPG(4), 1);
+ s3c2410_gpio_cfgpin(S3C2410_GPG(4), S3C2410_GPIO_OUTPUT);
+
+ /* remove pullup on optional PWM backlight -- unused on 3.5 and 7"s */
+ s3c2410_gpio_pullup(S3C2410_GPB(1), 0);
+ s3c2410_gpio_setpin(S3C2410_GPB(1), 0);
+ s3c2410_gpio_cfgpin(S3C2410_GPB(1), S3C2410_GPIO_INPUT);
+
+ /* Make sure the D+ pullup pin is output */
+ s3c2410_gpio_cfgpin(S3C2410_GPC(5), S3C2410_GPIO_OUTPUT);
+
+ /* mark the keys as inputs, without pullups (there is one on the board) */
+ for (i = 0; i < ARRAY_SIZE(mini2440_buttons); i++) {
+ s3c2410_gpio_pullup(mini2440_buttons[i].gpio, 0);
+ s3c2410_gpio_cfgpin(mini2440_buttons[i].gpio,
+ S3C2410_GPIO_INPUT);
+ }
+ if (features.lcd_index != -1) {
+ int li;
+
+ mini2440_fb_info.displays =
+ &mini2440_lcd_cfg[features.lcd_index];
+
+ printk(KERN_INFO "MINI2440: LCD");
+ for (li = 0; li < ARRAY_SIZE(mini2440_lcd_cfg); li++)
+ if (li == features.lcd_index)
+ printk(" [%d:%dx%d]", li,
+ mini2440_lcd_cfg[li].width,
+ mini2440_lcd_cfg[li].height);
+ else
+ printk(" %d:%dx%d", li,
+ mini2440_lcd_cfg[li].width,
+ mini2440_lcd_cfg[li].height);
+ printk("\n");
+ s3c24xx_fb_set_platdata(&mini2440_fb_info);
+ }
+ s3c24xx_udc_set_platdata(&mini2440_udc_cfg);
+ s3c_i2c0_set_platdata(NULL);
+ i2c_register_board_info(0, mini2440_i2c_devs,
+ ARRAY_SIZE(mini2440_i2c_devs));
+
+ platform_add_devices(mini2440_devices, ARRAY_SIZE(mini2440_devices));
+
+ if (features.count) /* the optional features */
+ platform_add_devices(features.optional, features.count);
+
+}
+
+
+MACHINE_START(MINI2440, "MINI2440")
+ /* Maintainer: Michel Pollet <buserror@gmail.com> */
+ .phys_io = S3C2410_PA_UART,
+ .io_pg_offst = (((u32)S3C24XX_VA_UART) >> 18) & 0xfffc,
+ .boot_params = S3C2410_SDRAM_PA + 0x100,
+ .map_io = mini2440_map_io,
+ .init_machine = mini2440_init,
+ .init_irq = s3c24xx_init_irq,
+ .timer = &s3c24xx_timer,
+MACHINE_END
diff --git a/arch/arm/mach-s3c2442/Kconfig b/arch/arm/mach-s3c2442/Kconfig
index b289d19..103e913 100644
--- a/arch/arm/mach-s3c2442/Kconfig
+++ b/arch/arm/mach-s3c2442/Kconfig
@@ -24,6 +24,18 @@
depends on ARCH_S3C2440
select CPU_S3C2442
+config MACH_NEO1973_GTA02
+ bool "Openmoko GTA02 / Freerunner phone"
+ select CPU_S3C2442
+ select MFD_PCF50633
+ select PCF50633_GPIO
+ select I2C
+ select POWER_SUPPLY
+ select MACH_NEO1973
+ select S3C2410_PWM
+ help
+ Say Y here if you are using the Openmoko GTA02 / Freerunner GSM Phone
+
endmenu
diff --git a/arch/arm/mach-s3c2442/Makefile b/arch/arm/mach-s3c2442/Makefile
index 2a909c6..2a19113 100644
--- a/arch/arm/mach-s3c2442/Makefile
+++ b/arch/arm/mach-s3c2442/Makefile
@@ -12,5 +12,7 @@
obj-$(CONFIG_CPU_S3C2442) += s3c2442.o
obj-$(CONFIG_CPU_S3C2442) += clock.o
+obj-$(CONFIG_MACH_NEO1973_GTA02) += mach-gta02.o
+
# Machine support
diff --git a/arch/arm/mach-s3c2442/include/mach/gta02.h b/arch/arm/mach-s3c2442/include/mach/gta02.h
new file mode 100644
index 0000000..953331d
--- /dev/null
+++ b/arch/arm/mach-s3c2442/include/mach/gta02.h
@@ -0,0 +1,84 @@
+#ifndef _GTA02_H
+#define _GTA02_H
+
+#include <mach/regs-gpio.h>
+
+/* Different hardware revisions, passed in ATAG_REVISION by u-boot */
+#define GTA02v1_SYSTEM_REV 0x00000310
+#define GTA02v2_SYSTEM_REV 0x00000320
+#define GTA02v3_SYSTEM_REV 0x00000330
+#define GTA02v4_SYSTEM_REV 0x00000340
+#define GTA02v5_SYSTEM_REV 0x00000350
+/* since A7 is basically the same as A6, we use the A6 PCB ID */
+#define GTA02v6_SYSTEM_REV 0x00000360
+
+#define GTA02_GPIO_n3DL_GSM S3C2410_GPA(13) /* v1 + v2 + v3 only */
+
+#define GTA02_GPIO_PWR_LED1 S3C2410_GPB(0)
+#define GTA02_GPIO_PWR_LED2 S3C2410_GPB(1)
+#define GTA02_GPIO_AUX_LED S3C2410_GPB(2)
+#define GTA02_GPIO_VIBRATOR_ON S3C2410_GPB(3)
+#define GTA02_GPIO_MODEM_RST S3C2410_GPB(5)
+#define GTA02_GPIO_BT_EN S3C2410_GPB(6)
+#define GTA02_GPIO_MODEM_ON S3C2410_GPB(7)
+#define GTA02_GPIO_EXTINT8 S3C2410_GPB(8)
+#define GTA02_GPIO_USB_PULLUP S3C2410_GPB(9)
+
+#define GTA02_GPIO_PIO5 S3C2410_GPC(5) /* v3 + v4 only */
+
+#define GTA02v3_GPIO_nG1_CS S3C2410_GPD(12) /* v3 + v4 only */
+#define GTA02v3_GPIO_nG2_CS S3C2410_GPD(13) /* v3 + v4 only */
+#define GTA02v5_GPIO_HDQ S3C2410_GPD(14) /* v5 + */
+
+#define GTA02_GPIO_nG1_INT S3C2410_GPF(0)
+#define GTA02_GPIO_IO1 S3C2410_GPF(1)
+#define GTA02_GPIO_PIO_2 S3C2410_GPF(2) /* v2 + v3 + v4 only */
+#define GTA02_GPIO_JACK_INSERT S3C2410_GPF(4)
+#define GTA02_GPIO_WLAN_GPIO1 S3C2410_GPF(5) /* v2 + v3 + v4 only */
+#define GTA02_GPIO_AUX_KEY S3C2410_GPF(6)
+#define GTA02_GPIO_HOLD_KEY S3C2410_GPF(7)
+
+#define GTA02_GPIO_3D_IRQ S3C2410_GPG(4)
+#define GTA02v2_GPIO_nG2_INT S3C2410_GPG(8) /* v2 + v3 + v4 only */
+#define GTA02v3_GPIO_nUSB_OC S3C2410_GPG(9) /* v3 + v4 only */
+#define GTA02v3_GPIO_nUSB_FLT S3C2410_GPG(10) /* v3 + v4 only */
+#define GTA02v3_GPIO_nGSM_OC S3C2410_GPG(11) /* v3 + v4 only */
+
+#define GTA02_GPIO_AMP_SHUT S3C2440_GPJ1 /* v2 + v3 + v4 only */
+#define GTA02v1_GPIO_WLAN_GPIO10 S3C2440_GPJ2
+#define GTA02_GPIO_HP_IN S3C2440_GPJ2 /* v2 + v3 + v4 only */
+#define GTA02_GPIO_INT0 S3C2440_GPJ3 /* v2 + v3 + v4 only */
+#define GTA02_GPIO_nGSM_EN S3C2440_GPJ4
+#define GTA02_GPIO_3D_RESET S3C2440_GPJ5
+#define GTA02_GPIO_nDL_GSM S3C2440_GPJ6 /* v4 + v5 only */
+#define GTA02_GPIO_WLAN_GPIO0 S3C2440_GPJ7
+#define GTA02v1_GPIO_BAT_ID S3C2440_GPJ8
+#define GTA02_GPIO_KEEPACT S3C2440_GPJ8
+#define GTA02v1_GPIO_HP_IN S3C2440_GPJ10
+#define GTA02_CHIP_PWD S3C2440_GPJ11 /* v2 + v3 + v4 only */
+#define GTA02_GPIO_nWLAN_RESET S3C2440_GPJ12 /* v2 + v3 + v4 only */
+
+#define GTA02_IRQ_GSENSOR_1 IRQ_EINT0
+#define GTA02_IRQ_MODEM IRQ_EINT1
+#define GTA02_IRQ_PIO_2 IRQ_EINT2 /* v2 + v3 + v4 only */
+#define GTA02_IRQ_nJACK_INSERT IRQ_EINT4
+#define GTA02_IRQ_WLAN_GPIO1 IRQ_EINT5
+#define GTA02_IRQ_AUX IRQ_EINT6
+#define GTA02_IRQ_nHOLD IRQ_EINT7
+#define GTA02_IRQ_PCF50633 IRQ_EINT9
+#define GTA02_IRQ_3D IRQ_EINT12
+#define GTA02_IRQ_GSENSOR_2 IRQ_EINT16 /* v2 + v3 + v4 only */
+#define GTA02v3_IRQ_nUSB_OC IRQ_EINT17 /* v3 + v4 only */
+#define GTA02v3_IRQ_nUSB_FLT IRQ_EINT18 /* v3 + v4 only */
+#define GTA02v3_IRQ_nGSM_OC IRQ_EINT19 /* v3 + v4 only */
+
+/* returns 00 000 on GTA02 A5 and earlier, A6 returns 01 001 */
+#define GTA02_PCB_ID1_0 S3C2410_GPC(13)
+#define GTA02_PCB_ID1_1 S3C2410_GPC(15)
+#define GTA02_PCB_ID1_2 S3C2410_GPD(0)
+#define GTA02_PCB_ID2_0 S3C2410_GPD(3)
+#define GTA02_PCB_ID2_1 S3C2410_GPD(4)
+
+int gta02_get_pcb_revision(void);
+
+#endif /* _GTA02_H */
diff --git a/arch/arm/mach-s3c2442/mach-gta02.c b/arch/arm/mach-s3c2442/mach-gta02.c
new file mode 100644
index 0000000..e23b581
--- /dev/null
+++ b/arch/arm/mach-s3c2442/mach-gta02.c
@@ -0,0 +1,646 @@
+/*
+ * linux/arch/arm/mach-s3c2442/mach-gta02.c
+ *
+ * S3C2442 Machine Support for Openmoko GTA02 / FreeRunner.
+ *
+ * Copyright (C) 2006-2009 by Openmoko, Inc.
+ * Authors: Harald Welte <laforge@openmoko.org>
+ * Andy Green <andy@openmoko.org>
+ * Werner Almesberger <werner@openmoko.org>
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/interrupt.h>
+#include <linux/list.h>
+#include <linux/delay.h>
+#include <linux/timer.h>
+#include <linux/init.h>
+#include <linux/gpio.h>
+#include <linux/workqueue.h>
+#include <linux/platform_device.h>
+#include <linux/serial_core.h>
+#include <linux/spi/spi.h>
+
+#include <linux/mmc/host.h>
+
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/nand.h>
+#include <linux/mtd/nand_ecc.h>
+#include <linux/mtd/partitions.h>
+#include <linux/mtd/physmap.h>
+#include <linux/io.h>
+
+#include <linux/i2c.h>
+#include <linux/backlight.h>
+#include <linux/regulator/machine.h>
+
+#include <linux/mfd/pcf50633/core.h>
+#include <linux/mfd/pcf50633/mbc.h>
+#include <linux/mfd/pcf50633/adc.h>
+#include <linux/mfd/pcf50633/gpio.h>
+#include <linux/mfd/pcf50633/pmic.h>
+
+#include <asm/mach/arch.h>
+#include <asm/mach/map.h>
+#include <asm/mach/irq.h>
+
+#include <asm/irq.h>
+#include <asm/mach-types.h>
+
+#include <mach/regs-irq.h>
+#include <mach/regs-gpio.h>
+#include <mach/regs-gpioj.h>
+#include <mach/fb.h>
+
+#include <mach/spi.h>
+#include <mach/spi-gpio.h>
+#include <plat/usb-control.h>
+#include <mach/regs-mem.h>
+#include <mach/hardware.h>
+
+#include <mach/gta02.h>
+
+#include <plat/regs-serial.h>
+#include <plat/nand.h>
+#include <plat/devs.h>
+#include <plat/cpu.h>
+#include <plat/pm.h>
+#include <plat/udc.h>
+#include <plat/gpio-cfg.h>
+#include <plat/iic.h>
+
+static struct pcf50633 *gta02_pcf;
+
+/*
+ * This gets called every 1ms once we have panicked.
+ */
+
+static long gta02_panic_blink(long count)
+{
+ long delay = 0;
+ static long last_blink;
+ static char led;
+
+ /* Fast blink: 200ms period. */
+ if (count - last_blink < 100)
+ return 0;
+
+ led ^= 1;
+ gpio_direction_output(GTA02_GPIO_AUX_LED, led);
+
+ last_blink = count;
+
+ return delay;
+}
+
+
+static struct map_desc gta02_iodesc[] __initdata = {
+ {
+ .virtual = 0xe0000000,
+ .pfn = __phys_to_pfn(S3C2410_CS3 + 0x01000000),
+ .length = SZ_1M,
+ .type = MT_DEVICE
+ },
+};
+
+#define UCON (S3C2410_UCON_DEFAULT | S3C2443_UCON_RXERR_IRQEN)
+#define ULCON (S3C2410_LCON_CS8 | S3C2410_LCON_PNONE | S3C2410_LCON_STOPB)
+#define UFCON (S3C2410_UFCON_RXTRIG8 | S3C2410_UFCON_FIFOMODE)
+
+static struct s3c2410_uartcfg gta02_uartcfgs[] = {
+ [0] = {
+ .hwport = 0,
+ .flags = 0,
+ .ucon = UCON,
+ .ulcon = ULCON,
+ .ufcon = UFCON,
+ },
+ [1] = {
+ .hwport = 1,
+ .flags = 0,
+ .ucon = UCON,
+ .ulcon = ULCON,
+ .ufcon = UFCON,
+ },
+ [2] = {
+ .hwport = 2,
+ .flags = 0,
+ .ucon = UCON,
+ .ulcon = ULCON,
+ .ufcon = UFCON,
+ },
+};
+
+#ifdef CONFIG_CHARGER_PCF50633
+/*
+ * On GTA02 the 1A charger features a 48K resistor to 0V on the ID pin.
+ * We use this to recognize that we can pull 1A from the USB socket.
+ *
+ * These constants are the measured pcf50633 ADC levels with the 1A
+ * charger / 48K resistor, and with no pulldown resistor.
+ */
+
+#define ADC_NOM_CHG_DETECT_1A 6
+#define ADC_NOM_CHG_DETECT_USB 43
+
+static void
+gta02_configure_pmu_for_charger(struct pcf50633 *pcf, void *unused, int res)
+{
+ int ma;
+
+ /* Interpret charger type */
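+ /* below the midpoint of the two nominal readings means the 48K ID resistor (1A charger) is present */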
+ if (res < ((ADC_NOM_CHG_DETECT_USB + ADC_NOM_CHG_DETECT_1A) / 2)) {
+
+ /*
+ * Sanity - stop GPO driving out now that we have a 1A charger
+ * GPO controls USB Host power generation on GTA02
+ */
+ pcf50633_gpio_set(pcf, PCF50633_GPO, 0);
+
+ ma = 1000;
+ } else
+ ma = 100;
+
+ pcf50633_mbc_usb_curlim_set(pcf, ma);
+}
+
+static struct delayed_work gta02_charger_work;
+static int gta02_usb_vbus_draw;
+
+static void gta02_charger_worker(struct work_struct *work)
+{
+ if (gta02_usb_vbus_draw) {
+ pcf50633_mbc_usb_curlim_set(gta02_pcf, gta02_usb_vbus_draw);
+ return;
+ }
+
+#ifdef CONFIG_PCF50633_ADC
+ pcf50633_adc_async_read(gta02_pcf,
+ PCF50633_ADCC1_MUX_ADCIN1,
+ PCF50633_ADCC1_AVERAGE_16,
+ gta02_configure_pmu_for_charger,
+ NULL);
+#else
+ /*
+ * If the PCF50633 ADC is disabled we fall back to a
+ * 100mA limit for safety.
+ */
+ pcf50633_mbc_usb_curlim_set(gta02_pcf, 100);
+#endif
+}
+
+#define GTA02_CHARGER_CONFIGURE_TIMEOUT ((3000 * HZ) / 1000)
+
+static void gta02_pmu_event_callback(struct pcf50633 *pcf, int irq)
+{
+ if (irq == PCF50633_IRQ_USBINS) {
+ schedule_delayed_work(&gta02_charger_work,
+ GTA02_CHARGER_CONFIGURE_TIMEOUT);
+
+ return;
+ }
+
+ if (irq == PCF50633_IRQ_USBREM) {
+ cancel_delayed_work_sync(&gta02_charger_work);
+ gta02_usb_vbus_draw = 0;
+ }
+}
+
+static void gta02_udc_vbus_draw(unsigned int ma)
+{
+ if (!gta02_pcf)
+ return;
+
+ gta02_usb_vbus_draw = ma;
+
+ schedule_delayed_work(&gta02_charger_work,
+ GTA02_CHARGER_CONFIGURE_TIMEOUT);
+}
+#else /* !CONFIG_CHARGER_PCF50633 */
+#define gta02_pmu_event_callback NULL
+#define gta02_udc_vbus_draw NULL
+#endif
+
+/*
+ * This is called when the pcf50633 is probed, unfortunately quite late in the
+ * day since it is an I2C bus device. Here we can belatedly define some
+ * platform devices with the advantage that we can mark the pcf50633 as the
+ * parent. This makes them get suspended and resumed with their parent
+ * the pcf50633 still around.
+ */
+
+static void gta02_pmu_attach_child_devices(struct pcf50633 *pcf);
+
+
+static char *gta02_batteries[] = {
+ "battery",
+};
+
+struct pcf50633_platform_data gta02_pcf_pdata = {
+ .resumers = {
+ [0] = PCF50633_INT1_USBINS |
+ PCF50633_INT1_USBREM |
+ PCF50633_INT1_ALARM,
+ [1] = PCF50633_INT2_ONKEYF,
+ [2] = PCF50633_INT3_ONKEY1S,
+ [3] = PCF50633_INT4_LOWSYS |
+ PCF50633_INT4_LOWBAT |
+ PCF50633_INT4_HIGHTMP,
+ },
+
+ .batteries = gta02_batteries,
+ .num_batteries = ARRAY_SIZE(gta02_batteries),
+ .reg_init_data = {
+ [PCF50633_REGULATOR_AUTO] = {
+ .constraints = {
+ .min_uV = 3300000,
+ .max_uV = 3300000,
+ .valid_modes_mask = REGULATOR_MODE_NORMAL,
+ .always_on = 1,
+ .apply_uV = 1,
+ .state_mem = {
+ .enabled = 1,
+ },
+ },
+ },
+ [PCF50633_REGULATOR_DOWN1] = {
+ .constraints = {
+ .min_uV = 1300000,
+ .max_uV = 1600000,
+ .valid_modes_mask = REGULATOR_MODE_NORMAL,
+ .always_on = 1,
+ .apply_uV = 1,
+ },
+ },
+ [PCF50633_REGULATOR_DOWN2] = {
+ .constraints = {
+ .min_uV = 1800000,
+ .max_uV = 1800000,
+ .valid_modes_mask = REGULATOR_MODE_NORMAL,
+ .apply_uV = 1,
+ .always_on = 1,
+ .state_mem = {
+ .enabled = 1,
+ },
+ },
+ },
+ [PCF50633_REGULATOR_HCLDO] = {
+ .constraints = {
+ .min_uV = 2000000,
+ .max_uV = 3300000,
+ .valid_modes_mask = REGULATOR_MODE_NORMAL,
+ .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE,
+ .always_on = 1,
+ },
+ },
+ [PCF50633_REGULATOR_LDO1] = {
+ .constraints = {
+ .min_uV = 3300000,
+ .max_uV = 3300000,
+ .valid_modes_mask = REGULATOR_MODE_NORMAL,
+ .apply_uV = 1,
+ .state_mem = {
+ .enabled = 0,
+ },
+ },
+ },
+ [PCF50633_REGULATOR_LDO2] = {
+ .constraints = {
+ .min_uV = 3300000,
+ .max_uV = 3300000,
+ .valid_modes_mask = REGULATOR_MODE_NORMAL,
+ .apply_uV = 1,
+ },
+ },
+ [PCF50633_REGULATOR_LDO3] = {
+ .constraints = {
+ .min_uV = 3000000,
+ .max_uV = 3000000,
+ .valid_modes_mask = REGULATOR_MODE_NORMAL,
+ .apply_uV = 1,
+ },
+ },
+ [PCF50633_REGULATOR_LDO4] = {
+ .constraints = {
+ .min_uV = 3200000,
+ .max_uV = 3200000,
+ .valid_modes_mask = REGULATOR_MODE_NORMAL,
+ .apply_uV = 1,
+ },
+ },
+ [PCF50633_REGULATOR_LDO5] = {
+ .constraints = {
+ .min_uV = 3000000,
+ .max_uV = 3000000,
+ .valid_modes_mask = REGULATOR_MODE_NORMAL,
+ .apply_uV = 1,
+ .state_mem = {
+ .enabled = 1,
+ },
+ },
+ },
+ [PCF50633_REGULATOR_LDO6] = {
+ .constraints = {
+ .min_uV = 3000000,
+ .max_uV = 3000000,
+ .valid_modes_mask = REGULATOR_MODE_NORMAL,
+ },
+ },
+ [PCF50633_REGULATOR_MEMLDO] = {
+ .constraints = {
+ .min_uV = 1800000,
+ .max_uV = 1800000,
+ .valid_modes_mask = REGULATOR_MODE_NORMAL,
+ .state_mem = {
+ .enabled = 1,
+ },
+ },
+ },
+
+ },
+ .probe_done = gta02_pmu_attach_child_devices,
+ .mbc_event_callback = gta02_pmu_event_callback,
+};
+
+
+/* NOR Flash. */
+
+#define GTA02_FLASH_BASE 0x18000000 /* GCS3 */
+#define GTA02_FLASH_SIZE 0x200000 /* 2MBytes */
+
+static struct physmap_flash_data gta02_nor_flash_data = {
+ .width = 2,
+};
+
+static struct resource gta02_nor_flash_resource = {
+ .start = GTA02_FLASH_BASE,
+ .end = GTA02_FLASH_BASE + GTA02_FLASH_SIZE - 1,
+ .flags = IORESOURCE_MEM,
+};
+
+static struct platform_device gta02_nor_flash = {
+ .name = "physmap-flash",
+ .id = 0,
+ .dev = {
+ .platform_data = &gta02_nor_flash_data,
+ },
+ .resource = &gta02_nor_flash_resource,
+ .num_resources = 1,
+};
+
+
+struct platform_device s3c24xx_pwm_device = {
+ .name = "s3c24xx_pwm",
+ .num_resources = 0,
+};
+
+static struct i2c_board_info gta02_i2c_devs[] __initdata = {
+ {
+ I2C_BOARD_INFO("pcf50633", 0x73),
+ .irq = GTA02_IRQ_PCF50633,
+ .platform_data = &gta02_pcf_pdata,
+ },
+ {
+ I2C_BOARD_INFO("wm8753", 0x1a),
+ },
+};
+
+static struct s3c2410_nand_set gta02_nand_sets[] = {
+ [0] = {
+ /*
+ * This name is also hard-coded in the boot loaders, so
+ * changing it would require all users to upgrade
+ * their boot loaders, some of which are stored in a NOR
+ * that is considered to be immutable.
+ */
+ .name = "neo1973-nand",
+ .nr_chips = 1,
+ .use_bbt = 1,
+ .force_soft_ecc = 1,
+ },
+};
+
+/*
+ * Choose a set of timings derived from the S3C2442B MCP54
+ * data sheet (K5D2G13ACM-D075 MCP Memory).
+ */
+
+static struct s3c2410_platform_nand gta02_nand_info = {
+ .tacls = 0,
+ .twrph0 = 25,
+ .twrph1 = 15,
+ .nr_sets = ARRAY_SIZE(gta02_nand_sets),
+ .sets = gta02_nand_sets,
+};
+
+
+static void gta02_udc_command(enum s3c2410_udc_cmd_e cmd)
+{
+ switch (cmd) {
+ case S3C2410_UDC_P_ENABLE:
+ pr_debug("%s S3C2410_UDC_P_ENABLE\n", __func__);
+ gpio_direction_output(GTA02_GPIO_USB_PULLUP, 1);
+ break;
+ case S3C2410_UDC_P_DISABLE:
+ pr_debug("%s S3C2410_UDC_P_DISABLE\n", __func__);
+ gpio_direction_output(GTA02_GPIO_USB_PULLUP, 0);
+ break;
+ case S3C2410_UDC_P_RESET:
+ pr_debug("%s S3C2410_UDC_P_RESET\n", __func__);
+ /* FIXME: Do something here. */
+ }
+}
+
+/* Get PMU to set USB current limit accordingly. */
+static struct s3c2410_udc_mach_info gta02_udc_cfg = {
+ .vbus_draw = gta02_udc_vbus_draw,
+ .udc_command = gta02_udc_command,
+
+};
+
+
+
+static void gta02_bl_set_intensity(int intensity)
+{
+ struct pcf50633 *pcf = gta02_pcf;
+ int old_intensity = pcf50633_reg_read(pcf, PCF50633_REG_LEDOUT);
+
+ /* We map 8-bit intensity to 6-bit intensity in hardware. */
+ intensity >>= 2;
+
+ /*
+ * This can happen during, e.g., a panic print on a blanked console,
+ * but we can't service I2C without interrupts active, so abort.
+ */
+ if (in_atomic()) {
+ printk(KERN_ERR "gta02_bl_set_intensity called while atomic\n");
+ return;
+ }
+
+ old_intensity = pcf50633_reg_read(pcf, PCF50633_REG_LEDOUT);
+ if (intensity == old_intensity)
+ return;
+
+ /* We can't do this anywhere else. */
+ pcf50633_reg_write(pcf, PCF50633_REG_LEDDIM, 5);
+
+ if (!(pcf50633_reg_read(pcf, PCF50633_REG_LEDENA) & 3))
+ old_intensity = 0;
+
+ /*
+ * The PCF50633 cannot handle LEDOUT = 0 (datasheet p60);
+ * if it is seen, you have to re-enable the LED unit.
+ */
+ if (!intensity || !old_intensity)
+ pcf50633_reg_write(pcf, PCF50633_REG_LEDENA, 0);
+
+ /* Illegal to set LEDOUT to 0. */
+ if (!intensity)
+ pcf50633_reg_set_bit_mask(pcf, PCF50633_REG_LEDOUT, 0x3f, 2);
+ else
+ pcf50633_reg_set_bit_mask(pcf, PCF50633_REG_LEDOUT, 0x3f,
+ intensity);
+
+ if (intensity)
+ pcf50633_reg_write(pcf, PCF50633_REG_LEDENA, 2);
+
+}
+
+static struct generic_bl_info gta02_bl_info = {
+ .name = "gta02-bl",
+ .max_intensity = 0xff,
+ .default_intensity = 0xff,
+ .set_bl_intensity = gta02_bl_set_intensity,
+};
+
+static struct platform_device gta02_bl_dev = {
+ .name = "generic-bl",
+ .id = 1,
+ .dev = {
+ .platform_data = &gta02_bl_info,
+ },
+};
+
+
+
+/* USB */
+static struct s3c2410_hcd_info gta02_usb_info = {
+ .port[0] = {
+ .flags = S3C_HCDFLG_USED,
+ },
+ .port[1] = {
+ .flags = 0,
+ },
+};
+
+
+static void __init gta02_map_io(void)
+{
+ s3c24xx_init_io(gta02_iodesc, ARRAY_SIZE(gta02_iodesc));
+ s3c24xx_init_clocks(12000000);
+ s3c24xx_init_uarts(gta02_uartcfgs, ARRAY_SIZE(gta02_uartcfgs));
+}
+
+
+/* These are the guys that don't need to be children of PMU. */
+
+static struct platform_device *gta02_devices[] __initdata = {
+ &s3c_device_usb,
+ &s3c_device_wdt,
+ &s3c_device_sdi,
+ &s3c_device_usbgadget,
+ &s3c_device_nand,
+ &gta02_nor_flash,
+ &s3c24xx_pwm_device,
+ &s3c_device_iis,
+ &s3c_device_i2c0,
+};
+
+/* These guys DO need to be children of PMU. */
+
+static struct platform_device *gta02_devices_pmu_children[] = {
+ &gta02_bl_dev,
+};
+
+
+/*
+ * This is called when the pcf50633 is probed, quite late in the day since it is an
+ * I2C bus device. Here we can define platform devices with the advantage that
+ * we can mark the pcf50633 as the parent. This makes them get suspended and
+ * resumed with their parent the pcf50633 still around. All devices whose
+ * operation depends on something from pcf50633 must have this relationship
+ * made explicit like this, or suspend and resume will become an unreliable
+ * hellworld.
+ */
+
+static void gta02_pmu_attach_child_devices(struct pcf50633 *pcf)
+{
+ int n;
+
+ /* Grab a copy of the now probed PMU pointer. */
+ gta02_pcf = pcf;
+
+ for (n = 0; n < ARRAY_SIZE(gta02_devices_pmu_children); n++)
+ gta02_devices_pmu_children[n]->dev.parent = pcf->dev;
+
+ platform_add_devices(gta02_devices_pmu_children,
+ ARRAY_SIZE(gta02_devices_pmu_children));
+}
+
+static void gta02_poweroff(void)
+{
+ pcf50633_reg_set_bit_mask(gta02_pcf, PCF50633_REG_OOCSHDWN, 1, 1);
+}
+
+static void __init gta02_machine_init(void)
+{
+ /* Set the panic callback to make AUX LED blink at ~5Hz. */
+ panic_blink = gta02_panic_blink;
+
+ s3c_pm_init();
+
+#ifdef CONFIG_CHARGER_PCF50633
+ INIT_DELAYED_WORK(&gta02_charger_work, gta02_charger_worker);
+#endif
+
+ s3c_device_usb.dev.platform_data = &gta02_usb_info;
+ s3c_device_nand.dev.platform_data = &gta02_nand_info;
+
+ s3c24xx_udc_set_platdata(&gta02_udc_cfg);
+ s3c_i2c0_set_platdata(NULL);
+
+ i2c_register_board_info(0, gta02_i2c_devs, ARRAY_SIZE(gta02_i2c_devs));
+
+ platform_add_devices(gta02_devices, ARRAY_SIZE(gta02_devices));
+ pm_power_off = gta02_poweroff;
+}
+
+
+MACHINE_START(NEO1973_GTA02, "GTA02")
+ /* Maintainer: Nelson Castillo <arhuaco@freaks-unidos.net> */
+ .phys_io = S3C2410_PA_UART,
+ .io_pg_offst = (((u32)S3C24XX_VA_UART) >> 18) & 0xfffc,
+ .boot_params = S3C2410_SDRAM_PA + 0x100,
+ .map_io = gta02_map_io,
+ .init_irq = s3c24xx_init_irq,
+ .init_machine = gta02_machine_init,
+ .timer = &s3c24xx_timer,
+MACHINE_END
diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c
index 3a398be..03cd27d 100644
--- a/arch/arm/mm/alignment.c
+++ b/arch/arm/mm/alignment.c
@@ -62,6 +62,12 @@
#define SHIFT_ASR 0x40
#define SHIFT_RORRRX 0x60
+#define BAD_INSTR 0xdeadc0de
+
+/* Thumb-2 32 bit format per ARMv7 DDI0406A A6.3, either e800h, f000h or f800h */
+#define IS_T32(hi16) \
+ (((hi16) & 0xe000) == 0xe000 && ((hi16) & 0x1800))
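+/* i.e. the leading halfword is 0xe800-0xffff; 0xe000-0xe7ff remains the 16-bit unconditional branch */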
+
static unsigned long ai_user;
static unsigned long ai_sys;
static unsigned long ai_skipped;
@@ -332,38 +338,48 @@
struct pt_regs *regs)
{
unsigned int rd = RD_BITS(instr);
+ unsigned int rd2;
+ int load;
- if (((rd & 1) == 1) || (rd == 14))
+ if ((instr & 0xfe000000) == 0xe8000000) {
+ /* ARMv7 Thumb-2 32-bit LDRD/STRD */
+ rd2 = (instr >> 8) & 0xf;
+ load = !!(LDST_L_BIT(instr));
+ } else if (((rd & 1) == 1) || (rd == 14))
goto bad;
+ else {
+ load = ((instr & 0xf0) == 0xd0);
+ rd2 = rd + 1;
+ }
ai_dword += 1;
if (user_mode(regs))
goto user;
- if ((instr & 0xf0) == 0xd0) {
+ if (load) {
unsigned long val;
get32_unaligned_check(val, addr);
regs->uregs[rd] = val;
get32_unaligned_check(val, addr + 4);
- regs->uregs[rd + 1] = val;
+ regs->uregs[rd2] = val;
} else {
put32_unaligned_check(regs->uregs[rd], addr);
- put32_unaligned_check(regs->uregs[rd + 1], addr + 4);
+ put32_unaligned_check(regs->uregs[rd2], addr + 4);
}
return TYPE_LDST;
user:
- if ((instr & 0xf0) == 0xd0) {
+ if (load) {
unsigned long val;
get32t_unaligned_check(val, addr);
regs->uregs[rd] = val;
get32t_unaligned_check(val, addr + 4);
- regs->uregs[rd + 1] = val;
+ regs->uregs[rd2] = val;
} else {
put32t_unaligned_check(regs->uregs[rd], addr);
- put32t_unaligned_check(regs->uregs[rd + 1], addr + 4);
+ put32t_unaligned_check(regs->uregs[rd2], addr + 4);
}
return TYPE_LDST;
@@ -616,10 +632,74 @@
/* Else fall through for illegal instruction case */
default:
- return 0xdeadc0de;
+ return BAD_INSTR;
}
}
+/*
+ * Convert a Thumb-2 32 bit LDM, STM, LDRD or STRD to an equivalent instruction
+ * that the ARM alignment handler can process, and find the corresponding handler,
+ * so that we can reuse the ARM userland alignment fault fixups for Thumb.
+ *
+ * @pinstr: original Thumb-2 instruction; returns new handlable instruction
+ * @regs: register context.
+ * @poffset: return offset from faulted addr for later writeback
+ *
+ * NOTES:
+ * 1. Comments below refer to ARMv7 DDI0406A Thumb Instruction sections.
+ * 2. Register name Rt from ARMv7 is the same as Rd from ARMv6 (Rd is Rt)
+ */
+static void *
+do_alignment_t32_to_handler(unsigned long *pinstr, struct pt_regs *regs,
+ union offset_union *poffset)
+{
+ unsigned long instr = *pinstr;
+ u16 tinst1 = (instr >> 16) & 0xffff;
+ u16 tinst2 = instr & 0xffff;
+ poffset->un = 0;
+
+ switch (tinst1 & 0xffe0) {
+ /* A6.3.5 Load/Store multiple */
+ case 0xe880: /* STM/STMIA/STMEA,LDM/LDMIA, PUSH/POP T2 */
+ case 0xe8a0: /* ...above writeback version */
+ case 0xe900: /* STMDB/STMFD, LDMDB/LDMEA */
+ case 0xe920: /* ...above writeback version */
+ /* no need for an offset decision since the handler calculates it */
+ return do_alignment_ldmstm;
+
+ case 0xf840: /* POP/PUSH T3 (single register) */
+ if (RN_BITS(instr) == 13 && (tinst2 & 0x09ff) == 0x0904) {
+ u32 L = !!(LDST_L_BIT(instr));
+ const u32 subset[2] = {
+ 0xe92d0000, /* STMDB sp!,{registers} */
+ 0xe8bd0000, /* LDMIA sp!,{registers} */
+ };
+ *pinstr = subset[L] | (1<<RD_BITS(instr));
+ return do_alignment_ldmstm;
+ }
+ /* Else fall through for illegal instruction case */
+ break;
+
+ /* A6.3.6 Load/store double, STRD/LDRD(immed, lit, reg) */
+ case 0xe860:
+ case 0xe960:
+ case 0xe8e0:
+ case 0xe9e0:
+ poffset->un = (tinst2 & 0xff) << 2;
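+ /* fall through - all LDRD/STRD forms share the same handler */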
+ case 0xe940:
+ case 0xe9c0:
+ return do_alignment_ldrdstrd;
+
+ /*
+ * No need to handle load/store instructions up to word size
+ * since ARMv6 and later CPUs can perform unaligned accesses.
+ */
+ default:
+ break;
+ }
+ return NULL;
+}
+
static int
do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
{
@@ -630,6 +710,8 @@
mm_segment_t fs;
unsigned int fault;
u16 tinstr = 0;
+ int isize = 4;
+ int thumb2_32b = 0;
instrptr = instruction_pointer(regs);
@@ -637,8 +719,19 @@
set_fs(KERNEL_DS);
if (thumb_mode(regs)) {
fault = __get_user(tinstr, (u16 *)(instrptr & ~1));
- if (!(fault))
- instr = thumb2arm(tinstr);
+ if (!fault) {
+ if (cpu_architecture() >= CPU_ARCH_ARMv7 &&
+ IS_T32(tinstr)) {
+ /* Thumb-2 32-bit */
+ u16 tinst2 = 0;
+ fault = __get_user(tinst2, (u16 *)(instrptr+2));
+ instr = (tinstr << 16) | tinst2;
+ thumb2_32b = 1;
+ } else {
+ isize = 2;
+ instr = thumb2arm(tinstr);
+ }
+ }
} else
fault = __get_user(instr, (u32 *)instrptr);
set_fs(fs);
@@ -655,7 +748,7 @@
fixup:
- regs->ARM_pc += thumb_mode(regs) ? 2 : 4;
+ regs->ARM_pc += isize;
switch (CODING_BITS(instr)) {
case 0x00000000: /* 3.13.4 load/store instruction extensions */
@@ -714,18 +807,25 @@
handler = do_alignment_ldrstr;
break;
- case 0x08000000: /* ldm or stm */
- handler = do_alignment_ldmstm;
+ case 0x08000000: /* ldm or stm, or thumb-2 32bit instruction */
+ if (thumb2_32b)
+ handler = do_alignment_t32_to_handler(&instr, regs, &offset);
+ else
+ handler = do_alignment_ldmstm;
break;
default:
goto bad;
}
+ if (!handler)
+ goto bad;
type = handler(addr, instr, regs);
- if (type == TYPE_ERROR || type == TYPE_FAULT)
+ if (type == TYPE_ERROR || type == TYPE_FAULT) {
+ regs->ARM_pc -= isize;
goto bad_or_fault;
+ }
if (type == TYPE_LDST)
do_alignment_finish_ldst(addr, instr, regs, offset);
@@ -735,7 +835,6 @@
bad_or_fault:
if (type == TYPE_ERROR)
goto bad;
- regs->ARM_pc -= thumb_mode(regs) ? 2 : 4;
/*
* We got a fault - fix it up, or die.
*/
@@ -751,8 +850,8 @@
*/
printk(KERN_ERR "Alignment trap: not handling instruction "
"%0*lx at [<%08lx>]\n",
- thumb_mode(regs) ? 4 : 8,
- thumb_mode(regs) ? tinstr : instr, instrptr);
+ isize << 1,
+ isize == 2 ? tinstr : instr, instrptr);
ai_skipped += 1;
return 1;
@@ -763,8 +862,8 @@
printk("Alignment trap: %s (%d) PC=0x%08lx Instr=0x%0*lx "
"Address=0x%08lx FSR 0x%03x\n", current->comm,
task_pid_nr(current), instrptr,
- thumb_mode(regs) ? 4 : 8,
- thumb_mode(regs) ? tinstr : instr,
+ isize << 1,
+ isize == 2 ? tinstr : instr,
addr, fsr);
if (ai_usermode & UM_FIXUP)
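
The Thumb-2 conversion added above only computes a writeback offset for the indexed LDRD/STRD encodings (the low byte of the second halfword, scaled by four); the plain offset forms reuse the same handler with no extra offset. A minimal user-space sketch of that decode, using the same masks as the handler (the sample instruction word is made up, not taken from the patch):

/*
 * Stand-alone sketch of the Thumb-2 LDRD/STRD offset decode performed by
 * do_alignment_t32_to_handler() above.  Masks and cases follow the patch;
 * the example instruction word is hypothetical.
 */
#include <stdio.h>
#include <stdint.h>

static int t32_ldrdstrd_offset(uint32_t instr, uint32_t *offset)
{
	uint16_t tinst1 = (instr >> 16) & 0xffff;
	uint16_t tinst2 = instr & 0xffff;

	switch (tinst1 & 0xffe0) {
	case 0xe860:	/* indexed forms carry an 8-bit immediate */
	case 0xe960:
	case 0xe8e0:
	case 0xe9e0:
		*offset = (tinst2 & 0xff) << 2;	/* imm8 scaled to bytes */
		return 1;
	case 0xe940:	/* plain offset forms need no writeback offset */
	case 0xe9c0:
		*offset = 0;
		return 1;
	default:
		return 0;	/* not an LDRD/STRD the handler converts */
	}
}

int main(void)
{
	uint32_t instr = 0xe9610204;	/* hypothetical STRD, imm8 = 4 */
	uint32_t off;

	if (t32_ldrdstrd_offset(instr, &off))
		printf("writeback offset = %u bytes\n", (unsigned int)off);
	return 0;
}
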
diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
index 0455557..6fdcbb7 100644
--- a/arch/arm/mm/fault.c
+++ b/arch/arm/mm/fault.c
@@ -208,7 +208,7 @@
* than endlessly redo the fault.
*/
survive:
- fault = handle_mm_fault(mm, vma, addr & PAGE_MASK, fsr & (1 << 11));
+ fault = handle_mm_fault(mm, vma, addr & PAGE_MASK, (fsr & (1 << 11)) ? FAULT_FLAG_WRITE : 0);
if (unlikely(fault & VM_FAULT_ERROR)) {
if (fault & VM_FAULT_OOM)
goto out_of_memory;
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index fdaa9bb..4722582 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -836,6 +836,13 @@
BOOTMEM_EXCLUSIVE);
}
+ if (machine_is_treo680()) {
+ reserve_bootmem_node(pgdat, 0xa0000000, 0x1000,
+ BOOTMEM_EXCLUSIVE);
+ reserve_bootmem_node(pgdat, 0xa2000000, 0x1000,
+ BOOTMEM_EXCLUSIVE);
+ }
+
if (machine_is_palmt5())
reserve_bootmem_node(pgdat, 0xa0200000, 0x1000,
BOOTMEM_EXCLUSIVE);
diff --git a/arch/arm/plat-omap/include/mach/sram.h b/arch/arm/plat-omap/include/mach/sram.h
index dca7c16..4d53cc5 100644
--- a/arch/arm/plat-omap/include/mach/sram.h
+++ b/arch/arm/plat-omap/include/mach/sram.h
@@ -24,7 +24,8 @@
extern u32 omap3_configure_core_dpll(u32 sdrc_rfr_ctrl,
u32 sdrc_actim_ctrla,
u32 sdrc_actim_ctrlb, u32 m2,
- u32 unlock_dll);
+ u32 unlock_dll, u32 f, u32 sdrc_mr,
+ u32 inc);
/* Do not use these */
extern void omap1_sram_reprogram_clock(u32 ckctl, u32 dpllctl);
@@ -62,7 +63,8 @@
extern u32 omap3_sram_configure_core_dpll(u32 sdrc_rfr_ctrl,
u32 sdrc_actim_ctrla,
u32 sdrc_actim_ctrlb, u32 m2,
- u32 unlock_dll);
+ u32 unlock_dll, u32 f, u32 sdrc_mr,
+ u32 inc);
extern unsigned long omap3_sram_configure_core_dpll_sz;
#endif
diff --git a/arch/arm/plat-omap/sram.c b/arch/arm/plat-omap/sram.c
index a5b9bcd..65006df 100644
--- a/arch/arm/plat-omap/sram.c
+++ b/arch/arm/plat-omap/sram.c
@@ -371,15 +371,17 @@
static u32 (*_omap3_sram_configure_core_dpll)(u32 sdrc_rfr_ctrl,
u32 sdrc_actim_ctrla,
u32 sdrc_actim_ctrlb,
- u32 m2, u32 unlock_dll);
+ u32 m2, u32 unlock_dll,
+ u32 f, u32 sdrc_mr, u32 inc);
u32 omap3_configure_core_dpll(u32 sdrc_rfr_ctrl, u32 sdrc_actim_ctrla,
- u32 sdrc_actim_ctrlb, u32 m2, u32 unlock_dll)
+ u32 sdrc_actim_ctrlb, u32 m2, u32 unlock_dll,
+ u32 f, u32 sdrc_mr, u32 inc)
{
BUG_ON(!_omap3_sram_configure_core_dpll);
return _omap3_sram_configure_core_dpll(sdrc_rfr_ctrl,
sdrc_actim_ctrla,
sdrc_actim_ctrlb, m2,
- unlock_dll);
+ unlock_dll, f, sdrc_mr, inc);
}
/* REVISIT: Should this be same as omap34xx_sram_init() after off-idle? */
diff --git a/arch/arm/plat-s3c/Makefile b/arch/arm/plat-s3c/Makefile
index 6106514..74bb7cb 100644
--- a/arch/arm/plat-s3c/Makefile
+++ b/arch/arm/plat-s3c/Makefile
@@ -34,6 +34,7 @@
obj-$(CONFIG_S3C_DEV_HSMMC1) += dev-hsmmc1.o
obj-y += dev-i2c0.o
obj-$(CONFIG_S3C_DEV_I2C1) += dev-i2c1.o
+obj-$(CONFIG_SND_S3C24XX_SOC) += dev-audio.o
obj-$(CONFIG_S3C_DEV_FB) += dev-fb.o
obj-$(CONFIG_S3C_DEV_USB_HOST) += dev-usb.o
obj-$(CONFIG_S3C_DEV_USB_HSOTG) += dev-usb-hsotg.o
diff --git a/arch/arm/plat-s3c/dev-audio.c b/arch/arm/plat-s3c/dev-audio.c
new file mode 100644
index 0000000..1322beb
--- /dev/null
+++ b/arch/arm/plat-s3c/dev-audio.c
@@ -0,0 +1,68 @@
+/* linux/arch/arm/plat-s3c/dev-audio.c
+ *
+ * Copyright 2009 Wolfson Microelectronics
+ * Mark Brown <broonie@opensource.wolfsonmicro.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/platform_device.h>
+
+#include <mach/irqs.h>
+#include <mach/map.h>
+
+#include <plat/devs.h>
+
+
+static struct resource s3c64xx_iis0_resource[] = {
+ [0] = {
+ .start = S3C64XX_PA_IIS0,
+ .end = S3C64XX_PA_IIS0 + 0x100 - 1,
+ .flags = IORESOURCE_MEM,
+ },
+};
+
+struct platform_device s3c64xx_device_iis0 = {
+ .name = "s3c64xx-iis",
+ .id = 0,
+ .num_resources = ARRAY_SIZE(s3c64xx_iis0_resource),
+ .resource = s3c64xx_iis0_resource,
+};
+EXPORT_SYMBOL(s3c64xx_device_iis0);
+
+static struct resource s3c64xx_iis1_resource[] = {
+ [0] = {
+ .start = S3C64XX_PA_IIS1,
+ .end = S3C64XX_PA_IIS1 + 0x100 - 1,
+ .flags = IORESOURCE_MEM,
+ },
+};
+
+struct platform_device s3c64xx_device_iis1 = {
+ .name = "s3c64xx-iis",
+ .id = 1,
+ .num_resources = ARRAY_SIZE(s3c64xx_iis1_resource),
+ .resource = s3c64xx_iis1_resource,
+};
+EXPORT_SYMBOL(s3c64xx_device_iis1);
+
+static struct resource s3c64xx_iisv4_resource[] = {
+ [0] = {
+ .start = S3C64XX_PA_IISV4,
+ .end = S3C64XX_PA_IISV4 + 0x100 - 1,
+ .flags = IORESOURCE_MEM,
+ },
+};
+
+struct platform_device s3c64xx_device_iisv4 = {
+ .name = "s3c64xx-iis-v4",
+ .id = -1,
+ .num_resources = ARRAY_SIZE(s3c64xx_iisv4_resource),
+ .resource = s3c64xx_iisv4_resource,
+};
+EXPORT_SYMBOL(s3c64xx_device_iisv4);
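
The devices above only reach drivers once a machine file registers them; a hedged sketch of that step follows (the board array name, init function and the particular devices chosen are assumptions, not part of this patch):

/* Hypothetical machine code registering the new IIS platform devices. */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/platform_device.h>

#include <plat/devs.h>

static struct platform_device *hypothetical_board_devices[] __initdata = {
	&s3c64xx_device_iis0,		/* exported above */
	&s3c64xx_device_iisv4,
};

static void __init hypothetical_board_machine_init(void)
{
	platform_add_devices(hypothetical_board_devices,
			     ARRAY_SIZE(hypothetical_board_devices));
}

The two IIS channels deliberately share the "s3c64xx-iis" name and differ only in .id, so a single platform driver can bind to both and tell the instances apart by id.
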
diff --git a/arch/arm/plat-s3c/gpio-config.c b/arch/arm/plat-s3c/gpio-config.c
index 08044de..456969b 100644
--- a/arch/arm/plat-s3c/gpio-config.c
+++ b/arch/arm/plat-s3c/gpio-config.c
@@ -119,7 +119,7 @@
unsigned int shift = (off & 7) * 4;
u32 con;
- if (off < 8 && chip->chip.ngpio >= 8)
+ if (off < 8 && chip->chip.ngpio > 8)
reg -= 4;
if (s3c_gpio_is_cfg_special(cfg)) {
diff --git a/arch/arm/plat-s3c/include/plat/devs.h b/arch/arm/plat-s3c/include/plat/devs.h
index a0b6768..b5b9c4d 100644
--- a/arch/arm/plat-s3c/include/plat/devs.h
+++ b/arch/arm/plat-s3c/include/plat/devs.h
@@ -24,13 +24,16 @@
extern struct platform_device s3c_device_timer[];
+extern struct platform_device s3c64xx_device_iis0;
+extern struct platform_device s3c64xx_device_iis1;
+extern struct platform_device s3c64xx_device_iisv4;
+
extern struct platform_device s3c_device_fb;
extern struct platform_device s3c_device_usb;
extern struct platform_device s3c_device_lcd;
extern struct platform_device s3c_device_wdt;
extern struct platform_device s3c_device_i2c0;
extern struct platform_device s3c_device_i2c1;
-extern struct platform_device s3c_device_iis;
extern struct platform_device s3c_device_rtc;
extern struct platform_device s3c_device_adc;
extern struct platform_device s3c_device_sdi;
diff --git a/arch/arm/plat-s3c/include/plat/nand.h b/arch/arm/plat-s3c/include/plat/nand.h
index f4dcd14..18f9588 100644
--- a/arch/arm/plat-s3c/include/plat/nand.h
+++ b/arch/arm/plat-s3c/include/plat/nand.h
@@ -10,19 +10,26 @@
* published by the Free Software Foundation.
*/
-/* struct s3c2410_nand_set
+/**
+ * struct s3c2410_nand_set - define a set of one or more nand chips
+ * @disable_ecc: Entirely disable ECC (dangerous).
+ * @flash_bbt: Openmoko u-boot can create a Bad Block Table.
+ * Setting this flag allows the kernel to look for
+ * it at boot time and also skip the NAND scan.
+ * @nr_chips: Number of chips in this set
+ * @nr_partitions: Number of partitions pointed to by @partitions
+ * @name: Name of set (optional)
+ * @nr_map: Map for low-level logical to physical chip numbers (optional)
+ * @partitions: The mtd partition list
*
- * define an set of one or more nand chips registered with an unique mtd
- *
- * nr_chips = number of chips in this set
- * nr_partitions = number of partitions pointed to be partitoons (or zero)
- * name = name of set (optional)
- * nr_map = map for low-layer logical to physical chip numbers (option)
- * partitions = mtd partition list
-*/
-
+ * Defines a set of one or more nand chips registered with a unique mtd. Also
+ * allows flags to be passed to the underlying NAND layer. 'disable_ecc' will
+ * trigger a warning at boot time.
+ */
struct s3c2410_nand_set {
- unsigned int disable_ecc : 1;
+ unsigned int disable_ecc:1;
+ unsigned int flash_bbt:1;
int nr_chips;
int nr_partitions;
@@ -39,7 +46,7 @@
int twrph0; /* active time for nWE/nOE */
int twrph1; /* time for release CLE/ALE from nWE/nOE inactive */
- unsigned int ignore_unset_ecc : 1;
+ unsigned int ignore_unset_ecc:1;
int nr_sets;
struct s3c2410_nand_set *sets;
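
For illustration, a hedged sketch of board data using the documented fields, including the new flash_bbt flag; the partition layout, NAND timings and names below are assumptions, not values from this patch:

/* Hypothetical board NAND data exercising the flash_bbt flag. */
#include <linux/kernel.h>
#include <linux/mtd/partitions.h>

#include <plat/nand.h>

static struct mtd_partition hypothetical_nand_parts[] = {
	{
		.name	= "u-boot",
		.offset	= 0,
		.size	= 0x40000,
	}, {
		.name	= "rootfs",
		.offset	= MTDPART_OFS_APPEND,
		.size	= MTDPART_SIZ_FULL,
	},
};

static struct s3c2410_nand_set hypothetical_nand_sets[] = {
	[0] = {
		.name		= "nand",
		.nr_chips	= 1,
		.flash_bbt	= 1,	/* trust the u-boot bad block table */
		.nr_partitions	= ARRAY_SIZE(hypothetical_nand_parts),
		.partitions	= hypothetical_nand_parts,
	},
};

static struct s3c2410_platform_nand hypothetical_nand_info = {
	.tacls		= 20,	/* timings are made-up placeholders */
	.twrph0		= 60,
	.twrph1		= 20,
	.nr_sets	= ARRAY_SIZE(hypothetical_nand_sets),
	.sets		= hypothetical_nand_sets,
};
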
diff --git a/arch/arm/plat-s3c64xx/Makefile b/arch/arm/plat-s3c64xx/Makefile
index 2ed5df3..3c8882c 100644
--- a/arch/arm/plat-s3c64xx/Makefile
+++ b/arch/arm/plat-s3c64xx/Makefile
@@ -23,6 +23,7 @@
obj-$(CONFIG_CPU_S3C6400_INIT) += s3c6400-init.o
obj-$(CONFIG_CPU_S3C6400_CLOCK) += s3c6400-clock.o
+obj-$(CONFIG_CPU_FREQ_S3C64XX) += cpufreq.o
# PM support
diff --git a/arch/arm/plat-s3c64xx/clock.c b/arch/arm/plat-s3c64xx/clock.c
index 0bc2fa1..7a36e89 100644
--- a/arch/arm/plat-s3c64xx/clock.c
+++ b/arch/arm/plat-s3c64xx/clock.c
@@ -191,7 +191,7 @@
.id = -1,
.parent = &clk_h,
.enable = s3c64xx_hclk_ctrl,
- .ctrlbit = S3C_CLKCON_SCLK_UHOST,
+ .ctrlbit = S3C_CLKCON_HCLK_UHOST,
}, {
.name = "hsmmc",
.id = 0,
diff --git a/arch/arm/plat-s3c64xx/cpufreq.c b/arch/arm/plat-s3c64xx/cpufreq.c
new file mode 100644
index 0000000..e6e0843
--- /dev/null
+++ b/arch/arm/plat-s3c64xx/cpufreq.c
@@ -0,0 +1,262 @@
+/* linux/arch/arm/plat-s3c64xx/cpufreq.c
+ *
+ * Copyright 2009 Wolfson Microelectronics plc
+ *
+ * S3C64xx CPUfreq Support
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/cpufreq.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/regulator/consumer.h>
+
+static struct clk *armclk;
+static struct regulator *vddarm;
+
+#ifdef CONFIG_CPU_S3C6410
+struct s3c64xx_dvfs {
+ unsigned int vddarm_min;
+ unsigned int vddarm_max;
+};
+
+static struct s3c64xx_dvfs s3c64xx_dvfs_table[] = {
+ [0] = { 1000000, 1000000 },
+ [1] = { 1000000, 1050000 },
+ [2] = { 1050000, 1100000 },
+ [3] = { 1050000, 1150000 },
+ [4] = { 1250000, 1350000 },
+};
+
+static struct cpufreq_frequency_table s3c64xx_freq_table[] = {
+ { 0, 66000 },
+ { 0, 133000 },
+ { 1, 222000 },
+ { 1, 266000 },
+ { 2, 333000 },
+ { 2, 400000 },
+ { 3, 532000 },
+ { 3, 533000 },
+ { 4, 667000 },
+ { 0, CPUFREQ_TABLE_END },
+};
+#endif
+
+static int s3c64xx_cpufreq_verify_speed(struct cpufreq_policy *policy)
+{
+ if (policy->cpu != 0)
+ return -EINVAL;
+
+ return cpufreq_frequency_table_verify(policy, s3c64xx_freq_table);
+}
+
+static unsigned int s3c64xx_cpufreq_get_speed(unsigned int cpu)
+{
+ if (cpu != 0)
+ return 0;
+
+ return clk_get_rate(armclk) / 1000;
+}
+
+static int s3c64xx_cpufreq_set_target(struct cpufreq_policy *policy,
+ unsigned int target_freq,
+ unsigned int relation)
+{
+ int ret;
+ unsigned int i;
+ struct cpufreq_freqs freqs;
+ struct s3c64xx_dvfs *dvfs;
+
+ ret = cpufreq_frequency_table_target(policy, s3c64xx_freq_table,
+ target_freq, relation, &i);
+ if (ret != 0)
+ return ret;
+
+ freqs.cpu = 0;
+ freqs.old = clk_get_rate(armclk) / 1000;
+ freqs.new = s3c64xx_freq_table[i].frequency;
+ freqs.flags = 0;
+ dvfs = &s3c64xx_dvfs_table[s3c64xx_freq_table[i].index];
+
+ if (freqs.old == freqs.new)
+ return 0;
+
+ pr_debug("cpufreq: Transition %d-%dkHz\n", freqs.old, freqs.new);
+
+ cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
+
+#ifdef CONFIG_REGULATOR
+ if (vddarm && freqs.new > freqs.old) {
+ ret = regulator_set_voltage(vddarm,
+ dvfs->vddarm_min,
+ dvfs->vddarm_max);
+ if (ret != 0) {
+ pr_err("cpufreq: Failed to set VDDARM for %dkHz: %d\n",
+ freqs.new, ret);
+ goto err;
+ }
+ }
+#endif
+
+ ret = clk_set_rate(armclk, freqs.new * 1000);
+ if (ret < 0) {
+ pr_err("cpufreq: Failed to set rate %dkHz: %d\n",
+ freqs.new, ret);
+ goto err;
+ }
+
+#ifdef CONFIG_REGULATOR
+ if (vddarm && freqs.new < freqs.old) {
+ ret = regulator_set_voltage(vddarm,
+ dvfs->vddarm_min,
+ dvfs->vddarm_max);
+ if (ret != 0) {
+ pr_err("cpufreq: Failed to set VDDARM for %dkHz: %d\n",
+ freqs.new, ret);
+ goto err_clk;
+ }
+ }
+#endif
+
+ cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
+
+ pr_debug("cpufreq: Set actual frequency %lukHz\n",
+ clk_get_rate(armclk) / 1000);
+
+ return 0;
+
+err_clk:
+ if (clk_set_rate(armclk, freqs.old * 1000) < 0)
+ pr_err("Failed to restore original clock rate\n");
+err:
+ cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
+
+ return ret;
+}
+
+#ifdef CONFIG_REGULATOR
+static void __init s3c64xx_cpufreq_constrain_voltages(void)
+{
+ int count, v, i, found;
+ struct cpufreq_frequency_table *freq;
+ struct s3c64xx_dvfs *dvfs;
+
+ count = regulator_count_voltages(vddarm);
+ if (count < 0) {
+ pr_err("cpufreq: Unable to check supported voltages\n");
+ return;
+ }
+
+ freq = s3c64xx_freq_table;
+ while (freq->frequency != CPUFREQ_TABLE_END) {
+ if (freq->frequency == CPUFREQ_ENTRY_INVALID) {
+ freq++;
+ continue;
+ }
+
+ dvfs = &s3c64xx_dvfs_table[freq->index];
+ found = 0;
+
+ for (i = 0; i < count; i++) {
+ v = regulator_list_voltage(vddarm, i);
+ if (v >= dvfs->vddarm_min && v <= dvfs->vddarm_max)
+ found = 1;
+ }
+
+ if (!found) {
+ pr_debug("cpufreq: %dkHz unsupported by regulator\n",
+ freq->frequency);
+ freq->frequency = CPUFREQ_ENTRY_INVALID;
+ }
+
+ freq++;
+ }
+}
+#endif
+
+static int __init s3c64xx_cpufreq_driver_init(struct cpufreq_policy *policy)
+{
+ int ret;
+ struct cpufreq_frequency_table *freq;
+
+ if (policy->cpu != 0)
+ return -EINVAL;
+
+ if (s3c64xx_freq_table == NULL) {
+ pr_err("cpufreq: No frequency information for this CPU\n");
+ return -ENODEV;
+ }
+
+ armclk = clk_get(NULL, "armclk");
+ if (IS_ERR(armclk)) {
+ pr_err("cpufreq: Unable to obtain ARMCLK: %ld\n",
+ PTR_ERR(armclk));
+ return PTR_ERR(armclk);
+ }
+
+#ifdef CONFIG_REGULATOR
+ vddarm = regulator_get(NULL, "vddarm");
+ if (IS_ERR(vddarm)) {
+ ret = PTR_ERR(vddarm);
+ pr_err("cpufreq: Failed to obtain VDDARM: %d\n", ret);
+ pr_err("cpufreq: Only frequency scaling available\n");
+ vddarm = NULL;
+ } else {
+ s3c64xx_cpufreq_constrain_voltages();
+ }
+#endif
+
+ freq = s3c64xx_freq_table;
+ while (freq->frequency != CPUFREQ_TABLE_END) {
+ unsigned long r;
+
+ /* Check for frequencies we can generate */
+ r = clk_round_rate(armclk, freq->frequency * 1000);
+ r /= 1000;
+ if (r != freq->frequency)
+ freq->frequency = CPUFREQ_ENTRY_INVALID;
+
+ /* If we have no regulator then assume startup
+ * frequency is the maximum we can support. */
+ if (!vddarm && freq->frequency > s3c64xx_cpufreq_get_speed(0))
+ freq->frequency = CPUFREQ_ENTRY_INVALID;
+
+ freq++;
+ }
+
+ policy->cur = clk_get_rate(armclk) / 1000;
+
+ /* Pick a conservative guess in ns: we'll need ~1 I2C/SPI
+ * write plus clock reprogramming. */
+ policy->cpuinfo.transition_latency = 2 * 1000 * 1000;
+
+ ret = cpufreq_frequency_table_cpuinfo(policy, s3c64xx_freq_table);
+ if (ret != 0) {
+ pr_err("cpufreq: Failed to configure frequency table: %d\n",
+ ret);
+ regulator_put(vddarm);
+ clk_put(armclk);
+ }
+
+ return ret;
+}
+
+static struct cpufreq_driver s3c64xx_cpufreq_driver = {
+ .owner = THIS_MODULE,
+ .flags = 0,
+ .verify = s3c64xx_cpufreq_verify_speed,
+ .target = s3c64xx_cpufreq_set_target,
+ .get = s3c64xx_cpufreq_get_speed,
+ .init = s3c64xx_cpufreq_driver_init,
+ .name = "s3c",
+};
+
+static int __init s3c64xx_cpufreq_init(void)
+{
+ return cpufreq_register_driver(&s3c64xx_cpufreq_driver);
+}
+module_init(s3c64xx_cpufreq_init);
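
The driver asks for a regulator named "vddarm" against a NULL device, so voltage scaling only happens when the board supplies matching regulator data; below is a hedged sketch of what that board-side data could look like (the constraint limits, names and consumer wiring are assumptions):

/* Hypothetical board supply data satisfying regulator_get(NULL, "vddarm"). */
#include <linux/kernel.h>
#include <linux/regulator/machine.h>

static struct regulator_consumer_supply hypothetical_vddarm_consumers[] = {
	{
		.supply = "vddarm",	/* matched by the cpufreq driver above */
	},
};

static struct regulator_init_data hypothetical_vddarm_init_data = {
	.constraints = {
		.name		= "VDDARM",
		.min_uV		= 1000000,
		.max_uV		= 1350000,
		.valid_ops_mask	= REGULATOR_CHANGE_VOLTAGE,
	},
	.num_consumer_supplies	= ARRAY_SIZE(hypothetical_vddarm_consumers),
	.consumer_supplies	= hypothetical_vddarm_consumers,
};

This init data would hang off whichever PMIC regulator driver the board actually uses; without it the driver falls back to frequency-only scaling, as the probe message above states.
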
diff --git a/arch/arm/plat-s3c64xx/gpiolib.c b/arch/arm/plat-s3c64xx/gpiolib.c
index da7b60e..9285929 100644
--- a/arch/arm/plat-s3c64xx/gpiolib.c
+++ b/arch/arm/plat-s3c64xx/gpiolib.c
@@ -321,6 +321,11 @@
.get_pull = s3c_gpio_getpull_updown,
};
+int s3c64xx_gpio2int_gpn(struct gpio_chip *chip, unsigned pin)
+{
+ return IRQ_EINT(0) + pin;
+}
+
static struct s3c_gpio_chip gpio_2bit[] = {
{
.base = S3C64XX_GPF_BASE,
@@ -353,6 +358,7 @@
.base = S3C64XX_GPN(0),
.ngpio = S3C64XX_GPIO_N_NR,
.label = "GPN",
+ .to_irq = s3c64xx_gpio2int_gpn,
},
}, {
.base = S3C64XX_GPO_BASE,
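
With the new to_irq callback, gpio_to_irq() on a GPN pin resolves to IRQ_EINT(pin); a small hedged sketch of board code using that mapping (the pin, names and handler are hypothetical):

/* Hypothetical consumer of the GPN gpio-to-irq mapping added above. */
#include <linux/init.h>
#include <linux/gpio.h>
#include <linux/interrupt.h>

static irqreturn_t hypothetical_button_isr(int irq, void *dev_id)
{
	return IRQ_HANDLED;
}

static int __init hypothetical_button_init(void)
{
	unsigned gpio = S3C64XX_GPN(5);		/* resolves to IRQ_EINT(5) */
	int ret;

	ret = gpio_request(gpio, "button");
	if (ret)
		return ret;

	return request_irq(gpio_to_irq(gpio), hypothetical_button_isr,
			   IRQF_TRIGGER_FALLING, "button", NULL);
}
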
diff --git a/arch/arm/plat-s3c64xx/include/plat/regs-clock.h b/arch/arm/plat-s3c64xx/include/plat/regs-clock.h
index 52836d4..a8777a7 100644
--- a/arch/arm/plat-s3c64xx/include/plat/regs-clock.h
+++ b/arch/arm/plat-s3c64xx/include/plat/regs-clock.h
@@ -88,11 +88,11 @@
#define S3C6400_CLKDIV2_SPI0_SHIFT (0)
/* HCLK GATE Registers */
-#define S3C_CLKCON_HCLK_BUS (1<<30)
-#define S3C_CLKCON_HCLK_SECUR (1<<29)
-#define S3C_CLKCON_HCLK_SDMA1 (1<<28)
-#define S3C_CLKCON_HCLK_SDMA2 (1<<27)
-#define S3C_CLKCON_HCLK_UHOST (1<<26)
+#define S3C_CLKCON_HCLK_3DSE (1<<31)
+#define S3C_CLKCON_HCLK_UHOST (1<<29)
+#define S3C_CLKCON_HCLK_SECUR (1<<28)
+#define S3C_CLKCON_HCLK_SDMA1 (1<<27)
+#define S3C_CLKCON_HCLK_SDMA0 (1<<26)
#define S3C_CLKCON_HCLK_IROM (1<<25)
#define S3C_CLKCON_HCLK_DDR1 (1<<24)
#define S3C_CLKCON_HCLK_DDR0 (1<<23)
diff --git a/arch/arm/tools/mach-types b/arch/arm/tools/mach-types
index fec6467..33026ef 100644
--- a/arch/arm/tools/mach-types
+++ b/arch/arm/tools/mach-types
@@ -12,7 +12,7 @@
#
# http://www.arm.linux.org.uk/developer/machines/?action=new
#
-# Last update: Fri May 29 10:14:20 2009
+# Last update: Sat Jun 20 22:28:39 2009
#
# machine_is_xxx CONFIG_xxxx MACH_TYPE_xxx number
#
@@ -1455,7 +1455,7 @@
h6044 MACH_H6044 H6044 1458
app MACH_APP APP 1459
tct_hammer MACH_TCT_HAMMER TCT_HAMMER 1460
-herald MACH_HERMES HERMES 1461
+herald MACH_HERALD HERALD 1461
artemis MACH_ARTEMIS ARTEMIS 1462
htctitan MACH_HTCTITAN HTCTITAN 1463
qranium MACH_QRANIUM QRANIUM 1464
@@ -2245,3 +2245,38 @@
omap3_wl_ff MACH_OMAP3_WL_FF OMAP3_WL_FF 2258
simcom MACH_SIMCOM SIMCOM 2259
mcwebio MACH_MCWEBIO MCWEBIO 2260
+omap3_phrazer MACH_OMAP3_PHRAZER OMAP3_PHRAZER 2261
+darwin MACH_DARWIN DARWIN 2262
+oratiscomu MACH_ORATISCOMU ORATISCOMU 2263
+rtsbc20 MACH_RTSBC20 RTSBC20 2264
+i780 MACH_I780 I780 2265
+gemini324 MACH_GEMINI324 GEMINI324 2266
+oratislan MACH_ORATISLAN ORATISLAN 2267
+oratisalog MACH_ORATISALOG ORATISALOG 2268
+oratismadi MACH_ORATISMADI ORATISMADI 2269
+oratisot16 MACH_ORATISOT16 ORATISOT16 2270
+oratisdesk MACH_ORATISDESK ORATISDESK 2271
+v2p_ca9 MACH_V2P_CA9 V2P_CA9 2272
+sintexo MACH_SINTEXO SINTEXO 2273
+cm3389 MACH_CM3389 CM3389 2274
+omap3_cio MACH_OMAP3_CIO OMAP3_CIO 2275
+sgh_i900 MACH_SGH_I900 SGH_I900 2276
+bst100 MACH_BST100 BST100 2277
+passion MACH_PASSION PASSION 2278
+indesign_at91sam MACH_INDESIGN_AT91SAM INDESIGN_AT91SAM 2279
+c4_badger MACH_C4_BADGER C4_BADGER 2280
+c4_viper MACH_C4_VIPER C4_VIPER 2281
+d2net MACH_D2NET D2NET 2282
+bigdisk MACH_BIGDISK BIGDISK 2283
+notalvision MACH_NOTALVISION NOTALVISION 2284
+omap3_kboc MACH_OMAP3_KBOC OMAP3_KBOC 2285
+cyclone MACH_CYCLONE CYCLONE 2286
+ninja MACH_NINJA NINJA 2287
+at91sam9g20ek_2mmc MACH_AT91SAM9G20EK_2MMC AT91SAM9G20EK_2MMC 2288
+bcmring MACH_BCMRING BCMRING 2289
+resol_dl2 MACH_RESOL_DL2 RESOL_DL2 2290
+ifosw MACH_IFOSW IFOSW 2291
+htcrhodium MACH_HTCRHODIUM HTCRHODIUM 2292
+htctopaz MACH_HTCTOPAZ HTCTOPAZ 2293
+matrix504 MACH_MATRIX504 MATRIX504 2294
+mrfsa MACH_MRFSA MRFSA 2295
diff --git a/arch/avr32/mm/fault.c b/arch/avr32/mm/fault.c
index 62d4abb..b61d86d 100644
--- a/arch/avr32/mm/fault.c
+++ b/arch/avr32/mm/fault.c
@@ -133,7 +133,7 @@
* fault.
*/
survive:
- fault = handle_mm_fault(mm, vma, address, writeaccess);
+ fault = handle_mm_fault(mm, vma, address, writeaccess ? FAULT_FLAG_WRITE : 0);
if (unlikely(fault & VM_FAULT_ERROR)) {
if (fault & VM_FAULT_OOM)
goto out_of_memory;
diff --git a/arch/blackfin/Kconfig b/arch/blackfin/Kconfig
index 8ea0d94..7faa2f5 100644
--- a/arch/blackfin/Kconfig
+++ b/arch/blackfin/Kconfig
@@ -274,7 +274,7 @@
config BF_REV_0_1
bool "0.1"
- depends on (BF52x || (BF54x && !BF54xM))
+ depends on (BF51x || BF52x || (BF54x && !BF54xM))
config BF_REV_0_2
bool "0.2"
@@ -358,7 +358,7 @@
config MEM_MT48LC32M16A2TG_75
bool
- depends on (BFIN527_EZKIT || BFIN532_IP0X || BLACKSTAMP || BFIN526_EZBRD)
+ depends on (BFIN527_EZKIT || BFIN532_IP0X || BLACKSTAMP)
default y
config MEM_MT48LC32M8A2_75
@@ -366,6 +366,11 @@
depends on (BFIN518F_EZBRD)
default y
+config MEM_MT48H32M16LFCJ_75
+ bool
+ depends on (BFIN526_EZBRD)
+ default y
+
source "arch/blackfin/mach-bf518/Kconfig"
source "arch/blackfin/mach-bf527/Kconfig"
source "arch/blackfin/mach-bf533/Kconfig"
@@ -623,7 +628,6 @@
config TICKSOURCE_GPTMR0
bool "Gptimer0 (SCLK domain)"
select BFIN_GPTIMERS
- depends on !IPIPE
config TICKSOURCE_CORETMR
bool "Core timer (CCLK domain)"
@@ -644,6 +648,7 @@
config GPTMR0_CLOCKSOURCE
bool "Use GPTimer0 as a clocksource (higher rating)"
+ select BFIN_GPTIMERS
depends on GENERIC_CLOCKEVENTS
depends on !TICKSOURCE_GPTMR0
@@ -908,76 +913,97 @@
comment "Cache Support"
+
config BFIN_ICACHE
bool "Enable ICACHE"
+ default y
+config BFIN_ICACHE_LOCK
+ bool "Enable Instruction Cache Locking"
+ depends on BFIN_ICACHE
+ default n
+config BFIN_EXTMEM_ICACHEABLE
+ bool "Enable ICACHE for external memory"
+ depends on BFIN_ICACHE
+ default y
+config BFIN_L2_ICACHEABLE
+ bool "Enable ICACHE for L2 SRAM"
+ depends on BFIN_ICACHE
+ depends on BF54x || BF561
+ default n
+
config BFIN_DCACHE
bool "Enable DCACHE"
+ default y
config BFIN_DCACHE_BANKA
bool "Enable only 16k BankA DCACHE - BankB is SRAM"
depends on BFIN_DCACHE && !BF531
default n
-config BFIN_ICACHE_LOCK
- bool "Enable Instruction Cache Locking"
-
-choice
- prompt "External memory cache policy"
+config BFIN_EXTMEM_DCACHEABLE
+ bool "Enable DCACHE for external memory"
depends on BFIN_DCACHE
- default BFIN_WB if !SMP
- default BFIN_WT if SMP
-config BFIN_WB
- bool "Write back"
- depends on !SMP
- help
- Write Back Policy:
- Cached data will be written back to SDRAM only when needed.
- This can give a nice increase in performance, but beware of
- broken drivers that do not properly invalidate/flush their
- cache.
-
- Write Through Policy:
- Cached data will always be written back to SDRAM when the
- cache is updated. This is a completely safe setting, but
- performance is worse than Write Back.
-
- If you are unsure of the options and you want to be safe,
- then go with Write Through.
-
-config BFIN_WT
- bool "Write through"
- help
- Write Back Policy:
- Cached data will be written back to SDRAM only when needed.
- This can give a nice increase in performance, but beware of
- broken drivers that do not properly invalidate/flush their
- cache.
-
- Write Through Policy:
- Cached data will always be written back to SDRAM when the
- cache is updated. This is a completely safe setting, but
- performance is worse than Write Back.
-
- If you are unsure of the options and you want to be safe,
- then go with Write Through.
-
-endchoice
-
+ default y
choice
- prompt "L2 SRAM cache policy"
- depends on (BF54x || BF561)
- default BFIN_L2_WT
-config BFIN_L2_WB
+ prompt "External memory DCACHE policy"
+ depends on BFIN_EXTMEM_DCACHEABLE
+ default BFIN_EXTMEM_WRITEBACK if !SMP
+ default BFIN_EXTMEM_WRITETHROUGH if SMP
+config BFIN_EXTMEM_WRITEBACK
bool "Write back"
depends on !SMP
+ help
+ Write Back Policy:
+ Cached data will be written back to SDRAM only when needed.
+ This can give a nice increase in performance, but beware of
+ broken drivers that do not properly invalidate/flush their
+ cache.
-config BFIN_L2_WT
+ Write Through Policy:
+ Cached data will always be written back to SDRAM when the
+ cache is updated. This is a completely safe setting, but
+ performance is worse than Write Back.
+
+ If you are unsure of the options and you want to be safe,
+ then go with Write Through.
+
+config BFIN_EXTMEM_WRITETHROUGH
bool "Write through"
- depends on !SMP
+ help
+ Write Back Policy:
+ Cached data will be written back to SDRAM only when needed.
+ This can give a nice increase in performance, but beware of
+ broken drivers that do not properly invalidate/flush their
+ cache.
-config BFIN_L2_NOT_CACHED
- bool "Not cached"
+ Write Through Policy:
+ Cached data will always be written back to SDRAM when the
+ cache is updated. This is a completely safe setting, but
+ performance is worse than Write Back.
+
+ If you are unsure of the options and you want to be safe,
+ then go with Write Through.
endchoice
+config BFIN_L2_DCACHEABLE
+ bool "Enable DCACHE for L2 SRAM"
+ depends on BFIN_DCACHE
+ depends on BF54x || BF561
+ default n
+choice
+ prompt "L2 SRAM DCACHE policy"
+ depends on BFIN_L2_DCACHEABLE
+ default BFIN_L2_WRITEBACK
+config BFIN_L2_WRITEBACK
+ bool "Write back"
+ depends on !SMP
+
+config BFIN_L2_WRITETHROUGH
+ bool "Write through"
+ depends on !SMP
+endchoice
+
+
+comment "Memory Protection Unit"
config MPU
bool "Enable the memory protection unit (EXPERIMENTAL)"
default n
diff --git a/arch/blackfin/boot/Makefile b/arch/blackfin/boot/Makefile
index 3ab6f23..fd9ccc5 100644
--- a/arch/blackfin/boot/Makefile
+++ b/arch/blackfin/boot/Makefile
@@ -13,7 +13,7 @@
quiet_cmd_uimage = UIMAGE $@
cmd_uimage = $(CONFIG_SHELL) $(MKIMAGE) -A $(ARCH) -O linux -T kernel \
- -C $(2) -n 'Linux-$(KERNELRELEASE)' -a $(CONFIG_BOOT_LOAD) \
+ -C $(2) -n '$(MACHINE)-$(KERNELRELEASE)' -a $(CONFIG_BOOT_LOAD) \
-e $(shell $(NM) vmlinux | awk '$$NF == "__start" {print $$1}') \
-d $< $@
diff --git a/arch/blackfin/configs/BF518F-EZBRD_defconfig b/arch/blackfin/configs/BF518F-EZBRD_defconfig
index baec133..dcfb488 100644
--- a/arch/blackfin/configs/BF518F-EZBRD_defconfig
+++ b/arch/blackfin/configs/BF518F-EZBRD_defconfig
@@ -326,11 +326,17 @@
# Cache Support
#
CONFIG_BFIN_ICACHE=y
+# CONFIG_BFIN_ICACHE_LOCK is not set
CONFIG_BFIN_DCACHE=y
# CONFIG_BFIN_DCACHE_BANKA is not set
-# CONFIG_BFIN_ICACHE_LOCK is not set
-CONFIG_BFIN_WB=y
-# CONFIG_BFIN_WT is not set
+CONFIG_BFIN_EXTMEM_ICACHEABLE=y
+CONFIG_BFIN_EXTMEM_DCACHEABLE=y
+CONFIG_BFIN_EXTMEM_WRITEBACK=y
+# CONFIG_BFIN_EXTMEM_WRITETHROUGH is not set
+
+#
+# Memory Protection Unit
+#
# CONFIG_MPU is not set
#
@@ -413,11 +419,11 @@
# CONFIG_INET_IPCOMP is not set
# CONFIG_INET_XFRM_TUNNEL is not set
# CONFIG_INET_TUNNEL is not set
-CONFIG_INET_XFRM_MODE_TRANSPORT=y
-CONFIG_INET_XFRM_MODE_TUNNEL=y
-CONFIG_INET_XFRM_MODE_BEET=y
+# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
+# CONFIG_INET_XFRM_MODE_TUNNEL is not set
+# CONFIG_INET_XFRM_MODE_BEET is not set
# CONFIG_INET_LRO is not set
-CONFIG_INET_DIAG=y
+# CONFIG_INET_DIAG is not set
CONFIG_INET_TCP_DIAG=y
# CONFIG_TCP_CONG_ADVANCED is not set
CONFIG_TCP_CONG_CUBIC=y
@@ -916,7 +922,7 @@
# CONFIG_MMC_SDHCI is not set
CONFIG_SDH_BFIN=m
CONFIG_SDH_BFIN_MISSING_CMD_PULLUP_WORKAROUND=y
-CONFIG_SDH_BFIN_ENABLE_SDIO_IRQ=y
+# CONFIG_SDH_BFIN_ENABLE_SDIO_IRQ is not set
# CONFIG_MMC_SPI is not set
# CONFIG_MEMSTICK is not set
# CONFIG_NEW_LEDS is not set
@@ -1147,7 +1153,7 @@
# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
# CONFIG_DEBUG_KOBJECT is not set
-# CONFIG_DEBUG_BUGVERBOSE is not set
+CONFIG_DEBUG_BUGVERBOSE=y
CONFIG_DEBUG_INFO=y
# CONFIG_DEBUG_VM is not set
# CONFIG_DEBUG_WRITECOUNT is not set
diff --git a/arch/blackfin/configs/BF526-EZBRD_defconfig b/arch/blackfin/configs/BF526-EZBRD_defconfig
index c06262e..48a3a7a 100644
--- a/arch/blackfin/configs/BF526-EZBRD_defconfig
+++ b/arch/blackfin/configs/BF526-EZBRD_defconfig
@@ -331,16 +331,18 @@
# Cache Support
#
CONFIG_BFIN_ICACHE=y
+# CONFIG_BFIN_ICACHE_LOCK is not set
CONFIG_BFIN_DCACHE=y
# CONFIG_BFIN_DCACHE_BANKA is not set
-# CONFIG_BFIN_ICACHE_LOCK is not set
-CONFIG_BFIN_WB=y
-# CONFIG_BFIN_WT is not set
-# CONFIG_MPU is not set
+CONFIG_BFIN_EXTMEM_ICACHEABLE=y
+CONFIG_BFIN_EXTMEM_DCACHEABLE=y
+CONFIG_BFIN_EXTMEM_WRITEBACK=y
+# CONFIG_BFIN_EXTMEM_WRITETHROUGH is not set
#
-# Asynchonous Memory Configuration
+# Memory Protection Unit
#
+# CONFIG_MPU is not set
#
# EBIU_AMGCTL Global Control
@@ -418,11 +420,11 @@
# CONFIG_INET_IPCOMP is not set
# CONFIG_INET_XFRM_TUNNEL is not set
# CONFIG_INET_TUNNEL is not set
-CONFIG_INET_XFRM_MODE_TRANSPORT=y
-CONFIG_INET_XFRM_MODE_TUNNEL=y
-CONFIG_INET_XFRM_MODE_BEET=y
+# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
+# CONFIG_INET_XFRM_MODE_TUNNEL is not set
+# CONFIG_INET_XFRM_MODE_BEET is not set
# CONFIG_INET_LRO is not set
-CONFIG_INET_DIAG=y
+# CONFIG_INET_DIAG is not set
CONFIG_INET_TCP_DIAG=y
# CONFIG_TCP_CONG_ADVANCED is not set
CONFIG_TCP_CONG_CUBIC=y
@@ -1424,7 +1426,7 @@
# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
# CONFIG_DEBUG_KOBJECT is not set
-# CONFIG_DEBUG_BUGVERBOSE is not set
+CONFIG_DEBUG_BUGVERBOSE=y
CONFIG_DEBUG_INFO=y
# CONFIG_DEBUG_VM is not set
# CONFIG_DEBUG_WRITECOUNT is not set
diff --git a/arch/blackfin/configs/BF527-EZKIT_defconfig b/arch/blackfin/configs/BF527-EZKIT_defconfig
index e9175c6..dd83527 100644
--- a/arch/blackfin/configs/BF527-EZKIT_defconfig
+++ b/arch/blackfin/configs/BF527-EZKIT_defconfig
@@ -331,11 +331,17 @@
# Cache Support
#
CONFIG_BFIN_ICACHE=y
+# CONFIG_BFIN_ICACHE_LOCK is not set
CONFIG_BFIN_DCACHE=y
# CONFIG_BFIN_DCACHE_BANKA is not set
-# CONFIG_BFIN_ICACHE_LOCK is not set
-CONFIG_BFIN_WB=y
-# CONFIG_BFIN_WT is not set
+CONFIG_BFIN_EXTMEM_ICACHEABLE=y
+CONFIG_BFIN_EXTMEM_DCACHEABLE=y
+CONFIG_BFIN_EXTMEM_WRITEBACK=y
+# CONFIG_BFIN_EXTMEM_WRITETHROUGH is not set
+
+#
+# Memory Protection Unit
+#
# CONFIG_MPU is not set
#
@@ -418,11 +424,11 @@
# CONFIG_INET_IPCOMP is not set
# CONFIG_INET_XFRM_TUNNEL is not set
# CONFIG_INET_TUNNEL is not set
-CONFIG_INET_XFRM_MODE_TRANSPORT=y
-CONFIG_INET_XFRM_MODE_TUNNEL=y
-CONFIG_INET_XFRM_MODE_BEET=y
+# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
+# CONFIG_INET_XFRM_MODE_TUNNEL is not set
+# CONFIG_INET_XFRM_MODE_BEET is not set
# CONFIG_INET_LRO is not set
-CONFIG_INET_DIAG=y
+# CONFIG_INET_DIAG is not set
CONFIG_INET_TCP_DIAG=y
# CONFIG_TCP_CONG_ADVANCED is not set
CONFIG_TCP_CONG_CUBIC=y
@@ -1505,7 +1511,7 @@
# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
# CONFIG_DEBUG_KOBJECT is not set
-# CONFIG_DEBUG_BUGVERBOSE is not set
+CONFIG_DEBUG_BUGVERBOSE=y
CONFIG_DEBUG_INFO=y
# CONFIG_DEBUG_VM is not set
# CONFIG_DEBUG_WRITECOUNT is not set
diff --git a/arch/blackfin/configs/BF533-EZKIT_defconfig b/arch/blackfin/configs/BF533-EZKIT_defconfig
index 5aa63ba..4c04480 100644
--- a/arch/blackfin/configs/BF533-EZKIT_defconfig
+++ b/arch/blackfin/configs/BF533-EZKIT_defconfig
@@ -292,12 +292,21 @@
#
# Cache Support
#
CONFIG_BFIN_ICACHE=y
+# CONFIG_BFIN_ICACHE_LOCK is not set
CONFIG_BFIN_DCACHE=y
# CONFIG_BFIN_DCACHE_BANKA is not set
-# CONFIG_BFIN_ICACHE_LOCK is not set
-CONFIG_BFIN_WB=y
-# CONFIG_BFIN_WT is not set
+CONFIG_BFIN_EXTMEM_ICACHEABLE=y
+CONFIG_BFIN_EXTMEM_DCACHEABLE=y
+CONFIG_BFIN_EXTMEM_WRITEBACK=y
+# CONFIG_BFIN_EXTMEM_WRITETHROUGH is not set
+
+#
+# Memory Protection Unit
+#
# CONFIG_MPU is not set
#
@@ -391,11 +400,11 @@
# CONFIG_INET_IPCOMP is not set
# CONFIG_INET_XFRM_TUNNEL is not set
# CONFIG_INET_TUNNEL is not set
-CONFIG_INET_XFRM_MODE_TRANSPORT=y
-CONFIG_INET_XFRM_MODE_TUNNEL=y
-CONFIG_INET_XFRM_MODE_BEET=y
+# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
+# CONFIG_INET_XFRM_MODE_TUNNEL is not set
+# CONFIG_INET_XFRM_MODE_BEET is not set
# CONFIG_INET_LRO is not set
-CONFIG_INET_DIAG=y
+# CONFIG_INET_DIAG is not set
CONFIG_INET_TCP_DIAG=y
# CONFIG_TCP_CONG_ADVANCED is not set
CONFIG_TCP_CONG_CUBIC=y
@@ -1052,7 +1061,7 @@
# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
# CONFIG_DEBUG_KOBJECT is not set
-# CONFIG_DEBUG_BUGVERBOSE is not set
+CONFIG_DEBUG_BUGVERBOSE=y
CONFIG_DEBUG_INFO=y
# CONFIG_DEBUG_VM is not set
# CONFIG_DEBUG_WRITECOUNT is not set
diff --git a/arch/blackfin/configs/BF533-STAMP_defconfig b/arch/blackfin/configs/BF533-STAMP_defconfig
index fed2532..c99bbcd 100644
--- a/arch/blackfin/configs/BF533-STAMP_defconfig
+++ b/arch/blackfin/configs/BF533-STAMP_defconfig
@@ -293,11 +293,17 @@
# Cache Support
#
CONFIG_BFIN_ICACHE=y
+# CONFIG_BFIN_ICACHE_LOCK is not set
CONFIG_BFIN_DCACHE=y
# CONFIG_BFIN_DCACHE_BANKA is not set
-# CONFIG_BFIN_ICACHE_LOCK is not set
-CONFIG_BFIN_WB=y
-# CONFIG_BFIN_WT is not set
+CONFIG_BFIN_EXTMEM_ICACHEABLE=y
+CONFIG_BFIN_EXTMEM_DCACHEABLE=y
+CONFIG_BFIN_EXTMEM_WRITEBACK=y
+# CONFIG_BFIN_EXTMEM_WRITETHROUGH is not set
+
+#
+# Memory Protection Unit
+#
# CONFIG_MPU is not set
#
@@ -391,11 +397,11 @@
# CONFIG_INET_IPCOMP is not set
# CONFIG_INET_XFRM_TUNNEL is not set
# CONFIG_INET_TUNNEL is not set
-CONFIG_INET_XFRM_MODE_TRANSPORT=y
-CONFIG_INET_XFRM_MODE_TUNNEL=y
-CONFIG_INET_XFRM_MODE_BEET=y
+# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
+# CONFIG_INET_XFRM_MODE_TUNNEL is not set
+# CONFIG_INET_XFRM_MODE_BEET is not set
# CONFIG_INET_LRO is not set
-CONFIG_INET_DIAG=y
+# CONFIG_INET_DIAG is not set
CONFIG_INET_TCP_DIAG=y
# CONFIG_TCP_CONG_ADVANCED is not set
CONFIG_TCP_CONG_CUBIC=y
@@ -1216,7 +1222,7 @@
# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
# CONFIG_DEBUG_KOBJECT is not set
-# CONFIG_DEBUG_BUGVERBOSE is not set
+CONFIG_DEBUG_BUGVERBOSE=y
CONFIG_DEBUG_INFO=y
# CONFIG_DEBUG_VM is not set
# CONFIG_DEBUG_WRITECOUNT is not set
diff --git a/arch/blackfin/configs/BF537-STAMP_defconfig b/arch/blackfin/configs/BF537-STAMP_defconfig
index f9ac20d..092ffda 100644
--- a/arch/blackfin/configs/BF537-STAMP_defconfig
+++ b/arch/blackfin/configs/BF537-STAMP_defconfig
@@ -300,11 +300,17 @@
# Cache Support
#
CONFIG_BFIN_ICACHE=y
+# CONFIG_BFIN_ICACHE_LOCK is not set
CONFIG_BFIN_DCACHE=y
# CONFIG_BFIN_DCACHE_BANKA is not set
-# CONFIG_BFIN_ICACHE_LOCK is not set
-CONFIG_BFIN_WB=y
-# CONFIG_BFIN_WT is not set
+CONFIG_BFIN_EXTMEM_ICACHEABLE=y
+CONFIG_BFIN_EXTMEM_DCACHEABLE=y
+CONFIG_BFIN_EXTMEM_WRITEBACK=y
+# CONFIG_BFIN_EXTMEM_WRITETHROUGH is not set
+
+#
+# Memory Protection Unit
+#
# CONFIG_MPU is not set
#
@@ -399,11 +405,11 @@
# CONFIG_INET_IPCOMP is not set
# CONFIG_INET_XFRM_TUNNEL is not set
# CONFIG_INET_TUNNEL is not set
-CONFIG_INET_XFRM_MODE_TRANSPORT=y
-CONFIG_INET_XFRM_MODE_TUNNEL=y
-CONFIG_INET_XFRM_MODE_BEET=y
+# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
+# CONFIG_INET_XFRM_MODE_TUNNEL is not set
+# CONFIG_INET_XFRM_MODE_BEET is not set
# CONFIG_INET_LRO is not set
-CONFIG_INET_DIAG=y
+# CONFIG_INET_DIAG is not set
CONFIG_INET_TCP_DIAG=y
# CONFIG_TCP_CONG_ADVANCED is not set
CONFIG_TCP_CONG_CUBIC=y
@@ -1269,7 +1275,7 @@
# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
# CONFIG_DEBUG_KOBJECT is not set
-# CONFIG_DEBUG_BUGVERBOSE is not set
+CONFIG_DEBUG_BUGVERBOSE=y
CONFIG_DEBUG_INFO=y
# CONFIG_DEBUG_VM is not set
# CONFIG_DEBUG_WRITECOUNT is not set
diff --git a/arch/blackfin/configs/BF538-EZKIT_defconfig b/arch/blackfin/configs/BF538-EZKIT_defconfig
index ee98e22..fa698a8 100644
--- a/arch/blackfin/configs/BF538-EZKIT_defconfig
+++ b/arch/blackfin/configs/BF538-EZKIT_defconfig
@@ -311,11 +311,17 @@
# Cache Support
#
CONFIG_BFIN_ICACHE=y
+# CONFIG_BFIN_ICACHE_LOCK is not set
CONFIG_BFIN_DCACHE=y
# CONFIG_BFIN_DCACHE_BANKA is not set
-# CONFIG_BFIN_ICACHE_LOCK is not set
-CONFIG_BFIN_WB=y
-# CONFIG_BFIN_WT is not set
+CONFIG_BFIN_EXTMEM_ICACHEABLE=y
+CONFIG_BFIN_EXTMEM_DCACHEABLE=y
+CONFIG_BFIN_EXTMEM_WRITEBACK=y
+# CONFIG_BFIN_EXTMEM_WRITETHROUGH is not set
+
+#
+# Memory Protection Unit
+#
# CONFIG_MPU is not set
#
@@ -398,11 +404,11 @@
# CONFIG_INET_IPCOMP is not set
# CONFIG_INET_XFRM_TUNNEL is not set
# CONFIG_INET_TUNNEL is not set
-CONFIG_INET_XFRM_MODE_TRANSPORT=y
-CONFIG_INET_XFRM_MODE_TUNNEL=y
-CONFIG_INET_XFRM_MODE_BEET=y
+# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
+# CONFIG_INET_XFRM_MODE_TUNNEL is not set
+# CONFIG_INET_XFRM_MODE_BEET is not set
# CONFIG_INET_LRO is not set
-CONFIG_INET_DIAG=y
+# CONFIG_INET_DIAG is not set
CONFIG_INET_TCP_DIAG=y
# CONFIG_TCP_CONG_ADVANCED is not set
CONFIG_TCP_CONG_CUBIC=y
@@ -1203,7 +1209,7 @@
# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
# CONFIG_DEBUG_KOBJECT is not set
-# CONFIG_DEBUG_BUGVERBOSE is not set
+CONFIG_DEBUG_BUGVERBOSE=y
CONFIG_DEBUG_INFO=y
# CONFIG_DEBUG_VM is not set
# CONFIG_DEBUG_WRITECOUNT is not set
diff --git a/arch/blackfin/configs/BF548-EZKIT_defconfig b/arch/blackfin/configs/BF548-EZKIT_defconfig
index deeabef..b3d3cab 100644
--- a/arch/blackfin/configs/BF548-EZKIT_defconfig
+++ b/arch/blackfin/configs/BF548-EZKIT_defconfig
@@ -366,14 +366,19 @@
# Cache Support
#
CONFIG_BFIN_ICACHE=y
+# CONFIG_BFIN_ICACHE_LOCK is not set
CONFIG_BFIN_DCACHE=y
# CONFIG_BFIN_DCACHE_BANKA is not set
-# CONFIG_BFIN_ICACHE_LOCK is not set
-CONFIG_BFIN_WB=y
-# CONFIG_BFIN_WT is not set
-# CONFIG_BFIN_L2_WB is not set
-CONFIG_BFIN_L2_WT=y
-# CONFIG_BFIN_L2_NOT_CACHED is not set
+CONFIG_BFIN_EXTMEM_ICACHEABLE=y
+CONFIG_BFIN_EXTMEM_DCACHEABLE=y
+CONFIG_BFIN_EXTMEM_WRITEBACK=y
+# CONFIG_BFIN_EXTMEM_WRITETHROUGH is not set
+# CONFIG_BFIN_L2_ICACHEABLE is not set
+# CONFIG_BFIN_L2_DCACHEABLE is not set
+
+#
+# Memory Protection Unit
+#
# CONFIG_MPU is not set
#
@@ -459,11 +464,11 @@
# CONFIG_INET_IPCOMP is not set
# CONFIG_INET_XFRM_TUNNEL is not set
# CONFIG_INET_TUNNEL is not set
-CONFIG_INET_XFRM_MODE_TRANSPORT=y
-CONFIG_INET_XFRM_MODE_TUNNEL=y
-CONFIG_INET_XFRM_MODE_BEET=y
+# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
+# CONFIG_INET_XFRM_MODE_TUNNEL is not set
+# CONFIG_INET_XFRM_MODE_BEET is not set
# CONFIG_INET_LRO is not set
-CONFIG_INET_DIAG=y
+# CONFIG_INET_DIAG is not set
CONFIG_INET_TCP_DIAG=y
# CONFIG_TCP_CONG_ADVANCED is not set
CONFIG_TCP_CONG_CUBIC=y
@@ -1606,7 +1611,7 @@
# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
# CONFIG_DEBUG_KOBJECT is not set
-# CONFIG_DEBUG_BUGVERBOSE is not set
+CONFIG_DEBUG_BUGVERBOSE=y
CONFIG_DEBUG_INFO=y
# CONFIG_DEBUG_VM is not set
# CONFIG_DEBUG_WRITECOUNT is not set
diff --git a/arch/blackfin/configs/BF561-EZKIT_defconfig b/arch/blackfin/configs/BF561-EZKIT_defconfig
index dcfbe2e..0313cd1 100644
--- a/arch/blackfin/configs/BF561-EZKIT_defconfig
+++ b/arch/blackfin/configs/BF561-EZKIT_defconfig
@@ -331,14 +331,19 @@
# Cache Support
#
CONFIG_BFIN_ICACHE=y
+# CONFIG_BFIN_ICACHE_LOCK is not set
CONFIG_BFIN_DCACHE=y
# CONFIG_BFIN_DCACHE_BANKA is not set
-# CONFIG_BFIN_ICACHE_LOCK is not set
-CONFIG_BFIN_WB=y
-# CONFIG_BFIN_WT is not set
-# CONFIG_BFIN_L2_WB is not set
-CONFIG_BFIN_L2_WT=y
-# CONFIG_BFIN_L2_NOT_CACHED is not set
+CONFIG_BFIN_EXTMEM_ICACHEABLE=y
+CONFIG_BFIN_EXTMEM_DCACHEABLE=y
+CONFIG_BFIN_EXTMEM_WRITEBACK=y
+# CONFIG_BFIN_EXTMEM_WRITETHROUGH is not set
+# CONFIG_BFIN_L2_ICACHEABLE is not set
+# CONFIG_BFIN_L2_DCACHEABLE is not set
+
+#
+# Memory Protection Unit
+#
# CONFIG_MPU is not set
#
@@ -425,11 +430,11 @@
# CONFIG_INET_IPCOMP is not set
# CONFIG_INET_XFRM_TUNNEL is not set
# CONFIG_INET_TUNNEL is not set
-CONFIG_INET_XFRM_MODE_TRANSPORT=y
-CONFIG_INET_XFRM_MODE_TUNNEL=y
-CONFIG_INET_XFRM_MODE_BEET=y
+# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
+# CONFIG_INET_XFRM_MODE_TUNNEL is not set
+# CONFIG_INET_XFRM_MODE_BEET is not set
# CONFIG_INET_LRO is not set
-CONFIG_INET_DIAG=y
+# CONFIG_INET_DIAG is not set
CONFIG_INET_TCP_DIAG=y
# CONFIG_TCP_CONG_ADVANCED is not set
CONFIG_TCP_CONG_CUBIC=y
@@ -1044,7 +1049,7 @@
# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
# CONFIG_DEBUG_KOBJECT is not set
-# CONFIG_DEBUG_BUGVERBOSE is not set
+CONFIG_DEBUG_BUGVERBOSE=y
CONFIG_DEBUG_INFO=y
# CONFIG_DEBUG_VM is not set
# CONFIG_DEBUG_WRITECOUNT is not set
diff --git a/arch/blackfin/configs/BlackStamp_defconfig b/arch/blackfin/configs/BlackStamp_defconfig
index 174c578..5d944ff 100644
--- a/arch/blackfin/configs/BlackStamp_defconfig
+++ b/arch/blackfin/configs/BlackStamp_defconfig
@@ -285,11 +285,17 @@
# Cache Support
#
CONFIG_BFIN_ICACHE=y
+# CONFIG_BFIN_ICACHE_LOCK is not set
CONFIG_BFIN_DCACHE=y
# CONFIG_BFIN_DCACHE_BANKA is not set
-# CONFIG_BFIN_ICACHE_LOCK is not set
-CONFIG_BFIN_WB=y
-# CONFIG_BFIN_WT is not set
+CONFIG_BFIN_EXTMEM_ICACHEABLE=y
+CONFIG_BFIN_EXTMEM_DCACHEABLE=y
+CONFIG_BFIN_EXTMEM_WRITEBACK=y
+# CONFIG_BFIN_EXTMEM_WRITETHROUGH is not set
+
+#
+# Memory Protection Unit
+#
# CONFIG_MPU is not set
#
diff --git a/arch/blackfin/configs/CM-BF527_defconfig b/arch/blackfin/configs/CM-BF527_defconfig
index e17875e..648a31d 100644
--- a/arch/blackfin/configs/CM-BF527_defconfig
+++ b/arch/blackfin/configs/CM-BF527_defconfig
@@ -329,11 +329,17 @@
# Cache Support
#
CONFIG_BFIN_ICACHE=y
+# CONFIG_BFIN_ICACHE_LOCK is not set
CONFIG_BFIN_DCACHE=y
# CONFIG_BFIN_DCACHE_BANKA is not set
-# CONFIG_BFIN_ICACHE_LOCK is not set
-CONFIG_BFIN_WB=y
-# CONFIG_BFIN_WT is not set
+CONFIG_BFIN_EXTMEM_ICACHEABLE=y
+CONFIG_BFIN_EXTMEM_DCACHEABLE=y
+CONFIG_BFIN_EXTMEM_WRITEBACK=y
+# CONFIG_BFIN_EXTMEM_WRITETHROUGH is not set
+
+#
+# Memory Protection Unit
+#
# CONFIG_MPU is not set
#
@@ -417,11 +423,11 @@
# CONFIG_INET_IPCOMP is not set
# CONFIG_INET_XFRM_TUNNEL is not set
# CONFIG_INET_TUNNEL is not set
-CONFIG_INET_XFRM_MODE_TRANSPORT=y
-CONFIG_INET_XFRM_MODE_TUNNEL=y
-CONFIG_INET_XFRM_MODE_BEET=y
+# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
+# CONFIG_INET_XFRM_MODE_TUNNEL is not set
+# CONFIG_INET_XFRM_MODE_BEET is not set
# CONFIG_INET_LRO is not set
-CONFIG_INET_DIAG=y
+# CONFIG_INET_DIAG is not set
CONFIG_INET_TCP_DIAG=y
# CONFIG_TCP_CONG_ADVANCED is not set
CONFIG_TCP_CONG_CUBIC=y
@@ -1246,7 +1252,7 @@
# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
# CONFIG_DEBUG_KOBJECT is not set
-# CONFIG_DEBUG_BUGVERBOSE is not set
+CONFIG_DEBUG_BUGVERBOSE=y
# CONFIG_DEBUG_INFO is not set
# CONFIG_DEBUG_VM is not set
# CONFIG_DEBUG_WRITECOUNT is not set
diff --git a/arch/blackfin/configs/CM-BF533_defconfig b/arch/blackfin/configs/CM-BF533_defconfig
index fafd95e..ae665b9 100644
--- a/arch/blackfin/configs/CM-BF533_defconfig
+++ b/arch/blackfin/configs/CM-BF533_defconfig
@@ -262,12 +262,17 @@
# Cache Support
#
CONFIG_BFIN_ICACHE=y
+# CONFIG_BFIN_ICACHE_LOCK is not set
CONFIG_BFIN_DCACHE=y
# CONFIG_BFIN_DCACHE_BANKA is not set
-# CONFIG_BFIN_ICACHE_LOCK is not set
-CONFIG_BFIN_WB=y
-# CONFIG_BFIN_WT is not set
-CONFIG_L1_MAX_PIECE=16
+CONFIG_BFIN_EXTMEM_ICACHEABLE=y
+CONFIG_BFIN_EXTMEM_DCACHEABLE=y
+CONFIG_BFIN_EXTMEM_WRITEBACK=y
+# CONFIG_BFIN_EXTMEM_WRITETHROUGH is not set
+
+#
+# Memory Protection Unit
+#
# CONFIG_MPU is not set
#
@@ -353,10 +358,10 @@
# CONFIG_INET_IPCOMP is not set
# CONFIG_INET_XFRM_TUNNEL is not set
# CONFIG_INET_TUNNEL is not set
-CONFIG_INET_XFRM_MODE_TRANSPORT=y
-CONFIG_INET_XFRM_MODE_TUNNEL=y
-CONFIG_INET_XFRM_MODE_BEET=y
-CONFIG_INET_DIAG=y
+# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
+# CONFIG_INET_XFRM_MODE_TUNNEL is not set
+# CONFIG_INET_XFRM_MODE_BEET is not set
+# CONFIG_INET_DIAG is not set
CONFIG_INET_TCP_DIAG=y
# CONFIG_TCP_CONG_ADVANCED is not set
CONFIG_TCP_CONG_CUBIC=y
@@ -873,7 +878,7 @@
CONFIG_DEBUG_FS=y
# CONFIG_HEADERS_CHECK is not set
# CONFIG_DEBUG_KERNEL is not set
-# CONFIG_DEBUG_BUGVERBOSE is not set
+CONFIG_DEBUG_BUGVERBOSE=y
CONFIG_DEBUG_MMRS=y
CONFIG_DEBUG_HUNT_FOR_ZERO=y
CONFIG_DEBUG_BFIN_HWTRACE_ON=y
diff --git a/arch/blackfin/configs/CM-BF537E_defconfig b/arch/blackfin/configs/CM-BF537E_defconfig
index e73aa5a..d74b6f4 100644
--- a/arch/blackfin/configs/CM-BF537E_defconfig
+++ b/arch/blackfin/configs/CM-BF537E_defconfig
@@ -297,11 +297,17 @@
# Cache Support
#
CONFIG_BFIN_ICACHE=y
+# CONFIG_BFIN_ICACHE_LOCK is not set
CONFIG_BFIN_DCACHE=y
# CONFIG_BFIN_DCACHE_BANKA is not set
-# CONFIG_BFIN_ICACHE_LOCK is not set
-CONFIG_BFIN_WB=y
-# CONFIG_BFIN_WT is not set
+CONFIG_BFIN_EXTMEM_ICACHEABLE=y
+CONFIG_BFIN_EXTMEM_DCACHEABLE=y
+CONFIG_BFIN_EXTMEM_WRITEBACK=y
+# CONFIG_BFIN_EXTMEM_WRITETHROUGH is not set
+
+#
+# Memory Protection Unit
+#
# CONFIG_MPU is not set
#
@@ -383,11 +389,11 @@
# CONFIG_INET_IPCOMP is not set
# CONFIG_INET_XFRM_TUNNEL is not set
# CONFIG_INET_TUNNEL is not set
-CONFIG_INET_XFRM_MODE_TRANSPORT=y
-CONFIG_INET_XFRM_MODE_TUNNEL=y
-CONFIG_INET_XFRM_MODE_BEET=y
+# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
+# CONFIG_INET_XFRM_MODE_TUNNEL is not set
+# CONFIG_INET_XFRM_MODE_BEET is not set
# CONFIG_INET_LRO is not set
-CONFIG_INET_DIAG=y
+# CONFIG_INET_DIAG is not set
CONFIG_INET_TCP_DIAG=y
# CONFIG_TCP_CONG_ADVANCED is not set
CONFIG_TCP_CONG_CUBIC=y
@@ -861,7 +867,7 @@
# CONFIG_HEADERS_CHECK is not set
CONFIG_DEBUG_SECTION_MISMATCH=y
# CONFIG_DEBUG_KERNEL is not set
-# CONFIG_DEBUG_BUGVERBOSE is not set
+CONFIG_DEBUG_BUGVERBOSE=y
# CONFIG_DEBUG_MEMORY_INIT is not set
# CONFIG_RCU_CPU_STALL_DETECTOR is not set
diff --git a/arch/blackfin/configs/CM-BF537U_defconfig b/arch/blackfin/configs/CM-BF537U_defconfig
index 8021130..7fc8dfa 100644
--- a/arch/blackfin/configs/CM-BF537U_defconfig
+++ b/arch/blackfin/configs/CM-BF537U_defconfig
@@ -270,12 +270,17 @@
# Cache Support
#
CONFIG_BFIN_ICACHE=y
+# CONFIG_BFIN_ICACHE_LOCK is not set
CONFIG_BFIN_DCACHE=y
# CONFIG_BFIN_DCACHE_BANKA is not set
-# CONFIG_BFIN_ICACHE_LOCK is not set
-CONFIG_BFIN_WB=y
-# CONFIG_BFIN_WT is not set
-CONFIG_L1_MAX_PIECE=16
+CONFIG_BFIN_EXTMEM_ICACHEABLE=y
+CONFIG_BFIN_EXTMEM_DCACHEABLE=y
+CONFIG_BFIN_EXTMEM_WRITEBACK=y
+# CONFIG_BFIN_EXTMEM_WRITETHROUGH is not set
+
+#
+# Memory Protection Unit
+#
# CONFIG_MPU is not set
#
@@ -361,10 +366,10 @@
# CONFIG_INET_IPCOMP is not set
# CONFIG_INET_XFRM_TUNNEL is not set
# CONFIG_INET_TUNNEL is not set
-CONFIG_INET_XFRM_MODE_TRANSPORT=y
-CONFIG_INET_XFRM_MODE_TUNNEL=y
-CONFIG_INET_XFRM_MODE_BEET=y
-CONFIG_INET_DIAG=y
+# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
+# CONFIG_INET_XFRM_MODE_TUNNEL is not set
+# CONFIG_INET_XFRM_MODE_BEET is not set
+# CONFIG_INET_DIAG is not set
CONFIG_INET_TCP_DIAG=y
# CONFIG_TCP_CONG_ADVANCED is not set
CONFIG_TCP_CONG_CUBIC=y
@@ -901,7 +906,7 @@
CONFIG_DEBUG_FS=y
# CONFIG_HEADERS_CHECK is not set
# CONFIG_DEBUG_KERNEL is not set
-# CONFIG_DEBUG_BUGVERBOSE is not set
+CONFIG_DEBUG_BUGVERBOSE=y
CONFIG_DEBUG_MMRS=y
CONFIG_DEBUG_HUNT_FOR_ZERO=y
CONFIG_DEBUG_BFIN_HWTRACE_ON=y
diff --git a/arch/blackfin/configs/CM-BF548_defconfig b/arch/blackfin/configs/CM-BF548_defconfig
index dd815f0..acca4e5 100644
--- a/arch/blackfin/configs/CM-BF548_defconfig
+++ b/arch/blackfin/configs/CM-BF548_defconfig
@@ -333,12 +333,19 @@
# Cache Support
#
CONFIG_BFIN_ICACHE=y
+# CONFIG_BFIN_ICACHE_LOCK is not set
CONFIG_BFIN_DCACHE=y
# CONFIG_BFIN_DCACHE_BANKA is not set
-# CONFIG_BFIN_ICACHE_LOCK is not set
-CONFIG_BFIN_WB=y
-# CONFIG_BFIN_WT is not set
-CONFIG_L1_MAX_PIECE=16
+CONFIG_BFIN_EXTMEM_ICACHEABLE=y
+CONFIG_BFIN_EXTMEM_DCACHEABLE=y
+CONFIG_BFIN_EXTMEM_WRITEBACK=y
+# CONFIG_BFIN_EXTMEM_WRITETHROUGH is not set
+# CONFIG_BFIN_L2_ICACHEABLE is not set
+# CONFIG_BFIN_L2_DCACHEABLE is not set
+
+#
+# Memory Protection Unit
+#
# CONFIG_MPU is not set
#
@@ -428,11 +435,11 @@
# CONFIG_INET_IPCOMP is not set
# CONFIG_INET_XFRM_TUNNEL is not set
# CONFIG_INET_TUNNEL is not set
-CONFIG_INET_XFRM_MODE_TRANSPORT=y
-CONFIG_INET_XFRM_MODE_TUNNEL=y
-CONFIG_INET_XFRM_MODE_BEET=y
+# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
+# CONFIG_INET_XFRM_MODE_TUNNEL is not set
+# CONFIG_INET_XFRM_MODE_BEET is not set
# CONFIG_INET_LRO is not set
-CONFIG_INET_DIAG=y
+# CONFIG_INET_DIAG is not set
CONFIG_INET_TCP_DIAG=y
# CONFIG_TCP_CONG_ADVANCED is not set
CONFIG_TCP_CONG_CUBIC=y
@@ -1334,7 +1341,7 @@
CONFIG_DEBUG_FS=y
# CONFIG_HEADERS_CHECK is not set
# CONFIG_DEBUG_KERNEL is not set
-# CONFIG_DEBUG_BUGVERBOSE is not set
+CONFIG_DEBUG_BUGVERBOSE=y
# CONFIG_SAMPLES is not set
CONFIG_DEBUG_MMRS=y
CONFIG_DEBUG_HUNT_FOR_ZERO=y
diff --git a/arch/blackfin/configs/CM-BF561_defconfig b/arch/blackfin/configs/CM-BF561_defconfig
index 16c198b..bae4ee6 100644
--- a/arch/blackfin/configs/CM-BF561_defconfig
+++ b/arch/blackfin/configs/CM-BF561_defconfig
@@ -308,12 +308,19 @@
# Cache Support
#
CONFIG_BFIN_ICACHE=y
+# CONFIG_BFIN_ICACHE_LOCK is not set
CONFIG_BFIN_DCACHE=y
# CONFIG_BFIN_DCACHE_BANKA is not set
-# CONFIG_BFIN_ICACHE_LOCK is not set
-CONFIG_BFIN_WB=y
-# CONFIG_BFIN_WT is not set
-CONFIG_L1_MAX_PIECE=16
+CONFIG_BFIN_EXTMEM_ICACHEABLE=y
+CONFIG_BFIN_EXTMEM_DCACHEABLE=y
+CONFIG_BFIN_EXTMEM_WRITEBACK=y
+# CONFIG_BFIN_EXTMEM_WRITETHROUGH is not set
+# CONFIG_BFIN_L2_ICACHEABLE is not set
+# CONFIG_BFIN_L2_DCACHEABLE is not set
+
+#
+# Memory Protection Unit
+#
# CONFIG_MPU is not set
#
@@ -395,11 +402,11 @@
# CONFIG_INET_IPCOMP is not set
# CONFIG_INET_XFRM_TUNNEL is not set
# CONFIG_INET_TUNNEL is not set
-CONFIG_INET_XFRM_MODE_TRANSPORT=y
-CONFIG_INET_XFRM_MODE_TUNNEL=y
-CONFIG_INET_XFRM_MODE_BEET=y
+# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
+# CONFIG_INET_XFRM_MODE_TUNNEL is not set
+# CONFIG_INET_XFRM_MODE_BEET is not set
# CONFIG_INET_LRO is not set
-CONFIG_INET_DIAG=y
+# CONFIG_INET_DIAG is not set
CONFIG_INET_TCP_DIAG=y
# CONFIG_TCP_CONG_ADVANCED is not set
CONFIG_TCP_CONG_CUBIC=y
@@ -837,7 +844,7 @@
CONFIG_DEBUG_FS=y
# CONFIG_HEADERS_CHECK is not set
# CONFIG_DEBUG_KERNEL is not set
-# CONFIG_DEBUG_BUGVERBOSE is not set
+CONFIG_DEBUG_BUGVERBOSE=y
# CONFIG_SAMPLES is not set
CONFIG_DEBUG_MMRS=y
CONFIG_DEBUG_HUNT_FOR_ZERO=y
diff --git a/arch/blackfin/configs/H8606_defconfig b/arch/blackfin/configs/H8606_defconfig
index 6b4c1a9..a6a7c8e 100644
--- a/arch/blackfin/configs/H8606_defconfig
+++ b/arch/blackfin/configs/H8606_defconfig
@@ -258,12 +258,18 @@
# Cache Support
#
CONFIG_BFIN_ICACHE=y
+# CONFIG_BFIN_ICACHE_LOCK is not set
CONFIG_BFIN_DCACHE=y
# CONFIG_BFIN_DCACHE_BANKA is not set
-CONFIG_BFIN_ICACHE_LOCK=y
-CONFIG_BFIN_WB=y
-# CONFIG_BFIN_WT is not set
-CONFIG_L1_MAX_PIECE=16
+CONFIG_BFIN_EXTMEM_ICACHEABLE=y
+CONFIG_BFIN_EXTMEM_DCACHEABLE=y
+CONFIG_BFIN_EXTMEM_WRITEBACK=y
+# CONFIG_BFIN_EXTMEM_WRITETHROUGH is not set
+
+#
+# Memory Protection Unit
+#
+# CONFIG_MPU is not set
#
# Asynchronous Memory Configuration
diff --git a/arch/blackfin/configs/PNAV-10_defconfig b/arch/blackfin/configs/PNAV-10_defconfig
index 09701f9..ff377fa 100644
--- a/arch/blackfin/configs/PNAV-10_defconfig
+++ b/arch/blackfin/configs/PNAV-10_defconfig
@@ -295,11 +295,17 @@
# Cache Support
#
CONFIG_BFIN_ICACHE=y
+# CONFIG_BFIN_ICACHE_LOCK is not set
CONFIG_BFIN_DCACHE=y
# CONFIG_BFIN_DCACHE_BANKA is not set
-# CONFIG_BFIN_ICACHE_LOCK is not set
-CONFIG_BFIN_WB=y
-# CONFIG_BFIN_WT is not set
+CONFIG_BFIN_EXTMEM_ICACHEABLE=y
+CONFIG_BFIN_EXTMEM_DCACHEABLE=y
+CONFIG_BFIN_EXTMEM_WRITEBACK=y
+# CONFIG_BFIN_EXTMEM_WRITETHROUGH is not set
+
+#
+# Memory Protection Unit
+#
# CONFIG_MPU is not set
#
@@ -382,11 +388,11 @@
# CONFIG_INET_IPCOMP is not set
# CONFIG_INET_XFRM_TUNNEL is not set
# CONFIG_INET_TUNNEL is not set
-CONFIG_INET_XFRM_MODE_TRANSPORT=y
-CONFIG_INET_XFRM_MODE_TUNNEL=y
-CONFIG_INET_XFRM_MODE_BEET=y
+# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
+# CONFIG_INET_XFRM_MODE_TUNNEL is not set
+# CONFIG_INET_XFRM_MODE_BEET is not set
# CONFIG_INET_LRO is not set
-CONFIG_INET_DIAG=y
+# CONFIG_INET_DIAG is not set
CONFIG_INET_TCP_DIAG=y
# CONFIG_TCP_CONG_ADVANCED is not set
CONFIG_TCP_CONG_CUBIC=y
diff --git a/arch/blackfin/configs/SRV1_defconfig b/arch/blackfin/configs/SRV1_defconfig
index ec84a53..814f9ca 100644
--- a/arch/blackfin/configs/SRV1_defconfig
+++ b/arch/blackfin/configs/SRV1_defconfig
@@ -279,12 +279,18 @@
# Cache Support
#
CONFIG_BFIN_ICACHE=y
+# CONFIG_BFIN_ICACHE_LOCK is not set
CONFIG_BFIN_DCACHE=y
# CONFIG_BFIN_DCACHE_BANKA is not set
-# CONFIG_BFIN_ICACHE_LOCK is not set
-CONFIG_BFIN_WB=y
-# CONFIG_BFIN_WT is not set
-CONFIG_L1_MAX_PIECE=16
+CONFIG_BFIN_EXTMEM_ICACHEABLE=y
+CONFIG_BFIN_EXTMEM_DCACHEABLE=y
+CONFIG_BFIN_EXTMEM_WRITEBACK=y
+# CONFIG_BFIN_EXTMEM_WRITETHROUGH is not set
+
+#
+# Memory Protection Unit
+#
+# CONFIG_MPU is not set
#
# Asynchronous Memory Configuration
diff --git a/arch/blackfin/configs/TCM-BF537_defconfig b/arch/blackfin/configs/TCM-BF537_defconfig
index 6e27962..375e75a 100644
--- a/arch/blackfin/configs/TCM-BF537_defconfig
+++ b/arch/blackfin/configs/TCM-BF537_defconfig
@@ -287,11 +287,17 @@
# Cache Support
#
CONFIG_BFIN_ICACHE=y
+# CONFIG_BFIN_ICACHE_LOCK is not set
CONFIG_BFIN_DCACHE=y
# CONFIG_BFIN_DCACHE_BANKA is not set
-# CONFIG_BFIN_ICACHE_LOCK is not set
-CONFIG_BFIN_WB=y
-# CONFIG_BFIN_WT is not set
+CONFIG_BFIN_EXTMEM_ICACHEABLE=y
+CONFIG_BFIN_EXTMEM_DCACHEABLE=y
+CONFIG_BFIN_EXTMEM_WRITEBACK=y
+# CONFIG_BFIN_EXTMEM_WRITETHROUGH is not set
+
+#
+# Memory Protection Unit
+#
# CONFIG_MPU is not set
#
@@ -709,7 +715,7 @@
CONFIG_DEBUG_FS=y
# CONFIG_HEADERS_CHECK is not set
# CONFIG_DEBUG_KERNEL is not set
-# CONFIG_DEBUG_BUGVERBOSE is not set
+CONFIG_DEBUG_BUGVERBOSE=y
# CONFIG_DEBUG_MEMORY_INIT is not set
# CONFIG_RCU_CPU_STALL_DETECTOR is not set
# CONFIG_SYSCTL_SYSCALL_CHECK is not set
diff --git a/arch/blackfin/include/asm/blackfin.h b/arch/blackfin/include/asm/blackfin.h
index 8bb2cb1..4d44395 100644
--- a/arch/blackfin/include/asm/blackfin.h
+++ b/arch/blackfin/include/asm/blackfin.h
@@ -86,6 +86,7 @@
#endif /* __ASSEMBLY__ */
+#include <asm/mem_map.h>
#include <mach/blackfin.h>
#include <asm/bfin-global.h>
diff --git a/arch/blackfin/include/asm/cache.h b/arch/blackfin/include/asm/cache.h
index 2ef669e..477050a 100644
--- a/arch/blackfin/include/asm/cache.h
+++ b/arch/blackfin/include/asm/cache.h
@@ -35,10 +35,10 @@
#if defined(CONFIG_SMP) && \
!defined(CONFIG_BFIN_CACHE_COHERENT)
-# if defined(CONFIG_BFIN_ICACHE)
+# if defined(CONFIG_BFIN_EXTMEM_ICACHEABLE) || defined(CONFIG_BFIN_L2_ICACHEABLE)
# define __ARCH_SYNC_CORE_ICACHE
# endif
-# if defined(CONFIG_BFIN_DCACHE)
+# if defined(CONFIG_BFIN_EXTMEM_DCACHEABLE) || defined(CONFIG_BFIN_L2_DCACHEABLE)
# define __ARCH_SYNC_CORE_DCACHE
# endif
#ifndef __ASSEMBLY__
diff --git a/arch/blackfin/include/asm/cacheflush.h b/arch/blackfin/include/asm/cacheflush.h
index 5c17dee..7e55549e 100644
--- a/arch/blackfin/include/asm/cacheflush.h
+++ b/arch/blackfin/include/asm/cacheflush.h
@@ -56,7 +56,7 @@
static inline void flush_icache_range(unsigned start, unsigned end)
{
-#if defined(CONFIG_BFIN_WB)
+#if defined(CONFIG_BFIN_EXTMEM_WRITEBACK) || defined(CONFIG_BFIN_L2_WRITEBACK)
blackfin_dcache_flush_range(start, end);
#endif
@@ -87,9 +87,9 @@
#else
# define invalidate_dcache_range(start,end) do { } while (0)
#endif
-#if defined(CONFIG_BFIN_DCACHE) && defined(CONFIG_BFIN_WB)
+#if defined(CONFIG_BFIN_EXTMEM_WRITEBACK) || defined(CONFIG_BFIN_L2_WRITEBACK)
# define flush_dcache_range(start,end) blackfin_dcache_flush_range((start), (end))
-# define flush_dcache_page(page) blackfin_dflush_page(page_address(page))
+# define flush_dcache_page(page) blackfin_dflush_page(page_address(page))
#else
# define flush_dcache_range(start,end) do { } while (0)
# define flush_dcache_page(page) do { } while (0)
@@ -100,7 +100,7 @@
static inline int bfin_addr_dcacheable(unsigned long addr)
{
-#ifdef CONFIG_BFIN_DCACHE
+#ifdef CONFIG_BFIN_EXTMEM_DCACHEABLE
if (addr < (_ramend - DMA_UNCACHED_REGION))
return 1;
#endif
@@ -109,7 +109,7 @@
addr >= _ramend && addr < physical_mem_end)
return 1;
-#ifndef CONFIG_BFIN_L2_NOT_CACHED
+#ifdef CONFIG_BFIN_L2_DCACHEABLE
if (addr >= L2_START && addr < L2_START + L2_LENGTH)
return 1;
#endif
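
The renamed #ifdefs above keep the same rule as before: with a write-back data cache, CPU-side writes must be flushed out of the cache before a device reads the buffer, while under write-through or uncached policies the same call compiles away to a no-op. A minimal hedged sketch of a driver following that rule (the buffer and the DMA kick-off are hypothetical):

/* Hypothetical transmit path flushing a cached buffer before DMA. */
#include <linux/kernel.h>
#include <asm/cacheflush.h>

static char hypothetical_tx_buf[512];

static void hypothetical_start_tx(void)
{
	/* push the CPU's cached writes to memory so the DMA engine sees them */
	flush_dcache_range((unsigned long)hypothetical_tx_buf,
			   (unsigned long)hypothetical_tx_buf +
			   sizeof(hypothetical_tx_buf));

	/* hypothetical_dma_kick(hypothetical_tx_buf, sizeof(hypothetical_tx_buf)); */
}
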
diff --git a/arch/blackfin/include/asm/cplb.h b/arch/blackfin/include/asm/cplb.h
index a75a6a9..c5dacf8 100644
--- a/arch/blackfin/include/asm/cplb.h
+++ b/arch/blackfin/include/asm/cplb.h
@@ -37,8 +37,6 @@
#define L1_IMEMORY ( CPLB_USER_RD | CPLB_VALID | CPLB_LOCK)
#define SDRAM_INON_CHBL ( CPLB_USER_RD | CPLB_VALID)
-/*Use the menuconfig cache policy here - CONFIG_BFIN_WT/CONFIG_BFIN_WB*/
-
#if ANOMALY_05000158
#define ANOMALY_05000158_WORKAROUND 0x200
#else
@@ -47,10 +45,12 @@
#define CPLB_COMMON (CPLB_DIRTY | CPLB_SUPV_WR | CPLB_USER_WR | CPLB_USER_RD | CPLB_VALID | ANOMALY_05000158_WORKAROUND)
-#ifdef CONFIG_BFIN_WB /*Write Back Policy */
+#ifdef CONFIG_BFIN_EXTMEM_WRITEBACK
#define SDRAM_DGENERIC (CPLB_L1_CHBL | CPLB_COMMON)
-#else /*Write Through */
+#elif defined(CONFIG_BFIN_EXTMEM_WRITETHROUGH)
#define SDRAM_DGENERIC (CPLB_L1_CHBL | CPLB_WT | CPLB_L1_AOW | CPLB_COMMON)
+#else
+#define SDRAM_DGENERIC (CPLB_COMMON)
#endif
#define SDRAM_DNON_CHBL (CPLB_COMMON)
@@ -61,21 +61,23 @@
#ifdef CONFIG_SMP
#define L2_ATTR (INITIAL_T | I_CPLB | D_CPLB)
-#define L2_IMEMORY (CPLB_COMMON)
-#define L2_DMEMORY (CPLB_LOCK | CPLB_COMMON)
+#define L2_IMEMORY (CPLB_COMMON | PAGE_SIZE_1MB)
+#define L2_DMEMORY (CPLB_LOCK | CPLB_COMMON | PAGE_SIZE_1MB)
#else
#define L2_ATTR (INITIAL_T | SWITCH_T | I_CPLB | D_CPLB)
-#define L2_IMEMORY (SDRAM_IGENERIC)
-
-# if defined(CONFIG_BFIN_L2_WB)
-# define L2_DMEMORY (CPLB_L1_CHBL | CPLB_COMMON)
-# elif defined(CONFIG_BFIN_L2_WT)
-# define L2_DMEMORY (CPLB_L1_CHBL | CPLB_WT | CPLB_L1_AOW | CPLB_COMMON)
-# elif defined(CONFIG_BFIN_L2_NOT_CACHED)
-# define L2_DMEMORY (CPLB_COMMON)
+# if defined(CONFIG_BFIN_L2_ICACHEABLE)
+# define L2_IMEMORY (CPLB_L1_CHBL | CPLB_USER_RD | CPLB_VALID | PAGE_SIZE_1MB)
# else
-# define L2_DMEMORY (0)
+# define L2_IMEMORY ( CPLB_USER_RD | CPLB_VALID | PAGE_SIZE_1MB)
+# endif
+
+# if defined(CONFIG_BFIN_L2_WRITEBACK)
+# define L2_DMEMORY (CPLB_L1_CHBL | CPLB_COMMON | PAGE_SIZE_1MB)
+# elif defined(CONFIG_BFIN_L2_WRITETHROUGH)
+# define L2_DMEMORY (CPLB_L1_CHBL | CPLB_WT | CPLB_L1_AOW | CPLB_COMMON | PAGE_SIZE_1MB)
+# else
+# define L2_DMEMORY (CPLB_COMMON | PAGE_SIZE_1MB)
# endif
#endif /* CONFIG_SMP */
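Illustration of the design choice above (not part of the patch): the L2 CPLB descriptors now carry PAGE_SIZE_1MB themselves, so the MPU table setup later in this series can install them verbatim instead of ORing the page size in at every call site, roughly:

/* Sketch mirroring cplb-mpu/cplbinit.c further down in this patch. */
#if L2_LENGTH > 0
	dcplb_tbl[cpu][i_d].addr   = L2_START;
	dcplb_tbl[cpu][i_d++].data = L2_DMEMORY;   /* PAGE_SIZE_1MB already included */
	icplb_tbl[cpu][i_i].addr   = L2_START;
	icplb_tbl[cpu][i_i++].data = L2_IMEMORY;
#endif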
diff --git a/arch/blackfin/include/asm/dma-mapping.h b/arch/blackfin/include/asm/dma-mapping.h
index d7d9148..ed6b1f3 100644
--- a/arch/blackfin/include/asm/dma-mapping.h
+++ b/arch/blackfin/include/asm/dma-mapping.h
@@ -95,4 +95,17 @@
enum dma_data_direction dir)
{
}
+
+static inline void dma_sync_sg_for_cpu(struct device *dev,
+ struct scatterlist *sg,
+ int nents, enum dma_data_direction dir)
+{
+}
+
+static inline void dma_sync_sg_for_device(struct device *dev,
+ struct scatterlist *sg,
+ int nents, enum dma_data_direction dir)
+{
+}
+
#endif /* _BLACKFIN_DMA_MAPPING_H */
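The new dma_sync_sg_for_cpu()/dma_sync_sg_for_device() stubs fill in the scatter-gather half of the streaming DMA API. A minimal sketch of how a hypothetical driver would use them (the function and names below are assumptions, not from the patch):

/* Illustrative only -- generic streaming-DMA usage, not part of this patch. */
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

static void example_rx_complete(struct device *dev, struct scatterlist *sg, int nents)
{
	int count = dma_map_sg(dev, sg, nents, DMA_FROM_DEVICE);

	if (!count)
		return;

	/* ... program the DMA controller with the 'count' mapped segments ... */

	/* Hand ownership back to the CPU before touching the data. */
	dma_sync_sg_for_cpu(dev, sg, nents, DMA_FROM_DEVICE);

	/* ... consume the received data ... */

	dma_unmap_sg(dev, sg, nents, DMA_FROM_DEVICE);
}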
diff --git a/arch/blackfin/include/asm/ipipe.h b/arch/blackfin/include/asm/ipipe.h
index bbe1c37..87ba9ad 100644
--- a/arch/blackfin/include/asm/ipipe.h
+++ b/arch/blackfin/include/asm/ipipe.h
@@ -35,9 +35,9 @@
#include <asm/atomic.h>
#include <asm/traps.h>
-#define IPIPE_ARCH_STRING "1.10-00"
+#define IPIPE_ARCH_STRING "1.11-00"
#define IPIPE_MAJOR_NUMBER 1
-#define IPIPE_MINOR_NUMBER 10
+#define IPIPE_MINOR_NUMBER 11
#define IPIPE_PATCH_NUMBER 0
#ifdef CONFIG_SMP
@@ -207,7 +207,7 @@
int ipipe_start_irq_thread(unsigned irq, struct irq_desc *desc);
-#ifdef CONFIG_GENERIC_CLOCKEVENTS
+#ifdef CONFIG_TICKSOURCE_CORETMR
#define IRQ_SYSTMR IRQ_CORETMR
#define IRQ_PRIOTMR IRQ_CORETMR
#else
@@ -240,8 +240,13 @@
#define ipipe_init_irq_threads() do { } while (0)
#define ipipe_start_irq_thread(irq, desc) 0
+#ifndef CONFIG_TICKSOURCE_GPTMR0
#define IRQ_SYSTMR IRQ_CORETMR
#define IRQ_PRIOTMR IRQ_CORETMR
+#else
+#define IRQ_SYSTMR IRQ_TIMER0
+#define IRQ_PRIOTMR CONFIG_IRQ_TIMER0
+#endif
#define __ipipe_root_tick_p(regs) 1
diff --git a/arch/blackfin/include/asm/ipipe_base.h b/arch/blackfin/include/asm/ipipe_base.h
index 3e8acbd..490098f 100644
--- a/arch/blackfin/include/asm/ipipe_base.h
+++ b/arch/blackfin/include/asm/ipipe_base.h
@@ -51,23 +51,23 @@
extern unsigned long __ipipe_root_status; /* Alias to ipipe_root_cpudom_var(status) */
-static inline void __ipipe_stall_root(void)
-{
- volatile unsigned long *p = &__ipipe_root_status;
- set_bit(0, p);
-}
+#define __ipipe_stall_root() \
+ do { \
+ volatile unsigned long *p = &__ipipe_root_status; \
+ set_bit(0, p); \
+ } while (0)
-static inline unsigned long __ipipe_test_and_stall_root(void)
-{
- volatile unsigned long *p = &__ipipe_root_status;
- return test_and_set_bit(0, p);
-}
+#define __ipipe_test_and_stall_root() \
+ ({ \
+ volatile unsigned long *p = &__ipipe_root_status; \
+ test_and_set_bit(0, p); \
+ })
-static inline unsigned long __ipipe_test_root(void)
-{
- const unsigned long *p = &__ipipe_root_status;
- return test_bit(0, p);
-}
+#define __ipipe_test_root() \
+ ({ \
+ const unsigned long *p = &__ipipe_root_status; \
+ test_bit(0, p); \
+ })
#endif /* !__ASSEMBLY__ */
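Converting the inline helpers to macros relies on GNU statement expressions, so call sites can keep treating __ipipe_test_root() and friends as if they returned a value. A tiny standalone illustration of the extension (nothing Blackfin-specific, purely an assumption-free example of the GCC feature):

/* Illustrative only: a ({ ... }) block evaluates to its last expression. */
#define lowest_bit(word)                        \
	({                                      \
		unsigned long __w = (word);     \
		__w & 1;                        \
	})

/* usable in value context, e.g.:  if (lowest_bit(flags)) ... */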
diff --git a/arch/blackfin/include/asm/irq.h b/arch/blackfin/include/asm/irq.h
index 9a7f63a..42a15f5 100644
--- a/arch/blackfin/include/asm/irq.h
+++ b/arch/blackfin/include/asm/irq.h
@@ -22,13 +22,6 @@
/* SYS_IRQS and NR_IRQS are defined in <mach-bf5xx/irq.h> */
#include <mach/irq.h>
-/* Xenomai IPIPE helpers */
-#define local_irq_restore_hw(x) local_irq_restore(x)
-#define local_irq_save_hw(x) local_irq_save(x)
-#define local_irq_enable_hw(x) local_irq_enable(x)
-#define local_irq_disable_hw(x) local_irq_disable(x)
-#define irqs_disabled_hw(x) irqs_disabled(x)
-
#if ANOMALY_05000244 && defined(CONFIG_BFIN_ICACHE)
# define NOP_PAD_ANOMALY_05000244 "nop; nop;"
#else
diff --git a/arch/blackfin/include/asm/irqflags.h b/arch/blackfin/include/asm/irqflags.h
index 139cba4..9b19a19 100644
--- a/arch/blackfin/include/asm/irqflags.h
+++ b/arch/blackfin/include/asm/irqflags.h
@@ -31,6 +31,150 @@
return flags;
}
+#ifdef CONFIG_IPIPE
+
+#include <linux/ipipe_base.h>
+#include <linux/ipipe_trace.h>
+
+#ifdef CONFIG_DEBUG_HWERR
+# define bfin_no_irqs 0x3f
+#else
+# define bfin_no_irqs 0x1f
+#endif
+
+#define raw_local_irq_disable() \
+ do { \
+ ipipe_check_context(ipipe_root_domain); \
+ __ipipe_stall_root(); \
+ barrier(); \
+ } while (0)
+
+static inline void raw_local_irq_enable(void)
+{
+ barrier();
+ ipipe_check_context(ipipe_root_domain);
+ __ipipe_unstall_root();
+}
+
+#define raw_local_save_flags_ptr(x) \
+ do { \
+ *(x) = __ipipe_test_root() ? bfin_no_irqs : bfin_irq_flags; \
+ } while (0)
+
+#define raw_local_save_flags(x) raw_local_save_flags_ptr(&(x))
+
+#define raw_irqs_disabled_flags(x) ((x) == bfin_no_irqs)
+
+#define raw_local_irq_save_ptr(x) \
+ do { \
+ *(x) = __ipipe_test_and_stall_root() ? bfin_no_irqs : bfin_irq_flags; \
+ barrier(); \
+ } while (0)
+
+#define raw_local_irq_save(x) \
+ do { \
+ ipipe_check_context(ipipe_root_domain); \
+ raw_local_irq_save_ptr(&(x)); \
+ } while (0)
+
+static inline unsigned long raw_mangle_irq_bits(int virt, unsigned long real)
+{
+ /*
+ * Merge virtual and real interrupt mask bits into a single
+ * 32bit word.
+ */
+ return (real & ~(1 << 31)) | ((virt != 0) << 31);
+}
+
+static inline int raw_demangle_irq_bits(unsigned long *x)
+{
+ int virt = (*x & (1 << 31)) != 0;
+ *x &= ~(1L << 31);
+ return virt;
+}
+
+static inline void local_irq_disable_hw_notrace(void)
+{
+ bfin_cli();
+}
+
+static inline void local_irq_enable_hw_notrace(void)
+{
+ bfin_sti(bfin_irq_flags);
+}
+
+#define local_save_flags_hw(flags) \
+ do { \
+ (flags) = bfin_read_IMASK(); \
+ } while (0)
+
+#define irqs_disabled_flags_hw(flags) (((flags) & ~0x3f) == 0)
+
+#define irqs_disabled_hw() \
+ ({ \
+ unsigned long flags; \
+ local_save_flags_hw(flags); \
+ irqs_disabled_flags_hw(flags); \
+ })
+
+static inline void local_irq_save_ptr_hw(unsigned long *flags)
+{
+ *flags = bfin_cli();
+#ifdef CONFIG_DEBUG_HWERR
+ bfin_sti(0x3f);
+#endif
+}
+
+#define local_irq_save_hw_notrace(flags) \
+ do { \
+ local_irq_save_ptr_hw(&(flags)); \
+ } while (0)
+
+static inline void local_irq_restore_hw_notrace(unsigned long flags)
+{
+ if (!irqs_disabled_flags_hw(flags))
+ local_irq_enable_hw_notrace();
+}
+
+#ifdef CONFIG_IPIPE_TRACE_IRQSOFF
+# define local_irq_disable_hw() \
+ do { \
+ if (!irqs_disabled_hw()) { \
+ local_irq_disable_hw_notrace(); \
+ ipipe_trace_begin(0x80000000); \
+ } \
+ } while (0)
+# define local_irq_enable_hw() \
+ do { \
+ if (irqs_disabled_hw()) { \
+ ipipe_trace_end(0x80000000); \
+ local_irq_enable_hw_notrace(); \
+ } \
+ } while (0)
+# define local_irq_save_hw(flags) \
+ do { \
+ local_save_flags_hw(flags); \
+ if (!irqs_disabled_flags_hw(flags)) { \
+ local_irq_disable_hw_notrace(); \
+ ipipe_trace_begin(0x80000001); \
+ } \
+ } while (0)
+# define local_irq_restore_hw(flags) \
+ do { \
+ if (!irqs_disabled_flags_hw(flags)) { \
+ ipipe_trace_end(0x80000001); \
+ local_irq_enable_hw_notrace(); \
+ } \
+ } while (0)
+#else /* !CONFIG_IPIPE_TRACE_IRQSOFF */
+# define local_irq_disable_hw() local_irq_disable_hw_notrace()
+# define local_irq_enable_hw() local_irq_enable_hw_notrace()
+# define local_irq_save_hw(flags) local_irq_save_hw_notrace(flags)
+# define local_irq_restore_hw(flags) local_irq_restore_hw_notrace(flags)
+#endif /* !CONFIG_IPIPE_TRACE_IRQSOFF */
+
+#else /* CONFIG_IPIPE */
+
static inline void raw_local_irq_disable(void)
{
bfin_cli();
@@ -44,12 +188,6 @@
#define raw_irqs_disabled_flags(flags) (((flags) & ~0x3f) == 0)
-static inline void raw_local_irq_restore(unsigned long flags)
-{
- if (!raw_irqs_disabled_flags(flags))
- raw_local_irq_enable();
-}
-
static inline unsigned long __raw_local_irq_save(void)
{
unsigned long flags = bfin_cli();
@@ -60,4 +198,18 @@
}
#define raw_local_irq_save(flags) do { (flags) = __raw_local_irq_save(); } while (0)
+#define local_irq_save_hw(flags) raw_local_irq_save(flags)
+#define local_irq_restore_hw(flags) raw_local_irq_restore(flags)
+#define local_irq_enable_hw() raw_local_irq_enable()
+#define local_irq_disable_hw() raw_local_irq_disable()
+#define irqs_disabled_hw() irqs_disabled()
+
+#endif /* !CONFIG_IPIPE */
+
+static inline void raw_local_irq_restore(unsigned long flags)
+{
+ if (!raw_irqs_disabled_flags(flags))
+ raw_local_irq_enable();
+}
+
#endif
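The *_hw variants introduced here act on the real IMASK, while the raw_local_irq_* family only manipulates the I-pipe root domain's virtual state when CONFIG_IPIPE is set; __xchg() in system.h and the CPLB manager in this same patch rely on the hardware flavour. A hedged sketch of the usage pattern (the function itself is hypothetical):

/* Illustrative only: a critical section that must exclude even
 * head-domain (I-pipe) interrupts uses the _hw helpers. */
static inline unsigned long example_swap(volatile unsigned long *p, unsigned long v)
{
	unsigned long flags, old;

	local_irq_save_hw(flags);	/* masks hardware interrupts, traced if enabled */
	old = *p;
	*p = v;
	local_irq_restore_hw(flags);

	return old;
}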
diff --git a/arch/blackfin/include/asm/mem_init.h b/arch/blackfin/include/asm/mem_init.h
index 61f7487..4179e32 100644
--- a/arch/blackfin/include/asm/mem_init.h
+++ b/arch/blackfin/include/asm/mem_init.h
@@ -59,7 +59,7 @@
#define SDRAM_tRP TRP_1
#define SDRAM_tRP_num 1
#define SDRAM_tRAS TRAS_4
-#define SDRAM_tRAS_num 3
+#define SDRAM_tRAS_num 4
#define SDRAM_tRCD TRCD_1
#define SDRAM_tWR TWR_2
#endif
@@ -89,6 +89,85 @@
#endif
#endif
+/*
+ * The BF526-EZ-Board changed SDRAM chips between revisions,
+ * so we use the timings below to accommodate both.
+ */
+#if defined(CONFIG_MEM_MT48H32M16LFCJ_75)
+#if (CONFIG_SCLK_HZ > 119402985)
+#define SDRAM_tRP TRP_2
+#define SDRAM_tRP_num 2
+#define SDRAM_tRAS TRAS_8
+#define SDRAM_tRAS_num 8
+#define SDRAM_tRCD TRCD_2
+#define SDRAM_tWR TWR_2
+#endif
+#if (CONFIG_SCLK_HZ > 104477612) && (CONFIG_SCLK_HZ <= 119402985)
+#define SDRAM_tRP TRP_2
+#define SDRAM_tRP_num 2
+#define SDRAM_tRAS TRAS_7
+#define SDRAM_tRAS_num 7
+#define SDRAM_tRCD TRCD_2
+#define SDRAM_tWR TWR_2
+#endif
+#if (CONFIG_SCLK_HZ > 89552239) && (CONFIG_SCLK_HZ <= 104477612)
+#define SDRAM_tRP TRP_2
+#define SDRAM_tRP_num 2
+#define SDRAM_tRAS TRAS_6
+#define SDRAM_tRAS_num 6
+#define SDRAM_tRCD TRCD_2
+#define SDRAM_tWR TWR_2
+#endif
+#if (CONFIG_SCLK_HZ > 74626866) && (CONFIG_SCLK_HZ <= 89552239)
+#define SDRAM_tRP TRP_2
+#define SDRAM_tRP_num 2
+#define SDRAM_tRAS TRAS_5
+#define SDRAM_tRAS_num 5
+#define SDRAM_tRCD TRCD_2
+#define SDRAM_tWR TWR_2
+#endif
+#if (CONFIG_SCLK_HZ > 66666667) && (CONFIG_SCLK_HZ <= 74626866)
+#define SDRAM_tRP TRP_2
+#define SDRAM_tRP_num 2
+#define SDRAM_tRAS TRAS_4
+#define SDRAM_tRAS_num 4
+#define SDRAM_tRCD TRCD_2
+#define SDRAM_tWR TWR_2
+#endif
+#if (CONFIG_SCLK_HZ > 59701493) && (CONFIG_SCLK_HZ <= 66666667)
+#define SDRAM_tRP TRP_2
+#define SDRAM_tRP_num 2
+#define SDRAM_tRAS TRAS_4
+#define SDRAM_tRAS_num 4
+#define SDRAM_tRCD TRCD_1
+#define SDRAM_tWR TWR_2
+#endif
+#if (CONFIG_SCLK_HZ > 44776119) && (CONFIG_SCLK_HZ <= 59701493)
+#define SDRAM_tRP TRP_2
+#define SDRAM_tRP_num 2
+#define SDRAM_tRAS TRAS_3
+#define SDRAM_tRAS_num 3
+#define SDRAM_tRCD TRCD_1
+#define SDRAM_tWR TWR_2
+#endif
+#if (CONFIG_SCLK_HZ > 29850746) && (CONFIG_SCLK_HZ <= 44776119)
+#define SDRAM_tRP TRP_1
+#define SDRAM_tRP_num 1
+#define SDRAM_tRAS TRAS_3
+#define SDRAM_tRAS_num 3
+#define SDRAM_tRCD TRCD_1
+#define SDRAM_tWR TWR_2
+#endif
+#if (CONFIG_SCLK_HZ <= 29850746)
+#define SDRAM_tRP TRP_1
+#define SDRAM_tRP_num 1
+#define SDRAM_tRAS TRAS_2
+#define SDRAM_tRAS_num 2
+#define SDRAM_tRCD TRCD_1
+#define SDRAM_tWR TWR_2
+#endif
+#endif
+
#if defined(CONFIG_MEM_MT48LC16M8A2TG_75) || \
defined(CONFIG_MEM_MT48LC8M32B2B5_7)
/*SDRAM INFORMATION: */
@@ -109,6 +188,13 @@
#define SDRAM_CL CL_3
#endif
+#if defined(CONFIG_MEM_MT48H32M16LFCJ_75)
+ /*SDRAM INFORMATION: */
+#define SDRAM_Tref 64 /* Refresh period in milliseconds */
+#define SDRAM_NRA 8192 /* Number of row addresses in SDRAM */
+#define SDRAM_CL CL_2
+#endif
+
#ifdef CONFIG_BFIN_KERNEL_CLOCK_MEMINIT_CALC
/* Equation from section 17 (p17-46) of BF533 HRM */
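As a concrete reading of the new MT48H32M16LFCJ-75 table: assuming CONFIG_SCLK_HZ = 100000000 (an illustrative value, not taken from the patch), the 89552239 < SCLK <= 104477612 branch applies and the header resolves to:

/* Illustrative resolution only -- not part of the patch. */
#define SDRAM_tRP	TRP_2	/* 2 SCLK cycles of row precharge   */
#define SDRAM_tRP_num	2
#define SDRAM_tRAS	TRAS_6	/* 6 SCLK cycles of row active time */
#define SDRAM_tRAS_num	6
#define SDRAM_tRCD	TRCD_2
#define SDRAM_tWR	TWR_2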
diff --git a/arch/blackfin/include/asm/mem_map.h b/arch/blackfin/include/asm/mem_map.h
index e92b310..5e21627 100644
--- a/arch/blackfin/include/asm/mem_map.h
+++ b/arch/blackfin/include/asm/mem_map.h
@@ -1,87 +1,84 @@
/*
- * mem_map.h
- * Common header file for blackfin family of processors.
+ * Common Blackfin memory map
*
+ * Copyright 2004-2009 Analog Devices Inc.
+ * Licensed under the GPL-2 or later.
*/
-#ifndef _MEM_MAP_H_
-#define _MEM_MAP_H_
+#ifndef __BFIN_MEM_MAP_H__
+#define __BFIN_MEM_MAP_H__
#include <mach/mem_map.h>
-#ifndef __ASSEMBLY__
+/* Every Blackfin so far has MMRs like this */
+#ifndef COREMMR_BASE
+# define COREMMR_BASE 0xFFE00000
+#endif
+#ifndef SYSMMR_BASE
+# define SYSMMR_BASE 0xFFC00000
+#endif
-#ifdef CONFIG_SMP
-static inline ulong get_l1_scratch_start_cpu(int cpu)
-{
- return (cpu) ? COREB_L1_SCRATCH_START : COREA_L1_SCRATCH_START;
-}
-static inline ulong get_l1_code_start_cpu(int cpu)
-{
- return (cpu) ? COREB_L1_CODE_START : COREA_L1_CODE_START;
-}
-static inline ulong get_l1_data_a_start_cpu(int cpu)
-{
- return (cpu) ? COREB_L1_DATA_A_START : COREA_L1_DATA_A_START;
-}
-static inline ulong get_l1_data_b_start_cpu(int cpu)
-{
- return (cpu) ? COREB_L1_DATA_B_START : COREA_L1_DATA_B_START;
-}
+/* Every Blackfin so far has on-chip Scratch Pad SRAM like this */
+#ifndef L1_SCRATCH_START
+# define L1_SCRATCH_START 0xFFB00000
+# define L1_SCRATCH_LENGTH 0x1000
+#endif
-static inline ulong get_l1_scratch_start(void)
-{
- return get_l1_scratch_start_cpu(blackfin_core_id());
-}
-static inline ulong get_l1_code_start(void)
-{
- return get_l1_code_start_cpu(blackfin_core_id());
-}
-static inline ulong get_l1_data_a_start(void)
-{
- return get_l1_data_a_start_cpu(blackfin_core_id());
-}
-static inline ulong get_l1_data_b_start(void)
-{
- return get_l1_data_b_start_cpu(blackfin_core_id());
-}
+/* Most parts lack on-chip L2 SRAM */
+#ifndef L2_START
+# define L2_START 0
+# define L2_LENGTH 0
+#endif
-#else /* !CONFIG_SMP */
+/* Most parts lack on-chip L1 ROM */
+#ifndef L1_ROM_START
+# define L1_ROM_START 0
+# define L1_ROM_LENGTH 0
+#endif
-static inline ulong get_l1_scratch_start_cpu(int cpu)
+/* Allow wonky SMP ports to override this */
+#ifndef GET_PDA_SAFE
+# define GET_PDA_SAFE(preg) \
+ preg.l = _cpu_pda; \
+ preg.h = _cpu_pda;
+# define GET_PDA(preg, dreg) GET_PDA_SAFE(preg)
+
+# ifndef __ASSEMBLY__
+
+static inline unsigned long get_l1_scratch_start_cpu(int cpu)
{
return L1_SCRATCH_START;
}
-static inline ulong get_l1_code_start_cpu(int cpu)
+static inline unsigned long get_l1_code_start_cpu(int cpu)
{
return L1_CODE_START;
}
-static inline ulong get_l1_data_a_start_cpu(int cpu)
+static inline unsigned long get_l1_data_a_start_cpu(int cpu)
{
return L1_DATA_A_START;
}
-static inline ulong get_l1_data_b_start_cpu(int cpu)
+static inline unsigned long get_l1_data_b_start_cpu(int cpu)
{
return L1_DATA_B_START;
}
-static inline ulong get_l1_scratch_start(void)
+static inline unsigned long get_l1_scratch_start(void)
{
return get_l1_scratch_start_cpu(0);
}
-static inline ulong get_l1_code_start(void)
+static inline unsigned long get_l1_code_start(void)
{
return get_l1_code_start_cpu(0);
}
-static inline ulong get_l1_data_a_start(void)
+static inline unsigned long get_l1_data_a_start(void)
{
return get_l1_data_a_start_cpu(0);
}
-static inline ulong get_l1_data_b_start(void)
+static inline unsigned long get_l1_data_b_start(void)
{
return get_l1_data_b_start_cpu(0);
}
-#endif /* CONFIG_SMP */
-#endif /* __ASSEMBLY__ */
+# endif /* __ASSEMBLY__ */
+#endif /* !GET_PDA_SAFE */
-#endif /* _MEM_MAP_H_ */
+#endif
diff --git a/arch/blackfin/include/asm/system.h b/arch/blackfin/include/asm/system.h
index 294dbda..85e8f16 100644
--- a/arch/blackfin/include/asm/system.h
+++ b/arch/blackfin/include/asm/system.h
@@ -135,11 +135,13 @@
};
#define __xg(x) ((volatile struct __xchg_dummy *)(x))
+#include <mach/blackfin.h>
+
static inline unsigned long __xchg(unsigned long x, volatile void *ptr,
int size)
{
unsigned long tmp = 0;
- unsigned long flags = 0;
+ unsigned long flags;
local_irq_save_hw(flags);
diff --git a/arch/blackfin/include/asm/traps.h b/arch/blackfin/include/asm/traps.h
index 34f7295..3cdc454 100644
--- a/arch/blackfin/include/asm/traps.h
+++ b/arch/blackfin/include/asm/traps.h
@@ -111,9 +111,7 @@
level " bits in the Watchpoint Instruction Address Control register (WPIACTL) is set.\n"
#define EXC_0x2A(level) \
"Instruction fetch misaligned address violation\n" \
- level " - Attempted misaligned instruction cache fetch. On a misaligned instruction fetch\n" \
- level " exception, the return address provided in RETX is the destination address which is\n" \
- level " misaligned, rather than the address of the offending instruction.\n"
+ level " - Attempted misaligned instruction cache fetch.\n"
#define EXC_0x2B(level) \
"CPLB protection violation\n" \
level " - Illegal instruction fetch access (memory protection violation).\n"
diff --git a/arch/blackfin/include/asm/uaccess.h b/arch/blackfin/include/asm/uaccess.h
index 8894e9f..2f469a1 100644
--- a/arch/blackfin/include/asm/uaccess.h
+++ b/arch/blackfin/include/asm/uaccess.h
@@ -265,4 +265,26 @@
#define clear_user(to, n) __clear_user(to, n)
+/* How to interpret these return values:
+ * CORE: can be accessed by core load or dma memcpy
+ * CORE_ONLY: can only be accessed by core load
+ * DMA: can only be accessed by dma memcpy
+ * IDMA: can only be accessed by interprocessor dma memcpy (BF561)
+ * ITEST: can be accessed by isram memcpy or dma memcpy
+ */
+enum {
+ BFIN_MEM_ACCESS_CORE = 0,
+ BFIN_MEM_ACCESS_CORE_ONLY,
+ BFIN_MEM_ACCESS_DMA,
+ BFIN_MEM_ACCESS_IDMA,
+ BFIN_MEM_ACCESS_ITEST,
+};
+/**
+ * bfin_mem_access_type() - what kind of memory access is required
+ * @addr: the address to check
+ * @size: number of bytes needed
+ * @return: <0 is error, >=0 is BFIN_MEM_ACCESS_xxx enum (see above)
+ */
+int bfin_mem_access_type(unsigned long addr, unsigned long size);
+
#endif /* _BLACKFIN_UACCESS_H */
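The comment block above defines which transfer method each BFIN_MEM_ACCESS_* value allows; the kgdb and trap-handler changes later in this patch dispatch on the result roughly as sketched here (the wrapper name is hypothetical):

/* Illustrative only -- mirrors the dispatch done in kgdb.c and traps.c. */
static int example_safe_read(void *dst, void *src, int size)
{
	switch (bfin_mem_access_type((unsigned long)src, size)) {
	case BFIN_MEM_ACCESS_CORE:
	case BFIN_MEM_ACCESS_CORE_ONLY:
		return probe_kernel_read(dst, src, size);	/* plain core load */
	case BFIN_MEM_ACCESS_DMA:
		return dma_memcpy(dst, src, size) ? 0 : -EFAULT;
	case BFIN_MEM_ACCESS_ITEST:
		return isram_memcpy(dst, src, size) ? 0 : -EFAULT;
	default:
		return -EFAULT;	/* error, or a method this caller cannot use */
	}
}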
diff --git a/arch/blackfin/include/asm/unistd.h b/arch/blackfin/include/asm/unistd.h
index da35133..c8e7ee4 100644
--- a/arch/blackfin/include/asm/unistd.h
+++ b/arch/blackfin/include/asm/unistd.h
@@ -381,8 +381,9 @@
#define __NR_preadv 366
#define __NR_pwritev 367
#define __NR_rt_tgsigqueueinfo 368
+#define __NR_perf_counter_open 369
-#define __NR_syscall 369
+#define __NR_syscall 370
#define NR_syscalls __NR_syscall
/* Old optional stuff no one actually uses */
diff --git a/arch/blackfin/kernel/Makefile b/arch/blackfin/kernel/Makefile
index 3731088..141d928 100644
--- a/arch/blackfin/kernel/Makefile
+++ b/arch/blackfin/kernel/Makefile
@@ -20,7 +20,6 @@
CFLAGS_REMOVE_ftrace.o = -pg
obj-$(CONFIG_IPIPE) += ipipe.o
-obj-$(CONFIG_IPIPE_TRACE_MCOUNT) += mcount.o
obj-$(CONFIG_BFIN_GPTIMERS) += gptimers.o
obj-$(CONFIG_CPLB_INFO) += cplbinfo.o
obj-$(CONFIG_MODULES) += module.o
diff --git a/arch/blackfin/kernel/cplb-mpu/cplbinit.c b/arch/blackfin/kernel/cplb-mpu/cplbinit.c
index c006a44..36193ee 100644
--- a/arch/blackfin/kernel/cplb-mpu/cplbinit.c
+++ b/arch/blackfin/kernel/cplb-mpu/cplbinit.c
@@ -46,13 +46,13 @@
printk(KERN_INFO "MPU: setting up cplb tables with memory protection\n");
-#ifdef CONFIG_BFIN_ICACHE
+#ifdef CONFIG_BFIN_EXTMEM_ICACHEABLE
i_cache = CPLB_L1_CHBL | ANOMALY_05000158_WORKAROUND;
#endif
-#ifdef CONFIG_BFIN_DCACHE
+#ifdef CONFIG_BFIN_EXTMEM_DCACHEABLE
d_cache = CPLB_L1_CHBL;
-#ifdef CONFIG_BFIN_WT
+#ifdef CONFIG_BFIN_EXTMEM_WRITETHROUGH
d_cache |= CPLB_L1_AOW | CPLB_WT;
#endif
#endif
@@ -91,9 +91,9 @@
/* Cover L2 memory */
#if L2_LENGTH > 0
dcplb_tbl[cpu][i_d].addr = L2_START;
- dcplb_tbl[cpu][i_d++].data = L2_DMEMORY | PAGE_SIZE_1MB;
+ dcplb_tbl[cpu][i_d++].data = L2_DMEMORY;
icplb_tbl[cpu][i_i].addr = L2_START;
- icplb_tbl[cpu][i_i++].data = L2_IMEMORY | PAGE_SIZE_1MB;
+ icplb_tbl[cpu][i_i++].data = L2_IMEMORY;
#endif
first_mask_dcplb = i_d;
diff --git a/arch/blackfin/kernel/cplb-mpu/cplbmgr.c b/arch/blackfin/kernel/cplb-mpu/cplbmgr.c
index 784923e..bcdfe9b 100644
--- a/arch/blackfin/kernel/cplb-mpu/cplbmgr.c
+++ b/arch/blackfin/kernel/cplb-mpu/cplbmgr.c
@@ -150,15 +150,19 @@
nr_dcplb_miss[cpu]++;
d_data = CPLB_SUPV_WR | CPLB_VALID | CPLB_DIRTY | PAGE_SIZE_4KB;
-#ifdef CONFIG_BFIN_DCACHE
+#ifdef CONFIG_BFIN_EXTMEM_DCACHEABLE
if (bfin_addr_dcacheable(addr)) {
d_data |= CPLB_L1_CHBL | ANOMALY_05000158_WORKAROUND;
-#ifdef CONFIG_BFIN_WT
+# ifdef CONFIG_BFIN_EXTMEM_WRITETHROUGH
d_data |= CPLB_L1_AOW | CPLB_WT;
-#endif
+# endif
}
#endif
- if (addr >= physical_mem_end) {
+
+ if (L2_LENGTH && addr >= L2_START && addr < L2_START + L2_LENGTH) {
+ addr = L2_START;
+ d_data = L2_DMEMORY;
+ } else if (addr >= physical_mem_end) {
if (addr >= ASYNC_BANK0_BASE && addr < ASYNC_BANK3_BASE + ASYNC_BANK3_SIZE
&& (status & FAULT_USERSUPV)) {
addr &= ~0x3fffff;
@@ -235,7 +239,7 @@
i_data = CPLB_VALID | CPLB_PORTPRIO | PAGE_SIZE_4KB;
-#ifdef CONFIG_BFIN_ICACHE
+#ifdef CONFIG_BFIN_EXTMEM_ICACHEABLE
/*
* Normal RAM, and possibly the reserved memory area, are
* cacheable.
@@ -245,7 +249,10 @@
i_data |= CPLB_L1_CHBL | ANOMALY_05000158_WORKAROUND;
#endif
- if (addr >= physical_mem_end) {
+ if (L2_LENGTH && addr >= L2_START && addr < L2_START + L2_LENGTH) {
+ addr = L2_START;
+ i_data = L2_IMEMORY;
+ } else if (addr >= physical_mem_end) {
if (addr >= BOOT_ROM_START && addr < BOOT_ROM_START + BOOT_ROM_LENGTH
&& (status & FAULT_USERSUPV)) {
addr &= ~(1 * 1024 * 1024 - 1);
@@ -365,13 +372,18 @@
local_irq_save_hw(flags);
current_rwx_mask[cpu] = masks;
- d_data = CPLB_SUPV_WR | CPLB_VALID | CPLB_DIRTY | PAGE_SIZE_4KB;
-#ifdef CONFIG_BFIN_DCACHE
- d_data |= CPLB_L1_CHBL;
-#ifdef CONFIG_BFIN_WT
- d_data |= CPLB_L1_AOW | CPLB_WT;
+ if (L2_LENGTH && addr >= L2_START && addr < L2_START + L2_LENGTH) {
+ addr = L2_START;
+ d_data = L2_DMEMORY;
+ } else {
+ d_data = CPLB_SUPV_WR | CPLB_VALID | CPLB_DIRTY | PAGE_SIZE_4KB;
+#ifdef CONFIG_BFIN_EXTMEM_DCACHEABLE
+ d_data |= CPLB_L1_CHBL;
+# ifdef CONFIG_BFIN_EXTMEM_WRITETHROUGH
+ d_data |= CPLB_L1_AOW | CPLB_WT;
+# endif
#endif
-#endif
+ }
disable_dcplb();
for (i = first_mask_dcplb; i < first_switched_dcplb; i++) {
diff --git a/arch/blackfin/kernel/ipipe.c b/arch/blackfin/kernel/ipipe.c
index d8cde1f..b8d2203 100644
--- a/arch/blackfin/kernel/ipipe.c
+++ b/arch/blackfin/kernel/ipipe.c
@@ -52,7 +52,7 @@
atomic_t __ipipe_irq_lvdepth[IVG15 + 1];
-unsigned long __ipipe_irq_lvmask = __all_masked_irq_flags;
+unsigned long __ipipe_irq_lvmask = bfin_no_irqs;
EXPORT_SYMBOL(__ipipe_irq_lvmask);
static void __ipipe_ack_irq(unsigned irq, struct irq_desc *desc)
@@ -342,8 +342,3 @@
}
EXPORT_SYMBOL(show_stack);
-
-#ifdef CONFIG_IPIPE_TRACE_MCOUNT
-void notrace _mcount(void);
-EXPORT_SYMBOL(_mcount);
-#endif /* CONFIG_IPIPE_TRACE_MCOUNT */
diff --git a/arch/blackfin/kernel/irqchip.c b/arch/blackfin/kernel/irqchip.c
index 6e31e93..4b5fd36 100644
--- a/arch/blackfin/kernel/irqchip.c
+++ b/arch/blackfin/kernel/irqchip.c
@@ -38,38 +38,15 @@
#include <asm/pda.h>
static atomic_t irq_err_count;
-static spinlock_t irq_controller_lock;
-
-/*
- * Dummy mask/unmask handler
- */
-void dummy_mask_unmask_irq(unsigned int irq)
-{
-}
-
void ack_bad_irq(unsigned int irq)
{
atomic_inc(&irq_err_count);
printk(KERN_ERR "IRQ: spurious interrupt %d\n", irq);
}
-static struct irq_chip bad_chip = {
- .ack = dummy_mask_unmask_irq,
- .mask = dummy_mask_unmask_irq,
- .unmask = dummy_mask_unmask_irq,
-};
-
-static int bad_stats;
static struct irq_desc bad_irq_desc = {
- .status = IRQ_DISABLED,
- .chip = &bad_chip,
.handle_irq = handle_bad_irq,
- .depth = 1,
.lock = __SPIN_LOCK_UNLOCKED(irq_desc->lock),
- .kstat_irqs = &bad_stats,
-#ifdef CONFIG_SMP
- .affinity = CPU_MASK_ALL
-#endif
};
#ifdef CONFIG_CPUMASK_OFFSTACK
@@ -77,6 +54,7 @@
#error "Blackfin architecture does not support CONFIG_CPUMASK_OFFSTACK."
#endif
+#ifdef CONFIG_PROC_FS
int show_interrupts(struct seq_file *p, void *v)
{
int i = *(loff_t *) v, j;
@@ -108,50 +86,29 @@
}
return 0;
}
-
-/*
- * do_IRQ handles all hardware IRQs. Decoded IRQs should not
- * come via this function. Instead, they should provide their
- * own 'handler'
- */
-#ifdef CONFIG_DO_IRQ_L1
-__attribute__((l1_text))
#endif
-asmlinkage void asm_do_IRQ(unsigned int irq, struct pt_regs *regs)
-{
- struct pt_regs *old_regs;
- struct irq_desc *desc = irq_desc + irq;
-#ifndef CONFIG_IPIPE
- unsigned short pending, other_ints;
-#endif
- old_regs = set_irq_regs(regs);
- /*
- * Some hardware gives randomly wrong interrupts. Rather
- * than crashing, do something sensible.
- */
- if (irq >= NR_IRQS)
- desc = &bad_irq_desc;
-
- irq_enter();
#ifdef CONFIG_DEBUG_STACKOVERFLOW
+static void check_stack_overflow(int irq)
+{
/* Debugging check for stack overflow: is there less than STACK_WARN free? */
- {
- long sp;
+ long sp = __get_SP() & (THREAD_SIZE - 1);
- sp = __get_SP() & (THREAD_SIZE-1);
-
- if (unlikely(sp < (sizeof(struct thread_info) + STACK_WARN))) {
- dump_stack();
- printk(KERN_EMERG "%s: possible stack overflow while handling irq %i "
- " only %ld bytes free\n",
- __func__, irq, sp - sizeof(struct thread_info));
- }
+ if (unlikely(sp < (sizeof(struct thread_info) + STACK_WARN))) {
+ dump_stack();
+ pr_emerg("irq%i: possible stack overflow only %ld bytes free\n",
+ irq, sp - sizeof(struct thread_info));
}
+}
+#else
+static inline void check_stack_overflow(int irq) { }
#endif
- generic_handle_irq(irq);
#ifndef CONFIG_IPIPE
+static void maybe_lower_to_irq14(void)
+{
+ unsigned short pending, other_ints;
+
/*
* If we're the only interrupt running (ignoring IRQ15 which
* is for syscalls), lower our priority to IRQ14 so that
@@ -165,7 +122,38 @@
other_ints = pending & (pending - 1);
if (other_ints == 0)
lower_to_irq14();
-#endif /* !CONFIG_IPIPE */
+}
+#else
+static inline void maybe_lower_to_irq14(void) { }
+#endif
+
+/*
+ * do_IRQ handles all hardware IRQs. Decoded IRQs should not
+ * come via this function. Instead, they should provide their
+ * own 'handler'
+ */
+#ifdef CONFIG_DO_IRQ_L1
+__attribute__((l1_text))
+#endif
+asmlinkage void asm_do_IRQ(unsigned int irq, struct pt_regs *regs)
+{
+ struct pt_regs *old_regs = set_irq_regs(regs);
+
+ irq_enter();
+
+ check_stack_overflow(irq);
+
+ /*
+ * Some hardware gives randomly wrong interrupts. Rather
+ * than crashing, do something sensible.
+ */
+ if (irq >= NR_IRQS)
+ handle_bad_irq(irq, &bad_irq_desc);
+ else
+ generic_handle_irq(irq);
+
+ maybe_lower_to_irq14();
+
irq_exit();
set_irq_regs(old_regs);
@@ -173,14 +161,6 @@
void __init init_IRQ(void)
{
- struct irq_desc *desc;
- int irq;
-
- spin_lock_init(&irq_controller_lock);
- for (irq = 0, desc = irq_desc; irq < NR_IRQS; irq++, desc++) {
- *desc = bad_irq_desc;
- }
-
init_arch_irq();
#ifdef CONFIG_DEBUG_BFIN_HWTRACE_EXPAND
diff --git a/arch/blackfin/kernel/kgdb.c b/arch/blackfin/kernel/kgdb.c
index da28f79..cce79d0 100644
--- a/arch/blackfin/kernel/kgdb.c
+++ b/arch/blackfin/kernel/kgdb.c
@@ -34,15 +34,6 @@
#error change the definition of slavecpulocks
#endif
-#define IN_MEM(addr, size, l1_addr, l1_size) \
-({ \
- unsigned long __addr = (unsigned long)(addr); \
- (l1_size && __addr >= l1_addr && __addr + (size) <= l1_addr + l1_size); \
-})
-#define ASYNC_BANK_SIZE \
- (ASYNC_BANK0_SIZE + ASYNC_BANK1_SIZE + \
- ASYNC_BANK2_SIZE + ASYNC_BANK3_SIZE)
-
void pt_regs_to_gdb_regs(unsigned long *gdb_regs, struct pt_regs *regs)
{
gdb_regs[BFIN_R0] = regs->r0;
@@ -463,40 +454,87 @@
static int validate_memory_access_address(unsigned long addr, int size)
{
- int cpu = raw_smp_processor_id();
-
- if (size < 0)
+ if (size < 0 || addr == 0)
return -EFAULT;
- if (addr >= 0x1000 && (addr + size) <= physical_mem_end)
- return 0;
- if (addr >= SYSMMR_BASE)
- return 0;
- if (IN_MEM(addr, size, ASYNC_BANK0_BASE, ASYNC_BANK_SIZE))
- return 0;
- if (cpu == 0) {
- if (IN_MEM(addr, size, L1_SCRATCH_START, L1_SCRATCH_LENGTH))
+ return bfin_mem_access_type(addr, size);
+}
+
+static int bfin_probe_kernel_read(char *dst, char *src, int size)
+{
+ unsigned long lsrc = (unsigned long)src;
+ int mem_type;
+
+ mem_type = validate_memory_access_address(lsrc, size);
+ if (mem_type < 0)
+ return mem_type;
+
+ if (lsrc >= SYSMMR_BASE) {
+ if (size == 2 && lsrc % 2 == 0) {
+ u16 mmr = bfin_read16(src);
+ memcpy(dst, &mmr, sizeof(mmr));
return 0;
- if (IN_MEM(addr, size, L1_CODE_START, L1_CODE_LENGTH))
+ } else if (size == 4 && lsrc % 4 == 0) {
+ u32 mmr = bfin_read32(src);
+ memcpy(dst, &mmr, sizeof(mmr));
return 0;
- if (IN_MEM(addr, size, L1_DATA_A_START, L1_DATA_A_LENGTH))
- return 0;
- if (IN_MEM(addr, size, L1_DATA_B_START, L1_DATA_B_LENGTH))
- return 0;
-#ifdef CONFIG_SMP
- } else if (cpu == 1) {
- if (IN_MEM(addr, size, COREB_L1_SCRATCH_START, L1_SCRATCH_LENGTH))
- return 0;
- if (IN_MEM(addr, size, COREB_L1_CODE_START, L1_CODE_LENGTH))
- return 0;
- if (IN_MEM(addr, size, COREB_L1_DATA_A_START, L1_DATA_A_LENGTH))
- return 0;
- if (IN_MEM(addr, size, COREB_L1_DATA_B_START, L1_DATA_B_LENGTH))
- return 0;
-#endif
+ }
+ } else {
+ switch (mem_type) {
+ case BFIN_MEM_ACCESS_CORE:
+ case BFIN_MEM_ACCESS_CORE_ONLY:
+ return probe_kernel_read(dst, src, size);
+ /* XXX: should support IDMA here with SMP */
+ case BFIN_MEM_ACCESS_DMA:
+ if (dma_memcpy(dst, src, size))
+ return 0;
+ break;
+ case BFIN_MEM_ACCESS_ITEST:
+ if (isram_memcpy(dst, src, size))
+ return 0;
+ break;
+ }
}
- if (IN_MEM(addr, size, L2_START, L2_LENGTH))
- return 0;
+ return -EFAULT;
+}
+
+static int bfin_probe_kernel_write(char *dst, char *src, int size)
+{
+ unsigned long ldst = (unsigned long)dst;
+ int mem_type;
+
+ mem_type = validate_memory_access_address(ldst, size);
+ if (mem_type < 0)
+ return mem_type;
+
+ if (ldst >= SYSMMR_BASE) {
+ if (size == 2 && ldst % 2 == 0) {
+ u16 mmr;
+ memcpy(&mmr, src, sizeof(mmr));
+ bfin_write16(dst, mmr);
+ return 0;
+ } else if (size == 4 && ldst % 4 == 0) {
+ u32 mmr;
+ memcpy(&mmr, src, sizeof(mmr));
+ bfin_write32(dst, mmr);
+ return 0;
+ }
+ } else {
+ switch (mem_type) {
+ case BFIN_MEM_ACCESS_CORE:
+ case BFIN_MEM_ACCESS_CORE_ONLY:
+ return probe_kernel_write(dst, src, size);
+ /* XXX: should support IDMA here with SMP */
+ case BFIN_MEM_ACCESS_DMA:
+ if (dma_memcpy(dst, src, size))
+ return 0;
+ break;
+ case BFIN_MEM_ACCESS_ITEST:
+ if (isram_memcpy(dst, src, size))
+ return 0;
+ break;
+ }
+ }
return -EFAULT;
}
@@ -509,14 +547,6 @@
{
char *tmp;
int err;
- unsigned char *pch;
- unsigned short mmr16;
- unsigned long mmr32;
- int cpu = raw_smp_processor_id();
-
- err = validate_memory_access_address((unsigned long)mem, count);
- if (err)
- return err;
/*
* We use the upper half of buf as an intermediate buffer for the
@@ -524,44 +554,7 @@
*/
tmp = buf + count;
- if ((unsigned int)mem >= SYSMMR_BASE) { /*access MMR registers*/
- switch (count) {
- case 2:
- if ((unsigned int)mem % 2 == 0) {
- mmr16 = *(unsigned short *)mem;
- pch = (unsigned char *)&mmr16;
- *tmp++ = *pch++;
- *tmp++ = *pch++;
- tmp -= 2;
- } else
- err = -EFAULT;
- break;
- case 4:
- if ((unsigned int)mem % 4 == 0) {
- mmr32 = *(unsigned long *)mem;
- pch = (unsigned char *)&mmr32;
- *tmp++ = *pch++;
- *tmp++ = *pch++;
- *tmp++ = *pch++;
- *tmp++ = *pch++;
- tmp -= 4;
- } else
- err = -EFAULT;
- break;
- default:
- err = -EFAULT;
- }
- } else if ((cpu == 0 && IN_MEM(mem, count, L1_CODE_START, L1_CODE_LENGTH))
-#ifdef CONFIG_SMP
- || (cpu == 1 && IN_MEM(mem, count, COREB_L1_CODE_START, L1_CODE_LENGTH))
-#endif
- ) {
- /* access L1 instruction SRAM*/
- if (dma_memcpy(tmp, mem, count) == NULL)
- err = -EFAULT;
- } else
- err = probe_kernel_read(tmp, mem, count);
-
+ err = bfin_probe_kernel_read(tmp, mem, count);
if (!err) {
while (count > 0) {
buf = pack_hex_byte(buf, *tmp);
@@ -582,13 +575,8 @@
*/
int kgdb_ebin2mem(char *buf, char *mem, int count)
{
- char *tmp_old;
- char *tmp_new;
- unsigned short *mmr16;
- unsigned long *mmr32;
- int err;
+ char *tmp_old, *tmp_new;
int size;
- int cpu = raw_smp_processor_id();
tmp_old = tmp_new = buf;
@@ -601,41 +589,7 @@
tmp_old++;
}
- err = validate_memory_access_address((unsigned long)mem, size);
- if (err)
- return err;
-
- if ((unsigned int)mem >= SYSMMR_BASE) { /*access MMR registers*/
- switch (size) {
- case 2:
- if ((unsigned int)mem % 2 == 0) {
- mmr16 = (unsigned short *)buf;
- *(unsigned short *)mem = *mmr16;
- } else
- err = -EFAULT;
- break;
- case 4:
- if ((unsigned int)mem % 4 == 0) {
- mmr32 = (unsigned long *)buf;
- *(unsigned long *)mem = *mmr32;
- } else
- err = -EFAULT;
- break;
- default:
- err = -EFAULT;
- }
- } else if ((cpu == 0 && IN_MEM(mem, count, L1_CODE_START, L1_CODE_LENGTH))
-#ifdef CONFIG_SMP
- || (cpu == 1 && IN_MEM(mem, count, COREB_L1_CODE_START, L1_CODE_LENGTH))
-#endif
- ) {
- /* access L1 instruction SRAM */
- if (dma_memcpy(mem, buf, size) == NULL)
- err = -EFAULT;
- } else
- err = probe_kernel_write(mem, buf, size);
-
- return err;
+ return bfin_probe_kernel_write(mem, buf, count);
}
/*
@@ -645,16 +599,7 @@
*/
int kgdb_hex2mem(char *buf, char *mem, int count)
{
- char *tmp_raw;
- char *tmp_hex;
- unsigned short *mmr16;
- unsigned long *mmr32;
- int err;
- int cpu = raw_smp_processor_id();
-
- err = validate_memory_access_address((unsigned long)mem, count);
- if (err)
- return err;
+ char *tmp_raw, *tmp_hex;
/*
* We use the upper half of buf as an intermediate buffer for the
@@ -669,39 +614,18 @@
*tmp_raw |= hex(*tmp_hex--) << 4;
}
- if ((unsigned int)mem >= SYSMMR_BASE) { /*access MMR registers*/
- switch (count) {
- case 2:
- if ((unsigned int)mem % 2 == 0) {
- mmr16 = (unsigned short *)tmp_raw;
- *(unsigned short *)mem = *mmr16;
- } else
- err = -EFAULT;
- break;
- case 4:
- if ((unsigned int)mem % 4 == 0) {
- mmr32 = (unsigned long *)tmp_raw;
- *(unsigned long *)mem = *mmr32;
- } else
- err = -EFAULT;
- break;
- default:
- err = -EFAULT;
- }
- } else if ((cpu == 0 && IN_MEM(mem, count, L1_CODE_START, L1_CODE_LENGTH))
-#ifdef CONFIG_SMP
- || (cpu == 1 && IN_MEM(mem, count, COREB_L1_CODE_START, L1_CODE_LENGTH))
-#endif
- ) {
- /* access L1 instruction SRAM */
- if (dma_memcpy(mem, tmp_raw, count) == NULL)
- err = -EFAULT;
- } else
- err = probe_kernel_write(mem, tmp_raw, count);
-
- return err;
+ return bfin_probe_kernel_write(mem, tmp_raw, count);
}
+#define IN_MEM(addr, size, l1_addr, l1_size) \
+({ \
+ unsigned long __addr = (unsigned long)(addr); \
+ (l1_size && __addr >= l1_addr && __addr + (size) <= l1_addr + l1_size); \
+})
+#define ASYNC_BANK_SIZE \
+ (ASYNC_BANK0_SIZE + ASYNC_BANK1_SIZE + \
+ ASYNC_BANK2_SIZE + ASYNC_BANK3_SIZE)
+
int kgdb_validate_break_address(unsigned long addr)
{
int cpu = raw_smp_processor_id();
@@ -724,46 +648,17 @@
int kgdb_arch_set_breakpoint(unsigned long addr, char *saved_instr)
{
- int err;
- int cpu = raw_smp_processor_id();
-
- if ((cpu == 0 && IN_MEM(addr, BREAK_INSTR_SIZE, L1_CODE_START, L1_CODE_LENGTH))
-#ifdef CONFIG_SMP
- || (cpu == 1 && IN_MEM(addr, BREAK_INSTR_SIZE, COREB_L1_CODE_START, L1_CODE_LENGTH))
-#endif
- ) {
- /* access L1 instruction SRAM */
- if (dma_memcpy(saved_instr, (void *)addr, BREAK_INSTR_SIZE)
- == NULL)
- return -EFAULT;
-
- if (dma_memcpy((void *)addr, arch_kgdb_ops.gdb_bpt_instr,
- BREAK_INSTR_SIZE) == NULL)
- return -EFAULT;
-
- return 0;
- } else {
- err = probe_kernel_read(saved_instr, (char *)addr,
- BREAK_INSTR_SIZE);
- if (err)
- return err;
-
- return probe_kernel_write((char *)addr,
- arch_kgdb_ops.gdb_bpt_instr, BREAK_INSTR_SIZE);
- }
+ int err = bfin_probe_kernel_read(saved_instr, (char *)addr,
+ BREAK_INSTR_SIZE);
+ if (err)
+ return err;
+ return bfin_probe_kernel_write((char *)addr, arch_kgdb_ops.gdb_bpt_instr,
+ BREAK_INSTR_SIZE);
}
int kgdb_arch_remove_breakpoint(unsigned long addr, char *bundle)
{
- if (IN_MEM(addr, BREAK_INSTR_SIZE, L1_CODE_START, L1_CODE_LENGTH)) {
- /* access L1 instruction SRAM */
- if (dma_memcpy((void *)addr, bundle, BREAK_INSTR_SIZE) == NULL)
- return -EFAULT;
-
- return 0;
- } else
- return probe_kernel_write((char *)addr,
- (char *)bundle, BREAK_INSTR_SIZE);
+ return bfin_probe_kernel_write((char *)addr, bundle, BREAK_INSTR_SIZE);
}
int kgdb_arch_init(void)
diff --git a/arch/blackfin/kernel/mcount.S b/arch/blackfin/kernel/mcount.S
deleted file mode 100644
index edcfb38..0000000
--- a/arch/blackfin/kernel/mcount.S
+++ /dev/null
@@ -1,70 +0,0 @@
-/*
- * linux/arch/blackfin/mcount.S
- *
- * Copyright (C) 2006 Analog Devices Inc.
- *
- * 2007/04/12 Save index, length, modify and base registers. --rpm
- */
-
-#include <linux/linkage.h>
-#include <asm/blackfin.h>
-
-.text
-
-.align 4 /* just in case */
-
-ENTRY(__mcount)
- [--sp] = i0;
- [--sp] = i1;
- [--sp] = i2;
- [--sp] = i3;
- [--sp] = l0;
- [--sp] = l1;
- [--sp] = l2;
- [--sp] = l3;
- [--sp] = m0;
- [--sp] = m1;
- [--sp] = m2;
- [--sp] = m3;
- [--sp] = b0;
- [--sp] = b1;
- [--sp] = b2;
- [--sp] = b3;
- [--sp] = ( r7:0, p5:0 );
- [--sp] = ASTAT;
-
- p1.L = _ipipe_trace_enable;
- p1.H = _ipipe_trace_enable;
- r7 = [p1];
- CC = r7 == 0;
- if CC jump out;
- link 0x10;
- r0 = 0x0;
- [sp + 0xc] = r0; /* v */
- r0 = 0x0; /* type: IPIPE_TRACE_FN */
- r1 = rets;
- p0 = [fp]; /* p0: Prior FP */
- r2 = [p0 + 4]; /* r2: Prior RETS */
- call ___ipipe_trace;
- unlink;
-out:
- ASTAT = [sp++];
- ( r7:0, p5:0 ) = [sp++];
- b3 = [sp++];
- b2 = [sp++];
- b1 = [sp++];
- b0 = [sp++];
- m3 = [sp++];
- m2 = [sp++];
- m1 = [sp++];
- m0 = [sp++];
- l3 = [sp++];
- l2 = [sp++];
- l1 = [sp++];
- l0 = [sp++];
- i3 = [sp++];
- i2 = [sp++];
- i1 = [sp++];
- i0 = [sp++];
- rts;
-ENDPROC(__mcount)
diff --git a/arch/blackfin/kernel/process.c b/arch/blackfin/kernel/process.c
index 3e1d86e..79cad0a 100644
--- a/arch/blackfin/kernel/process.c
+++ b/arch/blackfin/kernel/process.c
@@ -344,6 +344,87 @@
}
}
+static inline
+int in_mem(unsigned long addr, unsigned long size,
+ unsigned long start, unsigned long end)
+{
+ return addr >= start && addr + size <= end;
+}
+static inline
+int in_mem_const_off(unsigned long addr, unsigned long size, unsigned long off,
+ unsigned long const_addr, unsigned long const_size)
+{
+ return const_size &&
+ in_mem(addr, size, const_addr + off, const_addr + const_size);
+}
+static inline
+int in_mem_const(unsigned long addr, unsigned long size,
+ unsigned long const_addr, unsigned long const_size)
+{
+ return in_mem_const_off(addr, size, 0, const_addr, const_size);
+}
+#define IN_ASYNC(bnum, bctlnum) \
+({ \
+ (bfin_read_EBIU_AMGCTL() & 0xe) < ((bnum + 1) << 1) ? -EFAULT : \
+ bfin_read_EBIU_AMBCTL##bctlnum() & B##bnum##RDYEN ? -EFAULT : \
+ BFIN_MEM_ACCESS_CORE; \
+})
+
+int bfin_mem_access_type(unsigned long addr, unsigned long size)
+{
+ int cpu = raw_smp_processor_id();
+
+ /* Check that things do not wrap around */
+ if (addr > ULONG_MAX - size)
+ return -EFAULT;
+
+ if (in_mem(addr, size, FIXED_CODE_START, physical_mem_end))
+ return BFIN_MEM_ACCESS_CORE;
+
+ if (in_mem_const(addr, size, L1_CODE_START, L1_CODE_LENGTH))
+ return cpu == 0 ? BFIN_MEM_ACCESS_ITEST : BFIN_MEM_ACCESS_IDMA;
+ if (in_mem_const(addr, size, L1_SCRATCH_START, L1_SCRATCH_LENGTH))
+ return cpu == 0 ? BFIN_MEM_ACCESS_CORE_ONLY : -EFAULT;
+ if (in_mem_const(addr, size, L1_DATA_A_START, L1_DATA_A_LENGTH))
+ return cpu == 0 ? BFIN_MEM_ACCESS_CORE : BFIN_MEM_ACCESS_IDMA;
+ if (in_mem_const(addr, size, L1_DATA_B_START, L1_DATA_B_LENGTH))
+ return cpu == 0 ? BFIN_MEM_ACCESS_CORE : BFIN_MEM_ACCESS_IDMA;
+#ifdef COREB_L1_CODE_START
+ if (in_mem_const(addr, size, COREB_L1_CODE_START, L1_CODE_LENGTH))
+ return cpu == 1 ? BFIN_MEM_ACCESS_ITEST : BFIN_MEM_ACCESS_IDMA;
+ if (in_mem_const(addr, size, COREB_L1_SCRATCH_START, L1_SCRATCH_LENGTH))
+ return cpu == 1 ? BFIN_MEM_ACCESS_CORE_ONLY : -EFAULT;
+ if (in_mem_const(addr, size, COREB_L1_DATA_A_START, L1_DATA_A_LENGTH))
+ return cpu == 1 ? BFIN_MEM_ACCESS_CORE : BFIN_MEM_ACCESS_IDMA;
+ if (in_mem_const(addr, size, COREB_L1_DATA_B_START, L1_DATA_B_LENGTH))
+ return cpu == 1 ? BFIN_MEM_ACCESS_CORE : BFIN_MEM_ACCESS_IDMA;
+#endif
+ if (in_mem_const(addr, size, L2_START, L2_LENGTH))
+ return BFIN_MEM_ACCESS_CORE;
+
+ if (addr >= SYSMMR_BASE)
+ return BFIN_MEM_ACCESS_CORE_ONLY;
+
+ /* We can't read EBIU banks that aren't enabled or we end up hanging
+ * on the access to the async space.
+ */
+ if (in_mem_const(addr, size, ASYNC_BANK0_BASE, ASYNC_BANK0_SIZE))
+ return IN_ASYNC(0, 0);
+ if (in_mem_const(addr, size, ASYNC_BANK1_BASE, ASYNC_BANK1_SIZE))
+ return IN_ASYNC(1, 0);
+ if (in_mem_const(addr, size, ASYNC_BANK2_BASE, ASYNC_BANK2_SIZE))
+ return IN_ASYNC(2, 1);
+ if (in_mem_const(addr, size, ASYNC_BANK3_BASE, ASYNC_BANK3_SIZE))
+ return IN_ASYNC(3, 1);
+
+ if (in_mem_const(addr, size, BOOT_ROM_START, BOOT_ROM_LENGTH))
+ return BFIN_MEM_ACCESS_CORE;
+ if (in_mem_const(addr, size, L1_ROM_START, L1_ROM_LENGTH))
+ return BFIN_MEM_ACCESS_DMA;
+
+ return -EFAULT;
+}
+
#if defined(CONFIG_ACCESS_CHECK)
#ifdef CONFIG_ACCESS_OK_L1
__attribute__((l1_text))
@@ -353,51 +434,61 @@
{
if (size == 0)
return 1;
- if (addr > (addr + size))
+ /* Check that things do not wrap around */
+ if (addr > ULONG_MAX - size)
return 0;
if (segment_eq(get_fs(), KERNEL_DS))
return 1;
#ifdef CONFIG_MTD_UCLINUX
- if (addr >= memory_start && (addr + size) <= memory_end)
- return 1;
- if (addr >= memory_mtd_end && (addr + size) <= physical_mem_end)
+ if (1)
+#else
+ if (0)
+#endif
+ {
+ if (in_mem(addr, size, memory_start, memory_end))
+ return 1;
+ if (in_mem(addr, size, memory_mtd_end, physical_mem_end))
+ return 1;
+# ifndef CONFIG_ROMFS_ON_MTD
+ if (0)
+# endif
+ /* For XIP, allow user space to use pointers within the ROMFS. */
+ if (in_mem(addr, size, memory_mtd_start, memory_mtd_end))
+ return 1;
+ } else {
+ if (in_mem(addr, size, memory_start, physical_mem_end))
+ return 1;
+ }
+
+ if (in_mem(addr, size, (unsigned long)__init_begin, (unsigned long)__init_end))
return 1;
-#ifdef CONFIG_ROMFS_ON_MTD
- /* For XIP, allow user space to use pointers within the ROMFS. */
- if (addr >= memory_mtd_start && (addr + size) <= memory_mtd_end)
+ if (in_mem_const(addr, size, L1_CODE_START, L1_CODE_LENGTH))
+ return 1;
+ if (in_mem_const_off(addr, size, _etext_l1 - _stext_l1, L1_CODE_START, L1_CODE_LENGTH))
+ return 1;
+ if (in_mem_const_off(addr, size, _ebss_l1 - _sdata_l1, L1_DATA_A_START, L1_DATA_A_LENGTH))
+ return 1;
+ if (in_mem_const_off(addr, size, _ebss_b_l1 - _sdata_b_l1, L1_DATA_B_START, L1_DATA_B_LENGTH))
+ return 1;
+#ifdef COREB_L1_CODE_START
+ if (in_mem_const(addr, size, COREB_L1_CODE_START, L1_CODE_LENGTH))
+ return 1;
+ if (in_mem_const(addr, size, COREB_L1_SCRATCH_START, L1_SCRATCH_LENGTH))
+ return 1;
+ if (in_mem_const(addr, size, COREB_L1_DATA_A_START, L1_DATA_A_LENGTH))
+ return 1;
+ if (in_mem_const(addr, size, COREB_L1_DATA_B_START, L1_DATA_B_LENGTH))
return 1;
#endif
-#else
- if (addr >= memory_start && (addr + size) <= physical_mem_end)
+ if (in_mem_const_off(addr, size, _ebss_l2 - _stext_l2, L2_START, L2_LENGTH))
return 1;
-#endif
- if (addr >= (unsigned long)__init_begin &&
- addr + size <= (unsigned long)__init_end)
+
+ if (in_mem_const(addr, size, BOOT_ROM_START, BOOT_ROM_LENGTH))
return 1;
- if (addr >= get_l1_scratch_start()
- && addr + size <= get_l1_scratch_start() + L1_SCRATCH_LENGTH)
+ if (in_mem_const(addr, size, L1_ROM_START, L1_ROM_LENGTH))
return 1;
-#if L1_CODE_LENGTH != 0
- if (addr >= get_l1_code_start() + (_etext_l1 - _stext_l1)
- && addr + size <= get_l1_code_start() + L1_CODE_LENGTH)
- return 1;
-#endif
-#if L1_DATA_A_LENGTH != 0
- if (addr >= get_l1_data_a_start() + (_ebss_l1 - _sdata_l1)
- && addr + size <= get_l1_data_a_start() + L1_DATA_A_LENGTH)
- return 1;
-#endif
-#if L1_DATA_B_LENGTH != 0
- if (addr >= get_l1_data_b_start() + (_ebss_b_l1 - _sdata_b_l1)
- && addr + size <= get_l1_data_b_start() + L1_DATA_B_LENGTH)
- return 1;
-#endif
-#if L2_LENGTH != 0
- if (addr >= L2_START + (_ebss_l2 - _stext_l2)
- && addr + size <= L2_START + L2_LENGTH)
- return 1;
-#endif
+
return 0;
}
EXPORT_SYMBOL(_access_ok);
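To make the role of the 'off' argument in in_mem_const_off() concrete, a worked example with assumed numbers (none of these values come from the patch): take L1_CODE_START = 0xFFA00000, L1_CODE_LENGTH = 0x8000, and 0x2000 bytes of resident L1 kernel text, i.e. _etext_l1 - _stext_l1 = 0x2000.

/* Illustrative only -- assumed values, not from the patch.
 * in_mem_const_off(addr, size, off, const_addr, const_size) checks
 *   addr >= const_addr + off  &&  addr + size <= const_addr + const_size
 */
static int example_user_can_touch_l1_text(unsigned long addr, unsigned long size)
{
	/* With the assumed values this accepts only accesses wholly inside
	 * [0xFFA02000, 0xFFA08000), i.e. the part of the L1 instruction
	 * SRAM not occupied by resident kernel text. */
	return in_mem_const_off(addr, size, 0x2000, 0xFFA00000, 0x8000);
}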
diff --git a/arch/blackfin/kernel/setup.c b/arch/blackfin/kernel/setup.c
index 6454bab..298f023 100644
--- a/arch/blackfin/kernel/setup.c
+++ b/arch/blackfin/kernel/setup.c
@@ -117,15 +117,49 @@
*/
#ifdef CONFIG_BFIN_ICACHE
printk(KERN_INFO "Instruction Cache Enabled for CPU%u\n", cpu);
-#endif
-#ifdef CONFIG_BFIN_DCACHE
- printk(KERN_INFO "Data Cache Enabled for CPU%u"
-# if defined CONFIG_BFIN_WB
- " (write-back)"
-# elif defined CONFIG_BFIN_WT
- " (write-through)"
+ printk(KERN_INFO " External memory:"
+# ifdef CONFIG_BFIN_EXTMEM_ICACHEABLE
+ " cacheable"
+# else
+ " uncacheable"
# endif
- "\n", cpu);
+ " in instruction cache\n");
+ if (L2_LENGTH)
+ printk(KERN_INFO " L2 SRAM :"
+# ifdef CONFIG_BFIN_L2_ICACHEABLE
+ " cacheable"
+# else
+ " uncacheable"
+# endif
+ " in instruction cache\n");
+
+#else
+ printk(KERN_INFO "Instruction Cache Disabled for CPU%u\n", cpu);
+#endif
+
+#ifdef CONFIG_BFIN_DCACHE
+ printk(KERN_INFO "Data Cache Enabled for CPU%u\n", cpu);
+ printk(KERN_INFO " External memory:"
+# if defined CONFIG_BFIN_EXTMEM_WRITEBACK
+ " cacheable (write-back)"
+# elif defined CONFIG_BFIN_EXTMEM_WRITETHROUGH
+ " cacheable (write-through)"
+# else
+ " uncacheable"
+# endif
+ " in data cache\n");
+ if (L2_LENGTH)
+ printk(KERN_INFO " L2 SRAM :"
+# if defined CONFIG_BFIN_L2_WRITEBACK
+ " cacheable (write-back)"
+# elif defined CONFIG_BFIN_L2_WRITETHROUGH
+ " cacheable (write-through)"
+# else
+ " uncacheable"
+# endif
+ " in data cache\n");
+#else
+ printk(KERN_INFO "Data Cache Disabled for CPU%u\n", cpu);
#endif
}
@@ -443,9 +477,11 @@
} else if (!memcmp(to, "clkin_hz=", 9)) {
to += 9;
early_init_clkin_hz(to);
+#ifdef CONFIG_EARLY_PRINTK
} else if (!memcmp(to, "earlyprintk=", 12)) {
to += 12;
setup_early_printk(to);
+#endif
} else if (!memcmp(to, "memmap=", 7)) {
to += 7;
parse_memmap(to);
@@ -516,7 +552,7 @@
&& ((unsigned long *)mtd_phys)[1] == ROMSB_WORD1)
mtd_size =
PAGE_ALIGN(be32_to_cpu(((unsigned long *)mtd_phys)[2]));
-# if (defined(CONFIG_BFIN_ICACHE) && ANOMALY_05000263)
+# if (defined(CONFIG_BFIN_EXTMEM_ICACHEABLE) && ANOMALY_05000263)
/* Due to a Hardware Anomaly we need to limit the size of usable
* instruction memory to max 60MB, 56 if HUNT_FOR_ZERO is on
* 05000263 - Hardware loop corrupted when taking an ICPLB exception
@@ -544,7 +580,7 @@
dma_memcpy((void *)uclinux_ram_map.phys, _end, uclinux_ram_map.size);
#endif /* CONFIG_MTD_UCLINUX */
-#if (defined(CONFIG_BFIN_ICACHE) && ANOMALY_05000263)
+#if (defined(CONFIG_BFIN_EXTMEM_ICACHEABLE) && ANOMALY_05000263)
/* Due to a Hardware Anomaly we need to limit the size of usable
* instruction memory to max 60MB, 56 if HUNT_FOR_ZERO is on
* 05000263 - Hardware loop corrupted when taking an ICPLB exception
@@ -764,6 +800,11 @@
{
unsigned long sclk, cclk;
+ /* Check to make sure we are running on the right processor */
+ if (unlikely(CPUID != bfin_cpuid()))
+ printk(KERN_ERR "ERROR: Not running on ADSP-%s: unknown CPUID 0x%04x Rev 0.%d\n",
+ CPU, bfin_cpuid(), bfin_revid());
+
#ifdef CONFIG_DUMMY_CONSOLE
conswitchp = &dummy_con;
#endif
@@ -778,14 +819,17 @@
memcpy(boot_command_line, command_line, COMMAND_LINE_SIZE);
boot_command_line[COMMAND_LINE_SIZE - 1] = '\0';
- /* setup memory defaults from the user config */
- physical_mem_end = 0;
- _ramend = get_mem_size() * 1024 * 1024;
-
memset(&bfin_memmap, 0, sizeof(bfin_memmap));
+ /* If the user does not specify things on the command line, use
+ * what the bootloader set things up as
+ */
+ physical_mem_end = 0;
parse_cmdline_early(&command_line[0]);
+ if (_ramend == 0)
+ _ramend = get_mem_size() * 1024 * 1024;
+
if (physical_mem_end == 0)
physical_mem_end = _ramend;
@@ -837,7 +881,8 @@
defined(CONFIG_BF538) || defined(CONFIG_BF539)
_bfin_swrst = bfin_read_SWRST();
#else
- _bfin_swrst = bfin_read_SYSCR();
+ /* Clear boot mode field */
+ _bfin_swrst = bfin_read_SYSCR() & ~0xf;
#endif
#ifdef CONFIG_DEBUG_DOUBLEFAULT_PRINT
@@ -875,10 +920,7 @@
else
printk(KERN_INFO "Compiled for ADSP-%s Rev 0.%d\n", CPU, bfin_compiled_revid());
- if (unlikely(CPUID != bfin_cpuid()))
- printk(KERN_ERR "ERROR: Not running on ADSP-%s: unknown CPUID 0x%04x Rev 0.%d\n",
- CPU, bfin_cpuid(), bfin_revid());
- else {
+ if (likely(CPUID == bfin_cpuid())) {
if (bfin_revid() != bfin_compiled_revid()) {
if (bfin_compiled_revid() == -1)
printk(KERN_ERR "Warning: Compiled for Rev none, but running on Rev %d\n",
@@ -1157,16 +1199,25 @@
icache_size = 0;
seq_printf(m, "cache size\t: %d KB(L1 icache) "
- "%d KB(L1 dcache%s) %d KB(L2 cache)\n",
- icache_size, dcache_size,
-#if defined CONFIG_BFIN_WB
- "-wb"
-#elif defined CONFIG_BFIN_WT
- "-wt"
-#endif
- "", 0);
-
+ "%d KB(L1 dcache) %d KB(L2 cache)\n",
+ icache_size, dcache_size, 0);
seq_printf(m, "%s\n", cache);
+ seq_printf(m, "external memory\t: "
+#if defined(CONFIG_BFIN_EXTMEM_ICACHEABLE)
+ "cacheable"
+#else
+ "uncacheable"
+#endif
+ " in instruction cache\n");
+ seq_printf(m, "external memory\t: "
+#if defined(CONFIG_BFIN_EXTMEM_WRITEBACK)
+ "cacheable (write-back)"
+#elif defined(CONFIG_BFIN_EXTMEM_WRITETHROUGH)
+ "cacheable (write-through)"
+#else
+ "uncacheable"
+#endif
+ " in data cache\n");
if (icache_size)
seq_printf(m, "icache setup\t: %d Sub-banks/%d Ways, %d Lines/Way\n",
@@ -1239,8 +1290,25 @@
if (cpu_num != num_possible_cpus() - 1)
return 0;
- if (L2_LENGTH)
+ if (L2_LENGTH) {
seq_printf(m, "L2 SRAM\t\t: %dKB\n", L2_LENGTH/0x400);
+ seq_printf(m, "L2 SRAM\t\t: "
+#if defined(CONFIG_BFIN_L2_ICACHEABLE)
+ "cacheable"
+#else
+ "uncacheable"
+#endif
+ " in instruction cache\n");
+ seq_printf(m, "L2 SRAM\t\t: "
+#if defined(CONFIG_BFIN_L2_WRITEBACK)
+ "cacheable (write-back)"
+#elif defined(CONFIG_BFIN_L2_WRITETHROUGH)
+ "cacheable (write-through)"
+#else
+ "uncacheable"
+#endif
+ " in data cache\n");
+ }
seq_printf(m, "board name\t: %s\n", bfin_board_name);
seq_printf(m, "board memory\t: %ld kB (0x%p -> 0x%p)\n",
physical_mem_end >> 10, (void *)0, (void *)physical_mem_end);
diff --git a/arch/blackfin/kernel/traps.c b/arch/blackfin/kernel/traps.c
index d279552..8eeb457 100644
--- a/arch/blackfin/kernel/traps.c
+++ b/arch/blackfin/kernel/traps.c
@@ -37,6 +37,7 @@
#include <asm/traps.h>
#include <asm/cacheflush.h>
#include <asm/cplb.h>
+#include <asm/dma.h>
#include <asm/blackfin.h>
#include <asm/irq_handler.h>
#include <linux/irq.h>
@@ -636,57 +637,30 @@
*/
static bool get_instruction(unsigned short *val, unsigned short *address)
{
-
- unsigned long addr;
-
- addr = (unsigned long)address;
+ unsigned long addr = (unsigned long)address;
/* Check for odd addresses */
if (addr & 0x1)
return false;
- /* Check that things do not wrap around */
- if (addr > (addr + 2))
+ /* MMR region will never have instructions */
+ if (addr >= SYSMMR_BASE)
return false;
- /*
- * Since we are in exception context, we need to do a little address checking
- * We need to make sure we are only accessing valid memory, and
- * we don't read something in the async space that can hang forever
- */
- if ((addr >= FIXED_CODE_START && (addr + 2) <= physical_mem_end) ||
-#if L2_LENGTH != 0
- (addr >= L2_START && (addr + 2) <= (L2_START + L2_LENGTH)) ||
-#endif
- (addr >= BOOT_ROM_START && (addr + 2) <= (BOOT_ROM_START + BOOT_ROM_LENGTH)) ||
-#if L1_DATA_A_LENGTH != 0
- (addr >= L1_DATA_A_START && (addr + 2) <= (L1_DATA_A_START + L1_DATA_A_LENGTH)) ||
-#endif
-#if L1_DATA_B_LENGTH != 0
- (addr >= L1_DATA_B_START && (addr + 2) <= (L1_DATA_B_START + L1_DATA_B_LENGTH)) ||
-#endif
- (addr >= L1_SCRATCH_START && (addr + 2) <= (L1_SCRATCH_START + L1_SCRATCH_LENGTH)) ||
- (!(bfin_read_EBIU_AMBCTL0() & B0RDYEN) &&
- addr >= ASYNC_BANK0_BASE && (addr + 2) <= (ASYNC_BANK0_BASE + ASYNC_BANK0_SIZE)) ||
- (!(bfin_read_EBIU_AMBCTL0() & B1RDYEN) &&
- addr >= ASYNC_BANK1_BASE && (addr + 2) <= (ASYNC_BANK1_BASE + ASYNC_BANK1_SIZE)) ||
- (!(bfin_read_EBIU_AMBCTL1() & B2RDYEN) &&
- addr >= ASYNC_BANK2_BASE && (addr + 2) <= (ASYNC_BANK2_BASE + ASYNC_BANK1_SIZE)) ||
- (!(bfin_read_EBIU_AMBCTL1() & B3RDYEN) &&
- addr >= ASYNC_BANK3_BASE && (addr + 2) <= (ASYNC_BANK3_BASE + ASYNC_BANK1_SIZE))) {
- *val = *address;
- return true;
+ switch (bfin_mem_access_type(addr, 2)) {
+ case BFIN_MEM_ACCESS_CORE:
+ case BFIN_MEM_ACCESS_CORE_ONLY:
+ *val = *address;
+ return true;
+ case BFIN_MEM_ACCESS_DMA:
+ dma_memcpy(val, address, 2);
+ return true;
+ case BFIN_MEM_ACCESS_ITEST:
+ isram_memcpy(val, address, 2);
+ return true;
+ default: /* invalid access */
+ return false;
}
-
-#if L1_CODE_LENGTH != 0
- if (addr >= L1_CODE_START && (addr + 2) <= (L1_CODE_START + L1_CODE_LENGTH)) {
- isram_memcpy(val, address, 2);
- return true;
- }
-#endif
-
-
- return false;
}
/*
diff --git a/arch/blackfin/mach-bf518/boards/ezbrd.c b/arch/blackfin/mach-bf518/boards/ezbrd.c
index 1382f03..d979110 100644
--- a/arch/blackfin/mach-bf518/boards/ezbrd.c
+++ b/arch/blackfin/mach-bf518/boards/ezbrd.c
@@ -119,13 +119,19 @@
};
#if defined(CONFIG_NET_DSA_KSZ8893M) || defined(CONFIG_NET_DSA_KSZ8893M_MODULE)
-static struct dsa_platform_data ksz8893m_switch_data = {
+static struct dsa_chip_data ksz8893m_switch_chip_data = {
.mii_bus = &bfin_mii_bus.dev,
+ .port_names = {
+ NULL,
+ "eth%d",
+ "eth%d",
+ "cpu",
+ },
+};
+static struct dsa_platform_data ksz8893m_switch_data = {
+ .nr_chips = 1,
.netdev = &bfin_mac_device.dev,
- .port_names[0] = NULL,
- .port_names[1] = "eth%d",
- .port_names[2] = "eth%d",
- .port_names[3] = "cpu",
+ .chip = &ksz8893m_switch_chip_data,
};
static struct platform_device ksz8893m_switch_device = {
diff --git a/arch/blackfin/mach-bf518/include/mach/anomaly.h b/arch/blackfin/mach-bf518/include/mach/anomaly.h
index b69bd9a..426e064 100644
--- a/arch/blackfin/mach-bf518/include/mach/anomaly.h
+++ b/arch/blackfin/mach-bf518/include/mach/anomaly.h
@@ -7,7 +7,7 @@
*/
/* This file should be up to date with:
- * - Revision B, 02/03/2009; ADSP-BF512/BF514/BF516/BF518 Blackfin Processor Anomaly List
+ * - Revision C, 06/12/2009; ADSP-BF512/BF514/BF516/BF518 Blackfin Processor Anomaly List
*/
/* We plan on not supporting 0.0 silicon, but 0.1 isn't out yet - sorry */
@@ -18,7 +18,7 @@
#ifndef _MACH_ANOMALY_H_
#define _MACH_ANOMALY_H_
-/* Multi-issue instruction with dsp32shiftimm in slot1 and P-reg store in slot 2 not supported */
+/* Multi-Issue Instruction with dsp32shiftimm in slot1 and P-reg Store in slot2 Not Supported */
#define ANOMALY_05000074 (1)
/* Rx.H Cannot Be Used to Access 16-bit System MMR Registers */
#define ANOMALY_05000122 (1)
@@ -45,29 +45,31 @@
/* Speculative Fetches of Indirect-Pointer Instructions Can Cause False Hardware Errors */
#define ANOMALY_05000426 (1)
/* Software System Reset Corrupts PLL_LOCKCNT Register */
-#define ANOMALY_05000430 (1)
+#define ANOMALY_05000430 (__SILICON_REVISION__ < 1)
/* Incorrect Use of Stack in Lockbox Firmware During Authentication */
#define ANOMALY_05000431 (1)
/* Certain SIC Registers are not Reset After Soft or Core Double Fault Reset */
-#define ANOMALY_05000435 (1)
+#define ANOMALY_05000435 (__SILICON_REVISION__ < 1)
/* PORTx_DRIVE and PORTx_HYSTERESIS Registers Read Back Incorrect Values */
-#define ANOMALY_05000438 (1)
+#define ANOMALY_05000438 (__SILICON_REVISION__ < 1)
/* Preboot Cannot be Used to Alter the PLL_DIV Register */
-#define ANOMALY_05000439 (1)
+#define ANOMALY_05000439 (__SILICON_REVISION__ < 1)
/* bfrom_SysControl() Cannot be Used to Write the PLL_DIV Register */
-#define ANOMALY_05000440 (1)
+#define ANOMALY_05000440 (__SILICON_REVISION__ < 1)
/* IFLUSH Instruction at End of Hardware Loop Causes Infinite Stall */
#define ANOMALY_05000443 (1)
/* Incorrect L1 Instruction Bank B Memory Map Location */
-#define ANOMALY_05000444 (1)
+#define ANOMALY_05000444 (__SILICON_REVISION__ < 1)
/* Incorrect Default Hysteresis Setting for RESET, NMI, and BMODE Signals */
-#define ANOMALY_05000452 (1)
+#define ANOMALY_05000452 (__SILICON_REVISION__ < 1)
/* PWM_TRIPB Signal Not Available on PG10 */
-#define ANOMALY_05000453 (1)
+#define ANOMALY_05000453 (__SILICON_REVISION__ < 1)
/* PPI_FS3 is Driven One Half Cycle Later Than PPI Data */
-#define ANOMALY_05000455 (1)
-/* False Hardware Error when RETI points to invalid memory */
+#define ANOMALY_05000455 (__SILICON_REVISION__ < 1)
+/* False Hardware Error when RETI Points to Invalid Memory */
#define ANOMALY_05000461 (1)
+/* Synchronization Problem at Startup May Cause SPORT Transmit Channels to Misalign */
+#define ANOMALY_05000462 (1)
/* Anomalies that don't exist on this proc */
#define ANOMALY_05000099 (0)
@@ -78,24 +80,30 @@
#define ANOMALY_05000158 (0)
#define ANOMALY_05000171 (0)
#define ANOMALY_05000179 (0)
+#define ANOMALY_05000182 (0)
#define ANOMALY_05000183 (0)
#define ANOMALY_05000198 (0)
+#define ANOMALY_05000202 (0)
#define ANOMALY_05000215 (0)
#define ANOMALY_05000220 (0)
#define ANOMALY_05000227 (0)
#define ANOMALY_05000230 (0)
#define ANOMALY_05000231 (0)
#define ANOMALY_05000233 (0)
+#define ANOMALY_05000234 (0)
#define ANOMALY_05000242 (0)
#define ANOMALY_05000244 (0)
#define ANOMALY_05000248 (0)
#define ANOMALY_05000250 (0)
+#define ANOMALY_05000257 (0)
#define ANOMALY_05000261 (0)
#define ANOMALY_05000263 (0)
#define ANOMALY_05000266 (0)
#define ANOMALY_05000273 (0)
#define ANOMALY_05000274 (0)
#define ANOMALY_05000278 (0)
+#define ANOMALY_05000281 (0)
+#define ANOMALY_05000283 (0)
#define ANOMALY_05000285 (0)
#define ANOMALY_05000287 (0)
#define ANOMALY_05000301 (0)
@@ -103,10 +111,13 @@
#define ANOMALY_05000307 (0)
#define ANOMALY_05000311 (0)
#define ANOMALY_05000312 (0)
+#define ANOMALY_05000315 (0)
#define ANOMALY_05000323 (0)
#define ANOMALY_05000353 (0)
+#define ANOMALY_05000357 (0)
#define ANOMALY_05000362 (1)
#define ANOMALY_05000363 (0)
+#define ANOMALY_05000371 (0)
#define ANOMALY_05000380 (0)
#define ANOMALY_05000386 (0)
#define ANOMALY_05000389 (0)
@@ -117,5 +128,7 @@
#define ANOMALY_05000448 (0)
#define ANOMALY_05000456 (0)
#define ANOMALY_05000450 (0)
+#define ANOMALY_05000465 (0)
+#define ANOMALY_05000467 (0)
#endif
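
(Context, not part of the patch: the ANOMALY_* macros above are intended to be consumed by generic Blackfin code as plain compile-time booleans, which is why every anomaly that does not apply to this part is still defined as (0). A minimal usage sketch follows, assuming mach/anomaly.h has already been pulled in via mach/blackfin.h; the workaround helper is hypothetical, only the #if pattern is taken from these headers.)

static void sport_tx_workaround(void)	/* hypothetical helper */
{
	/* re-sync the SPORT transmit channels after startup
	 * (anomaly 05000462 on this part) */
}

static void sport_tx_start(void)
{
#if ANOMALY_05000462
	/* compiled in only where the macro expands to non-zero */
	sport_tx_workaround();
#endif
	/* normal start-up path */
}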
diff --git a/arch/blackfin/mach-bf518/include/mach/blackfin.h b/arch/blackfin/mach-bf518/include/mach/blackfin.h
index 267bb7c..e8e14c2 100644
--- a/arch/blackfin/mach-bf518/include/mach/blackfin.h
+++ b/arch/blackfin/mach-bf518/include/mach/blackfin.h
@@ -33,7 +33,6 @@
#define _MACH_BLACKFIN_H_
#include "bf518.h"
-#include "mem_map.h"
#include "defBF512.h"
#include "anomaly.h"
diff --git a/arch/blackfin/mach-bf518/include/mach/mem_map.h b/arch/blackfin/mach-bf518/include/mach/mem_map.h
index 62bcc78..3c6777c 100644
--- a/arch/blackfin/mach-bf518/include/mach/mem_map.h
+++ b/arch/blackfin/mach-bf518/include/mach/mem_map.h
@@ -1,38 +1,16 @@
/*
- * file: include/asm-blackfin/mach-bf518/mem_map.h
- * based on: include/asm-blackfin/mach-bf527/mem_map.h
- * author: Bryan Wu <cooloney@kernel.org>
+ * BF51x memory map
*
- * created:
- * description:
- * Memory MAP Common header file for blackfin BF518/6/4/2 of processors.
- * rev:
- *
- * modified:
- *
- * bugs: enter bugs at http://blackfin.uclinux.org/
- *
- * this program is free software; you can redistribute it and/or modify
- * it under the terms of the gnu general public license as published by
- * the free software foundation; either version 2, or (at your option)
- * any later version.
- *
- * this program is distributed in the hope that it will be useful,
- * but without any warranty; without even the implied warranty of
- * merchantability or fitness for a particular purpose. see the
- * gnu general public license for more details.
- *
- * you should have received a copy of the gnu general public license
- * along with this program; see the file copying.
- * if not, write to the free software foundation,
- * 59 temple place - suite 330, boston, ma 02111-1307, usa.
+ * Copyright 2004-2009 Analog Devices Inc.
+ * Licensed under the GPL-2 or later.
*/
-#ifndef _MEM_MAP_518_H_
-#define _MEM_MAP_518_H_
+#ifndef __BFIN_MACH_MEM_MAP_H__
+#define __BFIN_MACH_MEM_MAP_H__
-#define COREMMR_BASE 0xFFE00000 /* Core MMRs */
-#define SYSMMR_BASE 0xFFC00000 /* System MMRs */
+#ifndef __BFIN_MEM_MAP_H__
+# error "do not include mach/mem_map.h directly -- use asm/mem_map.h"
+#endif
/* Async Memory Banks */
#define ASYNC_BANK3_BASE 0x20300000 /* Async Bank 3 */
@@ -89,20 +67,4 @@
#define BFIN_DSUPBANKS 0
#endif /*CONFIG_BFIN_DCACHE */
-/* Level 2 Memory - none */
-
-#define L2_START 0
-#define L2_LENGTH 0
-
-/* Scratch Pad Memory */
-
-#define L1_SCRATCH_START 0xFFB00000
-#define L1_SCRATCH_LENGTH 0x1000
-
-#define GET_PDA_SAFE(preg) \
- preg.l = _cpu_pda; \
- preg.h = _cpu_pda;
-
-#define GET_PDA(preg, dreg) GET_PDA_SAFE(preg)
-
-#endif /* _MEM_MAP_518_H_ */
+#endif
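
(Context, not part of the patch: the #error added above implies that arch/blackfin/include/asm/mem_map.h defines __BFIN_MEM_MAP_H__ and then includes the per-machine header. Below is a hedged sketch of such a wrapper under that assumption; the real file is not shown in this hunk.)

#ifndef __BFIN_MEM_MAP_H__
#define __BFIN_MEM_MAP_H__

#include <mach/mem_map.h>	/* per-machine layout; the guard above satisfies its check */

/* common pieces removed from the per-machine headers (L1 scratchpad,
 * L2 defaults, GET_PDA helpers) would be centralized here */

#endif /* __BFIN_MEM_MAP_H__ */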
diff --git a/arch/blackfin/mach-bf527/boards/cm_bf527.c b/arch/blackfin/mach-bf527/boards/cm_bf527.c
index 1eaf27f..f4867ce 100644
--- a/arch/blackfin/mach-bf527/boards/cm_bf527.c
+++ b/arch/blackfin/mach-bf527/boards/cm_bf527.c
@@ -78,7 +78,6 @@
static struct isp1760_platform_data isp1760_priv = {
.is_isp1761 = 0,
- .port1_disable = 0,
.bus_width_16 = 1,
.port1_otg = 0,
.analog_oc = 0,
diff --git a/arch/blackfin/mach-bf527/boards/ezbrd.c b/arch/blackfin/mach-bf527/boards/ezbrd.c
index 9f9c000..b2f30f0 100644
--- a/arch/blackfin/mach-bf527/boards/ezbrd.c
+++ b/arch/blackfin/mach-bf527/boards/ezbrd.c
@@ -237,10 +237,10 @@
.name = "m25p80",
.parts = bfin_spi_flash_partitions,
.nr_parts = ARRAY_SIZE(bfin_spi_flash_partitions),
- .type = "m25p16",
+ .type = "sst25wf040",
};
-/* SPI flash chip (m25p64) */
+/* SPI flash chip (sst25wf040) */
static struct bfin5xx_spi_chip spi_flash_chip_info = {
.enable_dma = 0, /* use dma transfer with this chip*/
.bits_per_word = 8,
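
(Context, not part of the patch: the flash_platform_data initialized above is normally attached to the SPI bus through an spi_board_info entry so the m25p80 driver can match the chip named in .type. The entry below is a sketch; the symbol bfin_spi_flash_data, the clock rate, and the chip-select are assumptions, not values from this hunk.)

static struct spi_board_info bfin_spi_board_info[] __initdata = {
	{
		.modalias	 = "m25p80",
		.max_speed_hz	 = 25000000,		/* assumed rate */
		.bus_num	 = 0,
		.chip_select	 = 1,			/* assumed CS */
		.platform_data	 = &bfin_spi_flash_data,	/* assumed name of the struct above */
		.controller_data = &spi_flash_chip_info,
		.mode		 = SPI_MODE_3,
	},
};

/* typically registered from the board init, e.g.:
 *	spi_register_board_info(bfin_spi_board_info,
 *				ARRAY_SIZE(bfin_spi_board_info));
 */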
diff --git a/arch/blackfin/mach-bf527/boards/ezkit.c b/arch/blackfin/mach-bf527/boards/ezkit.c
index 3e5b7db..799a1d1 100644
--- a/arch/blackfin/mach-bf527/boards/ezkit.c
+++ b/arch/blackfin/mach-bf527/boards/ezkit.c
@@ -77,7 +77,6 @@
static struct isp1760_platform_data isp1760_priv = {
.is_isp1761 = 0,
- .port1_disable = 0,
.bus_width_16 = 1,
.port1_otg = 0,
.analog_oc = 0,
diff --git a/arch/blackfin/mach-bf527/include/mach/anomaly.h b/arch/blackfin/mach-bf527/include/mach/anomaly.h
index c84ddea..0d63f74 100644
--- a/arch/blackfin/mach-bf527/include/mach/anomaly.h
+++ b/arch/blackfin/mach-bf527/include/mach/anomaly.h
@@ -34,7 +34,7 @@
#define _ANOMALY_BF527(rev527) (ANOMALY_BF527 && __SILICON_REVISION__ rev527)
#define _ANOMALY_BF526_BF527(rev526, rev527) (_ANOMALY_BF526(rev526) || _ANOMALY_BF527(rev527))
-/* Multi-issue instruction with dsp32shiftimm in slot1 and P-reg store in slot 2 not supported */
+/* Multi-Issue Instruction with dsp32shiftimm in slot1 and P-reg Store in slot2 Not Supported */
#define ANOMALY_05000074 (1)
/* DMA_RUN Bit Is Not Valid after a Peripheral Receive Channel DMA Stops */
#define ANOMALY_05000119 (1) /* note: brokenness is noted in documentation, not anomaly sheet */
@@ -184,8 +184,12 @@
#define ANOMALY_05000456 (1)
/* Host DMA Port Responds to Certain Bus Activity Without HOST_CE Assertion */
#define ANOMALY_05000457 (1)
-/* False Hardware Error when RETI points to invalid memory */
+/* False Hardware Error when RETI Points to Invalid Memory */
#define ANOMALY_05000461 (1)
+/* USB Rx DMA hang */
+#define ANOMALY_05000465 (1)
+/* Possible RX data corruption when control & data EP FIFOs are accessed via the core */
+#define ANOMALY_05000467 (1)
/* Anomalies that don't exist on this proc */
#define ANOMALY_05000099 (0)
@@ -195,24 +199,30 @@
#define ANOMALY_05000158 (0)
#define ANOMALY_05000171 (0)
#define ANOMALY_05000179 (0)
+#define ANOMALY_05000182 (0)
#define ANOMALY_05000183 (0)
#define ANOMALY_05000198 (0)
+#define ANOMALY_05000202 (0)
#define ANOMALY_05000215 (0)
#define ANOMALY_05000220 (0)
#define ANOMALY_05000227 (0)
#define ANOMALY_05000230 (0)
#define ANOMALY_05000231 (0)
#define ANOMALY_05000233 (0)
+#define ANOMALY_05000234 (0)
#define ANOMALY_05000242 (0)
#define ANOMALY_05000244 (0)
#define ANOMALY_05000248 (0)
#define ANOMALY_05000250 (0)
+#define ANOMALY_05000257 (0)
#define ANOMALY_05000261 (0)
#define ANOMALY_05000263 (0)
#define ANOMALY_05000266 (0)
#define ANOMALY_05000273 (0)
#define ANOMALY_05000274 (0)
#define ANOMALY_05000278 (0)
+#define ANOMALY_05000281 (0)
+#define ANOMALY_05000283 (0)
#define ANOMALY_05000285 (0)
#define ANOMALY_05000287 (0)
#define ANOMALY_05000301 (0)
@@ -220,6 +230,7 @@
#define ANOMALY_05000307 (0)
#define ANOMALY_05000311 (0)
#define ANOMALY_05000312 (0)
+#define ANOMALY_05000315 (0)
#define ANOMALY_05000323 (0)
#define ANOMALY_05000362 (1)
#define ANOMALY_05000363 (0)
diff --git a/arch/blackfin/mach-bf527/include/mach/blackfin.h b/arch/blackfin/mach-bf527/include/mach/blackfin.h
index 417abcd..03665a8 100644
--- a/arch/blackfin/mach-bf527/include/mach/blackfin.h
+++ b/arch/blackfin/mach-bf527/include/mach/blackfin.h
@@ -33,7 +33,6 @@
#define _MACH_BLACKFIN_H_
#include "bf527.h"
-#include "mem_map.h"
#include "defBF522.h"
#include "anomaly.h"
diff --git a/arch/blackfin/mach-bf527/include/mach/mem_map.h b/arch/blackfin/mach-bf527/include/mach/mem_map.h
index 019e001..d96e894 100644
--- a/arch/blackfin/mach-bf527/include/mach/mem_map.h
+++ b/arch/blackfin/mach-bf527/include/mach/mem_map.h
@@ -1,38 +1,16 @@
/*
- * file: include/asm-blackfin/mach-bf527/mem_map.h
- * based on: include/asm-blackfin/mach-bf537/mem_map.h
- * author: Michael Hennerich (michael.hennerich@analog.com)
+ * BF52x memory map
*
- * created:
- * description:
- * Memory MAP Common header file for blackfin BF527/5/2 of processors.
- * rev:
- *
- * modified:
- *
- * bugs: enter bugs at http://blackfin.uclinux.org/
- *
- * this program is free software; you can redistribute it and/or modify
- * it under the terms of the gnu general public license as published by
- * the free software foundation; either version 2, or (at your option)
- * any later version.
- *
- * this program is distributed in the hope that it will be useful,
- * but without any warranty; without even the implied warranty of
- * merchantability or fitness for a particular purpose. see the
- * gnu general public license for more details.
- *
- * you should have received a copy of the gnu general public license
- * along with this program; see the file copying.
- * if not, write to the free software foundation,
- * 59 temple place - suite 330, boston, ma 02111-1307, usa.
+ * Copyright 2004-2009 Analog Devices Inc.
+ * Licensed under the GPL-2 or later.
*/
-#ifndef _MEM_MAP_527_H_
-#define _MEM_MAP_527_H_
+#ifndef __BFIN_MACH_MEM_MAP_H__
+#define __BFIN_MACH_MEM_MAP_H__
-#define COREMMR_BASE 0xFFE00000 /* Core MMRs */
-#define SYSMMR_BASE 0xFFC00000 /* System MMRs */
+#ifndef __BFIN_MEM_MAP_H__
+# error "do not include mach/mem_map.h directly -- use asm/mem_map.h"
+#endif
/* Async Memory Banks */
#define ASYNC_BANK3_BASE 0x20300000 /* Async Bank 3 */
@@ -89,20 +67,4 @@
#define BFIN_DSUPBANKS 0
#endif /*CONFIG_BFIN_DCACHE */
-/* Level 2 Memory - none */
-
-#define L2_START 0
-#define L2_LENGTH 0
-
-/* Scratch Pad Memory */
-
-#define L1_SCRATCH_START 0xFFB00000
-#define L1_SCRATCH_LENGTH 0x1000
-
-#define GET_PDA_SAFE(preg) \
- preg.l = _cpu_pda; \
- preg.h = _cpu_pda;
-
-#define GET_PDA(preg, dreg) GET_PDA_SAFE(preg)
-
-#endif /* _MEM_MAP_527_H_ */
+#endif
diff --git a/arch/blackfin/mach-bf533/boards/ezkit.c b/arch/blackfin/mach-bf533/boards/ezkit.c
index 89a5ec4..4e3e511 100644
--- a/arch/blackfin/mach-bf533/boards/ezkit.c
+++ b/arch/blackfin/mach-bf533/boards/ezkit.c
@@ -32,6 +32,7 @@
#include <linux/platform_device.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
+#include <linux/mtd/plat-ram.h>
#include <linux/spi/spi.h>
#include <linux/spi/flash.h>
#if defined(CONFIG_USB_ISP1362_HCD) || defined(CONFIG_USB_ISP1362_HCD_MODULE)
@@ -86,6 +87,101 @@
};
#endif
+#if defined(CONFIG_MTD_PSD4256G) || defined(CONFIG_MTD_PSD4256G_MODULE)
+static const char *map_probes[] = {
+ "stm_flash",
+ NULL,
+};
+
+static struct platdata_mtd_ram stm_pri_data_a = {
+ .mapname = "Flash A Primary",
+ .map_probes = map_probes,
+ .bankwidth = 2,
+};
+
+static struct resource stm_pri_resource_a = {
+ .start = 0x20000000,
+ .end = 0x200fffff,
+ .flags = IORESOURCE_MEM,
+};
+
+static struct platform_device stm_pri_device_a = {
+ .name = "mtd-ram",
+ .id = 0,
+ .dev = {
+ .platform_data = &stm_pri_data_a,
+ },
+ .num_resources = 1,
+ .resource = &stm_pri_resource_a,
+};
+
+static struct platdata_mtd_ram stm_pri_data_b = {
+ .mapname = "Flash B Primary",
+ .map_probes = map_probes,
+ .bankwidth = 2,
+};
+
+static struct resource stm_pri_resource_b = {
+ .start = 0x20100000,
+ .end = 0x201fffff,
+ .flags = IORESOURCE_MEM,
+};
+
+static struct platform_device stm_pri_device_b = {
+ .name = "mtd-ram",
+ .id = 4,
+ .dev = {
+ .platform_data = &stm_pri_data_b,
+ },
+ .num_resources = 1,
+ .resource = &stm_pri_resource_b,
+};
+#endif
+
+#if defined(CONFIG_MTD_PLATRAM) || defined(CONFIG_MTD_PLATRAM_MODULE)
+static struct platdata_mtd_ram sram_data_a = {
+ .mapname = "Flash A SRAM",
+ .bankwidth = 2,
+};
+
+static struct resource sram_resource_a = {
+ .start = 0x20240000,
+ .end = 0x2024ffff,
+ .flags = IORESOURCE_MEM,
+};
+
+static struct platform_device sram_device_a = {
+ .name = "mtd-ram",
+ .id = 8,
+ .dev = {
+ .platform_data = &sram_data_a,
+ },
+ .num_resources = 1,
+ .resource = &sram_resource_a,
+};
+
+static struct platdata_mtd_ram sram_data_b = {
+ .mapname = "Flash B SRAM",
+ .bankwidth = 2,
+};
+
+static struct resource sram_resource_b = {
+ .start = 0x202c0000,
+ .end = 0x202cffff,
+ .flags = IORESOURCE_MEM,
+};
+
+static struct platform_device sram_device_b = {
+ .name = "mtd-ram",
+ .id = 9,
+ .dev = {
+ .platform_data = &sram_data_b,
+ },
+ .num_resources = 1,
+ .resource = &sram_resource_b,
+};
+#endif
+
#if defined(CONFIG_MTD_M25P80) || defined(CONFIG_MTD_M25P80_MODULE)
static struct mtd_partition bfin_spi_flash_partitions[] = {
{
@@ -357,6 +453,16 @@
&bfin_dpmc,
+#if defined(CONFIG_MTD_PSD4256G) || defined(CONFIG_MTD_PSD4256G_MODULE)
+ &stm_pri_device_a,
+ &stm_pri_device_b,
+#endif
+
+#if defined(CONFIG_MTD_PLATRAM) || defined(CONFIG_MTD_PLATRAM_MODULE)
+ &sram_device_a,
+ &sram_device_b,
+#endif
+
#if defined(CONFIG_SMC91X) || defined(CONFIG_SMC91X_MODULE)
&smc91x_device,
#endif
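
(Context, not part of the patch: the &stm_pri_device_* and &sram_device_* pointers added above go into the board's platform-device table, which the init code hands to platform_add_devices(). A sketch of that wiring with illustrative names; the real array and init function live elsewhere in ezkit.c.)

static struct platform_device *ezkit_devices[] __initdata = {
	&bfin_dpmc,
	/* ... MTD, network, and SPI entries as in the hunk above ... */
};

static int __init ezkit_init(void)
{
	return platform_add_devices(ezkit_devices,
				    ARRAY_SIZE(ezkit_devices));
}
arch_initcall(ezkit_init);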
diff --git a/arch/blackfin/mach-bf533/include/mach/anomaly.h b/arch/blackfin/mach-bf533/include/mach/anomaly.h
index 31145b5..70a0ad6 100644
--- a/arch/blackfin/mach-bf533/include/mach/anomaly.h
+++ b/arch/blackfin/mach-bf533/include/mach/anomaly.h
@@ -34,7 +34,7 @@
# define ANOMALY_BF533 0
#endif
-/* Multi-issue instruction with dsp32shiftimm in slot1 and P-reg store in slot 2 not supported */
+/* Multi-Issue Instruction with dsp32shiftimm in slot1 and P-reg Store in slot2 Not Supported */
#define ANOMALY_05000074 (1)
/* UART Line Status Register (UART_LSR) Bits Are Not Updated at the Same Time */
#define ANOMALY_05000099 (__SILICON_REVISION__ < 5)
@@ -46,7 +46,7 @@
#define ANOMALY_05000122 (1)
/* Instruction DMA Can Cause Data Cache Fills to Fail (Boot Implications) */
#define ANOMALY_05000158 (__SILICON_REVISION__ < 5)
-/* PPI Data Lengths Between 8 and 16 Do Not Zero Out Upper Bits */
+/* PPI Data Lengths between 8 and 16 Do Not Zero Out Upper Bits */
#define ANOMALY_05000166 (1)
/* Turning SPORTs on while External Frame Sync Is Active May Corrupt Data */
#define ANOMALY_05000167 (1)
@@ -56,13 +56,13 @@
#define ANOMALY_05000180 (1)
/* Timer Pin Limitations for PPI TX Modes with External Frame Syncs */
#define ANOMALY_05000183 (__SILICON_REVISION__ < 4)
-/* False Protection Exceptions */
+/* False Protection Exceptions when Speculative Fetch Is Cancelled */
#define ANOMALY_05000189 (__SILICON_REVISION__ < 4)
/* False I/O Pin Interrupts on Edge-Sensitive Inputs When Polarity Setting Is Changed */
#define ANOMALY_05000193 (__SILICON_REVISION__ < 4)
/* Restarting SPORT in Specific Modes May Cause Data Corruption */
#define ANOMALY_05000194 (__SILICON_REVISION__ < 4)
-/* Failing MMR Accesses When Stalled by Preceding Memory Read */
+/* Failing MMR Accesses when Preceding Memory Read Stalls */
#define ANOMALY_05000198 (__SILICON_REVISION__ < 5)
/* Current DMA Address Shows Wrong Value During Carry Fix */
#define ANOMALY_05000199 (__SILICON_REVISION__ < 4)
@@ -74,7 +74,7 @@
#define ANOMALY_05000202 (__SILICON_REVISION__ < 5)
/* Specific Sequence That Can Cause DMA Error or DMA Stopping */
#define ANOMALY_05000203 (__SILICON_REVISION__ < 4)
-/* Incorrect data read with write-through cache and allocate cache lines on reads only mode */
+/* Incorrect Data Read with Writethrough "Allocate Cache Lines on Reads Only" Cache Mode */
#define ANOMALY_05000204 (__SILICON_REVISION__ < 4 && ANOMALY_BF533)
/* Recovery from "Brown-Out" Condition */
#define ANOMALY_05000207 (__SILICON_REVISION__ < 4)
@@ -106,7 +106,7 @@
#define ANOMALY_05000244 (__SILICON_REVISION__ < 5)
/* False Hardware Error from an Access in the Shadow of a Conditional Branch */
#define ANOMALY_05000245 (1)
-/* Data CPLBs Should Prevent Spurious Hardware Errors */
+/* Data CPLBs Should Prevent False Hardware Errors */
#define ANOMALY_05000246 (__SILICON_REVISION__ < 5)
/* Incorrect Bit Shift of Data Word in Multichannel (TDM) Mode in Certain Conditions */
#define ANOMALY_05000250 (__SILICON_REVISION__ == 4)
@@ -148,21 +148,21 @@
#define ANOMALY_05000277 (__SILICON_REVISION__ < 6)
/* Disabling Peripherals with DMA Running May Cause DMA System Instability */
#define ANOMALY_05000278 (__SILICON_REVISION__ < 6)
-/* False Hardware Error Exception When ISR Context Is Not Restored */
+/* False Hardware Error Exception when ISR Context Is Not Restored */
#define ANOMALY_05000281 (__SILICON_REVISION__ < 6)
/* Memory DMA Corruption with 32-Bit Data and Traffic Control */
#define ANOMALY_05000282 (__SILICON_REVISION__ < 6)
-/* System MMR Write Is Stalled Indefinitely When Killed in a Particular Stage */
+/* System MMR Write Is Stalled Indefinitely when Killed in a Particular Stage */
#define ANOMALY_05000283 (__SILICON_REVISION__ < 6)
/* SPORTs May Receive Bad Data If FIFOs Fill Up */
#define ANOMALY_05000288 (__SILICON_REVISION__ < 6)
/* Memory-To-Memory DMA Source/Destination Descriptors Must Be in Same Memory Space */
#define ANOMALY_05000301 (__SILICON_REVISION__ < 6)
-/* SSYNCs After Writes To DMA MMR Registers May Not Be Handled Correctly */
+/* SSYNCs after Writes to DMA MMR Registers May Not Be Handled Correctly */
#define ANOMALY_05000302 (__SILICON_REVISION__ < 5)
/* SPORT_HYS Bit in PLL_CTL Register Is Not Functional */
#define ANOMALY_05000305 (__SILICON_REVISION__ < 5)
-/* New Feature: Additional PPI Frame Sync Sampling Options (Not Available On Older Silicon) */
+/* ALT_TIMING Bit in PPI_CONTROL Register Is Not Functional */
#define ANOMALY_05000306 (__SILICON_REVISION__ < 5)
/* SCKELOW Bit Does Not Maintain State Through Hibernate */
#define ANOMALY_05000307 (1) /* note: brokenness is noted in documentation, not anomaly sheet */
@@ -170,11 +170,11 @@
#define ANOMALY_05000310 (1)
/* Erroneous Flag (GPIO) Pin Operations under Specific Sequences */
#define ANOMALY_05000311 (__SILICON_REVISION__ < 6)
-/* Errors When SSYNC, CSYNC, or Loads to LT, LB and LC Registers Are Interrupted */
+/* Errors when SSYNC, CSYNC, or Loads to LT, LB and LC Registers Are Interrupted */
#define ANOMALY_05000312 (__SILICON_REVISION__ < 6)
/* PPI Is Level-Sensitive on First Transfer In Single Frame Sync Modes */
#define ANOMALY_05000313 (__SILICON_REVISION__ < 6)
-/* Killed System MMR Write Completes Erroneously On Next System MMR Access */
+/* Killed System MMR Write Completes Erroneously on Next System MMR Access */
#define ANOMALY_05000315 (__SILICON_REVISION__ < 6)
/* Internal Voltage Regulator Values of 1.05V, 1.10V and 1.15V Not Allowed for LQFP Packages */
#define ANOMALY_05000319 ((ANOMALY_BF531 || ANOMALY_BF532) && __SILICON_REVISION__ < 6)
@@ -200,7 +200,7 @@
#define ANOMALY_05000426 (1)
/* IFLUSH Instruction at End of Hardware Loop Causes Infinite Stall */
#define ANOMALY_05000443 (1)
-/* False Hardware Error when RETI points to invalid memory */
+/* False Hardware Error when RETI Points to Invalid Memory */
#define ANOMALY_05000461 (1)
/* These anomalies have been "phased" out of analog.com anomaly sheets and are
@@ -215,17 +215,17 @@
#define ANOMALY_05000070 (__SILICON_REVISION__ < 2)
/* Writing FIO_DIR can corrupt a programmable flag's data */
#define ANOMALY_05000079 (__SILICON_REVISION__ < 2)
-/* Timer Auto-Baud Mode requires the UART clock to be enabled */
+/* Timer Auto-Baud Mode requires the UART clock to be enabled. */
#define ANOMALY_05000086 (__SILICON_REVISION__ < 2)
/* Internal Clocking Modes on SPORT0 not supported */
#define ANOMALY_05000088 (__SILICON_REVISION__ < 2)
/* Internal voltage regulator does not wake up from an RTC wakeup */
#define ANOMALY_05000092 (__SILICON_REVISION__ < 2)
-/* The IFLUSH instruction must be preceded by a CSYNC instruction */
+/* The IFLUSH Instruction Must Be Preceded by a CSYNC Instruction */
#define ANOMALY_05000093 (__SILICON_REVISION__ < 2)
-/* Vectoring to an instruction that is presently being filled into the instruction cache may cause erroneous behavior */
+/* Vectoring to instruction that is being filled into the i-cache may cause erroneous behavior */
#define ANOMALY_05000095 (__SILICON_REVISION__ < 2)
-/* PREFETCH, FLUSH, and FLUSHINV must be followed by a CSYNC */
+/* PREFETCH, FLUSH, and FLUSHINV Instructions Must Be Followed by a CSYNC Instruction */
#define ANOMALY_05000096 (__SILICON_REVISION__ < 2)
/* Performance Monitor 0 and 1 are swapped when monitoring memory events */
#define ANOMALY_05000097 (__SILICON_REVISION__ < 2)
@@ -235,45 +235,45 @@
#define ANOMALY_05000100 (__SILICON_REVISION__ < 2)
/* Reading X_MODIFY or Y_MODIFY while DMA channel is active */
#define ANOMALY_05000101 (__SILICON_REVISION__ < 2)
-/* Descriptor-based MemDMA may lock up with 32-bit transfers or if transfers span 64KB buffers */
+/* Descriptor MemDMA may lock up with 32-bit transfers or if transfers span 64KB buffers */
#define ANOMALY_05000102 (__SILICON_REVISION__ < 2)
-/* Incorrect value written to the cycle counters */
+/* Incorrect Value Written to the Cycle Counters */
#define ANOMALY_05000103 (__SILICON_REVISION__ < 2)
-/* Stores to L1 Data memory incorrect when a specific sequence is followed */
+/* Stores to L1 Data Memory Incorrect when a Specific Sequence Is Followed */
#define ANOMALY_05000104 (__SILICON_REVISION__ < 2)
/* Programmable Flag (PF3) functionality not supported in all PPI modes */
#define ANOMALY_05000106 (__SILICON_REVISION__ < 2)
/* Data store can be lost when targeting a cache line fill */
#define ANOMALY_05000107 (__SILICON_REVISION__ < 2)
-/* Reserved bits in SYSCFG register not set at power on */
+/* Reserved Bits in SYSCFG Register Not Set at Power-On */
#define ANOMALY_05000109 (__SILICON_REVISION__ < 3)
/* Infinite Core Stall */
#define ANOMALY_05000114 (__SILICON_REVISION__ < 2)
-/* PPI_FSx may glitch when generated by the on chip Timers */
+/* PPI_FSx may glitch when generated by the on chip Timers. */
#define ANOMALY_05000115 (__SILICON_REVISION__ < 2)
-/* Trace Buffers may record discontinuities into emulation mode and/or exception, NMI, reset handlers */
+/* Trace Buffers May Contain Errors in Emulation Mode and/or Exception, NMI, Reset Handlers */
#define ANOMALY_05000116 (__SILICON_REVISION__ < 3)
/* DTEST registers allow access to Data Cache when DTEST_COMMAND< 14 >= 0 */
#define ANOMALY_05000117 (__SILICON_REVISION__ < 2)
/* Booting from an 8-bit or 24-bit Addressable SPI device is not supported */
#define ANOMALY_05000118 (__SILICON_REVISION__ < 2)
-/* DTEST_COMMAND initiated memory access may be incorrect if data cache or DMA is active */
+/* DTEST_COMMAND Initiated Memory Access May Be Incorrect If Data Cache or DMA Is Active */
#define ANOMALY_05000123 (__SILICON_REVISION__ < 3)
/* DMA Lock-up at CCLK to SCLK ratios of 4:1, 2:1, or 1:1 */
#define ANOMALY_05000124 (__SILICON_REVISION__ < 3)
-/* Erroneous exception when enabling cache */
+/* Erroneous Exception when Enabling Cache */
#define ANOMALY_05000125 (__SILICON_REVISION__ < 3)
/* SPI clock polarity and phase bits incorrect during booting */
#define ANOMALY_05000126 (__SILICON_REVISION__ < 3)
-/* DMEM_CONTROL is not set on Reset */
+/* DMEM_CONTROL<12> Is Not Set on Reset */
#define ANOMALY_05000137 (__SILICON_REVISION__ < 3)
/* SPI boot will not complete if there is a zero fill block in the loader file */
#define ANOMALY_05000138 (__SILICON_REVISION__ == 2)
-/* Timerx_Config must be set for using the PPI in GP output mode with internal Frame Syncs */
+/* TIMERx_CONFIG[5] must be set for PPI in GP output mode with internal Frame Syncs */
#define ANOMALY_05000139 (__SILICON_REVISION__ < 2)
/* Allowing the SPORT RX FIFO to fill will cause an overflow */
#define ANOMALY_05000140 (__SILICON_REVISION__ < 3)
-/* An Infinite Stall occurs with a particular sequence of consecutive dual dag events */
+/* Infinite Stall may occur with a particular sequence of consecutive dual dag events */
#define ANOMALY_05000141 (__SILICON_REVISION__ < 3)
/* Interrupts may be lost when a programmable input flag is configured to be edge sensitive */
#define ANOMALY_05000142 (__SILICON_REVISION__ < 3)
@@ -287,7 +287,7 @@
#define ANOMALY_05000146 (__SILICON_REVISION__ < 3)
/* Source MDMA descriptor may stop with a DMA Error near beginning of descriptor fetch */
#define ANOMALY_05000147 (__SILICON_REVISION__ < 3)
-/* When booting from a 16-bit asynchronous memory device, the upper 8-bits of each word must be 0x00 */
+/* When booting from 16-bit asynchronous memory, the upper 8 bits of each word must be 0x00 */
#define ANOMALY_05000148 (__SILICON_REVISION__ < 3)
/* Frame Delay in SPORT Multichannel Mode */
#define ANOMALY_05000153 (__SILICON_REVISION__ < 3)
@@ -295,13 +295,13 @@
#define ANOMALY_05000154 (__SILICON_REVISION__ < 3)
/* Timer1 can not be used for PWMOUT mode when a certain PPI mode is in use */
#define ANOMALY_05000155 (__SILICON_REVISION__ < 3)
-/* Killed 32-bit MMR write leads to next system MMR access thinking it should be 32-bit */
+/* Killed 32-Bit MMR Write Leads to Next System MMR Access Thinking It Should Be 32-Bit */
#define ANOMALY_05000157 (__SILICON_REVISION__ < 3)
-/* SPORT transmit data is not gated by external frame sync in certain conditions */
+/* SPORT Transmit Data Is Not Gated by External Frame Sync in Certain Conditions */
#define ANOMALY_05000163 (__SILICON_REVISION__ < 3)
-/* SDRAM auto-refresh and subsequent Power Ups */
+/* Undefined Behavior when Power-Up Sequence Is Issued to SDRAM during Auto-Refresh */
#define ANOMALY_05000168 (__SILICON_REVISION__ < 3)
-/* DATA CPLB page miss can result in lost write-through cache data writes */
+/* DATA CPLB Page Miss Can Result in Lost Write-Through Data Cache Writes */
#define ANOMALY_05000169 (__SILICON_REVISION__ < 3)
/* DMA vs Core accesses to external memory */
#define ANOMALY_05000173 (__SILICON_REVISION__ < 3)
@@ -309,15 +309,15 @@
#define ANOMALY_05000174 (__SILICON_REVISION__ < 3)
/* Overlapping Sequencer and Memory Stalls */
#define ANOMALY_05000175 (__SILICON_REVISION__ < 3)
-/* Multiplication of (-1) by (-1) followed by an accumulator saturation */
+/* Overflow Bit Asserted when Multiplication of -1 by -1 Followed by Accumulator Saturation */
#define ANOMALY_05000176 (__SILICON_REVISION__ < 3)
-/* Disabling the PPI resets the PPI configuration registers */
+/* Disabling the PPI Resets the PPI Configuration Registers */
#define ANOMALY_05000181 (__SILICON_REVISION__ < 3)
-/* PPI TX Mode with 2 External Frame Syncs */
+/* Early PPI Transmit when FS1 Asserts before FS2 in TX Mode with 2 External Frame Syncs */
#define ANOMALY_05000185 (__SILICON_REVISION__ < 3)
/* PPI does not invert the Driving PPICLK edge in Transmit Modes */
#define ANOMALY_05000191 (__SILICON_REVISION__ < 3)
-/* In PPI Transmit Modes with External Frame Syncs POLC */
+/* In PPI Transmit Modes with External Frame Syncs POLC bit must be set to 1 */
#define ANOMALY_05000192 (__SILICON_REVISION__ < 3)
/* Internal Voltage Regulator may not start up */
#define ANOMALY_05000206 (__SILICON_REVISION__ < 3)
@@ -326,6 +326,7 @@
#define ANOMALY_05000120 (0)
#define ANOMALY_05000149 (0)
#define ANOMALY_05000171 (0)
+#define ANOMALY_05000182 (0)
#define ANOMALY_05000220 (0)
#define ANOMALY_05000248 (0)
#define ANOMALY_05000266 (0)
@@ -345,5 +346,7 @@
#define ANOMALY_05000448 (0)
#define ANOMALY_05000456 (0)
#define ANOMALY_05000450 (0)
+#define ANOMALY_05000465 (0)
+#define ANOMALY_05000467 (0)
#endif
diff --git a/arch/blackfin/mach-bf533/include/mach/blackfin.h b/arch/blackfin/mach-bf533/include/mach/blackfin.h
index 045184f..39aa175 100644
--- a/arch/blackfin/mach-bf533/include/mach/blackfin.h
+++ b/arch/blackfin/mach-bf533/include/mach/blackfin.h
@@ -34,7 +34,6 @@
#define BF533_FAMILY
#include "bf533.h"
-#include "mem_map.h"
#include "defBF532.h"
#include "anomaly.h"
diff --git a/arch/blackfin/mach-bf533/include/mach/mem_map.h b/arch/blackfin/mach-bf533/include/mach/mem_map.h
index fc33b7c..197af1a 100644
--- a/arch/blackfin/mach-bf533/include/mach/mem_map.h
+++ b/arch/blackfin/mach-bf533/include/mach/mem_map.h
@@ -1,38 +1,16 @@
/*
- * File: include/asm-blackfin/mach-bf533/mem_map.h
- * Based on:
- * Author:
+ * BF533 memory map
*
- * Created:
- * Description:
- *
- * Rev:
- *
- * Modified:
- *
- * Bugs: Enter bugs at http://blackfin.uclinux.org/
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; see the file COPYING.
- * If not, write to the Free Software Foundation,
- * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ * Copyright 2004-2009 Analog Devices Inc.
+ * Licensed under the GPL-2 or later.
*/
-#ifndef _MEM_MAP_533_H_
-#define _MEM_MAP_533_H_
+#ifndef __BFIN_MACH_MEM_MAP_H__
+#define __BFIN_MACH_MEM_MAP_H__
-#define COREMMR_BASE 0xFFE00000 /* Core MMRs */
-#define SYSMMR_BASE 0xFFC00000 /* System MMRs */
+#ifndef __BFIN_MEM_MAP_H__
+# error "do not include mach/mem_map.h directly -- use asm/mem_map.h"
+#endif
/* Async Memory Banks */
#define ASYNC_BANK3_BASE 0x20300000 /* Async Bank 3 */
@@ -158,20 +136,4 @@
#endif
-/* Level 2 Memory - none */
-
-#define L2_START 0
-#define L2_LENGTH 0
-
-/* Scratch Pad Memory */
-
-#define L1_SCRATCH_START 0xFFB00000
-#define L1_SCRATCH_LENGTH 0x1000
-
-#define GET_PDA_SAFE(preg) \
- preg.l = _cpu_pda; \
- preg.h = _cpu_pda;
-
-#define GET_PDA(preg, dreg) GET_PDA_SAFE(preg)
-
-#endif /* _MEM_MAP_533_H_ */
+#endif
diff --git a/arch/blackfin/mach-bf537/boards/stamp.c b/arch/blackfin/mach-bf537/boards/stamp.c
index ff7228c..c1f76dd 100644
--- a/arch/blackfin/mach-bf537/boards/stamp.c
+++ b/arch/blackfin/mach-bf537/boards/stamp.c
@@ -79,7 +79,6 @@
static struct isp1760_platform_data isp1760_priv = {
.is_isp1761 = 0,
- .port1_disable = 0,
.bus_width_16 = 1,
.port1_otg = 0,
.analog_oc = 0,
diff --git a/arch/blackfin/mach-bf537/include/mach/anomaly.h b/arch/blackfin/mach-bf537/include/mach/anomaly.h
index fc96634..57c128c 100644
--- a/arch/blackfin/mach-bf537/include/mach/anomaly.h
+++ b/arch/blackfin/mach-bf537/include/mach/anomaly.h
@@ -34,13 +34,13 @@
# define ANOMALY_BF537 0
#endif
-/* Multi-issue instruction with dsp32shiftimm in slot1 and P-reg store in slot 2 not supported */
+/* Multi-Issue Instruction with dsp32shiftimm in slot1 and P-reg Store in slot2 Not Supported */
#define ANOMALY_05000074 (1)
/* DMA_RUN Bit Is Not Valid after a Peripheral Receive Channel DMA Stops */
#define ANOMALY_05000119 (1)
/* Rx.H Cannot Be Used to Access 16-bit System MMR Registers */
#define ANOMALY_05000122 (1)
-/* Killed 32-bit MMR write leads to next system MMR access thinking it should be 32-bit */
+/* Killed 32-Bit MMR Write Leads to Next System MMR Access Thinking It Should Be 32-Bit */
#define ANOMALY_05000157 (__SILICON_REVISION__ < 2)
/* PPI_DELAY Not Functional in PPI Modes with 0 Frame Syncs */
#define ANOMALY_05000180 (1)
@@ -50,11 +50,11 @@
#define ANOMALY_05000244 (__SILICON_REVISION__ < 3)
/* False Hardware Error from an Access in the Shadow of a Conditional Branch */
#define ANOMALY_05000245 (1)
-/* CLKIN Buffer Output Enable Reset Behavior Is Changed */
+/* Buffered CLKIN Output Is Disabled by Default */
#define ANOMALY_05000247 (1)
/* Incorrect Bit Shift of Data Word in Multichannel (TDM) Mode in Certain Conditions */
#define ANOMALY_05000250 (__SILICON_REVISION__ < 3)
-/* EMAC Tx DMA error after an early frame abort */
+/* EMAC TX DMA Error After an Early Frame Abort */
#define ANOMALY_05000252 (__SILICON_REVISION__ < 3)
/* Maximum External Clock Speed for Timers */
#define ANOMALY_05000253 (__SILICON_REVISION__ < 3)
@@ -62,7 +62,7 @@
#define ANOMALY_05000254 (__SILICON_REVISION__ > 2)
/* Entering Hibernate State with RTC Seconds Interrupt Not Functional */
#define ANOMALY_05000255 (__SILICON_REVISION__ < 3)
-/* EMAC MDIO input latched on wrong MDC edge */
+/* EMAC MDIO Input Latched on Wrong MDC Edge */
#define ANOMALY_05000256 (__SILICON_REVISION__ < 3)
/* Interrupt/Exception During Short Hardware Loop May Cause Bad Instruction Fetches */
#define ANOMALY_05000257 (__SILICON_REVISION__ < 3)
@@ -80,7 +80,7 @@
#define ANOMALY_05000264 (__SILICON_REVISION__ < 3)
/* Sensitivity To Noise with Slow Input Edge Rates on External SPORT TX and RX Clocks */
#define ANOMALY_05000265 (1)
-/* Memory DMA error when peripheral DMA is running with non-zero DEB_TRAFFIC_PERIOD */
+/* Memory DMA Error when Peripheral DMA Is Running with Non-Zero DEB_TRAFFIC_PERIOD */
#define ANOMALY_05000268 (__SILICON_REVISION__ < 3)
/* High I/O Activity Causes Output Voltage of Internal Voltage Regulator (Vddint) to Decrease */
#define ANOMALY_05000270 (__SILICON_REVISION__ < 3)
@@ -92,15 +92,15 @@
#define ANOMALY_05000277 (__SILICON_REVISION__ < 3)
/* Disabling Peripherals with DMA Running May Cause DMA System Instability */
#define ANOMALY_05000278 (((ANOMALY_BF536 || ANOMALY_BF537) && __SILICON_REVISION__ < 3) || (ANOMALY_BF534 && __SILICON_REVISION__ < 2))
-/* SPI Master boot mode does not work well with Atmel Data flash devices */
+/* SPI Master Boot Mode Does Not Work Well with Atmel Data Flash Devices */
#define ANOMALY_05000280 (1)
-/* False Hardware Error Exception When ISR Context Is Not Restored */
+/* False Hardware Error Exception when ISR Context Is Not Restored */
#define ANOMALY_05000281 (__SILICON_REVISION__ < 3)
/* Memory DMA Corruption with 32-Bit Data and Traffic Control */
#define ANOMALY_05000282 (__SILICON_REVISION__ < 3)
-/* System MMR Write Is Stalled Indefinitely When Killed in a Particular Stage */
+/* System MMR Write Is Stalled Indefinitely when Killed in a Particular Stage */
#define ANOMALY_05000283 (__SILICON_REVISION__ < 3)
-/* New Feature: EMAC TX DMA Word Alignment (Not Available On Older Silicon) */
+/* TXDWA Bit in EMAC_SYSCTL Register Is Not Functional */
#define ANOMALY_05000285 (__SILICON_REVISION__ < 3)
/* SPORTs May Receive Bad Data If FIFOs Fill Up */
#define ANOMALY_05000288 (__SILICON_REVISION__ < 3)
@@ -112,25 +112,25 @@
#define ANOMALY_05000305 (__SILICON_REVISION__ < 3)
/* SCKELOW Bit Does Not Maintain State Through Hibernate */
#define ANOMALY_05000307 (__SILICON_REVISION__ < 3)
-/* Writing UART_THR while UART clock is disabled sends erroneous start bit */
+/* Writing UART_THR While UART Clock Is Disabled Sends Erroneous Start Bit */
#define ANOMALY_05000309 (__SILICON_REVISION__ < 3)
/* False Hardware Errors Caused by Fetches at the Boundary of Reserved Memory */
#define ANOMALY_05000310 (1)
-/* Errors When SSYNC, CSYNC, or Loads to LT, LB and LC Registers Are Interrupted */
+/* Errors when SSYNC, CSYNC, or Loads to LT, LB and LC Registers Are Interrupted */
#define ANOMALY_05000312 (1)
/* PPI Is Level-Sensitive on First Transfer In Single Frame Sync Modes */
#define ANOMALY_05000313 (1)
-/* Killed System MMR Write Completes Erroneously On Next System MMR Access */
+/* Killed System MMR Write Completes Erroneously on Next System MMR Access */
#define ANOMALY_05000315 (__SILICON_REVISION__ < 3)
-/* EMAC RMII mode: collisions occur in Full Duplex mode */
+/* EMAC RMII Mode: Collisions Occur in Full Duplex Mode */
#define ANOMALY_05000316 (__SILICON_REVISION__ < 3)
-/* EMAC RMII mode: TX frames in half duplex fail with status No Carrier */
+/* EMAC RMII Mode: TX Frames in Half Duplex Fail with Status "No Carrier" */
#define ANOMALY_05000321 (__SILICON_REVISION__ < 3)
-/* EMAC RMII mode at 10-Base-T speed: RX frames not received properly */
+/* EMAC RMII Mode at 10-Base-T Speed: RX Frames Not Received Properly */
#define ANOMALY_05000322 (1)
/* Ethernet MAC MDIO Reads Do Not Meet IEEE Specification */
#define ANOMALY_05000341 (__SILICON_REVISION__ >= 3)
-/* New Feature: UART Remains Enabled after UART Boot */
+/* UART Gets Disabled after UART Boot */
#define ANOMALY_05000350 (__SILICON_REVISION__ >= 3)
/* Regulator Programming Blocked when Hibernate Wakeup Source Remains Active */
#define ANOMALY_05000355 (1)
@@ -154,7 +154,7 @@
#define ANOMALY_05000426 (1)
/* IFLUSH Instruction at End of Hardware Loop Causes Infinite Stall */
#define ANOMALY_05000443 (1)
-/* False Hardware Error when RETI points to invalid memory */
+/* False Hardware Error when RETI Points to Invalid Memory */
#define ANOMALY_05000461 (1)
/* Anomalies that don't exist on this proc */
@@ -165,14 +165,17 @@
#define ANOMALY_05000158 (0)
#define ANOMALY_05000171 (0)
#define ANOMALY_05000179 (0)
+#define ANOMALY_05000182 (0)
#define ANOMALY_05000183 (0)
#define ANOMALY_05000198 (0)
+#define ANOMALY_05000202 (0)
#define ANOMALY_05000215 (0)
#define ANOMALY_05000220 (0)
#define ANOMALY_05000227 (0)
#define ANOMALY_05000230 (0)
#define ANOMALY_05000231 (0)
#define ANOMALY_05000233 (0)
+#define ANOMALY_05000234 (0)
#define ANOMALY_05000242 (0)
#define ANOMALY_05000248 (0)
#define ANOMALY_05000266 (0)
@@ -195,5 +198,7 @@
#define ANOMALY_05000448 (0)
#define ANOMALY_05000456 (0)
#define ANOMALY_05000450 (0)
+#define ANOMALY_05000465 (0)
+#define ANOMALY_05000467 (0)
#endif
diff --git a/arch/blackfin/mach-bf537/include/mach/blackfin.h b/arch/blackfin/mach-bf537/include/mach/blackfin.h
index 7d6069c..f5e5015 100644
--- a/arch/blackfin/mach-bf537/include/mach/blackfin.h
+++ b/arch/blackfin/mach-bf537/include/mach/blackfin.h
@@ -35,7 +35,6 @@
#define BF537_FAMILY
#include "bf537.h"
-#include "mem_map.h"
#include "defBF534.h"
#include "anomaly.h"
diff --git a/arch/blackfin/mach-bf537/include/mach/mem_map.h b/arch/blackfin/mach-bf537/include/mach/mem_map.h
index f9010c4..942f08d 100644
--- a/arch/blackfin/mach-bf537/include/mach/mem_map.h
+++ b/arch/blackfin/mach-bf537/include/mach/mem_map.h
@@ -1,38 +1,16 @@
/*
- * file: include/asm-blackfin/mach-bf537/mem_map.h
- * based on:
- * author:
+ * BF537 memory map
*
- * created:
- * description:
- * Memory MAP Common header file for blackfin BF537/6/4 of processors.
- * rev:
- *
- * modified:
- *
- * bugs: enter bugs at http://blackfin.uclinux.org/
- *
- * this program is free software; you can redistribute it and/or modify
- * it under the terms of the gnu general public license as published by
- * the free software foundation; either version 2, or (at your option)
- * any later version.
- *
- * this program is distributed in the hope that it will be useful,
- * but without any warranty; without even the implied warranty of
- * merchantability or fitness for a particular purpose. see the
- * gnu general public license for more details.
- *
- * you should have received a copy of the gnu general public license
- * along with this program; see the file copying.
- * if not, write to the free software foundation,
- * 59 temple place - suite 330, boston, ma 02111-1307, usa.
+ * Copyright 2004-2009 Analog Devices Inc.
+ * Licensed under the GPL-2 or later.
*/
-#ifndef _MEM_MAP_537_H_
-#define _MEM_MAP_537_H_
+#ifndef __BFIN_MACH_MEM_MAP_H__
+#define __BFIN_MACH_MEM_MAP_H__
-#define COREMMR_BASE 0xFFE00000 /* Core MMRs */
-#define SYSMMR_BASE 0xFFC00000 /* System MMRs */
+#ifndef __BFIN_MEM_MAP_H__
+# error "do not include mach/mem_map.h directly -- use asm/mem_map.h"
+#endif
/* Async Memory Banks */
#define ASYNC_BANK3_BASE 0x20300000 /* Async Bank 3 */
@@ -166,20 +144,4 @@
#endif
-/* Level 2 Memory - none */
-
-#define L2_START 0
-#define L2_LENGTH 0
-
-/* Scratch Pad Memory */
-
-#define L1_SCRATCH_START 0xFFB00000
-#define L1_SCRATCH_LENGTH 0x1000
-
-#define GET_PDA_SAFE(preg) \
- preg.l = _cpu_pda; \
- preg.h = _cpu_pda;
-
-#define GET_PDA(preg, dreg) GET_PDA_SAFE(preg)
-
-#endif /* _MEM_MAP_537_H_ */
+#endif
diff --git a/arch/blackfin/mach-bf538/include/mach/anomaly.h b/arch/blackfin/mach-bf538/include/mach/anomaly.h
index 175ca9e..c97acdf 100644
--- a/arch/blackfin/mach-bf538/include/mach/anomaly.h
+++ b/arch/blackfin/mach-bf538/include/mach/anomaly.h
@@ -30,13 +30,13 @@
# define ANOMALY_BF539 0
#endif
-/* Multi-issue instruction with dsp32shiftimm in slot1 and P-reg store in slot 2 not supported */
+/* Multi-Issue Instruction with dsp32shiftimm in slot1 and P-reg Store in slot2 Not Supported */
#define ANOMALY_05000074 (1)
/* DMA_RUN Bit Is Not Valid after a Peripheral Receive Channel DMA Stops */
#define ANOMALY_05000119 (1)
/* Rx.H Cannot Be Used to Access 16-bit System MMR Registers */
#define ANOMALY_05000122 (1)
-/* PPI Data Lengths Between 8 and 16 Do Not Zero Out Upper Bits */
+/* PPI Data Lengths between 8 and 16 Do Not Zero Out Upper Bits */
#define ANOMALY_05000166 (1)
/* PPI_COUNT Cannot Be Programmed to 0 in General Purpose TX or RX Modes */
#define ANOMALY_05000179 (1)
@@ -70,11 +70,11 @@
#define ANOMALY_05000277 (__SILICON_REVISION__ < 4)
/* Disabling Peripherals with DMA Running May Cause DMA System Instability */
#define ANOMALY_05000278 (__SILICON_REVISION__ < 4)
-/* False Hardware Error Exception When ISR Context Is Not Restored */
+/* False Hardware Error Exception when ISR Context Is Not Restored */
#define ANOMALY_05000281 (__SILICON_REVISION__ < 4)
/* Memory DMA Corruption with 32-Bit Data and Traffic Control */
#define ANOMALY_05000282 (__SILICON_REVISION__ < 4)
-/* System MMR Write Is Stalled Indefinitely When Killed in a Particular Stage */
+/* System MMR Write Is Stalled Indefinitely when Killed in a Particular Stage */
#define ANOMALY_05000283 (__SILICON_REVISION__ < 4)
/* SPORTs May Receive Bad Data If FIFOs Fill Up */
#define ANOMALY_05000288 (__SILICON_REVISION__ < 4)
@@ -92,11 +92,11 @@
#define ANOMALY_05000307 (__SILICON_REVISION__ < 4)
/* False Hardware Errors Caused by Fetches at the Boundary of Reserved Memory */
#define ANOMALY_05000310 (1)
-/* Errors When SSYNC, CSYNC, or Loads to LT, LB and LC Registers Are Interrupted */
+/* Errors when SSYNC, CSYNC, or Loads to LT, LB and LC Registers Are Interrupted */
#define ANOMALY_05000312 (__SILICON_REVISION__ < 5)
/* PPI Is Level-Sensitive on First Transfer In Single Frame Sync Modes */
#define ANOMALY_05000313 (__SILICON_REVISION__ < 4)
-/* Killed System MMR Write Completes Erroneously On Next System MMR Access */
+/* Killed System MMR Write Completes Erroneously on Next System MMR Access */
#define ANOMALY_05000315 (__SILICON_REVISION__ < 4)
/* PFx Glitch on Write to FIO_FLAG_D or FIO_FLAG_T */
#define ANOMALY_05000318 (ANOMALY_BF539 && __SILICON_REVISION__ < 4)
@@ -110,7 +110,7 @@
#define ANOMALY_05000371 (__SILICON_REVISION__ < 5)
/* Entering Hibernate State with Peripheral Wakeups Enabled Draws Excess Current */
#define ANOMALY_05000374 (__SILICON_REVISION__ == 4)
-/* New Feature: Open-Drain GPIO Outputs on PC1 and PC4 (Not Available on Older Silicon) */
+/* GPIO Pins PC1 and PC4 Can Function as Normal Outputs */
#define ANOMALY_05000375 (__SILICON_REVISION__ < 4)
/* SSYNC Stalls Processor when Executed from Non-Cacheable Memory */
#define ANOMALY_05000402 (__SILICON_REVISION__ < 4)
@@ -126,26 +126,32 @@
#define ANOMALY_05000436 (__SILICON_REVISION__ > 3)
/* IFLUSH Instruction at End of Hardware Loop Causes Infinite Stall */
#define ANOMALY_05000443 (1)
-/* False Hardware Error when RETI points to invalid memory */
+/* False Hardware Error when RETI Points to Invalid Memory */
#define ANOMALY_05000461 (1)
/* Anomalies that don't exist on this proc */
#define ANOMALY_05000099 (0)
#define ANOMALY_05000120 (0)
+#define ANOMALY_05000125 (0)
#define ANOMALY_05000149 (0)
#define ANOMALY_05000158 (0)
#define ANOMALY_05000171 (0)
+#define ANOMALY_05000182 (0)
#define ANOMALY_05000198 (0)
+#define ANOMALY_05000202 (0)
#define ANOMALY_05000215 (0)
#define ANOMALY_05000220 (0)
#define ANOMALY_05000227 (0)
#define ANOMALY_05000230 (0)
#define ANOMALY_05000231 (0)
+#define ANOMALY_05000234 (0)
#define ANOMALY_05000242 (0)
#define ANOMALY_05000248 (0)
#define ANOMALY_05000250 (0)
#define ANOMALY_05000254 (0)
+#define ANOMALY_05000257 (0)
#define ANOMALY_05000263 (0)
+#define ANOMALY_05000266 (0)
#define ANOMALY_05000274 (0)
#define ANOMALY_05000287 (0)
#define ANOMALY_05000305 (0)
@@ -166,5 +172,7 @@
#define ANOMALY_05000448 (0)
#define ANOMALY_05000456 (0)
#define ANOMALY_05000450 (0)
+#define ANOMALY_05000465 (0)
+#define ANOMALY_05000467 (0)
#endif
diff --git a/arch/blackfin/mach-bf538/include/mach/blackfin.h b/arch/blackfin/mach-bf538/include/mach/blackfin.h
index 6f62835..9496196 100644
--- a/arch/blackfin/mach-bf538/include/mach/blackfin.h
+++ b/arch/blackfin/mach-bf538/include/mach/blackfin.h
@@ -35,7 +35,6 @@
#define BF538_FAMILY
#include "bf538.h"
-#include "mem_map.h"
#include "defBF539.h"
#include "anomaly.h"
diff --git a/arch/blackfin/mach-bf538/include/mach/mem_map.h b/arch/blackfin/mach-bf538/include/mach/mem_map.h
index 7681196..aff00f4 100644
--- a/arch/blackfin/mach-bf538/include/mach/mem_map.h
+++ b/arch/blackfin/mach-bf538/include/mach/mem_map.h
@@ -1,38 +1,16 @@
/*
- * File: include/asm-blackfin/mach-bf538/mem_map.h
- * Based on:
- * Author:
+ * BF538 memory map
*
- * Created:
- * Description:
- *
- * Rev:
- *
- * Modified:
- *
- * Bugs: Enter bugs at http://blackfin.uclinux.org/
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; see the file COPYING.
- * If not, write to the Free Software Foundation,
- * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ * Copyright 2004-2009 Analog Devices Inc.
+ * Licensed under the GPL-2 or later.
*/
-#ifndef _MEM_MAP_538_H_
-#define _MEM_MAP_538_H_
+#ifndef __BFIN_MACH_MEM_MAP_H__
+#define __BFIN_MACH_MEM_MAP_H__
-#define COREMMR_BASE 0xFFE00000 /* Core MMRs */
-#define SYSMMR_BASE 0xFFC00000 /* System MMRs */
+#ifndef __BFIN_MEM_MAP_H__
+# error "do not include mach/mem_map.h directly -- use asm/mem_map.h"
+#endif
/* Async Memory Banks */
#define ASYNC_BANK3_BASE 0x20300000 /* Async Bank 3 */
@@ -93,21 +71,4 @@
#define BFIN_DSUPBANKS 0
#endif /*CONFIG_BFIN_DCACHE*/
-
-/* Level 2 Memory - none */
-
-#define L2_START 0
-#define L2_LENGTH 0
-
-/* Scratch Pad Memory */
-
-#define L1_SCRATCH_START 0xFFB00000
-#define L1_SCRATCH_LENGTH 0x1000
-
-#define GET_PDA_SAFE(preg) \
- preg.l = _cpu_pda; \
- preg.h = _cpu_pda;
-
-#define GET_PDA(preg, dreg) GET_PDA_SAFE(preg)
-
-#endif /* _MEM_MAP_538_H_ */
+#endif
diff --git a/arch/blackfin/mach-bf548/boards/ezkit.c b/arch/blackfin/mach-bf548/boards/ezkit.c
index 805a57b..81f5b95 100644
--- a/arch/blackfin/mach-bf548/boards/ezkit.c
+++ b/arch/blackfin/mach-bf548/boards/ezkit.c
@@ -76,7 +76,6 @@
static struct isp1760_platform_data isp1760_priv = {
.is_isp1761 = 0,
- .port1_disable = 0,
.bus_width_16 = 1,
.port1_otg = 0,
.analog_oc = 0,
diff --git a/arch/blackfin/mach-bf548/include/mach/anomaly.h b/arch/blackfin/mach-bf548/include/mach/anomaly.h
index c510ae6..18a4cd24 100644
--- a/arch/blackfin/mach-bf548/include/mach/anomaly.h
+++ b/arch/blackfin/mach-bf548/include/mach/anomaly.h
@@ -18,7 +18,7 @@
# error will not work on BF548 silicon version 0.0, or 0.1
#endif
-/* Multi-issue instruction with dsp32shiftimm in slot1 and P-reg store in slot 2 not supported */
+/* Multi-Issue Instruction with dsp32shiftimm in slot1 and P-reg Store in slot2 Not Supported */
#define ANOMALY_05000074 (1)
/* DMA_RUN Bit Is Not Valid after a Peripheral Receive Channel DMA Stops */
#define ANOMALY_05000119 (1)
@@ -30,17 +30,17 @@
#define ANOMALY_05000265 (1)
/* Certain Data Cache Writethrough Modes Fail for Vddint <= 0.9V */
#define ANOMALY_05000272 (1)
-/* False Hardware Error Exception When ISR Context Is Not Restored */
+/* False Hardware Error Exception when ISR Context Is Not Restored */
#define ANOMALY_05000281 (__SILICON_REVISION__ < 1)
/* SSYNCs After Writes To CAN/DMA MMR Registers Are Not Always Handled Correctly */
#define ANOMALY_05000304 (__SILICON_REVISION__ < 1)
/* False Hardware Errors Caused by Fetches at the Boundary of Reserved Memory */
#define ANOMALY_05000310 (1)
-/* Errors When SSYNC, CSYNC, or Loads to LT, LB and LC Registers Are Interrupted */
+/* Errors when SSYNC, CSYNC, or Loads to LT, LB and LC Registers Are Interrupted */
#define ANOMALY_05000312 (__SILICON_REVISION__ < 1)
/* TWI Slave Boot Mode Is Not Functional */
#define ANOMALY_05000324 (__SILICON_REVISION__ < 1)
-/* External FIFO Boot Mode Is Not Functional */
+/* FIFO Boot Mode Not Functional */
#define ANOMALY_05000325 (__SILICON_REVISION__ < 2)
/* Data Lost When Core and DMA Accesses Are Made to the USB FIFO Simultaneously */
#define ANOMALY_05000327 (__SILICON_REVISION__ < 1)
@@ -178,8 +178,12 @@
#define ANOMALY_05000450 (1)
/* USB Receive Interrupt Is Not Generated in DMA Mode 1 */
#define ANOMALY_05000456 (__SILICON_REVISION__ < 3)
-/* False Hardware Error when RETI points to invalid memory */
+/* False Hardware Error when RETI Points to Invalid Memory */
#define ANOMALY_05000461 (1)
+/* USB Rx DMA hang */
+#define ANOMALY_05000465 (1)
+/* Possible RX data corruption when control & data EP FIFOs are accessed via the core */
+#define ANOMALY_05000467 (1)
/* Anomalies that don't exist on this proc */
#define ANOMALY_05000099 (0)
@@ -189,30 +193,36 @@
#define ANOMALY_05000158 (0)
#define ANOMALY_05000171 (0)
#define ANOMALY_05000179 (0)
+#define ANOMALY_05000182 (0)
#define ANOMALY_05000183 (0)
#define ANOMALY_05000198 (0)
+#define ANOMALY_05000202 (0)
#define ANOMALY_05000215 (0)
#define ANOMALY_05000220 (0)
#define ANOMALY_05000227 (0)
#define ANOMALY_05000230 (0)
#define ANOMALY_05000231 (0)
#define ANOMALY_05000233 (0)
+#define ANOMALY_05000234 (0)
#define ANOMALY_05000242 (0)
#define ANOMALY_05000244 (0)
#define ANOMALY_05000248 (0)
#define ANOMALY_05000250 (0)
#define ANOMALY_05000254 (0)
+#define ANOMALY_05000257 (0)
#define ANOMALY_05000261 (0)
#define ANOMALY_05000263 (0)
#define ANOMALY_05000266 (0)
#define ANOMALY_05000273 (0)
#define ANOMALY_05000274 (0)
#define ANOMALY_05000278 (0)
+#define ANOMALY_05000283 (0)
#define ANOMALY_05000287 (0)
#define ANOMALY_05000301 (0)
#define ANOMALY_05000305 (0)
#define ANOMALY_05000307 (0)
#define ANOMALY_05000311 (0)
+#define ANOMALY_05000315 (0)
#define ANOMALY_05000323 (0)
#define ANOMALY_05000362 (1)
#define ANOMALY_05000363 (0)
diff --git a/arch/blackfin/mach-bf548/include/mach/blackfin.h b/arch/blackfin/mach-bf548/include/mach/blackfin.h
index cf6c150..6b97396 100644
--- a/arch/blackfin/mach-bf548/include/mach/blackfin.h
+++ b/arch/blackfin/mach-bf548/include/mach/blackfin.h
@@ -33,7 +33,6 @@
#define _MACH_BLACKFIN_H_
#include "bf548.h"
-#include "mem_map.h"
#include "anomaly.h"
#ifdef CONFIG_BF542
diff --git a/arch/blackfin/mach-bf548/include/mach/mem_map.h b/arch/blackfin/mach-bf548/include/mach/mem_map.h
index 70b9c11..caac2df 100644
--- a/arch/blackfin/mach-bf548/include/mach/mem_map.h
+++ b/arch/blackfin/mach-bf548/include/mach/mem_map.h
@@ -1,38 +1,16 @@
/*
- * file: include/asm-blackfin/mach-bf548/mem_map.h
- * based on:
- * author:
+ * BF548 memory map
*
- * created:
- * description:
- * Memory MAP Common header file for blackfin BF537/6/4 of processors.
- * rev:
- *
- * modified:
- *
- * bugs: enter bugs at http://blackfin.uclinux.org/
- *
- * this program is free software; you can redistribute it and/or modify
- * it under the terms of the gnu general public license as published by
- * the free software foundation; either version 2, or (at your option)
- * any later version.
- *
- * this program is distributed in the hope that it will be useful,
- * but without any warranty; without even the implied warranty of
- * merchantability or fitness for a particular purpose. see the
- * gnu general public license for more details.
- *
- * you should have received a copy of the gnu general public license
- * along with this program; see the file copying.
- * if not, write to the free software foundation,
- * 59 temple place - suite 330, boston, ma 02111-1307, usa.
+ * Copyright 2004-2009 Analog Devices Inc.
+ * Licensed under the GPL-2 or later.
*/
-#ifndef _MEM_MAP_548_H_
-#define _MEM_MAP_548_H_
+#ifndef __BFIN_MACH_MEM_MAP_H__
+#define __BFIN_MACH_MEM_MAP_H__
-#define COREMMR_BASE 0xFFE00000 /* Core MMRs */
-#define SYSMMR_BASE 0xFFC00000 /* System MMRs */
+#ifndef __BFIN_MEM_MAP_H__
+# error "do not include mach/mem_map.h directly -- use asm/mem_map.h"
+#endif
/* Async Memory Banks */
#define ASYNC_BANK3_BASE 0x2C000000 /* Async Bank 3 */
@@ -103,15 +81,4 @@
# define L2_LENGTH 0x20000
#endif
-/* Scratch Pad Memory */
-
-#define L1_SCRATCH_START 0xFFB00000
-#define L1_SCRATCH_LENGTH 0x1000
-
-#define GET_PDA_SAFE(preg) \
- preg.l = _cpu_pda; \
- preg.h = _cpu_pda;
-
-#define GET_PDA(preg, dreg) GET_PDA_SAFE(preg)
-
-#endif/* _MEM_MAP_548_H_ */
+#endif
diff --git a/arch/blackfin/mach-bf561/boards/ezkit.c b/arch/blackfin/mach-bf561/boards/ezkit.c
index b5ef7ff..4df904f 100644
--- a/arch/blackfin/mach-bf561/boards/ezkit.c
+++ b/arch/blackfin/mach-bf561/boards/ezkit.c
@@ -62,7 +62,6 @@
static struct isp1760_platform_data isp1760_priv = {
.is_isp1761 = 0,
- .port1_disable = 0,
.bus_width_16 = 1,
.port1_otg = 0,
.analog_oc = 0,
diff --git a/arch/blackfin/mach-bf561/include/mach/anomaly.h b/arch/blackfin/mach-bf561/include/mach/anomaly.h
index dccd396..94b8e27 100644
--- a/arch/blackfin/mach-bf561/include/mach/anomaly.h
+++ b/arch/blackfin/mach-bf561/include/mach/anomaly.h
@@ -18,19 +18,19 @@
# error will not work on BF561 silicon version 0.0, 0.1, 0.2, or 0.4
#endif
-/* Multi-issue instruction with dsp32shiftimm in slot1 and P-reg store in slot 2 not supported */
+/* Multi-Issue Instruction with dsp32shiftimm in slot1 and P-reg Store in slot2 Not Supported */
#define ANOMALY_05000074 (1)
/* UART Line Status Register (UART_LSR) Bits Are Not Updated at the Same Time */
#define ANOMALY_05000099 (__SILICON_REVISION__ < 5)
-/* Trace Buffers may record discontinuities into emulation mode and/or exception, NMI, reset handlers */
+/* Trace Buffers May Contain Errors in Emulation Mode and/or Exception, NMI, Reset Handlers */
#define ANOMALY_05000116 (__SILICON_REVISION__ < 3)
-/* Testset instructions restricted to 32-bit aligned memory locations */
+/* TESTSET Instructions Restricted to 32-Bit Aligned Memory Locations */
#define ANOMALY_05000120 (1)
/* Rx.H Cannot Be Used to Access 16-bit System MMR Registers */
#define ANOMALY_05000122 (1)
-/* Erroneous exception when enabling cache */
+/* Erroneous Exception when Enabling Cache */
#define ANOMALY_05000125 (__SILICON_REVISION__ < 3)
-/* Signbits instruction not functional under certain conditions */
+/* SIGNBITS Instruction Not Functional under Certain Conditions */
#define ANOMALY_05000127 (1)
/* Two bits in the Watchpoint Status Register (WPSTAT) are swapped */
#define ANOMALY_05000134 (__SILICON_REVISION__ < 3)
@@ -40,7 +40,7 @@
#define ANOMALY_05000136 (__SILICON_REVISION__ < 3)
/* Allowing the SPORT RX FIFO to fill will cause an overflow */
#define ANOMALY_05000140 (__SILICON_REVISION__ < 3)
-/* An Infinite Stall occurs with a particular sequence of consecutive dual dag events */
+/* Infinite Stall may occur with a particular sequence of consecutive dual dag events */
#define ANOMALY_05000141 (__SILICON_REVISION__ < 3)
/* Interrupts may be lost when a programmable input flag is configured to be edge sensitive */
#define ANOMALY_05000142 (__SILICON_REVISION__ < 3)
@@ -52,7 +52,7 @@
#define ANOMALY_05000146 (__SILICON_REVISION__ < 3)
/* Source MDMA descriptor may stop with a DMA Error near beginning of descriptor fetch */
#define ANOMALY_05000147 (__SILICON_REVISION__ < 3)
-/* IMDMA S1/D1 channel may stall */
+/* IMDMA S1/D1 Channel May Stall */
#define ANOMALY_05000149 (1)
/* DMA engine may lose data due to incorrect handshaking */
#define ANOMALY_05000150 (__SILICON_REVISION__ < 3)
@@ -66,7 +66,7 @@
#define ANOMALY_05000154 (__SILICON_REVISION__ < 3)
/* Timers in PWM-Out Mode with PPI GP Receive (Input) Mode with 0 Frame Syncs */
#define ANOMALY_05000156 (__SILICON_REVISION__ < 4)
-/* Killed 32-bit MMR write leads to next system MMR access thinking it should be 32-bit */
+/* Killed 32-Bit MMR Write Leads to Next System MMR Access Thinking It Should Be 32-Bit */
#define ANOMALY_05000157 (__SILICON_REVISION__ < 3)
/* DMA Lock-up at CCLK to SCLK ratios of 4:1, 2:1, or 1:1 */
#define ANOMALY_05000159 (__SILICON_REVISION__ < 3)
@@ -76,17 +76,17 @@
#define ANOMALY_05000161 (__SILICON_REVISION__ < 3)
/* DMEM_CONTROL<12> is not set on Reset */
#define ANOMALY_05000162 (__SILICON_REVISION__ < 3)
-/* SPORT transmit data is not gated by external frame sync in certain conditions */
+/* SPORT Transmit Data Is Not Gated by External Frame Sync in Certain Conditions */
#define ANOMALY_05000163 (__SILICON_REVISION__ < 3)
-/* PPI Data Lengths Between 8 and 16 Do Not Zero Out Upper Bits */
+/* PPI Data Lengths between 8 and 16 Do Not Zero Out Upper Bits */
#define ANOMALY_05000166 (1)
/* Turning SPORTs on while External Frame Sync Is Active May Corrupt Data */
#define ANOMALY_05000167 (1)
-/* SDRAM auto-refresh and subsequent Power Ups */
+/* Undefined Behavior when Power-Up Sequence Is Issued to SDRAM during Auto-Refresh */
#define ANOMALY_05000168 (__SILICON_REVISION__ < 5)
-/* DATA CPLB page miss can result in lost write-through cache data writes */
+/* DATA CPLB Page Miss Can Result in Lost Write-Through Data Cache Writes */
#define ANOMALY_05000169 (__SILICON_REVISION__ < 5)
-/* Boot-ROM code modifies SICA_IWRx wakeup registers */
+/* Boot-ROM Modifies SICA_IWRx Wakeup Registers */
#define ANOMALY_05000171 (__SILICON_REVISION__ < 5)
/* DSPID register values incorrect */
#define ANOMALY_05000172 (__SILICON_REVISION__ < 3)
@@ -96,29 +96,29 @@
#define ANOMALY_05000174 (__SILICON_REVISION__ < 5)
/* Overlapping Sequencer and Memory Stalls */
#define ANOMALY_05000175 (__SILICON_REVISION__ < 5)
-/* Multiplication of (-1) by (-1) followed by an accumulator saturation */
+/* Overflow Bit Asserted when Multiplication of -1 by -1 Followed by Accumulator Saturation */
#define ANOMALY_05000176 (__SILICON_REVISION__ < 5)
/* PPI_COUNT Cannot Be Programmed to 0 in General Purpose TX or RX Modes */
#define ANOMALY_05000179 (__SILICON_REVISION__ < 5)
/* PPI_DELAY Not Functional in PPI Modes with 0 Frame Syncs */
#define ANOMALY_05000180 (1)
-/* Disabling the PPI resets the PPI configuration registers */
+/* Disabling the PPI Resets the PPI Configuration Registers */
#define ANOMALY_05000181 (__SILICON_REVISION__ < 5)
-/* IMDMA does not operate to full speed for 600MHz and higher devices */
+/* Internal Memory DMA Does Not Operate at Full Speed */
#define ANOMALY_05000182 (1)
-/* Timer Pin limitations for PPI TX Modes with External Frame Syncs */
+/* Timer Pin Limitations for PPI TX Modes with External Frame Syncs */
#define ANOMALY_05000184 (__SILICON_REVISION__ < 5)
-/* PPI TX Mode with 2 External Frame Syncs */
+/* Early PPI Transmit when FS1 Asserts before FS2 in TX Mode with 2 External Frame Syncs */
#define ANOMALY_05000185 (__SILICON_REVISION__ < 5)
-/* PPI packing with Data Length greater than 8 bits (not a meaningful mode) */
+/* Upper PPI Pins Driven when PPI Packing Enabled and Data Length >8 Bits */
#define ANOMALY_05000186 (__SILICON_REVISION__ < 5)
/* IMDMA Corrupted Data after a Halt */
#define ANOMALY_05000187 (1)
/* IMDMA Restrictions on Descriptor and Buffer Placement in Memory */
#define ANOMALY_05000188 (__SILICON_REVISION__ < 5)
-/* False Protection Exceptions */
+/* False Protection Exceptions when Speculative Fetch Is Cancelled */
#define ANOMALY_05000189 (__SILICON_REVISION__ < 5)
-/* PPI not functional at core voltage < 1Volt */
+/* PPI Not Functional at Core Voltage < 1Volt */
#define ANOMALY_05000190 (1)
/* PPI does not invert the Driving PPICLK edge in Transmit Modes */
#define ANOMALY_05000191 (__SILICON_REVISION__ < 3)
@@ -126,7 +126,7 @@
#define ANOMALY_05000193 (__SILICON_REVISION__ < 5)
/* Restarting SPORT in Specific Modes May Cause Data Corruption */
#define ANOMALY_05000194 (__SILICON_REVISION__ < 5)
-/* Failing MMR Accesses When Stalled by Preceding Memory Read */
+/* Failing MMR Accesses when Preceding Memory Read Stalls */
#define ANOMALY_05000198 (__SILICON_REVISION__ < 5)
/* Current DMA Address Shows Wrong Value During Carry Fix */
#define ANOMALY_05000199 (__SILICON_REVISION__ < 5)
@@ -134,9 +134,9 @@
#define ANOMALY_05000200 (__SILICON_REVISION__ < 5)
/* Possible Infinite Stall with Specific Dual-DAG Situation */
#define ANOMALY_05000202 (__SILICON_REVISION__ < 5)
-/* Incorrect data read with write-through cache and allocate cache lines on reads only mode */
+/* Incorrect Data Read with Writethrough "Allocate Cache Lines on Reads Only" Cache Mode */
#define ANOMALY_05000204 (__SILICON_REVISION__ < 5)
-/* Specific sequence that can cause DMA error or DMA stopping */
+/* Specific Sequence that Can Cause DMA Error or DMA Stopping */
#define ANOMALY_05000205 (__SILICON_REVISION__ < 5)
/* Recovery from "Brown-Out" Condition */
#define ANOMALY_05000207 (__SILICON_REVISION__ < 5)
@@ -158,7 +158,7 @@
#define ANOMALY_05000230 (__SILICON_REVISION__ < 5)
/* UART STB Bit Incorrectly Affects Receiver Setting */
#define ANOMALY_05000231 (__SILICON_REVISION__ < 5)
-/* SPORT data transmit lines are incorrectly driven in multichannel mode */
+/* SPORT Data Transmit Lines Are Incorrectly Driven in Multichannel Mode */
#define ANOMALY_05000232 (__SILICON_REVISION__ < 5)
/* DF Bit in PLL_CTL Register Does Not Respond to Hardware Reset */
#define ANOMALY_05000242 (__SILICON_REVISION__ < 5)
@@ -166,7 +166,7 @@
#define ANOMALY_05000244 (__SILICON_REVISION__ < 5)
/* False Hardware Error from an Access in the Shadow of a Conditional Branch */
#define ANOMALY_05000245 (__SILICON_REVISION__ < 5)
-/* TESTSET operation forces stall on the other core */
+/* TESTSET Operation Forces Stall on the Other Core */
#define ANOMALY_05000248 (__SILICON_REVISION__ < 5)
/* Incorrect Bit Shift of Data Word in Multichannel (TDM) Mode in Certain Conditions */
#define ANOMALY_05000250 (__SILICON_REVISION__ > 2 && __SILICON_REVISION__ < 5)
@@ -192,9 +192,9 @@
#define ANOMALY_05000264 (__SILICON_REVISION__ < 5)
/* Sensitivity To Noise with Slow Input Edge Rates on External SPORT TX and RX Clocks */
#define ANOMALY_05000265 (__SILICON_REVISION__ < 5)
-/* IMDMA destination IRQ status must be read prior to using IMDMA */
+/* IMDMA Destination IRQ Status Must Be Read Prior to Using IMDMA */
#define ANOMALY_05000266 (__SILICON_REVISION__ > 3)
-/* IMDMA may corrupt data under certain conditions */
+/* IMDMA May Corrupt Data under Certain Conditions */
#define ANOMALY_05000267 (1)
/* High I/O Activity Causes Output Voltage of Internal Voltage Regulator (Vddint) to Increase */
#define ANOMALY_05000269 (1)
@@ -202,7 +202,7 @@
#define ANOMALY_05000270 (1)
/* Certain Data Cache Writethrough Modes Fail for Vddint <= 0.9V */
#define ANOMALY_05000272 (1)
-/* Data cache write back to external synchronous memory may be lost */
+/* Data Cache Write Back to External Synchronous Memory May Be Lost */
#define ANOMALY_05000274 (1)
/* PPI Timing and Sampling Information Updates */
#define ANOMALY_05000275 (__SILICON_REVISION__ > 2)
@@ -212,17 +212,17 @@
#define ANOMALY_05000277 (__SILICON_REVISION__ < 3)
/* Disabling Peripherals with DMA Running May Cause DMA System Instability */
#define ANOMALY_05000278 (__SILICON_REVISION__ < 5)
-/* False Hardware Error Exception When ISR Context Is Not Restored */
+/* False Hardware Error Exception when ISR Context Is Not Restored */
#define ANOMALY_05000281 (__SILICON_REVISION__ < 5)
-/* System MMR Write Is Stalled Indefinitely When Killed in a Particular Stage */
+/* System MMR Write Is Stalled Indefinitely when Killed in a Particular Stage */
#define ANOMALY_05000283 (1)
-/* A read will receive incorrect data under certain conditions */
+/* Reads Will Receive Incorrect Data under Certain Conditions */
#define ANOMALY_05000287 (__SILICON_REVISION__ < 5)
/* SPORTs May Receive Bad Data If FIFOs Fill Up */
#define ANOMALY_05000288 (__SILICON_REVISION__ < 5)
/* Memory-To-Memory DMA Source/Destination Descriptors Must Be in Same Memory Space */
#define ANOMALY_05000301 (1)
-/* SSYNCs After Writes To DMA MMR Registers May Not Be Handled Correctly */
+/* SSYNCs after Writes to DMA MMR Registers May Not Be Handled Correctly */
#define ANOMALY_05000302 (1)
/* SPORT_HYS Bit in PLL_CTL Register Is Not Functional */
#define ANOMALY_05000305 (__SILICON_REVISION__ < 5)
@@ -230,25 +230,25 @@
#define ANOMALY_05000307 (__SILICON_REVISION__ < 5)
/* False Hardware Errors Caused by Fetches at the Boundary of Reserved Memory */
#define ANOMALY_05000310 (1)
-/* Errors When SSYNC, CSYNC, or Loads to LT, LB and LC Registers Are Interrupted */
+/* Errors when SSYNC, CSYNC, or Loads to LT, LB and LC Registers Are Interrupted */
#define ANOMALY_05000312 (1)
/* PPI Is Level-Sensitive on First Transfer In Single Frame Sync Modes */
#define ANOMALY_05000313 (1)
-/* Killed System MMR Write Completes Erroneously On Next System MMR Access */
+/* Killed System MMR Write Completes Erroneously on Next System MMR Access */
#define ANOMALY_05000315 (1)
-/* PF2 Output Remains Asserted After SPI Master Boot */
+/* PF2 Output Remains Asserted after SPI Master Boot */
#define ANOMALY_05000320 (__SILICON_REVISION__ > 3)
-/* Erroneous GPIO Flag Pin Operations Under Specific Sequences */
+/* Erroneous GPIO Flag Pin Operations under Specific Sequences */
#define ANOMALY_05000323 (1)
-/* SPORT Secondary Receive Channel Not Functional When Word Length Exceeds 16 Bits */
+/* SPORT Secondary Receive Channel Not Functional when Word Length >16 Bits */
#define ANOMALY_05000326 (__SILICON_REVISION__ > 3)
-/* New Feature: 24-Bit SPI Boot Mode Support (Not Available On Older Silicon) */
+/* 24-Bit SPI Boot Mode Is Not Functional */
#define ANOMALY_05000331 (__SILICON_REVISION__ < 5)
-/* New Feature: Slave SPI Boot Mode Supported (Not Available On Older Silicon) */
+/* Slave SPI Boot Mode Is Not Functional */
#define ANOMALY_05000332 (__SILICON_REVISION__ < 5)
-/* Flag Data Register Writes One SCLK Cycle After Edge Is Detected May Clear Interrupt Status */
+/* Flag Data Register Writes One SCLK Cycle after Edge Is Detected May Clear Interrupt Status */
#define ANOMALY_05000333 (__SILICON_REVISION__ < 5)
-/* New Feature: Additional PPI Frame Sync Sampling Options (Not Available on Older Silicon) */
+/* ALT_TIMING Bit in PLL_CTL Register Is Not Functional */
#define ANOMALY_05000339 (__SILICON_REVISION__ < 5)
/* Memory DMA FIFO Causes Throughput Degradation on Writes to External Memory */
#define ANOMALY_05000343 (__SILICON_REVISION__ < 5)
@@ -276,7 +276,7 @@
#define ANOMALY_05000428 (__SILICON_REVISION__ > 3)
/* IFLUSH Instruction at End of Hardware Loop Causes Infinite Stall */
#define ANOMALY_05000443 (1)
-/* False Hardware Error when RETI points to invalid memory */
+/* False Hardware Error when RETI Points to Invalid Memory */
#define ANOMALY_05000461 (1)
/* Anomalies that don't exist on this proc */
@@ -284,6 +284,7 @@
#define ANOMALY_05000158 (0)
#define ANOMALY_05000183 (0)
#define ANOMALY_05000233 (0)
+#define ANOMALY_05000234 (0)
#define ANOMALY_05000273 (0)
#define ANOMALY_05000311 (0)
#define ANOMALY_05000353 (1)
@@ -298,5 +299,7 @@
#define ANOMALY_05000448 (0)
#define ANOMALY_05000456 (0)
#define ANOMALY_05000450 (0)
+#define ANOMALY_05000465 (0)
+#define ANOMALY_05000467 (0)
#endif
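The anomaly macros above expand to compile-time constants (either 0/1 or a comparison on __SILICON_REVISION__), so workaround code can be gated with no runtime cost on unaffected silicon. A minimal sketch of how such a gate is typically consumed, assuming the header is pulled in via asm/blackfin.h; the helper name is illustrative only:

#include <asm/blackfin.h>	/* brings in mach/anomaly.h */
#include <linux/bug.h>

static inline void example_testset(volatile unsigned long *addr)
{
	/*
	 * ANOMALY_05000120: TESTSET instructions are restricted to
	 * 32-bit aligned locations.  Because the macro is a constant,
	 * the compiler drops this check entirely on parts where the
	 * anomaly does not apply.
	 */
	if (ANOMALY_05000120)
		BUG_ON((unsigned long)addr & 0x3);
}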
diff --git a/arch/blackfin/mach-bf561/include/mach/blackfin.h b/arch/blackfin/mach-bf561/include/mach/blackfin.h
index f79f6626..8be3135 100644
--- a/arch/blackfin/mach-bf561/include/mach/blackfin.h
+++ b/arch/blackfin/mach-bf561/include/mach/blackfin.h
@@ -34,7 +34,6 @@
#define BF561_FAMILY
#include "bf561.h"
-#include "mem_map.h"
#include "defBF561.h"
#include "anomaly.h"
diff --git a/arch/blackfin/mach-bf561/include/mach/mem_map.h b/arch/blackfin/mach-bf561/include/mach/mem_map.h
index 419dffd..a63e15c 100644
--- a/arch/blackfin/mach-bf561/include/mach/mem_map.h
+++ b/arch/blackfin/mach-bf561/include/mach/mem_map.h
@@ -1,13 +1,16 @@
/*
- * Memory MAP
- * Common header file for blackfin BF561 of processors.
+ * BF561 memory map
+ *
+ * Copyright 2004-2009 Analog Devices Inc.
+ * Licensed under the GPL-2 or later.
*/
-#ifndef _MEM_MAP_561_H_
-#define _MEM_MAP_561_H_
+#ifndef __BFIN_MACH_MEM_MAP_H__
+#define __BFIN_MACH_MEM_MAP_H__
-#define COREMMR_BASE 0xFFE00000 /* Core MMRs */
-#define SYSMMR_BASE 0xFFC00000 /* System MMRs */
+#ifndef __BFIN_MEM_MAP_H__
+# error "do not include mach/mem_map.h directly -- use asm/mem_map.h"
+#endif
/* Async Memory Banks */
#define ASYNC_BANK3_BASE 0x2C000000 /* Async Bank 3 */
@@ -82,9 +85,6 @@
#define COREA_L1_SCRATCH_START 0xFFB00000
#define COREB_L1_SCRATCH_START 0xFF700000
-#define L1_SCRATCH_START COREA_L1_SCRATCH_START
-#define L1_SCRATCH_LENGTH 0x1000
-
#ifdef __ASSEMBLY__
/*
@@ -155,14 +155,42 @@
dreg = ROT dreg BY -1; \
dreg = CC;
-#else
-#define GET_PDA_SAFE(preg) \
- preg.l = _cpu_pda; \
- preg.h = _cpu_pda;
+static inline unsigned long get_l1_scratch_start_cpu(int cpu)
+{
+ return cpu ? COREB_L1_SCRATCH_START : COREA_L1_SCRATCH_START;
+}
+static inline unsigned long get_l1_code_start_cpu(int cpu)
+{
+ return cpu ? COREB_L1_CODE_START : COREA_L1_CODE_START;
+}
+static inline unsigned long get_l1_data_a_start_cpu(int cpu)
+{
+ return cpu ? COREB_L1_DATA_A_START : COREA_L1_DATA_A_START;
+}
+static inline unsigned long get_l1_data_b_start_cpu(int cpu)
+{
+ return cpu ? COREB_L1_DATA_B_START : COREA_L1_DATA_B_START;
+}
-#define GET_PDA(preg, dreg) GET_PDA_SAFE(preg)
+static inline unsigned long get_l1_scratch_start(void)
+{
+ return get_l1_scratch_start_cpu(blackfin_core_id());
+}
+static inline unsigned long get_l1_code_start(void)
+{
+ return get_l1_code_start_cpu(blackfin_core_id());
+}
+static inline unsigned long get_l1_data_a_start(void)
+{
+ return get_l1_data_a_start_cpu(blackfin_core_id());
+}
+static inline unsigned long get_l1_data_b_start(void)
+{
+ return get_l1_data_b_start_cpu(blackfin_core_id());
+}
+
#endif /* CONFIG_SMP */
#endif /* __ASSEMBLY__ */
-#endif /* _MEM_MAP_533_H_ */
+#endif
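With the per-core helpers above, callers no longer need to hard-code the COREA_*/COREB_* addresses. A minimal sketch of a caller, assuming a configuration where blackfin_core_id() is available; the function name and message are illustrative only:

#include <linux/kernel.h>

static void __init example_report_l1(unsigned int cpu)
{
	pr_info("CPU%u: L1 code %#lx, data A %#lx, scratch %#lx\n",
		cpu,
		get_l1_code_start_cpu(cpu),
		get_l1_data_a_start_cpu(cpu),
		get_l1_scratch_start_cpu(cpu));
}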
diff --git a/arch/blackfin/mach-common/arch_checks.c b/arch/blackfin/mach-common/arch_checks.c
index da93d92..5998d86 100644
--- a/arch/blackfin/mach-common/arch_checks.c
+++ b/arch/blackfin/mach-common/arch_checks.c
@@ -74,7 +74,7 @@
/* if 220 exists, can not set External Memory WB and L2 not_cached, either External Memory not_cached and L2 WB */
#if ANOMALY_05000220 && \
- ((defined(CONFIG_BFIN_WB) && defined(CONFIG_BFIN_L2_NOT_CACHED)) || \
- (!defined(CONFIG_BFIN_DCACHE) && defined(CONFIG_BFIN_L2_WB)))
+ ((defined(CONFIG_BFIN_EXTMEM_WRITEBACK) && !defined(CONFIG_BFIN_L2_DCACHEABLE)) || \
+ (!defined(CONFIG_BFIN_EXTMEM_DCACHEABLE) && defined(CONFIG_BFIN_L2_WRITEBACK)))
# error You are exposing Anomaly 220 in this config, either config L2 as Write Through, or make External Memory WB.
#endif
diff --git a/arch/blackfin/mach-common/cpufreq.c b/arch/blackfin/mach-common/cpufreq.c
index 70e3411..85c6580 100644
--- a/arch/blackfin/mach-common/cpufreq.c
+++ b/arch/blackfin/mach-common/cpufreq.c
@@ -141,7 +141,7 @@
sclk = get_sclk() / 1000;
#if ANOMALY_05000273 || ANOMALY_05000274 || \
- (!defined(CONFIG_BF54x) && defined(CONFIG_BFIN_DCACHE))
+ (!defined(CONFIG_BF54x) && defined(CONFIG_BFIN_EXTMEM_DCACHEABLE))
min_cclk = sclk * 2;
#else
min_cclk = sclk;
diff --git a/arch/blackfin/mach-common/entry.S b/arch/blackfin/mach-common/entry.S
index 31fa313..5a4e7c7 100644
--- a/arch/blackfin/mach-common/entry.S
+++ b/arch/blackfin/mach-common/entry.S
@@ -1609,6 +1609,7 @@
.long _sys_preadv
.long _sys_pwritev
.long _sys_rt_tgsigqueueinfo
+ .long _sys_perf_counter_open
.rept NR_syscalls-(.-_sys_call_table)/4
.long _sys_ni_syscall
diff --git a/arch/blackfin/mach-common/ints-priority.c b/arch/blackfin/mach-common/ints-priority.c
index af70f09..b421501 100644
--- a/arch/blackfin/mach-common/ints-priority.c
+++ b/arch/blackfin/mach-common/ints-priority.c
@@ -1052,35 +1052,34 @@
set_irq_chained_handler(irq, bfin_demux_error_irq);
break;
#endif
-#if defined(CONFIG_TICKSOURCE_GPTMR0)
- case IRQ_TIMER0:
- set_irq_handler(irq, handle_percpu_irq);
- break;
-#endif
#ifdef CONFIG_SMP
case IRQ_SUPPLE_0:
case IRQ_SUPPLE_1:
set_irq_handler(irq, handle_percpu_irq);
break;
#endif
- default:
#ifdef CONFIG_IPIPE
- /*
- * We want internal interrupt sources to be
- * masked, because ISRs may trigger interrupts
- * recursively (e.g. DMA), but interrupts are
- * _not_ masked at CPU level. So let's handle
- * most of them as level interrupts, except
- * the timer interrupt which is special.
- */
- if (irq == IRQ_SYSTMR || irq == IRQ_CORETMR)
- set_irq_handler(irq, handle_simple_irq);
- else
- set_irq_handler(irq, handle_level_irq);
-#else /* !CONFIG_IPIPE */
+#ifndef CONFIG_TICKSOURCE_CORETMR
+ case IRQ_TIMER0:
set_irq_handler(irq, handle_simple_irq);
-#endif /* !CONFIG_IPIPE */
break;
+#endif /* !CONFIG_TICKSOURCE_CORETMR */
+ case IRQ_CORETMR:
+ set_irq_handler(irq, handle_simple_irq);
+ break;
+ default:
+ set_irq_handler(irq, handle_level_irq);
+ break;
+#else /* !CONFIG_IPIPE */
+#ifdef CONFIG_TICKSOURCE_GPTMR0
+ case IRQ_TIMER0:
+ set_irq_handler(irq, handle_percpu_irq);
+ break;
+#endif /* CONFIG_TICKSOURCE_GPTMR0 */
+ default:
+ set_irq_handler(irq, handle_simple_irq);
+ break;
+#endif /* !CONFIG_IPIPE */
}
}
@@ -1224,15 +1223,14 @@
asmlinkage int __ipipe_grab_irq(int vec, struct pt_regs *regs)
{
struct ipipe_percpu_domain_data *p = ipipe_root_cpudom_ptr();
- struct ipipe_domain *this_domain = ipipe_current_domain;
+ struct ipipe_domain *this_domain = __ipipe_current_domain;
struct ivgx *ivg_stop = ivg7_13[vec-IVG7].istop;
struct ivgx *ivg = ivg7_13[vec-IVG7].ifirst;
int irq, s;
- if (likely(vec == EVT_IVTMR_P)) {
+ if (likely(vec == EVT_IVTMR_P))
irq = IRQ_CORETMR;
-
- } else {
+ else {
#if defined(SIC_ISR0) || defined(SICA_ISR0)
unsigned long sic_status[3];
@@ -1262,12 +1260,11 @@
break;
}
#endif
-
irq = ivg->irqno;
}
if (irq == IRQ_SYSTMR) {
-#ifndef CONFIG_GENERIC_CLOCKEVENTS
+#if !defined(CONFIG_GENERIC_CLOCKEVENTS) || defined(CONFIG_TICKSOURCE_GPTMR0)
bfin_write_TIMER_STATUS(1); /* Latch TIMIL0 */
#endif
/* This is basically what we need from the register frame. */
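The restructured switch above encodes a three-way policy for the timer and default cases. A hedged restatement of just that part as a standalone helper (the function name is illustrative; the real code is the switch in ints-priority.c, which also handles the error and supplemental IRQs):

static void example_pick_flow_handler(unsigned int irq)
{
#ifdef CONFIG_IPIPE
	/* Under I-pipe, internal sources are handled as level interrupts
	 * so they stay masked while their ISRs run (interrupts are not
	 * masked at CPU level); only the tick sources keep the lighter
	 * simple-irq flow. */
	switch (irq) {
#ifndef CONFIG_TICKSOURCE_CORETMR
	case IRQ_TIMER0:
#endif
	case IRQ_CORETMR:
		set_irq_handler(irq, handle_simple_irq);
		break;
	default:
		set_irq_handler(irq, handle_level_irq);
		break;
	}
#else
#ifdef CONFIG_TICKSOURCE_GPTMR0
	if (irq == IRQ_TIMER0) {
		set_irq_handler(irq, handle_percpu_irq);
		return;
	}
#endif
	set_irq_handler(irq, handle_simple_irq);
#endif
}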
diff --git a/arch/blackfin/mach-common/pm.c b/arch/blackfin/mach-common/pm.c
index bce5a84..9e7e27b 100644
--- a/arch/blackfin/mach-common/pm.c
+++ b/arch/blackfin/mach-common/pm.c
@@ -132,7 +132,7 @@
return 0;
}
-#ifdef CONFIG_BFIN_WB
+#if defined(CONFIG_BFIN_EXTMEM_WRITEBACK) || defined(CONFIG_BFIN_L2_WRITEBACK)
static void flushinv_all_dcache(void)
{
u32 way, bank, subbank, set;
@@ -175,7 +175,7 @@
#ifdef CONFIG_BFIN_DCACHE
unsigned long ctrl;
-#ifdef CONFIG_BFIN_WB
+#if defined(CONFIG_BFIN_EXTMEM_WRITEBACK) || defined(CONFIG_BFIN_L2_WRITEBACK)
flushinv_all_dcache();
#endif
SSYNC();
diff --git a/arch/blackfin/mm/init.c b/arch/blackfin/mm/init.c
index 014a55a..68bd0bd6 100644
--- a/arch/blackfin/mm/init.c
+++ b/arch/blackfin/mm/init.c
@@ -160,7 +160,7 @@
/* do not count in kernel image between _rambase and _ramstart */
reservedpages -= (_ramstart - _rambase) >> PAGE_SHIFT;
-#if (defined(CONFIG_BFIN_ICACHE) && ANOMALY_05000263)
+#if (defined(CONFIG_BFIN_EXTMEM_ICACHEABLE) && ANOMALY_05000263)
reservedpages += (_ramend - memory_end - DMA_UNCACHED_REGION) >> PAGE_SHIFT;
#endif
diff --git a/arch/cris/mm/fault.c b/arch/cris/mm/fault.c
index c4c76db..f925115 100644
--- a/arch/cris/mm/fault.c
+++ b/arch/cris/mm/fault.c
@@ -163,7 +163,7 @@
* the fault.
*/
- fault = handle_mm_fault(mm, vma, address, writeaccess & 1);
+ fault = handle_mm_fault(mm, vma, address, (writeaccess & 1) ? FAULT_FLAG_WRITE : 0);
if (unlikely(fault & VM_FAULT_ERROR)) {
if (fault & VM_FAULT_OOM)
goto out_of_memory;
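The same mechanical conversion repeats here and in the frv, ia64, m32r, m68k, microblaze, mips, mn10300 and parisc hunks that follow: handle_mm_fault() now takes a flags word rather than a boolean, so each caller translates its write indicator into FAULT_FLAG_WRITE. A minimal sketch of the pattern, with an illustrative wrapper name:

#include <linux/mm.h>

static inline int example_fault(struct mm_struct *mm,
				struct vm_area_struct *vma,
				unsigned long address, int write)
{
	/* Old convention: the last argument was 0/1 for write access.
	 * New convention: a flags word, of which FAULT_FLAG_WRITE is
	 * the only bit these call sites need to set. */
	return handle_mm_fault(mm, vma, address,
			       write ? FAULT_FLAG_WRITE : 0);
}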
diff --git a/arch/frv/mm/fault.c b/arch/frv/mm/fault.c
index 05093d4..30f5d10 100644
--- a/arch/frv/mm/fault.c
+++ b/arch/frv/mm/fault.c
@@ -163,7 +163,7 @@
* make sure we exit gracefully rather than endlessly redo
* the fault.
*/
- fault = handle_mm_fault(mm, vma, ear0, write);
+ fault = handle_mm_fault(mm, vma, ear0, write ? FAULT_FLAG_WRITE : 0);
if (unlikely(fault & VM_FAULT_ERROR)) {
if (fault & VM_FAULT_OOM)
goto out_of_memory;
diff --git a/arch/ia64/include/asm/iommu.h b/arch/ia64/include/asm/iommu.h
index 0490794..745e095 100644
--- a/arch/ia64/include/asm/iommu.h
+++ b/arch/ia64/include/asm/iommu.h
@@ -9,6 +9,11 @@
extern void no_iommu_init(void);
extern int force_iommu, no_iommu;
extern int iommu_detected;
+#ifdef CONFIG_DMAR
+extern int iommu_pass_through;
+#else
+#define iommu_pass_through (0)
+#endif
extern void iommu_dma_init(void);
extern void machvec_init(const char *name);
diff --git a/arch/ia64/kernel/pci-dma.c b/arch/ia64/kernel/pci-dma.c
index 1376da4..0569596 100644
--- a/arch/ia64/kernel/pci-dma.c
+++ b/arch/ia64/kernel/pci-dma.c
@@ -32,6 +32,8 @@
int force_iommu __read_mostly;
#endif
+int iommu_pass_through;
+
/* Dummy device used for NULL arguments (normally ISA). Better would
be probably a smaller DMA mask, but this is bug-to-bug compatible
to i386. */
diff --git a/arch/ia64/kernel/pci-swiotlb.c b/arch/ia64/kernel/pci-swiotlb.c
index 285aae8..223abb1 100644
--- a/arch/ia64/kernel/pci-swiotlb.c
+++ b/arch/ia64/kernel/pci-swiotlb.c
@@ -46,7 +46,7 @@
void __init pci_swiotlb_init(void)
{
- if (!iommu_detected) {
+ if (!iommu_detected || iommu_pass_through) {
#ifdef CONFIG_IA64_GENERIC
swiotlb = 1;
printk(KERN_INFO "PCI-DMA: Re-initialize machine vector.\n");
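The three ia64 hunks above declare iommu_pass_through, define it, and consume it: when pass-through is requested the hardware IOMMU maps DMA one-to-one, so devices that cannot address all of memory still need bounce buffering. A hedged sketch of the resulting decision, with an illustrative function name:

void __init example_dma_init(void)
{
	/* swiotlb is wanted either when no hardware IOMMU was found or
	 * when the IOMMU is deliberately run in pass-through mode. */
	if (!iommu_detected || iommu_pass_through)
		swiotlb_init();
}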
diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c
index 23088be..19261a9 100644
--- a/arch/ia64/mm/fault.c
+++ b/arch/ia64/mm/fault.c
@@ -154,7 +154,7 @@
* sure we exit gracefully rather than endlessly redo the
* fault.
*/
- fault = handle_mm_fault(mm, vma, address, (mask & VM_WRITE) != 0);
+ fault = handle_mm_fault(mm, vma, address, (mask & VM_WRITE) ? FAULT_FLAG_WRITE : 0);
if (unlikely(fault & VM_FAULT_ERROR)) {
/*
* We ran out of memory, or some other thing happened
diff --git a/arch/m32r/mm/fault.c b/arch/m32r/mm/fault.c
index 4a71df4..7274b47 100644
--- a/arch/m32r/mm/fault.c
+++ b/arch/m32r/mm/fault.c
@@ -196,7 +196,7 @@
*/
addr = (address & PAGE_MASK);
set_thread_fault_code(error_code);
- fault = handle_mm_fault(mm, vma, addr, write);
+ fault = handle_mm_fault(mm, vma, addr, write ? FAULT_FLAG_WRITE : 0);
if (unlikely(fault & VM_FAULT_ERROR)) {
if (fault & VM_FAULT_OOM)
goto out_of_memory;
diff --git a/arch/m68k/mm/fault.c b/arch/m68k/mm/fault.c
index f493f03..d0e35cf 100644
--- a/arch/m68k/mm/fault.c
+++ b/arch/m68k/mm/fault.c
@@ -155,7 +155,7 @@
*/
survive:
- fault = handle_mm_fault(mm, vma, address, write);
+ fault = handle_mm_fault(mm, vma, address, write ? FAULT_FLAG_WRITE : 0);
#ifdef DEBUG
printk("handle_mm_fault returns %d\n",fault);
#endif
diff --git a/arch/microblaze/kernel/init_task.c b/arch/microblaze/kernel/init_task.c
index 48eb9fb..67da225 100644
--- a/arch/microblaze/kernel/init_task.c
+++ b/arch/microblaze/kernel/init_task.c
@@ -18,8 +18,6 @@
static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
-struct mm_struct init_mm = INIT_MM(init_mm);
-EXPORT_SYMBOL(init_mm);
union thread_union init_thread_union
__attribute__((__section__(".data.init_task"))) =
diff --git a/arch/microblaze/kernel/vmlinux.lds.S b/arch/microblaze/kernel/vmlinux.lds.S
index 8ae807a..d34d38d 100644
--- a/arch/microblaze/kernel/vmlinux.lds.S
+++ b/arch/microblaze/kernel/vmlinux.lds.S
@@ -62,7 +62,8 @@
_sdata = . ;
.data ALIGN (4096) : { /* page aligned when MMU used - origin 0x4 */
- *(.data)
+ DATA_DATA
+ CONSTRUCTORS
}
. = ALIGN(32);
.data.cacheline_aligned : { *(.data.cacheline_aligned) }
@@ -98,13 +99,13 @@
. = ALIGN(4096);
.init.text : {
_sinittext = . ;
- *(.init.text)
- *(.exit.text)
- *(.exit.data)
+ INIT_TEXT
_einittext = .;
}
- .init.data : { *(.init.data) }
+ .init.data : {
+ INIT_DATA
+ }
. = ALIGN(4);
.init.ivt : {
diff --git a/arch/microblaze/mm/fault.c b/arch/microblaze/mm/fault.c
index 5e67cd1..956607a 100644
--- a/arch/microblaze/mm/fault.c
+++ b/arch/microblaze/mm/fault.c
@@ -232,7 +232,7 @@
* the fault.
*/
survive:
- fault = handle_mm_fault(mm, vma, address, is_write);
+ fault = handle_mm_fault(mm, vma, address, is_write ? FAULT_FLAG_WRITE : 0);
if (unlikely(fault & VM_FAULT_ERROR)) {
if (fault & VM_FAULT_OOM)
goto out_of_memory;
diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
index 55767ad..6751ce9 100644
--- a/arch/mips/mm/fault.c
+++ b/arch/mips/mm/fault.c
@@ -102,7 +102,7 @@
* make sure we exit gracefully rather than endlessly redo
* the fault.
*/
- fault = handle_mm_fault(mm, vma, address, write);
+ fault = handle_mm_fault(mm, vma, address, write ? FAULT_FLAG_WRITE : 0);
if (unlikely(fault & VM_FAULT_ERROR)) {
if (fault & VM_FAULT_OOM)
goto out_of_memory;
diff --git a/arch/mn10300/kernel/vmlinux.lds.S b/arch/mn10300/kernel/vmlinux.lds.S
index 24de6b9..bcebcef 100644
--- a/arch/mn10300/kernel/vmlinux.lds.S
+++ b/arch/mn10300/kernel/vmlinux.lds.S
@@ -38,14 +38,10 @@
_etext = .; /* End of text section */
- . = ALIGN(16); /* Exception table */
- __start___ex_table = .;
- __ex_table : { *(__ex_table) }
- __stop___ex_table = .;
-
+ EXCEPTION_TABLE(16)
BUG_TABLE
- RODATA
+ RO_DATA(PAGE_SIZE)
/* writeable */
.data : { /* Data */
@@ -53,27 +49,19 @@
CONSTRUCTORS
}
- . = ALIGN(PAGE_SIZE);
- __nosave_begin = .;
- .data_nosave : { *(.data.nosave) }
- . = ALIGN(PAGE_SIZE);
- __nosave_end = .;
+ .data_nosave : { NOSAVE_DATA; }
- . = ALIGN(PAGE_SIZE);
- .data.page_aligned : { *(.data.idt) }
-
- . = ALIGN(32);
- .data.cacheline_aligned : { *(.data.cacheline_aligned) }
+ .data.page_aligned : { PAGE_ALIGNED_DATA(PAGE_SIZE); }
+ .data.cacheline_aligned : { CACHELINE_ALIGNED_DATA(32); }
/* rarely changed data like cpu maps */
. = ALIGN(32);
.data.read_mostly : AT(ADDR(.data.read_mostly)) {
- *(.data.read_mostly)
+ READ_MOSTLY_DATA(32);
_edata = .; /* End of data section */
}
- . = ALIGN(THREAD_SIZE); /* init_task */
- .data.init_task : { *(.data.init_task) }
+ .data.init_task : { INIT_TASK(THREAD_SIZE); }
/* might get freed after init */
. = ALIGN(PAGE_SIZE);
@@ -88,23 +76,18 @@
__init_begin = .;
.init.text : {
_sinittext = .;
- *(.init.text)
+ INIT_TEXT;
_einittext = .;
}
- .init.data : { *(.init.data) }
- . = ALIGN(16);
- __setup_start = .;
- .setup.init : { KEEP(*(.init.setup)) }
- __setup_end = .;
+ .init.data : { INIT_DATA; }
+ .setup.init : { INIT_SETUP(16); }
__initcall_start = .;
.initcall.init : {
INITCALLS
}
__initcall_end = .;
- __con_initcall_start = .;
- .con_initcall.init : { *(.con_initcall.init) }
- __con_initcall_end = .;
+ .con_initcall.init : { CON_INITCALL; }
SECURITY_INIT
. = ALIGN(4);
@@ -114,28 +97,17 @@
.altinstr_replacement : { *(.altinstr_replacement) }
/* .exit.text is discarded at runtime, not link time, to deal with references
from .altinstructions and .eh_frame */
- .exit.text : { *(.exit.text) }
- .exit.data : { *(.exit.data) }
+ .exit.text : { EXIT_TEXT; }
+ .exit.data : { EXIT_DATA; }
-#ifdef CONFIG_BLK_DEV_INITRD
- . = ALIGN(PAGE_SIZE);
- __initramfs_start = .;
- .init.ramfs : { *(.init.ramfs) }
- __initramfs_end = .;
-#endif
+ .init.ramfs : { INIT_RAM_FS; }
PERCPU(32)
. = ALIGN(PAGE_SIZE);
__init_end = .;
/* freed after init ends here */
- __bss_start = .; /* BSS */
- .bss : {
- *(.bss.page_aligned)
- *(.bss)
- }
- . = ALIGN(4);
- __bss_stop = .;
+ BSS(4)
_end = . ;
@@ -145,7 +117,7 @@
/* Sections to be discarded */
/DISCARD/ : {
- *(.exitcall.exit)
+ EXIT_CALL
}
STABS_DEBUG
diff --git a/arch/mn10300/mm/fault.c b/arch/mn10300/mm/fault.c
index 33cf250..a62e1e1 100644
--- a/arch/mn10300/mm/fault.c
+++ b/arch/mn10300/mm/fault.c
@@ -258,7 +258,7 @@
* make sure we exit gracefully rather than endlessly redo
* the fault.
*/
- fault = handle_mm_fault(mm, vma, address, write);
+ fault = handle_mm_fault(mm, vma, address, write ? FAULT_FLAG_WRITE : 0);
if (unlikely(fault & VM_FAULT_ERROR)) {
if (fault & VM_FAULT_OOM)
goto out_of_memory;
diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c
index 92c7fa4..bfb6dd6a 100644
--- a/arch/parisc/mm/fault.c
+++ b/arch/parisc/mm/fault.c
@@ -202,7 +202,7 @@
* fault.
*/
- fault = handle_mm_fault(mm, vma, address, (acc_type & VM_WRITE) != 0);
+ fault = handle_mm_fault(mm, vma, address, (acc_type & VM_WRITE) ? FAULT_FLAG_WRITE : 0);
if (unlikely(fault & VM_FAULT_ERROR)) {
/*
* We hit a shared mapping outside of the file, or some
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 9fb344d..bf6cedf 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -126,6 +126,7 @@
select HAVE_OPROFILE
select HAVE_SYSCALL_WRAPPERS if PPC64
select GENERIC_ATOMIC64 if PPC32
+ select HAVE_PERF_COUNTERS
config EARLY_PRINTK
bool
diff --git a/arch/powerpc/include/asm/hw_irq.h b/arch/powerpc/include/asm/hw_irq.h
index b7f8f4a..867ab8e 100644
--- a/arch/powerpc/include/asm/hw_irq.h
+++ b/arch/powerpc/include/asm/hw_irq.h
@@ -131,6 +131,8 @@
struct irq_chip;
#ifdef CONFIG_PERF_COUNTERS
+
+#ifdef CONFIG_PPC64
static inline unsigned long test_perf_counter_pending(void)
{
unsigned long x;
@@ -154,15 +156,15 @@
"r" (0),
"i" (offsetof(struct paca_struct, perf_counter_pending)));
}
+#endif /* CONFIG_PPC64 */
-#else
+#else /* CONFIG_PERF_COUNTERS */
static inline unsigned long test_perf_counter_pending(void)
{
return 0;
}
-static inline void set_perf_counter_pending(void) {}
static inline void clear_perf_counter_pending(void) {}
#endif /* CONFIG_PERF_COUNTERS */
diff --git a/arch/powerpc/include/asm/perf_counter.h b/arch/powerpc/include/asm/perf_counter.h
index cc7c887..8ccd4e1 100644
--- a/arch/powerpc/include/asm/perf_counter.h
+++ b/arch/powerpc/include/asm/perf_counter.h
@@ -10,6 +10,8 @@
*/
#include <linux/types.h>
+#include <asm/hw_irq.h>
+
#define MAX_HWCOUNTERS 8
#define MAX_EVENT_ALTERNATIVES 8
#define MAX_LIMITED_HWCOUNTERS 2
@@ -19,27 +21,27 @@
* describe the PMU on a particular POWER-family CPU.
*/
struct power_pmu {
- int n_counter;
- int max_alternatives;
- u64 add_fields;
- u64 test_adder;
- int (*compute_mmcr)(u64 events[], int n_ev,
- unsigned int hwc[], u64 mmcr[]);
- int (*get_constraint)(u64 event, u64 *mskp, u64 *valp);
- int (*get_alternatives)(u64 event, unsigned int flags,
- u64 alt[]);
- void (*disable_pmc)(unsigned int pmc, u64 mmcr[]);
- int (*limited_pmc_event)(u64 event);
- u32 flags;
- int n_generic;
- int *generic_events;
- int (*cache_events)[PERF_COUNT_HW_CACHE_MAX]
+ const char *name;
+ int n_counter;
+ int max_alternatives;
+ unsigned long add_fields;
+ unsigned long test_adder;
+ int (*compute_mmcr)(u64 events[], int n_ev,
+ unsigned int hwc[], unsigned long mmcr[]);
+ int (*get_constraint)(u64 event, unsigned long *mskp,
+ unsigned long *valp);
+ int (*get_alternatives)(u64 event, unsigned int flags,
+ u64 alt[]);
+ void (*disable_pmc)(unsigned int pmc, unsigned long mmcr[]);
+ int (*limited_pmc_event)(u64 event);
+ u32 flags;
+ int n_generic;
+ int *generic_events;
+ int (*cache_events)[PERF_COUNT_HW_CACHE_MAX]
[PERF_COUNT_HW_CACHE_OP_MAX]
[PERF_COUNT_HW_CACHE_RESULT_MAX];
};
-extern struct power_pmu *ppmu;
-
/*
* Values for power_pmu.flags
*/
@@ -53,15 +55,23 @@
#define PPMU_LIMITED_PMC_REQD 2 /* have to put this on a limited PMC */
#define PPMU_ONLY_COUNT_RUN 4 /* only counting in run state */
+extern int register_power_pmu(struct power_pmu *);
+
struct pt_regs;
extern unsigned long perf_misc_flags(struct pt_regs *regs);
-#define perf_misc_flags(regs) perf_misc_flags(regs)
-
extern unsigned long perf_instruction_pointer(struct pt_regs *regs);
/*
- * The power_pmu.get_constraint function returns a 64-bit value and
- * a 64-bit mask that express the constraints between this event and
+ * Only override the default definitions in include/linux/perf_counter.h
+ * if we have hardware PMU support.
+ */
+#ifdef CONFIG_PPC_PERF_CTRS
+#define perf_misc_flags(regs) perf_misc_flags(regs)
+#endif
+
+/*
+ * The power_pmu.get_constraint function returns a 32/64-bit value and
+ * a 32/64-bit mask that express the constraints between this event and
* other events.
*
* The value and mask are divided up into (non-overlapping) bitfields
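A hedged toy illustration of the constraint encoding the comment above describes, assuming the usual semantics of this scheme (a "select" field must take the same value for every event sharing a resource, while a "counter" field counts users and add_fields/test_adder are chosen so that exceeding the limit trips the check). The field positions and names here are made up for the example and are not any real POWER layout:

/* Illustrative only: bits 0-2 count events needing PMC1,
 * bits 8-9 select a shared unit and must agree across events. */
#define EX_PMC1_MASK	0x007ul
#define EX_PMC1_ONE	0x001ul		/* add_fields contribution */
#define EX_UNIT_MASK	0x300ul
#define EX_UNIT_FPU	0x100ul

static void example_constraint(unsigned long *maskp, unsigned long *valp)
{
	/* "one more event on PMC1, routed through the FPU unit" */
	*maskp = EX_PMC1_MASK | EX_UNIT_MASK;
	*valp  = EX_PMC1_ONE  | EX_UNIT_FPU;
}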
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
index 6a4fb29..b73396b 100644
--- a/arch/powerpc/kernel/Makefile
+++ b/arch/powerpc/kernel/Makefile
@@ -97,9 +97,10 @@
obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o
obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o
-obj-$(CONFIG_PERF_COUNTERS) += perf_counter.o power4-pmu.o ppc970-pmu.o \
- power5-pmu.o power5+-pmu.o power6-pmu.o \
- power7-pmu.o
+obj-$(CONFIG_PPC_PERF_CTRS) += perf_counter.o
+obj64-$(CONFIG_PPC_PERF_CTRS) += power4-pmu.o ppc970-pmu.o power5-pmu.o \
+ power5+-pmu.o power6-pmu.o power7-pmu.o
+obj32-$(CONFIG_PPC_PERF_CTRS) += mpc7450-pmu.o
obj-$(CONFIG_8XX_MINIMAL_FPEMU) += softemu8xx.o
@@ -108,6 +109,7 @@
endif
obj-$(CONFIG_PPC64) += $(obj64-y)
+obj-$(CONFIG_PPC32) += $(obj32-y)
ifneq ($(CONFIG_XMON)$(CONFIG_KEXEC),)
obj-y += ppc_save_regs.o
diff --git a/arch/powerpc/kernel/ftrace.c b/arch/powerpc/kernel/ftrace.c
index 1b12696..ce1f3e4 100644
--- a/arch/powerpc/kernel/ftrace.c
+++ b/arch/powerpc/kernel/ftrace.c
@@ -586,7 +586,7 @@
return;
}
- if (ftrace_push_return_trace(old, self_addr, &trace.depth) == -EBUSY) {
+ if (ftrace_push_return_trace(old, self_addr, &trace.depth, 0) == -EBUSY) {
*parent = old;
return;
}
diff --git a/arch/powerpc/kernel/mpc7450-pmu.c b/arch/powerpc/kernel/mpc7450-pmu.c
new file mode 100644
index 0000000..75ff47f
--- /dev/null
+++ b/arch/powerpc/kernel/mpc7450-pmu.c
@@ -0,0 +1,417 @@
+/*
+ * Performance counter support for MPC7450-family processors.
+ *
+ * Copyright 2008-2009 Paul Mackerras, IBM Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#include <linux/string.h>
+#include <linux/perf_counter.h>
+#include <asm/reg.h>
+#include <asm/cputable.h>
+
+#define N_COUNTER 6 /* Number of hardware counters */
+#define MAX_ALT 3 /* Maximum number of event alternative codes */
+
+/*
+ * Bits in event code for MPC7450 family
+ */
+#define PM_THRMULT_MSKS 0x40000
+#define PM_THRESH_SH 12
+#define PM_THRESH_MSK 0x3f
+#define PM_PMC_SH 8
+#define PM_PMC_MSK 7
+#define PM_PMCSEL_MSK 0x7f
+
+/*
+ * Classify events according to how specific their PMC requirements are.
+ * Result is:
+ * 0: can go on any PMC
+ * 1: can go on PMCs 1-4
+ * 2: can go on PMCs 1,2,4
+ * 3: can go on PMCs 1 or 2
+ * 4: can only go on one PMC
+ * -1: event code is invalid
+ */
+#define N_CLASSES 5
+
+static int mpc7450_classify_event(u32 event)
+{
+ int pmc;
+
+ pmc = (event >> PM_PMC_SH) & PM_PMC_MSK;
+ if (pmc) {
+ if (pmc > N_COUNTER)
+ return -1;
+ return 4;
+ }
+ event &= PM_PMCSEL_MSK;
+ if (event <= 1)
+ return 0;
+ if (event <= 7)
+ return 1;
+ if (event <= 13)
+ return 2;
+ if (event <= 22)
+ return 3;
+ return -1;
+}
+
+/*
+ * Events using threshold and possible threshold scale:
+ * code scale? name
+ * 11e N PM_INSTQ_EXCEED_CYC
+ * 11f N PM_ALTV_IQ_EXCEED_CYC
+ * 128 Y PM_DTLB_SEARCH_EXCEED_CYC
+ * 12b Y PM_LD_MISS_EXCEED_L1_CYC
+ * 220 N PM_CQ_EXCEED_CYC
+ * 30c N PM_GPR_RB_EXCEED_CYC
+ * 30d ? PM_FPR_IQ_EXCEED_CYC ?
+ * 311 Y PM_ITLB_SEARCH_EXCEED
+ * 410 N PM_GPR_IQ_EXCEED_CYC
+ */
+
+/*
+ * Return use of threshold and threshold scale bits:
+ * 0 = uses neither, 1 = uses threshold, 2 = uses both
+ */
+static int mpc7450_threshold_use(u32 event)
+{
+ int pmc, sel;
+
+ pmc = (event >> PM_PMC_SH) & PM_PMC_MSK;
+ sel = event & PM_PMCSEL_MSK;
+ switch (pmc) {
+ case 1:
+ if (sel == 0x1e || sel == 0x1f)
+ return 1;
+ if (sel == 0x28 || sel == 0x2b)
+ return 2;
+ break;
+ case 2:
+ if (sel == 0x20)
+ return 1;
+ break;
+ case 3:
+ if (sel == 0xc || sel == 0xd)
+ return 1;
+ if (sel == 0x11)
+ return 2;
+ break;
+ case 4:
+ if (sel == 0x10)
+ return 1;
+ break;
+ }
+ return 0;
+}
+
+/*
+ * Layout of constraint bits:
+ * 33222222222211111111110000000000
+ * 10987654321098765432109876543210
+ * |< >< > < > < ><><><><><><>
+ * TS TV G4 G3 G2P6P5P4P3P2P1
+ *
+ * P1 - P6
+ * 0 - 11: Count of events needing PMC1 .. PMC6
+ *
+ * G2
+ * 12 - 14: Count of events needing PMC1 or PMC2
+ *
+ * G3
+ * 16 - 18: Count of events needing PMC1, PMC2 or PMC4
+ *
+ * G4
+ * 20 - 23: Count of events needing PMC1, PMC2, PMC3 or PMC4
+ *
+ * TV
+ * 24 - 29: Threshold value requested
+ *
+ * TS
+ * 30: Threshold scale value requested
+ */
+
+static u32 pmcbits[N_COUNTER][2] = {
+ { 0x00844002, 0x00111001 }, /* PMC1 mask, value: P1,G2,G3,G4 */
+ { 0x00844008, 0x00111004 }, /* PMC2: P2,G2,G3,G4 */
+ { 0x00800020, 0x00100010 }, /* PMC3: P3,G4 */
+ { 0x00840080, 0x00110040 }, /* PMC4: P4,G3,G4 */
+ { 0x00000200, 0x00000100 }, /* PMC5: P5 */
+ { 0x00000800, 0x00000400 } /* PMC6: P6 */
+};
+
+static u32 classbits[N_CLASSES - 1][2] = {
+ { 0x00000000, 0x00000000 }, /* class 0: no constraint */
+ { 0x00800000, 0x00100000 }, /* class 1: G4 */
+ { 0x00040000, 0x00010000 }, /* class 2: G3 */
+ { 0x00004000, 0x00001000 }, /* class 3: G2 */
+};
+
+static int mpc7450_get_constraint(u64 event, unsigned long *maskp,
+ unsigned long *valp)
+{
+ int pmc, class;
+ u32 mask, value;
+ int thresh, tuse;
+
+ class = mpc7450_classify_event(event);
+ if (class < 0)
+ return -1;
+ if (class == 4) {
+ pmc = ((unsigned int)event >> PM_PMC_SH) & PM_PMC_MSK;
+ mask = pmcbits[pmc - 1][0];
+ value = pmcbits[pmc - 1][1];
+ } else {
+ mask = classbits[class][0];
+ value = classbits[class][1];
+ }
+
+ tuse = mpc7450_threshold_use(event);
+ if (tuse) {
+ thresh = ((unsigned int)event >> PM_THRESH_SH) & PM_THRESH_MSK;
+ mask |= 0x3f << 24;
+ value |= thresh << 24;
+ if (tuse == 2) {
+ mask |= 0x40000000;
+ if ((unsigned int)event & PM_THRMULT_MSKS)
+ value |= 0x40000000;
+ }
+ }
+
+ *maskp = mask;
+ *valp = value;
+ return 0;
+}
+
+static const unsigned int event_alternatives[][MAX_ALT] = {
+ { 0x217, 0x317 }, /* PM_L1_DCACHE_MISS */
+ { 0x418, 0x50f, 0x60f }, /* PM_SNOOP_RETRY */
+ { 0x502, 0x602 }, /* PM_L2_HIT */
+ { 0x503, 0x603 }, /* PM_L3_HIT */
+ { 0x504, 0x604 }, /* PM_L2_ICACHE_MISS */
+ { 0x505, 0x605 }, /* PM_L3_ICACHE_MISS */
+ { 0x506, 0x606 }, /* PM_L2_DCACHE_MISS */
+ { 0x507, 0x607 }, /* PM_L3_DCACHE_MISS */
+ { 0x50a, 0x623 }, /* PM_LD_HIT_L3 */
+ { 0x50b, 0x624 }, /* PM_ST_HIT_L3 */
+ { 0x50d, 0x60d }, /* PM_L2_TOUCH_HIT */
+ { 0x50e, 0x60e }, /* PM_L3_TOUCH_HIT */
+ { 0x512, 0x612 }, /* PM_INT_LOCAL */
+ { 0x513, 0x61d }, /* PM_L2_MISS */
+ { 0x514, 0x61e }, /* PM_L3_MISS */
+};
+
+/*
+ * Scan the alternatives table for a match and return the
+ * index into the alternatives table if found, else -1.
+ */
+static int find_alternative(u32 event)
+{
+ int i, j;
+
+ for (i = 0; i < ARRAY_SIZE(event_alternatives); ++i) {
+ if (event < event_alternatives[i][0])
+ break;
+ for (j = 0; j < MAX_ALT && event_alternatives[i][j]; ++j)
+ if (event == event_alternatives[i][j])
+ return i;
+ }
+ return -1;
+}
+
+static int mpc7450_get_alternatives(u64 event, unsigned int flags, u64 alt[])
+{
+ int i, j, nalt = 1;
+ u32 ae;
+
+ alt[0] = event;
+ nalt = 1;
+ i = find_alternative((u32)event);
+ if (i >= 0) {
+ for (j = 0; j < MAX_ALT; ++j) {
+ ae = event_alternatives[i][j];
+ if (ae && ae != (u32)event)
+ alt[nalt++] = ae;
+ }
+ }
+ return nalt;
+}
+
+/*
+ * Bitmaps of which PMCs each class can use for classes 0 - 3.
+ * Bit i is set if PMC i+1 is usable.
+ */
+static const u8 classmap[N_CLASSES] = {
+ 0x3f, 0x0f, 0x0b, 0x03, 0
+};
+
+/* Bit position and width of each PMCSEL field */
+static const int pmcsel_shift[N_COUNTER] = {
+ 6, 0, 27, 22, 17, 11
+};
+static const u32 pmcsel_mask[N_COUNTER] = {
+ 0x7f, 0x3f, 0x1f, 0x1f, 0x1f, 0x3f
+};
+
+/*
+ * Compute MMCR0/1/2 values for a set of events.
+ */
+static int mpc7450_compute_mmcr(u64 event[], int n_ev,
+ unsigned int hwc[], unsigned long mmcr[])
+{
+ u8 event_index[N_CLASSES][N_COUNTER];
+ int n_classevent[N_CLASSES];
+ int i, j, class, tuse;
+ u32 pmc_inuse = 0, pmc_avail;
+ u32 mmcr0 = 0, mmcr1 = 0, mmcr2 = 0;
+ u32 ev, pmc, thresh;
+
+ if (n_ev > N_COUNTER)
+ return -1;
+
+ /* First pass: count usage in each class */
+ for (i = 0; i < N_CLASSES; ++i)
+ n_classevent[i] = 0;
+ for (i = 0; i < n_ev; ++i) {
+ class = mpc7450_classify_event(event[i]);
+ if (class < 0)
+ return -1;
+ j = n_classevent[class]++;
+ event_index[class][j] = i;
+ }
+
+ /* Second pass: allocate PMCs from most specific event to least */
+ for (class = N_CLASSES - 1; class >= 0; --class) {
+ for (i = 0; i < n_classevent[class]; ++i) {
+ ev = event[event_index[class][i]];
+ if (class == 4) {
+ pmc = (ev >> PM_PMC_SH) & PM_PMC_MSK;
+ if (pmc_inuse & (1 << (pmc - 1)))
+ return -1;
+ } else {
+ /* Find a suitable PMC */
+ pmc_avail = classmap[class] & ~pmc_inuse;
+ if (!pmc_avail)
+ return -1;
+ pmc = ffs(pmc_avail);
+ }
+ pmc_inuse |= 1 << (pmc - 1);
+
+ tuse = mpc7450_threshold_use(ev);
+ if (tuse) {
+ thresh = (ev >> PM_THRESH_SH) & PM_THRESH_MSK;
+ mmcr0 |= thresh << 16;
+ if (tuse == 2 && (ev & PM_THRMULT_MSKS))
+ mmcr2 = 0x80000000;
+ }
+ ev &= pmcsel_mask[pmc - 1];
+ ev <<= pmcsel_shift[pmc - 1];
+ if (pmc <= 2)
+ mmcr0 |= ev;
+ else
+ mmcr1 |= ev;
+ hwc[event_index[class][i]] = pmc - 1;
+ }
+ }
+
+ if (pmc_inuse & 1)
+ mmcr0 |= MMCR0_PMC1CE;
+ if (pmc_inuse & 0x3e)
+ mmcr0 |= MMCR0_PMCnCE;
+
+ /* Return MMCRx values */
+ mmcr[0] = mmcr0;
+ mmcr[1] = mmcr1;
+ mmcr[2] = mmcr2;
+ return 0;
+}
+
+/*
+ * Disable counting by a PMC.
+ * Note that the pmc argument is 0-based here, not 1-based.
+ */
+static void mpc7450_disable_pmc(unsigned int pmc, unsigned long mmcr[])
+{
+ if (pmc <= 1)
+ mmcr[0] &= ~(pmcsel_mask[pmc] << pmcsel_shift[pmc]);
+ else
+ mmcr[1] &= ~(pmcsel_mask[pmc] << pmcsel_shift[pmc]);
+}
+
+static int mpc7450_generic_events[] = {
+ [PERF_COUNT_HW_CPU_CYCLES] = 1,
+ [PERF_COUNT_HW_INSTRUCTIONS] = 2,
+ [PERF_COUNT_HW_CACHE_MISSES] = 0x217, /* PM_L1_DCACHE_MISS */
+ [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x122, /* PM_BR_CMPL */
+ [PERF_COUNT_HW_BRANCH_MISSES] = 0x41c, /* PM_BR_MPRED */
+};
+
+#define C(x) PERF_COUNT_HW_CACHE_##x
+
+/*
+ * Table of generalized cache-related events.
+ * 0 means not supported, -1 means nonsensical, other values
+ * are event codes.
+ */
+static int mpc7450_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
+ [C(L1D)] = { /* RESULT_ACCESS RESULT_MISS */
+ [C(OP_READ)] = { 0, 0x225 },
+ [C(OP_WRITE)] = { 0, 0x227 },
+ [C(OP_PREFETCH)] = { 0, 0 },
+ },
+ [C(L1I)] = { /* RESULT_ACCESS RESULT_MISS */
+ [C(OP_READ)] = { 0x129, 0x115 },
+ [C(OP_WRITE)] = { -1, -1 },
+ [C(OP_PREFETCH)] = { 0x634, 0 },
+ },
+ [C(LL)] = { /* RESULT_ACCESS RESULT_MISS */
+ [C(OP_READ)] = { 0, 0 },
+ [C(OP_WRITE)] = { 0, 0 },
+ [C(OP_PREFETCH)] = { 0, 0 },
+ },
+ [C(DTLB)] = { /* RESULT_ACCESS RESULT_MISS */
+ [C(OP_READ)] = { 0, 0x312 },
+ [C(OP_WRITE)] = { -1, -1 },
+ [C(OP_PREFETCH)] = { -1, -1 },
+ },
+ [C(ITLB)] = { /* RESULT_ACCESS RESULT_MISS */
+ [C(OP_READ)] = { 0, 0x223 },
+ [C(OP_WRITE)] = { -1, -1 },
+ [C(OP_PREFETCH)] = { -1, -1 },
+ },
+ [C(BPU)] = { /* RESULT_ACCESS RESULT_MISS */
+ [C(OP_READ)] = { 0x122, 0x41c },
+ [C(OP_WRITE)] = { -1, -1 },
+ [C(OP_PREFETCH)] = { -1, -1 },
+ },
+};
+
+struct power_pmu mpc7450_pmu = {
+ .name = "MPC7450 family",
+ .n_counter = N_COUNTER,
+ .max_alternatives = MAX_ALT,
+ .add_fields = 0x00111555ul,
+ .test_adder = 0x00301000ul,
+ .compute_mmcr = mpc7450_compute_mmcr,
+ .get_constraint = mpc7450_get_constraint,
+ .get_alternatives = mpc7450_get_alternatives,
+ .disable_pmc = mpc7450_disable_pmc,
+ .n_generic = ARRAY_SIZE(mpc7450_generic_events),
+ .generic_events = mpc7450_generic_events,
+ .cache_events = &mpc7450_cache_events,
+};
+
+static int init_mpc7450_pmu(void)
+{
+ if (strcmp(cur_cpu_spec->oprofile_cpu_type, "ppc/7450"))
+ return -ENODEV;
+
+ return register_power_pmu(&mpc7450_pmu);
+}
+
+arch_initcall(init_mpc7450_pmu);
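A small worked example of the event encoding handled above, using a raw code from the alternatives table (the helper name is illustrative): for event 0x217 (PM_L1_DCACHE_MISS), the PMC field (event >> 8) & 7 is 2, so mpc7450_classify_event() returns 4 (pinned to a single PMC), and the PMCSEL value is 0x217 & 0x7f = 0x17.

static void example_decode(u32 event)
{
	u32 pmc = (event >> PM_PMC_SH) & PM_PMC_MSK;	/* 0x217 -> 2    */
	u32 sel = event & PM_PMCSEL_MSK;		/* 0x217 -> 0x17 */

	pr_info("event %#x: pmc=%u pmcsel=%#x class=%d\n",
		event, pmc, sel, mpc7450_classify_event(event));
}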
diff --git a/arch/powerpc/kernel/perf_counter.c b/arch/powerpc/kernel/perf_counter.c
index bb20238..809fdf9 100644
--- a/arch/powerpc/kernel/perf_counter.c
+++ b/arch/powerpc/kernel/perf_counter.c
@@ -29,7 +29,7 @@
struct perf_counter *counter[MAX_HWCOUNTERS];
u64 events[MAX_HWCOUNTERS];
unsigned int flags[MAX_HWCOUNTERS];
- u64 mmcr[3];
+ unsigned long mmcr[3];
struct perf_counter *limited_counter[MAX_LIMITED_HWCOUNTERS];
u8 limited_hwidx[MAX_LIMITED_HWCOUNTERS];
};
@@ -46,6 +46,115 @@
*/
static unsigned int freeze_counters_kernel = MMCR0_FCS;
+/*
+ * 32-bit doesn't have MMCRA but does have an MMCR2,
+ * and a few other names are different.
+ */
+#ifdef CONFIG_PPC32
+
+#define MMCR0_FCHV 0
+#define MMCR0_PMCjCE MMCR0_PMCnCE
+
+#define SPRN_MMCRA SPRN_MMCR2
+#define MMCRA_SAMPLE_ENABLE 0
+
+static inline unsigned long perf_ip_adjust(struct pt_regs *regs)
+{
+ return 0;
+}
+static inline void perf_set_pmu_inuse(int inuse) { }
+static inline void perf_get_data_addr(struct pt_regs *regs, u64 *addrp) { }
+static inline u32 perf_get_misc_flags(struct pt_regs *regs)
+{
+ return 0;
+}
+static inline void perf_read_regs(struct pt_regs *regs) { }
+static inline int perf_intr_is_nmi(struct pt_regs *regs)
+{
+ return 0;
+}
+
+#endif /* CONFIG_PPC32 */
+
+/*
+ * Things that are specific to 64-bit implementations.
+ */
+#ifdef CONFIG_PPC64
+
+static inline unsigned long perf_ip_adjust(struct pt_regs *regs)
+{
+ unsigned long mmcra = regs->dsisr;
+
+ if ((mmcra & MMCRA_SAMPLE_ENABLE) && !(ppmu->flags & PPMU_ALT_SIPR)) {
+ unsigned long slot = (mmcra & MMCRA_SLOT) >> MMCRA_SLOT_SHIFT;
+ if (slot > 1)
+ return 4 * (slot - 1);
+ }
+ return 0;
+}
+
+static inline void perf_set_pmu_inuse(int inuse)
+{
+ get_lppaca()->pmcregs_in_use = inuse;
+}
+
+/*
+ * The user wants a data address recorded.
+ * If we're not doing instruction sampling, give them the SDAR
+ * (sampled data address). If we are doing instruction sampling, then
+ * only give them the SDAR if it corresponds to the instruction
+ * pointed to by SIAR; this is indicated by the [POWER6_]MMCRA_SDSYNC
+ * bit in MMCRA.
+ */
+static inline void perf_get_data_addr(struct pt_regs *regs, u64 *addrp)
+{
+ unsigned long mmcra = regs->dsisr;
+ unsigned long sdsync = (ppmu->flags & PPMU_ALT_SIPR) ?
+ POWER6_MMCRA_SDSYNC : MMCRA_SDSYNC;
+
+ if (!(mmcra & MMCRA_SAMPLE_ENABLE) || (mmcra & sdsync))
+ *addrp = mfspr(SPRN_SDAR);
+}
+
+static inline u32 perf_get_misc_flags(struct pt_regs *regs)
+{
+ unsigned long mmcra = regs->dsisr;
+
+ if (TRAP(regs) != 0xf00)
+ return 0; /* not a PMU interrupt */
+
+ if (ppmu->flags & PPMU_ALT_SIPR) {
+ if (mmcra & POWER6_MMCRA_SIHV)
+ return PERF_EVENT_MISC_HYPERVISOR;
+ return (mmcra & POWER6_MMCRA_SIPR) ?
+ PERF_EVENT_MISC_USER : PERF_EVENT_MISC_KERNEL;
+ }
+ if (mmcra & MMCRA_SIHV)
+ return PERF_EVENT_MISC_HYPERVISOR;
+ return (mmcra & MMCRA_SIPR) ? PERF_EVENT_MISC_USER :
+ PERF_EVENT_MISC_KERNEL;
+}
+
+/*
+ * Overload regs->dsisr to store MMCRA so we only need to read it once
+ * on each interrupt.
+ */
+static inline void perf_read_regs(struct pt_regs *regs)
+{
+ regs->dsisr = mfspr(SPRN_MMCRA);
+}
+
+/*
+ * If interrupts were soft-disabled when a PMU interrupt occurs, treat
+ * it as an NMI.
+ */
+static inline int perf_intr_is_nmi(struct pt_regs *regs)
+{
+ return !regs->softe;
+}
+
+#endif /* CONFIG_PPC64 */
+
static void perf_counter_interrupt(struct pt_regs *regs);
void perf_counter_print_debug(void)
@@ -78,12 +187,14 @@
case 6:
val = mfspr(SPRN_PMC6);
break;
+#ifdef CONFIG_PPC64
case 7:
val = mfspr(SPRN_PMC7);
break;
case 8:
val = mfspr(SPRN_PMC8);
break;
+#endif /* CONFIG_PPC64 */
default:
printk(KERN_ERR "oops trying to read PMC%d\n", idx);
val = 0;
@@ -115,12 +226,14 @@
case 6:
mtspr(SPRN_PMC6, val);
break;
+#ifdef CONFIG_PPC64
case 7:
mtspr(SPRN_PMC7, val);
break;
case 8:
mtspr(SPRN_PMC8, val);
break;
+#endif /* CONFIG_PPC64 */
default:
printk(KERN_ERR "oops trying to write PMC%d\n", idx);
}
@@ -135,15 +248,15 @@
static int power_check_constraints(u64 event[], unsigned int cflags[],
int n_ev)
{
- u64 mask, value, nv;
+ unsigned long mask, value, nv;
u64 alternatives[MAX_HWCOUNTERS][MAX_EVENT_ALTERNATIVES];
- u64 amasks[MAX_HWCOUNTERS][MAX_EVENT_ALTERNATIVES];
- u64 avalues[MAX_HWCOUNTERS][MAX_EVENT_ALTERNATIVES];
- u64 smasks[MAX_HWCOUNTERS], svalues[MAX_HWCOUNTERS];
+ unsigned long amasks[MAX_HWCOUNTERS][MAX_EVENT_ALTERNATIVES];
+ unsigned long avalues[MAX_HWCOUNTERS][MAX_EVENT_ALTERNATIVES];
+ unsigned long smasks[MAX_HWCOUNTERS], svalues[MAX_HWCOUNTERS];
int n_alt[MAX_HWCOUNTERS], choice[MAX_HWCOUNTERS];
int i, j;
- u64 addf = ppmu->add_fields;
- u64 tadd = ppmu->test_adder;
+ unsigned long addf = ppmu->add_fields;
+ unsigned long tadd = ppmu->test_adder;
if (n_ev > ppmu->n_counter)
return -1;
@@ -283,7 +396,7 @@
static void power_pmu_read(struct perf_counter *counter)
{
- long val, delta, prev;
+ s64 val, delta, prev;
if (!counter->hw.idx)
return;
@@ -403,14 +516,12 @@
void hw_perf_disable(void)
{
struct cpu_hw_counters *cpuhw;
- unsigned long ret;
unsigned long flags;
local_irq_save(flags);
cpuhw = &__get_cpu_var(cpu_hw_counters);
- ret = cpuhw->disabled;
- if (!ret) {
+ if (!cpuhw->disabled) {
cpuhw->disabled = 1;
cpuhw->n_added = 0;
@@ -479,7 +590,7 @@
mtspr(SPRN_MMCRA, cpuhw->mmcr[2] & ~MMCRA_SAMPLE_ENABLE);
mtspr(SPRN_MMCR1, cpuhw->mmcr[1]);
if (cpuhw->n_counters == 0)
- get_lppaca()->pmcregs_in_use = 0;
+ perf_set_pmu_inuse(0);
goto out_enable;
}
@@ -512,7 +623,7 @@
* bit set and set the hardware counters to their initial values.
* Then unfreeze the counters.
*/
- get_lppaca()->pmcregs_in_use = 1;
+ perf_set_pmu_inuse(1);
mtspr(SPRN_MMCRA, cpuhw->mmcr[2] & ~MMCRA_SAMPLE_ENABLE);
mtspr(SPRN_MMCR1, cpuhw->mmcr[1]);
mtspr(SPRN_MMCR0, (cpuhw->mmcr[0] & ~(MMCR0_PMC1CE | MMCR0_PMCjCE))
@@ -913,6 +1024,8 @@
case PERF_TYPE_RAW:
ev = counter->attr.config;
break;
+ default:
+ return ERR_PTR(-EINVAL);
}
counter->hw.config_base = ev;
counter->hw.idx = 0;
@@ -1007,13 +1120,12 @@
* things if requested. Note that interrupts are hard-disabled
* here so there is no possibility of being interrupted.
*/
-static void record_and_restart(struct perf_counter *counter, long val,
+static void record_and_restart(struct perf_counter *counter, unsigned long val,
struct pt_regs *regs, int nmi)
{
u64 period = counter->hw.sample_period;
s64 prev, delta, left;
int record = 0;
- u64 addr, mmcra, sdsync;
/* we don't have to worry about interrupts here */
prev = atomic64_read(&counter->hw.prev_count);
@@ -1033,8 +1145,8 @@
left = period;
record = 1;
}
- if (left < 0x80000000L)
- val = 0x80000000L - left;
+ if (left < 0x80000000LL)
+ val = 0x80000000LL - left;
}
/*
@@ -1047,22 +1159,9 @@
.period = counter->hw.last_period,
};
- if (counter->attr.sample_type & PERF_SAMPLE_ADDR) {
- /*
- * The user wants a data address recorded.
- * If we're not doing instruction sampling,
- * give them the SDAR (sampled data address).
- * If we are doing instruction sampling, then only
- * give them the SDAR if it corresponds to the
- * instruction pointed to by SIAR; this is indicated
- * by the [POWER6_]MMCRA_SDSYNC bit in MMCRA.
- */
- mmcra = regs->dsisr;
- sdsync = (ppmu->flags & PPMU_ALT_SIPR) ?
- POWER6_MMCRA_SDSYNC : MMCRA_SDSYNC;
- if (!(mmcra & MMCRA_SAMPLE_ENABLE) || (mmcra & sdsync))
- data.addr = mfspr(SPRN_SDAR);
- }
+ if (counter->attr.sample_type & PERF_SAMPLE_ADDR)
+ perf_get_data_addr(regs, &data.addr);
+
if (perf_counter_overflow(counter, nmi, &data)) {
/*
* Interrupts are coming too fast - throttle them
@@ -1088,25 +1187,12 @@
*/
unsigned long perf_misc_flags(struct pt_regs *regs)
{
- unsigned long mmcra;
+ u32 flags = perf_get_misc_flags(regs);
- if (TRAP(regs) != 0xf00) {
- /* not a PMU interrupt */
- return user_mode(regs) ? PERF_EVENT_MISC_USER :
- PERF_EVENT_MISC_KERNEL;
- }
-
- mmcra = regs->dsisr;
- if (ppmu->flags & PPMU_ALT_SIPR) {
- if (mmcra & POWER6_MMCRA_SIHV)
- return PERF_EVENT_MISC_HYPERVISOR;
- return (mmcra & POWER6_MMCRA_SIPR) ? PERF_EVENT_MISC_USER :
- PERF_EVENT_MISC_KERNEL;
- }
- if (mmcra & MMCRA_SIHV)
- return PERF_EVENT_MISC_HYPERVISOR;
- return (mmcra & MMCRA_SIPR) ? PERF_EVENT_MISC_USER :
- PERF_EVENT_MISC_KERNEL;
+ if (flags)
+ return flags;
+ return user_mode(regs) ? PERF_EVENT_MISC_USER :
+ PERF_EVENT_MISC_KERNEL;
}
/*
@@ -1115,20 +1201,12 @@
*/
unsigned long perf_instruction_pointer(struct pt_regs *regs)
{
- unsigned long mmcra;
unsigned long ip;
- unsigned long slot;
if (TRAP(regs) != 0xf00)
return regs->nip; /* not a PMU interrupt */
- ip = mfspr(SPRN_SIAR);
- mmcra = regs->dsisr;
- if ((mmcra & MMCRA_SAMPLE_ENABLE) && !(ppmu->flags & PPMU_ALT_SIPR)) {
- slot = (mmcra & MMCRA_SLOT) >> MMCRA_SLOT_SHIFT;
- if (slot > 1)
- ip += 4 * (slot - 1);
- }
+ ip = mfspr(SPRN_SIAR) + perf_ip_adjust(regs);
return ip;
}
@@ -1140,7 +1218,7 @@
int i;
struct cpu_hw_counters *cpuhw = &__get_cpu_var(cpu_hw_counters);
struct perf_counter *counter;
- long val;
+ unsigned long val;
int found = 0;
int nmi;
@@ -1148,16 +1226,9 @@
freeze_limited_counters(cpuhw, mfspr(SPRN_PMC5),
mfspr(SPRN_PMC6));
- /*
- * Overload regs->dsisr to store MMCRA so we only need to read it once.
- */
- regs->dsisr = mfspr(SPRN_MMCRA);
+ perf_read_regs(regs);
- /*
- * If interrupts were soft-disabled when this PMU interrupt
- * occurred, treat it as an NMI.
- */
- nmi = !regs->softe;
+ nmi = perf_intr_is_nmi(regs);
if (nmi)
nmi_enter();
else
@@ -1214,50 +1285,22 @@
cpuhw->mmcr[0] = MMCR0_FC;
}
-extern struct power_pmu power4_pmu;
-extern struct power_pmu ppc970_pmu;
-extern struct power_pmu power5_pmu;
-extern struct power_pmu power5p_pmu;
-extern struct power_pmu power6_pmu;
-extern struct power_pmu power7_pmu;
-
-static int init_perf_counters(void)
+int register_power_pmu(struct power_pmu *pmu)
{
- unsigned long pvr;
+ if (ppmu)
+ return -EBUSY; /* something's already registered */
- /* XXX should get this from cputable */
- pvr = mfspr(SPRN_PVR);
- switch (PVR_VER(pvr)) {
- case PV_POWER4:
- case PV_POWER4p:
- ppmu = &power4_pmu;
- break;
- case PV_970:
- case PV_970FX:
- case PV_970MP:
- ppmu = &ppc970_pmu;
- break;
- case PV_POWER5:
- ppmu = &power5_pmu;
- break;
- case PV_POWER5p:
- ppmu = &power5p_pmu;
- break;
- case 0x3e:
- ppmu = &power6_pmu;
- break;
- case 0x3f:
- ppmu = &power7_pmu;
- break;
- }
+ ppmu = pmu;
+ pr_info("%s performance monitor hardware support registered\n",
+ pmu->name);
+#ifdef MSR_HV
/*
* Use FCHV to ignore kernel events if MSR.HV is set.
*/
if (mfmsr() & MSR_HV)
freeze_counters_kernel = MMCR0_FCHV;
+#endif /* MSR_HV */
return 0;
}
-
-arch_initcall(init_perf_counters);
diff --git a/arch/powerpc/kernel/power4-pmu.c b/arch/powerpc/kernel/power4-pmu.c
index 07bd308..db90b0c 100644
--- a/arch/powerpc/kernel/power4-pmu.c
+++ b/arch/powerpc/kernel/power4-pmu.c
@@ -10,7 +10,9 @@
*/
#include <linux/kernel.h>
#include <linux/perf_counter.h>
+#include <linux/string.h>
#include <asm/reg.h>
+#include <asm/cputable.h>
/*
* Bits in event code for POWER4
@@ -179,22 +181,22 @@
*/
static struct unitinfo {
- u64 value, mask;
- int unit;
- int lowerbit;
+ unsigned long value, mask;
+ int unit;
+ int lowerbit;
} p4_unitinfo[16] = {
- [PM_FPU] = { 0x44000000000000ull, 0x88000000000000ull, PM_FPU, 0 },
- [PM_ISU1] = { 0x20080000000000ull, 0x88000000000000ull, PM_ISU1, 0 },
+ [PM_FPU] = { 0x44000000000000ul, 0x88000000000000ul, PM_FPU, 0 },
+ [PM_ISU1] = { 0x20080000000000ul, 0x88000000000000ul, PM_ISU1, 0 },
[PM_ISU1_ALT] =
- { 0x20080000000000ull, 0x88000000000000ull, PM_ISU1, 0 },
- [PM_IFU] = { 0x02200000000000ull, 0x08820000000000ull, PM_IFU, 41 },
+ { 0x20080000000000ul, 0x88000000000000ul, PM_ISU1, 0 },
+ [PM_IFU] = { 0x02200000000000ul, 0x08820000000000ul, PM_IFU, 41 },
[PM_IFU_ALT] =
- { 0x02200000000000ull, 0x08820000000000ull, PM_IFU, 41 },
- [PM_IDU0] = { 0x10100000000000ull, 0x80840000000000ull, PM_IDU0, 1 },
- [PM_ISU2] = { 0x10140000000000ull, 0x80840000000000ull, PM_ISU2, 0 },
- [PM_LSU0] = { 0x01400000000000ull, 0x08800000000000ull, PM_LSU0, 0 },
- [PM_LSU1] = { 0x00000000000000ull, 0x00010000000000ull, PM_LSU1, 40 },
- [PM_GPS] = { 0x00000000000000ull, 0x00000000000000ull, PM_GPS, 0 }
+ { 0x02200000000000ul, 0x08820000000000ul, PM_IFU, 41 },
+ [PM_IDU0] = { 0x10100000000000ul, 0x80840000000000ul, PM_IDU0, 1 },
+ [PM_ISU2] = { 0x10140000000000ul, 0x80840000000000ul, PM_ISU2, 0 },
+ [PM_LSU0] = { 0x01400000000000ul, 0x08800000000000ul, PM_LSU0, 0 },
+ [PM_LSU1] = { 0x00000000000000ul, 0x00010000000000ul, PM_LSU1, 40 },
+ [PM_GPS] = { 0x00000000000000ul, 0x00000000000000ul, PM_GPS, 0 }
};
static unsigned char direct_marked_event[8] = {
@@ -249,10 +251,11 @@
return (mask >> (byte * 8 + bit)) & 1;
}
-static int p4_get_constraint(u64 event, u64 *maskp, u64 *valp)
+static int p4_get_constraint(u64 event, unsigned long *maskp,
+ unsigned long *valp)
{
int pmc, byte, unit, lower, sh;
- u64 mask = 0, value = 0;
+ unsigned long mask = 0, value = 0;
int grp = -1;
pmc = (event >> PM_PMC_SH) & PM_PMC_MSK;
@@ -282,14 +285,14 @@
value |= p4_unitinfo[unit].value;
sh = p4_unitinfo[unit].lowerbit;
if (sh > 1)
- value |= (u64)lower << sh;
+ value |= (unsigned long)lower << sh;
else if (lower != sh)
return -1;
unit = p4_unitinfo[unit].unit;
/* Set byte lane select field */
mask |= 0xfULL << (28 - 4 * byte);
- value |= (u64)unit << (28 - 4 * byte);
+ value |= (unsigned long)unit << (28 - 4 * byte);
}
if (grp == 0) {
/* increment PMC1/2/5/6 field */
@@ -353,9 +356,9 @@
}
static int p4_compute_mmcr(u64 event[], int n_ev,
- unsigned int hwc[], u64 mmcr[])
+ unsigned int hwc[], unsigned long mmcr[])
{
- u64 mmcr0 = 0, mmcr1 = 0, mmcra = 0;
+ unsigned long mmcr0 = 0, mmcr1 = 0, mmcra = 0;
unsigned int pmc, unit, byte, psel, lower;
unsigned int ttm, grp;
unsigned int pmc_inuse = 0;
@@ -429,9 +432,11 @@
return -1;
/* Set TTMxSEL fields. Note, units 1-3 => TTM0SEL codes 0-2 */
- mmcr1 |= (u64)(unituse[3] * 2 + unituse[2]) << MMCR1_TTM0SEL_SH;
- mmcr1 |= (u64)(unituse[7] * 3 + unituse[6] * 2) << MMCR1_TTM1SEL_SH;
- mmcr1 |= (u64)unituse[9] << MMCR1_TTM2SEL_SH;
+ mmcr1 |= (unsigned long)(unituse[3] * 2 + unituse[2])
+ << MMCR1_TTM0SEL_SH;
+ mmcr1 |= (unsigned long)(unituse[7] * 3 + unituse[6] * 2)
+ << MMCR1_TTM1SEL_SH;
+ mmcr1 |= (unsigned long)unituse[9] << MMCR1_TTM2SEL_SH;
/* Set TTCxSEL fields. */
if (unitlower & 0xe)
@@ -456,7 +461,8 @@
ttm = unit - 1; /* 2->1, 3->2 */
else
ttm = unit >> 2;
- mmcr1 |= (u64)ttm << (MMCR1_TD_CP_DBG0SEL_SH - 2*byte);
+ mmcr1 |= (unsigned long)ttm
+ << (MMCR1_TD_CP_DBG0SEL_SH - 2 * byte);
}
}
@@ -519,7 +525,7 @@
return 0;
}
-static void p4_disable_pmc(unsigned int pmc, u64 mmcr[])
+static void p4_disable_pmc(unsigned int pmc, unsigned long mmcr[])
{
/*
* Setting the PMCxSEL field to 0 disables PMC x.
@@ -583,16 +589,27 @@
},
};
-struct power_pmu power4_pmu = {
- .n_counter = 8,
- .max_alternatives = 5,
- .add_fields = 0x0000001100005555ull,
- .test_adder = 0x0011083300000000ull,
- .compute_mmcr = p4_compute_mmcr,
- .get_constraint = p4_get_constraint,
- .get_alternatives = p4_get_alternatives,
- .disable_pmc = p4_disable_pmc,
- .n_generic = ARRAY_SIZE(p4_generic_events),
- .generic_events = p4_generic_events,
- .cache_events = &power4_cache_events,
+static struct power_pmu power4_pmu = {
+ .name = "POWER4/4+",
+ .n_counter = 8,
+ .max_alternatives = 5,
+ .add_fields = 0x0000001100005555ul,
+ .test_adder = 0x0011083300000000ul,
+ .compute_mmcr = p4_compute_mmcr,
+ .get_constraint = p4_get_constraint,
+ .get_alternatives = p4_get_alternatives,
+ .disable_pmc = p4_disable_pmc,
+ .n_generic = ARRAY_SIZE(p4_generic_events),
+ .generic_events = p4_generic_events,
+ .cache_events = &power4_cache_events,
};
+
+static int init_power4_pmu(void)
+{
+ if (strcmp(cur_cpu_spec->oprofile_cpu_type, "ppc64/power4"))
+ return -ENODEV;
+
+ return register_power_pmu(&power4_pmu);
+}
+
+arch_initcall(init_power4_pmu);
diff --git a/arch/powerpc/kernel/power5+-pmu.c b/arch/powerpc/kernel/power5+-pmu.c
index 41e5d2d..f4adca8 100644
--- a/arch/powerpc/kernel/power5+-pmu.c
+++ b/arch/powerpc/kernel/power5+-pmu.c
@@ -10,7 +10,9 @@
*/
#include <linux/kernel.h>
#include <linux/perf_counter.h>
+#include <linux/string.h>
#include <asm/reg.h>
+#include <asm/cputable.h>
/*
* Bits in event code for POWER5+ (POWER5 GS) and POWER5++ (POWER5 GS DD3)
@@ -126,20 +128,21 @@
};
/* Masks and values for using events from the various units */
-static u64 unit_cons[PM_LASTUNIT+1][2] = {
- [PM_FPU] = { 0x3200000000ull, 0x0100000000ull },
- [PM_ISU0] = { 0x0200000000ull, 0x0080000000ull },
- [PM_ISU1] = { 0x3200000000ull, 0x3100000000ull },
- [PM_IFU] = { 0x3200000000ull, 0x2100000000ull },
- [PM_IDU] = { 0x0e00000000ull, 0x0040000000ull },
- [PM_GRS] = { 0x0e00000000ull, 0x0c40000000ull },
+static unsigned long unit_cons[PM_LASTUNIT+1][2] = {
+ [PM_FPU] = { 0x3200000000ul, 0x0100000000ul },
+ [PM_ISU0] = { 0x0200000000ul, 0x0080000000ul },
+ [PM_ISU1] = { 0x3200000000ul, 0x3100000000ul },
+ [PM_IFU] = { 0x3200000000ul, 0x2100000000ul },
+ [PM_IDU] = { 0x0e00000000ul, 0x0040000000ul },
+ [PM_GRS] = { 0x0e00000000ul, 0x0c40000000ul },
};
-static int power5p_get_constraint(u64 event, u64 *maskp, u64 *valp)
+static int power5p_get_constraint(u64 event, unsigned long *maskp,
+ unsigned long *valp)
{
int pmc, byte, unit, sh;
int bit, fmask;
- u64 mask = 0, value = 0;
+ unsigned long mask = 0, value = 0;
pmc = (event >> PM_PMC_SH) & PM_PMC_MSK;
if (pmc) {
@@ -171,17 +174,18 @@
bit = event & 7;
fmask = (bit == 6)? 7: 3;
sh = grsel_shift[bit];
- mask |= (u64)fmask << sh;
- value |= (u64)((event >> PM_GRS_SH) & fmask) << sh;
+ mask |= (unsigned long)fmask << sh;
+ value |= (unsigned long)((event >> PM_GRS_SH) & fmask)
+ << sh;
}
/* Set byte lane select field */
- mask |= 0xfULL << (24 - 4 * byte);
- value |= (u64)unit << (24 - 4 * byte);
+ mask |= 0xfUL << (24 - 4 * byte);
+ value |= (unsigned long)unit << (24 - 4 * byte);
}
if (pmc < 5) {
/* need a counter from PMC1-4 set */
- mask |= 0x8000000000000ull;
- value |= 0x1000000000000ull;
+ mask |= 0x8000000000000ul;
+ value |= 0x1000000000000ul;
}
*maskp = mask;
*valp = value;
@@ -452,10 +456,10 @@
}
static int power5p_compute_mmcr(u64 event[], int n_ev,
- unsigned int hwc[], u64 mmcr[])
+ unsigned int hwc[], unsigned long mmcr[])
{
- u64 mmcr1 = 0;
- u64 mmcra = 0;
+ unsigned long mmcr1 = 0;
+ unsigned long mmcra = 0;
unsigned int pmc, unit, byte, psel;
unsigned int ttm;
int i, isbus, bit, grsel;
@@ -517,7 +521,7 @@
continue;
if (ttmuse++)
return -1;
- mmcr1 |= (u64)i << MMCR1_TTM0SEL_SH;
+ mmcr1 |= (unsigned long)i << MMCR1_TTM0SEL_SH;
}
ttmuse = 0;
for (; i <= PM_GRS; ++i) {
@@ -525,7 +529,7 @@
continue;
if (ttmuse++)
return -1;
- mmcr1 |= (u64)(i & 3) << MMCR1_TTM1SEL_SH;
+ mmcr1 |= (unsigned long)(i & 3) << MMCR1_TTM1SEL_SH;
}
if (ttmuse > 1)
return -1;
@@ -540,10 +544,11 @@
unit = PM_ISU0_ALT;
} else if (unit == PM_LSU1 + 1) {
/* select lower word of LSU1 for this byte */
- mmcr1 |= 1ull << (MMCR1_TTM3SEL_SH + 3 - byte);
+ mmcr1 |= 1ul << (MMCR1_TTM3SEL_SH + 3 - byte);
}
ttm = unit >> 2;
- mmcr1 |= (u64)ttm << (MMCR1_TD_CP_DBG0SEL_SH - 2 * byte);
+ mmcr1 |= (unsigned long)ttm
+ << (MMCR1_TD_CP_DBG0SEL_SH - 2 * byte);
}
/* Second pass: assign PMCs, set PMCxSEL and PMCx_ADDER_SEL fields */
@@ -568,7 +573,7 @@
if (isbus && (byte & 2) &&
(psel == 8 || psel == 0x10 || psel == 0x28))
/* add events on higher-numbered bus */
- mmcr1 |= 1ull << (MMCR1_PMC1_ADDER_SEL_SH - pmc);
+ mmcr1 |= 1ul << (MMCR1_PMC1_ADDER_SEL_SH - pmc);
} else {
/* Instructions or run cycles on PMC5/6 */
--pmc;
@@ -576,7 +581,7 @@
if (isbus && unit == PM_GRS) {
bit = psel & 7;
grsel = (event[i] >> PM_GRS_SH) & PM_GRS_MSK;
- mmcr1 |= (u64)grsel << grsel_shift[bit];
+ mmcr1 |= (unsigned long)grsel << grsel_shift[bit];
}
if (power5p_marked_instr_event(event[i]))
mmcra |= MMCRA_SAMPLE_ENABLE;
@@ -599,7 +604,7 @@
return 0;
}
-static void power5p_disable_pmc(unsigned int pmc, u64 mmcr[])
+static void power5p_disable_pmc(unsigned int pmc, unsigned long mmcr[])
{
if (pmc <= 3)
mmcr[1] &= ~(0x7fUL << MMCR1_PMCSEL_SH(pmc));
@@ -654,18 +659,30 @@
},
};
-struct power_pmu power5p_pmu = {
- .n_counter = 6,
- .max_alternatives = MAX_ALT,
- .add_fields = 0x7000000000055ull,
- .test_adder = 0x3000040000000ull,
- .compute_mmcr = power5p_compute_mmcr,
- .get_constraint = power5p_get_constraint,
- .get_alternatives = power5p_get_alternatives,
- .disable_pmc = power5p_disable_pmc,
- .limited_pmc_event = power5p_limited_pmc_event,
- .flags = PPMU_LIMITED_PMC5_6,
- .n_generic = ARRAY_SIZE(power5p_generic_events),
- .generic_events = power5p_generic_events,
- .cache_events = &power5p_cache_events,
+static struct power_pmu power5p_pmu = {
+ .name = "POWER5+/++",
+ .n_counter = 6,
+ .max_alternatives = MAX_ALT,
+ .add_fields = 0x7000000000055ul,
+ .test_adder = 0x3000040000000ul,
+ .compute_mmcr = power5p_compute_mmcr,
+ .get_constraint = power5p_get_constraint,
+ .get_alternatives = power5p_get_alternatives,
+ .disable_pmc = power5p_disable_pmc,
+ .limited_pmc_event = power5p_limited_pmc_event,
+ .flags = PPMU_LIMITED_PMC5_6,
+ .n_generic = ARRAY_SIZE(power5p_generic_events),
+ .generic_events = power5p_generic_events,
+ .cache_events = &power5p_cache_events,
};
+
+static int init_power5p_pmu(void)
+{
+ if (strcmp(cur_cpu_spec->oprofile_cpu_type, "ppc64/power5+")
+ && strcmp(cur_cpu_spec->oprofile_cpu_type, "ppc64/power5++"))
+ return -ENODEV;
+
+ return register_power_pmu(&power5p_pmu);
+}
+
+arch_initcall(init_power5p_pmu);
diff --git a/arch/powerpc/kernel/power5-pmu.c b/arch/powerpc/kernel/power5-pmu.c
index 05600b6..29b2c6c 100644
--- a/arch/powerpc/kernel/power5-pmu.c
+++ b/arch/powerpc/kernel/power5-pmu.c
@@ -10,7 +10,9 @@
*/
#include <linux/kernel.h>
#include <linux/perf_counter.h>
+#include <linux/string.h>
#include <asm/reg.h>
+#include <asm/cputable.h>
/*
* Bits in event code for POWER5 (not POWER5++)
@@ -130,20 +132,21 @@
};
/* Masks and values for using events from the various units */
-static u64 unit_cons[PM_LASTUNIT+1][2] = {
- [PM_FPU] = { 0xc0002000000000ull, 0x00001000000000ull },
- [PM_ISU0] = { 0x00002000000000ull, 0x00000800000000ull },
- [PM_ISU1] = { 0xc0002000000000ull, 0xc0001000000000ull },
- [PM_IFU] = { 0xc0002000000000ull, 0x80001000000000ull },
- [PM_IDU] = { 0x30002000000000ull, 0x00000400000000ull },
- [PM_GRS] = { 0x30002000000000ull, 0x30000400000000ull },
+static unsigned long unit_cons[PM_LASTUNIT+1][2] = {
+ [PM_FPU] = { 0xc0002000000000ul, 0x00001000000000ul },
+ [PM_ISU0] = { 0x00002000000000ul, 0x00000800000000ul },
+ [PM_ISU1] = { 0xc0002000000000ul, 0xc0001000000000ul },
+ [PM_IFU] = { 0xc0002000000000ul, 0x80001000000000ul },
+ [PM_IDU] = { 0x30002000000000ul, 0x00000400000000ul },
+ [PM_GRS] = { 0x30002000000000ul, 0x30000400000000ul },
};
-static int power5_get_constraint(u64 event, u64 *maskp, u64 *valp)
+static int power5_get_constraint(u64 event, unsigned long *maskp,
+ unsigned long *valp)
{
int pmc, byte, unit, sh;
int bit, fmask;
- u64 mask = 0, value = 0;
+ unsigned long mask = 0, value = 0;
int grp = -1;
pmc = (event >> PM_PMC_SH) & PM_PMC_MSK;
@@ -178,8 +181,9 @@
bit = event & 7;
fmask = (bit == 6)? 7: 3;
sh = grsel_shift[bit];
- mask |= (u64)fmask << sh;
- value |= (u64)((event >> PM_GRS_SH) & fmask) << sh;
+ mask |= (unsigned long)fmask << sh;
+ value |= (unsigned long)((event >> PM_GRS_SH) & fmask)
+ << sh;
}
/*
* Bus events on bytes 0 and 2 can be counted
@@ -188,22 +192,22 @@
if (!pmc)
grp = byte & 1;
/* Set byte lane select field */
- mask |= 0xfULL << (24 - 4 * byte);
- value |= (u64)unit << (24 - 4 * byte);
+ mask |= 0xfUL << (24 - 4 * byte);
+ value |= (unsigned long)unit << (24 - 4 * byte);
}
if (grp == 0) {
/* increment PMC1/2 field */
- mask |= 0x200000000ull;
- value |= 0x080000000ull;
+ mask |= 0x200000000ul;
+ value |= 0x080000000ul;
} else if (grp == 1) {
/* increment PMC3/4 field */
- mask |= 0x40000000ull;
- value |= 0x10000000ull;
+ mask |= 0x40000000ul;
+ value |= 0x10000000ul;
}
if (pmc < 5) {
/* need a counter from PMC1-4 set */
- mask |= 0x8000000000000ull;
- value |= 0x1000000000000ull;
+ mask |= 0x8000000000000ul;
+ value |= 0x1000000000000ul;
}
*maskp = mask;
*valp = value;
@@ -383,10 +387,10 @@
}
static int power5_compute_mmcr(u64 event[], int n_ev,
- unsigned int hwc[], u64 mmcr[])
+ unsigned int hwc[], unsigned long mmcr[])
{
- u64 mmcr1 = 0;
- u64 mmcra = 0;
+ unsigned long mmcr1 = 0;
+ unsigned long mmcra = 0;
unsigned int pmc, unit, byte, psel;
unsigned int ttm, grp;
int i, isbus, bit, grsel;
@@ -457,7 +461,7 @@
continue;
if (ttmuse++)
return -1;
- mmcr1 |= (u64)i << MMCR1_TTM0SEL_SH;
+ mmcr1 |= (unsigned long)i << MMCR1_TTM0SEL_SH;
}
ttmuse = 0;
for (; i <= PM_GRS; ++i) {
@@ -465,7 +469,7 @@
continue;
if (ttmuse++)
return -1;
- mmcr1 |= (u64)(i & 3) << MMCR1_TTM1SEL_SH;
+ mmcr1 |= (unsigned long)(i & 3) << MMCR1_TTM1SEL_SH;
}
if (ttmuse > 1)
return -1;
@@ -480,10 +484,11 @@
unit = PM_ISU0_ALT;
} else if (unit == PM_LSU1 + 1) {
/* select lower word of LSU1 for this byte */
- mmcr1 |= 1ull << (MMCR1_TTM3SEL_SH + 3 - byte);
+ mmcr1 |= 1ul << (MMCR1_TTM3SEL_SH + 3 - byte);
}
ttm = unit >> 2;
- mmcr1 |= (u64)ttm << (MMCR1_TD_CP_DBG0SEL_SH - 2 * byte);
+ mmcr1 |= (unsigned long)ttm
+ << (MMCR1_TD_CP_DBG0SEL_SH - 2 * byte);
}
/* Second pass: assign PMCs, set PMCxSEL and PMCx_ADDER_SEL fields */
@@ -513,7 +518,7 @@
--pmc;
if ((psel == 8 || psel == 0x10) && isbus && (byte & 2))
/* add events on higher-numbered bus */
- mmcr1 |= 1ull << (MMCR1_PMC1_ADDER_SEL_SH - pmc);
+ mmcr1 |= 1ul << (MMCR1_PMC1_ADDER_SEL_SH - pmc);
} else {
/* Instructions or run cycles on PMC5/6 */
--pmc;
@@ -521,7 +526,7 @@
if (isbus && unit == PM_GRS) {
bit = psel & 7;
grsel = (event[i] >> PM_GRS_SH) & PM_GRS_MSK;
- mmcr1 |= (u64)grsel << grsel_shift[bit];
+ mmcr1 |= (unsigned long)grsel << grsel_shift[bit];
}
if (power5_marked_instr_event(event[i]))
mmcra |= MMCRA_SAMPLE_ENABLE;
@@ -541,7 +546,7 @@
return 0;
}
-static void power5_disable_pmc(unsigned int pmc, u64 mmcr[])
+static void power5_disable_pmc(unsigned int pmc, unsigned long mmcr[])
{
if (pmc <= 3)
mmcr[1] &= ~(0x7fUL << MMCR1_PMCSEL_SH(pmc));
@@ -596,16 +601,27 @@
},
};
-struct power_pmu power5_pmu = {
- .n_counter = 6,
- .max_alternatives = MAX_ALT,
- .add_fields = 0x7000090000555ull,
- .test_adder = 0x3000490000000ull,
- .compute_mmcr = power5_compute_mmcr,
- .get_constraint = power5_get_constraint,
- .get_alternatives = power5_get_alternatives,
- .disable_pmc = power5_disable_pmc,
- .n_generic = ARRAY_SIZE(power5_generic_events),
- .generic_events = power5_generic_events,
- .cache_events = &power5_cache_events,
+static struct power_pmu power5_pmu = {
+ .name = "POWER5",
+ .n_counter = 6,
+ .max_alternatives = MAX_ALT,
+ .add_fields = 0x7000090000555ul,
+ .test_adder = 0x3000490000000ul,
+ .compute_mmcr = power5_compute_mmcr,
+ .get_constraint = power5_get_constraint,
+ .get_alternatives = power5_get_alternatives,
+ .disable_pmc = power5_disable_pmc,
+ .n_generic = ARRAY_SIZE(power5_generic_events),
+ .generic_events = power5_generic_events,
+ .cache_events = &power5_cache_events,
};
+
+static int init_power5_pmu(void)
+{
+ if (strcmp(cur_cpu_spec->oprofile_cpu_type, "ppc64/power5"))
+ return -ENODEV;
+
+ return register_power_pmu(&power5_pmu);
+}
+
+arch_initcall(init_power5_pmu);
diff --git a/arch/powerpc/kernel/power6-pmu.c b/arch/powerpc/kernel/power6-pmu.c
index 46f74be..09ae5bf 100644
--- a/arch/powerpc/kernel/power6-pmu.c
+++ b/arch/powerpc/kernel/power6-pmu.c
@@ -10,7 +10,9 @@
*/
#include <linux/kernel.h>
#include <linux/perf_counter.h>
+#include <linux/string.h>
#include <asm/reg.h>
+#include <asm/cputable.h>
/*
* Bits in event code for POWER6
@@ -41,9 +43,9 @@
#define MMCR1_NESTSEL_SH 45
#define MMCR1_NESTSEL_MSK 0x7
#define MMCR1_NESTSEL(m) (((m) >> MMCR1_NESTSEL_SH) & MMCR1_NESTSEL_MSK)
-#define MMCR1_PMC1_LLA ((u64)1 << 44)
-#define MMCR1_PMC1_LLA_VALUE ((u64)1 << 39)
-#define MMCR1_PMC1_ADDR_SEL ((u64)1 << 35)
+#define MMCR1_PMC1_LLA (1ul << 44)
+#define MMCR1_PMC1_LLA_VALUE (1ul << 39)
+#define MMCR1_PMC1_ADDR_SEL (1ul << 35)
#define MMCR1_PMC1SEL_SH 24
#define MMCR1_PMCSEL_SH(n) (MMCR1_PMC1SEL_SH - (n) * 8)
#define MMCR1_PMCSEL_MSK 0xff
@@ -173,10 +175,10 @@
* Assign PMC numbers and compute MMCR1 value for a set of events
*/
static int p6_compute_mmcr(u64 event[], int n_ev,
- unsigned int hwc[], u64 mmcr[])
+ unsigned int hwc[], unsigned long mmcr[])
{
- u64 mmcr1 = 0;
- u64 mmcra = 0;
+ unsigned long mmcr1 = 0;
+ unsigned long mmcra = 0;
int i;
unsigned int pmc, ev, b, u, s, psel;
unsigned int ttmset = 0;
@@ -215,7 +217,7 @@
/* check for conflict on this byte of event bus */
if ((ttmset & (1 << b)) && MMCR1_TTMSEL(mmcr1, b) != u)
return -1;
- mmcr1 |= (u64)u << MMCR1_TTMSEL_SH(b);
+ mmcr1 |= (unsigned long)u << MMCR1_TTMSEL_SH(b);
ttmset |= 1 << b;
if (u == 5) {
/* Nest events have a further mux */
@@ -224,7 +226,7 @@
MMCR1_NESTSEL(mmcr1) != s)
return -1;
ttmset |= 0x10;
- mmcr1 |= (u64)s << MMCR1_NESTSEL_SH;
+ mmcr1 |= (unsigned long)s << MMCR1_NESTSEL_SH;
}
if (0x30 <= psel && psel <= 0x3d) {
/* these need the PMCx_ADDR_SEL bits */
@@ -243,7 +245,7 @@
if (power6_marked_instr_event(event[i]))
mmcra |= MMCRA_SAMPLE_ENABLE;
if (pmc < 4)
- mmcr1 |= (u64)psel << MMCR1_PMCSEL_SH(pmc);
+ mmcr1 |= (unsigned long)psel << MMCR1_PMCSEL_SH(pmc);
}
mmcr[0] = 0;
if (pmc_inuse & 1)
@@ -265,10 +267,11 @@
* 20-23, 24-27, 28-31 ditto for bytes 1, 2, 3
* 32-34 select field: nest (subunit) event selector
*/
-static int p6_get_constraint(u64 event, u64 *maskp, u64 *valp)
+static int p6_get_constraint(u64 event, unsigned long *maskp,
+ unsigned long *valp)
{
int pmc, byte, sh, subunit;
- u64 mask = 0, value = 0;
+ unsigned long mask = 0, value = 0;
pmc = (event >> PM_PMC_SH) & PM_PMC_MSK;
if (pmc) {
@@ -282,11 +285,11 @@
byte = (event >> PM_BYTE_SH) & PM_BYTE_MSK;
sh = byte * 4 + (16 - PM_UNIT_SH);
mask |= PM_UNIT_MSKS << sh;
- value |= (u64)(event & PM_UNIT_MSKS) << sh;
+ value |= (unsigned long)(event & PM_UNIT_MSKS) << sh;
if ((event & PM_UNIT_MSKS) == (5 << PM_UNIT_SH)) {
subunit = (event >> PM_SUBUNIT_SH) & PM_SUBUNIT_MSK;
- mask |= (u64)PM_SUBUNIT_MSK << 32;
- value |= (u64)subunit << 32;
+ mask |= (unsigned long)PM_SUBUNIT_MSK << 32;
+ value |= (unsigned long)subunit << 32;
}
}
if (pmc <= 4) {
@@ -458,7 +461,7 @@
return nalt;
}
-static void p6_disable_pmc(unsigned int pmc, u64 mmcr[])
+static void p6_disable_pmc(unsigned int pmc, unsigned long mmcr[])
{
/* Set PMCxSEL to 0 to disable PMCx */
if (pmc <= 3)
@@ -515,18 +518,29 @@
},
};
-struct power_pmu power6_pmu = {
- .n_counter = 6,
- .max_alternatives = MAX_ALT,
- .add_fields = 0x1555,
- .test_adder = 0x3000,
- .compute_mmcr = p6_compute_mmcr,
- .get_constraint = p6_get_constraint,
- .get_alternatives = p6_get_alternatives,
- .disable_pmc = p6_disable_pmc,
- .limited_pmc_event = p6_limited_pmc_event,
- .flags = PPMU_LIMITED_PMC5_6 | PPMU_ALT_SIPR,
- .n_generic = ARRAY_SIZE(power6_generic_events),
- .generic_events = power6_generic_events,
- .cache_events = &power6_cache_events,
+static struct power_pmu power6_pmu = {
+ .name = "POWER6",
+ .n_counter = 6,
+ .max_alternatives = MAX_ALT,
+ .add_fields = 0x1555,
+ .test_adder = 0x3000,
+ .compute_mmcr = p6_compute_mmcr,
+ .get_constraint = p6_get_constraint,
+ .get_alternatives = p6_get_alternatives,
+ .disable_pmc = p6_disable_pmc,
+ .limited_pmc_event = p6_limited_pmc_event,
+ .flags = PPMU_LIMITED_PMC5_6 | PPMU_ALT_SIPR,
+ .n_generic = ARRAY_SIZE(power6_generic_events),
+ .generic_events = power6_generic_events,
+ .cache_events = &power6_cache_events,
};
+
+static int init_power6_pmu(void)
+{
+ if (strcmp(cur_cpu_spec->oprofile_cpu_type, "ppc64/power6"))
+ return -ENODEV;
+
+ return register_power_pmu(&power6_pmu);
+}
+
+arch_initcall(init_power6_pmu);
diff --git a/arch/powerpc/kernel/power7-pmu.c b/arch/powerpc/kernel/power7-pmu.c
index b72e7a1..5d755ef 100644
--- a/arch/powerpc/kernel/power7-pmu.c
+++ b/arch/powerpc/kernel/power7-pmu.c
@@ -10,7 +10,9 @@
*/
#include <linux/kernel.h>
#include <linux/perf_counter.h>
+#include <linux/string.h>
#include <asm/reg.h>
+#include <asm/cputable.h>
/*
* Bits in event code for POWER7
@@ -71,10 +73,11 @@
* 0-9: Count of events needing PMC1..PMC5
*/
-static int power7_get_constraint(u64 event, u64 *maskp, u64 *valp)
+static int power7_get_constraint(u64 event, unsigned long *maskp,
+ unsigned long *valp)
{
int pmc, sh;
- u64 mask = 0, value = 0;
+ unsigned long mask = 0, value = 0;
pmc = (event >> PM_PMC_SH) & PM_PMC_MSK;
if (pmc) {
@@ -224,10 +227,10 @@
}
static int power7_compute_mmcr(u64 event[], int n_ev,
- unsigned int hwc[], u64 mmcr[])
+ unsigned int hwc[], unsigned long mmcr[])
{
- u64 mmcr1 = 0;
- u64 mmcra = 0;
+ unsigned long mmcr1 = 0;
+ unsigned long mmcra = 0;
unsigned int pmc, unit, combine, l2sel, psel;
unsigned int pmc_inuse = 0;
int i;
@@ -265,11 +268,14 @@
--pmc;
}
if (pmc <= 3) {
- mmcr1 |= (u64) unit << (MMCR1_TTM0SEL_SH - 4 * pmc);
- mmcr1 |= (u64) combine << (MMCR1_PMC1_COMBINE_SH - pmc);
+ mmcr1 |= (unsigned long) unit
+ << (MMCR1_TTM0SEL_SH - 4 * pmc);
+ mmcr1 |= (unsigned long) combine
+ << (MMCR1_PMC1_COMBINE_SH - pmc);
mmcr1 |= psel << MMCR1_PMCSEL_SH(pmc);
if (unit == 6) /* L2 events */
- mmcr1 |= (u64) l2sel << MMCR1_L2SEL_SH;
+ mmcr1 |= (unsigned long) l2sel
+ << MMCR1_L2SEL_SH;
}
if (power7_marked_instr_event(event[i]))
mmcra |= MMCRA_SAMPLE_ENABLE;
@@ -287,10 +293,10 @@
return 0;
}
-static void power7_disable_pmc(unsigned int pmc, u64 mmcr[])
+static void power7_disable_pmc(unsigned int pmc, unsigned long mmcr[])
{
if (pmc <= 3)
- mmcr[1] &= ~(0xffULL << MMCR1_PMCSEL_SH(pmc));
+ mmcr[1] &= ~(0xffUL << MMCR1_PMCSEL_SH(pmc));
}
static int power7_generic_events[] = {
@@ -342,16 +348,27 @@
},
};
-struct power_pmu power7_pmu = {
- .n_counter = 6,
- .max_alternatives = MAX_ALT + 1,
- .add_fields = 0x1555ull,
- .test_adder = 0x3000ull,
- .compute_mmcr = power7_compute_mmcr,
- .get_constraint = power7_get_constraint,
- .get_alternatives = power7_get_alternatives,
- .disable_pmc = power7_disable_pmc,
- .n_generic = ARRAY_SIZE(power7_generic_events),
- .generic_events = power7_generic_events,
- .cache_events = &power7_cache_events,
+static struct power_pmu power7_pmu = {
+ .name = "POWER7",
+ .n_counter = 6,
+ .max_alternatives = MAX_ALT + 1,
+ .add_fields = 0x1555ul,
+ .test_adder = 0x3000ul,
+ .compute_mmcr = power7_compute_mmcr,
+ .get_constraint = power7_get_constraint,
+ .get_alternatives = power7_get_alternatives,
+ .disable_pmc = power7_disable_pmc,
+ .n_generic = ARRAY_SIZE(power7_generic_events),
+ .generic_events = power7_generic_events,
+ .cache_events = &power7_cache_events,
};
+
+static int init_power7_pmu(void)
+{
+ if (strcmp(cur_cpu_spec->oprofile_cpu_type, "ppc64/power7"))
+ return -ENODEV;
+
+ return register_power_pmu(&power7_pmu);
+}
+
+arch_initcall(init_power7_pmu);
diff --git a/arch/powerpc/kernel/ppc970-pmu.c b/arch/powerpc/kernel/ppc970-pmu.c
index ba0a357..6637c87 100644
--- a/arch/powerpc/kernel/ppc970-pmu.c
+++ b/arch/powerpc/kernel/ppc970-pmu.c
@@ -10,7 +10,9 @@
*/
#include <linux/string.h>
#include <linux/perf_counter.h>
+#include <linux/string.h>
#include <asm/reg.h>
+#include <asm/cputable.h>
/*
* Bits in event code for PPC970
@@ -183,7 +185,7 @@
}
/* Masks and values for using events from the various units */
-static u64 unit_cons[PM_LASTUNIT+1][2] = {
+static unsigned long unit_cons[PM_LASTUNIT+1][2] = {
[PM_FPU] = { 0xc80000000000ull, 0x040000000000ull },
[PM_VPU] = { 0xc80000000000ull, 0xc40000000000ull },
[PM_ISU] = { 0x080000000000ull, 0x020000000000ull },
@@ -192,10 +194,11 @@
[PM_STS] = { 0x380000000000ull, 0x310000000000ull },
};
-static int p970_get_constraint(u64 event, u64 *maskp, u64 *valp)
+static int p970_get_constraint(u64 event, unsigned long *maskp,
+ unsigned long *valp)
{
int pmc, byte, unit, sh, spcsel;
- u64 mask = 0, value = 0;
+ unsigned long mask = 0, value = 0;
int grp = -1;
pmc = (event >> PM_PMC_SH) & PM_PMC_MSK;
@@ -222,7 +225,7 @@
grp = byte & 1;
/* Set byte lane select field */
mask |= 0xfULL << (28 - 4 * byte);
- value |= (u64)unit << (28 - 4 * byte);
+ value |= (unsigned long)unit << (28 - 4 * byte);
}
if (grp == 0) {
/* increment PMC1/2/5/6 field */
@@ -236,7 +239,7 @@
spcsel = (event >> PM_SPCSEL_SH) & PM_SPCSEL_MSK;
if (spcsel) {
mask |= 3ull << 48;
- value |= (u64)spcsel << 48;
+ value |= (unsigned long)spcsel << 48;
}
*maskp = mask;
*valp = value;
@@ -257,9 +260,9 @@
}
static int p970_compute_mmcr(u64 event[], int n_ev,
- unsigned int hwc[], u64 mmcr[])
+ unsigned int hwc[], unsigned long mmcr[])
{
- u64 mmcr0 = 0, mmcr1 = 0, mmcra = 0;
+ unsigned long mmcr0 = 0, mmcr1 = 0, mmcra = 0;
unsigned int pmc, unit, byte, psel;
unsigned int ttm, grp;
unsigned int pmc_inuse = 0;
@@ -320,7 +323,7 @@
continue;
ttm = unitmap[i];
++ttmuse[(ttm >> 2) & 1];
- mmcr1 |= (u64)(ttm & ~4) << MMCR1_TTM1SEL_SH;
+ mmcr1 |= (unsigned long)(ttm & ~4) << MMCR1_TTM1SEL_SH;
}
/* Check only one unit per TTMx */
if (ttmuse[0] > 1 || ttmuse[1] > 1)
@@ -340,7 +343,8 @@
if (unit == PM_LSU1L && byte >= 2)
mmcr1 |= 1ull << (MMCR1_TTM3SEL_SH + 3 - byte);
}
- mmcr1 |= (u64)ttm << (MMCR1_TD_CP_DBG0SEL_SH - 2 * byte);
+ mmcr1 |= (unsigned long)ttm
+ << (MMCR1_TD_CP_DBG0SEL_SH - 2 * byte);
}
/* Second pass: assign PMCs, set PMCxSEL and PMCx_ADDER_SEL fields */
@@ -386,7 +390,8 @@
for (pmc = 0; pmc < 2; ++pmc)
mmcr0 |= pmcsel[pmc] << (MMCR0_PMC1SEL_SH - 7 * pmc);
for (; pmc < 8; ++pmc)
- mmcr1 |= (u64)pmcsel[pmc] << (MMCR1_PMC3SEL_SH - 5 * (pmc - 2));
+ mmcr1 |= (unsigned long)pmcsel[pmc]
+ << (MMCR1_PMC3SEL_SH - 5 * (pmc - 2));
if (pmc_inuse & 1)
mmcr0 |= MMCR0_PMC1CE;
if (pmc_inuse & 0xfe)
@@ -401,7 +406,7 @@
return 0;
}
-static void p970_disable_pmc(unsigned int pmc, u64 mmcr[])
+static void p970_disable_pmc(unsigned int pmc, unsigned long mmcr[])
{
int shift, i;
@@ -467,16 +472,28 @@
},
};
-struct power_pmu ppc970_pmu = {
- .n_counter = 8,
- .max_alternatives = 2,
- .add_fields = 0x001100005555ull,
- .test_adder = 0x013300000000ull,
- .compute_mmcr = p970_compute_mmcr,
- .get_constraint = p970_get_constraint,
- .get_alternatives = p970_get_alternatives,
- .disable_pmc = p970_disable_pmc,
- .n_generic = ARRAY_SIZE(ppc970_generic_events),
- .generic_events = ppc970_generic_events,
- .cache_events = &ppc970_cache_events,
+static struct power_pmu ppc970_pmu = {
+ .name = "PPC970/FX/MP",
+ .n_counter = 8,
+ .max_alternatives = 2,
+ .add_fields = 0x001100005555ull,
+ .test_adder = 0x013300000000ull,
+ .compute_mmcr = p970_compute_mmcr,
+ .get_constraint = p970_get_constraint,
+ .get_alternatives = p970_get_alternatives,
+ .disable_pmc = p970_disable_pmc,
+ .n_generic = ARRAY_SIZE(ppc970_generic_events),
+ .generic_events = ppc970_generic_events,
+ .cache_events = &ppc970_cache_events,
};
+
+static int init_ppc970_pmu(void)
+{
+ if (strcmp(cur_cpu_spec->oprofile_cpu_type, "ppc64/970")
+ && strcmp(cur_cpu_spec->oprofile_cpu_type, "ppc64/970MP"))
+ return -ENODEV;
+
+ return register_power_pmu(&ppc970_pmu);
+}
+
+arch_initcall(init_ppc970_pmu);
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index 15391c2..eae4511 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -53,6 +53,7 @@
#include <linux/posix-timers.h>
#include <linux/irq.h>
#include <linux/delay.h>
+#include <linux/perf_counter.h>
#include <asm/io.h>
#include <asm/processor.h>
@@ -525,6 +526,26 @@
}
#endif /* CONFIG_PPC_ISERIES */
+#if defined(CONFIG_PERF_COUNTERS) && defined(CONFIG_PPC32)
+DEFINE_PER_CPU(u8, perf_counter_pending);
+
+void set_perf_counter_pending(void)
+{
+ get_cpu_var(perf_counter_pending) = 1;
+ set_dec(1);
+ put_cpu_var(perf_counter_pending);
+}
+
+#define test_perf_counter_pending() __get_cpu_var(perf_counter_pending)
+#define clear_perf_counter_pending() __get_cpu_var(perf_counter_pending) = 0
+
+#else /* CONFIG_PERF_COUNTERS && CONFIG_PPC32 */
+
+#define test_perf_counter_pending() 0
+#define clear_perf_counter_pending()
+
+#endif /* CONFIG_PERF_COUNTERS && CONFIG_PPC32 */
+
/*
* For iSeries shared processors, we have to let the hypervisor
* set the hardware decrementer. We set a virtual decrementer
@@ -551,6 +572,10 @@
set_dec(DECREMENTER_MAX);
#ifdef CONFIG_PPC32
+ if (test_perf_counter_pending()) {
+ clear_perf_counter_pending();
+ perf_counter_do_pending();
+ }
if (atomic_read(&ppc_n_lost_interrupts) != 0)
do_IRQ(regs);
#endif
diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
index 5beffc8..830bef0 100644
--- a/arch/powerpc/mm/fault.c
+++ b/arch/powerpc/mm/fault.c
@@ -302,7 +302,7 @@
* the fault.
*/
survive:
- ret = handle_mm_fault(mm, vma, address, is_write);
+ ret = handle_mm_fault(mm, vma, address, is_write ? FAULT_FLAG_WRITE : 0);
if (unlikely(ret & VM_FAULT_ERROR)) {
if (ret & VM_FAULT_OOM)
goto out_of_memory;
diff --git a/arch/powerpc/platforms/Kconfig.cputype b/arch/powerpc/platforms/Kconfig.cputype
index c419254..61187be 100644
--- a/arch/powerpc/platforms/Kconfig.cputype
+++ b/arch/powerpc/platforms/Kconfig.cputype
@@ -1,7 +1,7 @@
config PPC64
bool "64-bit kernel"
default n
- select HAVE_PERF_COUNTERS
+ select PPC_HAVE_PMU_SUPPORT
help
This option selects whether a 32-bit or a 64-bit kernel
will be built.
@@ -78,6 +78,7 @@
config 6xx
def_bool y
depends on PPC32 && PPC_BOOK3S
+ select PPC_HAVE_PMU_SUPPORT
config POWER3
bool
@@ -246,6 +247,15 @@
If in doubt, say Y here.
+config PPC_HAVE_PMU_SUPPORT
+ bool
+
+config PPC_PERF_CTRS
+ def_bool y
+ depends on PERF_COUNTERS && PPC_HAVE_PMU_SUPPORT
+ help
+ This enables the powerpc-specific perf_counter back-end.
+
config SMP
depends on PPC_STD_MMU || FSL_BOOKE
bool "Symmetric multi-processing support"
diff --git a/arch/powerpc/platforms/cell/spu_fault.c b/arch/powerpc/platforms/cell/spu_fault.c
index 95d8dad..d06ba87 100644
--- a/arch/powerpc/platforms/cell/spu_fault.c
+++ b/arch/powerpc/platforms/cell/spu_fault.c
@@ -70,7 +70,7 @@
}
ret = 0;
- *flt = handle_mm_fault(mm, vma, ea, is_write);
+ *flt = handle_mm_fault(mm, vma, ea, is_write ? FAULT_FLAG_WRITE : 0);
if (unlikely(*flt & VM_FAULT_ERROR)) {
if (*flt & VM_FAULT_OOM) {
ret = -ENOMEM;
diff --git a/arch/powerpc/platforms/pseries/eeh_driver.c b/arch/powerpc/platforms/pseries/eeh_driver.c
index 9a2a6e3..0e8db67 100644
--- a/arch/powerpc/platforms/pseries/eeh_driver.c
+++ b/arch/powerpc/platforms/pseries/eeh_driver.c
@@ -122,7 +122,7 @@
* passed back in "userdata".
*/
-static void eeh_report_error(struct pci_dev *dev, void *userdata)
+static int eeh_report_error(struct pci_dev *dev, void *userdata)
{
enum pci_ers_result rc, *res = userdata;
struct pci_driver *driver = dev->driver;
@@ -130,19 +130,21 @@
dev->error_state = pci_channel_io_frozen;
if (!driver)
- return;
+ return 0;
eeh_disable_irq(dev);
if (!driver->err_handler ||
!driver->err_handler->error_detected)
- return;
+ return 0;
rc = driver->err_handler->error_detected (dev, pci_channel_io_frozen);
/* A driver that needs a reset trumps all others */
if (rc == PCI_ERS_RESULT_NEED_RESET) *res = rc;
if (*res == PCI_ERS_RESULT_NONE) *res = rc;
+
+ return 0;
}
/**
@@ -153,7 +155,7 @@
* Cumulative response passed back in "userdata".
*/
-static void eeh_report_mmio_enabled(struct pci_dev *dev, void *userdata)
+static int eeh_report_mmio_enabled(struct pci_dev *dev, void *userdata)
{
enum pci_ers_result rc, *res = userdata;
struct pci_driver *driver = dev->driver;
@@ -161,26 +163,28 @@
if (!driver ||
!driver->err_handler ||
!driver->err_handler->mmio_enabled)
- return;
+ return 0;
rc = driver->err_handler->mmio_enabled (dev);
/* A driver that needs a reset trumps all others */
if (rc == PCI_ERS_RESULT_NEED_RESET) *res = rc;
if (*res == PCI_ERS_RESULT_NONE) *res = rc;
+
+ return 0;
}
/**
* eeh_report_reset - tell device that slot has been reset
*/
-static void eeh_report_reset(struct pci_dev *dev, void *userdata)
+static int eeh_report_reset(struct pci_dev *dev, void *userdata)
{
enum pci_ers_result rc, *res = userdata;
struct pci_driver *driver = dev->driver;
if (!driver)
- return;
+ return 0;
dev->error_state = pci_channel_io_normal;
@@ -188,35 +192,39 @@
if (!driver->err_handler ||
!driver->err_handler->slot_reset)
- return;
+ return 0;
rc = driver->err_handler->slot_reset(dev);
if ((*res == PCI_ERS_RESULT_NONE) ||
(*res == PCI_ERS_RESULT_RECOVERED)) *res = rc;
if (*res == PCI_ERS_RESULT_DISCONNECT &&
rc == PCI_ERS_RESULT_NEED_RESET) *res = rc;
+
+ return 0;
}
/**
* eeh_report_resume - tell device to resume normal operations
*/
-static void eeh_report_resume(struct pci_dev *dev, void *userdata)
+static int eeh_report_resume(struct pci_dev *dev, void *userdata)
{
struct pci_driver *driver = dev->driver;
dev->error_state = pci_channel_io_normal;
if (!driver)
- return;
+ return 0;
eeh_enable_irq(dev);
if (!driver->err_handler ||
!driver->err_handler->resume)
- return;
+ return 0;
driver->err_handler->resume(dev);
+
+ return 0;
}
/**
@@ -226,22 +234,24 @@
* dead, and that no further recovery attempts will be made on it.
*/
-static void eeh_report_failure(struct pci_dev *dev, void *userdata)
+static int eeh_report_failure(struct pci_dev *dev, void *userdata)
{
struct pci_driver *driver = dev->driver;
dev->error_state = pci_channel_io_perm_failure;
if (!driver)
- return;
+ return 0;
eeh_disable_irq(dev);
if (!driver->err_handler ||
!driver->err_handler->error_detected)
- return;
+ return 0;
driver->err_handler->error_detected(dev, pci_channel_io_perm_failure);
+
+ return 0;
}
/* ------------------------------------------------------- */
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index a14dba0..e577839 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -94,6 +94,7 @@
select HAVE_KVM if 64BIT
select HAVE_ARCH_TRACEHOOK
select INIT_ALL_POSSIBLE
+ select HAVE_PERF_COUNTERS
source "init/Kconfig"
diff --git a/arch/s390/defconfig b/arch/s390/defconfig
index d401d56..fcba206 100644
--- a/arch/s390/defconfig
+++ b/arch/s390/defconfig
@@ -1,7 +1,7 @@
#
# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.30-rc3
-# Thu Apr 23 09:29:52 2009
+# Linux kernel version: 2.6.30
+# Mon Jun 22 11:08:16 2009
#
CONFIG_SCHED_MC=y
CONFIG_MMU=y
@@ -25,6 +25,7 @@
CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC=y
CONFIG_S390=y
CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
+CONFIG_CONSTRUCTORS=y
#
# General setup
@@ -90,7 +91,6 @@
CONFIG_KALLSYMS=y
# CONFIG_KALLSYMS_ALL is not set
# CONFIG_KALLSYMS_EXTRA_PASS is not set
-# CONFIG_STRIP_ASM_SYMS is not set
CONFIG_HOTPLUG=y
CONFIG_PRINTK=y
CONFIG_BUG=y
@@ -103,7 +103,14 @@
CONFIG_EVENTFD=y
CONFIG_SHMEM=y
CONFIG_AIO=y
+CONFIG_HAVE_PERF_COUNTERS=y
+
+#
+# Performance Counters
+#
+# CONFIG_PERF_COUNTERS is not set
CONFIG_VM_EVENT_COUNTERS=y
+# CONFIG_STRIP_ASM_SYMS is not set
# CONFIG_COMPAT_BRK is not set
CONFIG_SLAB=y
# CONFIG_SLUB is not set
@@ -119,6 +126,11 @@
CONFIG_HAVE_ARCH_TRACEHOOK=y
CONFIG_USE_GENERIC_SMP_HELPERS=y
CONFIG_HAVE_DEFAULT_NO_SPIN_MUTEXES=y
+
+#
+# GCOV-based kernel profiling
+#
+# CONFIG_GCOV_KERNEL is not set
# CONFIG_SLOW_WORK is not set
# CONFIG_HAVE_GENERIC_DMA_COHERENT is not set
CONFIG_SLABINFO=y
@@ -150,7 +162,7 @@
# CONFIG_DEFAULT_NOOP is not set
CONFIG_DEFAULT_IOSCHED="deadline"
CONFIG_PREEMPT_NOTIFIERS=y
-# CONFIG_FREEZER is not set
+CONFIG_FREEZER=y
#
# Base setup
@@ -199,6 +211,7 @@
CONFIG_ARCH_SELECT_MEMORY_MODEL=y
CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y
CONFIG_ARCH_ENABLE_MEMORY_HOTREMOVE=y
+CONFIG_ARCH_HIBERNATION_POSSIBLE=y
CONFIG_SELECT_MEMORY_MODEL=y
# CONFIG_FLATMEM_MANUAL is not set
# CONFIG_DISCONTIGMEM_MANUAL is not set
@@ -218,9 +231,9 @@
CONFIG_ZONE_DMA_FLAG=1
CONFIG_BOUNCE=y
CONFIG_VIRT_TO_BUS=y
-CONFIG_UNEVICTABLE_LRU=y
CONFIG_HAVE_MLOCK=y
CONFIG_HAVE_MLOCKED_PAGE_BIT=y
+CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
#
# I/O subsystem configuration
@@ -257,6 +270,16 @@
# CONFIG_ZFCPDUMP is not set
CONFIG_S390_GUEST=y
CONFIG_SECCOMP=y
+
+#
+# Power Management
+#
+CONFIG_PM=y
+# CONFIG_PM_DEBUG is not set
+CONFIG_PM_SLEEP_SMP=y
+CONFIG_PM_SLEEP=y
+CONFIG_HIBERNATION=y
+CONFIG_PM_STD_PARTITION=""
CONFIG_NET=y
#
@@ -384,6 +407,7 @@
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
# CONFIG_PHONET is not set
+# CONFIG_IEEE802154 is not set
CONFIG_NET_SCHED=y
#
@@ -446,6 +470,7 @@
# CAN Device Drivers
#
CONFIG_CAN_VCAN=m
+# CONFIG_CAN_DEV is not set
# CONFIG_CAN_DEBUG_DEVICES is not set
# CONFIG_AF_RXRPC is not set
# CONFIG_WIMAX is not set
@@ -524,10 +549,6 @@
CONFIG_BLK_DEV_SR_VENDOR=y
CONFIG_CHR_DEV_SG=y
# CONFIG_CHR_DEV_SCH is not set
-
-#
-# Some SCSI devices (e.g. CD jukebox) support multiple LUNs
-#
CONFIG_SCSI_MULTI_LUN=y
CONFIG_SCSI_CONSTANTS=y
CONFIG_SCSI_LOGGING=y
@@ -578,7 +599,6 @@
# CONFIG_DM_DELAY is not set
# CONFIG_DM_UEVENT is not set
CONFIG_NETDEVICES=y
-CONFIG_COMPAT_NET_DEV_OPS=y
# CONFIG_IFB is not set
CONFIG_DUMMY=m
CONFIG_BONDING=m
@@ -595,6 +615,7 @@
# CONFIG_IBM_NEW_EMAC_NO_FLOW_CTRL is not set
# CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set
# CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set
+# CONFIG_KS8842 is not set
CONFIG_NETDEV_1000=y
CONFIG_NETDEV_10000=y
# CONFIG_TR is not set
@@ -674,6 +695,11 @@
# CONFIG_MONREADER is not set
CONFIG_MONWRITER=m
CONFIG_S390_VMUR=m
+
+#
+# PPS support
+#
+# CONFIG_PPS is not set
# CONFIG_POWER_SUPPLY is not set
# CONFIG_THERMAL is not set
# CONFIG_THERMAL_HWMON is not set
@@ -683,6 +709,10 @@
# CONFIG_NEW_LEDS is not set
CONFIG_ACCESSIBILITY=y
# CONFIG_AUXDISPLAY is not set
+
+#
+# TI VLYNQ
+#
# CONFIG_STAGING is not set
#
@@ -703,11 +733,12 @@
# CONFIG_REISERFS_FS is not set
# CONFIG_JFS_FS is not set
CONFIG_FS_POSIX_ACL=y
-CONFIG_FILE_LOCKING=y
# CONFIG_XFS_FS is not set
# CONFIG_GFS2_FS is not set
# CONFIG_OCFS2_FS is not set
# CONFIG_BTRFS_FS is not set
+CONFIG_FILE_LOCKING=y
+CONFIG_FSNOTIFY=y
CONFIG_DNOTIFY=y
CONFIG_INOTIFY=y
CONFIG_INOTIFY_USER=y
@@ -865,19 +896,23 @@
CONFIG_SYSCTL_SYSCALL_CHECK=y
# CONFIG_DEBUG_PAGEALLOC is not set
CONFIG_HAVE_FUNCTION_TRACER=y
+CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y
+CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST=y
+CONFIG_HAVE_DYNAMIC_FTRACE=y
+CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y
+CONFIG_HAVE_FTRACE_SYSCALLS=y
CONFIG_TRACING_SUPPORT=y
-
-#
-# Tracers
-#
+CONFIG_FTRACE=y
# CONFIG_FUNCTION_TRACER is not set
# CONFIG_IRQSOFF_TRACER is not set
# CONFIG_PREEMPT_TRACER is not set
# CONFIG_SCHED_TRACER is not set
-# CONFIG_CONTEXT_SWITCH_TRACER is not set
-# CONFIG_EVENT_TRACER is not set
+# CONFIG_ENABLE_DEFAULT_TRACERS is not set
+# CONFIG_FTRACE_SYSCALLS is not set
# CONFIG_BOOT_TRACER is not set
-# CONFIG_TRACE_BRANCH_PROFILING is not set
+CONFIG_BRANCH_PROFILE_NONE=y
+# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set
+# CONFIG_PROFILE_ALL_BRANCHES is not set
# CONFIG_STACK_TRACER is not set
# CONFIG_KMEMTRACE is not set
# CONFIG_WORKQUEUE_TRACER is not set
@@ -886,6 +921,7 @@
CONFIG_SAMPLES=y
# CONFIG_SAMPLE_KOBJECT is not set
# CONFIG_SAMPLE_KPROBES is not set
+# CONFIG_KMEMCHECK is not set
#
# Security options
diff --git a/arch/s390/include/asm/cputime.h b/arch/s390/include/asm/cputime.h
index ec917d4..7a3817a 100644
--- a/arch/s390/include/asm/cputime.h
+++ b/arch/s390/include/asm/cputime.h
@@ -178,7 +178,7 @@
}
struct s390_idle_data {
- spinlock_t lock;
+ unsigned int sequence;
unsigned long long idle_count;
unsigned long long idle_enter;
unsigned long long idle_time;
diff --git a/arch/s390/include/asm/debug.h b/arch/s390/include/asm/debug.h
index 9450ce6..31ed568 100644
--- a/arch/s390/include/asm/debug.h
+++ b/arch/s390/include/asm/debug.h
@@ -248,14 +248,5 @@
#define PRINT_FATAL(x...) printk ( KERN_DEBUG PRINTK_HEADER x )
#endif /* DASD_DEBUG */
-#undef DEBUG_MALLOC
-#ifdef DEBUG_MALLOC
-void *b;
-#define kmalloc(x...) (PRINT_INFO(" kmalloc %p\n",b=kmalloc(x)),b)
-#define kfree(x) PRINT_INFO(" kfree %p\n",x);kfree(x)
-#define get_zeroed_page(x...) (PRINT_INFO(" gfp %p\n",b=get_zeroed_page(x)),b)
-#define __get_free_pages(x...) (PRINT_INFO(" gfps %p\n",b=__get_free_pages(x)),b)
-#endif /* DEBUG_MALLOC */
-
#endif /* __KERNEL__ */
#endif /* DEBUG_H */
diff --git a/arch/s390/include/asm/perf_counter.h b/arch/s390/include/asm/perf_counter.h
new file mode 100644
index 0000000..a7205a3
--- /dev/null
+++ b/arch/s390/include/asm/perf_counter.h
@@ -0,0 +1,8 @@
+/*
+ * Performance counter support - s390 specific definitions.
+ *
+ * Copyright 2009 Martin Schwidefsky, IBM Corporation.
+ */
+
+static inline void set_perf_counter_pending(void) {}
+static inline void clear_perf_counter_pending(void) {}
diff --git a/arch/s390/include/asm/qdio.h b/arch/s390/include/asm/qdio.h
index 402d6dc..79d849f 100644
--- a/arch/s390/include/asm/qdio.h
+++ b/arch/s390/include/asm/qdio.h
@@ -380,7 +380,7 @@
extern int qdio_activate(struct ccw_device *);
extern int do_QDIO(struct ccw_device *cdev, unsigned int callflags,
- int q_nr, int bufnr, int count);
+ int q_nr, unsigned int bufnr, unsigned int count);
extern int qdio_cleanup(struct ccw_device*, int);
extern int qdio_shutdown(struct ccw_device*, int);
extern int qdio_free(struct ccw_device *);
diff --git a/arch/s390/kernel/ftrace.c b/arch/s390/kernel/ftrace.c
index 82ddfd3..3e298e6 100644
--- a/arch/s390/kernel/ftrace.c
+++ b/arch/s390/kernel/ftrace.c
@@ -190,7 +190,7 @@
goto out;
if (unlikely(atomic_read(&current->tracing_graph_pause)))
goto out;
- if (ftrace_push_return_trace(parent, ip, &trace.depth) == -EBUSY)
+ if (ftrace_push_return_trace(parent, ip, &trace.depth, 0) == -EBUSY)
goto out;
trace.func = ftrace_mcount_call_adjust(ip) & PSW_ADDR_INSN;
/* Only trace if the calling function expects to. */
diff --git a/arch/s390/kernel/kprobes.c b/arch/s390/kernel/kprobes.c
index 9bb2f62..86783ef 100644
--- a/arch/s390/kernel/kprobes.c
+++ b/arch/s390/kernel/kprobes.c
@@ -154,39 +154,35 @@
static int __kprobes swap_instruction(void *aref)
{
+ struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+ unsigned long status = kcb->kprobe_status;
struct ins_replace_args *args = aref;
+ int rc;
- return probe_kernel_write(args->ptr, &args->new, sizeof(args->new));
+ kcb->kprobe_status = KPROBE_SWAP_INST;
+ rc = probe_kernel_write(args->ptr, &args->new, sizeof(args->new));
+ kcb->kprobe_status = status;
+ return rc;
}
void __kprobes arch_arm_kprobe(struct kprobe *p)
{
- struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
- unsigned long status = kcb->kprobe_status;
struct ins_replace_args args;
args.ptr = p->addr;
args.old = p->opcode;
args.new = BREAKPOINT_INSTRUCTION;
-
- kcb->kprobe_status = KPROBE_SWAP_INST;
stop_machine(swap_instruction, &args, NULL);
- kcb->kprobe_status = status;
}
void __kprobes arch_disarm_kprobe(struct kprobe *p)
{
- struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
- unsigned long status = kcb->kprobe_status;
struct ins_replace_args args;
args.ptr = p->addr;
args.old = BREAKPOINT_INSTRUCTION;
args.new = p->opcode;
-
- kcb->kprobe_status = KPROBE_SWAP_INST;
stop_machine(swap_instruction, &args, NULL);
- kcb->kprobe_status = status;
}
void __kprobes arch_remove_kprobe(struct kprobe *p)
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index fd8e311..2270730 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -856,13 +856,20 @@
{
struct s390_idle_data *idle;
unsigned long long idle_count;
+ unsigned int sequence;
idle = &per_cpu(s390_idle, dev->id);
- spin_lock(&idle->lock);
+repeat:
+ sequence = idle->sequence;
+ smp_rmb();
+ if (sequence & 1)
+ goto repeat;
idle_count = idle->idle_count;
if (idle->idle_enter)
idle_count++;
- spin_unlock(&idle->lock);
+ smp_rmb();
+ if (idle->sequence != sequence)
+ goto repeat;
return sprintf(buf, "%llu\n", idle_count);
}
static SYSDEV_ATTR(idle_count, 0444, show_idle_count, NULL);
@@ -872,15 +879,22 @@
{
struct s390_idle_data *idle;
unsigned long long now, idle_time, idle_enter;
+ unsigned int sequence;
idle = &per_cpu(s390_idle, dev->id);
- spin_lock(&idle->lock);
now = get_clock();
+repeat:
+ sequence = idle->sequence;
+ smp_rmb();
+ if (sequence & 1)
+ goto repeat;
idle_time = idle->idle_time;
idle_enter = idle->idle_enter;
if (idle_enter != 0ULL && idle_enter < now)
idle_time += now - idle_enter;
- spin_unlock(&idle->lock);
+ smp_rmb();
+ if (idle->sequence != sequence)
+ goto repeat;
return sprintf(buf, "%llu\n", idle_time >> 12);
}
static SYSDEV_ATTR(idle_time_us, 0444, show_idle_time, NULL);
@@ -908,11 +922,7 @@
case CPU_ONLINE:
case CPU_ONLINE_FROZEN:
idle = &per_cpu(s390_idle, cpu);
- spin_lock_irq(&idle->lock);
- idle->idle_enter = 0;
- idle->idle_time = 0;
- idle->idle_count = 0;
- spin_unlock_irq(&idle->lock);
+ memset(idle, 0, sizeof(struct s390_idle_data));
if (sysfs_create_group(&s->kobj, &cpu_online_attr_group))
return NOTIFY_BAD;
break;
diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c
index 215330a..d4c8e9c 100644
--- a/arch/s390/kernel/time.c
+++ b/arch/s390/kernel/time.c
@@ -36,7 +36,6 @@
#include <linux/notifier.h>
#include <linux/clocksource.h>
#include <linux/clockchips.h>
-#include <linux/bootmem.h>
#include <asm/uaccess.h>
#include <asm/delay.h>
#include <asm/s390_ext.h>
@@ -62,9 +61,6 @@
u64 sched_clock_base_cc = -1; /* Force to data section. */
-static ext_int_info_t ext_int_info_cc;
-static ext_int_info_t ext_int_etr_cc;
-
static DEFINE_PER_CPU(struct clock_event_device, comparators);
/*
@@ -255,15 +251,11 @@
stp_reset();
/* request the clock comparator external interrupt */
- if (register_early_external_interrupt(0x1004,
- clock_comparator_interrupt,
- &ext_int_info_cc) != 0)
+ if (register_external_interrupt(0x1004, clock_comparator_interrupt))
panic("Couldn't request external interrupt 0x1004");
/* request the timing alert external interrupt */
- if (register_early_external_interrupt(0x1406,
- timing_alert_interrupt,
- &ext_int_etr_cc) != 0)
+ if (register_external_interrupt(0x1406, timing_alert_interrupt))
panic("Couldn't request external interrupt 0x1406");
if (clocksource_register(&clocksource_tod) != 0)
@@ -1445,14 +1437,14 @@
{
int rc;
- stp_page = alloc_bootmem_pages(PAGE_SIZE);
+ stp_page = (void *) get_zeroed_page(GFP_ATOMIC);
rc = chsc_sstpc(stp_page, STP_OP_CTRL, 0x0000);
if (rc == 0)
set_bit(CLOCK_SYNC_HAS_STP, &clock_sync_flags);
else if (stp_online) {
pr_warning("The real or virtual hardware system does "
"not provide an STP interface\n");
- free_bootmem((unsigned long) stp_page, PAGE_SIZE);
+ free_page((unsigned long) stp_page);
stp_page = NULL;
stp_online = 0;
}
diff --git a/arch/s390/kernel/vtime.c b/arch/s390/kernel/vtime.c
index c8eb725..c41bb0d 100644
--- a/arch/s390/kernel/vtime.c
+++ b/arch/s390/kernel/vtime.c
@@ -25,13 +25,9 @@
#include <asm/irq_regs.h>
#include <asm/cputime.h>
-static ext_int_info_t ext_int_info_timer;
-
static DEFINE_PER_CPU(struct vtimer_queue, virt_cpu_timer);
-DEFINE_PER_CPU(struct s390_idle_data, s390_idle) = {
- .lock = __SPIN_LOCK_UNLOCKED(s390_idle.lock)
-};
+DEFINE_PER_CPU(struct s390_idle_data, s390_idle);
static inline __u64 get_vtimer(void)
{
@@ -153,11 +149,13 @@
vq->elapsed -= vq->idle - S390_lowcore.async_enter_timer;
}
- spin_lock(&idle->lock);
+ idle->sequence++;
+ smp_wmb();
idle->idle_time += idle_time;
idle->idle_enter = 0ULL;
idle->idle_count++;
- spin_unlock(&idle->lock);
+ smp_wmb();
+ idle->sequence++;
}
void vtime_stop_cpu(void)
@@ -244,15 +242,23 @@
{
struct s390_idle_data *idle;
unsigned long long now, idle_time, idle_enter;
+ unsigned int sequence;
idle = &per_cpu(s390_idle, cpu);
- spin_lock(&idle->lock);
+
now = get_clock();
+repeat:
+ sequence = idle->sequence;
+ smp_rmb();
+ if (sequence & 1)
+ goto repeat;
idle_time = 0;
idle_enter = idle->idle_enter;
if (idle_enter != 0ULL && idle_enter < now)
idle_time = now - idle_enter;
- spin_unlock(&idle->lock);
+ smp_rmb();
+ if (idle->sequence != sequence)
+ goto repeat;
return idle_time;
}
@@ -557,8 +563,7 @@
void __init vtime_init(void)
{
/* request the cpu timer external interrupt */
- if (register_early_external_interrupt(0x1005, do_cpu_timer_interrupt,
- &ext_int_info_timer) != 0)
+ if (register_external_interrupt(0x1005, do_cpu_timer_interrupt))
panic("Couldn't request external interrupt 0x1005");
/* Enable cpu timer interrupts on the boot cpu. */
diff --git a/arch/s390/lib/uaccess_pt.c b/arch/s390/lib/uaccess_pt.c
index b0b84c3..cb5d59e 100644
--- a/arch/s390/lib/uaccess_pt.c
+++ b/arch/s390/lib/uaccess_pt.c
@@ -66,7 +66,7 @@
}
survive:
- fault = handle_mm_fault(mm, vma, address, write_access);
+ fault = handle_mm_fault(mm, vma, address, write_access ? FAULT_FLAG_WRITE : 0);
if (unlikely(fault & VM_FAULT_ERROR)) {
if (fault & VM_FAULT_OOM)
goto out_of_memory;
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index 220a152..74eb26b 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -352,7 +352,7 @@
* make sure we exit gracefully rather than endlessly redo
* the fault.
*/
- fault = handle_mm_fault(mm, vma, address, write);
+ fault = handle_mm_fault(mm, vma, address, write ? FAULT_FLAG_WRITE : 0);
if (unlikely(fault & VM_FAULT_ERROR)) {
if (fault & VM_FAULT_OOM) {
up_read(&mm->mmap_sem);
diff --git a/arch/s390/power/swsusp_asm64.S b/arch/s390/power/swsusp_asm64.S
index 3c74e7d..76d688d 100644
--- a/arch/s390/power/swsusp_asm64.S
+++ b/arch/s390/power/swsusp_asm64.S
@@ -109,10 +109,11 @@
aghi %r15,-STACK_FRAME_OVERHEAD
stg %r1,__SF_BACKCHAIN(%r15)
+#ifdef CONFIG_SMP
/* Save boot cpu number */
brasl %r14,smp_get_phys_cpu_id
lgr %r10,%r2
-
+#endif
/* Deactivate DAT */
stnsm __SF_EMPTY(%r15),0xfb
@@ -177,11 +178,12 @@
/* Pointer to save area */
lghi %r13,0x1000
+#ifdef CONFIG_SMP
/* Switch CPUs */
lgr %r2,%r10 /* get cpu id */
llgf %r3,0x318(%r13)
brasl %r14,smp_switch_boot_cpu_in_resume
-
+#endif
/* Restore prefix register */
spx 0x318(%r13)
diff --git a/arch/sh/mm/fault_32.c b/arch/sh/mm/fault_32.c
index 2c50f80..cc8ddbd 100644
--- a/arch/sh/mm/fault_32.c
+++ b/arch/sh/mm/fault_32.c
@@ -133,7 +133,7 @@
* the fault.
*/
survive:
- fault = handle_mm_fault(mm, vma, address, writeaccess);
+ fault = handle_mm_fault(mm, vma, address, writeaccess ? FAULT_FLAG_WRITE : 0);
if (unlikely(fault & VM_FAULT_ERROR)) {
if (fault & VM_FAULT_OOM)
goto out_of_memory;
diff --git a/arch/sh/mm/tlbflush_64.c b/arch/sh/mm/tlbflush_64.c
index 7876997..fcbb6e1 100644
--- a/arch/sh/mm/tlbflush_64.c
+++ b/arch/sh/mm/tlbflush_64.c
@@ -187,7 +187,7 @@
* the fault.
*/
survive:
- fault = handle_mm_fault(mm, vma, address, writeaccess);
+ fault = handle_mm_fault(mm, vma, address, writeaccess ? FAULT_FLAG_WRITE : 0);
if (unlikely(fault & VM_FAULT_ERROR)) {
if (fault & VM_FAULT_OOM)
goto out_of_memory;
diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c
index 12e447f..a5e30c6 100644
--- a/arch/sparc/mm/fault_32.c
+++ b/arch/sparc/mm/fault_32.c
@@ -241,7 +241,7 @@
* make sure we exit gracefully rather than endlessly redo
* the fault.
*/
- fault = handle_mm_fault(mm, vma, address, write);
+ fault = handle_mm_fault(mm, vma, address, write ? FAULT_FLAG_WRITE : 0);
if (unlikely(fault & VM_FAULT_ERROR)) {
if (fault & VM_FAULT_OOM)
goto out_of_memory;
@@ -484,7 +484,7 @@
if(!(vma->vm_flags & (VM_READ | VM_EXEC)))
goto bad_area;
}
- switch (handle_mm_fault(mm, vma, address, write)) {
+ switch (handle_mm_fault(mm, vma, address, write ? FAULT_FLAG_WRITE : 0)) {
case VM_FAULT_SIGBUS:
case VM_FAULT_OOM:
goto do_sigbus;
diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c
index 4ab8993..e5620b2 100644
--- a/arch/sparc/mm/fault_64.c
+++ b/arch/sparc/mm/fault_64.c
@@ -398,7 +398,7 @@
goto bad_area;
}
- fault = handle_mm_fault(mm, vma, address, (fault_code & FAULT_CODE_WRITE));
+ fault = handle_mm_fault(mm, vma, address, (fault_code & FAULT_CODE_WRITE) ? FAULT_FLAG_WRITE : 0);
if (unlikely(fault & VM_FAULT_ERROR)) {
if (fault & VM_FAULT_OOM)
goto out_of_memory;
diff --git a/arch/um/kernel/trap.c b/arch/um/kernel/trap.c
index 7384d8a..637c650 100644
--- a/arch/um/kernel/trap.c
+++ b/arch/um/kernel/trap.c
@@ -65,7 +65,7 @@
do {
int fault;
- fault = handle_mm_fault(mm, vma, address, is_write);
+ fault = handle_mm_fault(mm, vma, address, is_write ? FAULT_FLAG_WRITE : 0);
if (unlikely(fault & VM_FAULT_ERROR)) {
if (fault & VM_FAULT_OOM) {
goto out_of_memory;
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 73c0bda..d1430ef 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -34,6 +34,7 @@
select HAVE_DYNAMIC_FTRACE
select HAVE_FUNCTION_TRACER
select HAVE_FUNCTION_GRAPH_TRACER
+ select HAVE_FUNCTION_GRAPH_FP_TEST
select HAVE_FUNCTION_TRACE_MCOUNT_TEST
select HAVE_FTRACE_NMI_ENTER if DYNAMIC_FTRACE
select HAVE_FTRACE_SYSCALLS
diff --git a/arch/x86/boot/bioscall.S b/arch/x86/boot/bioscall.S
index 5077937..1dfbf64 100644
--- a/arch/x86/boot/bioscall.S
+++ b/arch/x86/boot/bioscall.S
@@ -13,7 +13,7 @@
* touching registers they shouldn't be.
*/
- .code16
+ .code16gcc
.text
.globl intcall
.type intcall, @function
diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S
index caba996..eb0566e 100644
--- a/arch/x86/crypto/aesni-intel_asm.S
+++ b/arch/x86/crypto/aesni-intel_asm.S
@@ -845,7 +845,7 @@
*/
ENTRY(aesni_cbc_dec)
cmp $16, LEN
- jb .Lcbc_dec_ret
+ jb .Lcbc_dec_just_ret
mov 480(KEYP), KLEN
add $240, KEYP
movups (IVP), IV
@@ -891,6 +891,7 @@
add $16, OUTP
cmp $16, LEN
jge .Lcbc_dec_loop1
- movups IV, (IVP)
.Lcbc_dec_ret:
+ movups IV, (IVP)
+.Lcbc_dec_just_ret:
ret
diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c
index 4e66339..c580c5e 100644
--- a/arch/x86/crypto/aesni-intel_glue.c
+++ b/arch/x86/crypto/aesni-intel_glue.c
@@ -198,6 +198,7 @@
blkcipher_walk_init(&walk, dst, src, nbytes);
err = blkcipher_walk_virt(desc, &walk);
+ desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
kernel_fpu_begin();
while ((nbytes = walk.nbytes)) {
@@ -221,6 +222,7 @@
blkcipher_walk_init(&walk, dst, src, nbytes);
err = blkcipher_walk_virt(desc, &walk);
+ desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
kernel_fpu_begin();
while ((nbytes = walk.nbytes)) {
@@ -266,6 +268,7 @@
blkcipher_walk_init(&walk, dst, src, nbytes);
err = blkcipher_walk_virt(desc, &walk);
+ desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
kernel_fpu_begin();
while ((nbytes = walk.nbytes)) {
@@ -289,6 +292,7 @@
blkcipher_walk_init(&walk, dst, src, nbytes);
err = blkcipher_walk_virt(desc, &walk);
+ desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
kernel_fpu_begin();
while ((nbytes = walk.nbytes)) {
diff --git a/arch/x86/crypto/fpu.c b/arch/x86/crypto/fpu.c
index 5f9781a..daef6cd 100644
--- a/arch/x86/crypto/fpu.c
+++ b/arch/x86/crypto/fpu.c
@@ -48,7 +48,7 @@
struct blkcipher_desc desc = {
.tfm = child,
.info = desc_in->info,
- .flags = desc_in->flags,
+ .flags = desc_in->flags & ~CRYPTO_TFM_REQ_MAY_SLEEP,
};
kernel_fpu_begin();
@@ -67,7 +67,7 @@
struct blkcipher_desc desc = {
.tfm = child,
.info = desc_in->info,
- .flags = desc_in->flags,
+ .flags = desc_in->flags & ~CRYPTO_TFM_REQ_MAY_SLEEP,
};
kernel_fpu_begin();
diff --git a/arch/x86/include/asm/amd_iommu.h b/arch/x86/include/asm/amd_iommu.h
index 262e028..bdf96f1 100644
--- a/arch/x86/include/asm/amd_iommu.h
+++ b/arch/x86/include/asm/amd_iommu.h
@@ -29,9 +29,11 @@
extern irqreturn_t amd_iommu_int_handler(int irq, void *data);
extern void amd_iommu_flush_all_domains(void);
extern void amd_iommu_flush_all_devices(void);
+extern void amd_iommu_shutdown(void);
#else
static inline int amd_iommu_init(void) { return -ENODEV; }
static inline void amd_iommu_detect(void) { }
+static inline void amd_iommu_shutdown(void) { }
#endif
#endif /* _ASM_X86_AMD_IOMMU_H */
diff --git a/arch/x86/include/asm/atomic_32.h b/arch/x86/include/asm/atomic_32.h
index 8cb9c81..2503d4e 100644
--- a/arch/x86/include/asm/atomic_32.h
+++ b/arch/x86/include/asm/atomic_32.h
@@ -257,7 +257,7 @@
/**
* atomic64_read - read atomic64 variable
- * @v: pointer of type atomic64_t
+ * @ptr: pointer of type atomic64_t
*
* Atomically reads the value of @v.
* Doesn't imply a read memory barrier.
@@ -294,7 +294,6 @@
* atomic64_xchg - xchg atomic64 variable
* @ptr: pointer to type atomic64_t
* @new_val: value to assign
- * @old_val: old value that was there
*
* Atomically xchgs the value of @ptr to @new_val and returns
* the old value.
diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h
index c45f415..c993e9e 100644
--- a/arch/x86/include/asm/desc.h
+++ b/arch/x86/include/asm/desc.h
@@ -1,7 +1,6 @@
#ifndef _ASM_X86_DESC_H
#define _ASM_X86_DESC_H
-#ifndef __ASSEMBLY__
#include <asm/desc_defs.h>
#include <asm/ldt.h>
#include <asm/mmu.h>
@@ -380,29 +379,4 @@
_set_gate(n, GATE_INTERRUPT, addr, 0x3, ist, __KERNEL_CS);
}
-#else
-/*
- * GET_DESC_BASE reads the descriptor base of the specified segment.
- *
- * Args:
- * idx - descriptor index
- * gdt - GDT pointer
- * base - 32bit register to which the base will be written
- * lo_w - lo word of the "base" register
- * lo_b - lo byte of the "base" register
- * hi_b - hi byte of the low word of the "base" register
- *
- * Example:
- * GET_DESC_BASE(GDT_ENTRY_ESPFIX_SS, %ebx, %eax, %ax, %al, %ah)
- * Will read the base address of GDT_ENTRY_ESPFIX_SS and put it into %eax.
- */
-#define GET_DESC_BASE(idx, gdt, base, lo_w, lo_b, hi_b) \
- movb idx * 8 + 4(gdt), lo_b; \
- movb idx * 8 + 7(gdt), hi_b; \
- shll $16, base; \
- movw idx * 8 + 2(gdt), lo_w;
-
-
-#endif /* __ASSEMBLY__ */
-
#endif /* _ASM_X86_DESC_H */
diff --git a/arch/x86/include/asm/iommu.h b/arch/x86/include/asm/iommu.h
index af326a2..fd6d21b 100644
--- a/arch/x86/include/asm/iommu.h
+++ b/arch/x86/include/asm/iommu.h
@@ -6,6 +6,7 @@
extern struct dma_map_ops nommu_dma_ops;
extern int force_iommu, no_iommu;
extern int iommu_detected;
+extern int iommu_pass_through;
/* 10 seconds */
#define DMAR_OPERATION_TIMEOUT ((cycles_t) tsc_khz*10*1000)
diff --git a/arch/x86/include/asm/mce.h b/arch/x86/include/asm/mce.h
index 540a466..5cdd8d1 100644
--- a/arch/x86/include/asm/mce.h
+++ b/arch/x86/include/asm/mce.h
@@ -102,15 +102,39 @@
#ifdef __KERNEL__
-extern int mce_disabled;
-
-#include <asm/atomic.h>
#include <linux/percpu.h>
+#include <linux/init.h>
+#include <asm/atomic.h>
+
+extern int mce_disabled;
+extern int mce_p5_enabled;
+
+#ifdef CONFIG_X86_MCE
+void mcheck_init(struct cpuinfo_x86 *c);
+#else
+static inline void mcheck_init(struct cpuinfo_x86 *c) {}
+#endif
+
+#ifdef CONFIG_X86_OLD_MCE
+extern int nr_mce_banks;
+void amd_mcheck_init(struct cpuinfo_x86 *c);
+void intel_p4_mcheck_init(struct cpuinfo_x86 *c);
+void intel_p6_mcheck_init(struct cpuinfo_x86 *c);
+#endif
+
+#ifdef CONFIG_X86_ANCIENT_MCE
+void intel_p5_mcheck_init(struct cpuinfo_x86 *c);
+void winchip_mcheck_init(struct cpuinfo_x86 *c);
+static inline void enable_p5_mce(void) { mce_p5_enabled = 1; }
+#else
+static inline void intel_p5_mcheck_init(struct cpuinfo_x86 *c) {}
+static inline void winchip_mcheck_init(struct cpuinfo_x86 *c) {}
+static inline void enable_p5_mce(void) {}
+#endif
void mce_setup(struct mce *m);
void mce_log(struct mce *m);
DECLARE_PER_CPU(struct sys_device, mce_dev);
-extern void (*threshold_cpu_callback)(unsigned long action, unsigned int cpu);
/*
* To support more than 128 would need to escape the predefined
@@ -145,12 +169,8 @@
DECLARE_PER_CPU(unsigned, mce_exception_count);
DECLARE_PER_CPU(unsigned, mce_poll_count);
-void mce_log_therm_throt_event(__u64 status);
-
extern atomic_t mce_entry;
-void do_machine_check(struct pt_regs *, long);
-
typedef DECLARE_BITMAP(mce_banks_t, MAX_NR_BANKS);
DECLARE_PER_CPU(mce_banks_t, mce_poll_banks);
@@ -167,13 +187,32 @@
DECLARE_PER_CPU(struct mce, injectm);
extern struct file_operations mce_chrdev_ops;
-#ifdef CONFIG_X86_MCE
-void mcheck_init(struct cpuinfo_x86 *c);
-#else
-#define mcheck_init(c) do { } while (0)
-#endif
+/*
+ * Exception handler
+ */
+
+/* Call the installed machine check handler for this CPU setup. */
+extern void (*machine_check_vector)(struct pt_regs *, long error_code);
+void do_machine_check(struct pt_regs *, long);
+
+/*
+ * Threshold handler
+ */
extern void (*mce_threshold_vector)(void);
+extern void (*threshold_cpu_callback)(unsigned long action, unsigned int cpu);
+
+/*
+ * Thermal handler
+ */
+
+void intel_init_thermal(struct cpuinfo_x86 *c);
+
+#ifdef CONFIG_X86_NEW_MCE
+void mce_log_therm_throt_event(__u64 status);
+#else
+static inline void mce_log_therm_throt_event(__u64 status) {}
+#endif
#endif /* __KERNEL__ */
#endif /* _ASM_X86_MCE_H */
diff --git a/arch/x86/include/asm/msr.h b/arch/x86/include/asm/msr.h
index 2260376..48ad9d2 100644
--- a/arch/x86/include/asm/msr.h
+++ b/arch/x86/include/asm/msr.h
@@ -3,13 +3,10 @@
#include <asm/msr-index.h>
-#ifndef __ASSEMBLY__
-# include <linux/types.h>
-#endif
-
#ifdef __KERNEL__
#ifndef __ASSEMBLY__
+#include <linux/types.h>
#include <asm/asm.h>
#include <asm/errno.h>
#include <asm/cpumask.h>
@@ -264,6 +261,4 @@
#endif /* CONFIG_SMP */
#endif /* __ASSEMBLY__ */
#endif /* __KERNEL__ */
-
-
#endif /* _ASM_X86_MSR_H */
diff --git a/arch/x86/include/asm/page_64_types.h b/arch/x86/include/asm/page_64_types.h
index 8d382d3..7639dbf 100644
--- a/arch/x86/include/asm/page_64_types.h
+++ b/arch/x86/include/asm/page_64_types.h
@@ -41,7 +41,7 @@
/* See Documentation/x86/x86_64/mm.txt for a description of the memory map. */
#define __PHYSICAL_MASK_SHIFT 46
-#define __VIRTUAL_MASK_SHIFT 48
+#define __VIRTUAL_MASK_SHIFT 47
/*
* Kernel image size is limited to 512 MB (see level2_kernel_pgt in
diff --git a/arch/x86/include/asm/pci.h b/arch/x86/include/asm/pci.h
index b51a1e8..927958d 100644
--- a/arch/x86/include/asm/pci.h
+++ b/arch/x86/include/asm/pci.h
@@ -130,6 +130,7 @@
/* generic pci stuff */
#include <asm-generic/pci.h>
+#define PCIBIOS_MAX_MEM_32 0xffffffff
#ifdef CONFIG_NUMA
/* Returns the node based on pci bus */
diff --git a/arch/x86/include/asm/pci_x86.h b/arch/x86/include/asm/pci_x86.h
index e60fd3e..cb739cc 100644
--- a/arch/x86/include/asm/pci_x86.h
+++ b/arch/x86/include/asm/pci_x86.h
@@ -25,7 +25,7 @@
#define PCI_BIOS_IRQ_SCAN 0x2000
#define PCI_ASSIGN_ALL_BUSSES 0x4000
#define PCI_CAN_SKIP_ISA_ALIGN 0x8000
-#define PCI_USE__CRS 0x10000
+#define PCI_NO_ROOT_CRS 0x10000
#define PCI_CHECK_ENABLE_AMD_MMCONF 0x20000
#define PCI_HAS_IO_ECS 0x40000
#define PCI_NOASSIGN_ROMS 0x80000
diff --git a/arch/x86/include/asm/perf_counter.h b/arch/x86/include/asm/perf_counter.h
index 876ed97..5fb33e1 100644
--- a/arch/x86/include/asm/perf_counter.h
+++ b/arch/x86/include/asm/perf_counter.h
@@ -84,11 +84,6 @@
#define MSR_ARCH_PERFMON_FIXED_CTR2 0x30b
#define X86_PMC_IDX_FIXED_BUS_CYCLES (X86_PMC_IDX_FIXED + 2)
-extern void set_perf_counter_pending(void);
-
-#define clear_perf_counter_pending() do { } while (0)
-#define test_perf_counter_pending() (0)
-
#ifdef CONFIG_PERF_COUNTERS
extern void init_hw_perf_counters(void);
extern void perf_counters_lapic_init(void);
diff --git a/arch/x86/include/asm/pgtable_32.h b/arch/x86/include/asm/pgtable_32.h
index 31bd120..01fd946 100644
--- a/arch/x86/include/asm/pgtable_32.h
+++ b/arch/x86/include/asm/pgtable_32.h
@@ -49,13 +49,17 @@
#endif
#if defined(CONFIG_HIGHPTE)
+#define __KM_PTE \
+ (in_nmi() ? KM_NMI_PTE : \
+ in_irq() ? KM_IRQ_PTE : \
+ KM_PTE0)
#define pte_offset_map(dir, address) \
- ((pte_t *)kmap_atomic_pte(pmd_page(*(dir)), KM_PTE0) + \
+ ((pte_t *)kmap_atomic_pte(pmd_page(*(dir)), __KM_PTE) + \
pte_index((address)))
#define pte_offset_map_nested(dir, address) \
((pte_t *)kmap_atomic_pte(pmd_page(*(dir)), KM_PTE1) + \
pte_index((address)))
-#define pte_unmap(pte) kunmap_atomic((pte), KM_PTE0)
+#define pte_unmap(pte) kunmap_atomic((pte), __KM_PTE)
#define pte_unmap_nested(pte) kunmap_atomic((pte), KM_PTE1)
#else
#define pte_offset_map(dir, address) \
diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h
index abde308..c57a301 100644
--- a/arch/x86/include/asm/pgtable_64.h
+++ b/arch/x86/include/asm/pgtable_64.h
@@ -165,10 +165,7 @@
/* fs/proc/kcore.c */
#define kc_vaddr_to_offset(v) ((v) & __VIRTUAL_MASK)
-#define kc_offset_to_vaddr(o) \
- (((o) & (1UL << (__VIRTUAL_MASK_SHIFT - 1))) \
- ? ((o) | ~__VIRTUAL_MASK) \
- : (o))
+#define kc_offset_to_vaddr(o) ((o) | ~__VIRTUAL_MASK)
#define __HAVE_ARCH_PTE_SAME
#endif /* !__ASSEMBLY__ */
diff --git a/arch/x86/include/asm/therm_throt.h b/arch/x86/include/asm/therm_throt.h
deleted file mode 100644
index c62349e..0000000
--- a/arch/x86/include/asm/therm_throt.h
+++ /dev/null
@@ -1,9 +0,0 @@
-#ifndef _ASM_X86_THERM_THROT_H
-#define _ASM_X86_THERM_THROT_H
-
-#include <asm/atomic.h>
-
-extern atomic_t therm_throt_en;
-int therm_throt_process(int curr);
-
-#endif /* _ASM_X86_THERM_THROT_H */
diff --git a/arch/x86/include/asm/timer.h b/arch/x86/include/asm/timer.h
index bd37ed4..20ca9c4 100644
--- a/arch/x86/include/asm/timer.h
+++ b/arch/x86/include/asm/timer.h
@@ -45,12 +45,16 @@
*/
DECLARE_PER_CPU(unsigned long, cyc2ns);
+DECLARE_PER_CPU(unsigned long long, cyc2ns_offset);
#define CYC2NS_SCALE_FACTOR 10 /* 2^10, carefully chosen */
static inline unsigned long long __cycles_2_ns(unsigned long long cyc)
{
- return cyc * per_cpu(cyc2ns, smp_processor_id()) >> CYC2NS_SCALE_FACTOR;
+ int cpu = smp_processor_id();
+ unsigned long long ns = per_cpu(cyc2ns_offset, cpu);
+ ns += cyc * per_cpu(cyc2ns, cpu) >> CYC2NS_SCALE_FACTOR;
+ return ns;
}
static inline unsigned long long cycles_2_ns(unsigned long long cyc)
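
The timer.h hunk above adds a per-CPU cyc2ns_offset to the fixed-point cycles-to-nanoseconds conversion, so the scale can be retuned (for example across a cpufreq change) without the returned time appearing to jump. Below is a minimal user-space sketch of the same idea; the struct, set_scale() and the example frequencies are illustrative, not kernel interfaces.

#include <stdint.h>
#include <stdio.h>

#define CYC2NS_SCALE_FACTOR 10              /* ns = cyc * scale >> 10, as above */

struct cyc2ns_state {
    uint64_t scale;                         /* (ns per cycle) << CYC2NS_SCALE_FACTOR */
    uint64_t offset;                        /* keeps the clock continuous on rescale */
};

static uint64_t cycles_to_ns(const struct cyc2ns_state *s, uint64_t cyc)
{
    return s->offset + ((cyc * s->scale) >> CYC2NS_SCALE_FACTOR);
}

/* Re-tune the scale at 'now_cyc' without letting the returned time jump. */
static void set_scale(struct cyc2ns_state *s, uint64_t now_cyc, uint64_t new_scale)
{
    uint64_t now_ns = cycles_to_ns(s, now_cyc);

    s->scale  = new_scale;
    /* unsigned wrap-around is intentional, exactly as with cyc2ns_offset */
    s->offset = now_ns - ((now_cyc * new_scale) >> CYC2NS_SCALE_FACTOR);
}

int main(void)
{
    struct cyc2ns_state s = { .scale = 1024 / 3, .offset = 0 }; /* ~3 GHz */
    uint64_t before = cycles_to_ns(&s, 3000000);

    set_scale(&s, 3000000, 1024);           /* pretend we dropped to 1 GHz */

    printf("before=%llu after=%llu\n",
           (unsigned long long)before,
           (unsigned long long)cycles_to_ns(&s, 3000000));
    return 0;                               /* both values match at the switch point */
}
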
diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
index b685ece..20e6a79 100644
--- a/arch/x86/include/asm/uaccess.h
+++ b/arch/x86/include/asm/uaccess.h
@@ -25,7 +25,7 @@
#define MAKE_MM_SEG(s) ((mm_segment_t) { (s) })
#define KERNEL_DS MAKE_MM_SEG(-1UL)
-#define USER_DS MAKE_MM_SEG(PAGE_OFFSET)
+#define USER_DS MAKE_MM_SEG(TASK_SIZE_MAX)
#define get_ds() (KERNEL_DS)
#define get_fs() (current_thread_info()->addr_limit)
diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c
index 1c60554..9372f04 100644
--- a/arch/x86/kernel/amd_iommu.c
+++ b/arch/x86/kernel/amd_iommu.c
@@ -434,6 +434,16 @@
iommu_queue_inv_iommu_pages(iommu, address, domid, 0, 1);
}
+/* Flush the whole IO/TLB for a given protection domain - including PDE */
+static void iommu_flush_tlb_pde(struct amd_iommu *iommu, u16 domid)
+{
+ u64 address = CMD_INV_IOMMU_ALL_PAGES_ADDRESS;
+
+ INC_STATS_COUNTER(domain_flush_single);
+
+ iommu_queue_inv_iommu_pages(iommu, address, domid, 1, 1);
+}
+
/*
* This function is used to flush the IO/TLB for a given protection domain
* on every IOMMU in the system
@@ -1078,7 +1088,13 @@
amd_iommu_pd_table[devid] = domain;
write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
+ /*
+ * We might boot into a crash-kernel here. The crashed kernel
+ * left the caches in the IOMMU dirty. So we have to flush
+ * here to evict all dirty stuff.
+ */
iommu_queue_inv_dev_entry(iommu, devid);
+ iommu_flush_tlb_pde(iommu, domain->id);
}
/*
diff --git a/arch/x86/kernel/amd_iommu_init.c b/arch/x86/kernel/amd_iommu_init.c
index 238989e..10b2acc 100644
--- a/arch/x86/kernel/amd_iommu_init.c
+++ b/arch/x86/kernel/amd_iommu_init.c
@@ -260,6 +260,14 @@
static void iommu_disable(struct amd_iommu *iommu)
{
+ /* Disable command buffer */
+ iommu_feature_disable(iommu, CONTROL_CMDBUF_EN);
+
+ /* Disable event logging and event interrupts */
+ iommu_feature_disable(iommu, CONTROL_EVT_INT_EN);
+ iommu_feature_disable(iommu, CONTROL_EVT_LOG_EN);
+
+ /* Disable IOMMU hardware itself */
iommu_feature_disable(iommu, CONTROL_IOMMU_EN);
}
@@ -478,6 +486,10 @@
memcpy_toio(iommu->mmio_base + MMIO_EVT_BUF_OFFSET,
&entry, sizeof(entry));
+ /* set head and tail to zero manually */
+ writel(0x00, iommu->mmio_base + MMIO_EVT_HEAD_OFFSET);
+ writel(0x00, iommu->mmio_base + MMIO_EVT_TAIL_OFFSET);
+
iommu_feature_enable(iommu, CONTROL_EVT_LOG_EN);
}
@@ -1042,6 +1054,7 @@
struct amd_iommu *iommu;
for_each_iommu(iommu) {
+ iommu_disable(iommu);
iommu_set_device_table(iommu);
iommu_enable_command_buffer(iommu);
iommu_enable_event_buffer(iommu);
@@ -1066,12 +1079,6 @@
static int amd_iommu_resume(struct sys_device *dev)
{
- /*
- * Disable IOMMUs before reprogramming the hardware registers.
- * IOMMU is still enabled from the resume kernel.
- */
- disable_iommus();
-
/* re-load the hardware */
enable_iommus();
@@ -1079,8 +1086,8 @@
* we have to flush after the IOMMUs are enabled because a
* disabled IOMMU will never execute the commands we send
*/
- amd_iommu_flush_all_domains();
amd_iommu_flush_all_devices();
+ amd_iommu_flush_all_domains();
return 0;
}
@@ -1273,6 +1280,11 @@
goto out;
}
+void amd_iommu_shutdown(void)
+{
+ disable_iommus();
+}
+
/****************************************************************************
*
* Early detect code. This code runs at IOMMU detection time in the DMA
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index ef8d929..b7a7920 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -462,7 +462,8 @@
static void
__ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e)
{
- union entry_union eu;
+ union entry_union eu = {{0, 0}};
+
eu.entry = e;
io_apic_write(apic, 0x11 + 2*pin, eu.w2);
io_apic_write(apic, 0x10 + 2*pin, eu.w1);
@@ -2003,7 +2004,9 @@
/*
* Use virtual wire A mode when interrupt remapping is enabled.
*/
- disconnect_bsp_APIC(!intr_remapping_enabled && ioapic_i8259.pin != -1);
+ if (cpu_has_apic)
+ disconnect_bsp_APIC(!intr_remapping_enabled &&
+ ioapic_i8259.pin != -1);
}
#ifdef CONFIG_X86_32
@@ -3567,7 +3570,7 @@
#endif /* CONFIG_SMP */
-struct irq_chip dmar_msi_type = {
+static struct irq_chip dmar_msi_type = {
.name = "DMAR_MSI",
.unmask = dmar_msi_unmask,
.mask = dmar_msi_mask,
diff --git a/arch/x86/kernel/apic/probe_32.c b/arch/x86/kernel/apic/probe_32.c
index 440a8bc..0c0182c 100644
--- a/arch/x86/kernel/apic/probe_32.c
+++ b/arch/x86/kernel/apic/probe_32.c
@@ -20,23 +20,12 @@
#include <asm/apic.h>
#include <asm/setup.h>
-#include <linux/threads.h>
-#include <linux/cpumask.h>
-#include <asm/mpspec.h>
-#include <asm/fixmap.h>
-#include <asm/apicdef.h>
-#include <linux/kernel.h>
-#include <linux/string.h>
#include <linux/smp.h>
-#include <linux/init.h>
#include <asm/ipi.h>
-#include <linux/smp.h>
-#include <linux/init.h>
#include <linux/interrupt.h>
#include <asm/acpi.h>
#include <asm/e820.h>
-#include <asm/setup.h>
#ifdef CONFIG_HOTPLUG_CPU
#define DEFAULT_SEND_IPI (1)
diff --git a/arch/x86/kernel/apic/summit_32.c b/arch/x86/kernel/apic/summit_32.c
index 344eee4..eafdfbd1 100644
--- a/arch/x86/kernel/apic/summit_32.c
+++ b/arch/x86/kernel/apic/summit_32.c
@@ -44,7 +44,6 @@
#include <asm/ipi.h>
#include <linux/kernel.h>
#include <linux/string.h>
-#include <linux/init.h>
#include <linux/gfp.h>
#include <linux/smp.h>
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 9fa3388..6b26d4d 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -108,7 +108,7 @@
/* data */
[GDT_ENTRY_APMBIOS_BASE+2] = { { { 0x0000ffff, 0x00409200 } } },
- [GDT_ENTRY_ESPFIX_SS] = { { { 0x00000000, 0x00c09200 } } },
+ [GDT_ENTRY_ESPFIX_SS] = { { { 0x0000ffff, 0x00cf9200 } } },
[GDT_ENTRY_PERCPU] = { { { 0x0000ffff, 0x00cf9200 } } },
GDT_STACK_CANARY_INIT
#endif
@@ -848,6 +848,9 @@
#if defined(CONFIG_NUMA) && defined(CONFIG_X86_64)
numa_add_cpu(smp_processor_id());
#endif
+
+ /* Cap the iomem address space to what is addressable on all CPUs */
+ iomem_resource.end &= (1ULL << c->x86_phys_bits) - 1;
}
#ifdef CONFIG_X86_64
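
The common.c change above clips iomem_resource.end to the physical address width reported in c->x86_phys_bits, so resources are never handed out above what every CPU can actually address. A quick stand-alone illustration of the mask (the widths below are just examples):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    int widths[] = { 32, 36, 40, 46 };          /* example x86_phys_bits values */

    for (unsigned int i = 0; i < sizeof(widths) / sizeof(widths[0]); i++) {
        uint64_t end = (1ULL << widths[i]) - 1; /* highest addressable byte */
        printf("x86_phys_bits=%2d -> iomem_resource.end = %#llx\n",
               widths[i], (unsigned long long)end);
    }
    return 0;
}
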
diff --git a/arch/x86/kernel/cpu/mcheck/Makefile b/arch/x86/kernel/cpu/mcheck/Makefile
index 45004fa..188a1ca 100644
--- a/arch/x86/kernel/cpu/mcheck/Makefile
+++ b/arch/x86/kernel/cpu/mcheck/Makefile
@@ -1,11 +1,12 @@
-obj-y = mce.o therm_throt.o
+obj-y = mce.o
obj-$(CONFIG_X86_NEW_MCE) += mce-severity.o
obj-$(CONFIG_X86_OLD_MCE) += k7.o p4.o p6.o
obj-$(CONFIG_X86_ANCIENT_MCE) += winchip.o p5.o
-obj-$(CONFIG_X86_MCE_P4THERMAL) += mce_intel.o
-obj-$(CONFIG_X86_MCE_INTEL) += mce_intel_64.o mce_intel.o
-obj-$(CONFIG_X86_MCE_AMD) += mce_amd_64.o
+obj-$(CONFIG_X86_MCE_INTEL) += mce_intel.o
+obj-$(CONFIG_X86_MCE_AMD) += mce_amd.o
obj-$(CONFIG_X86_MCE_NONFATAL) += non-fatal.o
obj-$(CONFIG_X86_MCE_THRESHOLD) += threshold.o
obj-$(CONFIG_X86_MCE_INJECT) += mce-inject.o
+
+obj-$(CONFIG_X86_THERMAL_VECTOR) += therm_throt.o
diff --git a/arch/x86/kernel/cpu/mcheck/k7.c b/arch/x86/kernel/cpu/mcheck/k7.c
index 89e5104..b945d5d 100644
--- a/arch/x86/kernel/cpu/mcheck/k7.c
+++ b/arch/x86/kernel/cpu/mcheck/k7.c
@@ -10,10 +10,9 @@
#include <asm/processor.h>
#include <asm/system.h>
+#include <asm/mce.h>
#include <asm/msr.h>
-#include "mce.h"
-
/* Machine Check Handler For AMD Athlon/Duron: */
static void k7_machine_check(struct pt_regs *regs, long error_code)
{
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
index fabba15..284d1de 100644
--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -44,7 +44,6 @@
#include <asm/msr.h>
#include "mce-internal.h"
-#include "mce.h"
/* Handle unconfigured int18 (should never happen) */
static void unexpected_machine_check(struct pt_regs *regs, long error_code)
@@ -57,7 +56,7 @@
void (*machine_check_vector)(struct pt_regs *, long error_code) =
unexpected_machine_check;
-int mce_disabled;
+int mce_disabled __read_mostly;
#ifdef CONFIG_X86_NEW_MCE
@@ -76,21 +75,22 @@
* 2: SIGBUS or log uncorrected errors (if possible), log corrected errors
* 3: never panic or SIGBUS, log all errors (for testing only)
*/
-static int tolerant = 1;
-static int banks;
-static u64 *bank;
-static unsigned long notify_user;
-static int rip_msr;
-static int mce_bootlog = -1;
-static int monarch_timeout = -1;
-static int mce_panic_timeout;
-static int mce_dont_log_ce;
-int mce_cmci_disabled;
-int mce_ignore_ce;
-int mce_ser;
+static int tolerant __read_mostly = 1;
+static int banks __read_mostly;
+static u64 *bank __read_mostly;
+static int rip_msr __read_mostly;
+static int mce_bootlog __read_mostly = -1;
+static int monarch_timeout __read_mostly = -1;
+static int mce_panic_timeout __read_mostly;
+static int mce_dont_log_ce __read_mostly;
+int mce_cmci_disabled __read_mostly;
+int mce_ignore_ce __read_mostly;
+int mce_ser __read_mostly;
-static char trigger[128];
-static char *trigger_argv[2] = { trigger, NULL };
+/* User mode helper program triggered by machine check event */
+static unsigned long mce_need_notify;
+static char mce_helper[128];
+static char *mce_helper_argv[2] = { mce_helper, NULL };
static unsigned long dont_init_banks;
@@ -180,7 +180,7 @@
wmb();
mce->finished = 1;
- set_bit(0, &notify_user);
+ set_bit(0, &mce_need_notify);
}
static void print_mce(struct mce *m)
@@ -691,18 +691,21 @@
* in the entry order.
* TBD double check parallel CPU hotunplug
*/
-static int mce_start(int no_way_out, int *order)
+static int mce_start(int *no_way_out)
{
- int nwo;
+ int order;
int cpus = num_online_cpus();
u64 timeout = (u64)monarch_timeout * NSEC_PER_USEC;
- if (!timeout) {
- *order = -1;
- return no_way_out;
- }
+ if (!timeout)
+ return -1;
- atomic_add(no_way_out, &global_nwo);
+ atomic_add(*no_way_out, &global_nwo);
+ /*
+ * global_nwo should be updated before mce_callin
+ */
+ smp_wmb();
+ order = atomic_add_return(1, &mce_callin);
/*
* Wait for everyone.
@@ -710,40 +713,43 @@
while (atomic_read(&mce_callin) != cpus) {
if (mce_timed_out(&timeout)) {
atomic_set(&global_nwo, 0);
- *order = -1;
- return no_way_out;
+ return -1;
}
ndelay(SPINUNIT);
}
/*
+ * mce_callin should be read before global_nwo
+ */
+ smp_rmb();
+
+ if (order == 1) {
+ /*
+ * Monarch: Starts executing now, the others wait.
+ */
+ atomic_set(&mce_executing, 1);
+ } else {
+ /*
+ * Subject: Now start the scanning loop one by one in
+ * the original callin order.
+ * This way when there are any shared banks it will be
+ * only seen by one CPU before cleared, avoiding duplicates.
+ */
+ while (atomic_read(&mce_executing) < order) {
+ if (mce_timed_out(&timeout)) {
+ atomic_set(&global_nwo, 0);
+ return -1;
+ }
+ ndelay(SPINUNIT);
+ }
+ }
+
+ /*
* Cache the global no_way_out state.
*/
- nwo = atomic_read(&global_nwo);
+ *no_way_out = atomic_read(&global_nwo);
- /*
- * Monarch starts executing now, the others wait.
- */
- if (*order == 1) {
- atomic_set(&mce_executing, 1);
- return nwo;
- }
-
- /*
- * Now start the scanning loop one by one
- * in the original callin order.
- * This way when there are any shared banks it will
- * be only seen by one CPU before cleared, avoiding duplicates.
- */
- while (atomic_read(&mce_executing) < *order) {
- if (mce_timed_out(&timeout)) {
- atomic_set(&global_nwo, 0);
- *order = -1;
- return no_way_out;
- }
- ndelay(SPINUNIT);
- }
- return nwo;
+ return order;
}
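
The rewritten mce_start() above folds the call-in numbering into the rendezvous itself: every CPU takes an order number from mce_callin, the CPU with order 1 becomes the Monarch and scans its banks first, and the Subjects proceed one at a time once mce_executing reaches their order, so a shared bank is seen (and cleared) by only one CPU. A toy user-space model of that ordering, using threads in place of CPUs and plain atomics in place of the MCE globals:

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define NCPUS 4

static atomic_int callin;      /* stands in for mce_callin    */
static atomic_int executing;   /* stands in for mce_executing */

static void *cpu_thread(void *arg)
{
    (void)arg;
    int order = atomic_fetch_add(&callin, 1) + 1;   /* 1..NCPUS */

    while (atomic_load(&callin) != NCPUS)           /* wait for everyone */
        ;

    if (order == 1)
        atomic_store(&executing, 1);                /* Monarch scans first */
    else
        while (atomic_load(&executing) < order)     /* Subjects wait their turn */
            ;

    printf("order %d: scanning banks\n", order);    /* one CPU at a time */

    atomic_fetch_add(&executing, 1);                /* hand over to the next */
    return NULL;
}

int main(void)
{
    pthread_t t[NCPUS];

    for (int i = 0; i < NCPUS; i++)
        pthread_create(&t[i], NULL, cpu_thread, NULL);
    for (int i = 0; i < NCPUS; i++)
        pthread_join(t[i], NULL);
    return 0;
}

Built with -pthread, the "scanning banks" lines always come out in call-in order even though the threads start concurrently, which is the property the monarch/subject protocol relies on.
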
/*
@@ -863,7 +869,6 @@
* check handler.
*/
int order;
-
/*
* If no_way_out gets set, there is no safe way to recover from this
* MCE. If tolerant is cranked up, we'll try anyway.
@@ -887,7 +892,6 @@
if (!banks)
goto out;
- order = atomic_add_return(1, &mce_callin);
mce_setup(&m);
m.mcgstatus = mce_rdmsrl(MSR_IA32_MCG_STATUS);
@@ -909,7 +913,7 @@
* This way we don't report duplicated events on shared banks
* because the first one to see it will clear it.
*/
- no_way_out = mce_start(no_way_out, &order);
+ order = mce_start(&no_way_out);
for (i = 0; i < banks; i++) {
__clear_bit(i, toclear);
if (!bank[i])
@@ -1118,7 +1122,7 @@
static void mce_do_trigger(struct work_struct *work)
{
- call_usermodehelper(trigger, trigger_argv, NULL, UMH_NO_WAIT);
+ call_usermodehelper(mce_helper, mce_helper_argv, NULL, UMH_NO_WAIT);
}
static DECLARE_WORK(mce_trigger_work, mce_do_trigger);
@@ -1135,7 +1139,7 @@
clear_thread_flag(TIF_MCE_NOTIFY);
- if (test_and_clear_bit(0, &notify_user)) {
+ if (test_and_clear_bit(0, &mce_need_notify)) {
wake_up_interruptible(&mce_wait);
/*
@@ -1143,7 +1147,7 @@
* work_pending is always cleared before the function is
* executed.
*/
- if (trigger[0] && !work_pending(&mce_trigger_work))
+ if (mce_helper[0] && !work_pending(&mce_trigger_work))
schedule_work(&mce_trigger_work);
if (__ratelimit(&ratelimit))
@@ -1245,7 +1249,7 @@
* Various K7s with broken bank 0 around. Always disable
* by default.
*/
- if (c->x86 == 6)
+ if (c->x86 == 6 && banks > 0)
bank[0] = 0;
}
@@ -1282,8 +1286,7 @@
return;
switch (c->x86_vendor) {
case X86_VENDOR_INTEL:
- if (mce_p5_enabled())
- intel_p5_mcheck_init(c);
+ intel_p5_mcheck_init(c);
break;
case X86_VENDOR_CENTAUR:
winchip_mcheck_init(c);
@@ -1609,8 +1612,9 @@
static void mce_cpu_restart(void *data)
{
del_timer_sync(&__get_cpu_var(mce_timer));
- if (mce_available(&current_cpu_data))
- mce_init();
+ if (!mce_available(&current_cpu_data))
+ return;
+ mce_init();
mce_init_timer();
}
@@ -1620,6 +1624,26 @@
on_each_cpu(mce_cpu_restart, NULL, 1);
}
+/* Toggle features for corrected errors */
+static void mce_disable_ce(void *all)
+{
+ if (!mce_available(&current_cpu_data))
+ return;
+ if (all)
+ del_timer_sync(&__get_cpu_var(mce_timer));
+ cmci_clear();
+}
+
+static void mce_enable_ce(void *all)
+{
+ if (!mce_available(&current_cpu_data))
+ return;
+ cmci_reenable();
+ cmci_recheck();
+ if (all)
+ mce_init_timer();
+}
+
static struct sysdev_class mce_sysclass = {
.suspend = mce_suspend,
.shutdown = mce_shutdown,
@@ -1659,9 +1683,9 @@
static ssize_t
show_trigger(struct sys_device *s, struct sysdev_attribute *attr, char *buf)
{
- strcpy(buf, trigger);
+ strcpy(buf, mce_helper);
strcat(buf, "\n");
- return strlen(trigger) + 1;
+ return strlen(mce_helper) + 1;
}
static ssize_t set_trigger(struct sys_device *s, struct sysdev_attribute *attr,
@@ -1670,10 +1694,10 @@
char *p;
int len;
- strncpy(trigger, buf, sizeof(trigger));
- trigger[sizeof(trigger)-1] = 0;
- len = strlen(trigger);
- p = strchr(trigger, '\n');
+ strncpy(mce_helper, buf, sizeof(mce_helper));
+ mce_helper[sizeof(mce_helper)-1] = 0;
+ len = strlen(mce_helper);
+ p = strchr(mce_helper, '\n');
if (*p)
*p = 0;
@@ -1681,6 +1705,52 @@
return len;
}
+static ssize_t set_ignore_ce(struct sys_device *s,
+ struct sysdev_attribute *attr,
+ const char *buf, size_t size)
+{
+ u64 new;
+
+ if (strict_strtoull(buf, 0, &new) < 0)
+ return -EINVAL;
+
+ if (mce_ignore_ce ^ !!new) {
+ if (new) {
+ /* disable ce features */
+ on_each_cpu(mce_disable_ce, (void *)1, 1);
+ mce_ignore_ce = 1;
+ } else {
+ /* enable ce features */
+ mce_ignore_ce = 0;
+ on_each_cpu(mce_enable_ce, (void *)1, 1);
+ }
+ }
+ return size;
+}
+
+static ssize_t set_cmci_disabled(struct sys_device *s,
+ struct sysdev_attribute *attr,
+ const char *buf, size_t size)
+{
+ u64 new;
+
+ if (strict_strtoull(buf, 0, &new) < 0)
+ return -EINVAL;
+
+ if (mce_cmci_disabled ^ !!new) {
+ if (new) {
+ /* disable cmci */
+ on_each_cpu(mce_disable_ce, NULL, 1);
+ mce_cmci_disabled = 1;
+ } else {
+ /* enable cmci */
+ mce_cmci_disabled = 0;
+ on_each_cpu(mce_enable_ce, NULL, 1);
+ }
+ }
+ return size;
+}
+
static ssize_t store_int_with_restart(struct sys_device *s,
struct sysdev_attribute *attr,
const char *buf, size_t size)
@@ -1693,6 +1763,7 @@
static SYSDEV_ATTR(trigger, 0644, show_trigger, set_trigger);
static SYSDEV_INT_ATTR(tolerant, 0644, tolerant);
static SYSDEV_INT_ATTR(monarch_timeout, 0644, monarch_timeout);
+static SYSDEV_INT_ATTR(dont_log_ce, 0644, mce_dont_log_ce);
static struct sysdev_ext_attribute attr_check_interval = {
_SYSDEV_ATTR(check_interval, 0644, sysdev_show_int,
@@ -1700,9 +1771,24 @@
&check_interval
};
+static struct sysdev_ext_attribute attr_ignore_ce = {
+ _SYSDEV_ATTR(ignore_ce, 0644, sysdev_show_int, set_ignore_ce),
+ &mce_ignore_ce
+};
+
+static struct sysdev_ext_attribute attr_cmci_disabled = {
+ _SYSDEV_ATTR(cmci_disabled, 0644, sysdev_show_int, set_cmci_disabled),
+ &mce_cmci_disabled
+};
+
static struct sysdev_attribute *mce_attrs[] = {
- &attr_tolerant.attr, &attr_check_interval.attr, &attr_trigger,
+ &attr_tolerant.attr,
+ &attr_check_interval.attr,
+ &attr_trigger,
&attr_monarch_timeout.attr,
+ &attr_dont_log_ce.attr,
+ &attr_ignore_ce.attr,
+ &attr_cmci_disabled.attr,
NULL
};
@@ -1712,7 +1798,7 @@
static __cpuinit int mce_create_device(unsigned int cpu)
{
int err;
- int i;
+ int i, j;
if (!mce_available(&boot_cpu_data))
return -EIO;
@@ -1730,9 +1816,9 @@
if (err)
goto error;
}
- for (i = 0; i < banks; i++) {
+ for (j = 0; j < banks; j++) {
err = sysdev_create_file(&per_cpu(mce_dev, cpu),
- &bank_attrs[i]);
+ &bank_attrs[j]);
if (err)
goto error2;
}
@@ -1740,8 +1826,8 @@
return 0;
error2:
- while (--i >= 0)
- sysdev_remove_file(&per_cpu(mce_dev, cpu), &bank_attrs[i]);
+ while (--j >= 0)
+ sysdev_remove_file(&per_cpu(mce_dev, cpu), &bank_attrs[j]);
error:
while (--i >= 0)
sysdev_remove_file(&per_cpu(mce_dev, cpu), mce_attrs[i]);
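
The i/j change in mce_create_device() above restores correct error unwinding: the attribute loop and the per-bank loop now each keep their own index, so a failure in the bank loop rolls back only the bank files it created and then falls through to remove all of the attribute files. The shape of that pattern, with create()/destroy() standing in for sysdev_create_file()/sysdev_remove_file():

#include <stdio.h>

#define NATTRS 3
#define NBANKS 4

static int create(const char *what, int idx)
{
    if (what[0] == 'b' && idx == 2)     /* pretend bank file 2 fails */
        return -1;
    printf("create  %s %d\n", what, idx);
    return 0;
}

static void destroy(const char *what, int idx)
{
    printf("destroy %s %d\n", what, idx);
}

int main(void)
{
    int i, j;

    for (i = 0; i < NATTRS; i++)
        if (create("attr", i))
            goto error;
    for (j = 0; j < NBANKS; j++)
        if (create("bank", j))
            goto error2;
    return 0;

error2:
    while (--j >= 0)                    /* undo only the bank files... */
        destroy("bank", j);
error:
    while (--i >= 0)                    /* ...then all the attr files */
        destroy("attr", i);
    return 1;
}
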
@@ -1883,7 +1969,7 @@
if (!mce_available(&boot_cpu_data))
return -EIO;
- alloc_cpumask_var(&mce_dev_initialized, GFP_KERNEL);
+ zalloc_cpumask_var(&mce_dev_initialized, GFP_KERNEL);
err = mce_init_banks();
if (err)
@@ -1915,7 +2001,7 @@
/* This has to be run for each processor */
void mcheck_init(struct cpuinfo_x86 *c)
{
- if (mce_disabled == 1)
+ if (mce_disabled)
return;
switch (c->x86_vendor) {
@@ -1945,10 +2031,9 @@
static int __init mcheck_enable(char *str)
{
- mce_disabled = -1;
+ mce_p5_enabled = 1;
return 1;
}
-
__setup("mce", mcheck_enable);
#endif /* CONFIG_X86_OLD_MCE */
diff --git a/arch/x86/kernel/cpu/mcheck/mce.h b/arch/x86/kernel/cpu/mcheck/mce.h
deleted file mode 100644
index 84a552b..0000000
--- a/arch/x86/kernel/cpu/mcheck/mce.h
+++ /dev/null
@@ -1,38 +0,0 @@
-#include <linux/init.h>
-#include <asm/mce.h>
-
-#ifdef CONFIG_X86_OLD_MCE
-void amd_mcheck_init(struct cpuinfo_x86 *c);
-void intel_p4_mcheck_init(struct cpuinfo_x86 *c);
-void intel_p6_mcheck_init(struct cpuinfo_x86 *c);
-#endif
-
-#ifdef CONFIG_X86_ANCIENT_MCE
-void intel_p5_mcheck_init(struct cpuinfo_x86 *c);
-void winchip_mcheck_init(struct cpuinfo_x86 *c);
-extern int mce_p5_enable;
-static inline int mce_p5_enabled(void) { return mce_p5_enable; }
-static inline void enable_p5_mce(void) { mce_p5_enable = 1; }
-#else
-static inline void intel_p5_mcheck_init(struct cpuinfo_x86 *c) {}
-static inline void winchip_mcheck_init(struct cpuinfo_x86 *c) {}
-static inline int mce_p5_enabled(void) { return 0; }
-static inline void enable_p5_mce(void) { }
-#endif
-
-/* Call the installed machine check handler for this CPU setup. */
-extern void (*machine_check_vector)(struct pt_regs *, long error_code);
-
-#ifdef CONFIG_X86_OLD_MCE
-
-extern int nr_mce_banks;
-
-void intel_set_thermal_handler(void);
-
-#else
-
-static inline void intel_set_thermal_handler(void) { }
-
-#endif
-
-void intel_init_thermal(struct cpuinfo_x86 *c);
diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd_64.c b/arch/x86/kernel/cpu/mcheck/mce_amd.c
similarity index 100%
rename from arch/x86/kernel/cpu/mcheck/mce_amd_64.c
rename to arch/x86/kernel/cpu/mcheck/mce_amd.c
diff --git a/arch/x86/kernel/cpu/mcheck/mce_intel.c b/arch/x86/kernel/cpu/mcheck/mce_intel.c
index 2b011d2..e1acec0 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_intel.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_intel.c
@@ -1,74 +1,226 @@
/*
- * Common code for Intel machine checks
+ * Intel specific MCE features.
+ * Copyright 2004 Zwane Mwaikambo <zwane@linuxpower.ca>
+ * Copyright (C) 2008, 2009 Intel Corporation
+ * Author: Andi Kleen
*/
-#include <linux/interrupt.h>
-#include <linux/kernel.h>
-#include <linux/types.h>
+
#include <linux/init.h>
-#include <linux/smp.h>
-
-#include <asm/therm_throt.h>
-#include <asm/processor.h>
-#include <asm/system.h>
+#include <linux/interrupt.h>
+#include <linux/percpu.h>
#include <asm/apic.h>
+#include <asm/processor.h>
#include <asm/msr.h>
+#include <asm/mce.h>
-#include "mce.h"
+/*
+ * Support for Intel Correct Machine Check Interrupts. This allows
+ * the CPU to raise an interrupt when a corrected machine check happened.
+ * Normally we pick those up using a regular polling timer.
+ * Also supports reliable discovery of shared banks.
+ */
-void intel_init_thermal(struct cpuinfo_x86 *c)
+static DEFINE_PER_CPU(mce_banks_t, mce_banks_owned);
+
+/*
+ * cmci_discover_lock protects against parallel discovery attempts
+ * which could race against each other.
+ */
+static DEFINE_SPINLOCK(cmci_discover_lock);
+
+#define CMCI_THRESHOLD 1
+
+static int cmci_supported(int *banks)
{
- unsigned int cpu = smp_processor_id();
- int tm2 = 0;
- u32 l, h;
+ u64 cap;
- /* Thermal monitoring depends on ACPI and clock modulation*/
- if (!cpu_has(c, X86_FEATURE_ACPI) || !cpu_has(c, X86_FEATURE_ACC))
- return;
+ if (mce_cmci_disabled || mce_ignore_ce)
+ return 0;
/*
- * First check if its enabled already, in which case there might
- * be some SMM goo which handles it, so we can't even put a handler
- * since it might be delivered via SMI already:
+ * Vendor check is not strictly needed, but the initial
+ * initialization is vendor keyed and this
+ * makes sure none of the backdoors are entered otherwise.
*/
- rdmsr(MSR_IA32_MISC_ENABLE, l, h);
- h = apic_read(APIC_LVTTHMR);
- if ((l & MSR_IA32_MISC_ENABLE_TM1) && (h & APIC_DM_SMI)) {
- printk(KERN_DEBUG
- "CPU%d: Thermal monitoring handled by SMI\n", cpu);
+ if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
+ return 0;
+ if (!cpu_has_apic || lapic_get_maxlvt() < 6)
+ return 0;
+ rdmsrl(MSR_IA32_MCG_CAP, cap);
+ *banks = min_t(unsigned, MAX_NR_BANKS, cap & 0xff);
+ return !!(cap & MCG_CMCI_P);
+}
+
+/*
+ * The interrupt handler. This is called on every event.
+ * Just call the poller directly to log any events.
+ * This could in theory increase the threshold under high load,
+ * but doesn't for now.
+ */
+static void intel_threshold_interrupt(void)
+{
+ machine_check_poll(MCP_TIMESTAMP, &__get_cpu_var(mce_banks_owned));
+ mce_notify_irq();
+}
+
+static void print_update(char *type, int *hdr, int num)
+{
+ if (*hdr == 0)
+ printk(KERN_INFO "CPU %d MCA banks", smp_processor_id());
+ *hdr = 1;
+ printk(KERN_CONT " %s:%d", type, num);
+}
+
+/*
+ * Enable CMCI (Corrected Machine Check Interrupt) for available MCE banks
+ * on this CPU. Use the algorithm recommended in the SDM to discover shared
+ * banks.
+ */
+static void cmci_discover(int banks, int boot)
+{
+ unsigned long *owned = (void *)&__get_cpu_var(mce_banks_owned);
+ unsigned long flags;
+ int hdr = 0;
+ int i;
+
+ spin_lock_irqsave(&cmci_discover_lock, flags);
+ for (i = 0; i < banks; i++) {
+ u64 val;
+
+ if (test_bit(i, owned))
+ continue;
+
+ rdmsrl(MSR_IA32_MC0_CTL2 + i, val);
+
+ /* Already owned by someone else? */
+ if (val & CMCI_EN) {
+ if (test_and_clear_bit(i, owned) || boot)
+ print_update("SHD", &hdr, i);
+ __clear_bit(i, __get_cpu_var(mce_poll_banks));
+ continue;
+ }
+
+ val |= CMCI_EN | CMCI_THRESHOLD;
+ wrmsrl(MSR_IA32_MC0_CTL2 + i, val);
+ rdmsrl(MSR_IA32_MC0_CTL2 + i, val);
+
+ /* Did the enable bit stick? -- the bank supports CMCI */
+ if (val & CMCI_EN) {
+ if (!test_and_set_bit(i, owned) || boot)
+ print_update("CMCI", &hdr, i);
+ __clear_bit(i, __get_cpu_var(mce_poll_banks));
+ } else {
+ WARN_ON(!test_bit(i, __get_cpu_var(mce_poll_banks)));
+ }
+ }
+ spin_unlock_irqrestore(&cmci_discover_lock, flags);
+ if (hdr)
+ printk(KERN_CONT "\n");
+}
+
+/*
+ * Just in case we missed an event during initialization check
+ * all the CMCI owned banks.
+ */
+void cmci_recheck(void)
+{
+ unsigned long flags;
+ int banks;
+
+ if (!mce_available(&current_cpu_data) || !cmci_supported(&banks))
return;
+ local_irq_save(flags);
+ machine_check_poll(MCP_TIMESTAMP, &__get_cpu_var(mce_banks_owned));
+ local_irq_restore(flags);
+}
+
+/*
+ * Disable CMCI on this CPU for all banks it owns when it goes down.
+ * This allows other CPUs to claim the banks on rediscovery.
+ */
+void cmci_clear(void)
+{
+ unsigned long flags;
+ int i;
+ int banks;
+ u64 val;
+
+ if (!cmci_supported(&banks))
+ return;
+ spin_lock_irqsave(&cmci_discover_lock, flags);
+ for (i = 0; i < banks; i++) {
+ if (!test_bit(i, __get_cpu_var(mce_banks_owned)))
+ continue;
+ /* Disable CMCI */
+ rdmsrl(MSR_IA32_MC0_CTL2 + i, val);
+ val &= ~(CMCI_EN|CMCI_THRESHOLD_MASK);
+ wrmsrl(MSR_IA32_MC0_CTL2 + i, val);
+ __clear_bit(i, __get_cpu_var(mce_banks_owned));
+ }
+ spin_unlock_irqrestore(&cmci_discover_lock, flags);
+}
+
+/*
+ * After a CPU went down cycle through all the others and rediscover
+ * Must run in process context.
+ */
+void cmci_rediscover(int dying)
+{
+ int banks;
+ int cpu;
+ cpumask_var_t old;
+
+ if (!cmci_supported(&banks))
+ return;
+ if (!alloc_cpumask_var(&old, GFP_KERNEL))
+ return;
+ cpumask_copy(old, &current->cpus_allowed);
+
+ for_each_online_cpu(cpu) {
+ if (cpu == dying)
+ continue;
+ if (set_cpus_allowed_ptr(current, cpumask_of(cpu)))
+ continue;
+ /* Recheck banks in case CPUs don't all have the same */
+ if (cmci_supported(&banks))
+ cmci_discover(banks, 0);
}
- if (cpu_has(c, X86_FEATURE_TM2) && (l & MSR_IA32_MISC_ENABLE_TM2))
- tm2 = 1;
+ set_cpus_allowed_ptr(current, old);
+ free_cpumask_var(old);
+}
- /* Check whether a vector already exists */
- if (h & APIC_VECTOR_MASK) {
- printk(KERN_DEBUG
- "CPU%d: Thermal LVT vector (%#x) already installed\n",
- cpu, (h & APIC_VECTOR_MASK));
+/*
+ * Reenable CMCI on this CPU in case a CPU down failed.
+ */
+void cmci_reenable(void)
+{
+ int banks;
+ if (cmci_supported(&banks))
+ cmci_discover(banks, 0);
+}
+
+static void intel_init_cmci(void)
+{
+ int banks;
+
+ if (!cmci_supported(&banks))
return;
- }
- /* We'll mask the thermal vector in the lapic till we're ready: */
- h = THERMAL_APIC_VECTOR | APIC_DM_FIXED | APIC_LVT_MASKED;
- apic_write(APIC_LVTTHMR, h);
+ mce_threshold_vector = intel_threshold_interrupt;
+ cmci_discover(banks, 1);
+ /*
+ * For CPU #0 this runs with still disabled APIC, but that's
+ * ok because only the vector is set up. We still do another
+ * check for the banks later for CPU #0 just to make sure
+ * to not miss any events.
+ */
+ apic_write(APIC_LVTCMCI, THRESHOLD_APIC_VECTOR|APIC_DM_FIXED);
+ cmci_recheck();
+}
- rdmsr(MSR_IA32_THERM_INTERRUPT, l, h);
- wrmsr(MSR_IA32_THERM_INTERRUPT,
- l | (THERM_INT_LOW_ENABLE | THERM_INT_HIGH_ENABLE), h);
-
- intel_set_thermal_handler();
-
- rdmsr(MSR_IA32_MISC_ENABLE, l, h);
- wrmsr(MSR_IA32_MISC_ENABLE, l | MSR_IA32_MISC_ENABLE_TM1, h);
-
- /* Unmask the thermal vector: */
- l = apic_read(APIC_LVTTHMR);
- apic_write(APIC_LVTTHMR, l & ~APIC_LVT_MASKED);
-
- printk(KERN_INFO "CPU%d: Thermal monitoring enabled (%s)\n",
- cpu, tm2 ? "TM2" : "TM1");
-
- /* enable thermal throttle processing */
- atomic_set(&therm_throt_en, 1);
+void mce_intel_feature_init(struct cpuinfo_x86 *c)
+{
+ intel_init_thermal(c);
+ intel_init_cmci();
}
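
cmci_discover() above claims banks with the write-and-read-back rule from the SDM: a bank whose MCi_CTL2 already has CMCI_EN set belongs to some other CPU, otherwise this CPU writes CMCI_EN plus a threshold and owns the bank only if the bit sticks on read-back; banks where it does not stick stay with the polling timer. A stand-alone sketch of that decision over a simulated MSR array (the bank layout and wrmsr_ctl2() behaviour are made up for illustration):

#include <stdint.h>
#include <stdio.h>

#define CMCI_EN        (1ULL << 30)
#define CMCI_THRESHOLD 1
#define NBANKS         6

/* Simulated MCi_CTL2 registers: bank 1 already has CMCI_EN set, i.e. it
 * is owned by another CPU sharing the bank. */
static uint64_t ctl2[NBANKS] = { 0, CMCI_EN, 0, 0, 0, 0 };

/* Banks 4 and 5 don't implement CMCI in this model: writes drop the bit. */
static void wrmsr_ctl2(int bank, uint64_t val)
{
    if (bank >= 4)
        val &= ~CMCI_EN;
    ctl2[bank] = val;
}

int main(void)
{
    for (int i = 0; i < NBANKS; i++) {
        uint64_t val = ctl2[i];

        if (val & CMCI_EN) {                    /* already owned elsewhere */
            printf("bank %d: SHD (shared, owned by another CPU)\n", i);
            continue;
        }

        wrmsr_ctl2(i, val | CMCI_EN | CMCI_THRESHOLD);
        if (ctl2[i] & CMCI_EN)                  /* did the enable bit stick? */
            printf("bank %d: CMCI (owned by this CPU)\n", i);
        else
            printf("bank %d: poll only\n", i);
    }
    return 0;
}
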
diff --git a/arch/x86/kernel/cpu/mcheck/mce_intel_64.c b/arch/x86/kernel/cpu/mcheck/mce_intel_64.c
deleted file mode 100644
index f2ef695..0000000
--- a/arch/x86/kernel/cpu/mcheck/mce_intel_64.c
+++ /dev/null
@@ -1,248 +0,0 @@
-/*
- * Intel specific MCE features.
- * Copyright 2004 Zwane Mwaikambo <zwane@linuxpower.ca>
- * Copyright (C) 2008, 2009 Intel Corporation
- * Author: Andi Kleen
- */
-
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/percpu.h>
-#include <asm/processor.h>
-#include <asm/apic.h>
-#include <asm/msr.h>
-#include <asm/mce.h>
-#include <asm/hw_irq.h>
-#include <asm/idle.h>
-#include <asm/therm_throt.h>
-
-#include "mce.h"
-
-asmlinkage void smp_thermal_interrupt(void)
-{
- __u64 msr_val;
-
- ack_APIC_irq();
-
- exit_idle();
- irq_enter();
-
- rdmsrl(MSR_IA32_THERM_STATUS, msr_val);
- if (therm_throt_process(msr_val & THERM_STATUS_PROCHOT))
- mce_log_therm_throt_event(msr_val);
-
- inc_irq_stat(irq_thermal_count);
- irq_exit();
-}
-
-/*
- * Support for Intel Correct Machine Check Interrupts. This allows
- * the CPU to raise an interrupt when a corrected machine check happened.
- * Normally we pick those up using a regular polling timer.
- * Also supports reliable discovery of shared banks.
- */
-
-static DEFINE_PER_CPU(mce_banks_t, mce_banks_owned);
-
-/*
- * cmci_discover_lock protects against parallel discovery attempts
- * which could race against each other.
- */
-static DEFINE_SPINLOCK(cmci_discover_lock);
-
-#define CMCI_THRESHOLD 1
-
-static int cmci_supported(int *banks)
-{
- u64 cap;
-
- if (mce_cmci_disabled || mce_ignore_ce)
- return 0;
-
- /*
- * Vendor check is not strictly needed, but the initial
- * initialization is vendor keyed and this
- * makes sure none of the backdoors are entered otherwise.
- */
- if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
- return 0;
- if (!cpu_has_apic || lapic_get_maxlvt() < 6)
- return 0;
- rdmsrl(MSR_IA32_MCG_CAP, cap);
- *banks = min_t(unsigned, MAX_NR_BANKS, cap & 0xff);
- return !!(cap & MCG_CMCI_P);
-}
-
-/*
- * The interrupt handler. This is called on every event.
- * Just call the poller directly to log any events.
- * This could in theory increase the threshold under high load,
- * but doesn't for now.
- */
-static void intel_threshold_interrupt(void)
-{
- machine_check_poll(MCP_TIMESTAMP, &__get_cpu_var(mce_banks_owned));
- mce_notify_irq();
-}
-
-static void print_update(char *type, int *hdr, int num)
-{
- if (*hdr == 0)
- printk(KERN_INFO "CPU %d MCA banks", smp_processor_id());
- *hdr = 1;
- printk(KERN_CONT " %s:%d", type, num);
-}
-
-/*
- * Enable CMCI (Corrected Machine Check Interrupt) for available MCE banks
- * on this CPU. Use the algorithm recommended in the SDM to discover shared
- * banks.
- */
-static void cmci_discover(int banks, int boot)
-{
- unsigned long *owned = (void *)&__get_cpu_var(mce_banks_owned);
- unsigned long flags;
- int hdr = 0;
- int i;
-
- spin_lock_irqsave(&cmci_discover_lock, flags);
- for (i = 0; i < banks; i++) {
- u64 val;
-
- if (test_bit(i, owned))
- continue;
-
- rdmsrl(MSR_IA32_MC0_CTL2 + i, val);
-
- /* Already owned by someone else? */
- if (val & CMCI_EN) {
- if (test_and_clear_bit(i, owned) || boot)
- print_update("SHD", &hdr, i);
- __clear_bit(i, __get_cpu_var(mce_poll_banks));
- continue;
- }
-
- val |= CMCI_EN | CMCI_THRESHOLD;
- wrmsrl(MSR_IA32_MC0_CTL2 + i, val);
- rdmsrl(MSR_IA32_MC0_CTL2 + i, val);
-
- /* Did the enable bit stick? -- the bank supports CMCI */
- if (val & CMCI_EN) {
- if (!test_and_set_bit(i, owned) || boot)
- print_update("CMCI", &hdr, i);
- __clear_bit(i, __get_cpu_var(mce_poll_banks));
- } else {
- WARN_ON(!test_bit(i, __get_cpu_var(mce_poll_banks)));
- }
- }
- spin_unlock_irqrestore(&cmci_discover_lock, flags);
- if (hdr)
- printk(KERN_CONT "\n");
-}
-
-/*
- * Just in case we missed an event during initialization check
- * all the CMCI owned banks.
- */
-void cmci_recheck(void)
-{
- unsigned long flags;
- int banks;
-
- if (!mce_available(&current_cpu_data) || !cmci_supported(&banks))
- return;
- local_irq_save(flags);
- machine_check_poll(MCP_TIMESTAMP, &__get_cpu_var(mce_banks_owned));
- local_irq_restore(flags);
-}
-
-/*
- * Disable CMCI on this CPU for all banks it owns when it goes down.
- * This allows other CPUs to claim the banks on rediscovery.
- */
-void cmci_clear(void)
-{
- unsigned long flags;
- int i;
- int banks;
- u64 val;
-
- if (!cmci_supported(&banks))
- return;
- spin_lock_irqsave(&cmci_discover_lock, flags);
- for (i = 0; i < banks; i++) {
- if (!test_bit(i, __get_cpu_var(mce_banks_owned)))
- continue;
- /* Disable CMCI */
- rdmsrl(MSR_IA32_MC0_CTL2 + i, val);
- val &= ~(CMCI_EN|CMCI_THRESHOLD_MASK);
- wrmsrl(MSR_IA32_MC0_CTL2 + i, val);
- __clear_bit(i, __get_cpu_var(mce_banks_owned));
- }
- spin_unlock_irqrestore(&cmci_discover_lock, flags);
-}
-
-/*
- * After a CPU went down cycle through all the others and rediscover
- * Must run in process context.
- */
-void cmci_rediscover(int dying)
-{
- int banks;
- int cpu;
- cpumask_var_t old;
-
- if (!cmci_supported(&banks))
- return;
- if (!alloc_cpumask_var(&old, GFP_KERNEL))
- return;
- cpumask_copy(old, &current->cpus_allowed);
-
- for_each_online_cpu(cpu) {
- if (cpu == dying)
- continue;
- if (set_cpus_allowed_ptr(current, cpumask_of(cpu)))
- continue;
- /* Recheck banks in case CPUs don't all have the same */
- if (cmci_supported(&banks))
- cmci_discover(banks, 0);
- }
-
- set_cpus_allowed_ptr(current, old);
- free_cpumask_var(old);
-}
-
-/*
- * Reenable CMCI on this CPU in case a CPU down failed.
- */
-void cmci_reenable(void)
-{
- int banks;
- if (cmci_supported(&banks))
- cmci_discover(banks, 0);
-}
-
-static void intel_init_cmci(void)
-{
- int banks;
-
- if (!cmci_supported(&banks))
- return;
-
- mce_threshold_vector = intel_threshold_interrupt;
- cmci_discover(banks, 1);
- /*
- * For CPU #0 this runs with still disabled APIC, but that's
- * ok because only the vector is set up. We still do another
- * check for the banks later for CPU #0 just to make sure
- * to not miss any events.
- */
- apic_write(APIC_LVTCMCI, THRESHOLD_APIC_VECTOR|APIC_DM_FIXED);
- cmci_recheck();
-}
-
-void mce_intel_feature_init(struct cpuinfo_x86 *c)
-{
- intel_init_thermal(c);
- intel_init_cmci();
-}
diff --git a/arch/x86/kernel/cpu/mcheck/non-fatal.c b/arch/x86/kernel/cpu/mcheck/non-fatal.c
index 70b7104..f5f2d6f 100644
--- a/arch/x86/kernel/cpu/mcheck/non-fatal.c
+++ b/arch/x86/kernel/cpu/mcheck/non-fatal.c
@@ -17,10 +17,9 @@
#include <asm/processor.h>
#include <asm/system.h>
+#include <asm/mce.h>
#include <asm/msr.h>
-#include "mce.h"
-
static int firstbank;
#define MCE_RATE (15*HZ) /* timer rate is 15s */
diff --git a/arch/x86/kernel/cpu/mcheck/p4.c b/arch/x86/kernel/cpu/mcheck/p4.c
index 82cee10..4482aea 100644
--- a/arch/x86/kernel/cpu/mcheck/p4.c
+++ b/arch/x86/kernel/cpu/mcheck/p4.c
@@ -1,21 +1,15 @@
/*
* P4 specific Machine Check Exception Reporting
*/
-
-#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/smp.h>
-#include <asm/therm_throt.h>
#include <asm/processor.h>
-#include <asm/system.h>
-#include <asm/apic.h>
+#include <asm/mce.h>
#include <asm/msr.h>
-#include "mce.h"
-
/* as supported by the P4/Xeon family */
struct intel_mce_extended_msrs {
u32 eax;
@@ -33,46 +27,6 @@
static int mce_num_extended_msrs;
-
-#ifdef CONFIG_X86_MCE_P4THERMAL
-
-static void unexpected_thermal_interrupt(struct pt_regs *regs)
-{
- printk(KERN_ERR "CPU%d: Unexpected LVT TMR interrupt!\n",
- smp_processor_id());
- add_taint(TAINT_MACHINE_CHECK);
-}
-
-/* P4/Xeon Thermal transition interrupt handler: */
-static void intel_thermal_interrupt(struct pt_regs *regs)
-{
- __u64 msr_val;
-
- ack_APIC_irq();
-
- rdmsrl(MSR_IA32_THERM_STATUS, msr_val);
- therm_throt_process(msr_val & THERM_STATUS_PROCHOT);
-}
-
-/* Thermal interrupt handler for this CPU setup: */
-static void (*vendor_thermal_interrupt)(struct pt_regs *regs) =
- unexpected_thermal_interrupt;
-
-void smp_thermal_interrupt(struct pt_regs *regs)
-{
- irq_enter();
- vendor_thermal_interrupt(regs);
- __get_cpu_var(irq_stat).irq_thermal_count++;
- irq_exit();
-}
-
-void intel_set_thermal_handler(void)
-{
- vendor_thermal_interrupt = intel_thermal_interrupt;
-}
-
-#endif /* CONFIG_X86_MCE_P4THERMAL */
-
/* P4/Xeon Extended MCE MSR retrieval, return 0 if unsupported */
static void intel_get_extended_msrs(struct intel_mce_extended_msrs *r)
{
diff --git a/arch/x86/kernel/cpu/mcheck/p5.c b/arch/x86/kernel/cpu/mcheck/p5.c
index 015f481..5c0e653 100644
--- a/arch/x86/kernel/cpu/mcheck/p5.c
+++ b/arch/x86/kernel/cpu/mcheck/p5.c
@@ -10,12 +10,11 @@
#include <asm/processor.h>
#include <asm/system.h>
+#include <asm/mce.h>
#include <asm/msr.h>
-#include "mce.h"
-
/* By default disabled */
-int mce_p5_enable;
+int mce_p5_enabled __read_mostly;
/* Machine check handler for Pentium class Intel CPUs: */
static void pentium_machine_check(struct pt_regs *regs, long error_code)
@@ -43,16 +42,14 @@
{
u32 l, h;
+ /* Default P5 to off as its often misconnected: */
+ if (!mce_p5_enabled)
+ return;
+
/* Check for MCE support: */
if (!cpu_has(c, X86_FEATURE_MCE))
return;
-#ifdef CONFIG_X86_OLD_MCE
- /* Default P5 to off as its often misconnected: */
- if (mce_disabled != -1)
- return;
-#endif
-
machine_check_vector = pentium_machine_check;
/* Make sure the vector pointer is visible before we enable MCEs: */
wmb();
diff --git a/arch/x86/kernel/cpu/mcheck/p6.c b/arch/x86/kernel/cpu/mcheck/p6.c
index 43c24e6..01e4f81 100644
--- a/arch/x86/kernel/cpu/mcheck/p6.c
+++ b/arch/x86/kernel/cpu/mcheck/p6.c
@@ -10,10 +10,9 @@
#include <asm/processor.h>
#include <asm/system.h>
+#include <asm/mce.h>
#include <asm/msr.h>
-#include "mce.h"
-
/* Machine Check Handler For PII/PIII */
static void intel_machine_check(struct pt_regs *regs, long error_code)
{
diff --git a/arch/x86/kernel/cpu/mcheck/therm_throt.c b/arch/x86/kernel/cpu/mcheck/therm_throt.c
index 7b1ae2e..bff8dd1 100644
--- a/arch/x86/kernel/cpu/mcheck/therm_throt.c
+++ b/arch/x86/kernel/cpu/mcheck/therm_throt.c
@@ -13,13 +13,23 @@
* Credits: Adapted from Zwane Mwaikambo's original code in mce_intel.c.
* Inspired by Ross Biro's and Al Borchers' counter code.
*/
+#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/jiffies.h>
+#include <linux/kernel.h>
#include <linux/percpu.h>
#include <linux/sysdev.h>
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/smp.h>
#include <linux/cpu.h>
-#include <asm/therm_throt.h>
+#include <asm/processor.h>
+#include <asm/system.h>
+#include <asm/apic.h>
+#include <asm/idle.h>
+#include <asm/mce.h>
+#include <asm/msr.h>
/* How long to wait between reporting thermal events */
#define CHECK_INTERVAL (300 * HZ)
@@ -27,7 +37,7 @@
static DEFINE_PER_CPU(__u64, next_check) = INITIAL_JIFFIES;
static DEFINE_PER_CPU(unsigned long, thermal_throttle_count);
-atomic_t therm_throt_en = ATOMIC_INIT(0);
+static atomic_t therm_throt_en = ATOMIC_INIT(0);
#ifdef CONFIG_SYSFS
#define define_therm_throt_sysdev_one_ro(_name) \
@@ -82,7 +92,7 @@
* 1 : Event should be logged further, and a message has been
* printed to the syslog.
*/
-int therm_throt_process(int curr)
+static int therm_throt_process(int curr)
{
unsigned int cpu = smp_processor_id();
__u64 tmp_jiffs = get_jiffies_64();
@@ -186,6 +196,94 @@
return 0;
}
-
device_initcall(thermal_throttle_init_device);
+
#endif /* CONFIG_SYSFS */
+
+/* Thermal transition interrupt handler */
+static void intel_thermal_interrupt(void)
+{
+ __u64 msr_val;
+
+ rdmsrl(MSR_IA32_THERM_STATUS, msr_val);
+ if (therm_throt_process(msr_val & THERM_STATUS_PROCHOT))
+ mce_log_therm_throt_event(msr_val);
+}
+
+static void unexpected_thermal_interrupt(void)
+{
+ printk(KERN_ERR "CPU%d: Unexpected LVT TMR interrupt!\n",
+ smp_processor_id());
+ add_taint(TAINT_MACHINE_CHECK);
+}
+
+static void (*smp_thermal_vector)(void) = unexpected_thermal_interrupt;
+
+asmlinkage void smp_thermal_interrupt(struct pt_regs *regs)
+{
+ exit_idle();
+ irq_enter();
+ inc_irq_stat(irq_thermal_count);
+ smp_thermal_vector();
+ irq_exit();
+ /* Ack only at the end to avoid potential reentry */
+ ack_APIC_irq();
+}
+
+void intel_init_thermal(struct cpuinfo_x86 *c)
+{
+ unsigned int cpu = smp_processor_id();
+ int tm2 = 0;
+ u32 l, h;
+
+ /* Thermal monitoring depends on ACPI and clock modulation*/
+ if (!cpu_has(c, X86_FEATURE_ACPI) || !cpu_has(c, X86_FEATURE_ACC))
+ return;
+
+ /*
+ * First check if its enabled already, in which case there might
+ * be some SMM goo which handles it, so we can't even put a handler
+ * since it might be delivered via SMI already:
+ */
+ rdmsr(MSR_IA32_MISC_ENABLE, l, h);
+ h = apic_read(APIC_LVTTHMR);
+ if ((l & MSR_IA32_MISC_ENABLE_TM1) && (h & APIC_DM_SMI)) {
+ printk(KERN_DEBUG
+ "CPU%d: Thermal monitoring handled by SMI\n", cpu);
+ return;
+ }
+
+ if (cpu_has(c, X86_FEATURE_TM2) && (l & MSR_IA32_MISC_ENABLE_TM2))
+ tm2 = 1;
+
+ /* Check whether a vector already exists */
+ if (h & APIC_VECTOR_MASK) {
+ printk(KERN_DEBUG
+ "CPU%d: Thermal LVT vector (%#x) already installed\n",
+ cpu, (h & APIC_VECTOR_MASK));
+ return;
+ }
+
+ /* We'll mask the thermal vector in the lapic till we're ready: */
+ h = THERMAL_APIC_VECTOR | APIC_DM_FIXED | APIC_LVT_MASKED;
+ apic_write(APIC_LVTTHMR, h);
+
+ rdmsr(MSR_IA32_THERM_INTERRUPT, l, h);
+ wrmsr(MSR_IA32_THERM_INTERRUPT,
+ l | (THERM_INT_LOW_ENABLE | THERM_INT_HIGH_ENABLE), h);
+
+ smp_thermal_vector = intel_thermal_interrupt;
+
+ rdmsr(MSR_IA32_MISC_ENABLE, l, h);
+ wrmsr(MSR_IA32_MISC_ENABLE, l | MSR_IA32_MISC_ENABLE_TM1, h);
+
+ /* Unmask the thermal vector: */
+ l = apic_read(APIC_LVTTHMR);
+ apic_write(APIC_LVTTHMR, l & ~APIC_LVT_MASKED);
+
+ printk(KERN_INFO "CPU%d: Thermal monitoring enabled (%s)\n",
+ cpu, tm2 ? "TM2" : "TM1");
+
+ /* enable thermal throttle processing */
+ atomic_set(&therm_throt_en, 1);
+}
diff --git a/arch/x86/kernel/cpu/mcheck/winchip.c b/arch/x86/kernel/cpu/mcheck/winchip.c
index 81b0248..54060f5 100644
--- a/arch/x86/kernel/cpu/mcheck/winchip.c
+++ b/arch/x86/kernel/cpu/mcheck/winchip.c
@@ -9,10 +9,9 @@
#include <asm/processor.h>
#include <asm/system.h>
+#include <asm/mce.h>
#include <asm/msr.h>
-#include "mce.h"
-
/* Machine check handler for WinChip C6: */
static void winchip_machine_check(struct pt_regs *regs, long error_code)
{
diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c
index 275bc14..76dfef2 100644
--- a/arch/x86/kernel/cpu/perf_counter.c
+++ b/arch/x86/kernel/cpu/perf_counter.c
@@ -19,6 +19,7 @@
#include <linux/kdebug.h>
#include <linux/sched.h>
#include <linux/uaccess.h>
+#include <linux/highmem.h>
#include <asm/apic.h>
#include <asm/stacktrace.h>
@@ -389,23 +390,23 @@
return event & CORE_EVNTSEL_MASK;
}
-static const u64 amd_0f_hw_cache_event_ids
+static const u64 amd_hw_cache_event_ids
[PERF_COUNT_HW_CACHE_MAX]
[PERF_COUNT_HW_CACHE_OP_MAX]
[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
[ C(L1D) ] = {
[ C(OP_READ) ] = {
- [ C(RESULT_ACCESS) ] = 0,
- [ C(RESULT_MISS) ] = 0,
+ [ C(RESULT_ACCESS) ] = 0x0040, /* Data Cache Accesses */
+ [ C(RESULT_MISS) ] = 0x0041, /* Data Cache Misses */
},
[ C(OP_WRITE) ] = {
- [ C(RESULT_ACCESS) ] = 0,
+ [ C(RESULT_ACCESS) ] = 0x0042, /* Data Cache Refills from L2 */
[ C(RESULT_MISS) ] = 0,
},
[ C(OP_PREFETCH) ] = {
- [ C(RESULT_ACCESS) ] = 0,
- [ C(RESULT_MISS) ] = 0,
+ [ C(RESULT_ACCESS) ] = 0x0267, /* Data Prefetcher :attempts */
+ [ C(RESULT_MISS) ] = 0x0167, /* Data Prefetcher :cancelled */
},
},
[ C(L1I ) ] = {
@@ -418,17 +419,17 @@
[ C(RESULT_MISS) ] = -1,
},
[ C(OP_PREFETCH) ] = {
- [ C(RESULT_ACCESS) ] = 0,
+ [ C(RESULT_ACCESS) ] = 0x014B, /* Prefetch Instructions :Load */
[ C(RESULT_MISS) ] = 0,
},
},
[ C(LL ) ] = {
[ C(OP_READ) ] = {
- [ C(RESULT_ACCESS) ] = 0,
- [ C(RESULT_MISS) ] = 0,
+ [ C(RESULT_ACCESS) ] = 0x037D, /* Requests to L2 Cache :IC+DC */
+ [ C(RESULT_MISS) ] = 0x037E, /* L2 Cache Misses : IC+DC */
},
[ C(OP_WRITE) ] = {
- [ C(RESULT_ACCESS) ] = 0,
+ [ C(RESULT_ACCESS) ] = 0x017F, /* L2 Fill/Writeback */
[ C(RESULT_MISS) ] = 0,
},
[ C(OP_PREFETCH) ] = {
@@ -438,8 +439,8 @@
},
[ C(DTLB) ] = {
[ C(OP_READ) ] = {
- [ C(RESULT_ACCESS) ] = 0,
- [ C(RESULT_MISS) ] = 0,
+ [ C(RESULT_ACCESS) ] = 0x0040, /* Data Cache Accesses */
+ [ C(RESULT_MISS) ] = 0x0046, /* L1 DTLB and L2 DLTB Miss */
},
[ C(OP_WRITE) ] = {
[ C(RESULT_ACCESS) ] = 0,
@@ -1223,6 +1224,8 @@
if (!intel_pmu_save_and_restart(counter))
continue;
+ data.period = counter->hw.last_period;
+
if (perf_counter_overflow(counter, 1, &data))
intel_pmu_disable_counter(&counter->hw, bit);
}
@@ -1459,18 +1462,16 @@
static int amd_pmu_init(void)
{
+ /* Performance-monitoring supported from K7 and later: */
+ if (boot_cpu_data.x86 < 6)
+ return -ENODEV;
+
x86_pmu = amd_pmu;
- switch (boot_cpu_data.x86) {
- case 0x0f:
- case 0x10:
- case 0x11:
- memcpy(hw_cache_event_ids, amd_0f_hw_cache_event_ids,
- sizeof(hw_cache_event_ids));
+ /* Events are common for all AMDs */
+ memcpy(hw_cache_event_ids, amd_hw_cache_event_ids,
+ sizeof(hw_cache_event_ids));
- pr_cont("AMD Family 0f/10/11 events, ");
- break;
- }
return 0;
}
@@ -1554,9 +1555,9 @@
*/
static inline
-void callchain_store(struct perf_callchain_entry *entry, unsigned long ip)
+void callchain_store(struct perf_callchain_entry *entry, u64 ip)
{
- if (entry->nr < MAX_STACK_DEPTH)
+ if (entry->nr < PERF_MAX_STACK_DEPTH)
entry->ip[entry->nr++] = ip;
}
@@ -1577,8 +1578,8 @@
static int backtrace_stack(void *data, char *name)
{
- /* Don't bother with IRQ stacks for now */
- return -1;
+ /* Process all stacks: */
+ return 0;
}
static void backtrace_address(void *data, unsigned long addr, int reliable)
@@ -1596,47 +1597,59 @@
.address = backtrace_address,
};
+#include "../dumpstack.h"
+
static void
perf_callchain_kernel(struct pt_regs *regs, struct perf_callchain_entry *entry)
{
- unsigned long bp;
- char *stack;
- int nr = entry->nr;
+ callchain_store(entry, PERF_CONTEXT_KERNEL);
+ callchain_store(entry, regs->ip);
- callchain_store(entry, instruction_pointer(regs));
-
- stack = ((char *)regs + sizeof(struct pt_regs));
-#ifdef CONFIG_FRAME_POINTER
- bp = frame_pointer(regs);
-#else
- bp = 0;
-#endif
-
- dump_trace(NULL, regs, (void *)stack, bp, &backtrace_ops, entry);
-
- entry->kernel = entry->nr - nr;
+ dump_trace(NULL, regs, NULL, 0, &backtrace_ops, entry);
}
+/*
+ * best effort, GUP based copy_from_user() that assumes IRQ or NMI context
+ */
+static unsigned long
+copy_from_user_nmi(void *to, const void __user *from, unsigned long n)
+{
+ unsigned long offset, addr = (unsigned long)from;
+ int type = in_nmi() ? KM_NMI : KM_IRQ0;
+ unsigned long size, len = 0;
+ struct page *page;
+ void *map;
+ int ret;
-struct stack_frame {
- const void __user *next_fp;
- unsigned long return_address;
-};
+ do {
+ ret = __get_user_pages_fast(addr, 1, 0, &page);
+ if (!ret)
+ break;
+
+ offset = addr & (PAGE_SIZE - 1);
+ size = min(PAGE_SIZE - offset, n - len);
+
+ map = kmap_atomic(page, type);
+ memcpy(to, map+offset, size);
+ kunmap_atomic(map, type);
+ put_page(page);
+
+ len += size;
+ to += size;
+ addr += size;
+
+ } while (len < n);
+
+ return len;
+}
static int copy_stack_frame(const void __user *fp, struct stack_frame *frame)
{
- int ret;
+ unsigned long bytes;
- if (!access_ok(VERIFY_READ, fp, sizeof(*frame)))
- return 0;
+ bytes = copy_from_user_nmi(frame, fp, sizeof(*frame));
- ret = 1;
- pagefault_disable();
- if (__copy_from_user_inatomic(frame, fp, sizeof(*frame)))
- ret = 0;
- pagefault_enable();
-
- return ret;
+ return bytes == sizeof(*frame);
}
static void
@@ -1644,28 +1657,28 @@
{
struct stack_frame frame;
const void __user *fp;
- int nr = entry->nr;
- regs = (struct pt_regs *)current->thread.sp0 - 1;
- fp = (void __user *)regs->bp;
+ if (!user_mode(regs))
+ regs = task_pt_regs(current);
+ fp = (void __user *)regs->bp;
+
+ callchain_store(entry, PERF_CONTEXT_USER);
callchain_store(entry, regs->ip);
- while (entry->nr < MAX_STACK_DEPTH) {
- frame.next_fp = NULL;
+ while (entry->nr < PERF_MAX_STACK_DEPTH) {
+ frame.next_frame = NULL;
frame.return_address = 0;
if (!copy_stack_frame(fp, &frame))
break;
- if ((unsigned long)fp < user_stack_pointer(regs))
+ if ((unsigned long)fp < regs->sp)
break;
callchain_store(entry, frame.return_address);
- fp = frame.next_fp;
+ fp = frame.next_frame;
}
-
- entry->user = entry->nr - nr;
}
static void
@@ -1701,9 +1714,6 @@
entry = &__get_cpu_var(irq_entry);
entry->nr = 0;
- entry->hv = 0;
- entry->kernel = 0;
- entry->user = 0;
perf_do_callchain(regs, entry);
diff --git a/arch/x86/kernel/cpu/perfctr-watchdog.c b/arch/x86/kernel/cpu/perfctr-watchdog.c
index d6f5b9f..5c481f6 100644
--- a/arch/x86/kernel/cpu/perfctr-watchdog.c
+++ b/arch/x86/kernel/cpu/perfctr-watchdog.c
@@ -716,11 +716,15 @@
wd_ops = &k7_wd_ops;
break;
case X86_VENDOR_INTEL:
- /*
- * Work around Core Duo (Yonah) errata AE49 where perfctr1
- * doesn't have a working enable bit.
+ /* Work around where perfctr1 doesn't have a working enable
+ * bit as described in the following errata:
+ * AE49 Core Duo and Intel Core Solo 65 nm
+ * AN49 Intel Pentium Dual-Core
+ * AF49 Dual-Core Intel Xeon Processor LV
*/
- if (boot_cpu_data.x86 == 6 && boot_cpu_data.x86_model == 14) {
+ if ((boot_cpu_data.x86 == 6 && boot_cpu_data.x86_model == 14) ||
+ ((boot_cpu_data.x86 == 6 && boot_cpu_data.x86_model == 15 &&
+ boot_cpu_data.x86_mask == 4))) {
intel_arch_wd_ops.perfctr = MSR_ARCH_PERFMON_PERFCTR0;
intel_arch_wd_ops.evntsel = MSR_ARCH_PERFMON_EVENTSEL0;
}
diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c
index ff95824..5e409dc 100644
--- a/arch/x86/kernel/crash.c
+++ b/arch/x86/kernel/crash.c
@@ -27,6 +27,7 @@
#include <asm/cpu.h>
#include <asm/reboot.h>
#include <asm/virtext.h>
+#include <asm/iommu.h>
#if defined(CONFIG_SMP) && defined(CONFIG_X86_LOCAL_APIC)
@@ -103,5 +104,10 @@
#ifdef CONFIG_HPET_TIMER
hpet_disable();
#endif
+
+#ifdef CONFIG_X86_64
+ pci_iommu_shutdown();
+#endif
+
crash_save_cpu(regs, safe_smp_processor_id());
}
diff --git a/arch/x86/kernel/efi.c b/arch/x86/kernel/efi.c
index 1736acc..96f7ac0 100644
--- a/arch/x86/kernel/efi.c
+++ b/arch/x86/kernel/efi.c
@@ -240,10 +240,35 @@
unsigned long long size = md->num_pages << EFI_PAGE_SHIFT;
int e820_type;
- if (md->attribute & EFI_MEMORY_WB)
- e820_type = E820_RAM;
- else
+ switch (md->type) {
+ case EFI_LOADER_CODE:
+ case EFI_LOADER_DATA:
+ case EFI_BOOT_SERVICES_CODE:
+ case EFI_BOOT_SERVICES_DATA:
+ case EFI_CONVENTIONAL_MEMORY:
+ if (md->attribute & EFI_MEMORY_WB)
+ e820_type = E820_RAM;
+ else
+ e820_type = E820_RESERVED;
+ break;
+ case EFI_ACPI_RECLAIM_MEMORY:
+ e820_type = E820_ACPI;
+ break;
+ case EFI_ACPI_MEMORY_NVS:
+ e820_type = E820_NVS;
+ break;
+ case EFI_UNUSABLE_MEMORY:
+ e820_type = E820_UNUSABLE;
+ break;
+ default:
+ /*
+ * EFI_RESERVED_TYPE EFI_RUNTIME_SERVICES_CODE
+ * EFI_RUNTIME_SERVICES_DATA EFI_MEMORY_MAPPED_IO
+ * EFI_MEMORY_MAPPED_IO_PORT_SPACE EFI_PAL_CODE
+ */
e820_type = E820_RESERVED;
+ break;
+ }
e820_add_region(start, size, e820_type);
}
sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
index c929add..c097e7d 100644
--- a/arch/x86/kernel/entry_32.S
+++ b/arch/x86/kernel/entry_32.S
@@ -48,7 +48,6 @@
#include <asm/segment.h>
#include <asm/smp.h>
#include <asm/page_types.h>
-#include <asm/desc.h>
#include <asm/percpu.h>
#include <asm/dwarf2.h>
#include <asm/processor-flags.h>
@@ -84,7 +83,7 @@
#define preempt_stop(clobbers) DISABLE_INTERRUPTS(clobbers); TRACE_IRQS_OFF
#else
#define preempt_stop(clobbers)
-#define resume_kernel restore_nocheck
+#define resume_kernel restore_all
#endif
.macro TRACE_IRQS_IRET
@@ -372,7 +371,7 @@
ENTRY(resume_kernel)
DISABLE_INTERRUPTS(CLBR_ANY)
cmpl $0,TI_preempt_count(%ebp) # non-zero preempt_count ?
- jnz restore_nocheck
+ jnz restore_all
need_resched:
movl TI_flags(%ebp), %ecx # need_resched set ?
testb $_TIF_NEED_RESCHED, %cl
@@ -540,6 +539,8 @@
jne syscall_exit_work
restore_all:
+ TRACE_IRQS_IRET
+restore_all_notrace:
movl PT_EFLAGS(%esp), %eax # mix EFLAGS, SS and CS
# Warning: PT_OLDSS(%esp) contains the wrong/random values if we
# are returning to the kernel.
@@ -551,8 +552,6 @@
CFI_REMEMBER_STATE
je ldt_ss # returning to user-space with LDT SS
restore_nocheck:
- TRACE_IRQS_IRET
-restore_nocheck_notrace:
RESTORE_REGS 4 # skip orig_eax/error_code
CFI_ADJUST_CFA_OFFSET -4
irq_return:
@@ -588,22 +587,34 @@
jne restore_nocheck
#endif
- /* If returning to userspace with 16bit stack,
- * try to fix the higher word of ESP, as the CPU
- * won't restore it.
- * This is an "official" bug of all the x86-compatible
- * CPUs, which we can try to work around to make
- * dosemu and wine happy. */
- movl PT_OLDESP(%esp), %eax
- movl %esp, %edx
- call patch_espfix_desc
+/*
+ * Setup and switch to ESPFIX stack
+ *
+ * We're returning to userspace with a 16 bit stack. The CPU will not
+ * restore the high word of ESP for us on executing iret... This is an
+ * "official" bug of all the x86-compatible CPUs, which we can work
+ * around to make dosemu and wine happy. We do this by preloading the
+ * high word of ESP with the high word of the userspace ESP while
+ * compensating for the offset by changing to the ESPFIX segment with
+ * a base address that compensates for the difference.
+ */
+ mov %esp, %edx /* load kernel esp */
+ mov PT_OLDESP(%esp), %eax /* load userspace esp */
+ mov %dx, %ax /* eax: new kernel esp */
+ sub %eax, %edx /* offset (low word is 0) */
+ PER_CPU(gdt_page, %ebx)
+ shr $16, %edx
+ mov %dl, GDT_ENTRY_ESPFIX_SS * 8 + 4(%ebx) /* bits 16..23 */
+ mov %dh, GDT_ENTRY_ESPFIX_SS * 8 + 7(%ebx) /* bits 24..31 */
pushl $__ESPFIX_SS
CFI_ADJUST_CFA_OFFSET 4
- pushl %eax
+ push %eax /* new kernel esp */
CFI_ADJUST_CFA_OFFSET 4
+ /* Disable interrupts, but do not irqtrace this section: we
+ * will soon execute iret and the tracer was already set to
+ * the irqstate after the iret */
DISABLE_INTERRUPTS(CLBR_EAX)
- TRACE_IRQS_OFF
- lss (%esp), %esp
+ lss (%esp), %esp /* switch to espfix segment */
CFI_ADJUST_CFA_OFFSET -8
jmp restore_nocheck
CFI_ENDPROC
@@ -716,15 +727,24 @@
PTREGSCALL(vm86old)
.macro FIXUP_ESPFIX_STACK
- /* since we are on a wrong stack, we cant make it a C code :( */
+/*
+ * Switch back from the ESPFIX stack to the normal zero-based stack
+ *
+ * We can't call C functions using the ESPFIX stack. This code reads
+ * the high word of the segment base from the GDT and switches to the
+ * normal stack and adjusts ESP with the matching offset.
+ */
+ /* fixup the stack */
PER_CPU(gdt_page, %ebx)
- GET_DESC_BASE(GDT_ENTRY_ESPFIX_SS, %ebx, %eax, %ax, %al, %ah)
- addl %esp, %eax
+ mov GDT_ENTRY_ESPFIX_SS * 8 + 4(%ebx), %al /* bits 16..23 */
+ mov GDT_ENTRY_ESPFIX_SS * 8 + 7(%ebx), %ah /* bits 24..31 */
+ shl $16, %eax
+ addl %esp, %eax /* the adjusted stack pointer */
pushl $__KERNEL_DS
CFI_ADJUST_CFA_OFFSET 4
pushl %eax
CFI_ADJUST_CFA_OFFSET 4
- lss (%esp), %esp
+ lss (%esp), %esp /* switch to the normal stack segment */
CFI_ADJUST_CFA_OFFSET -8
.endm
.macro UNWIND_ESPFIX_STACK
@@ -1154,6 +1174,7 @@
pushl %edx
movl 0xc(%esp), %edx
lea 0x4(%ebp), %eax
+ movl (%ebp), %ecx
subl $MCOUNT_INSN_SIZE, %edx
call prepare_ftrace_return
popl %edx
@@ -1168,6 +1189,7 @@
pushl %eax
pushl %ecx
pushl %edx
+ movl %ebp, %eax
call ftrace_return_to_handler
movl %eax, 0xc(%esp)
popl %edx
@@ -1329,7 +1351,7 @@
xorl %edx,%edx # zero error code
movl %esp,%eax # pt_regs pointer
call do_nmi
- jmp restore_nocheck_notrace
+ jmp restore_all_notrace
CFI_ENDPROC
nmi_stack_fixup:
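
The ESPFIX block above computes a GDT segment base from the high word of the offset between the kernel and user stack pointers. A minimal C sketch of that arithmetic, not part of the patch and with illustrative names, may help when reading the assembly:

        static unsigned int espfix_segment_base(unsigned int kernel_esp,
                                                unsigned int user_esp)
        {
                /* new ESP: high word from userspace, low word from the kernel stack */
                unsigned int new_esp = (user_esp & 0xffff0000) | (kernel_esp & 0xffff);

                /*
                 * Segment base such that base + new_esp == kernel_esp; the low
                 * word is zero by construction, so only bits 16..31 need to be
                 * written into the GDT descriptor.
                 */
                return kernel_esp - new_esp;
        }
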
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
index de74f0a..c251be7 100644
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@ -135,6 +135,7 @@
leaq 8(%rbp), %rdi
movq 0x38(%rsp), %rsi
+ movq (%rbp), %rdx
subq $MCOUNT_INSN_SIZE, %rsi
call prepare_ftrace_return
@@ -150,6 +151,7 @@
/* Save the return values */
movq %rax, (%rsp)
movq %rdx, 8(%rsp)
+ movq %rbp, %rdi
call ftrace_return_to_handler
diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
index b79c553..d94e1ea 100644
--- a/arch/x86/kernel/ftrace.c
+++ b/arch/x86/kernel/ftrace.c
@@ -408,7 +408,8 @@
* Hook the return address and push it in the stack of return addrs
* in current thread info.
*/
-void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
+void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
+ unsigned long frame_pointer)
{
unsigned long old;
int faulted;
@@ -453,7 +454,8 @@
return;
}
- if (ftrace_push_return_trace(old, self_addr, &trace.depth) == -EBUSY) {
+ if (ftrace_push_return_trace(old, self_addr, &trace.depth,
+ frame_pointer) == -EBUSY) {
*parent = old;
return;
}
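
The frame pointer passed down here is recorded together with each hooked return address so that the exit path can verify it is unwinding the frame it actually hooked. A rough sketch of that style of check, assuming a hypothetical entry layout (this is not the actual ftrace_return_to_handler implementation):

        struct ret_entry {
                unsigned long ret;      /* saved original return address */
                unsigned long fp;       /* frame pointer recorded at entry */
        };

        static unsigned long pop_return(struct ret_entry *e, unsigned long current_fp)
        {
                if (e->fp != current_fp) {
                        /* entry and exit are out of sync; refuse to guess */
                        WARN_ON(1);
                        return 0;
                }
                return e->ret;
        }
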
diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
index dc5ed4b..8663afb 100644
--- a/arch/x86/kernel/head_32.S
+++ b/arch/x86/kernel/head_32.S
@@ -13,7 +13,6 @@
#include <asm/segment.h>
#include <asm/page_types.h>
#include <asm/pgtable_types.h>
-#include <asm/desc.h>
#include <asm/cache.h>
#include <asm/thread_info.h>
#include <asm/asm-offsets.h>
diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
index 54b29bb..fa54f78 100644
--- a/arch/x86/kernel/head_64.S
+++ b/arch/x86/kernel/head_64.S
@@ -12,7 +12,6 @@
#include <linux/linkage.h>
#include <linux/threads.h>
#include <linux/init.h>
-#include <asm/desc.h>
#include <asm/segment.h>
#include <asm/pgtable.h>
#include <asm/page.h>
diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c
index 81408b9..dedc2bd 100644
--- a/arch/x86/kernel/hpet.c
+++ b/arch/x86/kernel/hpet.c
@@ -510,7 +510,8 @@
{
if (request_irq(dev->irq, hpet_interrupt_handler,
- IRQF_DISABLED|IRQF_NOBALANCING, dev->name, dev))
+ IRQF_TIMER | IRQF_DISABLED | IRQF_NOBALANCING,
+ dev->name, dev))
return -1;
disable_irq(dev->irq);
diff --git a/arch/x86/kernel/pci-dma.c b/arch/x86/kernel/pci-dma.c
index 745579b..4763047 100644
--- a/arch/x86/kernel/pci-dma.c
+++ b/arch/x86/kernel/pci-dma.c
@@ -32,6 +32,8 @@
/* Set this to 1 if there is a HW IOMMU in the system */
int iommu_detected __read_mostly = 0;
+int iommu_pass_through;
+
dma_addr_t bad_dma_address __read_mostly = 0;
EXPORT_SYMBOL(bad_dma_address);
@@ -209,6 +211,10 @@
#ifdef CONFIG_SWIOTLB
if (!strncmp(p, "soft", 4))
swiotlb = 1;
+ if (!strncmp(p, "pt", 2)) {
+ iommu_pass_through = 1;
+ return 1;
+ }
#endif
gart_parse_options(p);
@@ -290,6 +296,8 @@
void pci_iommu_shutdown(void)
{
gart_iommu_shutdown();
+
+ amd_iommu_shutdown();
}
/* Must execute after PCI subsystem */
fs_initcall(pci_iommu_init);
diff --git a/arch/x86/kernel/pci-swiotlb.c b/arch/x86/kernel/pci-swiotlb.c
index a1712f2..6af96ee 100644
--- a/arch/x86/kernel/pci-swiotlb.c
+++ b/arch/x86/kernel/pci-swiotlb.c
@@ -71,7 +71,8 @@
{
/* don't initialize swiotlb if iommu=off (no_iommu=1) */
#ifdef CONFIG_X86_64
- if (!iommu_detected && !no_iommu && max_pfn > MAX_DMA32_PFN)
+ if ((!iommu_detected && !no_iommu && max_pfn > MAX_DMA32_PFN) ||
+ iommu_pass_through)
swiotlb = 1;
#endif
if (swiotlb_force)
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
index 5f935f0..a0f48f5 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -54,6 +54,7 @@
#include <asm/traps.h>
#include <asm/desc.h>
#include <asm/i387.h>
+#include <asm/mce.h>
#include <asm/mach_traps.h>
@@ -65,8 +66,6 @@
#include <asm/setup.h>
#include <asm/traps.h>
-#include "cpu/mcheck/mce.h"
-
asmlinkage int system_call(void);
/* Do we ignore FPU interrupts ? */
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index b0597ad..6e1a368 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -590,22 +590,26 @@
*/
DEFINE_PER_CPU(unsigned long, cyc2ns);
+DEFINE_PER_CPU(unsigned long long, cyc2ns_offset);
static void set_cyc2ns_scale(unsigned long cpu_khz, int cpu)
{
- unsigned long long tsc_now, ns_now;
+ unsigned long long tsc_now, ns_now, *offset;
unsigned long flags, *scale;
local_irq_save(flags);
sched_clock_idle_sleep_event();
scale = &per_cpu(cyc2ns, cpu);
+ offset = &per_cpu(cyc2ns_offset, cpu);
rdtscll(tsc_now);
ns_now = __cycles_2_ns(tsc_now);
- if (cpu_khz)
+ if (cpu_khz) {
*scale = (NSEC_PER_MSEC << CYC2NS_SCALE_FACTOR)/cpu_khz;
+ *offset = ns_now - (tsc_now * *scale >> CYC2NS_SCALE_FACTOR);
+ }
sched_clock_idle_wakeup_event(0);
local_irq_restore(flags);
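
With the new per-cpu cyc2ns_offset, a cycles-to-nanoseconds conversion takes the form ns = (cyc * scale >> CYC2NS_SCALE_FACTOR) + offset, which keeps sched_clock() continuous when the scale is recomputed after a frequency change. The consumer side is not shown in this hunk; a simplified sketch with the per-cpu accessors omitted:

        static inline unsigned long long cycles_to_ns(unsigned long long cyc,
                                                      unsigned long scale,
                                                      unsigned long long offset)
        {
                return ((cyc * scale) >> CYC2NS_SCALE_FACTOR) + offset;
        }
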
diff --git a/arch/x86/lib/usercopy_64.c b/arch/x86/lib/usercopy_64.c
index ec13cb5..b7c2849 100644
--- a/arch/x86/lib/usercopy_64.c
+++ b/arch/x86/lib/usercopy_64.c
@@ -127,7 +127,7 @@
long strnlen_user(const char __user *s, long n)
{
- if (!access_ok(VERIFY_READ, s, n))
+ if (!access_ok(VERIFY_READ, s, 1))
return 0;
return __strnlen_user(s, n);
}
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index baa0e86..78a5fff 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -952,8 +952,6 @@
tsk = current;
mm = tsk->mm;
- prefetchw(&mm->mmap_sem);
-
/* Get the faulting address: */
address = read_cr2();
@@ -963,6 +961,7 @@
*/
if (kmemcheck_active(regs))
kmemcheck_hide(regs);
+ prefetchw(&mm->mmap_sem);
if (unlikely(kmmio_fault(regs, address)))
return;
@@ -1114,7 +1113,7 @@
* make sure we exit gracefully rather than endlessly redo
* the fault:
*/
- fault = handle_mm_fault(mm, vma, address, write);
+ fault = handle_mm_fault(mm, vma, address, write ? FAULT_FLAG_WRITE : 0);
if (unlikely(fault & VM_FAULT_ERROR)) {
mm_fault_error(regs, error_code, address, fault);
diff --git a/arch/x86/mm/gup.c b/arch/x86/mm/gup.c
index f974809..71da1bc 100644
--- a/arch/x86/mm/gup.c
+++ b/arch/x86/mm/gup.c
@@ -14,7 +14,7 @@
static inline pte_t gup_get_pte(pte_t *ptep)
{
#ifndef CONFIG_X86_PAE
- return *ptep;
+ return ACCESS_ONCE(*ptep);
#else
/*
* With get_user_pages_fast, we walk down the pagetables without taking
@@ -219,6 +219,62 @@
return 1;
}
+/*
+ * Like get_user_pages_fast() except it's IRQ-safe in that it won't fall
+ * back to the regular GUP.
+ */
+int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
+ struct page **pages)
+{
+ struct mm_struct *mm = current->mm;
+ unsigned long addr, len, end;
+ unsigned long next;
+ unsigned long flags;
+ pgd_t *pgdp;
+ int nr = 0;
+
+ start &= PAGE_MASK;
+ addr = start;
+ len = (unsigned long) nr_pages << PAGE_SHIFT;
+ end = start + len;
+ if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
+ (void __user *)start, len)))
+ return 0;
+
+ /*
+ * XXX: batch / limit 'nr', to avoid large irq off latency
+ * needs some instrumenting to determine the common sizes used by
+ * important workloads (eg. DB2), and whether limiting the batch size
+ * will decrease performance.
+ *
+ * It seems like we're in the clear for the moment. Direct-IO is
+ * the main guy that batches up lots of get_user_pages, and even
+ * they are limited to 64-at-a-time which is not so many.
+ */
+ /*
+ * This doesn't prevent pagetable teardown, but does prevent
+ * the pagetables and pages from being freed on x86.
+ *
+ * So long as we atomically load page table pointers versus teardown
+ * (which we do on x86, with the above PAE exception), we can follow the
+ * address down to the page and take a ref on it.
+ */
+ local_irq_save(flags);
+ pgdp = pgd_offset(mm, addr);
+ do {
+ pgd_t pgd = *pgdp;
+
+ next = pgd_addr_end(addr, end);
+ if (pgd_none(pgd))
+ break;
+ if (!gup_pud_range(pgd, addr, next, write, pages, &nr))
+ break;
+ } while (pgdp++, addr = next, addr != end);
+ local_irq_restore(flags);
+
+ return nr;
+}
+
/**
* get_user_pages_fast() - pin user pages in memory
* @start: starting user address
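
__get_user_pages_fast() only disables local interrupts and never falls back to taking mmap_sem, so it can be called from atomic contexts such as NMI or IRQ handlers that need to peek at user pages. A hedged usage sketch (the helper and its error handling are illustrative):

        static void peek_user_page(unsigned long uaddr)
        {
                struct page *page;

                if (__get_user_pages_fast(uaddr, 1, 0, &page) != 1)
                        return;                 /* not resident or not accessible */

                /* ... access the page contents, e.g. via kmap_atomic() ... */

                put_page(page);                 /* drop the reference taken by GUP */
        }
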
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index 9c54329..c4378f4 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -527,7 +527,7 @@
return phys_pud_init(pud, addr, end, page_size_mask);
}
-unsigned long __init
+unsigned long __meminit
kernel_physical_mapping_init(unsigned long start,
unsigned long end,
unsigned long page_size_mask)
diff --git a/arch/x86/pci/acpi.c b/arch/x86/pci/acpi.c
index c0ecf25..16c3fda 100644
--- a/arch/x86/pci/acpi.c
+++ b/arch/x86/pci/acpi.c
@@ -38,15 +38,26 @@
struct acpi_resource_address64 addr;
acpi_status status;
- if (info->res_num >= PCI_BUS_NUM_RESOURCES)
- return AE_OK;
-
status = resource_to_addr(acpi_res, &addr);
if (ACPI_SUCCESS(status))
info->res_num++;
return AE_OK;
}
+static int
+bus_has_transparent_bridge(struct pci_bus *bus)
+{
+ struct pci_dev *dev;
+
+ list_for_each_entry(dev, &bus->devices, bus_list) {
+ u16 class = dev->class >> 8;
+
+ if (class == PCI_CLASS_BRIDGE_PCI && dev->transparent)
+ return true;
+ }
+ return false;
+}
+
static acpi_status
setup_resource(struct acpi_resource *acpi_res, void *data)
{
@@ -56,9 +67,7 @@
acpi_status status;
unsigned long flags;
struct resource *root;
-
- if (info->res_num >= PCI_BUS_NUM_RESOURCES)
- return AE_OK;
+ int max_root_bus_resources = PCI_BUS_NUM_RESOURCES;
status = resource_to_addr(acpi_res, &addr);
if (!ACPI_SUCCESS(status))
@@ -82,6 +91,18 @@
res->end = res->start + addr.address_length - 1;
res->child = NULL;
+ if (bus_has_transparent_bridge(info->bus))
+ max_root_bus_resources -= 3;
+ if (info->res_num >= max_root_bus_resources) {
+ printk(KERN_WARNING "PCI: Failed to allocate 0x%lx-0x%lx "
+ "from %s for %s due to _CRS returning more than "
+ "%d resource descriptors\n", (unsigned long) res->start,
+ (unsigned long) res->end, root->name, info->name,
+ max_root_bus_resources);
+ info->res_num++;
+ return AE_OK;
+ }
+
if (insert_resource(root, res)) {
printk(KERN_ERR "PCI: Failed to allocate 0x%lx-0x%lx "
"from %s for %s\n", (unsigned long) res->start,
@@ -217,7 +238,7 @@
#endif
}
- if (bus && (pci_probe & PCI_USE__CRS))
+ if (bus && !(pci_probe & PCI_NO_ROOT_CRS))
get_current_resources(device, busnum, domain, bus);
return bus;
}
diff --git a/arch/x86/pci/amd_bus.c b/arch/x86/pci/amd_bus.c
index f893d6a..2255f88 100644
--- a/arch/x86/pci/amd_bus.c
+++ b/arch/x86/pci/amd_bus.c
@@ -101,7 +101,7 @@
struct pci_root_info *info;
/* don't go for it if _CRS is used */
- if (pci_probe & PCI_USE__CRS)
+ if (!(pci_probe & PCI_NO_ROOT_CRS))
return;
/* if only one root bus, don't need to anything */
diff --git a/arch/x86/pci/common.c b/arch/x86/pci/common.c
index 2202b62..4740119 100644
--- a/arch/x86/pci/common.c
+++ b/arch/x86/pci/common.c
@@ -515,8 +515,8 @@
} else if (!strcmp(str, "assign-busses")) {
pci_probe |= PCI_ASSIGN_ALL_BUSSES;
return NULL;
- } else if (!strcmp(str, "use_crs")) {
- pci_probe |= PCI_USE__CRS;
+ } else if (!strcmp(str, "nocrs")) {
+ pci_probe |= PCI_NO_ROOT_CRS;
return NULL;
} else if (!strcmp(str, "earlydump")) {
pci_early_dump_regs = 1;
diff --git a/arch/xtensa/configs/s6105_defconfig b/arch/xtensa/configs/s6105_defconfig
index 768bee0..bb84fbc 100644
--- a/arch/xtensa/configs/s6105_defconfig
+++ b/arch/xtensa/configs/s6105_defconfig
@@ -263,7 +263,54 @@
# CONFIG_SCSI_NETLINK is not set
# CONFIG_ATA is not set
# CONFIG_MD is not set
-# CONFIG_NETDEVICES is not set
+CONFIG_NETDEVICES=y
+# CONFIG_DUMMY is not set
+# CONFIG_BONDING is not set
+# CONFIG_MACVLAN is not set
+# CONFIG_EQUALIZER is not set
+# CONFIG_TUN is not set
+# CONFIG_VETH is not set
+CONFIG_PHYLIB=y
+
+#
+# MII PHY device drivers
+#
+# CONFIG_MARVELL_PHY is not set
+# CONFIG_DAVICOM_PHY is not set
+# CONFIG_QSEMI_PHY is not set
+# CONFIG_LXT_PHY is not set
+# CONFIG_CICADA_PHY is not set
+# CONFIG_VITESSE_PHY is not set
+CONFIG_SMSC_PHY=y
+# CONFIG_BROADCOM_PHY is not set
+# CONFIG_ICPLUS_PHY is not set
+# CONFIG_REALTEK_PHY is not set
+# CONFIG_NATIONAL_PHY is not set
+# CONFIG_STE10XP is not set
+# CONFIG_LSI_ET1011C_PHY is not set
+# CONFIG_FIXED_PHY is not set
+# CONFIG_MDIO_BITBANG is not set
+# CONFIG_NET_ETHERNET is not set
+CONFIG_NETDEV_1000=y
+CONFIG_S6GMAC=y
+# CONFIG_NETDEV_10000 is not set
+
+#
+# Wireless LAN
+#
+# CONFIG_WLAN_PRE80211 is not set
+# CONFIG_WLAN_80211 is not set
+# CONFIG_IWLWIFI_LEDS is not set
+
+#
+# Enable WiMAX (Networking options) to see the WiMAX drivers
+#
+# CONFIG_WAN is not set
+# CONFIG_PPP is not set
+# CONFIG_SLIP is not set
+# CONFIG_NETCONSOLE is not set
+# CONFIG_NETPOLL is not set
+# CONFIG_NET_POLL_CONTROLLER is not set
# CONFIG_ISDN is not set
# CONFIG_PHONE is not set
@@ -304,8 +351,6 @@
# CONFIG_LEGACY_PTYS is not set
# CONFIG_IPMI_HANDLER is not set
# CONFIG_HW_RANDOM is not set
-# CONFIG_RTC is not set
-# CONFIG_GEN_RTC is not set
# CONFIG_R3964 is not set
# CONFIG_RAW_DRIVER is not set
# CONFIG_TCG_TPM is not set
@@ -387,7 +432,59 @@
# CONFIG_MEMSTICK is not set
# CONFIG_NEW_LEDS is not set
# CONFIG_ACCESSIBILITY is not set
-# CONFIG_RTC_CLASS is not set
+CONFIG_RTC_LIB=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_HCTOSYS=y
+CONFIG_RTC_HCTOSYS_DEVICE="rtc0"
+# CONFIG_RTC_DEBUG is not set
+
+#
+# RTC interfaces
+#
+# CONFIG_RTC_INTF_SYSFS is not set
+# CONFIG_RTC_INTF_PROC is not set
+# CONFIG_RTC_INTF_DEV is not set
+# CONFIG_RTC_DRV_TEST is not set
+
+#
+# I2C RTC drivers
+#
+# CONFIG_RTC_DRV_DS1307 is not set
+# CONFIG_RTC_DRV_DS1374 is not set
+# CONFIG_RTC_DRV_DS1672 is not set
+# CONFIG_RTC_DRV_MAX6900 is not set
+# CONFIG_RTC_DRV_RS5C372 is not set
+# CONFIG_RTC_DRV_ISL1208 is not set
+# CONFIG_RTC_DRV_X1205 is not set
+# CONFIG_RTC_DRV_PCF8563 is not set
+# CONFIG_RTC_DRV_PCF8583 is not set
+CONFIG_RTC_DRV_M41T80=y
+# CONFIG_RTC_DRV_M41T80_WDT is not set
+# CONFIG_RTC_DRV_S35390A is not set
+# CONFIG_RTC_DRV_FM3130 is not set
+# CONFIG_RTC_DRV_RX8581 is not set
+
+#
+# SPI RTC drivers
+#
+
+#
+# Platform RTC drivers
+#
+# CONFIG_RTC_DRV_DS1286 is not set
+# CONFIG_RTC_DRV_DS1511 is not set
+# CONFIG_RTC_DRV_DS1553 is not set
+# CONFIG_RTC_DRV_DS1742 is not set
+# CONFIG_RTC_DRV_STK17TA8 is not set
+# CONFIG_RTC_DRV_M48T86 is not set
+# CONFIG_RTC_DRV_M48T35 is not set
+# CONFIG_RTC_DRV_M48T59 is not set
+# CONFIG_RTC_DRV_BQ4802 is not set
+# CONFIG_RTC_DRV_V3020 is not set
+
+#
+# on-CPU RTC drivers
+#
# CONFIG_DMADEVICES is not set
# CONFIG_UIO is not set
# CONFIG_STAGING is not set
diff --git a/arch/xtensa/include/asm/cacheflush.h b/arch/xtensa/include/asm/cacheflush.h
index 8fc1c0c..b7b8fbe 100644
--- a/arch/xtensa/include/asm/cacheflush.h
+++ b/arch/xtensa/include/asm/cacheflush.h
@@ -155,5 +155,100 @@
#endif
+#define XTENSA_CACHEBLK_LOG2 29
+#define XTENSA_CACHEBLK_SIZE (1 << XTENSA_CACHEBLK_LOG2)
+#define XTENSA_CACHEBLK_MASK (7 << XTENSA_CACHEBLK_LOG2)
+
+#if XCHAL_HAVE_CACHEATTR
+static inline u32 xtensa_get_cacheattr(void)
+{
+ u32 r;
+ asm volatile(" rsr %0, CACHEATTR" : "=a"(r));
+ return r;
+}
+
+static inline u32 xtensa_get_dtlb1(u32 addr)
+{
+ u32 r = addr & XTENSA_CACHEBLK_MASK;
+ return r | ((xtensa_get_cacheattr() >> (r >> (XTENSA_CACHEBLK_LOG2-2)))
+ & 0xF);
+}
+#else
+static inline u32 xtensa_get_dtlb1(u32 addr)
+{
+ u32 r;
+ asm volatile(" rdtlb1 %0, %1" : "=a"(r) : "a"(addr));
+ asm volatile(" dsync");
+ return r;
+}
+
+static inline u32 xtensa_get_cacheattr(void)
+{
+ u32 r = 0;
+ u32 a = 0;
+ do {
+ a -= XTENSA_CACHEBLK_SIZE;
+ r = (r << 4) | (xtensa_get_dtlb1(a) & 0xF);
+ } while (a);
+ return r;
+}
+#endif
+
+static inline int xtensa_need_flush_dma_source(u32 addr)
+{
+ return (xtensa_get_dtlb1(addr) & ((1 << XCHAL_CA_BITS) - 1)) >= 4;
+}
+
+static inline int xtensa_need_invalidate_dma_destination(u32 addr)
+{
+ return (xtensa_get_dtlb1(addr) & ((1 << XCHAL_CA_BITS) - 1)) != 2;
+}
+
+static inline void flush_dcache_unaligned(u32 addr, u32 size)
+{
+ u32 cnt;
+ if (size) {
+ cnt = (size + ((XCHAL_DCACHE_LINESIZE - 1) & addr)
+ + XCHAL_DCACHE_LINESIZE - 1) / XCHAL_DCACHE_LINESIZE;
+ while (cnt--) {
+ asm volatile(" dhwb %0, 0" : : "a"(addr));
+ addr += XCHAL_DCACHE_LINESIZE;
+ }
+ asm volatile(" dsync");
+ }
+}
+
+static inline void invalidate_dcache_unaligned(u32 addr, u32 size)
+{
+ int cnt;
+ if (size) {
+ asm volatile(" dhwbi %0, 0 ;" : : "a"(addr));
+ cnt = (size + ((XCHAL_DCACHE_LINESIZE - 1) & addr)
+ - XCHAL_DCACHE_LINESIZE - 1) / XCHAL_DCACHE_LINESIZE;
+ while (cnt-- > 0) {
+ asm volatile(" dhi %0, %1" : : "a"(addr),
+ "n"(XCHAL_DCACHE_LINESIZE));
+ addr += XCHAL_DCACHE_LINESIZE;
+ }
+ asm volatile(" dhwbi %0, %1" : : "a"(addr),
+ "n"(XCHAL_DCACHE_LINESIZE));
+ asm volatile(" dsync");
+ }
+}
+
+static inline void flush_invalidate_dcache_unaligned(u32 addr, u32 size)
+{
+ u32 cnt;
+ if (size) {
+ cnt = (size + ((XCHAL_DCACHE_LINESIZE - 1) & addr)
+ + XCHAL_DCACHE_LINESIZE - 1) / XCHAL_DCACHE_LINESIZE;
+ while (cnt--) {
+ asm volatile(" dhwbi %0, 0" : : "a"(addr));
+ addr += XCHAL_DCACHE_LINESIZE;
+ }
+ asm volatile(" dsync");
+ }
+}
+
#endif /* __KERNEL__ */
#endif /* _XTENSA_CACHEFLUSH_H */
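
A worked example of the rounding in flush_dcache_unaligned(): assuming a 32-byte D-cache line, flushing 100 bytes that start 8 bytes into a line must touch 4 lines, which is exactly what the (size + offset-in-line + linesize - 1) / linesize expression yields:

        #define LINESZ 32       /* assumed line size, for the example only */

        static unsigned int lines_touched(unsigned int addr, unsigned int size)
        {
                return (size + (addr & (LINESZ - 1)) + LINESZ - 1) / LINESZ;
        }

        /* lines_touched(0x1008, 100) == (100 + 8 + 31) / 32 == 4 */
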
diff --git a/arch/xtensa/include/asm/gpio.h b/arch/xtensa/include/asm/gpio.h
index 0763b07..a8c9fc4 100644
--- a/arch/xtensa/include/asm/gpio.h
+++ b/arch/xtensa/include/asm/gpio.h
@@ -38,14 +38,14 @@
return __gpio_cansleep(gpio);
}
+static inline int gpio_to_irq(unsigned int gpio)
+{
+ return __gpio_to_irq(gpio);
+}
+
/*
* Not implemented, yet.
*/
-static inline int gpio_to_irq(unsigned int gpio)
-{
- return -ENOSYS;
-}
-
static inline int irq_to_gpio(unsigned int irq)
{
return -EINVAL;
diff --git a/arch/xtensa/include/asm/irq.h b/arch/xtensa/include/asm/irq.h
index dfac82d..4c0ccc9 100644
--- a/arch/xtensa/include/asm/irq.h
+++ b/arch/xtensa/include/asm/irq.h
@@ -11,6 +11,7 @@
#ifndef _XTENSA_IRQ_H
#define _XTENSA_IRQ_H
+#include <linux/init.h>
#include <platform/hardware.h>
#include <variant/core.h>
@@ -21,11 +22,20 @@
static inline void variant_irq_disable(unsigned int irq) { }
#endif
+#ifndef VARIANT_NR_IRQS
+# define VARIANT_NR_IRQS 0
+#endif
#ifndef PLATFORM_NR_IRQS
# define PLATFORM_NR_IRQS 0
#endif
#define XTENSA_NR_IRQS XCHAL_NUM_INTERRUPTS
-#define NR_IRQS (XTENSA_NR_IRQS + PLATFORM_NR_IRQS)
+#define NR_IRQS (XTENSA_NR_IRQS + VARIANT_NR_IRQS + PLATFORM_NR_IRQS)
+
+#if VARIANT_NR_IRQS == 0
+static inline void variant_init_irq(void) { }
+#else
+void variant_init_irq(void) __init;
+#endif
static __inline__ int irq_canonicalize(int irq)
{
diff --git a/arch/xtensa/kernel/irq.c b/arch/xtensa/kernel/irq.c
index a36c85e..a1badb3 100644
--- a/arch/xtensa/kernel/irq.c
+++ b/arch/xtensa/kernel/irq.c
@@ -197,4 +197,6 @@
}
cached_irq_mask = 0;
+
+ variant_init_irq();
}
diff --git a/arch/xtensa/mm/fault.c b/arch/xtensa/mm/fault.c
index bdd860d..bc07333 100644
--- a/arch/xtensa/mm/fault.c
+++ b/arch/xtensa/mm/fault.c
@@ -106,7 +106,7 @@
* the fault.
*/
survive:
- fault = handle_mm_fault(mm, vma, address, is_write);
+ fault = handle_mm_fault(mm, vma, address, is_write ? FAULT_FLAG_WRITE : 0);
if (unlikely(fault & VM_FAULT_ERROR)) {
if (fault & VM_FAULT_OOM)
goto out_of_memory;
diff --git a/arch/xtensa/platforms/s6105/device.c b/arch/xtensa/platforms/s6105/device.c
index 78b08be..65333ff 100644
--- a/arch/xtensa/platforms/s6105/device.c
+++ b/arch/xtensa/platforms/s6105/device.c
@@ -5,14 +5,27 @@
*/
#include <linux/kernel.h>
+#include <linux/gpio.h>
#include <linux/init.h>
+#include <linux/irq.h>
+#include <linux/phy.h>
#include <linux/platform_device.h>
#include <linux/serial.h>
#include <linux/serial_8250.h>
#include <variant/hardware.h>
+#include <variant/dmac.h>
+#include <platform/gpio.h>
+
+#define GPIO3_INTNUM 3
#define UART_INTNUM 4
+#define GMAC_INTNUM 5
+
+static const signed char gpio3_irq_mappings[] = {
+ S6_INTC_GPIO(3),
+ -1
+};
static const signed char uart_irq_mappings[] = {
S6_INTC_UART(0),
@@ -20,8 +33,18 @@
-1,
};
+static const signed char gmac_irq_mappings[] = {
+ S6_INTC_GMAC_STAT,
+ S6_INTC_GMAC_ERR,
+ S6_INTC_DMA_HOSTTERMCNT(0),
+ S6_INTC_DMA_HOSTTERMCNT(1),
+ -1
+};
+
const signed char *platform_irq_mappings[NR_IRQS] = {
+ [GPIO3_INTNUM] = gpio3_irq_mappings,
[UART_INTNUM] = uart_irq_mappings,
+ [GMAC_INTNUM] = gmac_irq_mappings,
};
static struct plat_serial8250_port serial_platform_data[] = {
@@ -46,6 +69,66 @@
{ },
};
+static struct resource s6_gmac_resource[] = {
+ {
+ .name = "mem",
+ .start = (resource_size_t)S6_REG_GMAC,
+ .end = (resource_size_t)S6_REG_GMAC + 0x10000 - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .name = "dma",
+ .start = (resource_size_t)
+ DMA_CHNL(S6_REG_HIFDMA, S6_HIFDMA_GMACTX),
+ .end = (resource_size_t)
+ DMA_CHNL(S6_REG_HIFDMA, S6_HIFDMA_GMACTX) + 0x100 - 1,
+ .flags = IORESOURCE_DMA,
+ },
+ {
+ .name = "dma",
+ .start = (resource_size_t)
+ DMA_CHNL(S6_REG_HIFDMA, S6_HIFDMA_GMACRX),
+ .end = (resource_size_t)
+ DMA_CHNL(S6_REG_HIFDMA, S6_HIFDMA_GMACRX) + 0x100 - 1,
+ .flags = IORESOURCE_DMA,
+ },
+ {
+ .name = "io",
+ .start = (resource_size_t)S6_MEM_GMAC,
+ .end = (resource_size_t)S6_MEM_GMAC + 0x2000000 - 1,
+ .flags = IORESOURCE_IO,
+ },
+ {
+ .name = "irq",
+ .start = (resource_size_t)GMAC_INTNUM,
+ .flags = IORESOURCE_IRQ,
+ },
+ {
+ .name = "irq",
+ .start = (resource_size_t)PHY_POLL,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static int __init prepare_phy_irq(int pin)
+{
+ int irq;
+ if (gpio_request(pin, "s6gmac_phy") < 0)
+ goto fail;
+ if (gpio_direction_input(pin) < 0)
+ goto free;
+ irq = gpio_to_irq(pin);
+ if (irq < 0)
+ goto free;
+ if (set_irq_type(irq, IRQ_TYPE_LEVEL_LOW) < 0)
+ goto free;
+ return irq;
+free:
+ gpio_free(pin);
+fail:
+ return PHY_POLL;
+}
+
static struct platform_device platform_devices[] = {
{
.name = "serial8250",
@@ -54,12 +137,23 @@
.platform_data = serial_platform_data,
},
},
+ {
+ .name = "s6gmac",
+ .id = 0,
+ .resource = s6_gmac_resource,
+ .num_resources = ARRAY_SIZE(s6_gmac_resource),
+ },
+ {
+ I2C_BOARD_INFO("m41t62", S6I2C_ADDR_M41T62),
+ },
};
static int __init device_init(void)
{
int i;
+ s6_gmac_resource[5].start = prepare_phy_irq(GPIO_PHY_IRQ);
+
for (i = 0; i < ARRAY_SIZE(platform_devices); i++)
platform_device_register(&platform_devices[i]);
return 0;
diff --git a/arch/xtensa/platforms/s6105/setup.c b/arch/xtensa/platforms/s6105/setup.c
index 855ddea..86ce730 100644
--- a/arch/xtensa/platforms/s6105/setup.c
+++ b/arch/xtensa/platforms/s6105/setup.c
@@ -35,12 +35,21 @@
{
unsigned long reg;
+ reg = readl(S6_REG_GREG1 + S6_GREG1_PLLSEL);
+ reg &= ~(S6_GREG1_PLLSEL_GMAC_MASK << S6_GREG1_PLLSEL_GMAC |
+ S6_GREG1_PLLSEL_GMII_MASK << S6_GREG1_PLLSEL_GMII);
+ reg |= S6_GREG1_PLLSEL_GMAC_125MHZ << S6_GREG1_PLLSEL_GMAC |
+ S6_GREG1_PLLSEL_GMII_125MHZ << S6_GREG1_PLLSEL_GMII;
+ writel(reg, S6_REG_GREG1 + S6_GREG1_PLLSEL);
+
reg = readl(S6_REG_GREG1 + S6_GREG1_CLKGATE);
reg &= ~(1 << S6_GREG1_BLOCK_SB);
+ reg &= ~(1 << S6_GREG1_BLOCK_GMAC);
writel(reg, S6_REG_GREG1 + S6_GREG1_CLKGATE);
reg = readl(S6_REG_GREG1 + S6_GREG1_BLOCKENA);
reg |= 1 << S6_GREG1_BLOCK_SB;
+ reg |= 1 << S6_GREG1_BLOCK_GMAC;
writel(reg, S6_REG_GREG1 + S6_GREG1_BLOCKENA);
printk(KERN_NOTICE "S6105 on Stretch S6000 - "
@@ -49,7 +58,7 @@
void __init platform_init(bp_tag_t *first)
{
- s6_gpio_init();
+ s6_gpio_init(0);
gpio_request(GPIO_LED1_NGREEN, "led1_green");
gpio_request(GPIO_LED1_RED, "led1_red");
gpio_direction_output(GPIO_LED1_NGREEN, 1);
diff --git a/arch/xtensa/variants/s6000/Makefile b/arch/xtensa/variants/s6000/Makefile
index d83f380..3e7ef0a 100644
--- a/arch/xtensa/variants/s6000/Makefile
+++ b/arch/xtensa/variants/s6000/Makefile
@@ -1,4 +1,4 @@
# s6000 Makefile
-obj-y += irq.o gpio.o
+obj-y += irq.o gpio.o dmac.o
obj-$(CONFIG_XTENSA_CALIBRATE_CCOUNT) += delay.o
diff --git a/arch/xtensa/variants/s6000/dmac.c b/arch/xtensa/variants/s6000/dmac.c
new file mode 100644
index 0000000..dc7f7c5
--- /dev/null
+++ b/arch/xtensa/variants/s6000/dmac.c
@@ -0,0 +1,173 @@
+/*
+ * Authors: Oskar Schirmer <os@emlix.com>
+ * Daniel Gloeckner <dg@emlix.com>
+ * (c) 2008 emlix GmbH http://www.emlix.com
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/io.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/spinlock.h>
+#include <asm/cacheflush.h>
+#include <variant/dmac.h>
+
+/* DMA engine lookup */
+
+struct s6dmac_ctrl s6dmac_ctrl[S6_DMAC_NB];
+
+
+/* DMA control, per engine */
+
+void s6dmac_put_fifo_cache(u32 dmac, int chan, u32 src, u32 dst, u32 size)
+{
+ if (xtensa_need_flush_dma_source(src)) {
+ u32 base = src;
+ u32 span = size;
+ u32 chunk = readl(DMA_CHNL(dmac, chan) + S6_DMA_CMONCHUNK);
+ if (chunk && (size > chunk)) {
+ s32 skip =
+ readl(DMA_CHNL(dmac, chan) + S6_DMA_SRCSKIP);
+ u32 gaps = (size+chunk-1)/chunk - 1;
+ if (skip >= 0) {
+ span += gaps * skip;
+ } else if (-skip > chunk) {
+ s32 decr = gaps * (chunk + skip);
+ base += decr;
+ span = chunk - decr;
+ } else {
+ span = max(span + gaps * skip,
+ (chunk + skip) * gaps - skip);
+ }
+ }
+ flush_dcache_unaligned(base, span);
+ }
+ if (xtensa_need_invalidate_dma_destination(dst)) {
+ u32 base = dst;
+ u32 span = size;
+ u32 chunk = readl(DMA_CHNL(dmac, chan) + S6_DMA_CMONCHUNK);
+ if (chunk && (size > chunk)) {
+ s32 skip =
+ readl(DMA_CHNL(dmac, chan) + S6_DMA_DSTSKIP);
+ u32 gaps = (size+chunk-1)/chunk - 1;
+ if (skip >= 0) {
+ span += gaps * skip;
+ } else if (-skip > chunk) {
+ s32 decr = gaps * (chunk + skip);
+ base += decr;
+ span = chunk - decr;
+ } else {
+ span = max(span + gaps * skip,
+ (chunk + skip) * gaps - skip);
+ }
+ }
+ invalidate_dcache_unaligned(base, span);
+ }
+ s6dmac_put_fifo(dmac, chan, src, dst, size);
+}
+
+void s6dmac_disable_error_irqs(u32 dmac, u32 mask)
+{
+ unsigned long flags;
+ spinlock_t *spinl = &s6dmac_ctrl[_dmac_addr_index(dmac)].lock;
+ spin_lock_irqsave(spinl, flags);
+ _s6dmac_disable_error_irqs(dmac, mask);
+ spin_unlock_irqrestore(spinl, flags);
+}
+
+u32 s6dmac_int_sources(u32 dmac, u32 channel)
+{
+ u32 mask, ret, tmp;
+ mask = 1 << channel;
+
+ tmp = readl(dmac + S6_DMA_TERMCNTIRQSTAT);
+ tmp &= mask;
+ writel(tmp, dmac + S6_DMA_TERMCNTIRQCLR);
+ ret = tmp >> channel;
+
+ tmp = readl(dmac + S6_DMA_PENDCNTIRQSTAT);
+ tmp &= mask;
+ writel(tmp, dmac + S6_DMA_PENDCNTIRQCLR);
+ ret |= (tmp >> channel) << 1;
+
+ tmp = readl(dmac + S6_DMA_LOWWMRKIRQSTAT);
+ tmp &= mask;
+ writel(tmp, dmac + S6_DMA_LOWWMRKIRQCLR);
+ ret |= (tmp >> channel) << 2;
+
+ tmp = readl(dmac + S6_DMA_INTRAW0);
+ tmp &= (mask << S6_DMA_INT0_OVER) | (mask << S6_DMA_INT0_UNDER);
+ writel(tmp, dmac + S6_DMA_INTCLEAR0);
+
+ if (tmp & (mask << S6_DMA_INT0_UNDER))
+ ret |= 1 << 3;
+ if (tmp & (mask << S6_DMA_INT0_OVER))
+ ret |= 1 << 4;
+
+ tmp = readl(dmac + S6_DMA_MASTERERRINFO);
+ mask <<= S6_DMA_INT1_CHANNEL;
+ if (((tmp >> S6_DMA_MASTERERR_CHAN(0)) & S6_DMA_MASTERERR_CHAN_MASK)
+ == channel)
+ mask |= 1 << S6_DMA_INT1_MASTER;
+ if (((tmp >> S6_DMA_MASTERERR_CHAN(1)) & S6_DMA_MASTERERR_CHAN_MASK)
+ == channel)
+ mask |= 1 << (S6_DMA_INT1_MASTER + 1);
+ if (((tmp >> S6_DMA_MASTERERR_CHAN(2)) & S6_DMA_MASTERERR_CHAN_MASK)
+ == channel)
+ mask |= 1 << (S6_DMA_INT1_MASTER + 2);
+
+ tmp = readl(dmac + S6_DMA_INTRAW1) & mask;
+ writel(tmp, dmac + S6_DMA_INTCLEAR1);
+ ret |= ((tmp >> channel) & 1) << 5;
+ ret |= ((tmp >> S6_DMA_INT1_MASTER) & S6_DMA_INT1_MASTER_MASK) << 6;
+
+ return ret;
+}
+
+void s6dmac_release_chan(u32 dmac, int chan)
+{
+ if (chan >= 0)
+ s6dmac_disable_chan(dmac, chan);
+}
+
+
+/* global init */
+
+static inline void __init dmac_init(u32 dmac, u8 chan_nb)
+{
+ s6dmac_ctrl[S6_DMAC_INDEX(dmac)].dmac = dmac;
+ spin_lock_init(&s6dmac_ctrl[S6_DMAC_INDEX(dmac)].lock);
+ s6dmac_ctrl[S6_DMAC_INDEX(dmac)].chan_nb = chan_nb;
+ writel(S6_DMA_INT1_MASTER_MASK << S6_DMA_INT1_MASTER,
+ dmac + S6_DMA_INTCLEAR1);
+}
+
+static inline void __init dmac_master(u32 dmac,
+ u32 m0start, u32 m0end, u32 m1start, u32 m1end)
+{
+ writel(m0start, dmac + S6_DMA_MASTER0START);
+ writel(m0end - 1, dmac + S6_DMA_MASTER0END);
+ writel(m1start, dmac + S6_DMA_MASTER1START);
+ writel(m1end - 1, dmac + S6_DMA_MASTER1END);
+}
+
+static void __init s6_dmac_init(void)
+{
+ dmac_init(S6_REG_LMSDMA, S6_LMSDMA_NB);
+ dmac_master(S6_REG_LMSDMA,
+ S6_MEM_DDR, S6_MEM_PCIE_APER, S6_MEM_EFI, S6_MEM_GMAC);
+ dmac_init(S6_REG_NIDMA, S6_NIDMA_NB);
+ dmac_init(S6_REG_DPDMA, S6_DPDMA_NB);
+ dmac_master(S6_REG_DPDMA,
+ S6_MEM_DDR, S6_MEM_PCIE_APER, S6_REG_DP, S6_REG_DPDMA);
+ dmac_init(S6_REG_HIFDMA, S6_HIFDMA_NB);
+ dmac_master(S6_REG_HIFDMA,
+ S6_MEM_GMAC, S6_MEM_PCIE_CFG, S6_MEM_PCIE_APER, S6_MEM_AUX);
+}
+
+arch_initcall(s6_dmac_init);
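
The span arithmetic in s6dmac_put_fifo_cache() widens the cache-maintenance range when a transfer is split into chunks separated by a skip. A worked sketch of the positive-skip case, with hypothetical numbers:

        static u32 sg_span(u32 size, u32 chunk, s32 skip)
        {
                u32 gaps = (size + chunk - 1) / chunk - 1;

                return size + gaps * skip;      /* positive-skip case only */
        }

        /* sg_span(4096, 1024, 64) == 4096 + 3 * 64 == 4288 bytes to flush */
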
diff --git a/arch/xtensa/variants/s6000/gpio.c b/arch/xtensa/variants/s6000/gpio.c
index 79317fd..380a70f 100644
--- a/arch/xtensa/variants/s6000/gpio.c
+++ b/arch/xtensa/variants/s6000/gpio.c
@@ -4,15 +4,20 @@
* Copyright (c) 2009 emlix GmbH
* Authors: Oskar Schirmer <os@emlix.com>
* Johannes Weiner <jw@emlix.com>
+ * Daniel Gloeckner <dg@emlix.com>
*/
+#include <linux/bitops.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/io.h>
+#include <linux/irq.h>
#include <linux/gpio.h>
#include <variant/hardware.h>
+#define IRQ_BASE XTENSA_NR_IRQS
+
#define S6_GPIO_DATA 0x000
#define S6_GPIO_IS 0x404
#define S6_GPIO_IBE 0x408
@@ -52,19 +57,175 @@
writeb(val ? ~0 : 0, S6_REG_GPIO + S6_GPIO_DATA + S6_GPIO_OFFSET(off));
}
+static int to_irq(struct gpio_chip *chip, unsigned offset)
+{
+ if (offset < 8)
+ return offset + IRQ_BASE;
+ return -EINVAL;
+}
+
static struct gpio_chip gpiochip = {
.owner = THIS_MODULE,
.direction_input = direction_input,
.get = get,
.direction_output = direction_output,
.set = set,
+ .to_irq = to_irq,
.base = 0,
.ngpio = 24,
.can_sleep = 0, /* no blocking io needed */
.exported = 0, /* no exporting to userspace */
};
-int s6_gpio_init(void)
+int s6_gpio_init(u32 afsel)
{
+ writeb(afsel, S6_REG_GPIO + S6_GPIO_BANK(0) + S6_GPIO_AFSEL);
+ writeb(afsel >> 8, S6_REG_GPIO + S6_GPIO_BANK(1) + S6_GPIO_AFSEL);
+ writeb(afsel >> 16, S6_REG_GPIO + S6_GPIO_BANK(2) + S6_GPIO_AFSEL);
return gpiochip_add(&gpiochip);
}
+
+static void ack(unsigned int irq)
+{
+ writeb(1 << (irq - IRQ_BASE), S6_REG_GPIO + S6_GPIO_IC);
+}
+
+static void mask(unsigned int irq)
+{
+ u8 r = readb(S6_REG_GPIO + S6_GPIO_IE);
+ r &= ~(1 << (irq - IRQ_BASE));
+ writeb(r, S6_REG_GPIO + S6_GPIO_IE);
+}
+
+static void unmask(unsigned int irq)
+{
+ u8 m = readb(S6_REG_GPIO + S6_GPIO_IE);
+ m |= 1 << (irq - IRQ_BASE);
+ writeb(m, S6_REG_GPIO + S6_GPIO_IE);
+}
+
+static int set_type(unsigned int irq, unsigned int type)
+{
+ const u8 m = 1 << (irq - IRQ_BASE);
+ irq_flow_handler_t handler;
+ struct irq_desc *desc;
+ u8 reg;
+
+ if (type == IRQ_TYPE_PROBE) {
+ if ((readb(S6_REG_GPIO + S6_GPIO_BANK(0) + S6_GPIO_AFSEL) & m)
+ || (readb(S6_REG_GPIO + S6_GPIO_BANK(0) + S6_GPIO_IE) & m)
+ || readb(S6_REG_GPIO + S6_GPIO_BANK(0) + S6_GPIO_DIR
+ + S6_GPIO_MASK(irq - IRQ_BASE)))
+ return 0;
+ type = IRQ_TYPE_EDGE_BOTH;
+ }
+
+ reg = readb(S6_REG_GPIO + S6_GPIO_BANK(0) + S6_GPIO_IS);
+ if (type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH)) {
+ reg |= m;
+ handler = handle_level_irq;
+ } else {
+ reg &= ~m;
+ handler = handle_edge_irq;
+ }
+ writeb(reg, S6_REG_GPIO + S6_GPIO_BANK(0) + S6_GPIO_IS);
+ desc = irq_to_desc(irq);
+ desc->handle_irq = handler;
+
+ reg = readb(S6_REG_GPIO + S6_GPIO_BANK(0) + S6_GPIO_IEV);
+ if (type & (IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_EDGE_RISING))
+ reg |= m;
+ else
+ reg &= ~m;
+ writeb(reg, S6_REG_GPIO + S6_GPIO_BANK(0) + S6_GPIO_IEV);
+
+ reg = readb(S6_REG_GPIO + S6_GPIO_BANK(0) + S6_GPIO_IBE);
+ if ((type & IRQ_TYPE_EDGE_BOTH) == IRQ_TYPE_EDGE_BOTH)
+ reg |= m;
+ else
+ reg &= ~m;
+ writeb(reg, S6_REG_GPIO + S6_GPIO_BANK(0) + S6_GPIO_IBE);
+ return 0;
+}
+
+static struct irq_chip gpioirqs = {
+ .name = "GPIO",
+ .ack = ack,
+ .mask = mask,
+ .unmask = unmask,
+ .set_type = set_type,
+};
+
+static u8 demux_masks[4];
+
+static void demux_irqs(unsigned int irq, struct irq_desc *desc)
+{
+ u8 *mask = get_irq_desc_data(desc);
+ u8 pending;
+ int cirq;
+
+ desc->chip->mask(irq);
+ desc->chip->ack(irq);
+ pending = readb(S6_REG_GPIO + S6_GPIO_BANK(0) + S6_GPIO_MIS) & *mask;
+ cirq = IRQ_BASE - 1;
+ while (pending) {
+ int n = ffs(pending);
+ cirq += n;
+ pending >>= n;
+ generic_handle_irq(cirq);
+ }
+ desc->chip->unmask(irq);
+}
+
+extern const signed char *platform_irq_mappings[XTENSA_NR_IRQS];
+
+void __init variant_init_irq(void)
+{
+ int irq, n;
+ writeb(0, S6_REG_GPIO + S6_GPIO_BANK(0) + S6_GPIO_IE);
+ for (irq = n = 0; irq < XTENSA_NR_IRQS; irq++) {
+ const signed char *mapping = platform_irq_mappings[irq];
+ int alone = 1;
+ u8 mask;
+ if (!mapping)
+ continue;
+ for (mask = 0; *mapping != -1; mapping++)
+ switch (*mapping) {
+ case S6_INTC_GPIO(0):
+ mask |= 1 << 0;
+ break;
+ case S6_INTC_GPIO(1):
+ mask |= 1 << 1;
+ break;
+ case S6_INTC_GPIO(2):
+ mask |= 1 << 2;
+ break;
+ case S6_INTC_GPIO(3):
+ mask |= 0x1f << 3;
+ break;
+ default:
+ alone = 0;
+ }
+ if (mask) {
+ int cirq, i;
+ if (!alone) {
+ printk(KERN_ERR "chained irq chips can't share"
+ " parent irq %i\n", irq);
+ continue;
+ }
+ demux_masks[n] = mask;
+ cirq = IRQ_BASE - 1;
+ do {
+ i = ffs(mask);
+ cirq += i;
+ mask >>= i;
+ set_irq_chip(cirq, &gpioirqs);
+ set_irq_type(irq, IRQ_TYPE_LEVEL_LOW);
+ } while (mask);
+ set_irq_data(irq, demux_masks + n);
+ set_irq_chained_handler(irq, demux_irqs);
+ if (++n == ARRAY_SIZE(demux_masks))
+ break;
+ }
+ }
+}
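
The chained handler above fans the shared parent interrupt out into per-GPIO IRQs starting at IRQ_BASE, which is what makes gpio_to_irq() usable in board code such as prepare_phy_irq() earlier in this series. A consumer-side sketch, with an illustrative handler and names:

        static irqreturn_t my_gpio_isr(int irq, void *dev_id)
        {
                /* the demux handler has already acked the parent interrupt */
                return IRQ_HANDLED;
        }

        static int my_gpio_irq_setup(unsigned int gpio)
        {
                int irq = gpio_to_irq(gpio);    /* IRQ_BASE + gpio for gpio < 8 */

                if (irq < 0)
                        return irq;
                set_irq_type(irq, IRQ_TYPE_EDGE_RISING);
                return request_irq(irq, my_gpio_isr, 0, "my-gpio", NULL);
        }
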
diff --git a/arch/xtensa/variants/s6000/include/variant/dmac.h b/arch/xtensa/variants/s6000/include/variant/dmac.h
new file mode 100644
index 0000000..89ab948
--- /dev/null
+++ b/arch/xtensa/variants/s6000/include/variant/dmac.h
@@ -0,0 +1,387 @@
+/*
+ * include/asm-xtensa/variant-s6000/dmac.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2006 Tensilica Inc.
+ * Copyright (C) 2008 Emlix GmbH <info@emlix.com>
+ * Authors: Fabian Godehardt <fg@emlix.com>
+ * Oskar Schirmer <os@emlix.com>
+ * Daniel Gloeckner <dg@emlix.com>
+ */
+
+#ifndef __ASM_XTENSA_S6000_DMAC_H
+#define __ASM_XTENSA_S6000_DMAC_H
+#include <linux/io.h>
+#include <variant/hardware.h>
+
+/* DMA global */
+
+#define S6_DMA_INTSTAT0 0x000
+#define S6_DMA_INTSTAT1 0x004
+#define S6_DMA_INTENABLE0 0x008
+#define S6_DMA_INTENABLE1 0x00C
+#define S6_DMA_INTRAW0 0x010
+#define S6_DMA_INTRAW1 0x014
+#define S6_DMA_INTCLEAR0 0x018
+#define S6_DMA_INTCLEAR1 0x01C
+#define S6_DMA_INTSET0 0x020
+#define S6_DMA_INTSET1 0x024
+#define S6_DMA_INT0_UNDER 0
+#define S6_DMA_INT0_OVER 16
+#define S6_DMA_INT1_CHANNEL 0
+#define S6_DMA_INT1_MASTER 16
+#define S6_DMA_INT1_MASTER_MASK 7
+#define S6_DMA_TERMCNTIRQSTAT 0x028
+#define S6_DMA_TERMCNTIRQCLR 0x02C
+#define S6_DMA_TERMCNTIRQSET 0x030
+#define S6_DMA_PENDCNTIRQSTAT 0x034
+#define S6_DMA_PENDCNTIRQCLR 0x038
+#define S6_DMA_PENDCNTIRQSET 0x03C
+#define S6_DMA_LOWWMRKIRQSTAT 0x040
+#define S6_DMA_LOWWMRKIRQCLR 0x044
+#define S6_DMA_LOWWMRKIRQSET 0x048
+#define S6_DMA_MASTERERRINFO 0x04C
+#define S6_DMA_MASTERERR_CHAN(n) (4*(n))
+#define S6_DMA_MASTERERR_CHAN_MASK 0xF
+#define S6_DMA_DESCRFIFO0 0x050
+#define S6_DMA_DESCRFIFO1 0x054
+#define S6_DMA_DESCRFIFO2 0x058
+#define S6_DMA_DESCRFIFO2_AUTODISABLE 24
+#define S6_DMA_DESCRFIFO3 0x05C
+#define S6_DMA_MASTER0START 0x060
+#define S6_DMA_MASTER0END 0x064
+#define S6_DMA_MASTER1START 0x068
+#define S6_DMA_MASTER1END 0x06C
+#define S6_DMA_NEXTFREE 0x070
+#define S6_DMA_NEXTFREE_CHAN 0
+#define S6_DMA_NEXTFREE_CHAN_MASK 0x1F
+#define S6_DMA_NEXTFREE_ENA 16
+#define S6_DMA_NEXTFREE_ENA_MASK ((1 << 16) - 1)
+#define S6_DMA_DPORTCTRLGRP(p) ((p) * 4 + 0x074)
+#define S6_DMA_DPORTCTRLGRP_FRAMEREP 0
+#define S6_DMA_DPORTCTRLGRP_NRCHANS 1
+#define S6_DMA_DPORTCTRLGRP_NRCHANS_1 0
+#define S6_DMA_DPORTCTRLGRP_NRCHANS_3 1
+#define S6_DMA_DPORTCTRLGRP_NRCHANS_4 2
+#define S6_DMA_DPORTCTRLGRP_NRCHANS_2 3
+#define S6_DMA_DPORTCTRLGRP_ENA 31
+
+
+/* DMA per channel */
+
+#define DMA_CHNL(dmac, n) ((dmac) + 0x1000 + (n) * 0x100)
+#define DMA_INDEX_CHNL(addr) (((addr) >> 8) & 0xF)
+#define DMA_MASK_DMAC(addr) ((addr) & 0xFFFF0000)
+#define S6_DMA_CHNCTRL 0x000
+#define S6_DMA_CHNCTRL_ENABLE 0
+#define S6_DMA_CHNCTRL_PAUSE 1
+#define S6_DMA_CHNCTRL_PRIO 2
+#define S6_DMA_CHNCTRL_PRIO_MASK 3
+#define S6_DMA_CHNCTRL_PERIPHXFER 4
+#define S6_DMA_CHNCTRL_PERIPHENA 5
+#define S6_DMA_CHNCTRL_SRCINC 6
+#define S6_DMA_CHNCTRL_DSTINC 7
+#define S6_DMA_CHNCTRL_BURSTLOG 8
+#define S6_DMA_CHNCTRL_BURSTLOG_MASK 7
+#define S6_DMA_CHNCTRL_DESCFIFODEPTH 12
+#define S6_DMA_CHNCTRL_DESCFIFODEPTH_MASK 0x1F
+#define S6_DMA_CHNCTRL_DESCFIFOFULL 17
+#define S6_DMA_CHNCTRL_BWCONSEL 18
+#define S6_DMA_CHNCTRL_BWCONENA 19
+#define S6_DMA_CHNCTRL_PENDGCNTSTAT 20
+#define S6_DMA_CHNCTRL_PENDGCNTSTAT_MASK 0x3F
+#define S6_DMA_CHNCTRL_LOWWMARK 26
+#define S6_DMA_CHNCTRL_LOWWMARK_MASK 0xF
+#define S6_DMA_CHNCTRL_TSTAMP 30
+#define S6_DMA_TERMCNTNB 0x004
+#define S6_DMA_TERMCNTNB_MASK 0xFFFF
+#define S6_DMA_TERMCNTTMO 0x008
+#define S6_DMA_TERMCNTSTAT 0x00C
+#define S6_DMA_TERMCNTSTAT_MASK 0xFF
+#define S6_DMA_CMONCHUNK 0x010
+#define S6_DMA_SRCSKIP 0x014
+#define S6_DMA_DSTSKIP 0x018
+#define S6_DMA_CUR_SRC 0x024
+#define S6_DMA_CUR_DST 0x028
+#define S6_DMA_TIMESTAMP 0x030
+
+/* DMA channel lists */
+
+#define S6_DPDMA_CHAN(stream, channel) (4 * (stream) + (channel))
+#define S6_DPDMA_NB 16
+
+#define S6_HIFDMA_GMACTX 0
+#define S6_HIFDMA_GMACRX 1
+#define S6_HIFDMA_I2S0 2
+#define S6_HIFDMA_I2S1 3
+#define S6_HIFDMA_EGIB 4
+#define S6_HIFDMA_PCITX 5
+#define S6_HIFDMA_PCIRX 6
+#define S6_HIFDMA_NB 7
+
+#define S6_NIDMA_NB 4
+
+#define S6_LMSDMA_NB 12
+
+/* controller access */
+
+#define S6_DMAC_NB 4
+#define S6_DMAC_INDEX(dmac) (((unsigned)(dmac) >> 18) % S6_DMAC_NB)
+
+struct s6dmac_ctrl {
+ u32 dmac;
+ spinlock_t lock;
+ u8 chan_nb;
+};
+
+extern struct s6dmac_ctrl s6dmac_ctrl[S6_DMAC_NB];
+
+
+/* DMA control, per channel */
+
+static inline int s6dmac_fifo_full(u32 dmac, int chan)
+{
+ return (readl(DMA_CHNL(dmac, chan) + S6_DMA_CHNCTRL)
+ & (1 << S6_DMA_CHNCTRL_DESCFIFOFULL)) && 1;
+}
+
+static inline int s6dmac_termcnt_irq(u32 dmac, int chan)
+{
+ u32 m = 1 << chan;
+ int r = (readl(dmac + S6_DMA_TERMCNTIRQSTAT) & m) && 1;
+ if (r)
+ writel(m, dmac + S6_DMA_TERMCNTIRQCLR);
+ return r;
+}
+
+static inline int s6dmac_pendcnt_irq(u32 dmac, int chan)
+{
+ u32 m = 1 << chan;
+ int r = (readl(dmac + S6_DMA_PENDCNTIRQSTAT) & m) && 1;
+ if (r)
+ writel(m, dmac + S6_DMA_PENDCNTIRQCLR);
+ return r;
+}
+
+static inline int s6dmac_lowwmark_irq(u32 dmac, int chan)
+{
+ int r = (readl(dmac + S6_DMA_LOWWMRKIRQSTAT) & (1 << chan)) ? 1 : 0;
+ if (r)
+ writel(1 << chan, dmac + S6_DMA_LOWWMRKIRQCLR);
+ return r;
+}
+
+static inline u32 s6dmac_pending_count(u32 dmac, int chan)
+{
+ return (readl(DMA_CHNL(dmac, chan) + S6_DMA_CHNCTRL)
+ >> S6_DMA_CHNCTRL_PENDGCNTSTAT)
+ & S6_DMA_CHNCTRL_PENDGCNTSTAT_MASK;
+}
+
+static inline void s6dmac_set_terminal_count(u32 dmac, int chan, u32 n)
+{
+ n &= S6_DMA_TERMCNTNB_MASK;
+ n |= readl(DMA_CHNL(dmac, chan) + S6_DMA_TERMCNTNB)
+ & ~S6_DMA_TERMCNTNB_MASK;
+ writel(n, DMA_CHNL(dmac, chan) + S6_DMA_TERMCNTNB);
+}
+
+static inline u32 s6dmac_get_terminal_count(u32 dmac, int chan)
+{
+ return (readl(DMA_CHNL(dmac, chan) + S6_DMA_TERMCNTNB))
+ & S6_DMA_TERMCNTNB_MASK;
+}
+
+static inline u32 s6dmac_timestamp(u32 dmac, int chan)
+{
+ return readl(DMA_CHNL(dmac, chan) + S6_DMA_TIMESTAMP);
+}
+
+static inline u32 s6dmac_cur_src(u32 dmac, int chan)
+{
+ return readl(DMA_CHNL(dmac, chan) + S6_DMA_CUR_SRC);
+}
+
+static inline u32 s6dmac_cur_dst(u32 dmac, int chan)
+{
+ return readl(DMA_CHNL(dmac, chan) + S6_DMA_CUR_DST);
+}
+
+static inline void s6dmac_disable_chan(u32 dmac, int chan)
+{
+ u32 ctrl;
+ writel(readl(DMA_CHNL(dmac, chan) + S6_DMA_CHNCTRL)
+ & ~(1 << S6_DMA_CHNCTRL_ENABLE),
+ DMA_CHNL(dmac, chan) + S6_DMA_CHNCTRL);
+ do
+ ctrl = readl(DMA_CHNL(dmac, chan) + S6_DMA_CHNCTRL);
+ while (ctrl & (1 << S6_DMA_CHNCTRL_ENABLE));
+}
+
+static inline void s6dmac_set_stride_skip(u32 dmac, int chan,
+ int comchunk, /* 0: disable scatter/gather */
+ int srcskip, int dstskip)
+{
+ writel(comchunk, DMA_CHNL(dmac, chan) + S6_DMA_CMONCHUNK);
+ writel(srcskip, DMA_CHNL(dmac, chan) + S6_DMA_SRCSKIP);
+ writel(dstskip, DMA_CHNL(dmac, chan) + S6_DMA_DSTSKIP);
+}
+
+static inline void s6dmac_enable_chan(u32 dmac, int chan,
+ int prio, /* 0 (highest) .. 3 (lowest) */
+ int periphxfer, /* <0: disable p.req.line, 0..1: mode */
+ int srcinc, int dstinc, /* 0: don't increment src/dst address */
+ int comchunk, /* 0: disable scatter/gather */
+ int srcskip, int dstskip,
+ int burstsize, /* 4 for I2S, 7 for everything else */
+ int bandwidthconserve, /* <0: disable, 0..1: select */
+ int lowwmark, /* 0..15 */
+ int timestamp, /* 0: disable timestamp */
+ int enable) /* 0: disable for now */
+{
+ writel(1, DMA_CHNL(dmac, chan) + S6_DMA_TERMCNTNB);
+ writel(0, DMA_CHNL(dmac, chan) + S6_DMA_TERMCNTTMO);
+ writel(lowwmark << S6_DMA_CHNCTRL_LOWWMARK,
+ DMA_CHNL(dmac, chan) + S6_DMA_CHNCTRL);
+ s6dmac_set_stride_skip(dmac, chan, comchunk, srcskip, dstskip);
+ writel(((enable ? 1 : 0) << S6_DMA_CHNCTRL_ENABLE) |
+ (prio << S6_DMA_CHNCTRL_PRIO) |
+ (((periphxfer > 0) ? 1 : 0) << S6_DMA_CHNCTRL_PERIPHXFER) |
+ (((periphxfer < 0) ? 0 : 1) << S6_DMA_CHNCTRL_PERIPHENA) |
+ ((srcinc ? 1 : 0) << S6_DMA_CHNCTRL_SRCINC) |
+ ((dstinc ? 1 : 0) << S6_DMA_CHNCTRL_DSTINC) |
+ (burstsize << S6_DMA_CHNCTRL_BURSTLOG) |
+ (((bandwidthconserve > 0) ? 1 : 0) << S6_DMA_CHNCTRL_BWCONSEL) |
+ (((bandwidthconserve < 0) ? 0 : 1) << S6_DMA_CHNCTRL_BWCONENA) |
+ (lowwmark << S6_DMA_CHNCTRL_LOWWMARK) |
+ ((timestamp ? 1 : 0) << S6_DMA_CHNCTRL_TSTAMP),
+ DMA_CHNL(dmac, chan) + S6_DMA_CHNCTRL);
+}
+
+
+/* DMA control, per engine */
+
+static inline unsigned _dmac_addr_index(u32 dmac)
+{
+ unsigned i = S6_DMAC_INDEX(dmac);
+ if (s6dmac_ctrl[i].dmac != dmac)
+ BUG();
+ return i;
+}
+
+static inline void _s6dmac_disable_error_irqs(u32 dmac, u32 mask)
+{
+ writel(mask, dmac + S6_DMA_TERMCNTIRQCLR);
+ writel(mask, dmac + S6_DMA_PENDCNTIRQCLR);
+ writel(mask, dmac + S6_DMA_LOWWMRKIRQCLR);
+ writel(readl(dmac + S6_DMA_INTENABLE0)
+ & ~((mask << S6_DMA_INT0_UNDER) | (mask << S6_DMA_INT0_OVER)),
+ dmac + S6_DMA_INTENABLE0);
+ writel(readl(dmac + S6_DMA_INTENABLE1) & ~(mask << S6_DMA_INT1_CHANNEL),
+ dmac + S6_DMA_INTENABLE1);
+ writel((mask << S6_DMA_INT0_UNDER) | (mask << S6_DMA_INT0_OVER),
+ dmac + S6_DMA_INTCLEAR0);
+ writel(mask << S6_DMA_INT1_CHANNEL, dmac + S6_DMA_INTCLEAR1);
+}
+
+/*
+ * request channel from specified engine
+ * with chan<0, accept any channel
+ * for further parameters, see s6dmac_enable_chan
+ * returns < 0 upon error, channel number otherwise
+ */
+static inline int s6dmac_request_chan(u32 dmac, int chan,
+ int prio,
+ int periphxfer,
+ int srcinc, int dstinc,
+ int comchunk,
+ int srcskip, int dstskip,
+ int burstsize,
+ int bandwidthconserve,
+ int lowwmark,
+ int timestamp,
+ int enable)
+{
+ int r = chan;
+ unsigned long flags;
+ spinlock_t *spinl = &s6dmac_ctrl[_dmac_addr_index(dmac)].lock;
+ spin_lock_irqsave(spinl, flags);
+ if (r < 0) {
+ r = (readl(dmac + S6_DMA_NEXTFREE) >> S6_DMA_NEXTFREE_CHAN)
+ & S6_DMA_NEXTFREE_CHAN_MASK;
+ }
+ if (r >= s6dmac_ctrl[_dmac_addr_index(dmac)].chan_nb) {
+ if (chan < 0)
+ r = -EBUSY;
+ else
+ r = -ENXIO;
+ } else if (((readl(dmac + S6_DMA_NEXTFREE) >> S6_DMA_NEXTFREE_ENA)
+ >> r) & 1) {
+ r = -EBUSY;
+ } else {
+ s6dmac_enable_chan(dmac, r, prio, periphxfer,
+ srcinc, dstinc, comchunk, srcskip, dstskip, burstsize,
+ bandwidthconserve, lowwmark, timestamp, enable);
+ }
+ spin_unlock_irqrestore(spinl, flags);
+ return r;
+}
+
+static inline void s6dmac_put_fifo(u32 dmac, int chan,
+ u32 src, u32 dst, u32 size)
+{
+ unsigned long flags;
+ spinlock_t *spinl = &s6dmac_ctrl[_dmac_addr_index(dmac)].lock;
+ spin_lock_irqsave(spinl, flags);
+ writel(src, dmac + S6_DMA_DESCRFIFO0);
+ writel(dst, dmac + S6_DMA_DESCRFIFO1);
+ writel(size, dmac + S6_DMA_DESCRFIFO2);
+ writel(chan, dmac + S6_DMA_DESCRFIFO3);
+ spin_unlock_irqrestore(spinl, flags);
+}
+
+static inline u32 s6dmac_channel_enabled(u32 dmac, int chan)
+{
+ return readl(DMA_CHNL(dmac, chan) + S6_DMA_CHNCTRL) &
+ (1 << S6_DMA_CHNCTRL_ENABLE);
+}
+
+/*
+ * group 1-4 data port channels
+ * with port=0..3, nrch=1-4 channels,
+ * frrep=0/1 (disable or enable frame repeat)
+ */
+static inline void s6dmac_dp_setup_group(u32 dmac, int port,
+ int nrch, int frrep)
+{
+ static const u8 mask[4] = {0, 3, 1, 2};
+ BUG_ON(dmac != S6_REG_DPDMA);
+ if ((port < 0) || (port > 3) || (nrch < 1) || (nrch > 4))
+ return;
+ writel((mask[nrch - 1] << S6_DMA_DPORTCTRLGRP_NRCHANS)
+ | ((frrep ? 1 : 0) << S6_DMA_DPORTCTRLGRP_FRAMEREP),
+ dmac + S6_DMA_DPORTCTRLGRP(port));
+}
+
+static inline void s6dmac_dp_switch_group(u32 dmac, int port, int enable)
+{
+ u32 tmp;
+ BUG_ON(dmac != S6_REG_DPDMA);
+ tmp = readl(dmac + S6_DMA_DPORTCTRLGRP(port));
+ if (enable)
+ tmp |= (1 << S6_DMA_DPORTCTRLGRP_ENA);
+ else
+ tmp &= ~(1 << S6_DMA_DPORTCTRLGRP_ENA);
+ writel(tmp, dmac + S6_DMA_DPORTCTRLGRP(port));
+}
+
+extern void s6dmac_put_fifo_cache(u32 dmac, int chan,
+ u32 src, u32 dst, u32 size);
+extern void s6dmac_disable_error_irqs(u32 dmac, u32 mask);
+extern u32 s6dmac_int_sources(u32 dmac, u32 channel);
+extern void s6dmac_release_chan(u32 dmac, int chan);
+
+#endif /* __ASM_XTENSA_S6000_DMAC_H */
diff --git a/arch/xtensa/variants/s6000/include/variant/gpio.h b/arch/xtensa/variants/s6000/include/variant/gpio.h
index 8327f62..8484ab0 100644
--- a/arch/xtensa/variants/s6000/include/variant/gpio.h
+++ b/arch/xtensa/variants/s6000/include/variant/gpio.h
@@ -1,6 +1,6 @@
#ifndef _XTENSA_VARIANT_S6000_GPIO_H
#define _XTENSA_VARIANT_S6000_GPIO_H
-extern int s6_gpio_init(void);
+extern int s6_gpio_init(u32 afsel);
#endif /* _XTENSA_VARIANT_S6000_GPIO_H */
diff --git a/arch/xtensa/variants/s6000/include/variant/irq.h b/arch/xtensa/variants/s6000/include/variant/irq.h
index fa031cb..97d6fc4 100644
--- a/arch/xtensa/variants/s6000/include/variant/irq.h
+++ b/arch/xtensa/variants/s6000/include/variant/irq.h
@@ -1,9 +1,9 @@
-#ifndef __XTENSA_S6000_IRQ_H
-#define __XTENSA_S6000_IRQ_H
+#ifndef _XTENSA_S6000_IRQ_H
+#define _XTENSA_S6000_IRQ_H
#define NO_IRQ (-1)
+#define VARIANT_NR_IRQS 8 /* GPIO interrupts */
extern void variant_irq_enable(unsigned int irq);
-extern void variant_irq_disable(unsigned int irq);
#endif /* __XTENSA_S6000_IRQ_H */
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index 431f8b4..7ec7d88 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -266,6 +266,7 @@
config ACPI_PCI_SLOT
tristate "PCI slot detection driver"
+ depends on SYSFS
default n
help
This driver creates entries in /sys/bus/pci/slots/ for all PCI
diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig
index 2aa1908..b17c57f 100644
--- a/drivers/ata/Kconfig
+++ b/drivers/ata/Kconfig
@@ -679,6 +679,14 @@
If unsure, say N.
+config PATA_AT91
+ tristate "PATA support for AT91SAM9260"
+ depends on ARM && ARCH_AT91
+ help
+ This option enables support for IDE devices on the Atmel AT91SAM9260 SoC.
+
+ If unsure, say N.
+
config PATA_OF_PLATFORM
tristate "OpenFirmware platform device PATA support"
depends on PATA_PLATFORM && PPC_OF
diff --git a/drivers/ata/Makefile b/drivers/ata/Makefile
index 1558059..38906f9 100644
--- a/drivers/ata/Makefile
+++ b/drivers/ata/Makefile
@@ -72,6 +72,7 @@
obj-$(CONFIG_PATA_BF54X) += pata_bf54x.o
obj-$(CONFIG_PATA_OCTEON_CF) += pata_octeon_cf.o
obj-$(CONFIG_PATA_PLATFORM) += pata_platform.o
+obj-$(CONFIG_PATA_AT91) += pata_at91.o
obj-$(CONFIG_PATA_OF_PLATFORM) += pata_of_platform.o
obj-$(CONFIG_PATA_ICSIDE) += pata_icside.o
# Should be last but two libata driver
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index ca4d208..045a486 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -125,19 +125,19 @@
static int atapi_enabled = 1;
module_param(atapi_enabled, int, 0444);
-MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on)");
+MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on [default])");
static int atapi_dmadir = 0;
module_param(atapi_dmadir, int, 0444);
-MODULE_PARM_DESC(atapi_dmadir, "Enable ATAPI DMADIR bridge support (0=off, 1=on)");
+MODULE_PARM_DESC(atapi_dmadir, "Enable ATAPI DMADIR bridge support (0=off [default], 1=on)");
int atapi_passthru16 = 1;
module_param(atapi_passthru16, int, 0444);
-MODULE_PARM_DESC(atapi_passthru16, "Enable ATA_16 passthru for ATAPI devices; on by default (0=off, 1=on)");
+MODULE_PARM_DESC(atapi_passthru16, "Enable ATA_16 passthru for ATAPI devices (0=off, 1=on [default])");
int libata_fua = 0;
module_param_named(fua, libata_fua, int, 0444);
-MODULE_PARM_DESC(fua, "FUA support (0=off, 1=on)");
+MODULE_PARM_DESC(fua, "FUA support (0=off [default], 1=on)");
static int ata_ignore_hpa;
module_param_named(ignore_hpa, ata_ignore_hpa, int, 0644);
@@ -153,11 +153,11 @@
int libata_noacpi = 0;
module_param_named(noacpi, libata_noacpi, int, 0444);
-MODULE_PARM_DESC(noacpi, "Disables the use of ACPI in probe/suspend/resume when set");
+MODULE_PARM_DESC(noacpi, "Disable the use of ACPI in probe/suspend/resume (0=off [default], 1=on)");
int libata_allow_tpm = 0;
module_param_named(allow_tpm, libata_allow_tpm, int, 0444);
-MODULE_PARM_DESC(allow_tpm, "Permit the use of TPM commands");
+MODULE_PARM_DESC(allow_tpm, "Permit the use of TPM commands (0=off [default], 1=on)");
MODULE_AUTHOR("Jeff Garzik");
MODULE_DESCRIPTION("Library module for ATA devices");
@@ -1993,11 +1993,17 @@
* Check if the current speed of the device requires IORDY. Used
* by various controllers for chip configuration.
*/
-
unsigned int ata_pio_need_iordy(const struct ata_device *adev)
{
- /* Controller doesn't support IORDY. Probably a pointless check
- as the caller should know this */
+ /* Don't set IORDY if we're preparing for reset. IORDY may
+ * lead to controller lock up on certain controllers if the
+ * port is not occupied. See bko#11703 for details.
+ */
+ if (adev->link->ap->pflags & ATA_PFLAG_RESETTING)
+ return 0;
+ /* Controller doesn't support IORDY. Probably a pointless
+ * check as the caller should know this.
+ */
if (adev->link->ap->flags & ATA_FLAG_NO_IORDY)
return 0;
/* CF spec. r4.1 Table 22 says no iordy on PIO5 and PIO6. */
@@ -2020,7 +2026,6 @@
* Compute the highest mode possible if we are not using iordy. Return
* -1 if no iordy mode is available.
*/
-
static u32 ata_pio_mask_no_iordy(const struct ata_device *adev)
{
/* If we have no drive specific rule, then PIO 2 is non IORDY */
diff --git a/drivers/ata/pata_at91.c b/drivers/ata/pata_at91.c
new file mode 100644
index 0000000..4b27617
--- /dev/null
+++ b/drivers/ata/pata_at91.c
@@ -0,0 +1,361 @@
+/*
+ * PATA driver for AT91SAM9260 Static Memory Controller
+ * with CompactFlash interface in True IDE mode
+ *
+ * Copyright (C) 2009 Matyukevich Sergey
+ *
+ * Based on:
+ * * generic platform driver by Paul Mundt: drivers/ata/pata_platform.c
+ * * pata_at32 driver by Kristoffer Nyborg Gregertsen
+ * * at91_ide driver by Stanislaw Gruszka
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/blkdev.h>
+#include <scsi/scsi_host.h>
+#include <linux/ata.h>
+#include <linux/clk.h>
+#include <linux/libata.h>
+#include <linux/platform_device.h>
+#include <linux/ata_platform.h>
+
+#include <mach/at91sam9260_matrix.h>
+#include <mach/at91sam9_smc.h>
+#include <mach/at91sam9260.h>
+#include <mach/board.h>
+#include <mach/gpio.h>
+
+
+#define DRV_NAME "pata_at91"
+#define DRV_VERSION "0.1"
+
+#define CF_IDE_OFFSET 0x00c00000
+#define CF_ALT_IDE_OFFSET 0x00e00000
+#define CF_IDE_RES_SIZE 0x08
+
+struct at91_ide_info {
+ unsigned long mode;
+ unsigned int cs;
+
+ void __iomem *ide_addr;
+ void __iomem *alt_addr;
+};
+
+const struct ata_timing initial_timing =
+ {XFER_PIO_0, 70, 290, 240, 600, 165, 150, 600, 0};
+
+static unsigned int calc_mck_cycles(unsigned int ns, unsigned int mck_hz)
+{
+ unsigned long mul;
+
+ /*
+ * cycles = x [nsec] * f [Hz] / 10^9 [ns in sec] =
+ * x * (f / 1_000_000_000) =
+ * x * ((f * 65536) / 1_000_000_000) / 65536 =
+ * x * (((f / 10_000) * 65536) / 100_000) / 65536 =
+ */
+
+ mul = (mck_hz / 10000) << 16;
+ mul /= 100000;
+
+ return (ns * mul + 65536) >> 16; /* rounding */
+}
+
+static void set_smc_mode(struct at91_ide_info *info)
+{
+ at91_sys_write(AT91_SMC_MODE(info->cs), info->mode);
+ return;
+}
+
+static void set_smc_timing(struct device *dev,
+ struct at91_ide_info *info, const struct ata_timing *ata)
+{
+ int read_cycle, write_cycle, active, recover;
+ int nrd_setup, nrd_pulse, nrd_recover;
+ int nwe_setup, nwe_pulse;
+
+ int ncs_write_setup, ncs_write_pulse;
+ int ncs_read_setup, ncs_read_pulse;
+
+ unsigned int mck_hz;
+ struct clk *mck;
+
+ read_cycle = ata->cyc8b;
+ nrd_setup = ata->setup;
+ nrd_pulse = ata->act8b;
+ nrd_recover = ata->rec8b;
+
+ mck = clk_get(NULL, "mck");
+ BUG_ON(IS_ERR(mck));
+ mck_hz = clk_get_rate(mck);
+
+ read_cycle = calc_mck_cycles(read_cycle, mck_hz);
+ nrd_setup = calc_mck_cycles(nrd_setup, mck_hz);
+ nrd_pulse = calc_mck_cycles(nrd_pulse, mck_hz);
+ nrd_recover = calc_mck_cycles(nrd_recover, mck_hz);
+
+ clk_put(mck);
+
+ active = nrd_setup + nrd_pulse;
+ recover = read_cycle - active;
+
+ /* Need at least two cycles recovery */
+ if (recover < 2)
+ read_cycle = active + 2;
+
+ /* (CS0, CS1, DIR, OE) <= (CFCE1, CFCE2, CFRNW, NCSX) timings */
+ ncs_read_setup = 1;
+ ncs_read_pulse = read_cycle - 2;
+
+ /* Write timings same as read timings */
+ write_cycle = read_cycle;
+ nwe_setup = nrd_setup;
+ nwe_pulse = nrd_pulse;
+ ncs_write_setup = ncs_read_setup;
+ ncs_write_pulse = ncs_read_pulse;
+
+ dev_dbg(dev, "ATA timings: nrd_setup = %d nrd_pulse = %d nrd_cycle = %d\n",
+ nrd_setup, nrd_pulse, read_cycle);
+ dev_dbg(dev, "ATA timings: nwe_setup = %d nwe_pulse = %d nwe_cycle = %d\n",
+ nwe_setup, nwe_pulse, write_cycle);
+ dev_dbg(dev, "ATA timings: ncs_read_setup = %d ncs_read_pulse = %d\n",
+ ncs_read_setup, ncs_read_pulse);
+ dev_dbg(dev, "ATA timings: ncs_write_setup = %d ncs_write_pulse = %d\n",
+ ncs_write_setup, ncs_write_pulse);
+
+ at91_sys_write(AT91_SMC_SETUP(info->cs),
+ AT91_SMC_NWESETUP_(nwe_setup) |
+ AT91_SMC_NRDSETUP_(nrd_setup) |
+ AT91_SMC_NCS_WRSETUP_(ncs_write_setup) |
+ AT91_SMC_NCS_RDSETUP_(ncs_read_setup));
+
+ at91_sys_write(AT91_SMC_PULSE(info->cs),
+ AT91_SMC_NWEPULSE_(nwe_pulse) |
+ AT91_SMC_NRDPULSE_(nrd_pulse) |
+ AT91_SMC_NCS_WRPULSE_(ncs_write_pulse) |
+ AT91_SMC_NCS_RDPULSE_(ncs_read_pulse));
+
+ at91_sys_write(AT91_SMC_CYCLE(info->cs),
+ AT91_SMC_NWECYCLE_(write_cycle) |
+ AT91_SMC_NRDCYCLE_(read_cycle));
+
+ return;
+}
+
+static void pata_at91_set_piomode(struct ata_port *ap, struct ata_device *adev)
+{
+ struct at91_ide_info *info = ap->host->private_data;
+ struct ata_timing timing;
+ int ret;
+
+ /* Compute ATA timing and set it to SMC */
+ ret = ata_timing_compute(adev, adev->pio_mode, &timing, 1000, 0);
+ if (ret) {
+ dev_warn(ap->dev, "Failed to compute ATA timing %d, \
+ set PIO_0 timing\n", ret);
+ set_smc_timing(ap->dev, info, &initial_timing);
+ } else {
+ set_smc_timing(ap->dev, info, &timing);
+ }
+
+ /* Setup SMC mode */
+ set_smc_mode(info);
+
+ return;
+}
+
+static unsigned int pata_at91_data_xfer_noirq(struct ata_device *dev,
+ unsigned char *buf, unsigned int buflen, int rw)
+{
+ struct at91_ide_info *info = dev->link->ap->host->private_data;
+ unsigned int consumed;
+ unsigned long flags;
+ unsigned int mode;
+
+ local_irq_save(flags);
+ mode = at91_sys_read(AT91_SMC_MODE(info->cs));
+
+ /* set 16bit mode before writing data */
+ at91_sys_write(AT91_SMC_MODE(info->cs),
+ (mode & ~AT91_SMC_DBW) | AT91_SMC_DBW_16);
+
+ consumed = ata_sff_data_xfer(dev, buf, buflen, rw);
+
+ /* restore 8bit mode after data is written */
+ at91_sys_write(AT91_SMC_MODE(info->cs),
+ (mode & ~AT91_SMC_DBW) | AT91_SMC_DBW_8);
+
+ local_irq_restore(flags);
+ return consumed;
+}
+
+static struct scsi_host_template pata_at91_sht = {
+ ATA_PIO_SHT(DRV_NAME),
+};
+
+static struct ata_port_operations pata_at91_port_ops = {
+ .inherits = &ata_sff_port_ops,
+
+ .sff_data_xfer = pata_at91_data_xfer_noirq,
+ .set_piomode = pata_at91_set_piomode,
+ .cable_detect = ata_cable_40wire,
+ .port_start = ATA_OP_NULL,
+};
+
+static int __devinit pata_at91_probe(struct platform_device *pdev)
+{
+ struct at91_cf_data *board = pdev->dev.platform_data;
+ struct device *dev = &pdev->dev;
+ struct at91_ide_info *info;
+ struct resource *mem_res;
+ struct ata_host *host;
+ struct ata_port *ap;
+ int irq_flags = 0;
+ int irq = 0;
+ int ret;
+
+ /* get platform resources: IO/CTL memories and irq/rst pins */
+
+ if (pdev->num_resources != 1) {
+ dev_err(&pdev->dev, "invalid number of resources\n");
+ return -EINVAL;
+ }
+
+ mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+ if (!mem_res) {
+ dev_err(dev, "failed to get mem resource\n");
+ return -EINVAL;
+ }
+
+ irq = board->irq_pin;
+
+ /* init ata host */
+
+ host = ata_host_alloc(dev, 1);
+
+ if (!host)
+ return -ENOMEM;
+
+ ap = host->ports[0];
+ ap->ops = &pata_at91_port_ops;
+ ap->flags |= ATA_FLAG_SLAVE_POSS;
+ ap->pio_mask = ATA_PIO4;
+
+ if (!irq) {
+ ap->flags |= ATA_FLAG_PIO_POLLING;
+ ata_port_desc(ap, "no IRQ, using PIO polling");
+ }
+
+ info = kzalloc(sizeof(*info), GFP_KERNEL);
+
+ if (!info) {
+ dev_err(dev, "failed to allocate memory for private data\n");
+ return -ENOMEM;
+ }
+
+ info->cs = board->chipselect;
+ info->mode = AT91_SMC_READMODE | AT91_SMC_WRITEMODE |
+ AT91_SMC_EXNWMODE_READY | AT91_SMC_BAT_SELECT |
+ AT91_SMC_DBW_8 | AT91_SMC_TDF_(0);
+
+ info->ide_addr = devm_ioremap(dev,
+ mem_res->start + CF_IDE_OFFSET, CF_IDE_RES_SIZE);
+
+ if (!info->ide_addr) {
+ dev_err(dev, "failed to map IO base\n");
+ ret = -ENOMEM;
+ goto err_ide_ioremap;
+ }
+
+ info->alt_addr = devm_ioremap(dev,
+ mem_res->start + CF_ALT_IDE_OFFSET, CF_IDE_RES_SIZE);
+
+ if (!info->alt_addr) {
+ dev_err(dev, "failed to map CTL base\n");
+ ret = -ENOMEM;
+ goto err_alt_ioremap;
+ }
+
+ ap->ioaddr.cmd_addr = info->ide_addr;
+ ap->ioaddr.ctl_addr = info->alt_addr + 0x06;
+ ap->ioaddr.altstatus_addr = ap->ioaddr.ctl_addr;
+
+ ata_sff_std_ports(&ap->ioaddr);
+
+ ata_port_desc(ap, "mmio cmd 0x%llx ctl 0x%llx",
+ (unsigned long long)mem_res->start + CF_IDE_OFFSET,
+ (unsigned long long)mem_res->start + CF_ALT_IDE_OFFSET);
+
+ host->private_data = info;
+
+ return ata_host_activate(host, irq ? gpio_to_irq(irq) : 0,
+ irq ? ata_sff_interrupt : NULL,
+ irq_flags, &pata_at91_sht);
+
+err_alt_ioremap:
+ devm_iounmap(dev, info->ide_addr);
+
+err_ide_ioremap:
+ kfree(info);
+
+ return ret;
+}
+
+static int __devexit pata_at91_remove(struct platform_device *pdev)
+{
+ struct ata_host *host = dev_get_drvdata(&pdev->dev);
+ struct at91_ide_info *info = host->private_data;
+ struct device *dev = &pdev->dev;
+
+ if (!host)
+ return 0;
+
+ ata_host_detach(host);
+
+ if (!info)
+ return 0;
+
+ devm_iounmap(dev, info->ide_addr);
+ devm_iounmap(dev, info->alt_addr);
+
+ kfree(info);
+ return 0;
+}
+
+static struct platform_driver pata_at91_driver = {
+ .probe = pata_at91_probe,
+ .remove = __devexit_p(pata_at91_remove),
+ .driver = {
+ .name = DRV_NAME,
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init pata_at91_init(void)
+{
+ return platform_driver_register(&pata_at91_driver);
+}
+
+static void __exit pata_at91_exit(void)
+{
+ platform_driver_unregister(&pata_at91_driver);
+}
+
+
+module_init(pata_at91_init);
+module_exit(pata_at91_exit);
+
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Driver for CF in True IDE mode on AT91SAM9260 SoC");
+MODULE_AUTHOR("Matyukevich Sergey");
+MODULE_VERSION(DRV_VERSION);
+
diff --git a/drivers/ata/sata_fsl.c b/drivers/ata/sata_fsl.c
index 36b8629..94eaa43 100644
--- a/drivers/ata/sata_fsl.c
+++ b/drivers/ata/sata_fsl.c
@@ -1378,6 +1378,37 @@
return 0;
}
+#ifdef CONFIG_PM
+static int sata_fsl_suspend(struct of_device *op, pm_message_t state)
+{
+ struct ata_host *host = dev_get_drvdata(&op->dev);
+ return ata_host_suspend(host, state);
+}
+
+static int sata_fsl_resume(struct of_device *op)
+{
+ struct ata_host *host = dev_get_drvdata(&op->dev);
+ struct sata_fsl_host_priv *host_priv = host->private_data;
+ int ret;
+ void __iomem *hcr_base = host_priv->hcr_base;
+ struct ata_port *ap = host->ports[0];
+ struct sata_fsl_port_priv *pp = ap->private_data;
+
+ ret = sata_fsl_init_controller(host);
+ if (ret) {
+ dev_printk(KERN_ERR, &op->dev,
+ "Error initialize hardware\n");
+ return ret;
+ }
+
+	/* Recover the CHBA register in host controller cmd register set */
+ iowrite32(pp->cmdslot_paddr & 0xffffffff, hcr_base + CHBA);
+
+ ata_host_resume(host);
+ return 0;
+}
+#endif
+
static struct of_device_id fsl_sata_match[] = {
{
.compatible = "fsl,pq-sata",
@@ -1392,6 +1423,10 @@
.match_table = fsl_sata_match,
.probe = sata_fsl_probe,
.remove = sata_fsl_remove,
+#ifdef CONFIG_PM
+ .suspend = sata_fsl_suspend,
+ .resume = sata_fsl_resume,
+#endif
};
static int __init sata_fsl_init(void)
diff --git a/drivers/char/bfin_jtag_comm.c b/drivers/char/bfin_jtag_comm.c
index 44c113d..1d7c34c 100644
--- a/drivers/char/bfin_jtag_comm.c
+++ b/drivers/char/bfin_jtag_comm.c
@@ -8,6 +8,10 @@
* Licensed under the GPL-2 or later.
*/
+#define DRV_NAME "bfin-jtag-comm"
+#define DEV_NAME "ttyBFJC"
+#define pr_fmt(fmt) DRV_NAME ": " fmt
+
#include <linux/circ_buf.h>
#include <linux/console.h>
#include <linux/delay.h>
@@ -22,18 +26,14 @@
#include <linux/tty_flip.h>
#include <asm/atomic.h>
+#define pr_init(fmt, args...) ({ static const __initconst char __fmt[] = fmt; printk(__fmt, ## args); })
+
/* See the Debug/Emulation chapter in the HRM */
#define EMUDOF 0x00000001 /* EMUDAT_OUT full & valid */
#define EMUDIF 0x00000002 /* EMUDAT_IN full & valid */
#define EMUDOOVF 0x00000004 /* EMUDAT_OUT overflow */
#define EMUDIOVF 0x00000008 /* EMUDAT_IN overflow */
-#define DRV_NAME "bfin-jtag-comm"
-#define DEV_NAME "ttyBFJC"
-
-#define pr_init(fmt, args...) ({ static const __initdata char __fmt[] = fmt; printk(__fmt, ## args); })
-#define debug(fmt, args...) pr_debug(DRV_NAME ": " fmt, ## args)
-
static inline uint32_t bfin_write_emudat(uint32_t emudat)
{
__asm__ __volatile__("emudat = %0;" : : "d"(emudat));
@@ -74,7 +74,7 @@
while (!kthread_should_stop()) {
/* no one left to give data to, so sleep */
if (bfin_jc_tty == NULL && circ_empty(&bfin_jc_write_buf)) {
- debug("waiting for readers\n");
+ pr_debug("waiting for readers\n");
__set_current_state(TASK_UNINTERRUPTIBLE);
schedule();
__set_current_state(TASK_RUNNING);
@@ -82,7 +82,7 @@
/* no data available, so just chill */
if (!(bfin_read_DBGSTAT() & EMUDIF) && circ_empty(&bfin_jc_write_buf)) {
- debug("waiting for data (in_len = %i) (circ: %i %i)\n",
+ pr_debug("waiting for data (in_len = %i) (circ: %i %i)\n",
inbound_len, bfin_jc_write_buf.tail, bfin_jc_write_buf.head);
if (inbound_len)
schedule();
@@ -99,11 +99,11 @@
if (tty != NULL) {
uint32_t emudat = bfin_read_emudat();
if (inbound_len == 0) {
- debug("incoming length: 0x%08x\n", emudat);
+ pr_debug("incoming length: 0x%08x\n", emudat);
inbound_len = emudat;
} else {
size_t num_chars = (4 <= inbound_len ? 4 : inbound_len);
- debug(" incoming data: 0x%08x (pushing %zu)\n", emudat, num_chars);
+ pr_debug(" incoming data: 0x%08x (pushing %zu)\n", emudat, num_chars);
inbound_len -= num_chars;
tty_insert_flip_string(tty, (unsigned char *)&emudat, num_chars);
tty_flip_buffer_push(tty);
@@ -117,7 +117,7 @@
if (outbound_len == 0) {
outbound_len = circ_cnt(&bfin_jc_write_buf);
bfin_write_emudat(outbound_len);
- debug("outgoing length: 0x%08x\n", outbound_len);
+ pr_debug("outgoing length: 0x%08x\n", outbound_len);
} else {
struct tty_struct *tty;
int tail = bfin_jc_write_buf.tail;
@@ -136,7 +136,7 @@
if (tty)
tty_wakeup(tty);
mutex_unlock(&bfin_jc_tty_mutex);
- debug(" outgoing data: 0x%08x (pushing %zu)\n", emudat, ate);
+ pr_debug(" outgoing data: 0x%08x (pushing %zu)\n", emudat, ate);
}
}
}
@@ -149,7 +149,7 @@
bfin_jc_open(struct tty_struct *tty, struct file *filp)
{
mutex_lock(&bfin_jc_tty_mutex);
- debug("open %lu\n", bfin_jc_count);
+ pr_debug("open %lu\n", bfin_jc_count);
++bfin_jc_count;
bfin_jc_tty = tty;
wake_up_process(bfin_jc_kthread);
@@ -161,7 +161,7 @@
bfin_jc_close(struct tty_struct *tty, struct file *filp)
{
mutex_lock(&bfin_jc_tty_mutex);
- debug("close %lu\n", bfin_jc_count);
+ pr_debug("close %lu\n", bfin_jc_count);
if (--bfin_jc_count == 0)
bfin_jc_tty = NULL;
wake_up_process(bfin_jc_kthread);
@@ -174,7 +174,7 @@
{
int i;
count = min(count, circ_free(&bfin_jc_write_buf));
- debug("going to write chunk of %i bytes\n", count);
+ pr_debug("going to write chunk of %i bytes\n", count);
for (i = 0; i < count; ++i)
circ_byte(&bfin_jc_write_buf, bfin_jc_write_buf.head + i) = buf[i];
bfin_jc_write_buf.head += i;
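The bfin_jtag_comm conversion above leans on the kernel's pr_fmt convention: because pr_fmt() is defined before any header that pulls in printk.h, every pr_debug()/pr_*() call in the file is automatically prefixed with DRV_NAME, which is what makes the old local debug() wrapper unnecessary. A minimal sketch of the same pattern, with a made-up module name purely for illustration:

/* example.c - illustrative only, not part of this patch */
#define pr_fmt(fmt) "example-driver: " fmt

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>

static int __init example_init(void)
{
	pr_info("loaded\n");	/* printed as "example-driver: loaded" */
	return 0;
}

static void __exit example_exit(void)
{
	pr_info("unloaded\n");
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");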
diff --git a/drivers/char/moxa.c b/drivers/char/moxa.c
index 6799588..65b6ff2 100644
--- a/drivers/char/moxa.c
+++ b/drivers/char/moxa.c
@@ -1189,11 +1189,6 @@
return -ENODEV;
}
- if (port % MAX_PORTS_PER_BOARD >= brd->numPorts) {
- retval = -ENODEV;
- goto out_unlock;
- }
-
ch = &brd->ports[port % MAX_PORTS_PER_BOARD];
ch->port.count++;
tty->driver_data = ch;
@@ -1218,8 +1213,8 @@
moxa_close_port(tty);
} else
ch->port.flags |= ASYNC_NORMAL_ACTIVE;
-out_unlock:
mutex_unlock(&moxa_openlock);
+
return retval;
}
diff --git a/drivers/char/n_hdlc.c b/drivers/char/n_hdlc.c
index 461ece5..1c43c8c 100644
--- a/drivers/char/n_hdlc.c
+++ b/drivers/char/n_hdlc.c
@@ -10,7 +10,6 @@
* Paul Mackerras <Paul.Mackerras@cs.anu.edu.au>
*
* Original release 01/11/99
- * $Id: n_hdlc.c,v 4.8 2003/05/06 21:18:51 paulkf Exp $
*
* This code is released under the GNU General Public License (GPL)
*
@@ -79,7 +78,6 @@
*/
#define HDLC_MAGIC 0x239e
-#define HDLC_VERSION "$Revision: 4.8 $"
#include <linux/module.h>
#include <linux/init.h>
@@ -114,7 +112,7 @@
#define MAX_HDLC_FRAME_SIZE 65535
#define DEFAULT_RX_BUF_COUNT 10
#define MAX_RX_BUF_COUNT 60
-#define DEFAULT_TX_BUF_COUNT 1
+#define DEFAULT_TX_BUF_COUNT 3
struct n_hdlc_buf {
struct n_hdlc_buf *link;
@@ -199,6 +197,31 @@
#define tty2n_hdlc(tty) ((struct n_hdlc *) ((tty)->disc_data))
#define n_hdlc2tty(n_hdlc) ((n_hdlc)->tty)
+static void flush_rx_queue(struct tty_struct *tty)
+{
+ struct n_hdlc *n_hdlc = tty2n_hdlc(tty);
+ struct n_hdlc_buf *buf;
+
+ while ((buf = n_hdlc_buf_get(&n_hdlc->rx_buf_list)))
+ n_hdlc_buf_put(&n_hdlc->rx_free_buf_list, buf);
+}
+
+static void flush_tx_queue(struct tty_struct *tty)
+{
+ struct n_hdlc *n_hdlc = tty2n_hdlc(tty);
+ struct n_hdlc_buf *buf;
+ unsigned long flags;
+
+ while ((buf = n_hdlc_buf_get(&n_hdlc->tx_buf_list)))
+ n_hdlc_buf_put(&n_hdlc->tx_free_buf_list, buf);
+ spin_lock_irqsave(&n_hdlc->tx_buf_list.spinlock, flags);
+ if (n_hdlc->tbuf) {
+ n_hdlc_buf_put(&n_hdlc->tx_free_buf_list, n_hdlc->tbuf);
+ n_hdlc->tbuf = NULL;
+ }
+ spin_unlock_irqrestore(&n_hdlc->tx_buf_list.spinlock, flags);
+}
+
static struct tty_ldisc_ops n_hdlc_ldisc = {
.owner = THIS_MODULE,
.magic = TTY_LDISC_MAGIC,
@@ -211,6 +234,7 @@
.poll = n_hdlc_tty_poll,
.receive_buf = n_hdlc_tty_receive,
.write_wakeup = n_hdlc_tty_wakeup,
+ .flush_buffer = flush_rx_queue,
};
/**
@@ -341,10 +365,7 @@
set_bit(TTY_NO_WRITE_SPLIT,&tty->flags);
#endif
- /* Flush any pending characters in the driver and discipline. */
- if (tty->ldisc->ops->flush_buffer)
- tty->ldisc->ops->flush_buffer(tty);
-
+ /* flush receive data from driver */
tty_driver_flush_buffer(tty);
if (debuglevel >= DEBUG_LEVEL_INFO)
@@ -763,6 +784,14 @@
error = put_user(count, (int __user *)arg);
break;
+ case TCFLSH:
+ switch (arg) {
+ case TCIOFLUSH:
+ case TCOFLUSH:
+ flush_tx_queue(tty);
+ }
+ /* fall through to default */
+
default:
error = n_tty_ioctl_helper(tty, file, cmd, arg);
break;
@@ -919,8 +948,7 @@
} /* end of n_hdlc_buf_get() */
static char hdlc_banner[] __initdata =
- KERN_INFO "HDLC line discipline: version " HDLC_VERSION
- ", maxframe=%u\n";
+ KERN_INFO "HDLC line discipline maxframe=%u\n";
static char hdlc_register_ok[] __initdata =
KERN_INFO "N_HDLC line discipline registered.\n";
static char hdlc_register_fail[] __initdata =
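With the TCFLSH handling added above, a user-space program can now discard HDLC frames that are queued but not yet transmitted in the same way it flushes an ordinary tty. A hedged sketch of how an application might exercise the new flush_tx_queue() path (the device node is only an example, and the tty is assumed to already have the N_HDLC discipline set):

#include <fcntl.h>
#include <stdio.h>
#include <termios.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/dev/ttyS0", O_RDWR | O_NOCTTY);	/* example device */

	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* tcflush() issues ioctl(fd, TCFLSH, TCOFLUSH); with N_HDLC this now
	 * drops any frames still sitting in tx_buf_list. */
	if (tcflush(fd, TCOFLUSH) < 0)
		perror("tcflush");

	close(fd);
	return 0;
}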
diff --git a/drivers/char/n_r3964.c b/drivers/char/n_r3964.c
index d2e93e3..2e99158 100644
--- a/drivers/char/n_r3964.c
+++ b/drivers/char/n_r3964.c
@@ -1062,7 +1062,7 @@
struct r3964_client_info *pClient;
struct r3964_message *pMsg;
struct r3964_client_message theMsg;
- int count;
+ int ret;
TRACE_L("read()");
@@ -1074,8 +1074,8 @@
if (pMsg == NULL) {
/* no messages available. */
if (file->f_flags & O_NONBLOCK) {
- unlock_kernel();
- return -EAGAIN;
+ ret = -EAGAIN;
+ goto unlock;
}
/* block until there is a message: */
wait_event_interruptible(pInfo->read_wait,
@@ -1085,29 +1085,31 @@
/* If we still haven't got a message, we must have been signalled */
if (!pMsg) {
- unlock_kernel();
- return -EINTR;
+ ret = -EINTR;
+ goto unlock;
}
/* deliver msg to client process: */
theMsg.msg_id = pMsg->msg_id;
theMsg.arg = pMsg->arg;
theMsg.error_code = pMsg->error_code;
- count = sizeof(struct r3964_client_message);
+ ret = sizeof(struct r3964_client_message);
kfree(pMsg);
TRACE_M("r3964_read - msg kfree %p", pMsg);
- if (copy_to_user(buf, &theMsg, count)) {
- unlock_kernel();
- return -EFAULT;
+ if (copy_to_user(buf, &theMsg, ret)) {
+ ret = -EFAULT;
+ goto unlock;
}
- TRACE_PS("read - return %d", count);
- return count;
+ TRACE_PS("read - return %d", ret);
+ goto unlock;
}
+ ret = -EPERM;
+unlock:
unlock_kernel();
- return -EPERM;
+ return ret;
}
static ssize_t r3964_write(struct tty_struct *tty, struct file *file,
diff --git a/drivers/char/pcmcia/cm4000_cs.c b/drivers/char/pcmcia/cm4000_cs.c
index dbb9125..881934c 100644
--- a/drivers/char/pcmcia/cm4000_cs.c
+++ b/drivers/char/pcmcia/cm4000_cs.c
@@ -1575,7 +1575,8 @@
clear_bit(LOCK_IO, &dev->flags);
wake_up_interruptible(&dev->ioq);
- return 0;
+ rc = 0;
+ break;
case CM_IOCSPTS:
{
struct ptsreq krnptsreq;
diff --git a/drivers/char/vt_ioctl.c b/drivers/char/vt_ioctl.c
index e6ce632..7539bed 100644
--- a/drivers/char/vt_ioctl.c
+++ b/drivers/char/vt_ioctl.c
@@ -396,7 +396,8 @@
kbd = kbd_table + console;
switch (cmd) {
case TIOCLINUX:
- return tioclinux(tty, arg);
+ ret = tioclinux(tty, arg);
+ break;
case KIOCSOUND:
if (!perm)
goto eperm;
diff --git a/drivers/crypto/padlock-aes.c b/drivers/crypto/padlock-aes.c
index 87f92c3..a9952b1 100644
--- a/drivers/crypto/padlock-aes.c
+++ b/drivers/crypto/padlock-aes.c
@@ -18,9 +18,22 @@
#include <linux/percpu.h>
#include <linux/smp.h>
#include <asm/byteorder.h>
+#include <asm/processor.h>
#include <asm/i387.h>
#include "padlock.h"
+/*
+ * Number of data blocks actually fetched for each xcrypt insn.
+ * Processors with prefetch errata will fetch extra blocks.
+ */
+static unsigned int ecb_fetch_blocks = 2;
+#define MAX_ECB_FETCH_BLOCKS (8)
+#define ecb_fetch_bytes (ecb_fetch_blocks * AES_BLOCK_SIZE)
+
+static unsigned int cbc_fetch_blocks = 1;
+#define MAX_CBC_FETCH_BLOCKS (4)
+#define cbc_fetch_bytes (cbc_fetch_blocks * AES_BLOCK_SIZE)
+
/* Control word. */
struct cword {
unsigned int __attribute__ ((__packed__))
@@ -172,73 +185,111 @@
* should be used only inside the irq_ts_save/restore() context
*/
-static inline void padlock_xcrypt(const u8 *input, u8 *output, void *key,
- struct cword *control_word)
+static inline void rep_xcrypt_ecb(const u8 *input, u8 *output, void *key,
+ struct cword *control_word, int count)
{
asm volatile (".byte 0xf3,0x0f,0xa7,0xc8" /* rep xcryptecb */
: "+S"(input), "+D"(output)
- : "d"(control_word), "b"(key), "c"(1));
+ : "d"(control_word), "b"(key), "c"(count));
}
-static void aes_crypt_copy(const u8 *in, u8 *out, u32 *key, struct cword *cword)
+static inline u8 *rep_xcrypt_cbc(const u8 *input, u8 *output, void *key,
+ u8 *iv, struct cword *control_word, int count)
{
- u8 buf[AES_BLOCK_SIZE * 2 + PADLOCK_ALIGNMENT - 1];
+ asm volatile (".byte 0xf3,0x0f,0xa7,0xd0" /* rep xcryptcbc */
+ : "+S" (input), "+D" (output), "+a" (iv)
+ : "d" (control_word), "b" (key), "c" (count));
+ return iv;
+}
+
+static void ecb_crypt_copy(const u8 *in, u8 *out, u32 *key,
+ struct cword *cword, int count)
+{
+ /*
+ * Padlock prefetches extra data so we must provide mapped input buffers.
+ * Assume there are at least 16 bytes of stack already in use.
+ */
+ u8 buf[AES_BLOCK_SIZE * (MAX_ECB_FETCH_BLOCKS - 1) + PADLOCK_ALIGNMENT - 1];
u8 *tmp = PTR_ALIGN(&buf[0], PADLOCK_ALIGNMENT);
- memcpy(tmp, in, AES_BLOCK_SIZE);
- padlock_xcrypt(tmp, out, key, cword);
+ memcpy(tmp, in, count * AES_BLOCK_SIZE);
+ rep_xcrypt_ecb(tmp, out, key, cword, count);
}
-static inline void aes_crypt(const u8 *in, u8 *out, u32 *key,
- struct cword *cword)
+static u8 *cbc_crypt_copy(const u8 *in, u8 *out, u32 *key,
+ u8 *iv, struct cword *cword, int count)
{
- /* padlock_xcrypt requires at least two blocks of data. */
- if (unlikely(!(((unsigned long)in ^ (PAGE_SIZE - AES_BLOCK_SIZE)) &
- (PAGE_SIZE - 1)))) {
- aes_crypt_copy(in, out, key, cword);
+ /*
+ * Padlock prefetches extra data so we must provide mapped input buffers.
+ * Assume there are at least 16 bytes of stack already in use.
+ */
+ u8 buf[AES_BLOCK_SIZE * (MAX_CBC_FETCH_BLOCKS - 1) + PADLOCK_ALIGNMENT - 1];
+ u8 *tmp = PTR_ALIGN(&buf[0], PADLOCK_ALIGNMENT);
+
+ memcpy(tmp, in, count * AES_BLOCK_SIZE);
+ return rep_xcrypt_cbc(tmp, out, key, iv, cword, count);
+}
+
+static inline void ecb_crypt(const u8 *in, u8 *out, u32 *key,
+ struct cword *cword, int count)
+{
+ /* Padlock in ECB mode fetches at least ecb_fetch_bytes of data.
+ * We could avoid some copying here but it's probably not worth it.
+ */
+	if (unlikely(((unsigned long)in & ~PAGE_MASK) + ecb_fetch_bytes > PAGE_SIZE)) {
+ ecb_crypt_copy(in, out, key, cword, count);
return;
}
- padlock_xcrypt(in, out, key, cword);
+ rep_xcrypt_ecb(in, out, key, cword, count);
+}
+
+static inline u8 *cbc_crypt(const u8 *in, u8 *out, u32 *key,
+ u8 *iv, struct cword *cword, int count)
+{
+ /* Padlock in CBC mode fetches at least cbc_fetch_bytes of data. */
+	if (unlikely(((unsigned long)in & ~PAGE_MASK) + cbc_fetch_bytes > PAGE_SIZE))
+ return cbc_crypt_copy(in, out, key, iv, cword, count);
+
+ return rep_xcrypt_cbc(in, out, key, iv, cword, count);
}
static inline void padlock_xcrypt_ecb(const u8 *input, u8 *output, void *key,
void *control_word, u32 count)
{
- if (count == 1) {
- aes_crypt(input, output, key, control_word);
+ u32 initial = count & (ecb_fetch_blocks - 1);
+
+ if (count < ecb_fetch_blocks) {
+ ecb_crypt(input, output, key, control_word, count);
return;
}
- asm volatile ("test $1, %%cl;"
- "je 1f;"
-#ifndef CONFIG_X86_64
- "lea -1(%%ecx), %%eax;"
- "mov $1, %%ecx;"
-#else
- "lea -1(%%rcx), %%rax;"
- "mov $1, %%rcx;"
-#endif
- ".byte 0xf3,0x0f,0xa7,0xc8;" /* rep xcryptecb */
-#ifndef CONFIG_X86_64
- "mov %%eax, %%ecx;"
-#else
- "mov %%rax, %%rcx;"
-#endif
- "1:"
- ".byte 0xf3,0x0f,0xa7,0xc8" /* rep xcryptecb */
+ if (initial)
+ asm volatile (".byte 0xf3,0x0f,0xa7,0xc8" /* rep xcryptecb */
+ : "+S"(input), "+D"(output)
+ : "d"(control_word), "b"(key), "c"(initial));
+
+ asm volatile (".byte 0xf3,0x0f,0xa7,0xc8" /* rep xcryptecb */
: "+S"(input), "+D"(output)
- : "d"(control_word), "b"(key), "c"(count)
- : "ax");
+ : "d"(control_word), "b"(key), "c"(count - initial));
}
static inline u8 *padlock_xcrypt_cbc(const u8 *input, u8 *output, void *key,
u8 *iv, void *control_word, u32 count)
{
- /* rep xcryptcbc */
- asm volatile (".byte 0xf3,0x0f,0xa7,0xd0"
+ u32 initial = count & (cbc_fetch_blocks - 1);
+
+ if (count < cbc_fetch_blocks)
+ return cbc_crypt(input, output, key, iv, control_word, count);
+
+ if (initial)
+ asm volatile (".byte 0xf3,0x0f,0xa7,0xd0" /* rep xcryptcbc */
+ : "+S" (input), "+D" (output), "+a" (iv)
+ : "d" (control_word), "b" (key), "c" (count));
+
+ asm volatile (".byte 0xf3,0x0f,0xa7,0xd0" /* rep xcryptcbc */
: "+S" (input), "+D" (output), "+a" (iv)
- : "d" (control_word), "b" (key), "c" (count));
+ : "d" (control_word), "b" (key), "c" (count-initial));
return iv;
}
@@ -249,7 +300,7 @@
padlock_reset_key(&ctx->cword.encrypt);
ts_state = irq_ts_save();
- aes_crypt(in, out, ctx->E, &ctx->cword.encrypt);
+ ecb_crypt(in, out, ctx->E, &ctx->cword.encrypt, 1);
irq_ts_restore(ts_state);
padlock_store_cword(&ctx->cword.encrypt);
}
@@ -261,7 +312,7 @@
padlock_reset_key(&ctx->cword.encrypt);
ts_state = irq_ts_save();
- aes_crypt(in, out, ctx->D, &ctx->cword.decrypt);
+ ecb_crypt(in, out, ctx->D, &ctx->cword.decrypt, 1);
irq_ts_restore(ts_state);
padlock_store_cword(&ctx->cword.encrypt);
}
@@ -454,6 +505,7 @@
static int __init padlock_init(void)
{
int ret;
+ struct cpuinfo_x86 *c = &cpu_data(0);
if (!cpu_has_xcrypt) {
printk(KERN_NOTICE PFX "VIA PadLock not detected.\n");
@@ -476,6 +528,12 @@
printk(KERN_NOTICE PFX "Using VIA PadLock ACE for AES algorithm.\n");
+ if (c->x86 == 6 && c->x86_model == 15 && c->x86_mask == 2) {
+ ecb_fetch_blocks = MAX_ECB_FETCH_BLOCKS;
+ cbc_fetch_blocks = MAX_CBC_FETCH_BLOCKS;
+ printk(KERN_NOTICE PFX "VIA Nano stepping 2 detected: enabling workaround.\n");
+ }
+
out:
return ret;
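The padlock change above splits every request into a leading partial chunk of count & (fetch_blocks - 1) blocks followed by whole multiples of the fetch size, so the processor's over-fetch never reads past the end of the caller's buffer; requests near a page boundary are additionally bounced through an on-stack buffer. A stand-alone sketch of just the chunking arithmetic (the counts are arbitrary example values, and fetch_blocks is assumed to be a power of two as in the driver):

#include <stdio.h>

/* Mirrors the ECB chunk splitting used above, for illustration only. */
static void split_blocks(unsigned int count, unsigned int fetch_blocks)
{
	unsigned int initial = count & (fetch_blocks - 1);

	if (count < fetch_blocks) {
		printf("%u block(s): handled as one short request\n", count);
		return;
	}
	if (initial)
		printf("first pass: %u block(s)\n", initial);
	printf("main pass: %u block(s)\n", count - initial);
}

int main(void)
{
	split_blocks(13, 8);	/* Nano workaround enabled: 5 blocks, then 8 */
	split_blocks(16, 8);	/* exact multiple: one pass of 16 */
	split_blocks(3, 8);	/* short request */
	return 0;
}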
diff --git a/drivers/firewire/Kconfig b/drivers/firewire/Kconfig
index 4509024..13efcd3 100644
--- a/drivers/firewire/Kconfig
+++ b/drivers/firewire/Kconfig
@@ -1,28 +1,29 @@
-comment "A new alternative FireWire stack is available with EXPERIMENTAL=y"
- depends on EXPERIMENTAL=n
-
-comment "Enable only one of the two stacks, unless you know what you are doing"
- depends on EXPERIMENTAL
+comment "You can enable one or both FireWire driver stacks."
+comment "See the help texts for more information."
config FIREWIRE
- tristate "New FireWire stack, EXPERIMENTAL"
- depends on EXPERIMENTAL
+ tristate "FireWire driver stack"
select CRC_ITU_T
help
- This is the "Juju" FireWire stack, a new alternative implementation
- designed for robustness and simplicity. You can build either this
- stack, or the old stack (the ieee1394 driver, ohci1394 etc.) or both.
- Please read http://ieee1394.wiki.kernel.org/index.php/Juju_Migration
- before you enable the new stack.
+ This is the new-generation IEEE 1394 (FireWire) driver stack
+ a.k.a. Juju, a new implementation designed for robustness and
+ simplicity.
+ See http://ieee1394.wiki.kernel.org/index.php/Juju_Migration
+ for information about migration from the older Linux 1394 stack
+ to the new driver stack.
To compile this driver as a module, say M here: the module will be
called firewire-core.
This module functionally replaces ieee1394, raw1394, and video1394.
To access it from application programs, you generally need at least
- libraw1394 version 2. IIDC/DCAM applications also need libdc1394
- version 2. No libraries are required to access storage devices
- through the firewire-sbp2 driver.
+ libraw1394 v2. IIDC/DCAM applications need libdc1394 v2.
+ No libraries are required to access storage devices through the
+ firewire-sbp2 driver.
+
+ NOTE:
+ FireWire audio devices currently require the old drivers (ieee1394,
+ ohci1394, raw1394).
config FIREWIRE_OHCI
tristate "OHCI-1394 controllers"
@@ -37,11 +38,9 @@
stack.
NOTE:
-
- You should only build either firewire-ohci or the old ohci1394 driver,
- but not both. If you nevertheless want to install both, you should
- configure them only as modules and blacklist the driver(s) which you
- don't want to have auto-loaded. Add either
+ If you want to install firewire-ohci and ohci1394 together, you
+ should configure them only as modules and blacklist the driver(s)
+ which you don't want to have auto-loaded. Add either
blacklist firewire-ohci
or
@@ -50,12 +49,7 @@
blacklist dv1394
to /etc/modprobe.conf or /etc/modprobe.d/* and update modprobe.conf
- depending on your distribution. The latter two modules should be
- blacklisted together with ohci1394 because they depend on ohci1394.
-
- If you have an old modprobe which doesn't implement the blacklist
- directive, use "install modulename /bin/true" for the modules to be
- blacklisted.
+ depending on your distribution.
config FIREWIRE_OHCI_DEBUG
bool
@@ -77,3 +71,17 @@
You should also enable support for disks, CD-ROMs, etc. in the SCSI
configuration section.
+
+config FIREWIRE_NET
+ tristate "IP networking over 1394 (EXPERIMENTAL)"
+ depends on FIREWIRE && INET && EXPERIMENTAL
+ help
+ This enables IPv4 over IEEE 1394, providing IP connectivity with
+ other implementations of RFC 2734 as found on several operating
+ systems. Multicast support is currently limited.
+
+ NOTE, this driver is not stable yet!
+
+ To compile this driver as a module, say M here: The module will be
+ called firewire-net. It replaces eth1394 of the classic IEEE 1394
+ stack.
diff --git a/drivers/firewire/Makefile b/drivers/firewire/Makefile
index bc3b9bf..a8f9bb6 100644
--- a/drivers/firewire/Makefile
+++ b/drivers/firewire/Makefile
@@ -6,7 +6,9 @@
core-iso.o core-topology.o core-transaction.o
firewire-ohci-y += ohci.o
firewire-sbp2-y += sbp2.o
+firewire-net-y += net.o
-obj-$(CONFIG_FIREWIRE) += firewire-core.o
+obj-$(CONFIG_FIREWIRE) += firewire-core.o
obj-$(CONFIG_FIREWIRE_OHCI) += firewire-ohci.o
obj-$(CONFIG_FIREWIRE_SBP2) += firewire-sbp2.o
+obj-$(CONFIG_FIREWIRE_NET) += firewire-net.o
diff --git a/drivers/firewire/core-card.c b/drivers/firewire/core-card.c
index 4c1be64..543fcca 100644
--- a/drivers/firewire/core-card.c
+++ b/drivers/firewire/core-card.c
@@ -176,6 +176,7 @@
return 0;
}
+EXPORT_SYMBOL(fw_core_add_descriptor);
void fw_core_remove_descriptor(struct fw_descriptor *desc)
{
@@ -189,6 +190,7 @@
mutex_unlock(&card_mutex);
}
+EXPORT_SYMBOL(fw_core_remove_descriptor);
static void allocate_broadcast_channel(struct fw_card *card, int generation)
{
@@ -459,11 +461,11 @@
/*
- * The next few functions implements a dummy driver that use once a
- * card driver shuts down an fw_card. This allows the driver to
- * cleanly unload, as all IO to the card will be handled by the dummy
- * driver instead of calling into the (possibly) unloaded module. The
- * dummy driver just fails all IO.
+ * The next few functions implement a dummy driver that is used once a card
+ * driver shuts down an fw_card. This allows the driver to cleanly unload,
+ * as all IO to the card will be handled (and failed) by the dummy driver
+ * instead of calling into the module. Only functions for iso context
+ * shutdown still need to be provided by the card driver.
*/
static int dummy_enable(struct fw_card *card, u32 *config_rom, size_t length)
@@ -510,7 +512,7 @@
return -ENODEV;
}
-static struct fw_card_driver dummy_driver = {
+static const struct fw_card_driver dummy_driver_template = {
.enable = dummy_enable,
.update_phy_reg = dummy_update_phy_reg,
.set_config_rom = dummy_set_config_rom,
@@ -529,6 +531,8 @@
void fw_core_remove_card(struct fw_card *card)
{
+ struct fw_card_driver dummy_driver = dummy_driver_template;
+
card->driver->update_phy_reg(card, 4,
PHY_LINK_ACTIVE | PHY_CONTENDER, 0);
fw_core_initiate_bus_reset(card, 1);
@@ -537,7 +541,9 @@
list_del_init(&card->link);
mutex_unlock(&card_mutex);
- /* Set up the dummy driver. */
+ /* Switch off most of the card driver interface. */
+ dummy_driver.free_iso_context = card->driver->free_iso_context;
+ dummy_driver.stop_iso = card->driver->stop_iso;
card->driver = &dummy_driver;
fw_destroy_nodes(card);
diff --git a/drivers/firewire/core-iso.c b/drivers/firewire/core-iso.c
index 28076c8..166f19c 100644
--- a/drivers/firewire/core-iso.c
+++ b/drivers/firewire/core-iso.c
@@ -71,7 +71,7 @@
for (j = 0; j < i; j++) {
address = page_private(buffer->pages[j]);
dma_unmap_page(card->device, address,
- PAGE_SIZE, DMA_TO_DEVICE);
+ PAGE_SIZE, direction);
__free_page(buffer->pages[j]);
}
kfree(buffer->pages);
@@ -80,6 +80,7 @@
return -ENOMEM;
}
+EXPORT_SYMBOL(fw_iso_buffer_init);
int fw_iso_buffer_map(struct fw_iso_buffer *buffer, struct vm_area_struct *vma)
{
@@ -107,13 +108,14 @@
for (i = 0; i < buffer->page_count; i++) {
address = page_private(buffer->pages[i]);
dma_unmap_page(card->device, address,
- PAGE_SIZE, DMA_TO_DEVICE);
+ PAGE_SIZE, buffer->direction);
__free_page(buffer->pages[i]);
}
kfree(buffer->pages);
buffer->pages = NULL;
}
+EXPORT_SYMBOL(fw_iso_buffer_destroy);
struct fw_iso_context *fw_iso_context_create(struct fw_card *card,
int type, int channel, int speed, size_t header_size,
@@ -136,6 +138,7 @@
return ctx;
}
+EXPORT_SYMBOL(fw_iso_context_create);
void fw_iso_context_destroy(struct fw_iso_context *ctx)
{
@@ -143,12 +146,14 @@
card->driver->free_iso_context(ctx);
}
+EXPORT_SYMBOL(fw_iso_context_destroy);
int fw_iso_context_start(struct fw_iso_context *ctx,
int cycle, int sync, int tags)
{
return ctx->card->driver->start_iso(ctx, cycle, sync, tags);
}
+EXPORT_SYMBOL(fw_iso_context_start);
int fw_iso_context_queue(struct fw_iso_context *ctx,
struct fw_iso_packet *packet,
@@ -159,11 +164,13 @@
return card->driver->queue_iso(ctx, packet, buffer, payload);
}
+EXPORT_SYMBOL(fw_iso_context_queue);
int fw_iso_context_stop(struct fw_iso_context *ctx)
{
return ctx->card->driver->stop_iso(ctx);
}
+EXPORT_SYMBOL(fw_iso_context_stop);
/*
* Isochronous bus resource management (channels, bandwidth), client side
diff --git a/drivers/firewire/core.h b/drivers/firewire/core.h
index 0a25a7b..c3cfc64 100644
--- a/drivers/firewire/core.h
+++ b/drivers/firewire/core.h
@@ -1,7 +1,6 @@
#ifndef _FIREWIRE_CORE_H
#define _FIREWIRE_CORE_H
-#include <linux/dma-mapping.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/idr.h>
@@ -97,17 +96,6 @@
int fw_compute_block_crc(u32 *block);
void fw_schedule_bm_work(struct fw_card *card, unsigned long delay);
-struct fw_descriptor {
- struct list_head link;
- size_t length;
- u32 immediate;
- u32 key;
- const u32 *data;
-};
-
-int fw_core_add_descriptor(struct fw_descriptor *desc);
-void fw_core_remove_descriptor(struct fw_descriptor *desc);
-
/* -cdev */
@@ -130,77 +118,7 @@
/* -iso */
-/*
- * The iso packet format allows for an immediate header/payload part
- * stored in 'header' immediately after the packet info plus an
- * indirect payload part that is pointer to by the 'payload' field.
- * Applications can use one or the other or both to implement simple
- * low-bandwidth streaming (e.g. audio) or more advanced
- * scatter-gather streaming (e.g. assembling video frame automatically).
- */
-struct fw_iso_packet {
- u16 payload_length; /* Length of indirect payload. */
- u32 interrupt:1; /* Generate interrupt on this packet */
- u32 skip:1; /* Set to not send packet at all. */
- u32 tag:2;
- u32 sy:4;
- u32 header_length:8; /* Length of immediate header. */
- u32 header[0];
-};
-
-#define FW_ISO_CONTEXT_TRANSMIT 0
-#define FW_ISO_CONTEXT_RECEIVE 1
-
-#define FW_ISO_CONTEXT_MATCH_TAG0 1
-#define FW_ISO_CONTEXT_MATCH_TAG1 2
-#define FW_ISO_CONTEXT_MATCH_TAG2 4
-#define FW_ISO_CONTEXT_MATCH_TAG3 8
-#define FW_ISO_CONTEXT_MATCH_ALL_TAGS 15
-
-/*
- * An iso buffer is just a set of pages mapped for DMA in the
- * specified direction. Since the pages are to be used for DMA, they
- * are not mapped into the kernel virtual address space. We store the
- * DMA address in the page private. The helper function
- * fw_iso_buffer_map() will map the pages into a given vma.
- */
-struct fw_iso_buffer {
- enum dma_data_direction direction;
- struct page **pages;
- int page_count;
-};
-
-typedef void (*fw_iso_callback_t)(struct fw_iso_context *context,
- u32 cycle, size_t header_length,
- void *header, void *data);
-
-struct fw_iso_context {
- struct fw_card *card;
- int type;
- int channel;
- int speed;
- size_t header_size;
- fw_iso_callback_t callback;
- void *callback_data;
-};
-
-int fw_iso_buffer_init(struct fw_iso_buffer *buffer, struct fw_card *card,
- int page_count, enum dma_data_direction direction);
int fw_iso_buffer_map(struct fw_iso_buffer *buffer, struct vm_area_struct *vma);
-void fw_iso_buffer_destroy(struct fw_iso_buffer *buffer, struct fw_card *card);
-
-struct fw_iso_context *fw_iso_context_create(struct fw_card *card,
- int type, int channel, int speed, size_t header_size,
- fw_iso_callback_t callback, void *callback_data);
-int fw_iso_context_queue(struct fw_iso_context *ctx,
- struct fw_iso_packet *packet,
- struct fw_iso_buffer *buffer,
- unsigned long payload);
-int fw_iso_context_start(struct fw_iso_context *ctx,
- int cycle, int sync, int tags);
-int fw_iso_context_stop(struct fw_iso_context *ctx);
-void fw_iso_context_destroy(struct fw_iso_context *ctx);
-
void fw_iso_resource_manage(struct fw_card *card, int generation,
u64 channels_mask, int *channel, int *bandwidth, bool allocate);
@@ -285,9 +203,4 @@
void fw_send_phy_config(struct fw_card *card,
int node_id, int generation, int gap_count);
-static inline int fw_stream_packet_destination_id(int tag, int channel, int sy)
-{
- return tag << 14 | channel << 8 | sy;
-}
-
#endif /* _FIREWIRE_CORE_H */
diff --git a/drivers/firewire/net.c b/drivers/firewire/net.c
new file mode 100644
index 0000000..a42209a
--- /dev/null
+++ b/drivers/firewire/net.c
@@ -0,0 +1,1655 @@
+/*
+ * IPv4 over IEEE 1394, per RFC 2734
+ *
+ * Copyright (C) 2009 Jay Fenlason <fenlason@redhat.com>
+ *
+ * based on eth1394 by Ben Collins et al
+ */
+
+#include <linux/bug.h>
+#include <linux/device.h>
+#include <linux/ethtool.h>
+#include <linux/firewire.h>
+#include <linux/firewire-constants.h>
+#include <linux/highmem.h>
+#include <linux/in.h>
+#include <linux/ip.h>
+#include <linux/jiffies.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/mutex.h>
+#include <linux/netdevice.h>
+#include <linux/skbuff.h>
+#include <linux/spinlock.h>
+
+#include <asm/unaligned.h>
+#include <net/arp.h>
+
+#define FWNET_MAX_FRAGMENTS 25 /* arbitrary limit */
+#define FWNET_ISO_PAGE_COUNT (PAGE_SIZE < 16 * 1024 ? 4 : 2)
+
+#define IEEE1394_BROADCAST_CHANNEL 31
+#define IEEE1394_ALL_NODES (0xffc0 | 0x003f)
+#define IEEE1394_MAX_PAYLOAD_S100 512
+#define FWNET_NO_FIFO_ADDR (~0ULL)
+
+#define IANA_SPECIFIER_ID 0x00005eU
+#define RFC2734_SW_VERSION 0x000001U
+
+#define IEEE1394_GASP_HDR_SIZE 8
+
+#define RFC2374_UNFRAG_HDR_SIZE 4
+#define RFC2374_FRAG_HDR_SIZE 8
+#define RFC2374_FRAG_OVERHEAD 4
+
+#define RFC2374_HDR_UNFRAG 0 /* unfragmented */
+#define RFC2374_HDR_FIRSTFRAG 1 /* first fragment */
+#define RFC2374_HDR_LASTFRAG 2 /* last fragment */
+#define RFC2374_HDR_INTFRAG 3 /* interior fragment */
+
+#define RFC2734_HW_ADDR_LEN 16
+
+struct rfc2734_arp {
+ __be16 hw_type; /* 0x0018 */
+ __be16 proto_type; /* 0x0806 */
+ u8 hw_addr_len; /* 16 */
+ u8 ip_addr_len; /* 4 */
+ __be16 opcode; /* ARP Opcode */
+ /* Above is exactly the same format as struct arphdr */
+
+ __be64 s_uniq_id; /* Sender's 64bit EUI */
+ u8 max_rec; /* Sender's max packet size */
+ u8 sspd; /* Sender's max speed */
+ __be16 fifo_hi; /* hi 16bits of sender's FIFO addr */
+ __be32 fifo_lo; /* lo 32bits of sender's FIFO addr */
+ __be32 sip; /* Sender's IP Address */
+ __be32 tip; /* IP Address of requested hw addr */
+} __attribute__((packed));
+
+/* This header format is specific to this driver implementation. */
+#define FWNET_ALEN 8
+#define FWNET_HLEN 10
+struct fwnet_header {
+ u8 h_dest[FWNET_ALEN]; /* destination address */
+ __be16 h_proto; /* packet type ID field */
+} __attribute__((packed));
+
+/* IPv4 and IPv6 encapsulation header */
+struct rfc2734_header {
+ u32 w0;
+ u32 w1;
+};
+
+#define fwnet_get_hdr_lf(h) (((h)->w0 & 0xc0000000) >> 30)
+#define fwnet_get_hdr_ether_type(h) (((h)->w0 & 0x0000ffff))
+#define fwnet_get_hdr_dg_size(h) (((h)->w0 & 0x0fff0000) >> 16)
+#define fwnet_get_hdr_fg_off(h) (((h)->w0 & 0x00000fff))
+#define fwnet_get_hdr_dgl(h) (((h)->w1 & 0xffff0000) >> 16)
+
+#define fwnet_set_hdr_lf(lf) ((lf) << 30)
+#define fwnet_set_hdr_ether_type(et) (et)
+#define fwnet_set_hdr_dg_size(dgs) ((dgs) << 16)
+#define fwnet_set_hdr_fg_off(fgo) (fgo)
+
+#define fwnet_set_hdr_dgl(dgl) ((dgl) << 16)
+
+static inline void fwnet_make_uf_hdr(struct rfc2734_header *hdr,
+ unsigned ether_type)
+{
+ hdr->w0 = fwnet_set_hdr_lf(RFC2374_HDR_UNFRAG)
+ | fwnet_set_hdr_ether_type(ether_type);
+}
+
+static inline void fwnet_make_ff_hdr(struct rfc2734_header *hdr,
+ unsigned ether_type, unsigned dg_size, unsigned dgl)
+{
+ hdr->w0 = fwnet_set_hdr_lf(RFC2374_HDR_FIRSTFRAG)
+ | fwnet_set_hdr_dg_size(dg_size)
+ | fwnet_set_hdr_ether_type(ether_type);
+ hdr->w1 = fwnet_set_hdr_dgl(dgl);
+}
+
+static inline void fwnet_make_sf_hdr(struct rfc2734_header *hdr,
+ unsigned lf, unsigned dg_size, unsigned fg_off, unsigned dgl)
+{
+ hdr->w0 = fwnet_set_hdr_lf(lf)
+ | fwnet_set_hdr_dg_size(dg_size)
+ | fwnet_set_hdr_fg_off(fg_off);
+ hdr->w1 = fwnet_set_hdr_dgl(dgl);
+}
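To make the bit layout of struct rfc2734_header concrete, here is a small sketch that packs a first-fragment header the same way fwnet_make_ff_hdr() does and prints the two words; the ether type, datagram size and label are arbitrary example values:

#include <stdio.h>

/* Same packing as the fwnet_set_hdr_* macros above, illustration only. */
int main(void)
{
	unsigned int lf = 1;			/* RFC2374_HDR_FIRSTFRAG */
	unsigned int ether_type = 0x0800;	/* IPv4, example value */
	unsigned int dg_size = 2048;		/* example datagram size */
	unsigned int dgl = 5;			/* example datagram label */
	unsigned int w0, w1;

	w0 = (lf << 30) | (dg_size << 16) | ether_type;
	w1 = dgl << 16;

	printf("w0=0x%08x w1=0x%08x\n", w0, w1);	/* 0x48000800 0x00050000 */
	return 0;
}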
+
+/* This list keeps track of what parts of the datagram have been filled in */
+struct fwnet_fragment_info {
+ struct list_head fi_link;
+ u16 offset;
+ u16 len;
+};
+
+struct fwnet_partial_datagram {
+ struct list_head pd_link;
+ struct list_head fi_list;
+ struct sk_buff *skb;
+ /* FIXME Why not use skb->data? */
+ char *pbuf;
+ u16 datagram_label;
+ u16 ether_type;
+ u16 datagram_size;
+};
+
+static DEFINE_MUTEX(fwnet_device_mutex);
+static LIST_HEAD(fwnet_device_list);
+
+struct fwnet_device {
+ struct list_head dev_link;
+ spinlock_t lock;
+ enum {
+ FWNET_BROADCAST_ERROR,
+ FWNET_BROADCAST_RUNNING,
+ FWNET_BROADCAST_STOPPED,
+ } broadcast_state;
+ struct fw_iso_context *broadcast_rcv_context;
+ struct fw_iso_buffer broadcast_rcv_buffer;
+ void **broadcast_rcv_buffer_ptrs;
+ unsigned broadcast_rcv_next_ptr;
+ unsigned num_broadcast_rcv_ptrs;
+ unsigned rcv_buffer_size;
+ /*
+ * This value is the maximum unfragmented datagram size that can be
+ * sent by the hardware. It already has the GASP overhead and the
+ * unfragmented datagram header overhead calculated into it.
+ */
+ unsigned broadcast_xmt_max_payload;
+ u16 broadcast_xmt_datagramlabel;
+
+ /*
+ * The CSR address that remote nodes must send datagrams to for us to
+ * receive them.
+ */
+ struct fw_address_handler handler;
+ u64 local_fifo;
+
+ /* List of packets to be sent */
+ struct list_head packet_list;
+ /*
+	 * List of packets that were broadcast.  When we get an ISO interrupt,
+	 * one of them has been sent.
+ */
+ struct list_head broadcasted_list;
+ /* List of packets that have been sent but not yet acked */
+ struct list_head sent_list;
+
+ struct list_head peer_list;
+ struct fw_card *card;
+ struct net_device *netdev;
+};
+
+struct fwnet_peer {
+ struct list_head peer_link;
+ struct fwnet_device *dev;
+ u64 guid;
+ u64 fifo;
+
+ /* guarded by dev->lock */
+ struct list_head pd_list; /* received partial datagrams */
+ unsigned pdg_size; /* pd_list size */
+
+ u16 datagram_label; /* outgoing datagram label */
+ unsigned max_payload; /* includes RFC2374_FRAG_HDR_SIZE overhead */
+ int node_id;
+ int generation;
+ unsigned speed;
+};
+
+/* This is our task struct. It's used for the packet complete callback. */
+struct fwnet_packet_task {
+ /*
+ * ptask can actually be on dev->packet_list, dev->broadcasted_list,
+ * or dev->sent_list depending on its current state.
+ */
+ struct list_head pt_link;
+ struct fw_transaction transaction;
+ struct rfc2734_header hdr;
+ struct sk_buff *skb;
+ struct fwnet_device *dev;
+
+ int outstanding_pkts;
+ unsigned max_payload;
+ u64 fifo_addr;
+ u16 dest_node;
+ u8 generation;
+ u8 speed;
+};
+
+/*
+ * saddr == NULL means use device source address.
+ * daddr == NULL means leave destination address (eg unresolved arp).
+ */
+static int fwnet_header_create(struct sk_buff *skb, struct net_device *net,
+ unsigned short type, const void *daddr,
+ const void *saddr, unsigned len)
+{
+ struct fwnet_header *h;
+
+ h = (struct fwnet_header *)skb_push(skb, sizeof(*h));
+ put_unaligned_be16(type, &h->h_proto);
+
+ if (net->flags & (IFF_LOOPBACK | IFF_NOARP)) {
+ memset(h->h_dest, 0, net->addr_len);
+
+ return net->hard_header_len;
+ }
+
+ if (daddr) {
+ memcpy(h->h_dest, daddr, net->addr_len);
+
+ return net->hard_header_len;
+ }
+
+ return -net->hard_header_len;
+}
+
+static int fwnet_header_rebuild(struct sk_buff *skb)
+{
+ struct fwnet_header *h = (struct fwnet_header *)skb->data;
+
+ if (get_unaligned_be16(&h->h_proto) == ETH_P_IP)
+ return arp_find((unsigned char *)&h->h_dest, skb);
+
+ fw_notify("%s: unable to resolve type %04x addresses\n",
+ skb->dev->name, be16_to_cpu(h->h_proto));
+ return 0;
+}
+
+static int fwnet_header_cache(const struct neighbour *neigh,
+ struct hh_cache *hh)
+{
+ struct net_device *net;
+ struct fwnet_header *h;
+
+ if (hh->hh_type == cpu_to_be16(ETH_P_802_3))
+ return -1;
+ net = neigh->dev;
+ h = (struct fwnet_header *)((u8 *)hh->hh_data + 16 - sizeof(*h));
+ h->h_proto = hh->hh_type;
+ memcpy(h->h_dest, neigh->ha, net->addr_len);
+ hh->hh_len = FWNET_HLEN;
+
+ return 0;
+}
+
+/* Called by Address Resolution module to notify changes in address. */
+static void fwnet_header_cache_update(struct hh_cache *hh,
+ const struct net_device *net, const unsigned char *haddr)
+{
+ memcpy((u8 *)hh->hh_data + 16 - FWNET_HLEN, haddr, net->addr_len);
+}
+
+static int fwnet_header_parse(const struct sk_buff *skb, unsigned char *haddr)
+{
+ memcpy(haddr, skb->dev->dev_addr, FWNET_ALEN);
+
+ return FWNET_ALEN;
+}
+
+static const struct header_ops fwnet_header_ops = {
+ .create = fwnet_header_create,
+ .rebuild = fwnet_header_rebuild,
+ .cache = fwnet_header_cache,
+ .cache_update = fwnet_header_cache_update,
+ .parse = fwnet_header_parse,
+};
+
+/* FIXME: is this correct for all cases? */
+static bool fwnet_frag_overlap(struct fwnet_partial_datagram *pd,
+ unsigned offset, unsigned len)
+{
+ struct fwnet_fragment_info *fi;
+ unsigned end = offset + len;
+
+ list_for_each_entry(fi, &pd->fi_list, fi_link)
+ if (offset < fi->offset + fi->len && end > fi->offset)
+ return true;
+
+ return false;
+}
+
+/* Assumes that new fragment does not overlap any existing fragments */
+static struct fwnet_fragment_info *fwnet_frag_new(
+ struct fwnet_partial_datagram *pd, unsigned offset, unsigned len)
+{
+ struct fwnet_fragment_info *fi, *fi2, *new;
+ struct list_head *list;
+
+ list = &pd->fi_list;
+ list_for_each_entry(fi, &pd->fi_list, fi_link) {
+ if (fi->offset + fi->len == offset) {
+ /* The new fragment can be tacked on to the end */
+ /* Did the new fragment plug a hole? */
+ fi2 = list_entry(fi->fi_link.next,
+ struct fwnet_fragment_info, fi_link);
+ if (fi->offset + fi->len == fi2->offset) {
+ /* glue fragments together */
+ fi->len += len + fi2->len;
+ list_del(&fi2->fi_link);
+ kfree(fi2);
+ } else {
+ fi->len += len;
+ }
+
+ return fi;
+ }
+ if (offset + len == fi->offset) {
+ /* The new fragment can be tacked on to the beginning */
+ /* Did the new fragment plug a hole? */
+ fi2 = list_entry(fi->fi_link.prev,
+ struct fwnet_fragment_info, fi_link);
+ if (fi2->offset + fi2->len == fi->offset) {
+ /* glue fragments together */
+ fi2->len += fi->len + len;
+ list_del(&fi->fi_link);
+ kfree(fi);
+
+ return fi2;
+ }
+ fi->offset = offset;
+ fi->len += len;
+
+ return fi;
+ }
+ if (offset > fi->offset + fi->len) {
+ list = &fi->fi_link;
+ break;
+ }
+ if (offset + len < fi->offset) {
+ list = fi->fi_link.prev;
+ break;
+ }
+ }
+
+ new = kmalloc(sizeof(*new), GFP_ATOMIC);
+ if (!new) {
+ fw_error("out of memory\n");
+ return NULL;
+ }
+
+ new->offset = offset;
+ new->len = len;
+ list_add(&new->fi_link, list);
+
+ return new;
+}
+
+static struct fwnet_partial_datagram *fwnet_pd_new(struct net_device *net,
+ struct fwnet_peer *peer, u16 datagram_label, unsigned dg_size,
+ void *frag_buf, unsigned frag_off, unsigned frag_len)
+{
+ struct fwnet_partial_datagram *new;
+ struct fwnet_fragment_info *fi;
+
+ new = kmalloc(sizeof(*new), GFP_ATOMIC);
+ if (!new)
+ goto fail;
+
+ INIT_LIST_HEAD(&new->fi_list);
+ fi = fwnet_frag_new(new, frag_off, frag_len);
+ if (fi == NULL)
+ goto fail_w_new;
+
+ new->datagram_label = datagram_label;
+ new->datagram_size = dg_size;
+ new->skb = dev_alloc_skb(dg_size + net->hard_header_len + 15);
+ if (new->skb == NULL)
+ goto fail_w_fi;
+
+ skb_reserve(new->skb, (net->hard_header_len + 15) & ~15);
+ new->pbuf = skb_put(new->skb, dg_size);
+ memcpy(new->pbuf + frag_off, frag_buf, frag_len);
+ list_add_tail(&new->pd_link, &peer->pd_list);
+
+ return new;
+
+fail_w_fi:
+ kfree(fi);
+fail_w_new:
+ kfree(new);
+fail:
+ fw_error("out of memory\n");
+
+ return NULL;
+}
+
+static struct fwnet_partial_datagram *fwnet_pd_find(struct fwnet_peer *peer,
+ u16 datagram_label)
+{
+ struct fwnet_partial_datagram *pd;
+
+ list_for_each_entry(pd, &peer->pd_list, pd_link)
+ if (pd->datagram_label == datagram_label)
+ return pd;
+
+ return NULL;
+}
+
+
+static void fwnet_pd_delete(struct fwnet_partial_datagram *old)
+{
+ struct fwnet_fragment_info *fi, *n;
+
+ list_for_each_entry_safe(fi, n, &old->fi_list, fi_link)
+ kfree(fi);
+
+ list_del(&old->pd_link);
+ dev_kfree_skb_any(old->skb);
+ kfree(old);
+}
+
+static bool fwnet_pd_update(struct fwnet_peer *peer,
+ struct fwnet_partial_datagram *pd, void *frag_buf,
+ unsigned frag_off, unsigned frag_len)
+{
+ if (fwnet_frag_new(pd, frag_off, frag_len) == NULL)
+ return false;
+
+ memcpy(pd->pbuf + frag_off, frag_buf, frag_len);
+
+ /*
+	 * Move list entry to the end of the list so that the least recently
+	 * updated partial datagrams percolate towards the head of the list,
+	 * where they are evicted first.
+ */
+ list_move_tail(&pd->pd_link, &peer->pd_list);
+
+ return true;
+}
+
+static bool fwnet_pd_is_complete(struct fwnet_partial_datagram *pd)
+{
+ struct fwnet_fragment_info *fi;
+
+ fi = list_entry(pd->fi_list.next, struct fwnet_fragment_info, fi_link);
+
+ return fi->len == pd->datagram_size;
+}
+
+/* caller must hold dev->lock */
+static struct fwnet_peer *fwnet_peer_find_by_guid(struct fwnet_device *dev,
+ u64 guid)
+{
+ struct fwnet_peer *peer;
+
+ list_for_each_entry(peer, &dev->peer_list, peer_link)
+ if (peer->guid == guid)
+ return peer;
+
+ return NULL;
+}
+
+/* caller must hold dev->lock */
+static struct fwnet_peer *fwnet_peer_find_by_node_id(struct fwnet_device *dev,
+ int node_id, int generation)
+{
+ struct fwnet_peer *peer;
+
+ list_for_each_entry(peer, &dev->peer_list, peer_link)
+ if (peer->node_id == node_id &&
+ peer->generation == generation)
+ return peer;
+
+ return NULL;
+}
+
+/* See IEEE 1394-2008 table 6-4, table 8-8, table 16-18. */
+static unsigned fwnet_max_payload(unsigned max_rec, unsigned speed)
+{
+ max_rec = min(max_rec, speed + 8);
+ max_rec = min(max_rec, 0xbU); /* <= 4096 */
+ if (max_rec < 8) {
+ fw_notify("max_rec %x out of range\n", max_rec);
+ max_rec = 8;
+ }
+
+ return (1 << (max_rec + 1)) - RFC2374_FRAG_HDR_SIZE;
+}
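A couple of worked values for fwnet_max_payload() above, using the standard SCODE_* speed codes: a peer advertising max_rec = 10 at S400 (speed code 2) keeps max_rec = 10 and gets (1 << 11) - 8 = 2040 bytes per fragment, while the same peer limited to S100 (speed code 0) is clamped to max_rec = 8 and gets 504 bytes. A hedged stand-alone mirror of the same computation:

#include <stdio.h>

/* Stand-alone mirror of fwnet_max_payload(), for illustration only. */
static unsigned int max_payload(unsigned int max_rec, unsigned int speed)
{
	if (max_rec > speed + 8)
		max_rec = speed + 8;
	if (max_rec > 0xb)
		max_rec = 0xb;		/* cap at 4096-byte blocks */
	if (max_rec < 8)
		max_rec = 8;		/* floor at 512-byte blocks */

	return (1u << (max_rec + 1)) - 8;	/* minus RFC2374_FRAG_HDR_SIZE */
}

int main(void)
{
	printf("%u\n", max_payload(10, 2));	/* S400: 2040 */
	printf("%u\n", max_payload(10, 0));	/* S100: 504 */
	return 0;
}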
+
+
+static int fwnet_finish_incoming_packet(struct net_device *net,
+ struct sk_buff *skb, u16 source_node_id,
+ bool is_broadcast, u16 ether_type)
+{
+ struct fwnet_device *dev;
+ static const __be64 broadcast_hw = cpu_to_be64(~0ULL);
+ int status;
+ __be64 guid;
+
+ dev = netdev_priv(net);
+ /* Write metadata, and then pass to the receive level */
+ skb->dev = net;
+ skb->ip_summed = CHECKSUM_UNNECESSARY; /* don't check it */
+
+ /*
+ * Parse the encapsulation header. This actually does the job of
+ * converting to an ethernet frame header, as well as arp
+ * conversion if needed. ARP conversion is easier in this
+ * direction, since we are using ethernet as our backend.
+ */
+ /*
+ * If this is an ARP packet, convert it. First, we want to make
+ * use of some of the fields, since they tell us a little bit
+ * about the sending machine.
+ */
+ if (ether_type == ETH_P_ARP) {
+ struct rfc2734_arp *arp1394;
+ struct arphdr *arp;
+ unsigned char *arp_ptr;
+ u64 fifo_addr;
+ u64 peer_guid;
+ unsigned sspd;
+ u16 max_payload;
+ struct fwnet_peer *peer;
+ unsigned long flags;
+
+ arp1394 = (struct rfc2734_arp *)skb->data;
+ arp = (struct arphdr *)skb->data;
+ arp_ptr = (unsigned char *)(arp + 1);
+ peer_guid = get_unaligned_be64(&arp1394->s_uniq_id);
+ fifo_addr = (u64)get_unaligned_be16(&arp1394->fifo_hi) << 32
+ | get_unaligned_be32(&arp1394->fifo_lo);
+
+ sspd = arp1394->sspd;
+ /* Sanity check. OS X 10.3 PPC reportedly sends 131. */
+ if (sspd > SCODE_3200) {
+ fw_notify("sspd %x out of range\n", sspd);
+ sspd = SCODE_3200;
+ }
+ max_payload = fwnet_max_payload(arp1394->max_rec, sspd);
+
+ spin_lock_irqsave(&dev->lock, flags);
+ peer = fwnet_peer_find_by_guid(dev, peer_guid);
+ if (peer) {
+ peer->fifo = fifo_addr;
+
+ if (peer->speed > sspd)
+ peer->speed = sspd;
+ if (peer->max_payload > max_payload)
+ peer->max_payload = max_payload;
+ }
+ spin_unlock_irqrestore(&dev->lock, flags);
+
+ if (!peer) {
+ fw_notify("No peer for ARP packet from %016llx\n",
+ (unsigned long long)peer_guid);
+ goto failed_proto;
+ }
+
+ /*
+ * Now that we're done with the 1394 specific stuff, we'll
+		 * need to alter some of the data.  Believe it or not, all
+		 * that needs to be done is: the sender IP address is moved,
+		 * the destination hardware address is stuffed in, and the
+		 * hardware address length is set to 8.
+ *
+ * IMPORTANT: The code below overwrites 1394 specific data
+ * needed above so keep the munging of the data for the
+ * higher level IP stack last.
+ */
+
+ arp->ar_hln = 8;
+ /* skip over sender unique id */
+ arp_ptr += arp->ar_hln;
+ /* move sender IP addr */
+ put_unaligned(arp1394->sip, (u32 *)arp_ptr);
+ /* skip over sender IP addr */
+ arp_ptr += arp->ar_pln;
+
+ if (arp->ar_op == htons(ARPOP_REQUEST))
+ memset(arp_ptr, 0, sizeof(u64));
+ else
+ memcpy(arp_ptr, net->dev_addr, sizeof(u64));
+ }
+
+ /* Now add the ethernet header. */
+ guid = cpu_to_be64(dev->card->guid);
+ if (dev_hard_header(skb, net, ether_type,
+ is_broadcast ? &broadcast_hw : &guid,
+ NULL, skb->len) >= 0) {
+ struct fwnet_header *eth;
+ u16 *rawp;
+ __be16 protocol;
+
+ skb_reset_mac_header(skb);
+ skb_pull(skb, sizeof(*eth));
+ eth = (struct fwnet_header *)skb_mac_header(skb);
+ if (*eth->h_dest & 1) {
+ if (memcmp(eth->h_dest, net->broadcast,
+ net->addr_len) == 0)
+ skb->pkt_type = PACKET_BROADCAST;
+#if 0
+ else
+ skb->pkt_type = PACKET_MULTICAST;
+#endif
+ } else {
+ if (memcmp(eth->h_dest, net->dev_addr, net->addr_len))
+ skb->pkt_type = PACKET_OTHERHOST;
+ }
+ if (ntohs(eth->h_proto) >= 1536) {
+ protocol = eth->h_proto;
+ } else {
+ rawp = (u16 *)skb->data;
+ if (*rawp == 0xffff)
+ protocol = htons(ETH_P_802_3);
+ else
+ protocol = htons(ETH_P_802_2);
+ }
+ skb->protocol = protocol;
+ }
+ status = netif_rx(skb);
+ if (status == NET_RX_DROP) {
+ net->stats.rx_errors++;
+ net->stats.rx_dropped++;
+ } else {
+ net->stats.rx_packets++;
+ net->stats.rx_bytes += skb->len;
+ }
+ if (netif_queue_stopped(net))
+ netif_wake_queue(net);
+
+ return 0;
+
+ failed_proto:
+ net->stats.rx_errors++;
+ net->stats.rx_dropped++;
+
+ dev_kfree_skb_any(skb);
+ if (netif_queue_stopped(net))
+ netif_wake_queue(net);
+
+ net->last_rx = jiffies;
+
+ return 0;
+}
+
+static int fwnet_incoming_packet(struct fwnet_device *dev, __be32 *buf, int len,
+ int source_node_id, int generation,
+ bool is_broadcast)
+{
+ struct sk_buff *skb;
+ struct net_device *net = dev->netdev;
+ struct rfc2734_header hdr;
+ unsigned lf;
+ unsigned long flags;
+ struct fwnet_peer *peer;
+ struct fwnet_partial_datagram *pd;
+ int fg_off;
+ int dg_size;
+ u16 datagram_label;
+ int retval;
+ u16 ether_type;
+
+ hdr.w0 = be32_to_cpu(buf[0]);
+ lf = fwnet_get_hdr_lf(&hdr);
+ if (lf == RFC2374_HDR_UNFRAG) {
+ /*
+ * An unfragmented datagram has been received by the ieee1394
+ * bus. Build an skbuff around it so we can pass it to the
+ * high level network layer.
+ */
+ ether_type = fwnet_get_hdr_ether_type(&hdr);
+ buf++;
+ len -= RFC2374_UNFRAG_HDR_SIZE;
+
+ skb = dev_alloc_skb(len + net->hard_header_len + 15);
+ if (unlikely(!skb)) {
+ fw_error("out of memory\n");
+ net->stats.rx_dropped++;
+
+ return -1;
+ }
+ skb_reserve(skb, (net->hard_header_len + 15) & ~15);
+ memcpy(skb_put(skb, len), buf, len);
+
+ return fwnet_finish_incoming_packet(net, skb, source_node_id,
+ is_broadcast, ether_type);
+ }
+ /* A datagram fragment has been received, now the fun begins. */
+	hdr.w1 = be32_to_cpu(buf[1]);
+ buf += 2;
+ len -= RFC2374_FRAG_HDR_SIZE;
+ if (lf == RFC2374_HDR_FIRSTFRAG) {
+ ether_type = fwnet_get_hdr_ether_type(&hdr);
+ fg_off = 0;
+ } else {
+ ether_type = 0;
+ fg_off = fwnet_get_hdr_fg_off(&hdr);
+ }
+ datagram_label = fwnet_get_hdr_dgl(&hdr);
+ dg_size = fwnet_get_hdr_dg_size(&hdr); /* ??? + 1 */
+
+ spin_lock_irqsave(&dev->lock, flags);
+
+ peer = fwnet_peer_find_by_node_id(dev, source_node_id, generation);
+ if (!peer)
+ goto bad_proto;
+
+ pd = fwnet_pd_find(peer, datagram_label);
+ if (pd == NULL) {
+ while (peer->pdg_size >= FWNET_MAX_FRAGMENTS) {
+ /* remove the oldest */
+ fwnet_pd_delete(list_first_entry(&peer->pd_list,
+ struct fwnet_partial_datagram, pd_link));
+ peer->pdg_size--;
+ }
+ pd = fwnet_pd_new(net, peer, datagram_label,
+ dg_size, buf, fg_off, len);
+ if (pd == NULL) {
+ retval = -ENOMEM;
+ goto bad_proto;
+ }
+ peer->pdg_size++;
+ } else {
+ if (fwnet_frag_overlap(pd, fg_off, len) ||
+ pd->datagram_size != dg_size) {
+ /*
+ * Differing datagram sizes or overlapping fragments,
+ * discard old datagram and start a new one.
+ */
+ fwnet_pd_delete(pd);
+ pd = fwnet_pd_new(net, peer, datagram_label,
+ dg_size, buf, fg_off, len);
+ if (pd == NULL) {
+ retval = -ENOMEM;
+ peer->pdg_size--;
+ goto bad_proto;
+ }
+ } else {
+ if (!fwnet_pd_update(peer, pd, buf, fg_off, len)) {
+ /*
+ * Couldn't save off fragment anyway
+ * so might as well obliterate the
+ * datagram now.
+ */
+ fwnet_pd_delete(pd);
+ peer->pdg_size--;
+ goto bad_proto;
+ }
+ }
+ } /* new datagram or add to existing one */
+
+ if (lf == RFC2374_HDR_FIRSTFRAG)
+ pd->ether_type = ether_type;
+
+ if (fwnet_pd_is_complete(pd)) {
+ ether_type = pd->ether_type;
+ peer->pdg_size--;
+ skb = skb_get(pd->skb);
+ fwnet_pd_delete(pd);
+
+ spin_unlock_irqrestore(&dev->lock, flags);
+
+ return fwnet_finish_incoming_packet(net, skb, source_node_id,
+ false, ether_type);
+ }
+ /*
+ * Datagram is not complete, we're done for the
+ * moment.
+ */
+ spin_unlock_irqrestore(&dev->lock, flags);
+
+ return 0;
+
+ bad_proto:
+ spin_unlock_irqrestore(&dev->lock, flags);
+
+ if (netif_queue_stopped(net))
+ netif_wake_queue(net);
+
+ return 0;
+}
+
+static void fwnet_receive_packet(struct fw_card *card, struct fw_request *r,
+ int tcode, int destination, int source, int generation,
+ int speed, unsigned long long offset, void *payload,
+ size_t length, void *callback_data)
+{
+ struct fwnet_device *dev = callback_data;
+ int rcode;
+
+ if (destination == IEEE1394_ALL_NODES) {
+ kfree(r);
+
+ return;
+ }
+
+ if (offset != dev->handler.offset)
+ rcode = RCODE_ADDRESS_ERROR;
+ else if (tcode != TCODE_WRITE_BLOCK_REQUEST)
+ rcode = RCODE_TYPE_ERROR;
+ else if (fwnet_incoming_packet(dev, payload, length,
+ source, generation, false) != 0) {
+ fw_error("Incoming packet failure\n");
+ rcode = RCODE_CONFLICT_ERROR;
+ } else
+ rcode = RCODE_COMPLETE;
+
+ fw_send_response(card, r, rcode);
+}
+
+static void fwnet_receive_broadcast(struct fw_iso_context *context,
+ u32 cycle, size_t header_length, void *header, void *data)
+{
+ struct fwnet_device *dev;
+ struct fw_iso_packet packet;
+ struct fw_card *card;
+ __be16 *hdr_ptr;
+ __be32 *buf_ptr;
+ int retval;
+ u32 length;
+ u16 source_node_id;
+ u32 specifier_id;
+ u32 ver;
+ unsigned long offset;
+ unsigned long flags;
+
+ dev = data;
+ card = dev->card;
+ hdr_ptr = header;
+ length = be16_to_cpup(hdr_ptr);
+
+ spin_lock_irqsave(&dev->lock, flags);
+
+ offset = dev->rcv_buffer_size * dev->broadcast_rcv_next_ptr;
+ buf_ptr = dev->broadcast_rcv_buffer_ptrs[dev->broadcast_rcv_next_ptr++];
+ if (dev->broadcast_rcv_next_ptr == dev->num_broadcast_rcv_ptrs)
+ dev->broadcast_rcv_next_ptr = 0;
+
+ spin_unlock_irqrestore(&dev->lock, flags);
+
+ specifier_id = (be32_to_cpu(buf_ptr[0]) & 0xffff) << 8
+ | (be32_to_cpu(buf_ptr[1]) & 0xff000000) >> 24;
+ ver = be32_to_cpu(buf_ptr[1]) & 0xffffff;
+ source_node_id = be32_to_cpu(buf_ptr[0]) >> 16;
+
+ if (specifier_id == IANA_SPECIFIER_ID && ver == RFC2734_SW_VERSION) {
+ buf_ptr += 2;
+ length -= IEEE1394_GASP_HDR_SIZE;
+ fwnet_incoming_packet(dev, buf_ptr, length,
+ source_node_id, -1, true);
+ }
+
+ packet.payload_length = dev->rcv_buffer_size;
+ packet.interrupt = 1;
+ packet.skip = 0;
+ packet.tag = 3;
+ packet.sy = 0;
+ packet.header_length = IEEE1394_GASP_HDR_SIZE;
+
+ spin_lock_irqsave(&dev->lock, flags);
+
+ retval = fw_iso_context_queue(dev->broadcast_rcv_context, &packet,
+ &dev->broadcast_rcv_buffer, offset);
+
+ spin_unlock_irqrestore(&dev->lock, flags);
+
+ if (retval < 0)
+ fw_error("requeue failed\n");
+}
+
+static struct kmem_cache *fwnet_packet_task_cache;
+
+static int fwnet_send_packet(struct fwnet_packet_task *ptask);
+
+static void fwnet_transmit_packet_done(struct fwnet_packet_task *ptask)
+{
+ struct fwnet_device *dev;
+ unsigned long flags;
+
+ dev = ptask->dev;
+
+ spin_lock_irqsave(&dev->lock, flags);
+ list_del(&ptask->pt_link);
+ spin_unlock_irqrestore(&dev->lock, flags);
+
+ ptask->outstanding_pkts--; /* FIXME access inside lock */
+
+ if (ptask->outstanding_pkts > 0) {
+ u16 dg_size;
+ u16 fg_off;
+ u16 datagram_label;
+ u16 lf;
+ struct sk_buff *skb;
+
+ /* Update the ptask to point to the next fragment and send it */
+ lf = fwnet_get_hdr_lf(&ptask->hdr);
+ switch (lf) {
+ case RFC2374_HDR_LASTFRAG:
+ case RFC2374_HDR_UNFRAG:
+ default:
+ fw_error("Outstanding packet %x lf %x, header %x,%x\n",
+ ptask->outstanding_pkts, lf, ptask->hdr.w0,
+ ptask->hdr.w1);
+ BUG();
+
+ case RFC2374_HDR_FIRSTFRAG:
+ /* Set frag type here for future interior fragments */
+ dg_size = fwnet_get_hdr_dg_size(&ptask->hdr);
+ fg_off = ptask->max_payload - RFC2374_FRAG_HDR_SIZE;
+ datagram_label = fwnet_get_hdr_dgl(&ptask->hdr);
+ break;
+
+ case RFC2374_HDR_INTFRAG:
+ dg_size = fwnet_get_hdr_dg_size(&ptask->hdr);
+ fg_off = fwnet_get_hdr_fg_off(&ptask->hdr)
+ + ptask->max_payload - RFC2374_FRAG_HDR_SIZE;
+ datagram_label = fwnet_get_hdr_dgl(&ptask->hdr);
+ break;
+ }
+ skb = ptask->skb;
+ skb_pull(skb, ptask->max_payload);
+ if (ptask->outstanding_pkts > 1) {
+ fwnet_make_sf_hdr(&ptask->hdr, RFC2374_HDR_INTFRAG,
+ dg_size, fg_off, datagram_label);
+ } else {
+ fwnet_make_sf_hdr(&ptask->hdr, RFC2374_HDR_LASTFRAG,
+ dg_size, fg_off, datagram_label);
+ ptask->max_payload = skb->len + RFC2374_FRAG_HDR_SIZE;
+ }
+ fwnet_send_packet(ptask);
+ } else {
+ dev_kfree_skb_any(ptask->skb);
+ kmem_cache_free(fwnet_packet_task_cache, ptask);
+ }
+}
+
+static void fwnet_write_complete(struct fw_card *card, int rcode,
+ void *payload, size_t length, void *data)
+{
+ struct fwnet_packet_task *ptask;
+
+ ptask = data;
+
+ if (rcode == RCODE_COMPLETE)
+ fwnet_transmit_packet_done(ptask);
+ else
+ fw_error("fwnet_write_complete: failed: %x\n", rcode);
+ /* ??? error recovery */
+}
+
+static int fwnet_send_packet(struct fwnet_packet_task *ptask)
+{
+ struct fwnet_device *dev;
+ unsigned tx_len;
+ struct rfc2734_header *bufhdr;
+ unsigned long flags;
+
+ dev = ptask->dev;
+ tx_len = ptask->max_payload;
+ switch (fwnet_get_hdr_lf(&ptask->hdr)) {
+ case RFC2374_HDR_UNFRAG:
+ bufhdr = (struct rfc2734_header *)
+ skb_push(ptask->skb, RFC2374_UNFRAG_HDR_SIZE);
+ put_unaligned_be32(ptask->hdr.w0, &bufhdr->w0);
+ break;
+
+ case RFC2374_HDR_FIRSTFRAG:
+ case RFC2374_HDR_INTFRAG:
+ case RFC2374_HDR_LASTFRAG:
+ bufhdr = (struct rfc2734_header *)
+ skb_push(ptask->skb, RFC2374_FRAG_HDR_SIZE);
+ put_unaligned_be32(ptask->hdr.w0, &bufhdr->w0);
+ put_unaligned_be32(ptask->hdr.w1, &bufhdr->w1);
+ break;
+
+ default:
+ BUG();
+ }
+ if (ptask->dest_node == IEEE1394_ALL_NODES) {
+ u8 *p;
+ int generation;
+ int node_id;
+
+ /* ptask->generation may not have been set yet */
+ generation = dev->card->generation;
+ smp_rmb();
+ node_id = dev->card->node_id;
+
+ p = skb_push(ptask->skb, 8);
+ put_unaligned_be32(node_id << 16 | IANA_SPECIFIER_ID >> 8, p);
+ put_unaligned_be32((IANA_SPECIFIER_ID & 0xff) << 24
+ | RFC2734_SW_VERSION, &p[4]);
+
+ /* We should not transmit if broadcast_channel.valid == 0. */
+ fw_send_request(dev->card, &ptask->transaction,
+ TCODE_STREAM_DATA,
+ fw_stream_packet_destination_id(3,
+ IEEE1394_BROADCAST_CHANNEL, 0),
+ generation, SCODE_100, 0ULL, ptask->skb->data,
+ tx_len + 8, fwnet_write_complete, ptask);
+
+ /* FIXME race? */
+ spin_lock_irqsave(&dev->lock, flags);
+ list_add_tail(&ptask->pt_link, &dev->broadcasted_list);
+ spin_unlock_irqrestore(&dev->lock, flags);
+
+ return 0;
+ }
+
+ fw_send_request(dev->card, &ptask->transaction,
+ TCODE_WRITE_BLOCK_REQUEST, ptask->dest_node,
+ ptask->generation, ptask->speed, ptask->fifo_addr,
+ ptask->skb->data, tx_len, fwnet_write_complete, ptask);
+
+ /* FIXME race? */
+ spin_lock_irqsave(&dev->lock, flags);
+ list_add_tail(&ptask->pt_link, &dev->sent_list);
+ spin_unlock_irqrestore(&dev->lock, flags);
+
+ dev->netdev->trans_start = jiffies;
+
+ return 0;
+}
+
+static int fwnet_broadcast_start(struct fwnet_device *dev)
+{
+ struct fw_iso_context *context;
+ int retval;
+ unsigned num_packets;
+ unsigned max_receive;
+ struct fw_iso_packet packet;
+ unsigned long offset;
+ unsigned u;
+
+ if (dev->local_fifo == FWNET_NO_FIFO_ADDR) {
+ /* outside OHCI posted write area? */
+ static const struct fw_address_region region = {
+ .start = 0xffff00000000ULL,
+ .end = CSR_REGISTER_BASE,
+ };
+
+ dev->handler.length = 4096;
+ dev->handler.address_callback = fwnet_receive_packet;
+ dev->handler.callback_data = dev;
+
+ retval = fw_core_add_address_handler(&dev->handler, &region);
+ if (retval < 0)
+ goto failed_initial;
+
+ dev->local_fifo = dev->handler.offset;
+ }
+
+ max_receive = 1U << (dev->card->max_receive + 1);
+ num_packets = (FWNET_ISO_PAGE_COUNT * PAGE_SIZE) / max_receive;
+
+ if (!dev->broadcast_rcv_context) {
+ void **ptrptr;
+
+ context = fw_iso_context_create(dev->card,
+ FW_ISO_CONTEXT_RECEIVE, IEEE1394_BROADCAST_CHANNEL,
+ dev->card->link_speed, 8, fwnet_receive_broadcast, dev);
+ if (IS_ERR(context)) {
+ retval = PTR_ERR(context);
+ goto failed_context_create;
+ }
+
+ retval = fw_iso_buffer_init(&dev->broadcast_rcv_buffer,
+ dev->card, FWNET_ISO_PAGE_COUNT, DMA_FROM_DEVICE);
+ if (retval < 0)
+ goto failed_buffer_init;
+
+ ptrptr = kmalloc(sizeof(void *) * num_packets, GFP_KERNEL);
+ if (!ptrptr) {
+ retval = -ENOMEM;
+ goto failed_ptrs_alloc;
+ }
+
+ dev->broadcast_rcv_buffer_ptrs = ptrptr;
+ for (u = 0; u < FWNET_ISO_PAGE_COUNT; u++) {
+ void *ptr;
+ unsigned v;
+
+ ptr = kmap(dev->broadcast_rcv_buffer.pages[u]);
+ for (v = 0; v < num_packets / FWNET_ISO_PAGE_COUNT; v++)
+ *ptrptr++ = (void *)
+ ((char *)ptr + v * max_receive);
+ }
+ dev->broadcast_rcv_context = context;
+ } else {
+ context = dev->broadcast_rcv_context;
+ }
+
+ packet.payload_length = max_receive;
+ packet.interrupt = 1;
+ packet.skip = 0;
+ packet.tag = 3;
+ packet.sy = 0;
+ packet.header_length = IEEE1394_GASP_HDR_SIZE;
+ offset = 0;
+
+ for (u = 0; u < num_packets; u++) {
+ retval = fw_iso_context_queue(context, &packet,
+ &dev->broadcast_rcv_buffer, offset);
+ if (retval < 0)
+ goto failed_rcv_queue;
+
+ offset += max_receive;
+ }
+ dev->num_broadcast_rcv_ptrs = num_packets;
+ dev->rcv_buffer_size = max_receive;
+ dev->broadcast_rcv_next_ptr = 0U;
+ retval = fw_iso_context_start(context, -1, 0,
+ FW_ISO_CONTEXT_MATCH_ALL_TAGS); /* ??? sync */
+ if (retval < 0)
+ goto failed_rcv_queue;
+
+ /* FIXME: adjust it according to the min. speed of all known peers? */
+ dev->broadcast_xmt_max_payload = IEEE1394_MAX_PAYLOAD_S100
+ - IEEE1394_GASP_HDR_SIZE - RFC2374_UNFRAG_HDR_SIZE;
+ dev->broadcast_state = FWNET_BROADCAST_RUNNING;
+
+ return 0;
+
+ failed_rcv_queue:
+ kfree(dev->broadcast_rcv_buffer_ptrs);
+ dev->broadcast_rcv_buffer_ptrs = NULL;
+ failed_ptrs_alloc:
+ fw_iso_buffer_destroy(&dev->broadcast_rcv_buffer, dev->card);
+ failed_buffer_init:
+ fw_iso_context_destroy(context);
+ dev->broadcast_rcv_context = NULL;
+ failed_context_create:
+ fw_core_remove_address_handler(&dev->handler);
+ failed_initial:
+ dev->local_fifo = FWNET_NO_FIFO_ADDR;
+
+ return retval;
+}
+
+/* ifup */
+static int fwnet_open(struct net_device *net)
+{
+ struct fwnet_device *dev = netdev_priv(net);
+ int ret;
+
+ if (dev->broadcast_state == FWNET_BROADCAST_ERROR) {
+ ret = fwnet_broadcast_start(dev);
+ if (ret)
+ return ret;
+ }
+ netif_start_queue(net);
+
+ return 0;
+}
+
+/* ifdown */
+static int fwnet_stop(struct net_device *net)
+{
+ netif_stop_queue(net);
+
+ /* Deallocate iso context for use by other applications? */
+
+ return 0;
+}
+
+static int fwnet_tx(struct sk_buff *skb, struct net_device *net)
+{
+ struct fwnet_header hdr_buf;
+ struct fwnet_device *dev = netdev_priv(net);
+ __be16 proto;
+ u16 dest_node;
+ unsigned max_payload;
+ u16 dg_size;
+ u16 *datagram_label_ptr;
+ struct fwnet_packet_task *ptask;
+ struct fwnet_peer *peer;
+ unsigned long flags;
+
+ ptask = kmem_cache_alloc(fwnet_packet_task_cache, GFP_ATOMIC);
+ if (ptask == NULL)
+ goto fail;
+
+ skb = skb_share_check(skb, GFP_ATOMIC);
+ if (!skb)
+ goto fail;
+
+ /*
+ * Make a copy of the driver-specific header.
+ * We might need to rebuild the header on tx failure.
+ */
+ memcpy(&hdr_buf, skb->data, sizeof(hdr_buf));
+ skb_pull(skb, sizeof(hdr_buf));
+
+ proto = hdr_buf.h_proto;
+ dg_size = skb->len;
+
+ /* serialize access to peer, including peer->datagram_label */
+ spin_lock_irqsave(&dev->lock, flags);
+
+ /*
+ * Set the transmission type for the packet. ARP packets and IP
+ * broadcast packets are sent via GASP.
+ */
+ if (memcmp(hdr_buf.h_dest, net->broadcast, FWNET_ALEN) == 0
+ || proto == htons(ETH_P_ARP)
+ || (proto == htons(ETH_P_IP)
+ && IN_MULTICAST(ntohl(ip_hdr(skb)->daddr)))) {
+ max_payload = dev->broadcast_xmt_max_payload;
+ datagram_label_ptr = &dev->broadcast_xmt_datagramlabel;
+
+ ptask->fifo_addr = FWNET_NO_FIFO_ADDR;
+ ptask->generation = 0;
+ ptask->dest_node = IEEE1394_ALL_NODES;
+ ptask->speed = SCODE_100;
+ } else {
+ __be64 guid = get_unaligned((__be64 *)hdr_buf.h_dest);
+ u8 generation;
+
+ peer = fwnet_peer_find_by_guid(dev, be64_to_cpu(guid));
+ if (!peer || peer->fifo == FWNET_NO_FIFO_ADDR)
+ goto fail_unlock;
+
+ generation = peer->generation;
+ dest_node = peer->node_id;
+ max_payload = peer->max_payload;
+ datagram_label_ptr = &peer->datagram_label;
+
+ ptask->fifo_addr = peer->fifo;
+ ptask->generation = generation;
+ ptask->dest_node = dest_node;
+ ptask->speed = peer->speed;
+ }
+
+ /* If this is an ARP packet, convert it */
+ if (proto == htons(ETH_P_ARP)) {
+ struct arphdr *arp = (struct arphdr *)skb->data;
+ unsigned char *arp_ptr = (unsigned char *)(arp + 1);
+ struct rfc2734_arp *arp1394 = (struct rfc2734_arp *)skb->data;
+ __be32 ipaddr;
+
+ ipaddr = get_unaligned((__be32 *)(arp_ptr + FWNET_ALEN));
+
+ arp1394->hw_addr_len = RFC2734_HW_ADDR_LEN;
+ arp1394->max_rec = dev->card->max_receive;
+ arp1394->sspd = dev->card->link_speed;
+
+ put_unaligned_be16(dev->local_fifo >> 32,
+ &arp1394->fifo_hi);
+ put_unaligned_be32(dev->local_fifo & 0xffffffff,
+ &arp1394->fifo_lo);
+ put_unaligned(ipaddr, &arp1394->sip);
+ }
+
+ ptask->hdr.w0 = 0;
+ ptask->hdr.w1 = 0;
+ ptask->skb = skb;
+ ptask->dev = dev;
+
+ /* Does it all fit in one packet? */
+ if (dg_size <= max_payload) {
+ fwnet_make_uf_hdr(&ptask->hdr, ntohs(proto));
+ ptask->outstanding_pkts = 1;
+ max_payload = dg_size + RFC2374_UNFRAG_HDR_SIZE;
+ } else {
+ u16 datagram_label;
+
+ max_payload -= RFC2374_FRAG_OVERHEAD;
+ datagram_label = (*datagram_label_ptr)++;
+ fwnet_make_ff_hdr(&ptask->hdr, ntohs(proto), dg_size,
+ datagram_label);
+ ptask->outstanding_pkts = DIV_ROUND_UP(dg_size, max_payload);
+ max_payload += RFC2374_FRAG_HDR_SIZE;
+ }
+
+ spin_unlock_irqrestore(&dev->lock, flags);
+
+ ptask->max_payload = max_payload;
+ fwnet_send_packet(ptask);
+
+ return NETDEV_TX_OK;
+
+ fail_unlock:
+ spin_unlock_irqrestore(&dev->lock, flags);
+ fail:
+ if (ptask)
+ kmem_cache_free(fwnet_packet_task_cache, ptask);
+
+ if (skb != NULL)
+ dev_kfree_skb(skb);
+
+ net->stats.tx_dropped++;
+ net->stats.tx_errors++;
+
+ /*
+ * FIXME: According to a patch from 2003-02-26, "returning non-zero
+ * causes serious problems" here, allegedly. Before that patch,
+ * -ERRNO was returned which is not appropriate under Linux 2.6.
+ * Perhaps more needs to be done? Stop the queue in serious
+ * conditions and restart it elsewhere?
+ */
+ return NETDEV_TX_OK;
+}
+
+static int fwnet_change_mtu(struct net_device *net, int new_mtu)
+{
+ if (new_mtu < 68)
+ return -EINVAL;
+
+ net->mtu = new_mtu;
+ return 0;
+}
+
+static void fwnet_get_drvinfo(struct net_device *net,
+ struct ethtool_drvinfo *info)
+{
+ strcpy(info->driver, KBUILD_MODNAME);
+ strcpy(info->bus_info, "ieee1394");
+}
+
+static struct ethtool_ops fwnet_ethtool_ops = {
+ .get_drvinfo = fwnet_get_drvinfo,
+};
+
+static const struct net_device_ops fwnet_netdev_ops = {
+ .ndo_open = fwnet_open,
+ .ndo_stop = fwnet_stop,
+ .ndo_start_xmit = fwnet_tx,
+ .ndo_change_mtu = fwnet_change_mtu,
+};
+
+static void fwnet_init_dev(struct net_device *net)
+{
+ net->header_ops = &fwnet_header_ops;
+ net->netdev_ops = &fwnet_netdev_ops;
+ net->watchdog_timeo = 2 * HZ;
+ net->flags = IFF_BROADCAST | IFF_MULTICAST;
+ net->features = NETIF_F_HIGHDMA;
+ net->addr_len = FWNET_ALEN;
+ net->hard_header_len = FWNET_HLEN;
+ net->type = ARPHRD_IEEE1394;
+ net->tx_queue_len = 10;
+ SET_ETHTOOL_OPS(net, &fwnet_ethtool_ops);
+}
+
+/* caller must hold fwnet_device_mutex */
+static struct fwnet_device *fwnet_dev_find(struct fw_card *card)
+{
+ struct fwnet_device *dev;
+
+ list_for_each_entry(dev, &fwnet_device_list, dev_link)
+ if (dev->card == card)
+ return dev;
+
+ return NULL;
+}
+
+static int fwnet_add_peer(struct fwnet_device *dev,
+ struct fw_unit *unit, struct fw_device *device)
+{
+ struct fwnet_peer *peer;
+
+ peer = kmalloc(sizeof(*peer), GFP_KERNEL);
+ if (!peer)
+ return -ENOMEM;
+
+ dev_set_drvdata(&unit->device, peer);
+
+ peer->dev = dev;
+ peer->guid = (u64)device->config_rom[3] << 32 | device->config_rom[4];
+ peer->fifo = FWNET_NO_FIFO_ADDR;
+ INIT_LIST_HEAD(&peer->pd_list);
+ peer->pdg_size = 0;
+ peer->datagram_label = 0;
+ peer->speed = device->max_speed;
+ peer->max_payload = fwnet_max_payload(device->max_rec, peer->speed);
+
+ peer->generation = device->generation;
+ smp_rmb();
+ peer->node_id = device->node_id;
+
+ spin_lock_irq(&dev->lock);
+ list_add_tail(&peer->peer_link, &dev->peer_list);
+ spin_unlock_irq(&dev->lock);
+
+ return 0;
+}
+
+static int fwnet_probe(struct device *_dev)
+{
+ struct fw_unit *unit = fw_unit(_dev);
+ struct fw_device *device = fw_parent_device(unit);
+ struct fw_card *card = device->card;
+ struct net_device *net;
+ bool allocated_netdev = false;
+ struct fwnet_device *dev;
+ unsigned max_mtu;
+ int ret;
+
+ mutex_lock(&fwnet_device_mutex);
+
+ dev = fwnet_dev_find(card);
+ if (dev) {
+ net = dev->netdev;
+ goto have_dev;
+ }
+
+ net = alloc_netdev(sizeof(*dev), "firewire%d", fwnet_init_dev);
+ if (net == NULL) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ allocated_netdev = true;
+ SET_NETDEV_DEV(net, card->device);
+ dev = netdev_priv(net);
+
+ spin_lock_init(&dev->lock);
+ dev->broadcast_state = FWNET_BROADCAST_ERROR;
+ dev->broadcast_rcv_context = NULL;
+ dev->broadcast_xmt_max_payload = 0;
+ dev->broadcast_xmt_datagramlabel = 0;
+
+ dev->local_fifo = FWNET_NO_FIFO_ADDR;
+
+ INIT_LIST_HEAD(&dev->packet_list);
+ INIT_LIST_HEAD(&dev->broadcasted_list);
+ INIT_LIST_HEAD(&dev->sent_list);
+ INIT_LIST_HEAD(&dev->peer_list);
+
+ dev->card = card;
+ dev->netdev = net;
+
+ /*
+ * Use the RFC 2734 default 1500 octets or the maximum payload
+ * as initial MTU
+ */
+ max_mtu = (1 << (card->max_receive + 1))
+ - sizeof(struct rfc2734_header) - IEEE1394_GASP_HDR_SIZE;
+ net->mtu = min(1500U, max_mtu);
+
+ /* Set our hardware address while we're at it */
+ put_unaligned_be64(card->guid, net->dev_addr);
+ put_unaligned_be64(~0ULL, net->broadcast);
+ ret = register_netdev(net);
+ if (ret) {
+ fw_error("Cannot register the driver\n");
+ goto out;
+ }
+
+ list_add_tail(&dev->dev_link, &fwnet_device_list);
+ fw_notify("%s: IPv4 over FireWire on device %016llx\n",
+ net->name, (unsigned long long)card->guid);
+ have_dev:
+ ret = fwnet_add_peer(dev, unit, device);
+ if (ret && allocated_netdev) {
+ unregister_netdev(net);
+ list_del(&dev->dev_link);
+ }
+ out:
+ if (ret && allocated_netdev)
+ free_netdev(net);
+
+ mutex_unlock(&fwnet_device_mutex);
+
+ return ret;
+}
+
+static void fwnet_remove_peer(struct fwnet_peer *peer)
+{
+ struct fwnet_partial_datagram *pd, *pd_next;
+
+ spin_lock_irq(&peer->dev->lock);
+ list_del(&peer->peer_link);
+ spin_unlock_irq(&peer->dev->lock);
+
+ list_for_each_entry_safe(pd, pd_next, &peer->pd_list, pd_link)
+ fwnet_pd_delete(pd);
+
+ kfree(peer);
+}
+
+static int fwnet_remove(struct device *_dev)
+{
+ struct fwnet_peer *peer = dev_get_drvdata(_dev);
+ struct fwnet_device *dev = peer->dev;
+ struct net_device *net;
+ struct fwnet_packet_task *ptask, *pt_next;
+
+ mutex_lock(&fwnet_device_mutex);
+
+ fwnet_remove_peer(peer);
+
+ if (list_empty(&dev->peer_list)) {
+ net = dev->netdev;
+ unregister_netdev(net);
+
+ if (dev->local_fifo != FWNET_NO_FIFO_ADDR)
+ fw_core_remove_address_handler(&dev->handler);
+ if (dev->broadcast_rcv_context) {
+ fw_iso_context_stop(dev->broadcast_rcv_context);
+ fw_iso_buffer_destroy(&dev->broadcast_rcv_buffer,
+ dev->card);
+ fw_iso_context_destroy(dev->broadcast_rcv_context);
+ }
+ list_for_each_entry_safe(ptask, pt_next,
+ &dev->packet_list, pt_link) {
+ dev_kfree_skb_any(ptask->skb);
+ kmem_cache_free(fwnet_packet_task_cache, ptask);
+ }
+ list_for_each_entry_safe(ptask, pt_next,
+ &dev->broadcasted_list, pt_link) {
+ dev_kfree_skb_any(ptask->skb);
+ kmem_cache_free(fwnet_packet_task_cache, ptask);
+ }
+ list_for_each_entry_safe(ptask, pt_next,
+ &dev->sent_list, pt_link) {
+ dev_kfree_skb_any(ptask->skb);
+ kmem_cache_free(fwnet_packet_task_cache, ptask);
+ }
+ list_del(&dev->dev_link);
+
+ free_netdev(net);
+ }
+
+ mutex_unlock(&fwnet_device_mutex);
+
+ return 0;
+}
+
+/*
+ * FIXME abort partially sent fragmented datagrams,
+ * discard partially received fragmented datagrams
+ */
+static void fwnet_update(struct fw_unit *unit)
+{
+ struct fw_device *device = fw_parent_device(unit);
+ struct fwnet_peer *peer = dev_get_drvdata(&unit->device);
+ int generation;
+
+ generation = device->generation;
+
+ spin_lock_irq(&peer->dev->lock);
+ peer->node_id = device->node_id;
+ peer->generation = generation;
+ spin_unlock_irq(&peer->dev->lock);
+}
+
+static const struct ieee1394_device_id fwnet_id_table[] = {
+ {
+ .match_flags = IEEE1394_MATCH_SPECIFIER_ID |
+ IEEE1394_MATCH_VERSION,
+ .specifier_id = IANA_SPECIFIER_ID,
+ .version = RFC2734_SW_VERSION,
+ },
+ { }
+};
+
+static struct fw_driver fwnet_driver = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "net",
+ .bus = &fw_bus_type,
+ .probe = fwnet_probe,
+ .remove = fwnet_remove,
+ },
+ .update = fwnet_update,
+ .id_table = fwnet_id_table,
+};
+
+static const u32 rfc2374_unit_directory_data[] = {
+ 0x00040000, /* directory_length */
+ 0x1200005e, /* unit_specifier_id: IANA */
+ 0x81000003, /* textual descriptor offset */
+ 0x13000001, /* unit_sw_version: RFC 2734 */
+ 0x81000005, /* textual descriptor offset */
+ 0x00030000, /* descriptor_length */
+ 0x00000000, /* text */
+ 0x00000000, /* minimal ASCII, en */
+ 0x49414e41, /* I A N A */
+ 0x00030000, /* descriptor_length */
+ 0x00000000, /* text */
+ 0x00000000, /* minimal ASCII, en */
+ 0x49507634, /* I P v 4 */
+};
+
+static struct fw_descriptor rfc2374_unit_directory = {
+ .length = ARRAY_SIZE(rfc2374_unit_directory_data),
+ .key = (CSR_DIRECTORY | CSR_UNIT) << 24,
+ .data = rfc2374_unit_directory_data
+};
+
+static int __init fwnet_init(void)
+{
+ int err;
+
+ err = fw_core_add_descriptor(&rfc2374_unit_directory);
+ if (err)
+ return err;
+
+ fwnet_packet_task_cache = kmem_cache_create("packet_task",
+ sizeof(struct fwnet_packet_task), 0, 0, NULL);
+ if (!fwnet_packet_task_cache) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ err = driver_register(&fwnet_driver.driver);
+ if (!err)
+ return 0;
+
+ kmem_cache_destroy(fwnet_packet_task_cache);
+out:
+ fw_core_remove_descriptor(&rfc2374_unit_directory);
+
+ return err;
+}
+module_init(fwnet_init);
+
+static void __exit fwnet_cleanup(void)
+{
+ driver_unregister(&fwnet_driver.driver);
+ kmem_cache_destroy(fwnet_packet_task_cache);
+ fw_core_remove_descriptor(&rfc2374_unit_directory);
+}
+module_exit(fwnet_cleanup);
+
+MODULE_AUTHOR("Jay Fenlason <fenlason@redhat.com>");
+MODULE_DESCRIPTION("IPv4 over IEEE1394 as per RFC 2734");
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(ieee1394, fwnet_id_table);
diff --git a/drivers/i2c/busses/i2c-cpm.c b/drivers/i2c/busses/i2c-cpm.c
index b5db8b8..9c2e100 100644
--- a/drivers/i2c/busses/i2c-cpm.c
+++ b/drivers/i2c/busses/i2c-cpm.c
@@ -140,7 +140,7 @@
dev_dbg(&adap->dev, "Interrupt: %x\n", i);
- wake_up_interruptible(&cpm->i2c_wait);
+ wake_up(&cpm->i2c_wait);
return i ? IRQ_HANDLED : IRQ_NONE;
}
@@ -364,12 +364,12 @@
dev_dbg(&adap->dev, "test ready.\n");
pmsg = &msgs[tptr];
if (pmsg->flags & I2C_M_RD)
- ret = wait_event_interruptible_timeout(cpm->i2c_wait,
+ ret = wait_event_timeout(cpm->i2c_wait,
(in_be16(&tbdf[tptr].cbd_sc) & BD_SC_NAK) ||
!(in_be16(&rbdf[rptr].cbd_sc) & BD_SC_EMPTY),
1 * HZ);
else
- ret = wait_event_interruptible_timeout(cpm->i2c_wait,
+ ret = wait_event_timeout(cpm->i2c_wait,
!(in_be16(&tbdf[tptr].cbd_sc) & BD_SC_READY),
1 * HZ);
if (ret == 0) {
diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c
index b606db8..ad8d201 100644
--- a/drivers/i2c/busses/i2c-omap.c
+++ b/drivers/i2c/busses/i2c-omap.c
@@ -339,7 +339,7 @@
* to get longer filter period for better noise suppression.
* The filter is iclk (fclk for HS) period.
*/
- if (dev->speed > 400 || cpu_is_omap_2430())
+ if (dev->speed > 400 || cpu_is_omap2430())
internal_clk = 19200;
else if (dev->speed > 100)
internal_clk = 9600;
diff --git a/drivers/ide/cmd64x.c b/drivers/ide/cmd64x.c
index 03c8620..680e597 100644
--- a/drivers/ide/cmd64x.c
+++ b/drivers/ide/cmd64x.c
@@ -389,8 +389,7 @@
.init_chipset = init_chipset_cmd64x,
.enablebits = {{0x51,0x04,0x04}, {0x51,0x08,0x08}},
.port_ops = &cmd648_port_ops,
- .host_flags = IDE_HFLAG_SERIALIZE |
- IDE_HFLAG_ABUSE_PREFETCH,
+ .host_flags = IDE_HFLAG_ABUSE_PREFETCH,
.pio_mask = ATA_PIO5,
.mwdma_mask = ATA_MWDMA2,
.udma_mask = ATA_UDMA2,
diff --git a/drivers/ieee1394/Kconfig b/drivers/ieee1394/Kconfig
index 95f45f9..f102fcc 100644
--- a/drivers/ieee1394/Kconfig
+++ b/drivers/ieee1394/Kconfig
@@ -4,7 +4,7 @@
source "drivers/firewire/Kconfig"
config IEEE1394
- tristate "Stable FireWire stack"
+ tristate "Legacy alternative FireWire driver stack"
depends on PCI || BROKEN
help
IEEE 1394 describes a high performance serial bus, which is also
@@ -33,11 +33,9 @@
module will be called ohci1394.
NOTE:
-
- You should only build either ohci1394 or the new firewire-ohci driver,
- but not both. If you nevertheless want to install both, you should
- configure them only as modules and blacklist the driver(s) which you
- don't want to have auto-loaded. Add either
+ If you want to install firewire-ohci and ohci1394 together, you
+ should configure them only as modules and blacklist the driver(s)
+ which you don't want to have auto-loaded. Add either
blacklist firewire-ohci
or
@@ -46,12 +44,7 @@
blacklist dv1394
to /etc/modprobe.conf or /etc/modprobe.d/* and update modprobe.conf
- depending on your distribution. The latter two modules should be
- blacklisted together with ohci1394 because they depend on ohci1394.
-
- If you have an old modprobe which doesn't implement the blacklist
- directive, use "install modulename /bin/true" for the modules to be
- blacklisted.
+ depending on your distribution.
comment "PCILynx controller requires I2C"
depends on IEEE1394 && I2C=n
@@ -105,7 +98,7 @@
default n
config IEEE1394_ETH1394
- tristate "IP over 1394"
+ tristate "IP networking over 1394 (experimental)"
depends on IEEE1394 && EXPERIMENTAL && INET
select IEEE1394_ETH1394_ROM_ENTRY
help
diff --git a/drivers/media/common/ir-keymaps.c b/drivers/media/common/ir-keymaps.c
index 3fe158a..4216328 100644
--- a/drivers/media/common/ir-keymaps.c
+++ b/drivers/media/common/ir-keymaps.c
@@ -2750,3 +2750,26 @@
[0x1b] = KEY_B, /*recall*/
};
EXPORT_SYMBOL_GPL(ir_codes_dm1105_nec);
+
+/* EVGA inDtube
+ Devin Heitmueller <devin.heitmueller@gmail.com>
+ */
+IR_KEYTAB_TYPE ir_codes_evga_indtube[IR_KEYTAB_SIZE] = {
+ [0x12] = KEY_POWER,
+ [0x02] = KEY_MODE, /* TV */
+ [0x14] = KEY_MUTE,
+ [0x1a] = KEY_CHANNELUP,
+ [0x16] = KEY_TV2, /* PIP */
+ [0x1d] = KEY_VOLUMEUP,
+ [0x05] = KEY_CHANNELDOWN,
+ [0x0f] = KEY_PLAYPAUSE,
+ [0x19] = KEY_VOLUMEDOWN,
+ [0x1c] = KEY_REWIND,
+ [0x0d] = KEY_RECORD,
+ [0x18] = KEY_FORWARD,
+ [0x1e] = KEY_PREVIOUS,
+ [0x1b] = KEY_STOP,
+ [0x1f] = KEY_NEXT,
+ [0x13] = KEY_CAMERA,
+};
+EXPORT_SYMBOL_GPL(ir_codes_evga_indtube);
diff --git a/drivers/media/dvb/frontends/stv0900.h b/drivers/media/dvb/frontends/stv0900.h
index 8a1332c..bf4e9b6 100644
--- a/drivers/media/dvb/frontends/stv0900.h
+++ b/drivers/media/dvb/frontends/stv0900.h
@@ -29,6 +29,11 @@
#include <linux/dvb/frontend.h>
#include "dvb_frontend.h"
+struct stv0900_reg {
+ u16 addr;
+ u8 val;
+};
+
struct stv0900_config {
u8 demod_address;
u32 xtal;
@@ -38,7 +43,7 @@
u8 path1_mode;
u8 path2_mode;
-
+ struct stv0900_reg *ts_config_regs;
u8 tun1_maddress;/* 0, 1, 2, 3 for 0xc0, 0xc2, 0xc4, 0xc6 */
u8 tun2_maddress;
u8 tun1_adc;/* 1 for stv6110, 2 for stb6100 */
diff --git a/drivers/media/dvb/frontends/stv0900_core.c b/drivers/media/dvb/frontends/stv0900_core.c
index 8499bcf..1da045f 100644
--- a/drivers/media/dvb/frontends/stv0900_core.c
+++ b/drivers/media/dvb/frontends/stv0900_core.c
@@ -149,31 +149,31 @@
dprintk(KERN_ERR "%s: i2c error %d\n", __func__, ret);
}
-u8 stv0900_read_reg(struct stv0900_internal *i_params, u16 reg_addr)
+u8 stv0900_read_reg(struct stv0900_internal *i_params, u16 reg)
{
- u8 data[2];
int ret;
- struct i2c_msg i2cmsg = {
- .addr = i_params->i2c_addr,
- .flags = 0,
- .len = 2,
- .buf = data,
+ u8 b0[] = { MSB(reg), LSB(reg) };
+ u8 buf = 0;
+ struct i2c_msg msg[] = {
+ {
+ .addr = i_params->i2c_addr,
+ .flags = 0,
+ .buf = b0,
+ .len = 2,
+ }, {
+ .addr = i_params->i2c_addr,
+ .flags = I2C_M_RD,
+ .buf = &buf,
+ .len = 1,
+ },
};
- data[0] = MSB(reg_addr);
- data[1] = LSB(reg_addr);
+ ret = i2c_transfer(i_params->i2c_adap, msg, 2);
+ if (ret != 2)
+ dprintk(KERN_ERR "%s: i2c error %d, reg[0x%02x]\n",
+ __func__, ret, reg);
- ret = i2c_transfer(i_params->i2c_adap, &i2cmsg, 1);
- if (ret != 1)
- dprintk(KERN_ERR "%s: i2c error %d\n", __func__, ret);
-
- i2cmsg.flags = I2C_M_RD;
- i2cmsg.len = 1;
- ret = i2c_transfer(i_params->i2c_adap, &i2cmsg, 1);
- if (ret != 1)
- dprintk(KERN_ERR "%s: i2c error %d\n", __func__, ret);
-
- return data[0];
+ return buf;
}
void extract_mask_pos(u32 label, u8 *mask, u8 *pos)
@@ -712,6 +712,44 @@
return c_n;
}
+static int stv0900_read_ucblocks(struct dvb_frontend *fe, u32 * ucblocks)
+{
+ struct stv0900_state *state = fe->demodulator_priv;
+ struct stv0900_internal *i_params = state->internal;
+ enum fe_stv0900_demod_num demod = state->demod;
+ u8 err_val1, err_val0;
+ s32 err_field1, err_field0;
+ u32 header_err_val = 0;
+
+ *ucblocks = 0x0;
+ if (stv0900_get_standard(fe, demod) == STV0900_DVBS2_STANDARD) {
+ /* DVB-S2 delineator errors count */
+
+ /* retrieving the number of erroneous headers */
+ dmd_reg(err_field0, R0900_P1_BBFCRCKO0,
+ R0900_P2_BBFCRCKO0);
+ dmd_reg(err_field1, R0900_P1_BBFCRCKO1,
+ R0900_P2_BBFCRCKO1);
+
+ err_val1 = stv0900_read_reg(i_params, err_field1);
+ err_val0 = stv0900_read_reg(i_params, err_field0);
+ header_err_val = (err_val1<<8) | err_val0;
+
+ /* retrieving the number of erroneous packets */
+ dmd_reg(err_field0, R0900_P1_UPCRCKO0,
+ R0900_P2_UPCRCKO0);
+ dmd_reg(err_field1, R0900_P1_UPCRCKO1,
+ R0900_P2_UPCRCKO1);
+
+ err_val1 = stv0900_read_reg(i_params, err_field1);
+ err_val0 = stv0900_read_reg(i_params, err_field0);
+ *ucblocks = (err_val1<<8) | err_val0;
+ *ucblocks += header_err_val;
+ }
+
+ return 0;
+}
+
static int stv0900_read_snr(struct dvb_frontend *fe, u16 *snr)
{
*snr = stv0900_carr_get_quality(fe,
@@ -1355,7 +1393,7 @@
struct stv0900_state *state = fe->demodulator_priv;
enum fe_stv0900_error error = STV0900_NO_ERROR;
enum fe_stv0900_error demodError = STV0900_NO_ERROR;
- int selosci;
+ int selosci, i;
struct stv0900_inode *temp_int = find_inode(state->i2c_adap,
state->config->demod_address);
@@ -1402,7 +1440,23 @@
stv0900_write_bits(state->internal, F0900_P1_ROLLOFF_CONTROL, p_init->rolloff);
stv0900_write_bits(state->internal, F0900_P2_ROLLOFF_CONTROL, p_init->rolloff);
- stv0900_set_ts_parallel_serial(state->internal, p_init->path1_ts_clock, p_init->path2_ts_clock);
+ state->internal->ts_config = p_init->ts_config;
+ if (state->internal->ts_config == NULL)
+ stv0900_set_ts_parallel_serial(state->internal,
+ p_init->path1_ts_clock,
+ p_init->path2_ts_clock);
+ else {
+ for (i = 0; state->internal->ts_config[i].addr != 0xffff; i++)
+ stv0900_write_reg(state->internal,
+ state->internal->ts_config[i].addr,
+ state->internal->ts_config[i].val);
+
+ stv0900_write_bits(state->internal, F0900_P2_RST_HWARE, 1);
+ stv0900_write_bits(state->internal, F0900_P2_RST_HWARE, 0);
+ stv0900_write_bits(state->internal, F0900_P1_RST_HWARE, 1);
+ stv0900_write_bits(state->internal, F0900_P1_RST_HWARE, 0);
+ }
+
stv0900_write_bits(state->internal, F0900_P1_TUN_MADDRESS, p_init->tun1_maddress);
switch (p_init->tuner1_adc) {
case 1:
@@ -1882,6 +1936,7 @@
.read_ber = stv0900_read_ber,
.read_signal_strength = stv0900_read_signal_strength,
.read_snr = stv0900_read_snr,
+ .read_ucblocks = stv0900_read_ucblocks,
};
struct dvb_frontend *stv0900_attach(const struct stv0900_config *config,
@@ -1915,6 +1970,7 @@
init_params.tun1_iq_inversion = STV0900_IQ_NORMAL;
init_params.tuner1_adc = config->tun1_adc;
init_params.path2_ts_clock = config->path2_mode;
+ init_params.ts_config = config->ts_config_regs;
init_params.tun2_maddress = config->tun2_maddress;
init_params.tuner2_adc = config->tun2_adc;
init_params.tun2_iq_inversion = STV0900_IQ_SWAPPED;
diff --git a/drivers/media/dvb/frontends/stv0900_priv.h b/drivers/media/dvb/frontends/stv0900_priv.h
index 67dc8ec..5ed7a14 100644
--- a/drivers/media/dvb/frontends/stv0900_priv.h
+++ b/drivers/media/dvb/frontends/stv0900_priv.h
@@ -271,6 +271,7 @@
/* IQ from the tuner2 to the demod */
enum stv0900_iq_inversion tun2_iq_inversion;
+ struct stv0900_reg *ts_config;
};
struct stv0900_search_params {
@@ -363,6 +364,7 @@
u8 i2c_addr;
u8 clkmode;/* 0 for CLKI, 2 for XTALI */
u8 chip_id;
+ struct stv0900_reg *ts_config;
enum fe_stv0900_error errs;
int dmds_used;
};
diff --git a/drivers/media/dvb/frontends/stv090x.c b/drivers/media/dvb/frontends/stv090x.c
index 96ef745..488bdfb 100644
--- a/drivers/media/dvb/frontends/stv090x.c
+++ b/drivers/media/dvb/frontends/stv090x.c
@@ -2674,7 +2674,7 @@
static u8 stv090x_optimize_carloop_short(struct stv090x_state *state)
{
- struct stv090x_short_frame_crloop *short_crl;
+ struct stv090x_short_frame_crloop *short_crl = NULL;
s32 index = 0;
u8 aclc = 0x0b;
@@ -2694,10 +2694,13 @@
break;
}
- if (state->dev_ver >= 0x30)
- short_crl = stv090x_s2_short_crl_cut20;
- else if (state->dev_ver >= 0x20)
+ if (state->dev_ver >= 0x30) {
+ /* Cut 3.0 and up */
short_crl = stv090x_s2_short_crl_cut30;
+ } else {
+ /* Cut 2.0 and up: we don't support cuts older than 2.0 */
+ short_crl = stv090x_s2_short_crl_cut20;
+ }
if (state->srate <= 3000000)
aclc = short_crl[index].crl_2;
diff --git a/drivers/media/dvb/frontends/tda10048.c b/drivers/media/dvb/frontends/tda10048.c
index 4302c56..cc8862c 100644
--- a/drivers/media/dvb/frontends/tda10048.c
+++ b/drivers/media/dvb/frontends/tda10048.c
@@ -210,6 +210,7 @@
{ TDA10048_CLK_4000, TDA10048_IF_36130, 10, 0, 0 },
{ TDA10048_CLK_16000, TDA10048_IF_3300, 10, 3, 0 },
{ TDA10048_CLK_16000, TDA10048_IF_3500, 10, 3, 0 },
+ { TDA10048_CLK_16000, TDA10048_IF_3800, 10, 3, 0 },
{ TDA10048_CLK_16000, TDA10048_IF_4000, 10, 3, 0 },
{ TDA10048_CLK_16000, TDA10048_IF_4300, 10, 3, 0 },
{ TDA10048_CLK_16000, TDA10048_IF_36130, 10, 3, 0 },
diff --git a/drivers/media/dvb/siano/smscoreapi.c b/drivers/media/dvb/siano/smscoreapi.c
index 32be382..a246903 100644
--- a/drivers/media/dvb/siano/smscoreapi.c
+++ b/drivers/media/dvb/siano/smscoreapi.c
@@ -1422,8 +1422,8 @@
struct smscore_gpio_config *pGpioConfig) {
u32 totalLen;
- u32 TranslatedPinNum;
- u32 GroupNum;
+ u32 TranslatedPinNum = 0;
+ u32 GroupNum = 0;
u32 ElectricChar;
u32 groupCfg;
void *buffer;
diff --git a/drivers/media/radio/radio-tea5764.c b/drivers/media/radio/radio-tea5764.c
index 3936238..3cd76dd 100644
--- a/drivers/media/radio/radio-tea5764.c
+++ b/drivers/media/radio/radio-tea5764.c
@@ -322,7 +322,9 @@
v->rangehigh = FREQ_MAX * FREQ_MUL;
v->capability = V4L2_TUNER_CAP_LOW | V4L2_TUNER_CAP_STEREO;
if (r->tunchk & TEA5764_TUNCHK_STEREO)
- v->rxsubchans = V4L2_TUNER_SUB_STEREO;
+ v->rxsubchans = V4L2_TUNER_SUB_STEREO;
+ else
+ v->rxsubchans = V4L2_TUNER_SUB_MONO;
v->audmode = tea5764_get_audout_mode(radio);
v->signal = TEA5764_TUNCHK_LEVEL(r->tunchk) * 0xffff / 0xf;
v->afc = TEA5764_TUNCHK_IFCNT(r->tunchk);
diff --git a/drivers/media/video/Kconfig b/drivers/media/video/Kconfig
index 94f4405..061e147 100644
--- a/drivers/media/video/Kconfig
+++ b/drivers/media/video/Kconfig
@@ -866,9 +866,13 @@
module will be called w9968cf.
config USB_OV511
- tristate "USB OV511 Camera support"
+ tristate "USB OV511 Camera support (DEPRECATED)"
depends on VIDEO_V4L1
---help---
+ This driver is DEPRECATED, please use the gspca ov519 module
+ instead. Note that for the ov511 / ov518 support of the gspca module
+ you need at least version 0.6.0 of libv4l.
+
Say Y here if you want to connect this type of camera to your
computer's USB port. See <file:Documentation/video4linux/ov511.txt>
for more information and for a list of supported cameras.
diff --git a/drivers/media/video/cx18/cx18-controls.c b/drivers/media/video/cx18/cx18-controls.c
index 8e35c3a..5136df1 100644
--- a/drivers/media/video/cx18/cx18-controls.c
+++ b/drivers/media/video/cx18/cx18-controls.c
@@ -61,6 +61,8 @@
switch (qctrl->id) {
/* Standard V4L2 controls */
+ case V4L2_CID_USER_CLASS:
+ return v4l2_ctrl_query_fill(qctrl, 0, 0, 0, 0);
case V4L2_CID_BRIGHTNESS:
case V4L2_CID_HUE:
case V4L2_CID_SATURATION:
diff --git a/drivers/media/video/cx231xx/cx231xx-avcore.c b/drivers/media/video/cx231xx/cx231xx-avcore.c
index 6a94640..28f48f4 100644
--- a/drivers/media/video/cx231xx/cx231xx-avcore.c
+++ b/drivers/media/video/cx231xx/cx231xx-avcore.c
@@ -1052,22 +1052,13 @@
/* Set resolution of the video */
int cx231xx_resolution_set(struct cx231xx *dev)
{
- int width, height;
- u32 hscale, vscale;
- int status = 0;
-
- width = dev->width;
- height = dev->height;
-
- get_scale(dev, width, height, &hscale, &vscale);
-
/* set horizontal scale */
- status = vid_blk_write_word(dev, HSCALE_CTRL, hscale);
+ int status = vid_blk_write_word(dev, HSCALE_CTRL, dev->hscale);
+ if (status)
+ return status;
/* set vertical scale */
- status = vid_blk_write_word(dev, VSCALE_CTRL, vscale);
-
- return status;
+ return vid_blk_write_word(dev, VSCALE_CTRL, dev->vscale);
}
/******************************************************************************
@@ -2055,7 +2046,7 @@
int cx231xx_capture_start(struct cx231xx *dev, int start, u8 media_type)
{
- int rc;
+ int rc = -1;
u32 ep_mask = -1;
struct pcb_config *pcb_config;
diff --git a/drivers/media/video/cx231xx/cx231xx-video.c b/drivers/media/video/cx231xx/cx231xx-video.c
index a23ae73..609bae6 100644
--- a/drivers/media/video/cx231xx/cx231xx-video.c
+++ b/drivers/media/video/cx231xx/cx231xx-video.c
@@ -893,9 +893,9 @@
return 0;
}
-void get_scale(struct cx231xx *dev,
- unsigned int width, unsigned int height,
- unsigned int *hscale, unsigned int *vscale)
+static void get_scale(struct cx231xx *dev,
+ unsigned int width, unsigned int height,
+ unsigned int *hscale, unsigned int *vscale)
{
unsigned int maxw = norm_maxw(dev);
unsigned int maxh = norm_maxh(dev);
@@ -907,10 +907,6 @@
*vscale = (((unsigned long)maxh) << 12) / height - 4096L;
if (*vscale >= 0x4000)
*vscale = 0x3fff;
-
- dev->hscale = *hscale;
- dev->vscale = *vscale;
-
}
/* ------------------------------------------------------------------
@@ -955,8 +951,8 @@
{
struct cx231xx_fh *fh = priv;
struct cx231xx *dev = fh->dev;
- int width = f->fmt.pix.width;
- int height = f->fmt.pix.height;
+ unsigned int width = f->fmt.pix.width;
+ unsigned int height = f->fmt.pix.height;
unsigned int maxw = norm_maxw(dev);
unsigned int maxh = norm_maxh(dev);
unsigned int hscale, vscale;
@@ -971,17 +967,7 @@
/* width must be even because of the YUYV format
height must be even because of interlacing */
- height &= 0xfffe;
- width &= 0xfffe;
-
- if (unlikely(height < 32))
- height = 32;
- if (unlikely(height > maxh))
- height = maxh;
- if (unlikely(width < 48))
- width = 48;
- if (unlikely(width > maxw))
- width = maxw;
+ v4l_bound_align_image(&width, 48, maxw, 1, &height, 32, maxh, 1, 0);
get_scale(dev, width, height, &hscale, &vscale);
diff --git a/drivers/media/video/cx231xx/cx231xx.h b/drivers/media/video/cx231xx/cx231xx.h
index e38eb2d..a0f823a 100644
--- a/drivers/media/video/cx231xx/cx231xx.h
+++ b/drivers/media/video/cx231xx/cx231xx.h
@@ -722,9 +722,6 @@
int cx231xx_set_decoder_video_input(struct cx231xx *dev, u8 pin_type, u8 input);
int cx231xx_do_mode_ctrl_overrides(struct cx231xx *dev);
int cx231xx_set_audio_input(struct cx231xx *dev, u8 input);
-void get_scale(struct cx231xx *dev,
- unsigned int width, unsigned int height,
- unsigned int *hscale, unsigned int *vscale);
/* Provided by cx231xx-video.c */
int cx231xx_register_extension(struct cx231xx_ops *dev);
diff --git a/drivers/media/video/cx2341x.c b/drivers/media/video/cx2341x.c
index 8ded529..4c8e958 100644
--- a/drivers/media/video/cx2341x.c
+++ b/drivers/media/video/cx2341x.c
@@ -500,6 +500,8 @@
int err;
switch (qctrl->id) {
+ case V4L2_CID_MPEG_CLASS:
+ return v4l2_ctrl_query_fill(qctrl, 0, 0, 0, 0);
case V4L2_CID_MPEG_STREAM_TYPE:
return v4l2_ctrl_query_fill(qctrl,
V4L2_MPEG_STREAM_TYPE_MPEG2_PS,
diff --git a/drivers/media/video/cx23885/cx23885-dvb.c b/drivers/media/video/cx23885/cx23885-dvb.c
index e236df2..48a9751 100644
--- a/drivers/media/video/cx23885/cx23885-dvb.c
+++ b/drivers/media/video/cx23885/cx23885-dvb.c
@@ -45,6 +45,7 @@
#include "dibx000_common.h"
#include "zl10353.h"
#include "stv0900.h"
+#include "stv0900_reg.h"
#include "stv6110.h"
#include "lnbh24.h"
#include "cx24116.h"
@@ -242,12 +243,22 @@
.if_lvl = 6, .rfagc_top = 0x37 },
};
+static struct tda18271_std_map hauppauge_hvr1200_tda18271_std_map = {
+ .dvbt_6 = { .if_freq = 3300, .agc_mode = 3, .std = 4,
+ .if_lvl = 1, .rfagc_top = 0x37, },
+ .dvbt_7 = { .if_freq = 3800, .agc_mode = 3, .std = 5,
+ .if_lvl = 1, .rfagc_top = 0x37, },
+ .dvbt_8 = { .if_freq = 4300, .agc_mode = 3, .std = 6,
+ .if_lvl = 1, .rfagc_top = 0x37, },
+};
+
static struct tda18271_config hauppauge_tda18271_config = {
.std_map = &hauppauge_tda18271_std_map,
.gate = TDA18271_GATE_ANALOG,
};
static struct tda18271_config hauppauge_hvr1200_tuner_config = {
+ .std_map = &hauppauge_hvr1200_tda18271_std_map,
.gate = TDA18271_GATE_ANALOG,
};
@@ -370,13 +381,25 @@
.disable_i2c_gate_ctrl = 1,
};
+static struct stv0900_reg stv0900_ts_regs[] = {
+ { R0900_TSGENERAL, 0x00 },
+ { R0900_P1_TSSPEED, 0x40 },
+ { R0900_P2_TSSPEED, 0x40 },
+ { R0900_P1_TSCFGM, 0xc0 },
+ { R0900_P2_TSCFGM, 0xc0 },
+ { R0900_P1_TSCFGH, 0xe0 },
+ { R0900_P2_TSCFGH, 0xe0 },
+ { R0900_P1_TSCFGL, 0x20 },
+ { R0900_P2_TSCFGL, 0x20 },
+ { 0xffff, 0xff }, /* terminate */
+};
+
static struct stv0900_config netup_stv0900_config = {
.demod_address = 0x68,
.xtal = 27000000,
.clkmode = 3,/* 0-CLKI, 2-XTALI, else AUTO */
.diseqc_mode = 2,/* 2/3 PWM */
- .path1_mode = 2,/*Serial continues clock */
- .path2_mode = 2,/*Serial continues clock */
+ .ts_config_regs = stv0900_ts_regs,
.tun1_maddress = 0,/* 0x60 */
.tun2_maddress = 3,/* 0x63 */
.tun1_adc = 1,/* 1 Vpp */
@@ -736,7 +759,8 @@
if (!dvb_attach(lnbh24_attach,
fe0->dvb.frontend,
&i2c_bus->i2c_adap,
- LNBH24_PCL, 0, 0x09))
+ LNBH24_PCL,
+ LNBH24_TTX, 0x09))
printk(KERN_ERR
"No LNBH24 found!\n");
@@ -756,7 +780,8 @@
if (!dvb_attach(lnbh24_attach,
fe0->dvb.frontend,
&i2c_bus->i2c_adap,
- LNBH24_PCL, 0, 0x0a))
+ LNBH24_PCL,
+ LNBH24_TTX, 0x0a))
printk(KERN_ERR
"No LNBH24 found!\n");
diff --git a/drivers/media/video/cx23885/cx23885-video.c b/drivers/media/video/cx23885/cx23885-video.c
index 66bbd2e..70836af 100644
--- a/drivers/media/video/cx23885/cx23885-video.c
+++ b/drivers/media/video/cx23885/cx23885-video.c
@@ -963,15 +963,8 @@
}
f->fmt.pix.field = field;
- if (f->fmt.pix.height < 32)
- f->fmt.pix.height = 32;
- if (f->fmt.pix.height > maxh)
- f->fmt.pix.height = maxh;
- if (f->fmt.pix.width < 48)
- f->fmt.pix.width = 48;
- if (f->fmt.pix.width > maxw)
- f->fmt.pix.width = maxw;
- f->fmt.pix.width &= ~0x03;
+ v4l_bound_align_image(&f->fmt.pix.width, 48, maxw, 2,
+ &f->fmt.pix.height, 32, maxh, 0, 0);
f->fmt.pix.bytesperline =
(f->fmt.pix.width * fmt->depth) >> 3;
f->fmt.pix.sizeimage =
diff --git a/drivers/media/video/cx88/cx88-cards.c b/drivers/media/video/cx88/cx88-cards.c
index 94b7a52..a5cc1c1 100644
--- a/drivers/media/video/cx88/cx88-cards.c
+++ b/drivers/media/video/cx88/cx88-cards.c
@@ -1524,33 +1524,45 @@
},
.mpeg = CX88_MPEG_DVB,
},
+ /* Terry Wu <terrywu2009@gmail.com> */
+ /* TV Audio : set GPIO 2, 18, 19 value to 0, 1, 0 */
+ /* FM Audio : set GPIO 2, 18, 19 value to 0, 0, 0 */
+ /* Line-in Audio : set GPIO 2, 18, 19 value to 0, 1, 1 */
+ /* Mute Audio : set GPIO 2 value to 1 */
[CX88_BOARD_WINFAST_TV2000_XP_GLOBAL] = {
- .name = "Winfast TV2000 XP Global",
+ .name = "Leadtek TV2000 XP Global",
.tuner_type = TUNER_XC2028,
.tuner_addr = 0x61,
+ .radio_type = TUNER_XC2028,
+ .radio_addr = 0x61,
.input = { {
.type = CX88_VMUX_TELEVISION,
.vmux = 0,
- .gpio0 = 0x0400, /* pin 2:mute = 0 (off?) */
+ .gpio0 = 0x0400, /* pin 2 = 0 */
.gpio1 = 0x0000,
- .gpio2 = 0x0800, /* pin 19:audio = 0 (tv) */
-
+ .gpio2 = 0x0C04, /* pin 18 = 1, pin 19 = 0 */
+ .gpio3 = 0x0000,
}, {
.type = CX88_VMUX_COMPOSITE1,
.vmux = 1,
- .gpio0 = 0x0400, /* probably? or 0x0404 to turn mute on */
+ .gpio0 = 0x0400, /* pin 2 = 0 */
.gpio1 = 0x0000,
- .gpio2 = 0x0808, /* pin 19:audio = 1 (line) */
-
+ .gpio2 = 0x0C0C, /* pin 18 = 1, pin 19 = 1 */
+ .gpio3 = 0x0000,
}, {
.type = CX88_VMUX_SVIDEO,
.vmux = 2,
+ .gpio0 = 0x0400, /* pin 2 = 0 */
+ .gpio1 = 0x0000,
+ .gpio2 = 0x0C0C, /* pin 18 = 1, pin 19 = 1 */
+ .gpio3 = 0x0000,
} },
.radio = {
.type = CX88_RADIO,
- .gpio0 = 0x004ff,
- .gpio1 = 0x010ff,
- .gpio2 = 0x0ff,
+ .gpio0 = 0x0400, /* pin 2 = 0 */
+ .gpio1 = 0x0000,
+ .gpio2 = 0x0C00, /* pin 18 = 0, pin 19 = 0 */
+ .gpio3 = 0x0000,
},
},
[CX88_BOARD_POWERCOLOR_REAL_ANGEL] = {
@@ -2438,6 +2450,41 @@
.subvendor = 0x107d,
.subdevice = 0x6654,
.card = CX88_BOARD_WINFAST_DTV1800H,
+ }, {
+ /* PVR2000 PAL Model [107d:6630] */
+ .subvendor = 0x107d,
+ .subdevice = 0x6630,
+ .card = CX88_BOARD_LEADTEK_PVR2000,
+ }, {
+ /* PVR2000 PAL Model [107d:6638] */
+ .subvendor = 0x107d,
+ .subdevice = 0x6638,
+ .card = CX88_BOARD_LEADTEK_PVR2000,
+ }, {
+ /* PVR2000 NTSC Model [107d:6631] */
+ .subvendor = 0x107d,
+ .subdevice = 0x6631,
+ .card = CX88_BOARD_LEADTEK_PVR2000,
+ }, {
+ /* PVR2000 NTSC Model [107d:6637] */
+ .subvendor = 0x107d,
+ .subdevice = 0x6637,
+ .card = CX88_BOARD_LEADTEK_PVR2000,
+ }, {
+ /* PVR2000 NTSC Model [107d:663d] */
+ .subvendor = 0x107d,
+ .subdevice = 0x663d,
+ .card = CX88_BOARD_LEADTEK_PVR2000,
+ }, {
+ /* DV2000 NTSC Model [107d:6621] */
+ .subvendor = 0x107d,
+ .subdevice = 0x6621,
+ .card = CX88_BOARD_WINFAST_DV2000,
+ }, {
+ /* TV2000 XP Global [107d:6618] */
+ .subvendor = 0x107d,
+ .subdevice = 0x6618,
+ .card = CX88_BOARD_WINFAST_TV2000_XP_GLOBAL,
},
};
@@ -2446,12 +2493,6 @@
static void leadtek_eeprom(struct cx88_core *core, u8 *eeprom_data)
{
- /* This is just for the "Winfast 2000XP Expert" board ATM; I don't have data on
- * any others.
- *
- * Byte 0 is 1 on the NTSC board.
- */
-
if (eeprom_data[4] != 0x7d ||
eeprom_data[5] != 0x10 ||
eeprom_data[7] != 0x66) {
@@ -2459,8 +2500,19 @@
return;
}
- core->board.tuner_type = (eeprom_data[6] == 0x13) ?
- TUNER_PHILIPS_FM1236_MK3 : TUNER_PHILIPS_FM1216ME_MK3;
+ /* Terry Wu <terrywu2009@gmail.com> */
+ switch (eeprom_data[6]) {
+ case 0x13: /* SSID 6613 for TV2000 XP Expert NTSC Model */
+ case 0x21: /* SSID 6621 for DV2000 NTSC Model */
+ case 0x31: /* SSID 6631 for PVR2000 NTSC Model */
+ case 0x37: /* SSID 6637 for PVR2000 NTSC Model */
+ case 0x3d: /* SSID 663d for PVR2000 NTSC Model */
+ core->board.tuner_type = TUNER_PHILIPS_FM1236_MK3;
+ break;
+ default:
+ core->board.tuner_type = TUNER_PHILIPS_FM1216ME_MK3;
+ break;
+ }
info_printk(core, "Leadtek Winfast 2000XP Expert config: "
"tuner=%d, eeprom[0]=0x%02x\n",
@@ -2713,7 +2765,6 @@
{
/* Board-specific callbacks */
switch (core->boardnr) {
- case CX88_BOARD_WINFAST_TV2000_XP_GLOBAL:
case CX88_BOARD_POWERCOLOR_REAL_ANGEL:
case CX88_BOARD_GENIATECH_X8000_MT:
case CX88_BOARD_KWORLD_ATSC_120:
@@ -2725,6 +2776,7 @@
case CX88_BOARD_DVICO_FUSIONHDTV_DVB_T_PRO:
case CX88_BOARD_DVICO_FUSIONHDTV_5_PCI_NANO:
return cx88_dvico_xc2028_callback(core, command, arg);
+ case CX88_BOARD_WINFAST_TV2000_XP_GLOBAL:
case CX88_BOARD_WINFAST_DTV1800H:
return cx88_xc3028_winfast1800h_callback(core, command, arg);
}
@@ -2914,6 +2966,7 @@
udelay(1000);
break;
+ case CX88_BOARD_WINFAST_TV2000_XP_GLOBAL:
case CX88_BOARD_WINFAST_DTV1800H:
/* GPIO 12 (xc3028 tuner reset) */
cx_set(MO_GP1_IO, 0x1010);
@@ -2950,6 +3003,7 @@
case CX88_BOARD_DVICO_FUSIONHDTV_5_PCI_NANO:
ctl->demod = XC3028_FE_OREN538;
break;
+ case CX88_BOARD_WINFAST_TV2000_XP_GLOBAL:
case CX88_BOARD_PROLINK_PV_GLOBAL_XTREME:
case CX88_BOARD_PROLINK_PV_8000GT:
/*
@@ -2993,6 +3047,8 @@
if (0 == core->i2c_rc)
gdi_eeprom(core, eeprom);
break;
+ case CX88_BOARD_LEADTEK_PVR2000:
+ case CX88_BOARD_WINFAST_DV2000:
case CX88_BOARD_WINFAST2000XP_EXPERT:
if (0 == core->i2c_rc)
leadtek_eeprom(core, eeprom);
diff --git a/drivers/media/video/cx88/cx88-video.c b/drivers/media/video/cx88/cx88-video.c
index 0ccac70..b127708 100644
--- a/drivers/media/video/cx88/cx88-video.c
+++ b/drivers/media/video/cx88/cx88-video.c
@@ -1111,15 +1111,8 @@
}
f->fmt.pix.field = field;
- if (f->fmt.pix.height < 32)
- f->fmt.pix.height = 32;
- if (f->fmt.pix.height > maxh)
- f->fmt.pix.height = maxh;
- if (f->fmt.pix.width < 48)
- f->fmt.pix.width = 48;
- if (f->fmt.pix.width > maxw)
- f->fmt.pix.width = maxw;
- f->fmt.pix.width &= ~0x03;
+ v4l_bound_align_image(&f->fmt.pix.width, 48, maxw, 2,
+ &f->fmt.pix.height, 32, maxh, 0, 0);
f->fmt.pix.bytesperline =
(f->fmt.pix.width * fmt->depth) >> 3;
f->fmt.pix.sizeimage =
diff --git a/drivers/media/video/em28xx/em28xx-cards.c b/drivers/media/video/em28xx/em28xx-cards.c
index 00cc791..c43fdb9 100644
--- a/drivers/media/video/em28xx/em28xx-cards.c
+++ b/drivers/media/video/em28xx/em28xx-cards.c
@@ -139,6 +139,24 @@
{ -1, -1, -1, -1},
};
+/* Evga inDtube
+ GPIO0 - Enable digital power (s5h1409) - low to enable
+ GPIO1 - Enable analog power (tvp5150/emp202) - low to enable
+ GPIO4 - xc3028 reset
+ GOP3 - s5h1409 reset
+ */
+static struct em28xx_reg_seq evga_indtube_analog[] = {
+ {EM28XX_R08_GPIO, 0x79, 0xff, 60},
+ { -1, -1, -1, -1},
+};
+
+static struct em28xx_reg_seq evga_indtube_digital[] = {
+ {EM28XX_R08_GPIO, 0x7a, 0xff, 1},
+ {EM2880_R04_GPO, 0x04, 0xff, 10},
+ {EM2880_R04_GPO, 0x0c, 0xff, 1},
+ { -1, -1, -1, -1},
+};
+
/* Callback for the most boards */
static struct em28xx_reg_seq default_tuner_gpio[] = {
{EM28XX_R08_GPIO, EM_GPIO_4, EM_GPIO_4, 10},
@@ -1449,6 +1467,33 @@
.gpio = terratec_av350_unmute_gpio,
} },
},
+ [EM2882_BOARD_EVGA_INDTUBE] = {
+ .name = "Evga inDtube",
+ .tuner_type = TUNER_XC2028,
+ .tuner_gpio = default_tuner_gpio,
+ .decoder = EM28XX_TVP5150,
+ .xclk = EM28XX_XCLK_FREQUENCY_12MHZ, /* NEC IR */
+ .mts_firmware = 1,
+ .has_dvb = 1,
+ .dvb_gpio = evga_indtube_digital,
+ .ir_codes = ir_codes_evga_indtube,
+ .input = { {
+ .type = EM28XX_VMUX_TELEVISION,
+ .vmux = TVP5150_COMPOSITE0,
+ .amux = EM28XX_AMUX_VIDEO,
+ .gpio = evga_indtube_analog,
+ }, {
+ .type = EM28XX_VMUX_COMPOSITE1,
+ .vmux = TVP5150_COMPOSITE1,
+ .amux = EM28XX_AMUX_LINE_IN,
+ .gpio = evga_indtube_analog,
+ }, {
+ .type = EM28XX_VMUX_SVIDEO,
+ .vmux = TVP5150_SVIDEO,
+ .amux = EM28XX_AMUX_LINE_IN,
+ .gpio = evga_indtube_analog,
+ } },
+ },
};
const unsigned int em28xx_bcount = ARRAY_SIZE(em28xx_boards);
@@ -1571,6 +1616,7 @@
{0x72cc5a8b, EM2820_BOARD_PROLINK_PLAYTV_BOX4_USB2, TUNER_YMEC_TVF_5533MF},
{0x966a0441, EM2880_BOARD_KWORLD_DVB_310U, TUNER_XC2028},
{0x9567eb1a, EM2880_BOARD_EMPIRE_DUAL_TV, TUNER_XC2028},
+ {0xcee44a99, EM2882_BOARD_EVGA_INDTUBE, TUNER_XC2028},
};
/* I2C devicelist hash table for devices with generic USB IDs */
@@ -1834,6 +1880,10 @@
ctl->demod = XC3028_FE_CHINA;
ctl->fname = XC2028_DEFAULT_FIRMWARE;
break;
+ case EM2882_BOARD_EVGA_INDTUBE:
+ ctl->demod = XC3028_FE_CHINA;
+ ctl->fname = XC3028L_DEFAULT_FIRMWARE;
+ break;
default:
ctl->demod = XC3028_FE_OREN538;
}
@@ -2101,6 +2151,12 @@
case EM2880_BOARD_MSI_DIGIVOX_AD:
if (!em28xx_hint_board(dev))
em28xx_set_model(dev);
+
+ /* In cases where we had to use a board hint, the call to
+ em28xx_set_mode() in em28xx_pre_card_setup() was a no-op,
+ so make the call now so the analog GPIOs are set properly
+ before probing the i2c bus. */
+ em28xx_set_mode(dev, EM28XX_ANALOG_MODE);
break;
}
diff --git a/drivers/media/video/em28xx/em28xx-dvb.c b/drivers/media/video/em28xx/em28xx-dvb.c
index 563dd2b..e7b47c8 100644
--- a/drivers/media/video/em28xx/em28xx-dvb.c
+++ b/drivers/media/video/em28xx/em28xx-dvb.c
@@ -445,6 +445,7 @@
}
break;
case EM2883_BOARD_KWORLD_HYBRID_330U:
+ case EM2882_BOARD_EVGA_INDTUBE:
dvb->frontend = dvb_attach(s5h1409_attach,
&em28xx_s5h1409_with_xc3028,
&dev->i2c_adap);
diff --git a/drivers/media/video/em28xx/em28xx-video.c b/drivers/media/video/em28xx/em28xx-video.c
index 882796e..8fe1bee 100644
--- a/drivers/media/video/em28xx/em28xx-video.c
+++ b/drivers/media/video/em28xx/em28xx-video.c
@@ -687,8 +687,8 @@
{
struct em28xx_fh *fh = priv;
struct em28xx *dev = fh->dev;
- int width = f->fmt.pix.width;
- int height = f->fmt.pix.height;
+ unsigned int width = f->fmt.pix.width;
+ unsigned int height = f->fmt.pix.height;
unsigned int maxw = norm_maxw(dev);
unsigned int maxh = norm_maxh(dev);
unsigned int hscale, vscale;
@@ -701,34 +701,20 @@
return -EINVAL;
}
- /* width must even because of the YUYV format
- height must be even because of interlacing */
- height &= 0xfffe;
- width &= 0xfffe;
-
- if (unlikely(height < 32))
- height = 32;
- if (unlikely(height > maxh))
- height = maxh;
- if (unlikely(width < 48))
- width = 48;
- if (unlikely(width > maxw))
- width = maxw;
-
if (dev->board.is_em2800) {
/* the em2800 can only scale down to 50% */
- if (height % (maxh / 2))
- height = maxh;
- if (width % (maxw / 2))
- width = maxw;
- /* according to empiatech support */
- /* the MaxPacketSize is to small to support */
- /* framesizes larger than 640x480 @ 30 fps */
- /* or 640x576 @ 25 fps. As this would cut */
- /* of a part of the image we prefer */
- /* 360x576 or 360x480 for now */
+ height = height > (3 * maxh / 4) ? maxh : maxh / 2;
+ width = width > (3 * maxw / 4) ? maxw : maxw / 2;
+ /* According to empiatech support the MaxPacketSize is too small
+ * to support framesizes larger than 640x480 @ 30 fps or 640x576
+ * @ 25 fps. As this would cut off part of the image we prefer
+ * 360x576 or 360x480 for now */
if (width == maxw && height == maxh)
width /= 2;
+ } else {
+ /* width must be even because of the YUYV format
+ height must be even because of interlacing */
+ v4l_bound_align_image(&width, 48, maxw, 1, &height, 32, maxh, 1, 0);
}
get_scale(dev, width, height, &hscale, &vscale);
diff --git a/drivers/media/video/em28xx/em28xx.h b/drivers/media/video/em28xx/em28xx.h
index 8bf81be..813ce45 100644
--- a/drivers/media/video/em28xx/em28xx.h
+++ b/drivers/media/video/em28xx/em28xx.h
@@ -106,6 +106,7 @@
#define EM2860_BOARD_TERRATEC_GRABBY 67
#define EM2860_BOARD_TERRATEC_AV350 68
#define EM2882_BOARD_KWORLD_ATSC_315U 69
+#define EM2882_BOARD_EVGA_INDTUBE 70
/* Limits minimum and default number of buffers */
#define EM28XX_MIN_BUF 4
diff --git a/drivers/media/video/gspca/gspca.c b/drivers/media/video/gspca/gspca.c
index f7e0355..1e89600 100644
--- a/drivers/media/video/gspca/gspca.c
+++ b/drivers/media/video/gspca/gspca.c
@@ -1042,13 +1042,11 @@
for (i = 0; i < gspca_dev->sd_desc->nctrls; i++) {
if (gspca_dev->ctrl_dis & (1 << i))
continue;
- if (ctrls->qctrl.id < id)
+ if (gspca_dev->sd_desc->ctrls[i].qctrl.id < id)
continue;
- if (ctrls != NULL) {
- if (gspca_dev->sd_desc->ctrls[i].qctrl.id
+ if (ctrls && gspca_dev->sd_desc->ctrls[i].qctrl.id
> ctrls->qctrl.id)
- continue;
- }
+ continue;
ctrls = &gspca_dev->sd_desc->ctrls[i];
}
} else {
diff --git a/drivers/media/video/gspca/ov519.c b/drivers/media/video/gspca/ov519.c
index 188866a..2f6e135 100644
--- a/drivers/media/video/gspca/ov519.c
+++ b/drivers/media/video/gspca/ov519.c
@@ -50,12 +50,18 @@
struct sd {
struct gspca_dev gspca_dev; /* !! must be the first item */
+ __u8 packet_nr;
+
char bridge;
#define BRIDGE_OV511 0
#define BRIDGE_OV511PLUS 1
#define BRIDGE_OV518 2
#define BRIDGE_OV518PLUS 3
#define BRIDGE_OV519 4
+#define BRIDGE_MASK 7
+
+ char invert_led;
+#define BRIDGE_INVERT_LED 8
/* Determined by sensor type */
__u8 sif;
@@ -65,22 +71,25 @@
__u8 colors;
__u8 hflip;
__u8 vflip;
+ __u8 autobrightness;
+ __u8 freq;
__u8 stopped; /* Streaming is temporarily paused */
- __u8 frame_rate; /* current Framerate (OV519 only) */
- __u8 clockdiv; /* clockdiv override for OV519 only */
+ __u8 frame_rate; /* current Framerate */
+ __u8 clockdiv; /* clockdiv override */
char sensor; /* Type of image sensor chip (SEN_*) */
#define SEN_UNKNOWN 0
#define SEN_OV6620 1
#define SEN_OV6630 2
-#define SEN_OV7610 3
-#define SEN_OV7620 4
-#define SEN_OV7640 5
-#define SEN_OV7670 6
-#define SEN_OV76BE 7
-#define SEN_OV8610 8
+#define SEN_OV66308AF 3
+#define SEN_OV7610 4
+#define SEN_OV7620 5
+#define SEN_OV7640 6
+#define SEN_OV7670 7
+#define SEN_OV76BE 8
+#define SEN_OV8610 9
};
/* V4L2 controls supported by the driver */
@@ -94,11 +103,17 @@
static int sd_gethflip(struct gspca_dev *gspca_dev, __s32 *val);
static int sd_setvflip(struct gspca_dev *gspca_dev, __s32 val);
static int sd_getvflip(struct gspca_dev *gspca_dev, __s32 *val);
+static int sd_setautobrightness(struct gspca_dev *gspca_dev, __s32 val);
+static int sd_getautobrightness(struct gspca_dev *gspca_dev, __s32 *val);
+static int sd_setfreq(struct gspca_dev *gspca_dev, __s32 val);
+static int sd_getfreq(struct gspca_dev *gspca_dev, __s32 *val);
static void setbrightness(struct gspca_dev *gspca_dev);
static void setcontrast(struct gspca_dev *gspca_dev);
static void setcolors(struct gspca_dev *gspca_dev);
+static void setautobrightness(struct sd *sd);
+static void setfreq(struct sd *sd);
-static struct ctrl sd_ctrls[] = {
+static const struct ctrl sd_ctrls[] = {
{
{
.id = V4L2_CID_BRIGHTNESS,
@@ -141,7 +156,7 @@
.set = sd_setcolors,
.get = sd_getcolors,
},
-/* next controls work with ov7670 only */
+/* The flip controls work with ov7670 only */
#define HFLIP_IDX 3
{
{
@@ -172,6 +187,51 @@
.set = sd_setvflip,
.get = sd_getvflip,
},
+#define AUTOBRIGHT_IDX 5
+ {
+ {
+ .id = V4L2_CID_AUTOBRIGHTNESS,
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .name = "Auto Brightness",
+ .minimum = 0,
+ .maximum = 1,
+ .step = 1,
+#define AUTOBRIGHT_DEF 1
+ .default_value = AUTOBRIGHT_DEF,
+ },
+ .set = sd_setautobrightness,
+ .get = sd_getautobrightness,
+ },
+#define FREQ_IDX 6
+ {
+ {
+ .id = V4L2_CID_POWER_LINE_FREQUENCY,
+ .type = V4L2_CTRL_TYPE_MENU,
+ .name = "Light frequency filter",
+ .minimum = 0,
+ .maximum = 2, /* 0: 0, 1: 50Hz, 2:60Hz */
+ .step = 1,
+#define FREQ_DEF 0
+ .default_value = FREQ_DEF,
+ },
+ .set = sd_setfreq,
+ .get = sd_getfreq,
+ },
+#define OV7670_FREQ_IDX 7
+ {
+ {
+ .id = V4L2_CID_POWER_LINE_FREQUENCY,
+ .type = V4L2_CTRL_TYPE_MENU,
+ .name = "Light frequency filter",
+ .minimum = 0,
+ .maximum = 3, /* 0: 0, 1: 50Hz, 2:60Hz 3: Auto Hz */
+ .step = 1,
+#define OV7670_FREQ_DEF 3
+ .default_value = OV7670_FREQ_DEF,
+ },
+ .set = sd_setfreq,
+ .get = sd_getfreq,
+ },
};
static const struct v4l2_pix_format ov519_vga_mode[] = {
@@ -187,11 +247,21 @@
.priv = 0},
};
static const struct v4l2_pix_format ov519_sif_mode[] = {
+ {160, 120, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE,
+ .bytesperline = 160,
+ .sizeimage = 160 * 120 * 3 / 8 + 590,
+ .colorspace = V4L2_COLORSPACE_JPEG,
+ .priv = 3},
{176, 144, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE,
.bytesperline = 176,
.sizeimage = 176 * 144 * 3 / 8 + 590,
.colorspace = V4L2_COLORSPACE_JPEG,
.priv = 1},
+ {320, 240, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE,
+ .bytesperline = 320,
+ .sizeimage = 320 * 240 * 3 / 8 + 590,
+ .colorspace = V4L2_COLORSPACE_JPEG,
+ .priv = 2},
{352, 288, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE,
.bytesperline = 352,
.sizeimage = 352 * 288 * 3 / 8 + 590,
@@ -199,42 +269,118 @@
.priv = 0},
};
+/* Note some of the sizeimage values for the ov511 / ov518 may seem
+ larger than necessary, however they need to be this big as the ov511 /
+ ov518 always fills the entire isoc frame, using 0 padding bytes when
+ it doesn't have any data. So with low framerates the amount of data
+ transferred can become quite large (libv4l will remove all the 0 padding
+ in userspace). */
static const struct v4l2_pix_format ov518_vga_mode[] = {
{320, 240, V4L2_PIX_FMT_OV518, V4L2_FIELD_NONE,
.bytesperline = 320,
- .sizeimage = 320 * 240 * 3 / 8 + 590,
+ .sizeimage = 320 * 240 * 3,
.colorspace = V4L2_COLORSPACE_JPEG,
.priv = 1},
{640, 480, V4L2_PIX_FMT_OV518, V4L2_FIELD_NONE,
.bytesperline = 640,
- .sizeimage = 640 * 480 * 3 / 8 + 590,
+ .sizeimage = 640 * 480 * 2,
.colorspace = V4L2_COLORSPACE_JPEG,
.priv = 0},
};
static const struct v4l2_pix_format ov518_sif_mode[] = {
+ {160, 120, V4L2_PIX_FMT_OV518, V4L2_FIELD_NONE,
+ .bytesperline = 160,
+ .sizeimage = 70000,
+ .colorspace = V4L2_COLORSPACE_JPEG,
+ .priv = 3},
{176, 144, V4L2_PIX_FMT_OV518, V4L2_FIELD_NONE,
.bytesperline = 176,
- .sizeimage = 40000,
+ .sizeimage = 70000,
.colorspace = V4L2_COLORSPACE_JPEG,
.priv = 1},
+ {320, 240, V4L2_PIX_FMT_OV518, V4L2_FIELD_NONE,
+ .bytesperline = 320,
+ .sizeimage = 320 * 240 * 3,
+ .colorspace = V4L2_COLORSPACE_JPEG,
+ .priv = 2},
{352, 288, V4L2_PIX_FMT_OV518, V4L2_FIELD_NONE,
.bytesperline = 352,
- .sizeimage = 352 * 288 * 3 / 8 + 590,
+ .sizeimage = 352 * 288 * 3,
.colorspace = V4L2_COLORSPACE_JPEG,
.priv = 0},
};
+static const struct v4l2_pix_format ov511_vga_mode[] = {
+ {320, 240, V4L2_PIX_FMT_OV511, V4L2_FIELD_NONE,
+ .bytesperline = 320,
+ .sizeimage = 320 * 240 * 3,
+ .colorspace = V4L2_COLORSPACE_JPEG,
+ .priv = 1},
+ {640, 480, V4L2_PIX_FMT_OV511, V4L2_FIELD_NONE,
+ .bytesperline = 640,
+ .sizeimage = 640 * 480 * 2,
+ .colorspace = V4L2_COLORSPACE_JPEG,
+ .priv = 0},
+};
+static const struct v4l2_pix_format ov511_sif_mode[] = {
+ {160, 120, V4L2_PIX_FMT_OV511, V4L2_FIELD_NONE,
+ .bytesperline = 160,
+ .sizeimage = 70000,
+ .colorspace = V4L2_COLORSPACE_JPEG,
+ .priv = 3},
+ {176, 144, V4L2_PIX_FMT_OV511, V4L2_FIELD_NONE,
+ .bytesperline = 176,
+ .sizeimage = 70000,
+ .colorspace = V4L2_COLORSPACE_JPEG,
+ .priv = 1},
+ {320, 240, V4L2_PIX_FMT_OV511, V4L2_FIELD_NONE,
+ .bytesperline = 320,
+ .sizeimage = 320 * 240 * 3,
+ .colorspace = V4L2_COLORSPACE_JPEG,
+ .priv = 2},
+ {352, 288, V4L2_PIX_FMT_OV511, V4L2_FIELD_NONE,
+ .bytesperline = 352,
+ .sizeimage = 352 * 288 * 3,
+ .colorspace = V4L2_COLORSPACE_JPEG,
+ .priv = 0},
+};
/* Registers common to OV511 / OV518 */
+#define R51x_FIFO_PSIZE 0x30 /* 2 bytes wide w/ OV518(+) */
#define R51x_SYS_RESET 0x50
+ /* Reset type flags */
+ #define OV511_RESET_OMNICE 0x08
#define R51x_SYS_INIT 0x53
#define R51x_SYS_SNAP 0x52
#define R51x_SYS_CUST_ID 0x5F
#define R51x_COMP_LUT_BEGIN 0x80
/* OV511 Camera interface register numbers */
+#define R511_CAM_DELAY 0x10
+#define R511_CAM_EDGE 0x11
+#define R511_CAM_PXCNT 0x12
+#define R511_CAM_LNCNT 0x13
+#define R511_CAM_PXDIV 0x14
+#define R511_CAM_LNDIV 0x15
+#define R511_CAM_UV_EN 0x16
+#define R511_CAM_LINE_MODE 0x17
+#define R511_CAM_OPTS 0x18
+
+#define R511_SNAP_FRAME 0x19
+#define R511_SNAP_PXCNT 0x1A
+#define R511_SNAP_LNCNT 0x1B
+#define R511_SNAP_PXDIV 0x1C
+#define R511_SNAP_LNDIV 0x1D
+#define R511_SNAP_UV_EN 0x1E
+#define R511_SNAP_OPTS 0x1F
+
+#define R511_DRAM_FLOW_CTL 0x20
+#define R511_FIFO_OPTS 0x31
+#define R511_I2C_CTL 0x40
#define R511_SYS_LED_CTL 0x55 /* OV511+ only */
-#define OV511_RESET_NOREGS 0x3F /* All but OV511 & regs */
+#define R511_COMP_EN 0x78
+#define R511_COMP_LUT_EN 0x79
/* OV518 Camera interface register numbers */
#define R518_GPIO_OUT 0x56 /* OV518(+) only */
@@ -383,7 +529,7 @@
{ 0x28, 0x05 },
{ 0x2a, 0x04 }, /* Disable framerate adjust */
/* { 0x2b, 0xac }, * Framerate; Set 2a[7] first */
- { 0x2d, 0x99 },
+ { 0x2d, 0x85 },
{ 0x33, 0xa0 }, /* Color Processing Parameter */
{ 0x34, 0xd2 }, /* Max A/D range */
{ 0x38, 0x8b },
@@ -416,7 +562,7 @@
{ 0x07, 0x2d }, /* Sharpness */
{ 0x0c, 0x20 },
{ 0x0d, 0x20 },
- { 0x0e, 0x20 },
+ { 0x0e, 0xa0 }, /* Was 0x20, bit7 enables a 2x gain which we need */
{ 0x0f, 0x05 },
{ 0x10, 0x9a },
{ 0x11, 0x00 }, /* Pixel clock = fastest */
@@ -558,7 +704,7 @@
{ 0x23, 0x00 },
{ 0x26, 0xa2 },
{ 0x27, 0xea },
- { 0x28, 0x20 },
+ { 0x28, 0x22 }, /* Was 0x20, bit1 enables a 2x gain which we need */
{ 0x29, 0x00 },
{ 0x2a, 0x10 },
{ 0x2b, 0x00 },
@@ -999,13 +1145,128 @@
return ret;
}
+static int ov511_i2c_w(struct sd *sd, __u8 reg, __u8 value)
+{
+ int rc, retries;
+
+ PDEBUG(D_USBO, "i2c 0x%02x -> [0x%02x]", value, reg);
+
+ /* Three byte write cycle */
+ for (retries = 6; ; ) {
+ /* Select camera register */
+ rc = reg_w(sd, R51x_I2C_SADDR_3, reg);
+ if (rc < 0)
+ return rc;
+
+ /* Write "value" to I2C data port of OV511 */
+ rc = reg_w(sd, R51x_I2C_DATA, value);
+ if (rc < 0)
+ return rc;
+
+ /* Initiate 3-byte write cycle */
+ rc = reg_w(sd, R511_I2C_CTL, 0x01);
+ if (rc < 0)
+ return rc;
+
+ do
+ rc = reg_r(sd, R511_I2C_CTL);
+ while (rc > 0 && ((rc & 1) == 0)); /* Retry until idle */
+
+ if (rc < 0)
+ return rc;
+
+ if ((rc & 2) == 0) /* Ack? */
+ break;
+ if (--retries < 0) {
+ PDEBUG(D_USBO, "i2c write retries exhausted");
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
+static int ov511_i2c_r(struct sd *sd, __u8 reg)
+{
+ int rc, value, retries;
+
+ /* Two byte write cycle */
+ for (retries = 6; ; ) {
+ /* Select camera register */
+ rc = reg_w(sd, R51x_I2C_SADDR_2, reg);
+ if (rc < 0)
+ return rc;
+
+ /* Initiate 2-byte write cycle */
+ rc = reg_w(sd, R511_I2C_CTL, 0x03);
+ if (rc < 0)
+ return rc;
+
+ do
+ rc = reg_r(sd, R511_I2C_CTL);
+ while (rc > 0 && ((rc & 1) == 0)); /* Retry until idle */
+
+ if (rc < 0)
+ return rc;
+
+ if ((rc & 2) == 0) /* Ack? */
+ break;
+
+ /* I2C abort */
+ reg_w(sd, R511_I2C_CTL, 0x10);
+
+ if (--retries < 0) {
+ PDEBUG(D_USBI, "i2c write retries exhausted");
+ return -1;
+ }
+ }
+
+ /* Two byte read cycle */
+ for (retries = 6; ; ) {
+ /* Initiate 2-byte read cycle */
+ rc = reg_w(sd, R511_I2C_CTL, 0x05);
+ if (rc < 0)
+ return rc;
+
+ do
+ rc = reg_r(sd, R511_I2C_CTL);
+ while (rc > 0 && ((rc & 1) == 0)); /* Retry until idle */
+
+ if (rc < 0)
+ return rc;
+
+ if ((rc & 2) == 0) /* Ack? */
+ break;
+
+ /* I2C abort */
+ rc = reg_w(sd, R511_I2C_CTL, 0x10);
+ if (rc < 0)
+ return rc;
+
+ if (--retries < 0) {
+ PDEBUG(D_USBI, "i2c read retries exhausted");
+ return -1;
+ }
+ }
+
+ value = reg_r(sd, R51x_I2C_DATA);
+
+ PDEBUG(D_USBI, "i2c [0x%02X] -> 0x%02X", reg, value);
+
+ /* This is needed to make i2c_w() work */
+ rc = reg_w(sd, R511_I2C_CTL, 0x05);
+ if (rc < 0)
+ return rc;
+
+ return value;
+}
/*
* The OV518 I2C I/O procedure is different, hence, this function.
* This is normally only called from i2c_w(). Note that this function
* always succeeds regardless of whether the sensor is present and working.
*/
-static int i2c_w(struct sd *sd,
+static int ov518_i2c_w(struct sd *sd,
__u8 reg,
__u8 value)
{
@@ -1040,7 +1301,7 @@
* This is normally only called from i2c_r(). Note that this function
* always succeeds regardless of whether the sensor is present and working.
*/
-static int i2c_r(struct sd *sd, __u8 reg)
+static int ov518_i2c_r(struct sd *sd, __u8 reg)
{
int rc, value;
@@ -1063,6 +1324,34 @@
return value;
}
+static int i2c_w(struct sd *sd, __u8 reg, __u8 value)
+{
+ switch (sd->bridge) {
+ case BRIDGE_OV511:
+ case BRIDGE_OV511PLUS:
+ return ov511_i2c_w(sd, reg, value);
+ case BRIDGE_OV518:
+ case BRIDGE_OV518PLUS:
+ case BRIDGE_OV519:
+ return ov518_i2c_w(sd, reg, value);
+ }
+ return -1; /* Should never happen */
+}
+
+static int i2c_r(struct sd *sd, __u8 reg)
+{
+ switch (sd->bridge) {
+ case BRIDGE_OV511:
+ case BRIDGE_OV511PLUS:
+ return ov511_i2c_r(sd, reg);
+ case BRIDGE_OV518:
+ case BRIDGE_OV518PLUS:
+ case BRIDGE_OV519:
+ return ov518_i2c_r(sd, reg);
+ }
+ return -1; /* Should never happen */
+}
+
/* Writes bits at positions specified by mask to an I2C reg. Bits that are in
* the same position as 1's in "mask" are cleared and set to "value". Bits
* that are in the same position as 0's in "mask" are preserved, regardless
@@ -1242,7 +1531,6 @@
}
/* Set sensor-specific vars */
-/* sd->sif = 0; already done */
return 0;
}
@@ -1279,15 +1567,13 @@
}
} else if ((rc & 3) == 1) {
/* I don't know what's different about the 76BE yet. */
- if (i2c_r(sd, 0x15) & 1)
+ if (i2c_r(sd, 0x15) & 1) {
PDEBUG(D_PROBE, "Sensor is an OV7620AE");
- else
+ sd->sensor = SEN_OV7620;
+ } else {
PDEBUG(D_PROBE, "Sensor is an OV76BE");
-
- /* OV511+ will return all zero isoc data unless we
- * configure the sensor as a 7620. Someone needs to
- * find the exact reg. setting that causes this. */
- sd->sensor = SEN_OV76BE;
+ sd->sensor = SEN_OV76BE;
+ }
} else if ((rc & 3) == 0) {
/* try to read product id registers */
high = i2c_r(sd, 0x0a);
@@ -1333,7 +1619,6 @@
}
/* Set sensor-specific vars */
-/* sd->sif = 0; already done */
return 0;
}
@@ -1362,13 +1647,14 @@
break;
case 0x01:
sd->sensor = SEN_OV6620;
+ PDEBUG(D_PROBE, "Sensor is an OV6620");
break;
case 0x02:
sd->sensor = SEN_OV6630;
PDEBUG(D_PROBE, "Sensor is an OV66308AE");
break;
case 0x03:
- sd->sensor = SEN_OV6630;
+ sd->sensor = SEN_OV66308AF;
PDEBUG(D_PROBE, "Sensor is an OV66308AF");
break;
case 0x90:
@@ -1391,6 +1677,9 @@
/* Turns on or off the LED. Only has an effect with OV511+/OV518(+)/OV519 */
static void ov51x_led_control(struct sd *sd, int on)
{
+ if (sd->invert_led)
+ on = !on;
+
switch (sd->bridge) {
/* OV511 has no LED control */
case BRIDGE_OV511PLUS:
@@ -1406,9 +1695,31 @@
}
}
-/* OV518 quantization tables are 8x4 (instead of 8x8) */
-static int ov518_upload_quan_tables(struct sd *sd)
+static int ov51x_upload_quan_tables(struct sd *sd)
{
+ const unsigned char yQuanTable511[] = {
+ 0, 1, 1, 2, 2, 3, 3, 4,
+ 1, 1, 1, 2, 2, 3, 4, 4,
+ 1, 1, 2, 2, 3, 4, 4, 4,
+ 2, 2, 2, 3, 4, 4, 4, 4,
+ 2, 2, 3, 4, 4, 5, 5, 5,
+ 3, 3, 4, 4, 5, 5, 5, 5,
+ 3, 4, 4, 4, 5, 5, 5, 5,
+ 4, 4, 4, 4, 5, 5, 5, 5
+ };
+
+ const unsigned char uvQuanTable511[] = {
+ 0, 2, 2, 3, 4, 4, 4, 4,
+ 2, 2, 2, 4, 4, 4, 4, 4,
+ 2, 2, 3, 4, 4, 4, 4, 4,
+ 3, 4, 4, 4, 4, 4, 4, 4,
+ 4, 4, 4, 4, 4, 4, 4, 4,
+ 4, 4, 4, 4, 4, 4, 4, 4,
+ 4, 4, 4, 4, 4, 4, 4, 4,
+ 4, 4, 4, 4, 4, 4, 4, 4
+ };
+
+ /* OV518 quantization tables are 8x4 (instead of 8x8) */
const unsigned char yQuanTable518[] = {
5, 4, 5, 6, 6, 7, 7, 7,
5, 5, 5, 5, 6, 7, 7, 7,
@@ -1423,14 +1734,23 @@
7, 7, 7, 7, 7, 7, 8, 8
};
- const unsigned char *pYTable = yQuanTable518;
- const unsigned char *pUVTable = uvQuanTable518;
+ const unsigned char *pYTable, *pUVTable;
unsigned char val0, val1;
- int i, rc, reg = R51x_COMP_LUT_BEGIN;
+ int i, size, rc, reg = R51x_COMP_LUT_BEGIN;
PDEBUG(D_PROBE, "Uploading quantization tables");
- for (i = 0; i < 16; i++) {
+ if (sd->bridge == BRIDGE_OV511 || sd->bridge == BRIDGE_OV511PLUS) {
+ pYTable = yQuanTable511;
+ pUVTable = uvQuanTable511;
+ size = 32;
+ } else {
+ pYTable = yQuanTable518;
+ pUVTable = uvQuanTable518;
+ size = 16;
+ }
+
+ for (i = 0; i < size; i++) {
val0 = *pYTable++;
val1 = *pYTable++;
val0 &= 0x0f;
@@ -1445,7 +1765,7 @@
val0 &= 0x0f;
val1 &= 0x0f;
val0 |= val1 << 4;
- rc = reg_w(sd, reg + 16, val0);
+ rc = reg_w(sd, reg + size, val0);
if (rc < 0)
return rc;
@@ -1455,6 +1775,87 @@
return 0;
}
+/* This initializes the OV511/OV511+ and the sensor */
+static int ov511_configure(struct gspca_dev *gspca_dev)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+ int rc;
+
+ /* For 511 and 511+ */
+ const struct ov_regvals init_511[] = {
+ { R51x_SYS_RESET, 0x7f },
+ { R51x_SYS_INIT, 0x01 },
+ { R51x_SYS_RESET, 0x7f },
+ { R51x_SYS_INIT, 0x01 },
+ { R51x_SYS_RESET, 0x3f },
+ { R51x_SYS_INIT, 0x01 },
+ { R51x_SYS_RESET, 0x3d },
+ };
+
+ const struct ov_regvals norm_511[] = {
+ { R511_DRAM_FLOW_CTL, 0x01 },
+ { R51x_SYS_SNAP, 0x00 },
+ { R51x_SYS_SNAP, 0x02 },
+ { R51x_SYS_SNAP, 0x00 },
+ { R511_FIFO_OPTS, 0x1f },
+ { R511_COMP_EN, 0x00 },
+ { R511_COMP_LUT_EN, 0x03 },
+ };
+
+ const struct ov_regvals norm_511_p[] = {
+ { R511_DRAM_FLOW_CTL, 0xff },
+ { R51x_SYS_SNAP, 0x00 },
+ { R51x_SYS_SNAP, 0x02 },
+ { R51x_SYS_SNAP, 0x00 },
+ { R511_FIFO_OPTS, 0xff },
+ { R511_COMP_EN, 0x00 },
+ { R511_COMP_LUT_EN, 0x03 },
+ };
+
+ const struct ov_regvals compress_511[] = {
+ { 0x70, 0x1f },
+ { 0x71, 0x05 },
+ { 0x72, 0x06 },
+ { 0x73, 0x06 },
+ { 0x74, 0x14 },
+ { 0x75, 0x03 },
+ { 0x76, 0x04 },
+ { 0x77, 0x04 },
+ };
+
+ PDEBUG(D_PROBE, "Device custom id %x", reg_r(sd, R51x_SYS_CUST_ID));
+
+ rc = write_regvals(sd, init_511, ARRAY_SIZE(init_511));
+ if (rc < 0)
+ return rc;
+
+ switch (sd->bridge) {
+ case BRIDGE_OV511:
+ rc = write_regvals(sd, norm_511, ARRAY_SIZE(norm_511));
+ if (rc < 0)
+ return rc;
+ break;
+ case BRIDGE_OV511PLUS:
+ rc = write_regvals(sd, norm_511_p, ARRAY_SIZE(norm_511_p));
+ if (rc < 0)
+ return rc;
+ break;
+ }
+
+ /* Init compression */
+ rc = write_regvals(sd, compress_511, ARRAY_SIZE(compress_511));
+ if (rc < 0)
+ return rc;
+
+ rc = ov51x_upload_quan_tables(sd);
+ if (rc < 0) {
+ PDEBUG(D_ERR, "Error uploading quantization tables");
+ return rc;
+ }
+
+ return 0;
+}
+
/* This initializes the OV518/OV518+ and the sensor */
static int ov518_configure(struct gspca_dev *gspca_dev)
{
@@ -1462,7 +1863,7 @@
int rc;
/* For 518 and 518+ */
- static struct ov_regvals init_518[] = {
+ const struct ov_regvals init_518[] = {
{ R51x_SYS_RESET, 0x40 },
{ R51x_SYS_INIT, 0xe1 },
{ R51x_SYS_RESET, 0x3e },
@@ -1473,7 +1874,7 @@
{ 0x5d, 0x03 },
};
- static struct ov_regvals norm_518[] = {
+ const struct ov_regvals norm_518[] = {
{ R51x_SYS_SNAP, 0x02 }, /* Reset */
{ R51x_SYS_SNAP, 0x01 }, /* Enable */
{ 0x31, 0x0f },
@@ -1486,7 +1887,7 @@
{ 0x2f, 0x80 },
};
- static struct ov_regvals norm_518_p[] = {
+ const struct ov_regvals norm_518_p[] = {
{ R51x_SYS_SNAP, 0x02 }, /* Reset */
{ R51x_SYS_SNAP, 0x01 }, /* Enable */
{ 0x31, 0x0f },
@@ -1531,7 +1932,7 @@
break;
}
- rc = ov518_upload_quan_tables(sd);
+ rc = ov51x_upload_quan_tables(sd);
if (rc < 0) {
PDEBUG(D_ERR, "Error uploading quantization tables");
return rc;
@@ -1573,9 +1974,14 @@
struct cam *cam;
int ret = 0;
- sd->bridge = id->driver_info;
+ sd->bridge = id->driver_info & BRIDGE_MASK;
+ sd->invert_led = id->driver_info & BRIDGE_INVERT_LED;
switch (sd->bridge) {
+ case BRIDGE_OV511:
+ case BRIDGE_OV511PLUS:
+ ret = ov511_configure(gspca_dev);
+ break;
case BRIDGE_OV518:
case BRIDGE_OV518PLUS:
ret = ov518_configure(gspca_dev);
@@ -1634,6 +2040,16 @@
cam = &gspca_dev->cam;
switch (sd->bridge) {
+ case BRIDGE_OV511:
+ case BRIDGE_OV511PLUS:
+ if (!sd->sif) {
+ cam->cam_mode = ov511_vga_mode;
+ cam->nmodes = ARRAY_SIZE(ov511_vga_mode);
+ } else {
+ cam->cam_mode = ov511_sif_mode;
+ cam->nmodes = ARRAY_SIZE(ov511_sif_mode);
+ }
+ break;
case BRIDGE_OV518:
case BRIDGE_OV518PLUS:
if (!sd->sif) {
@@ -1655,13 +2071,28 @@
break;
}
sd->brightness = BRIGHTNESS_DEF;
- sd->contrast = CONTRAST_DEF;
+ if (sd->sensor == SEN_OV6630 || sd->sensor == SEN_OV66308AF)
+ sd->contrast = 200; /* The default is too low for the ov6630 */
+ else
+ sd->contrast = CONTRAST_DEF;
sd->colors = COLOR_DEF;
sd->hflip = HFLIP_DEF;
sd->vflip = VFLIP_DEF;
- if (sd->sensor != SEN_OV7670)
- gspca_dev->ctrl_dis = (1 << HFLIP_IDX)
- | (1 << VFLIP_IDX);
+ sd->autobrightness = AUTOBRIGHT_DEF;
+ if (sd->sensor == SEN_OV7670) {
+ sd->freq = OV7670_FREQ_DEF;
+ gspca_dev->ctrl_dis = 1 << FREQ_IDX;
+ } else {
+ sd->freq = FREQ_DEF;
+ gspca_dev->ctrl_dis = (1 << HFLIP_IDX) | (1 << VFLIP_IDX) |
+ (1 << OV7670_FREQ_IDX);
+ }
+ if (sd->sensor == SEN_OV7640 || sd->sensor == SEN_OV7670)
+ gspca_dev->ctrl_dis |= 1 << AUTOBRIGHT_IDX;
+ /* OV8610 Frequency filter control should work but needs testing */
+ if (sd->sensor == SEN_OV8610)
+ gspca_dev->ctrl_dis |= 1 << FREQ_IDX;
+
return 0;
error:
PDEBUG(D_ERR, "OV519 Config failed");
@@ -1680,6 +2111,7 @@
return -EIO;
break;
case SEN_OV6630:
+ case SEN_OV66308AF:
if (write_i2c_regvals(sd, norm_6x30, ARRAY_SIZE(norm_6x30)))
return -EIO;
break;
@@ -1688,6 +2120,8 @@
/* case SEN_OV76BE: */
if (write_i2c_regvals(sd, norm_7610, ARRAY_SIZE(norm_7610)))
return -EIO;
+ if (i2c_w_mask(sd, 0x0e, 0x00, 0x40))
+ return -EIO;
break;
case SEN_OV7620:
if (write_i2c_regvals(sd, norm_7620, ARRAY_SIZE(norm_7620)))
@@ -1709,6 +2143,126 @@
return 0;
}
+/* Set up the OV511/OV511+ with the given image parameters.
+ *
+ * Do not put any sensor-specific code in here (including I2C I/O functions)
+ */
+static int ov511_mode_init_regs(struct sd *sd)
+{
+ int hsegs, vsegs, packet_size, fps, needed;
+ int interlaced = 0;
+ struct usb_host_interface *alt;
+ struct usb_interface *intf;
+
+ intf = usb_ifnum_to_if(sd->gspca_dev.dev, sd->gspca_dev.iface);
+ alt = usb_altnum_to_altsetting(intf, sd->gspca_dev.alt);
+ if (!alt) {
+ PDEBUG(D_ERR, "Couldn't get altsetting");
+ return -EIO;
+ }
+
+ packet_size = le16_to_cpu(alt->endpoint[0].desc.wMaxPacketSize);
+ reg_w(sd, R51x_FIFO_PSIZE, packet_size >> 5);
+
+ reg_w(sd, R511_CAM_UV_EN, 0x01);
+ reg_w(sd, R511_SNAP_UV_EN, 0x01);
+ reg_w(sd, R511_SNAP_OPTS, 0x03);
+
+ /* Here I'm assuming that snapshot size == image size.
+ * I hope that's always true. --claudio
+ */
+ hsegs = (sd->gspca_dev.width >> 3) - 1;
+ vsegs = (sd->gspca_dev.height >> 3) - 1;
+
+ reg_w(sd, R511_CAM_PXCNT, hsegs);
+ reg_w(sd, R511_CAM_LNCNT, vsegs);
+ reg_w(sd, R511_CAM_PXDIV, 0x00);
+ reg_w(sd, R511_CAM_LNDIV, 0x00);
+
+ /* YUV420, low pass filter on */
+ reg_w(sd, R511_CAM_OPTS, 0x03);
+
+ /* Snapshot additions */
+ reg_w(sd, R511_SNAP_PXCNT, hsegs);
+ reg_w(sd, R511_SNAP_LNCNT, vsegs);
+ reg_w(sd, R511_SNAP_PXDIV, 0x00);
+ reg_w(sd, R511_SNAP_LNDIV, 0x00);
+
+ /******** Set the framerate ********/
+ if (frame_rate > 0)
+ sd->frame_rate = frame_rate;
+
+ switch (sd->sensor) {
+ case SEN_OV6620:
+ /* No framerate control, doesn't like higher rates yet */
+ sd->clockdiv = 3;
+ break;
+
+ /* Note once the FIXME's in mode_init_ov_sensor_regs() are fixed
+ for more sensors we need to do this for them too */
+ case SEN_OV7620:
+ case SEN_OV7640:
+ case SEN_OV76BE:
+ if (sd->gspca_dev.width == 320)
+ interlaced = 1;
+ /* Fall through */
+ case SEN_OV6630:
+ case SEN_OV7610:
+ case SEN_OV7670:
+ switch (sd->frame_rate) {
+ case 30:
+ case 25:
+ /* Not enough bandwidth to do 640x480 @ 30 fps */
+ if (sd->gspca_dev.width != 640) {
+ sd->clockdiv = 0;
+ break;
+ }
+ /* Fall through for 640x480 case */
+ default:
+/* case 20: */
+/* case 15: */
+ sd->clockdiv = 1;
+ break;
+ case 10:
+ sd->clockdiv = 2;
+ break;
+ case 5:
+ sd->clockdiv = 5;
+ break;
+ }
+ if (interlaced) {
+ sd->clockdiv = (sd->clockdiv + 1) * 2 - 1;
+ /* Higher than 10 does not work */
+ if (sd->clockdiv > 10)
+ sd->clockdiv = 10;
+ }
+ break;
+
+ case SEN_OV8610:
+ /* No framerate control ?? */
+ sd->clockdiv = 0;
+ break;
+ }
+
+ /* Check if we have enough bandwidth to disable compression */
+ fps = (interlaced ? 60 : 30) / (sd->clockdiv + 1) + 1;
+ needed = fps * sd->gspca_dev.width * sd->gspca_dev.height * 3 / 2;
+ /* 1400 is a conservative estimate of the max nr of isoc packets/sec */
+ if (needed > 1400 * packet_size) {
+ /* Enable Y and UV quantization and compression */
+ reg_w(sd, R511_COMP_EN, 0x07);
+ reg_w(sd, R511_COMP_LUT_EN, 0x03);
+ } else {
+ reg_w(sd, R511_COMP_EN, 0x06);
+ reg_w(sd, R511_COMP_LUT_EN, 0x00);
+ }
+
+ reg_w(sd, R51x_SYS_RESET, OV511_RESET_OMNICE);
+ reg_w(sd, R51x_SYS_RESET, 0);
+
+ return 0;
+}
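A quick check of the compression decision above, sketched with assumed example values (961-byte isoc packets and the default clockdiv of 1 for VGA; neither number comes from a device trace):

	/* Assumed example values only. */
	int clockdiv = 1, packet_size = 961;
	int fps = 30 / (clockdiv + 1) + 1;		/* 16, matching the formula above */
	int needed = fps * 640 * 480 * 3 / 2;		/* 7372800 bytes/s of raw YUV420 */
	int budget = 1400 * packet_size;		/* 1345400 bytes/s isoc estimate */
	int use_compression = needed > budget;		/* 1: VGA always needs compression */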
+
/* Sets up the OV518/OV518+ with the given image parameters
*
* OV518 needs a completely different approach, until we can figure out what
@@ -1718,7 +2272,19 @@
*/
static int ov518_mode_init_regs(struct sd *sd)
{
- int hsegs, vsegs;
+ int hsegs, vsegs, packet_size;
+ struct usb_host_interface *alt;
+ struct usb_interface *intf;
+
+ intf = usb_ifnum_to_if(sd->gspca_dev.dev, sd->gspca_dev.iface);
+ alt = usb_altnum_to_altsetting(intf, sd->gspca_dev.alt);
+ if (!alt) {
+ PDEBUG(D_ERR, "Couldn't get altsetting");
+ return -EIO;
+ }
+
+ packet_size = le16_to_cpu(alt->endpoint[0].desc.wMaxPacketSize);
+ ov518_reg_w32(sd, R51x_FIFO_PSIZE, packet_size & ~7, 2);
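The two FIFO-size writes use different units; a sketch with an assumed wMaxPacketSize of 961 (reading the OV511 shift as 32-byte units is an inference from the code, not from a datasheet):

	int packet_size = 961;			/* assumed example value */
	int ov511_psize = packet_size >> 5;	/* 30, i.e. 30 * 32 = 960 bytes */
	int ov518_psize = packet_size & ~7;	/* 960, written to the 2-byte-wide reg */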
/******** Set the mode ********/
@@ -1755,20 +2321,30 @@
/* Windows driver does this here; who knows why */
reg_w(sd, 0x2f, 0x80);
- /******** Set the framerate (to 30 FPS) ********/
- if (sd->bridge == BRIDGE_OV518PLUS)
- sd->clockdiv = 1;
- else
- sd->clockdiv = 0;
+ /******** Set the framerate ********/
+ sd->clockdiv = 1;
/* Mode independent, but framerate dependent, regs */
- reg_w(sd, 0x51, 0x04); /* Clock divider; lower==faster */
+ /* 0x51: Clock divider; Only works on some cams which use 2 crystals */
+ reg_w(sd, 0x51, 0x04);
reg_w(sd, 0x22, 0x18);
reg_w(sd, 0x23, 0xff);
- if (sd->bridge == BRIDGE_OV518PLUS)
- reg_w(sd, 0x21, 0x19);
- else
+ if (sd->bridge == BRIDGE_OV518PLUS) {
+ switch (sd->sensor) {
+ case SEN_OV7620:
+ if (sd->gspca_dev.width == 320) {
+ reg_w(sd, 0x20, 0x00);
+ reg_w(sd, 0x21, 0x19);
+ } else {
+ reg_w(sd, 0x20, 0x60);
+ reg_w(sd, 0x21, 0x1f);
+ }
+ break;
+ default:
+ reg_w(sd, 0x21, 0x19);
+ }
+ } else
reg_w(sd, 0x71, 0x17); /* Compression-related? */
/* FIXME: Sensor-specific */
@@ -1879,7 +2455,11 @@
reg_w(sd, OV519_R10_H_SIZE, sd->gspca_dev.width >> 4);
reg_w(sd, OV519_R11_V_SIZE, sd->gspca_dev.height >> 3);
- reg_w(sd, OV519_R12_X_OFFSETL, 0x00);
+ if (sd->sensor == SEN_OV7670 &&
+ sd->gspca_dev.cam.cam_mode[sd->gspca_dev.curr_mode].priv)
+ reg_w(sd, OV519_R12_X_OFFSETL, 0x04);
+ else
+ reg_w(sd, OV519_R12_X_OFFSETL, 0x00);
reg_w(sd, OV519_R13_X_OFFSETH, 0x00);
reg_w(sd, OV519_R14_Y_OFFSETL, 0x00);
reg_w(sd, OV519_R15_Y_OFFSETH, 0x00);
@@ -1971,7 +2551,7 @@
int qvga;
gspca_dev = &sd->gspca_dev;
- qvga = gspca_dev->cam.cam_mode[(int) gspca_dev->curr_mode].priv;
+ qvga = gspca_dev->cam.cam_mode[(int) gspca_dev->curr_mode].priv & 1;
/******** Mode (VGA/QVGA) and sensor specific regs ********/
switch (sd->sensor) {
@@ -1983,21 +2563,16 @@
i2c_w_mask(sd, 0x14, qvga ? 0x20 : 0x00, 0x20);
break;
case SEN_OV7620:
-/* i2c_w(sd, 0x2b, 0x00); */
+ case SEN_OV76BE:
i2c_w_mask(sd, 0x14, qvga ? 0x20 : 0x00, 0x20);
i2c_w_mask(sd, 0x28, qvga ? 0x00 : 0x20, 0x20);
i2c_w(sd, 0x24, qvga ? 0x20 : 0x3a);
i2c_w(sd, 0x25, qvga ? 0x30 : 0x60);
i2c_w_mask(sd, 0x2d, qvga ? 0x40 : 0x00, 0x40);
- i2c_w_mask(sd, 0x67, qvga ? 0xf0 : 0x90, 0xf0);
+ i2c_w_mask(sd, 0x67, qvga ? 0xb0 : 0x90, 0xf0);
i2c_w_mask(sd, 0x74, qvga ? 0x20 : 0x00, 0x20);
break;
- case SEN_OV76BE:
-/* i2c_w(sd, 0x2b, 0x00); */
- i2c_w_mask(sd, 0x14, qvga ? 0x20 : 0x00, 0x20);
- break;
case SEN_OV7640:
-/* i2c_w(sd, 0x2b, 0x00); */
i2c_w_mask(sd, 0x14, qvga ? 0x20 : 0x00, 0x20);
i2c_w_mask(sd, 0x28, qvga ? 0x00 : 0x20, 0x20);
/* i2c_w(sd, 0x24, qvga ? 0x20 : 0x3a); */
@@ -2016,6 +2591,7 @@
break;
case SEN_OV6620:
case SEN_OV6630:
+ case SEN_OV66308AF:
i2c_w_mask(sd, 0x14, qvga ? 0x20 : 0x00, 0x20);
break;
default:
@@ -2023,10 +2599,6 @@
}
/******** Palette-specific regs ********/
- if (sd->sensor == SEN_OV7610 || sd->sensor == SEN_OV76BE) {
- /* not valid on the OV6620/OV7620/6630? */
- i2c_w_mask(sd, 0x0e, 0x00, 0x40);
- }
/* The OV518 needs special treatment. Although both the OV518
* and the OV6630 support a 16-bit video bus, only the 8 bit Y
@@ -2036,25 +2608,12 @@
/* OV7640 is 8-bit only */
- if (sd->sensor != SEN_OV6630 && sd->sensor != SEN_OV7640)
+ if (sd->sensor != SEN_OV6630 && sd->sensor != SEN_OV66308AF &&
+ sd->sensor != SEN_OV7640)
i2c_w_mask(sd, 0x13, 0x00, 0x20);
/******** Clock programming ********/
- /* The OV6620 needs special handling. This prevents the
- * severe banding that normally occurs */
- if (sd->sensor == SEN_OV6620) {
-
- /* Clock down */
- i2c_w(sd, 0x2a, 0x04);
- i2c_w(sd, 0x11, sd->clockdiv);
- i2c_w(sd, 0x2a, 0x84);
- /* This next setting is critical. It seems to improve
- * the gain or the contrast. The "reserved" bits seem
- * to have some effect in this case. */
- i2c_w(sd, 0x2d, 0x85);
- } else {
- i2c_w(sd, 0x11, sd->clockdiv);
- }
+ i2c_w(sd, 0x11, sd->clockdiv);
/******** Special Features ********/
/* no evidence this is possible with OV7670, either */
@@ -2098,13 +2657,14 @@
static int set_ov_sensor_window(struct sd *sd)
{
struct gspca_dev *gspca_dev;
- int qvga;
+ int qvga, crop;
int hwsbase, hwebase, vwsbase, vwebase, hwscale, vwscale;
int ret, hstart, hstop, vstop, vstart;
__u8 v;
gspca_dev = &sd->gspca_dev;
- qvga = gspca_dev->cam.cam_mode[(int) gspca_dev->curr_mode].priv;
+ qvga = gspca_dev->cam.cam_mode[(int) gspca_dev->curr_mode].priv & 1;
+ crop = gspca_dev->cam.cam_mode[(int) gspca_dev->curr_mode].priv & 2;
/* The different sensor ICs handle setting up of window differently.
* IF YOU SET IT WRONG, YOU WILL GET ALL ZERO ISOC DATA FROM OV51x!! */
@@ -2123,14 +2683,19 @@
break;
case SEN_OV6620:
case SEN_OV6630:
+ case SEN_OV66308AF:
hwsbase = 0x38;
hwebase = 0x3a;
vwsbase = 0x05;
vwebase = 0x06;
- if (qvga) {
+ if (sd->sensor == SEN_OV66308AF && qvga)
/* HDG: this fixes U and V getting swapped */
- hwsbase--;
- vwsbase--;
+ hwsbase++;
+ if (crop) {
+ hwsbase += 8;
+ hwebase += 8;
+ vwsbase += 11;
+ vwebase += 11;
}
break;
case SEN_OV7620:
@@ -2155,6 +2720,7 @@
switch (sd->sensor) {
case SEN_OV6620:
case SEN_OV6630:
+ case SEN_OV66308AF:
if (qvga) { /* QCIF */
hwscale = 0;
vwscale = 0;
@@ -2207,7 +2773,7 @@
if (qvga) { /* QVGA from ov7670.c by
* Jonathan Corbet */
hstart = 164;
- hstop = 20;
+ hstop = 28;
vstart = 14;
vstop = 494;
} else { /* VGA */
@@ -2233,7 +2799,6 @@
msleep(10); /* need to sleep between read and write to
* same reg! */
i2c_w(sd, OV7670_REG_VREF, v);
- sethvflip(sd);
} else {
i2c_w(sd, 0x17, hwsbase);
i2c_w(sd, 0x18, hwebase + (sd->gspca_dev.width >> hwscale));
@@ -2250,6 +2815,10 @@
int ret = 0;
switch (sd->bridge) {
+ case BRIDGE_OV511:
+ case BRIDGE_OV511PLUS:
+ ret = ov511_mode_init_regs(sd);
+ break;
case BRIDGE_OV518:
case BRIDGE_OV518PLUS:
ret = ov518_mode_init_regs(sd);
@@ -2268,6 +2837,9 @@
setcontrast(gspca_dev);
setbrightness(gspca_dev);
setcolors(gspca_dev);
+ sethvflip(sd);
+ setautobrightness(sd);
+ setfreq(sd);
ret = ov51x_restart(sd);
if (ret < 0)
@@ -2287,23 +2859,88 @@
ov51x_led_control(sd, 0);
}
+static void ov511_pkt_scan(struct gspca_dev *gspca_dev,
+ struct gspca_frame *frame, /* target */
+ __u8 *in, /* isoc packet */
+ int len) /* iso packet length */
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+
+ /* SOF/EOF packets have 1st to 8th bytes zeroed and the 9th
+ * byte non-zero. The EOF packet has image width/height in the
+ * 10th and 11th bytes. The 9th byte is given as follows:
+ *
+ * bit 7: EOF
+ * 6: compression enabled
+ * 5: 422/420/400 modes
+ * 4: 422/420/400 modes
+ * 3: 1
+ * 2: snapshot button on
+ * 1: snapshot frame
+ * 0: even/odd field
+ */
+ if (!(in[0] | in[1] | in[2] | in[3] | in[4] | in[5] | in[6] | in[7]) &&
+ (in[8] & 0x08)) {
+ if (in[8] & 0x80) {
+ /* Frame end */
+ if ((in[9] + 1) * 8 != gspca_dev->width ||
+ (in[10] + 1) * 8 != gspca_dev->height) {
+ PDEBUG(D_ERR, "Invalid frame size, got: %dx%d,"
+ " requested: %dx%d\n",
+ (in[9] + 1) * 8, (in[10] + 1) * 8,
+ gspca_dev->width, gspca_dev->height);
+ gspca_dev->last_packet_type = DISCARD_PACKET;
+ return;
+ }
+ /* Add 11-byte footer to frame, might be useful */
+ gspca_frame_add(gspca_dev, LAST_PACKET, frame, in, 11);
+ return;
+ } else {
+ /* Frame start */
+ gspca_frame_add(gspca_dev, FIRST_PACKET, frame, in, 0);
+ sd->packet_nr = 0;
+ }
+ }
+
+ /* Ignore the packet number */
+ len--;
+
+ /* intermediate packet */
+ gspca_frame_add(gspca_dev, INTER_PACKET, frame, in, len);
+}
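For reference, a minimal sketch (not part of the patch) of the marker-byte test described in the comment above:

	/* Returns 1 for EOF, 0 for SOF or ordinary data packets. */
	static int ov511_is_eof(const unsigned char *in)
	{
		if (in[0] | in[1] | in[2] | in[3] | in[4] | in[5] | in[6] | in[7])
			return 0;		/* bytes 0-7 must be zero */
		if (!(in[8] & 0x08))
			return 0;		/* bit 3 of the marker byte is always 1 */
		return (in[8] & 0x80) != 0;	/* bit 7 distinguishes EOF from SOF */
	}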
+
static void ov518_pkt_scan(struct gspca_dev *gspca_dev,
struct gspca_frame *frame, /* target */
__u8 *data, /* isoc packet */
int len) /* iso packet length */
{
- PDEBUG(D_STREAM, "ov518_pkt_scan: %d bytes", len);
-
- if (len & 7) {
- len--;
- PDEBUG(D_STREAM, "packet number: %d\n", (int)data[len]);
- }
+ struct sd *sd = (struct sd *) gspca_dev;
/* A false positive here is likely, until OVT gives me
* the definitive SOF/EOF format */
if ((!(data[0] | data[1] | data[2] | data[3] | data[5])) && data[6]) {
gspca_frame_add(gspca_dev, LAST_PACKET, frame, data, 0);
gspca_frame_add(gspca_dev, FIRST_PACKET, frame, data, 0);
+ sd->packet_nr = 0;
+ }
+
+ if (gspca_dev->last_packet_type == DISCARD_PACKET)
+ return;
+
+ /* Does this device use packet numbers ? */
+ if (len & 7) {
+ len--;
+ if (sd->packet_nr == data[len])
+ sd->packet_nr++;
+ /* The last few packets of the frame (which are all 0's
+ except that they may contain part of the footer) are
+ numbered 0 */
+ else if (sd->packet_nr == 0 || data[len]) {
+ PDEBUG(D_ERR, "Invalid packet nr: %d (expect: %d)",
+ (int)data[len], (int)sd->packet_nr);
+ gspca_dev->last_packet_type = DISCARD_PACKET;
+ return;
+ }
}
/* intermediate packet */
@@ -2364,6 +3001,7 @@
switch (sd->bridge) {
case BRIDGE_OV511:
case BRIDGE_OV511PLUS:
+ ov511_pkt_scan(gspca_dev, frame, data, len);
break;
case BRIDGE_OV518:
case BRIDGE_OV518PLUS:
@@ -2389,13 +3027,13 @@
case SEN_OV76BE:
case SEN_OV6620:
case SEN_OV6630:
+ case SEN_OV66308AF:
case SEN_OV7640:
i2c_w(sd, OV7610_REG_BRT, val);
break;
case SEN_OV7620:
/* 7620 doesn't like manual changes when in auto mode */
-/*fixme
- * if (!sd->auto_brt) */
+ if (!sd->autobrightness)
i2c_w(sd, OV7610_REG_BRT, val);
break;
case SEN_OV7670:
@@ -2418,6 +3056,7 @@
i2c_w(sd, OV7610_REG_CNT, val);
break;
case SEN_OV6630:
+ case SEN_OV66308AF:
i2c_w_mask(sd, OV7610_REG_CNT, val >> 4, 0x0f);
break;
case SEN_OV8610: {
@@ -2462,6 +3101,7 @@
case SEN_OV76BE:
case SEN_OV6620:
case SEN_OV6630:
+ case SEN_OV66308AF:
i2c_w(sd, OV7610_REG_SAT, val);
break;
case SEN_OV7620:
@@ -2482,6 +3122,72 @@
}
}
+static void setautobrightness(struct sd *sd)
+{
+ if (sd->sensor == SEN_OV7640 || sd->sensor == SEN_OV7670)
+ return;
+
+ i2c_w_mask(sd, 0x2d, sd->autobrightness ? 0x10 : 0x00, 0x10);
+}
+
+static void setfreq(struct sd *sd)
+{
+ if (sd->sensor == SEN_OV7670) {
+ switch (sd->freq) {
+ case 0: /* Banding filter disabled */
+ i2c_w_mask(sd, OV7670_REG_COM8, 0, OV7670_COM8_BFILT);
+ break;
+ case 1: /* 50 hz */
+ i2c_w_mask(sd, OV7670_REG_COM8, OV7670_COM8_BFILT,
+ OV7670_COM8_BFILT);
+ i2c_w_mask(sd, OV7670_REG_COM11, 0x08, 0x18);
+ break;
+ case 2: /* 60 hz */
+ i2c_w_mask(sd, OV7670_REG_COM8, OV7670_COM8_BFILT,
+ OV7670_COM8_BFILT);
+ i2c_w_mask(sd, OV7670_REG_COM11, 0x00, 0x18);
+ break;
+ case 3: /* Auto hz */
+ i2c_w_mask(sd, OV7670_REG_COM8, OV7670_COM8_BFILT,
+ OV7670_COM8_BFILT);
+ i2c_w_mask(sd, OV7670_REG_COM11, OV7670_COM11_HZAUTO,
+ 0x18);
+ break;
+ }
+ } else {
+ switch (sd->freq) {
+ case 0: /* Banding filter disabled */
+ i2c_w_mask(sd, 0x2d, 0x00, 0x04);
+ i2c_w_mask(sd, 0x2a, 0x00, 0x80);
+ break;
+ case 1: /* 50 hz (filter on and framerate adj) */
+ i2c_w_mask(sd, 0x2d, 0x04, 0x04);
+ i2c_w_mask(sd, 0x2a, 0x80, 0x80);
+ /* 20 fps -> 16.667 fps */
+ if (sd->sensor == SEN_OV6620 ||
+ sd->sensor == SEN_OV6630 ||
+ sd->sensor == SEN_OV66308AF)
+ i2c_w(sd, 0x2b, 0x5e);
+ else
+ i2c_w(sd, 0x2b, 0xac);
+ break;
+ case 2: /* 60 hz (filter on, ...) */
+ i2c_w_mask(sd, 0x2d, 0x04, 0x04);
+ if (sd->sensor == SEN_OV6620 ||
+ sd->sensor == SEN_OV6630 ||
+ sd->sensor == SEN_OV66308AF) {
+ /* 20 fps -> 15 fps */
+ i2c_w_mask(sd, 0x2a, 0x80, 0x80);
+ i2c_w(sd, 0x2b, 0xa8);
+ } else {
+ /* no framerate adj. */
+ i2c_w_mask(sd, 0x2a, 0x00, 0x80);
+ }
+ break;
+ }
+ }
+}
+
static int sd_setbrightness(struct gspca_dev *gspca_dev, __s32 val)
{
struct sd *sd = (struct sd *) gspca_dev;
@@ -2572,6 +3278,71 @@
return 0;
}
+static int sd_setautobrightness(struct gspca_dev *gspca_dev, __s32 val)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+
+ sd->autobrightness = val;
+ if (gspca_dev->streaming)
+ setautobrightness(sd);
+ return 0;
+}
+
+static int sd_getautobrightness(struct gspca_dev *gspca_dev, __s32 *val)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+
+ *val = sd->autobrightness;
+ return 0;
+}
+
+static int sd_setfreq(struct gspca_dev *gspca_dev, __s32 val)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+
+ sd->freq = val;
+ if (gspca_dev->streaming)
+ setfreq(sd);
+ return 0;
+}
+
+static int sd_getfreq(struct gspca_dev *gspca_dev, __s32 *val)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+
+ *val = sd->freq;
+ return 0;
+}
+
+static int sd_querymenu(struct gspca_dev *gspca_dev,
+ struct v4l2_querymenu *menu)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+
+ switch (menu->id) {
+ case V4L2_CID_POWER_LINE_FREQUENCY:
+ switch (menu->index) {
+ case 0: /* V4L2_CID_POWER_LINE_FREQUENCY_DISABLED */
+ strcpy((char *) menu->name, "NoFliker");
+ return 0;
+ case 1: /* V4L2_CID_POWER_LINE_FREQUENCY_50HZ */
+ strcpy((char *) menu->name, "50 Hz");
+ return 0;
+ case 2: /* V4L2_CID_POWER_LINE_FREQUENCY_60HZ */
+ strcpy((char *) menu->name, "60 Hz");
+ return 0;
+ case 3:
+ if (sd->sensor != SEN_OV7670)
+ return -EINVAL;
+
+ strcpy((char *) menu->name, "Automatic");
+ return 0;
+ }
+ break;
+ }
+ return -EINVAL;
+}
+
/* sub-driver description */
static const struct sd_desc sd_desc = {
.name = MODULE_NAME,
@@ -2582,6 +3353,7 @@
.start = sd_start,
.stopN = sd_stopN,
.pkt_scan = sd_pkt_scan,
+ .querymenu = sd_querymenu,
};
/* -- module initialisation -- */
@@ -2590,17 +3362,22 @@
{USB_DEVICE(0x041e, 0x405f), .driver_info = BRIDGE_OV519 },
{USB_DEVICE(0x041e, 0x4060), .driver_info = BRIDGE_OV519 },
{USB_DEVICE(0x041e, 0x4061), .driver_info = BRIDGE_OV519 },
- {USB_DEVICE(0x041e, 0x4064), .driver_info = BRIDGE_OV519 },
- {USB_DEVICE(0x041e, 0x4068), .driver_info = BRIDGE_OV519 },
+ {USB_DEVICE(0x041e, 0x4064),
+ .driver_info = BRIDGE_OV519 | BRIDGE_INVERT_LED },
+ {USB_DEVICE(0x041e, 0x4068),
+ .driver_info = BRIDGE_OV519 | BRIDGE_INVERT_LED },
{USB_DEVICE(0x045e, 0x028c), .driver_info = BRIDGE_OV519 },
{USB_DEVICE(0x054c, 0x0154), .driver_info = BRIDGE_OV519 },
{USB_DEVICE(0x054c, 0x0155), .driver_info = BRIDGE_OV519 },
+ {USB_DEVICE(0x05a9, 0x0511), .driver_info = BRIDGE_OV511 },
{USB_DEVICE(0x05a9, 0x0518), .driver_info = BRIDGE_OV518 },
{USB_DEVICE(0x05a9, 0x0519), .driver_info = BRIDGE_OV519 },
{USB_DEVICE(0x05a9, 0x0530), .driver_info = BRIDGE_OV519 },
{USB_DEVICE(0x05a9, 0x4519), .driver_info = BRIDGE_OV519 },
{USB_DEVICE(0x05a9, 0x8519), .driver_info = BRIDGE_OV519 },
+ {USB_DEVICE(0x05a9, 0xa511), .driver_info = BRIDGE_OV511PLUS },
{USB_DEVICE(0x05a9, 0xa518), .driver_info = BRIDGE_OV518PLUS },
+ {USB_DEVICE(0x0813, 0x0002), .driver_info = BRIDGE_OV511PLUS },
{}
};
diff --git a/drivers/media/video/gspca/sonixj.c b/drivers/media/video/gspca/sonixj.c
index dc6a6f1..0d02f41 100644
--- a/drivers/media/video/gspca/sonixj.c
+++ b/drivers/media/video/gspca/sonixj.c
@@ -46,6 +46,7 @@
u8 gamma;
u8 vflip; /* ov7630/ov7648 only */
u8 infrared; /* mt9v111 only */
+ u8 freq; /* ov76xx only */
u8 quality; /* image quality */
#define QUALITY_MIN 60
#define QUALITY_MAX 95
@@ -96,8 +97,11 @@
static int sd_getvflip(struct gspca_dev *gspca_dev, __s32 *val);
static int sd_setinfrared(struct gspca_dev *gspca_dev, __s32 val);
static int sd_getinfrared(struct gspca_dev *gspca_dev, __s32 *val);
+static int sd_setfreq(struct gspca_dev *gspca_dev, __s32 val);
+static int sd_getfreq(struct gspca_dev *gspca_dev, __s32 *val);
static struct ctrl sd_ctrls[] = {
+#define BRIGHTNESS_IDX 0
{
{
.id = V4L2_CID_BRIGHTNESS,
@@ -113,6 +117,7 @@
.set = sd_setbrightness,
.get = sd_getbrightness,
},
+#define CONTRAST_IDX 1
{
{
.id = V4L2_CID_CONTRAST,
@@ -128,20 +133,22 @@
.set = sd_setcontrast,
.get = sd_getcontrast,
},
+#define COLOR_IDX 2
{
{
.id = V4L2_CID_SATURATION,
.type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Color",
+ .name = "Saturation",
.minimum = 0,
.maximum = 40,
.step = 1,
-#define COLOR_DEF 32
+#define COLOR_DEF 25
.default_value = COLOR_DEF,
},
.set = sd_setcolors,
.get = sd_getcolors,
},
+#define BLUE_BALANCE_IDX 3
{
{
.id = V4L2_CID_BLUE_BALANCE,
@@ -156,6 +163,7 @@
.set = sd_setblue_balance,
.get = sd_getblue_balance,
},
+#define RED_BALANCE_IDX 4
{
{
.id = V4L2_CID_RED_BALANCE,
@@ -170,6 +178,7 @@
.set = sd_setred_balance,
.get = sd_getred_balance,
},
+#define GAMMA_IDX 5
{
{
.id = V4L2_CID_GAMMA,
@@ -184,7 +193,7 @@
.set = sd_setgamma,
.get = sd_getgamma,
},
-#define AUTOGAIN_IDX 5
+#define AUTOGAIN_IDX 6
{
{
.id = V4L2_CID_AUTOGAIN,
@@ -200,7 +209,7 @@
.get = sd_getautogain,
},
/* ov7630/ov7648 only */
-#define VFLIP_IDX 6
+#define VFLIP_IDX 7
{
{
.id = V4L2_CID_VFLIP,
@@ -209,14 +218,14 @@
.minimum = 0,
.maximum = 1,
.step = 1,
-#define VFLIP_DEF 0 /* vflip def = 1 for ov7630 */
+#define VFLIP_DEF 0
.default_value = VFLIP_DEF,
},
.set = sd_setvflip,
.get = sd_getvflip,
},
/* mt9v111 only */
-#define INFRARED_IDX 7
+#define INFRARED_IDX 8
{
{
.id = V4L2_CID_INFRARED,
@@ -231,28 +240,44 @@
.set = sd_setinfrared,
.get = sd_getinfrared,
},
+/* ov7630/ov7648/ov7660 only */
+#define FREQ_IDX 9
+ {
+ {
+ .id = V4L2_CID_POWER_LINE_FREQUENCY,
+ .type = V4L2_CTRL_TYPE_MENU,
+ .name = "Light frequency filter",
+ .minimum = 0,
+ .maximum = 2, /* 0: 0, 1: 50Hz, 2:60Hz */
+ .step = 1,
+#define FREQ_DEF 2
+ .default_value = FREQ_DEF,
+ },
+ .set = sd_setfreq,
+ .get = sd_getfreq,
+ },
};
/* table of the disabled controls */
static __u32 ctrl_dis[] = {
- (1 << INFRARED_IDX) | (1 << VFLIP_IDX),
+ (1 << INFRARED_IDX) | (1 << VFLIP_IDX) | (1 << FREQ_IDX),
/* SENSOR_HV7131R 0 */
- (1 << INFRARED_IDX) | (1 << VFLIP_IDX),
+ (1 << INFRARED_IDX) | (1 << VFLIP_IDX) | (1 << FREQ_IDX),
/* SENSOR_MI0360 1 */
- (1 << INFRARED_IDX) | (1 << VFLIP_IDX),
+ (1 << INFRARED_IDX) | (1 << VFLIP_IDX) | (1 << FREQ_IDX),
/* SENSOR_MO4000 2 */
- (1 << VFLIP_IDX),
+ (1 << VFLIP_IDX) | (1 << FREQ_IDX),
/* SENSOR_MT9V111 3 */
- (1 << INFRARED_IDX) | (1 << VFLIP_IDX),
+ (1 << INFRARED_IDX) | (1 << VFLIP_IDX) | (1 << FREQ_IDX),
/* SENSOR_OM6802 4 */
- (1 << AUTOGAIN_IDX) | (1 << INFRARED_IDX),
+ (1 << INFRARED_IDX),
/* SENSOR_OV7630 5 */
(1 << INFRARED_IDX),
/* SENSOR_OV7648 6 */
(1 << AUTOGAIN_IDX) | (1 << INFRARED_IDX) | (1 << VFLIP_IDX),
/* SENSOR_OV7660 7 */
- (1 << AUTOGAIN_IDX) | (1 << INFRARED_IDX) | (1 << VFLIP_IDX),
- /* SENSOR_SP80708 8 */
+ (1 << AUTOGAIN_IDX) | (1 << INFRARED_IDX) | (1 << VFLIP_IDX) |
+ (1 << FREQ_IDX), /* SENSOR_SP80708 8 */
};
static const struct v4l2_pix_format vga_mode[] = {
@@ -268,7 +293,8 @@
.priv = 1},
{640, 480, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE,
.bytesperline = 640,
- .sizeimage = 640 * 480 * 3 / 8 + 590,
+ /* Note 3 / 8 is not large enough, not even 5 / 8 is ?! */
+ .sizeimage = 640 * 480 * 3 / 4 + 590,
.colorspace = V4L2_COLORSPACE_JPEG,
.priv = 0},
};
@@ -604,7 +630,9 @@
/* win: i2c_r from 00 to 80 */
{0xd1, 0x21, 0x03, 0x80, 0x10, 0x20, 0x80, 0x10},
{0xb1, 0x21, 0x0c, 0x20, 0x20, 0x00, 0x00, 0x10},
- {0xd1, 0x21, 0x11, 0x00, 0x48, 0xc0, 0x00, 0x10},
+/* HDG: 0x11 was 0x00, changed to 0x01 for better exposure (15 fps instead of 30);
+ 0x13 was 0xc0, changed to 0xc3 for auto gain and exposure */
+ {0xd1, 0x21, 0x11, 0x01, 0x48, 0xc3, 0x00, 0x10},
{0xb1, 0x21, 0x15, 0x80, 0x03, 0x00, 0x00, 0x10},
{0xd1, 0x21, 0x17, 0x1b, 0xbd, 0x05, 0xf6, 0x10},
{0xa1, 0x21, 0x1b, 0x04, 0x00, 0x00, 0x00, 0x10},
@@ -638,9 +666,8 @@
{0xa1, 0x21, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10},
{0xb1, 0x21, 0x01, 0x80, 0x80, 0x00, 0x00, 0x10},
/* */
- {0xa1, 0x21, 0x11, 0x00, 0x00, 0x00, 0x00, 0x10},
- {0xa1, 0x21, 0x2a, 0x88, 0x00, 0x00, 0x00, 0x10},
- {0xa1, 0x21, 0x2b, 0x34, 0x00, 0x00, 0x00, 0x10},
+/* {0xa1, 0x21, 0x2a, 0x88, 0x00, 0x00, 0x00, 0x10}, * set by setfreq */
+/* {0xa1, 0x21, 0x2b, 0x34, 0x00, 0x00, 0x00, 0x10}, * set by setfreq */
/* */
{0xa1, 0x21, 0x10, 0x83, 0x00, 0x00, 0x00, 0x10},
/* {0xb1, 0x21, 0x01, 0x88, 0x70, 0x00, 0x00, 0x10}, */
@@ -673,7 +700,7 @@
{0xd1, 0x21, 0x21, 0x86, 0x00, 0xde, 0xa0, 0x10},
/* {0xd1, 0x21, 0x25, 0x80, 0x32, 0xfe, 0xa0, 0x10}, jfm done */
/* {0xd1, 0x21, 0x29, 0x00, 0x91, 0x00, 0x88, 0x10}, jfm done */
- {0xb1, 0x21, 0x2d, 0x85, 0x00, 0x00, 0x00, 0x10},
+/* {0xb1, 0x21, 0x2d, 0x85, 0x00, 0x00, 0x00, 0x10}, set by setfreq */
/*...*/
/* {0xa1, 0x21, 0x12, 0x08, 0x00, 0x00, 0x00, 0x10}, jfm done */
/* {0xa1, 0x21, 0x75, 0x06, 0x00, 0x00, 0x00, 0x10}, * COMN
@@ -1294,11 +1321,9 @@
sd->gamma = GAMMA_DEF;
sd->autogain = AUTOGAIN_DEF;
sd->ag_cnt = -1;
- if (sd->sensor != SENSOR_OV7630)
- sd->vflip = 0;
- else
- sd->vflip = 1;
+ sd->vflip = VFLIP_DEF;
sd->infrared = INFRARED_DEF;
+ sd->freq = FREQ_DEF;
sd->quality = QUALITY_DEF;
sd->jpegqual = 80;
@@ -1569,7 +1594,7 @@
else
comb = 0xa0;
if (sd->autogain)
- comb |= 0x02;
+ comb |= 0x03;
i2c_w1(&sd->gspca_dev, 0x13, comb);
return;
}
@@ -1585,12 +1610,15 @@
{
u8 comn;
- if (sd->sensor == SENSOR_OV7630)
+ if (sd->sensor == SENSOR_OV7630) {
comn = 0x02;
- else
+ if (!sd->vflip)
+ comn |= 0x80;
+ } else {
comn = 0x06;
- if (sd->vflip)
- comn |= 0x80;
+ if (sd->vflip)
+ comn |= 0x80;
+ }
i2c_w1(&sd->gspca_dev, 0x75, comn);
}
@@ -1602,6 +1630,58 @@
sd->infrared ? 0x66 : 0x64);
}
+static void setfreq(struct gspca_dev *gspca_dev)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+
+ if (sd->sensor == SENSOR_OV7660) {
+ switch (sd->freq) {
+ case 0: /* Banding filter disabled */
+ i2c_w1(gspca_dev, 0x13, 0xdf);
+ break;
+ case 1: /* 50 hz */
+ i2c_w1(gspca_dev, 0x13, 0xff);
+ i2c_w1(gspca_dev, 0x3b, 0x0a);
+ break;
+ case 2: /* 60 hz */
+ i2c_w1(gspca_dev, 0x13, 0xff);
+ i2c_w1(gspca_dev, 0x3b, 0x02);
+ break;
+ }
+ } else {
+ u8 reg2a = 0, reg2b = 0, reg2d = 0;
+
+ /* Get reg2a / reg2d base values */
+ switch (sd->sensor) {
+ case SENSOR_OV7630:
+ reg2a = 0x08;
+ reg2d = 0x01;
+ break;
+ case SENSOR_OV7648:
+ reg2a = 0x11;
+ reg2d = 0x81;
+ break;
+ }
+
+ switch (sd->freq) {
+ case 0: /* Banding filter disabled */
+ break;
+ case 1: /* 50 hz (filter on and framerate adj) */
+ reg2a |= 0x80;
+ reg2b = 0xac;
+ reg2d |= 0x04;
+ break;
+ case 2: /* 60 hz (filter on, no framerate adj) */
+ reg2a |= 0x80;
+ reg2d |= 0x04;
+ break;
+ }
+ i2c_w1(gspca_dev, 0x2a, reg2a);
+ i2c_w1(gspca_dev, 0x2b, reg2b);
+ i2c_w1(gspca_dev, 0x2d, reg2d);
+ }
+}
+
static void setjpegqual(struct gspca_dev *gspca_dev)
{
struct sd *sd = (struct sd *) gspca_dev;
@@ -1828,6 +1908,7 @@
setbrightness(gspca_dev);
setcontrast(gspca_dev);
setautogain(gspca_dev);
+ setfreq(gspca_dev);
return 0;
}
@@ -2131,6 +2212,24 @@
return 0;
}
+static int sd_setfreq(struct gspca_dev *gspca_dev, __s32 val)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+
+ sd->freq = val;
+ if (gspca_dev->streaming)
+ setfreq(gspca_dev);
+ return 0;
+}
+
+static int sd_getfreq(struct gspca_dev *gspca_dev, __s32 *val)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+
+ *val = sd->freq;
+ return 0;
+}
+
static int sd_set_jcomp(struct gspca_dev *gspca_dev,
struct v4l2_jpegcompression *jcomp)
{
@@ -2159,6 +2258,27 @@
return 0;
}
+static int sd_querymenu(struct gspca_dev *gspca_dev,
+ struct v4l2_querymenu *menu)
+{
+ switch (menu->id) {
+ case V4L2_CID_POWER_LINE_FREQUENCY:
+ switch (menu->index) {
+ case 0: /* V4L2_CID_POWER_LINE_FREQUENCY_DISABLED */
+ strcpy((char *) menu->name, "NoFliker");
+ return 0;
+ case 1: /* V4L2_CID_POWER_LINE_FREQUENCY_50HZ */
+ strcpy((char *) menu->name, "50 Hz");
+ return 0;
+ case 2: /* V4L2_CID_POWER_LINE_FREQUENCY_60HZ */
+ strcpy((char *) menu->name, "60 Hz");
+ return 0;
+ }
+ break;
+ }
+ return -EINVAL;
+}
+
/* sub-driver description */
static const struct sd_desc sd_desc = {
.name = MODULE_NAME,
@@ -2173,6 +2293,7 @@
.dq_callback = do_autogain,
.get_jcomp = sd_get_jcomp,
.set_jcomp = sd_set_jcomp,
+ .querymenu = sd_querymenu,
};
/* -- module initialisation -- */
@@ -2233,7 +2354,7 @@
{USB_DEVICE(0x0c45, 0x613b), BSI(SN9C120, OV7660, 0x21)},
#endif
{USB_DEVICE(0x0c45, 0x613c), BSI(SN9C120, HV7131R, 0x11)},
-/* {USB_DEVICE(0x0c45, 0x613e), BSI(SN9C120, OV7630, 0x??)}, */
+ {USB_DEVICE(0x0c45, 0x613e), BSI(SN9C120, OV7630, 0x21)},
{USB_DEVICE(0x0c45, 0x6143), BSI(SN9C120, SP80708, 0x18)},
{}
};
diff --git a/drivers/media/video/gspca/stv06xx/Makefile b/drivers/media/video/gspca/stv06xx/Makefile
index feeaa94..2f3c3a6 100644
--- a/drivers/media/video/gspca/stv06xx/Makefile
+++ b/drivers/media/video/gspca/stv06xx/Makefile
@@ -3,7 +3,8 @@
gspca_stv06xx-objs := stv06xx.o \
stv06xx_vv6410.o \
stv06xx_hdcs.o \
- stv06xx_pb0100.o
+ stv06xx_pb0100.o \
+ stv06xx_st6422.o
EXTRA_CFLAGS += -Idrivers/media/video/gspca
diff --git a/drivers/media/video/gspca/stv06xx/stv06xx.c b/drivers/media/video/gspca/stv06xx/stv06xx.c
index e573c34..0da8e0d 100644
--- a/drivers/media/video/gspca/stv06xx/stv06xx.c
+++ b/drivers/media/video/gspca/stv06xx/stv06xx.c
@@ -92,11 +92,10 @@
{
int err = 0;
- if (IS_850(sd)) {
+ if (sd->bridge == BRIDGE_STV610) {
struct usb_device *udev = sd->gspca_dev.dev;
__u8 *buf = sd->gspca_dev.usb_buf;
- /* Quickam Web needs an extra packet */
buf[0] = 0;
err = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
0x04, 0x40, 0x1704, 0, buf, 1,
@@ -253,7 +252,7 @@
err = sd->sensor->init(sd);
- if (dump_sensor)
+ if (dump_sensor && sd->sensor->dump)
sd->sensor->dump(sd);
return (err < 0) ? err : 0;
@@ -318,6 +317,8 @@
__u8 *data, /* isoc packet */
int len) /* iso packet length */
{
+ struct sd *sd = (struct sd *) gspca_dev;
+
PDEBUG(D_PACK, "Packet of length %d arrived", len);
/* A packet may contain several frames
@@ -343,14 +344,29 @@
if (len < chunk_len) {
PDEBUG(D_ERR, "URB packet length is smaller"
" than the specified chunk length");
+ gspca_dev->last_packet_type = DISCARD_PACKET;
return;
}
+ /* First byte seems to be 02=data; 2nd byte is unknown??? */
+ if (sd->bridge == BRIDGE_ST6422 && (id & 0xFF00) == 0x0200)
+ goto frame_data;
+
switch (id) {
case 0x0200:
case 0x4200:
+frame_data:
PDEBUG(D_PACK, "Frame data packet detected");
+ if (sd->to_skip) {
+ int skip = (sd->to_skip < chunk_len) ?
+ sd->to_skip : chunk_len;
+ data += skip;
+ len -= skip;
+ chunk_len -= skip;
+ sd->to_skip -= skip;
+ }
+
gspca_frame_add(gspca_dev, INTER_PACKET, frame,
data, chunk_len);
break;
@@ -365,6 +381,9 @@
gspca_frame_add(gspca_dev, FIRST_PACKET,
frame, data, 0);
+ if (sd->bridge == BRIDGE_ST6422)
+ sd->to_skip = gspca_dev->width * 4;
+
if (chunk_len)
PDEBUG(D_ERR, "Chunk length is "
"non-zero on a SOF");
@@ -395,8 +414,12 @@
/* Unknown chunk with 2 bytes of data,
occurs 2-3 times per USB interrupt */
break;
+ case 0x42ff:
+ PDEBUG(D_PACK, "Chunk 0x42ff detected");
+ /* Special chunk seen sometimes on the ST6422 */
+ break;
default:
- PDEBUG(D_PACK, "Unknown chunk %d detected", id);
+ PDEBUG(D_PACK, "Unknown chunk 0x%04x detected", id);
/* Unknown chunk */
}
data += chunk_len;
@@ -428,11 +451,16 @@
cam = &gspca_dev->cam;
sd->desc = sd_desc;
+ sd->bridge = id->driver_info;
gspca_dev->sd_desc = &sd->desc;
if (dump_bridge)
stv06xx_dump_bridge(sd);
+ sd->sensor = &stv06xx_sensor_st6422;
+ if (!sd->sensor->probe(sd))
+ return 0;
+
sd->sensor = &stv06xx_sensor_vv6410;
if (!sd->sensor->probe(sd))
return 0;
@@ -457,9 +485,20 @@
/* -- module initialisation -- */
static const __devinitdata struct usb_device_id device_table[] = {
- {USB_DEVICE(0x046d, 0x0840)}, /* QuickCam Express */
- {USB_DEVICE(0x046d, 0x0850)}, /* LEGO cam / QuickCam Web */
- {USB_DEVICE(0x046d, 0x0870)}, /* Dexxa WebCam USB */
+ /* QuickCam Express */
+ {USB_DEVICE(0x046d, 0x0840), .driver_info = BRIDGE_STV600 },
+ /* LEGO cam / QuickCam Web */
+ {USB_DEVICE(0x046d, 0x0850), .driver_info = BRIDGE_STV610 },
+ /* Dexxa WebCam USB */
+ {USB_DEVICE(0x046d, 0x0870), .driver_info = BRIDGE_STV602 },
+ /* QuickCam Messenger */
+ {USB_DEVICE(0x046D, 0x08F0), .driver_info = BRIDGE_ST6422 },
+ /* QuickCam Communicate */
+ {USB_DEVICE(0x046D, 0x08F5), .driver_info = BRIDGE_ST6422 },
+ /* QuickCam Messenger (new) */
+ {USB_DEVICE(0x046D, 0x08F6), .driver_info = BRIDGE_ST6422 },
+ /* QuickCam Messenger (new) */
+ {USB_DEVICE(0x046D, 0x08DA), .driver_info = BRIDGE_ST6422 },
{}
};
MODULE_DEVICE_TABLE(usb, device_table);
diff --git a/drivers/media/video/gspca/stv06xx/stv06xx.h b/drivers/media/video/gspca/stv06xx/stv06xx.h
index 1207e7d..9df7137 100644
--- a/drivers/media/video/gspca/stv06xx/stv06xx.h
+++ b/drivers/media/video/gspca/stv06xx/stv06xx.h
@@ -93,6 +93,17 @@
/* Sensor private data */
void *sensor_priv;
+
+ /* The first 4 lines produced by the stv6422 are no good; this keeps
+ track of how many bytes we still need to skip during a frame */
+ int to_skip;
+
+ /* Bridge / Camera type */
+ u8 bridge;
+ #define BRIDGE_STV600 0
+ #define BRIDGE_STV602 1
+ #define BRIDGE_STV610 2
+ #define BRIDGE_ST6422 3 /* With integrated sensor */
};
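A short worked example (not part of the patch) of how to_skip is set and consumed by the ST6422 packet-scan code, using the 324-wide mode from st6422_mode:

	int to_skip = 324 * 4;			/* 1296 bytes: 4 junk lines at SOF */
	int chunk_len = 1000;			/* assumed example chunk size */
	int skip = to_skip < chunk_len ? to_skip : chunk_len;
	chunk_len -= skip;			/* 0: this whole chunk was junk */
	to_skip -= skip;			/* 296 junk bytes still to drop */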
int stv06xx_write_bridge(struct sd *sd, u16 address, u16 i2c_data);
diff --git a/drivers/media/video/gspca/stv06xx/stv06xx_hdcs.c b/drivers/media/video/gspca/stv06xx/stv06xx_hdcs.c
index b169038..3039ec2 100644
--- a/drivers/media/video/gspca/stv06xx/stv06xx_hdcs.c
+++ b/drivers/media/video/gspca/stv06xx/stv06xx_hdcs.c
@@ -434,7 +434,7 @@
hdcs->exp.er = 100;
/*
- * Frame rate on HDCS-1000 0x46D:0x840 depends on PSMP:
+ * Frame rate on HDCS-1000 with STV600 depends on PSMP:
* 4 = doesn't work at all
* 5 = 7.8 fps,
* 6 = 6.9 fps,
@@ -443,7 +443,7 @@
* 15 = 4.4 fps,
* 31 = 2.8 fps
*
- * Frame rate on HDCS-1000 0x46D:0x870 depends on PSMP:
+ * Frame rate on HDCS-1000 with STV602 depends on PSMP:
* 15 = doesn't work at all
* 18 = doesn't work at all
* 19 = 7.3 fps
@@ -453,7 +453,7 @@
* 24 = 6.3 fps
* 30 = 5.4 fps
*/
- hdcs->psmp = IS_870(sd) ? 20 : 5;
+ hdcs->psmp = (sd->bridge == BRIDGE_STV602) ? 20 : 5;
sd->sensor_priv = hdcs;
@@ -530,7 +530,7 @@
int i, err = 0;
/* Set the STV0602AA in STV0600 emulation mode */
- if (IS_870(sd))
+ if (sd->bridge == BRIDGE_STV602)
stv06xx_write_bridge(sd, STV_STV0600_EMULATION, 1);
/* Execute the bridge init */
@@ -558,7 +558,7 @@
return err;
/* Set PGA sample duration
- (was 0x7E for IS_870, but caused slow framerate with HDCS-1020) */
+ (was 0x7E for the STV602, but caused slow framerate with HDCS-1020) */
if (IS_1020(sd))
err = stv06xx_write_sensor(sd, HDCS_TCTRL,
(HDCS_ADC_START_SIG_DUR << 6) | hdcs->psmp);
diff --git a/drivers/media/video/gspca/stv06xx/stv06xx_sensor.h b/drivers/media/video/gspca/stv06xx/stv06xx_sensor.h
index e88c42f..934b9ce 100644
--- a/drivers/media/video/gspca/stv06xx/stv06xx_sensor.h
+++ b/drivers/media/video/gspca/stv06xx/stv06xx_sensor.h
@@ -32,14 +32,13 @@
#include "stv06xx.h"
-#define IS_850(sd) ((sd)->gspca_dev.dev->descriptor.idProduct == 0x850)
-#define IS_870(sd) ((sd)->gspca_dev.dev->descriptor.idProduct == 0x870)
#define IS_1020(sd) ((sd)->sensor == &stv06xx_sensor_hdcs1020)
extern const struct stv06xx_sensor stv06xx_sensor_vv6410;
extern const struct stv06xx_sensor stv06xx_sensor_hdcs1x00;
extern const struct stv06xx_sensor stv06xx_sensor_hdcs1020;
extern const struct stv06xx_sensor stv06xx_sensor_pb0100;
+extern const struct stv06xx_sensor stv06xx_sensor_st6422;
struct stv06xx_sensor {
/* Defines the name of a sensor */
diff --git a/drivers/media/video/gspca/stv06xx/stv06xx_st6422.c b/drivers/media/video/gspca/stv06xx/stv06xx_st6422.c
new file mode 100644
index 0000000..87cb5b9
--- /dev/null
+++ b/drivers/media/video/gspca/stv06xx/stv06xx_st6422.c
@@ -0,0 +1,453 @@
+/*
+ * Support for the sensor part which is integrated (I think) into the
+ * st6422 stv06xx-alike bridge; as it's integrated there are no i2c writes,
+ * only direct bridge writes.
+ *
+ * Copyright (c) 2009 Hans de Goede <hdegoede@redhat.com>
+ *
+ * Strongly based on qc-usb-messenger, which is:
+ * Copyright (c) 2001 Jean-Fredric Clere, Nikolas Zimmermann, Georg Acher
+ * Mark Cave-Ayland, Carlo E Prelz, Dick Streefland
+ * Copyright (c) 2002, 2003 Tuukka Toivonen
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#include "stv06xx_st6422.h"
+
+static struct v4l2_pix_format st6422_mode[] = {
+ /* Note we actually get 124 lines of data, of which we skip the first
+ 4 as they are garbage */
+ {
+ 162,
+ 120,
+ V4L2_PIX_FMT_SGRBG8,
+ V4L2_FIELD_NONE,
+ .sizeimage = 162 * 120,
+ .bytesperline = 162,
+ .colorspace = V4L2_COLORSPACE_SRGB,
+ .priv = 1
+ },
+ /* Note we actually get 248 lines of data, of which we skip the first
+ 4 as they are garbage, and we tell the app it only gets the
+ first 240 of the 244 lines it actually gets, so that it ignores
+ the last 4. */
+ {
+ 324,
+ 240,
+ V4L2_PIX_FMT_SGRBG8,
+ V4L2_FIELD_NONE,
+ .sizeimage = 324 * 244,
+ .bytesperline = 324,
+ .colorspace = V4L2_COLORSPACE_SRGB,
+ .priv = 0
+ },
+};
+
+static const struct ctrl st6422_ctrl[] = {
+#define BRIGHTNESS_IDX 0
+ {
+ {
+ .id = V4L2_CID_BRIGHTNESS,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "Brightness",
+ .minimum = 0,
+ .maximum = 31,
+ .step = 1,
+ .default_value = 3
+ },
+ .set = st6422_set_brightness,
+ .get = st6422_get_brightness
+ },
+#define CONTRAST_IDX 1
+ {
+ {
+ .id = V4L2_CID_CONTRAST,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "Contrast",
+ .minimum = 0,
+ .maximum = 15,
+ .step = 1,
+ .default_value = 11
+ },
+ .set = st6422_set_contrast,
+ .get = st6422_get_contrast
+ },
+#define GAIN_IDX 2
+ {
+ {
+ .id = V4L2_CID_GAIN,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "Gain",
+ .minimum = 0,
+ .maximum = 255,
+ .step = 1,
+ .default_value = 64
+ },
+ .set = st6422_set_gain,
+ .get = st6422_get_gain
+ },
+#define EXPOSURE_IDX 3
+ {
+ {
+ .id = V4L2_CID_EXPOSURE,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "Exposure",
+ .minimum = 0,
+ .maximum = 1023,
+ .step = 1,
+ .default_value = 256
+ },
+ .set = st6422_set_exposure,
+ .get = st6422_get_exposure
+ },
+};
+
+static int st6422_probe(struct sd *sd)
+{
+ int i;
+ s32 *sensor_settings;
+
+ if (sd->bridge != BRIDGE_ST6422)
+ return -ENODEV;
+
+ info("st6422 sensor detected");
+
+ sensor_settings = kmalloc(ARRAY_SIZE(st6422_ctrl) * sizeof(s32),
+ GFP_KERNEL);
+ if (!sensor_settings)
+ return -ENOMEM;
+
+ sd->gspca_dev.cam.cam_mode = st6422_mode;
+ sd->gspca_dev.cam.nmodes = ARRAY_SIZE(st6422_mode);
+ sd->desc.ctrls = st6422_ctrl;
+ sd->desc.nctrls = ARRAY_SIZE(st6422_ctrl);
+ sd->sensor_priv = sensor_settings;
+
+ for (i = 0; i < sd->desc.nctrls; i++)
+ sensor_settings[i] = st6422_ctrl[i].qctrl.default_value;
+
+ return 0;
+}
+
+static int st6422_init(struct sd *sd)
+{
+ int err = 0, i;
+
+ const u16 st6422_bridge_init[][2] = {
+ { STV_ISO_ENABLE, 0x00 }, /* disable capture */
+ { 0x1436, 0x00 },
+ { 0x1432, 0x03 }, /* 0x00-0x1F brightness */
+ { 0x143a, 0xF9 }, /* 0x00-0x0F contrast */
+ { 0x0509, 0x38 }, /* R */
+ { 0x050a, 0x38 }, /* G */
+ { 0x050b, 0x38 }, /* B */
+ { 0x050c, 0x2A },
+ { 0x050d, 0x01 },
+
+
+ { 0x1431, 0x00 }, /* 0x00-0x07 ??? */
+ { 0x1433, 0x34 }, /* 160x120, 0x00-0x01 night filter */
+ { 0x1438, 0x18 }, /* 640x480 */
+/* 18 bayer */
+/* 10 compressed? */
+
+ { 0x1439, 0x00 },
+/* anti-flicker?? 0xa2 gives a perfect picture against a monitor */
+
+ { 0x143b, 0x05 },
+ { 0x143c, 0x00 }, /* 0x00-0x01 - ??? */
+
+
+/* shutter time 0x0000-0x03FF */
+/* low value gives good pictures on moving objects (but requires much light) */
+/* high value gives good pictures in darkness (but tends to be overexposed) */
+ { 0x143e, 0x01 },
+ { 0x143d, 0x00 },
+
+ { 0x1442, 0xe2 },
+/* write: 1x1x xxxx */
+/* read: 1x1x xxxx */
+/* bit 5 == button pressed and hold if 0 */
+/* write 0xe2,0xea */
+
+/* 0x144a */
+/* 0x00 init */
+/* bit 7 == button has been pressed, but not handled */
+
+/* interrupt */
+/* if(urb->iso_frame_desc[i].status == 0x80) { */
+/* if(urb->iso_frame_desc[i].status == 0x88) { */
+
+ { 0x1500, 0xd0 },
+ { 0x1500, 0xd0 },
+ { 0x1500, 0x50 }, /* 0x00 - 0xFF 0x80 == compr ? */
+
+ { 0x1501, 0xaf },
+/* high val -> bright areas become darker. */
+/* low val -> bright areas become brighter. */
+ { 0x1502, 0xc2 },
+/* high val -> bright areas become darker. */
+/* low val -> bright areas become brighter. */
+ { 0x1503, 0x45 },
+/* high val -> bright areas become darker. */
+/* low val -> bright areas become brighter. */
+
+ { 0x1505, 0x02 },
+/* 2 : 324x248 80352 bytes */
+/* 7 : 248x162 40176 bytes */
+/* c+f: 162*124 20088 bytes */
+
+ { 0x150e, 0x8e },
+ { 0x150f, 0x37 },
+ { 0x15c0, 0x00 },
+ { 0x15c1, 1023 }, /* 160x120, ISOC_PACKET_SIZE */
+ { 0x15c3, 0x08 }, /* 0x04/0x14 ... test pictures ??? */
+
+
+ { 0x143f, 0x01 }, /* commit settings */
+
+ };
+
+ for (i = 0; i < ARRAY_SIZE(st6422_bridge_init) && !err; i++) {
+ err = stv06xx_write_bridge(sd, st6422_bridge_init[i][0],
+ st6422_bridge_init[i][1]);
+ }
+
+ return err;
+}
+
+static void st6422_disconnect(struct sd *sd)
+{
+ sd->sensor = NULL;
+ kfree(sd->sensor_priv);
+}
+
+static int st6422_start(struct sd *sd)
+{
+ int err, packet_size;
+ struct cam *cam = &sd->gspca_dev.cam;
+ s32 *sensor_settings = sd->sensor_priv;
+ struct usb_host_interface *alt;
+ struct usb_interface *intf;
+
+ intf = usb_ifnum_to_if(sd->gspca_dev.dev, sd->gspca_dev.iface);
+ alt = usb_altnum_to_altsetting(intf, sd->gspca_dev.alt);
+ if (!alt) {
+ PDEBUG(D_ERR, "Couldn't get altsetting");
+ return -EIO;
+ }
+
+ packet_size = le16_to_cpu(alt->endpoint[0].desc.wMaxPacketSize);
+ err = stv06xx_write_bridge(sd, 0x15c1, packet_size);
+ if (err < 0)
+ return err;
+
+ if (cam->cam_mode[sd->gspca_dev.curr_mode].priv)
+ err = stv06xx_write_bridge(sd, 0x1505, 0x0f);
+ else
+ err = stv06xx_write_bridge(sd, 0x1505, 0x02);
+ if (err < 0)
+ return err;
+
+ err = st6422_set_brightness(&sd->gspca_dev,
+ sensor_settings[BRIGHTNESS_IDX]);
+ if (err < 0)
+ return err;
+
+ err = st6422_set_contrast(&sd->gspca_dev,
+ sensor_settings[CONTRAST_IDX]);
+ if (err < 0)
+ return err;
+
+ err = st6422_set_exposure(&sd->gspca_dev,
+ sensor_settings[EXPOSURE_IDX]);
+ if (err < 0)
+ return err;
+
+ err = st6422_set_gain(&sd->gspca_dev,
+ sensor_settings[GAIN_IDX]);
+ if (err < 0)
+ return err;
+
+ PDEBUG(D_STREAM, "Starting stream");
+
+ return 0;
+}
+
+static int st6422_stop(struct sd *sd)
+{
+ PDEBUG(D_STREAM, "Halting stream");
+
+ return 0;
+}
+
+static int st6422_get_brightness(struct gspca_dev *gspca_dev, __s32 *val)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+ s32 *sensor_settings = sd->sensor_priv;
+
+ *val = sensor_settings[BRIGHTNESS_IDX];
+
+ PDEBUG(D_V4L2, "Read brightness %d", *val);
+
+ return 0;
+}
+
+static int st6422_set_brightness(struct gspca_dev *gspca_dev, __s32 val)
+{
+ int err;
+ struct sd *sd = (struct sd *) gspca_dev;
+ s32 *sensor_settings = sd->sensor_priv;
+
+ sensor_settings[BRIGHTNESS_IDX] = val;
+
+ if (!gspca_dev->streaming)
+ return 0;
+
+ /* val goes from 0 -> 31 */
+ PDEBUG(D_V4L2, "Set brightness to %d", val);
+ err = stv06xx_write_bridge(sd, 0x1432, val);
+ if (err < 0)
+ return err;
+
+ /* commit settings */
+ err = stv06xx_write_bridge(sd, 0x143f, 0x01);
+ return (err < 0) ? err : 0;
+}
+
+static int st6422_get_contrast(struct gspca_dev *gspca_dev, __s32 *val)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+ s32 *sensor_settings = sd->sensor_priv;
+
+ *val = sensor_settings[CONTRAST_IDX];
+
+ PDEBUG(D_V4L2, "Read contrast %d", *val);
+
+ return 0;
+}
+
+static int st6422_set_contrast(struct gspca_dev *gspca_dev, __s32 val)
+{
+ int err;
+ struct sd *sd = (struct sd *) gspca_dev;
+ s32 *sensor_settings = sd->sensor_priv;
+
+ sensor_settings[CONTRAST_IDX] = val;
+
+ if (!gspca_dev->streaming)
+ return 0;
+
+ /* Val goes from 0 -> 15 */
+ PDEBUG(D_V4L2, "Set contrast to %d\n", val);
+ err = stv06xx_write_bridge(sd, 0x143a, 0xf0 | val);
+ if (err < 0)
+ return err;
+
+ /* commit settings */
+ err = stv06xx_write_bridge(sd, 0x143f, 0x01);
+ return (err < 0) ? err : 0;
+}
+
+static int st6422_get_gain(struct gspca_dev *gspca_dev, __s32 *val)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+ s32 *sensor_settings = sd->sensor_priv;
+
+ *val = sensor_settings[GAIN_IDX];
+
+ PDEBUG(D_V4L2, "Read gain %d", *val);
+
+ return 0;
+}
+
+static int st6422_set_gain(struct gspca_dev *gspca_dev, __s32 val)
+{
+ int err;
+ struct sd *sd = (struct sd *) gspca_dev;
+ s32 *sensor_settings = sd->sensor_priv;
+
+ sensor_settings[GAIN_IDX] = val;
+
+ if (!gspca_dev->streaming)
+ return 0;
+
+ PDEBUG(D_V4L2, "Set gain to %d", val);
+
+ /* Set red, green, blue, gain */
+ err = stv06xx_write_bridge(sd, 0x0509, val);
+ if (err < 0)
+ return err;
+
+ err = stv06xx_write_bridge(sd, 0x050a, val);
+ if (err < 0)
+ return err;
+
+ err = stv06xx_write_bridge(sd, 0x050b, val);
+ if (err < 0)
+ return err;
+
+ /* 2 mystery writes */
+ err = stv06xx_write_bridge(sd, 0x050c, 0x2a);
+ if (err < 0)
+ return err;
+
+ err = stv06xx_write_bridge(sd, 0x050d, 0x01);
+ if (err < 0)
+ return err;
+
+ /* commit settings */
+ err = stv06xx_write_bridge(sd, 0x143f, 0x01);
+ return (err < 0) ? err : 0;
+}
+
+static int st6422_get_exposure(struct gspca_dev *gspca_dev, __s32 *val)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+ s32 *sensor_settings = sd->sensor_priv;
+
+ *val = sensor_settings[EXPOSURE_IDX];
+
+ PDEBUG(D_V4L2, "Read exposure %d", *val);
+
+ return 0;
+}
+
+static int st6422_set_exposure(struct gspca_dev *gspca_dev, __s32 val)
+{
+ int err;
+ struct sd *sd = (struct sd *) gspca_dev;
+ s32 *sensor_settings = sd->sensor_priv;
+
+ sensor_settings[EXPOSURE_IDX] = val;
+
+ if (!gspca_dev->streaming)
+ return 0;
+
+ PDEBUG(D_V4L2, "Set exposure to %d\n", val);
+ err = stv06xx_write_bridge(sd, 0x143d, val & 0xff);
+ if (err < 0)
+ return err;
+
+ err = stv06xx_write_bridge(sd, 0x143e, val >> 8);
+ if (err < 0)
+ return err;
+
+ /* commit settings */
+ err = stv06xx_write_bridge(sd, 0x143f, 0x01);
+ return (err < 0) ? err : 0;
+}
diff --git a/drivers/media/video/gspca/stv06xx/stv06xx_st6422.h b/drivers/media/video/gspca/stv06xx/stv06xx_st6422.h
new file mode 100644
index 0000000..b2d45fe
--- /dev/null
+++ b/drivers/media/video/gspca/stv06xx/stv06xx_st6422.h
@@ -0,0 +1,59 @@
+/*
+ * Support for the sensor part which is integrated (I think) into the
+ * st6422 stv06xx-like bridge; as it is integrated, there are no i2c writes,
+ * only direct bridge writes.
+ *
+ * Copyright (c) 2009 Hans de Goede <hdegoede@redhat.com>
+ *
+ * Strongly based on qc-usb-messenger, which is:
+ * Copyright (c) 2001 Jean-Fredric Clere, Nikolas Zimmermann, Georg Acher
+ * Mark Cave-Ayland, Carlo E Prelz, Dick Streefland
+ * Copyright (c) 2002, 2003 Tuukka Toivonen
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#ifndef STV06XX_ST6422_H_
+#define STV06XX_ST6422_H_
+
+#include "stv06xx_sensor.h"
+
+static int st6422_probe(struct sd *sd);
+static int st6422_start(struct sd *sd);
+static int st6422_init(struct sd *sd);
+static int st6422_stop(struct sd *sd);
+static void st6422_disconnect(struct sd *sd);
+
+/* V4L2 controls supported by the driver */
+static int st6422_get_brightness(struct gspca_dev *gspca_dev, __s32 *val);
+static int st6422_set_brightness(struct gspca_dev *gspca_dev, __s32 val);
+static int st6422_get_contrast(struct gspca_dev *gspca_dev, __s32 *val);
+static int st6422_set_contrast(struct gspca_dev *gspca_dev, __s32 val);
+static int st6422_get_gain(struct gspca_dev *gspca_dev, __s32 *val);
+static int st6422_set_gain(struct gspca_dev *gspca_dev, __s32 val);
+static int st6422_get_exposure(struct gspca_dev *gspca_dev, __s32 *val);
+static int st6422_set_exposure(struct gspca_dev *gspca_dev, __s32 val);
+
+const struct stv06xx_sensor stv06xx_sensor_st6422 = {
+ .name = "ST6422",
+ .init = st6422_init,
+ .probe = st6422_probe,
+ .start = st6422_start,
+ .stop = st6422_stop,
+ .disconnect = st6422_disconnect,
+};
+
+#endif
diff --git a/drivers/media/video/ivtv/ivtv-controls.c b/drivers/media/video/ivtv/ivtv-controls.c
index 84995bc..a3b77ed 100644
--- a/drivers/media/video/ivtv/ivtv-controls.c
+++ b/drivers/media/video/ivtv/ivtv-controls.c
@@ -60,6 +60,8 @@
switch (qctrl->id) {
/* Standard V4L2 controls */
+ case V4L2_CID_USER_CLASS:
+ return v4l2_ctrl_query_fill(qctrl, 0, 0, 0, 0);
case V4L2_CID_BRIGHTNESS:
case V4L2_CID_HUE:
case V4L2_CID_SATURATION:
diff --git a/drivers/media/video/mt9m001.c b/drivers/media/video/mt9m001.c
index 459c04c..4d794b4 100644
--- a/drivers/media/video/mt9m001.c
+++ b/drivers/media/video/mt9m001.c
@@ -280,15 +280,9 @@
{
struct v4l2_pix_format *pix = &f->fmt.pix;
- if (pix->height < 32 + icd->y_skip_top)
- pix->height = 32 + icd->y_skip_top;
- if (pix->height > 1024 + icd->y_skip_top)
- pix->height = 1024 + icd->y_skip_top;
- if (pix->width < 48)
- pix->width = 48;
- if (pix->width > 1280)
- pix->width = 1280;
- pix->width &= ~0x01; /* has to be even, unsure why was ~3 */
+ v4l_bound_align_image(&pix->width, 48, 1280, 1,
+ &pix->height, 32 + icd->y_skip_top,
+ 1024 + icd->y_skip_top, 0, 0);
return 0;
}
diff --git a/drivers/media/video/mt9t031.c b/drivers/media/video/mt9t031.c
index f72aeb7c..4207fb34 100644
--- a/drivers/media/video/mt9t031.c
+++ b/drivers/media/video/mt9t031.c
@@ -385,17 +385,9 @@
{
struct v4l2_pix_format *pix = &f->fmt.pix;
- if (pix->height < MT9T031_MIN_HEIGHT)
- pix->height = MT9T031_MIN_HEIGHT;
- if (pix->height > MT9T031_MAX_HEIGHT)
- pix->height = MT9T031_MAX_HEIGHT;
- if (pix->width < MT9T031_MIN_WIDTH)
- pix->width = MT9T031_MIN_WIDTH;
- if (pix->width > MT9T031_MAX_WIDTH)
- pix->width = MT9T031_MAX_WIDTH;
-
- pix->width &= ~0x01; /* has to be even */
- pix->height &= ~0x01; /* has to be even */
+ v4l_bound_align_image(
+ &pix->width, MT9T031_MIN_WIDTH, MT9T031_MAX_WIDTH, 1,
+ &pix->height, MT9T031_MIN_HEIGHT, MT9T031_MAX_HEIGHT, 1, 0);
return 0;
}
diff --git a/drivers/media/video/mt9v022.c b/drivers/media/video/mt9v022.c
index be20d31..dbdcc86 100644
--- a/drivers/media/video/mt9v022.c
+++ b/drivers/media/video/mt9v022.c
@@ -364,15 +364,9 @@
{
struct v4l2_pix_format *pix = &f->fmt.pix;
- if (pix->height < 32 + icd->y_skip_top)
- pix->height = 32 + icd->y_skip_top;
- if (pix->height > 480 + icd->y_skip_top)
- pix->height = 480 + icd->y_skip_top;
- if (pix->width < 48)
- pix->width = 48;
- if (pix->width > 752)
- pix->width = 752;
- pix->width &= ~0x03; /* ? */
+ v4l_bound_align_image(&pix->width, 48, 752, 2 /* ? */,
+ &pix->height, 32 + icd->y_skip_top,
+ 480 + icd->y_skip_top, 0, 0);
return 0;
}
diff --git a/drivers/media/video/ov511.c b/drivers/media/video/ov511.c
index 08cfd3e..0bc2cf5 100644
--- a/drivers/media/video/ov511.c
+++ b/drivers/media/video/ov511.c
@@ -211,8 +211,6 @@
static struct usb_device_id device_table [] = {
{ USB_DEVICE(VEND_OMNIVISION, PROD_OV511) },
{ USB_DEVICE(VEND_OMNIVISION, PROD_OV511PLUS) },
- { USB_DEVICE(VEND_OMNIVISION, PROD_OV518) },
- { USB_DEVICE(VEND_OMNIVISION, PROD_OV518PLUS) },
{ USB_DEVICE(VEND_MATTEL, PROD_ME2CAM) },
{ } /* Terminating entry */
};
diff --git a/drivers/media/video/pvrusb2/pvrusb2-audio.c b/drivers/media/video/pvrusb2/pvrusb2-audio.c
index 10ef1a2..416933c 100644
--- a/drivers/media/video/pvrusb2/pvrusb2-audio.c
+++ b/drivers/media/video/pvrusb2/pvrusb2-audio.c
@@ -48,11 +48,13 @@
MSP_DSP_IN_SCART),
};
-static const struct routing_scheme routing_schemes[] = {
- [PVR2_ROUTING_SCHEME_HAUPPAUGE] = {
- .def = routing_scheme0,
- .cnt = ARRAY_SIZE(routing_scheme0),
- },
+static const struct routing_scheme routing_def0 = {
+ .def = routing_scheme0,
+ .cnt = ARRAY_SIZE(routing_scheme0),
+};
+
+static const struct routing_scheme *routing_schemes[] = {
+ [PVR2_ROUTING_SCHEME_HAUPPAUGE] = &routing_def0,
};
void pvr2_msp3400_subdev_update(struct pvr2_hdw *hdw, struct v4l2_subdev *sd)
@@ -65,7 +67,7 @@
pvr2_trace(PVR2_TRACE_CHIPS, "subdev msp3400 v4l2 set_stereo");
if ((sid < ARRAY_SIZE(routing_schemes)) &&
- ((sp = routing_schemes + sid) != NULL) &&
+ ((sp = routing_schemes[sid]) != NULL) &&
(hdw->input_val >= 0) &&
(hdw->input_val < sp->cnt)) {
input = sp->def[hdw->input_val];
diff --git a/drivers/media/video/pvrusb2/pvrusb2-cs53l32a.c b/drivers/media/video/pvrusb2/pvrusb2-cs53l32a.c
index 9023adf..68980e1 100644
--- a/drivers/media/video/pvrusb2/pvrusb2-cs53l32a.c
+++ b/drivers/media/video/pvrusb2/pvrusb2-cs53l32a.c
@@ -49,11 +49,13 @@
[PVR2_CVAL_INPUT_SVIDEO] = 0,
};
-static const struct routing_scheme routing_schemes[] = {
- [PVR2_ROUTING_SCHEME_ONAIR] = {
- .def = routing_scheme1,
- .cnt = ARRAY_SIZE(routing_scheme1),
- },
+static const struct routing_scheme routing_def1 = {
+ .def = routing_scheme1,
+ .cnt = ARRAY_SIZE(routing_scheme1),
+};
+
+static const struct routing_scheme *routing_schemes[] = {
+ [PVR2_ROUTING_SCHEME_ONAIR] = &routing_def1,
};
@@ -65,12 +67,11 @@
u32 input;
pvr2_trace(PVR2_TRACE_CHIPS, "subdev v4l2 set_input(%d)",
hdw->input_val);
- if ((sid < ARRAY_SIZE(routing_schemes)) &&
- ((sp = routing_schemes + sid) != NULL) &&
- (hdw->input_val >= 0) &&
- (hdw->input_val < sp->cnt)) {
- input = sp->def[hdw->input_val];
- } else {
+ sp = (sid < ARRAY_SIZE(routing_schemes)) ?
+ routing_schemes[sid] : NULL;
+ if ((sp == NULL) ||
+ (hdw->input_val < 0) ||
+ (hdw->input_val >= sp->cnt)) {
pvr2_trace(PVR2_TRACE_ERROR_LEGS,
"*** WARNING *** subdev v4l2 set_input:"
" Invalid routing scheme (%u)"
@@ -78,6 +79,7 @@
sid, hdw->input_val);
return;
}
+ input = sp->def[hdw->input_val];
sd->ops->audio->s_routing(sd, input, 0, 0);
}
}
diff --git a/drivers/media/video/pvrusb2/pvrusb2-cx2584x-v4l.c b/drivers/media/video/pvrusb2/pvrusb2-cx2584x-v4l.c
index 05e5235..82c1358 100644
--- a/drivers/media/video/pvrusb2/pvrusb2-cx2584x-v4l.c
+++ b/drivers/media/video/pvrusb2/pvrusb2-cx2584x-v4l.c
@@ -68,6 +68,11 @@
},
};
+static const struct routing_scheme routing_def0 = {
+ .def = routing_scheme0,
+ .cnt = ARRAY_SIZE(routing_scheme0),
+};
+
/* Specific to gotview device */
static const struct routing_scheme_item routing_schemegv[] = {
[PVR2_CVAL_INPUT_TV] = {
@@ -90,15 +95,14 @@
},
};
-static const struct routing_scheme routing_schemes[] = {
- [PVR2_ROUTING_SCHEME_HAUPPAUGE] = {
- .def = routing_scheme0,
- .cnt = ARRAY_SIZE(routing_scheme0),
- },
- [PVR2_ROUTING_SCHEME_GOTVIEW] = {
- .def = routing_schemegv,
- .cnt = ARRAY_SIZE(routing_schemegv),
- },
+static const struct routing_scheme routing_defgv = {
+ .def = routing_schemegv,
+ .cnt = ARRAY_SIZE(routing_schemegv),
+};
+
+static const struct routing_scheme *routing_schemes[] = {
+ [PVR2_ROUTING_SCHEME_HAUPPAUGE] = &routing_def0,
+ [PVR2_ROUTING_SCHEME_GOTVIEW] = &routing_defgv,
};
void pvr2_cx25840_subdev_update(struct pvr2_hdw *hdw, struct v4l2_subdev *sd)
@@ -110,13 +114,11 @@
const struct routing_scheme *sp;
unsigned int sid = hdw->hdw_desc->signal_routing_scheme;
- if ((sid < ARRAY_SIZE(routing_schemes)) &&
- ((sp = routing_schemes + sid) != NULL) &&
- (hdw->input_val >= 0) &&
- (hdw->input_val < sp->cnt)) {
- vid_input = sp->def[hdw->input_val].vid;
- aud_input = sp->def[hdw->input_val].aud;
- } else {
+ sp = (sid < ARRAY_SIZE(routing_schemes)) ?
+ routing_schemes[sid] : NULL;
+ if ((sp == NULL) ||
+ (hdw->input_val < 0) ||
+ (hdw->input_val >= sp->cnt)) {
pvr2_trace(PVR2_TRACE_ERROR_LEGS,
"*** WARNING *** subdev cx2584x set_input:"
" Invalid routing scheme (%u)"
@@ -124,7 +126,8 @@
sid, hdw->input_val);
return;
}
-
+ vid_input = sp->def[hdw->input_val].vid;
+ aud_input = sp->def[hdw->input_val].aud;
pvr2_trace(PVR2_TRACE_CHIPS,
"subdev cx2584x set_input vid=0x%x aud=0x%x",
vid_input, aud_input);
diff --git a/drivers/media/video/pvrusb2/pvrusb2-hdw.c b/drivers/media/video/pvrusb2/pvrusb2-hdw.c
index 0c745b1..cbc3887 100644
--- a/drivers/media/video/pvrusb2/pvrusb2-hdw.c
+++ b/drivers/media/video/pvrusb2/pvrusb2-hdw.c
@@ -85,8 +85,8 @@
module_param_array(tolerance, int, NULL, 0444);
MODULE_PARM_DESC(tolerance,"specify stream error tolerance");
-/* US Broadcast channel 7 (175.25 MHz) */
-static int default_tv_freq = 175250000L;
+/* US Broadcast channel 3 (61.25 MHz), to help with testing */
+static int default_tv_freq = 61250000L;
/* 104.3 MHz, a usable FM station for my area */
static int default_radio_freq = 104300000L;
@@ -1987,6 +1987,34 @@
}
+static void pvr2_hdw_cx25840_vbi_hack(struct pvr2_hdw *hdw)
+{
+ /*
+ Mike Isely <isely@pobox.com> 19-Nov-2006 - This bit of nuttiness
+ for cx25840 causes that module to correctly set up its video
+ scaling. This is really a problem in the cx25840 module itself,
+ but we work around it here. The problem has not been seen in
+ ivtv because there VBI is supported and set up. We don't do VBI
+ here (at least not yet) and thus we never attempted to even set
+ it up.
+ */
+ struct v4l2_format fmt;
+ if (hdw->decoder_client_id != PVR2_CLIENT_ID_CX25840) {
+ /* We're not using a cx25840 so don't enable the hack */
+ return;
+ }
+
+ pvr2_trace(PVR2_TRACE_INIT,
+ "Module ID %u:"
+ " Executing cx25840 VBI hack",
+ hdw->decoder_client_id);
+ memset(&fmt, 0, sizeof(fmt));
+ fmt.type = V4L2_BUF_TYPE_SLICED_VBI_CAPTURE;
+ v4l2_device_call_all(&hdw->v4l2_dev, hdw->decoder_client_id,
+ video, s_fmt, &fmt);
+}
+
+
static int pvr2_hdw_load_subdev(struct pvr2_hdw *hdw,
const struct pvr2_device_client_desc *cd)
{
@@ -2078,30 +2106,6 @@
/* client-specific setup... */
switch (mid) {
case PVR2_CLIENT_ID_CX25840:
- hdw->decoder_client_id = mid;
- {
- /*
- Mike Isely <isely@pobox.com> 19-Nov-2006 - This
- bit of nuttiness for cx25840 causes that module
- to correctly set up its video scaling. This is
- really a problem in the cx25840 module itself,
- but we work around it here. The problem has not
- been seen in ivtv because there VBI is supported
- and set up. We don't do VBI here (at least not
- yet) and thus we never attempted to even set it
- up.
- */
- struct v4l2_format fmt;
- pvr2_trace(PVR2_TRACE_INIT,
- "Module ID %u:"
- " Executing cx25840 VBI hack",
- mid);
- memset(&fmt, 0, sizeof(fmt));
- fmt.type = V4L2_BUF_TYPE_SLICED_VBI_CAPTURE;
- v4l2_device_call_all(&hdw->v4l2_dev, mid,
- video, s_fmt, &fmt);
- }
- break;
case PVR2_CLIENT_ID_SAA7115:
hdw->decoder_client_id = mid;
break;
@@ -2202,6 +2206,8 @@
cptr->info->set_value(cptr,~0,cptr->info->default_value);
}
+ pvr2_hdw_cx25840_vbi_hack(hdw);
+
/* Set up special default values for the television and radio
frequencies here. It's not really important what these defaults
are, but I set them to something usable in the Chicago area just
@@ -2954,6 +2960,7 @@
vs = hdw->std_mask_cur;
v4l2_device_call_all(&hdw->v4l2_dev, 0,
core, s_std, vs);
+ pvr2_hdw_cx25840_vbi_hack(hdw);
}
hdw->tuner_signal_stale = !0;
hdw->cropcap_stale = !0;
@@ -4076,6 +4083,7 @@
if (hdw->decoder_client_id) {
v4l2_device_call_all(&hdw->v4l2_dev, hdw->decoder_client_id,
core, reset, 0);
+ pvr2_hdw_cx25840_vbi_hack(hdw);
return 0;
}
pvr2_trace(PVR2_TRACE_INIT,
diff --git a/drivers/media/video/pvrusb2/pvrusb2-video-v4l.c b/drivers/media/video/pvrusb2/pvrusb2-video-v4l.c
index d2fe7c8..4c96cf4 100644
--- a/drivers/media/video/pvrusb2/pvrusb2-video-v4l.c
+++ b/drivers/media/video/pvrusb2/pvrusb2-video-v4l.c
@@ -54,6 +54,11 @@
[PVR2_CVAL_INPUT_SVIDEO] = SAA7115_SVIDEO2,
};
+static const struct routing_scheme routing_def0 = {
+ .def = routing_scheme0,
+ .cnt = ARRAY_SIZE(routing_scheme0),
+};
+
static const int routing_scheme1[] = {
[PVR2_CVAL_INPUT_TV] = SAA7115_COMPOSITE4,
[PVR2_CVAL_INPUT_RADIO] = SAA7115_COMPOSITE5,
@@ -61,15 +66,14 @@
[PVR2_CVAL_INPUT_SVIDEO] = SAA7115_SVIDEO2, /* or SVIDEO0, it seems */
};
-static const struct routing_scheme routing_schemes[] = {
- [PVR2_ROUTING_SCHEME_HAUPPAUGE] = {
- .def = routing_scheme0,
- .cnt = ARRAY_SIZE(routing_scheme0),
- },
- [PVR2_ROUTING_SCHEME_ONAIR] = {
- .def = routing_scheme1,
- .cnt = ARRAY_SIZE(routing_scheme1),
- },
+static const struct routing_scheme routing_def1 = {
+ .def = routing_scheme1,
+ .cnt = ARRAY_SIZE(routing_scheme1),
+};
+
+static const struct routing_scheme *routing_schemes[] = {
+ [PVR2_ROUTING_SCHEME_HAUPPAUGE] = &routing_def0,
+ [PVR2_ROUTING_SCHEME_ONAIR] = &routing_def1,
};
void pvr2_saa7115_subdev_update(struct pvr2_hdw *hdw, struct v4l2_subdev *sd)
@@ -81,12 +85,12 @@
pvr2_trace(PVR2_TRACE_CHIPS, "subdev v4l2 set_input(%d)",
hdw->input_val);
- if ((sid < ARRAY_SIZE(routing_schemes)) &&
- ((sp = routing_schemes + sid) != NULL) &&
- (hdw->input_val >= 0) &&
- (hdw->input_val < sp->cnt)) {
- input = sp->def[hdw->input_val];
- } else {
+
+ sp = (sid < ARRAY_SIZE(routing_schemes)) ?
+ routing_schemes[sid] : NULL;
+ if ((sp == NULL) ||
+ (hdw->input_val < 0) ||
+ (hdw->input_val >= sp->cnt)) {
pvr2_trace(PVR2_TRACE_ERROR_LEGS,
"*** WARNING *** subdev v4l2 set_input:"
" Invalid routing scheme (%u)"
@@ -94,6 +98,7 @@
sid, hdw->input_val);
return;
}
+ input = sp->def[hdw->input_val];
sd->ops->video->s_routing(sd, input, 0, 0);
}
}
diff --git a/drivers/media/video/pxa_camera.c b/drivers/media/video/pxa_camera.c
index f60de40..46e0d8a 100644
--- a/drivers/media/video/pxa_camera.c
+++ b/drivers/media/video/pxa_camera.c
@@ -163,13 +163,6 @@
CICR0_EOFM | CICR0_FOM)
/*
- * YUV422P picture size should be a multiple of 16, so the heuristic aligns
- * height, width on 4 byte boundaries to reach the 16 multiple for the size.
- */
-#define YUV422P_X_Y_ALIGN 4
-#define YUV422P_SIZE_ALIGN YUV422P_X_Y_ALIGN * YUV422P_X_Y_ALIGN
-
-/*
* Structures
*/
enum pxa_camera_active_dma {
@@ -1398,28 +1391,15 @@
return -EINVAL;
}
- /* limit to pxa hardware capabilities */
- if (pix->height < 32)
- pix->height = 32;
- if (pix->height > 2048)
- pix->height = 2048;
- if (pix->width < 48)
- pix->width = 48;
- if (pix->width > 2048)
- pix->width = 2048;
- pix->width &= ~0x01;
-
/*
- * YUV422P planar format requires images size to be a 16 bytes
- * multiple. If not, zeros will be inserted between Y and U planes, and
- * U and V planes, and YUV422P standard would be violated.
+ * Limit to pxa hardware capabilities. YUV422P planar format requires
+ * images size to be a multiple of 16 bytes. If not, zeros will be
+ * inserted between Y and U planes, and U and V planes, which violates
+ * the YUV422P standard.
*/
- if (xlate->host_fmt->fourcc == V4L2_PIX_FMT_YUV422P) {
- if (!IS_ALIGNED(pix->width * pix->height, YUV422P_SIZE_ALIGN))
- pix->height = ALIGN(pix->height, YUV422P_X_Y_ALIGN);
- if (!IS_ALIGNED(pix->width * pix->height, YUV422P_SIZE_ALIGN))
- pix->width = ALIGN(pix->width, YUV422P_X_Y_ALIGN);
- }
+ v4l_bound_align_image(&pix->width, 48, 2048, 1,
+ &pix->height, 32, 2048, 0,
+ xlate->host_fmt->fourcc == V4L2_PIX_FMT_YUV422P ? 4 : 0);
pix->bytesperline = pix->width *
DIV_ROUND_UP(xlate->host_fmt->depth, 8);
diff --git a/drivers/media/video/saa7134/saa7134-video.c b/drivers/media/video/saa7134/saa7134-video.c
index e305c16..ba87128 100644
--- a/drivers/media/video/saa7134/saa7134-video.c
+++ b/drivers/media/video/saa7134/saa7134-video.c
@@ -1640,15 +1640,8 @@
}
f->fmt.pix.field = field;
- if (f->fmt.pix.width < 48)
- f->fmt.pix.width = 48;
- if (f->fmt.pix.height < 32)
- f->fmt.pix.height = 32;
- if (f->fmt.pix.width > maxw)
- f->fmt.pix.width = maxw;
- if (f->fmt.pix.height > maxh)
- f->fmt.pix.height = maxh;
- f->fmt.pix.width &= ~0x03;
+ v4l_bound_align_image(&f->fmt.pix.width, 48, maxw, 2,
+ &f->fmt.pix.height, 32, maxh, 0, 0);
f->fmt.pix.bytesperline =
(f->fmt.pix.width * fmt->depth) >> 3;
f->fmt.pix.sizeimage =
diff --git a/drivers/media/video/sh_mobile_ceu_camera.c b/drivers/media/video/sh_mobile_ceu_camera.c
index d369e84..0db88a5 100644
--- a/drivers/media/video/sh_mobile_ceu_camera.c
+++ b/drivers/media/video/sh_mobile_ceu_camera.c
@@ -689,16 +689,8 @@
/* FIXME: calculate using depth and bus width */
- if (f->fmt.pix.height < 4)
- f->fmt.pix.height = 4;
- if (f->fmt.pix.height > 1920)
- f->fmt.pix.height = 1920;
- if (f->fmt.pix.width < 2)
- f->fmt.pix.width = 2;
- if (f->fmt.pix.width > 2560)
- f->fmt.pix.width = 2560;
- f->fmt.pix.width &= ~0x01;
- f->fmt.pix.height &= ~0x03;
+ v4l_bound_align_image(&f->fmt.pix.width, 2, 2560, 1,
+ &f->fmt.pix.height, 4, 1920, 2, 0);
f->fmt.pix.bytesperline = f->fmt.pix.width *
DIV_ROUND_UP(xlate->host_fmt->depth, 8);
diff --git a/drivers/media/video/tcm825x.c b/drivers/media/video/tcm825x.c
index b30c492..b90e9da 100644
--- a/drivers/media/video/tcm825x.c
+++ b/drivers/media/video/tcm825x.c
@@ -878,7 +878,7 @@
return rval;
}
-static int __exit tcm825x_remove(struct i2c_client *client)
+static int tcm825x_remove(struct i2c_client *client)
{
struct tcm825x_sensor *sensor = i2c_get_clientdata(client);
@@ -902,7 +902,7 @@
.name = TCM825X_NAME,
},
.probe = tcm825x_probe,
- .remove = __exit_p(tcm825x_remove),
+ .remove = tcm825x_remove,
.id_table = tcm825x_id,
};
diff --git a/drivers/media/video/usbvideo/Kconfig b/drivers/media/video/usbvideo/Kconfig
index e4cb99c..adb1c04 100644
--- a/drivers/media/video/usbvideo/Kconfig
+++ b/drivers/media/video/usbvideo/Kconfig
@@ -38,10 +38,13 @@
module will be called konicawc.
config USB_QUICKCAM_MESSENGER
- tristate "USB Logitech Quickcam Messenger"
+ tristate "USB Logitech Quickcam Messenger (DEPRECATED)"
depends on VIDEO_V4L1
select VIDEO_USBVIDEO
---help---
+ This driver is DEPRECATED. Please use the gspca stv06xx module
+ instead.
+
Say Y or M here to enable support for the USB Logitech Quickcam
Messenger webcam.
diff --git a/drivers/media/video/v4l2-common.c b/drivers/media/video/v4l2-common.c
index f964756..b91d66a 100644
--- a/drivers/media/video/v4l2-common.c
+++ b/drivers/media/video/v4l2-common.c
@@ -802,6 +802,17 @@
/* Decrease the module use count to match the first try_module_get. */
module_put(client->driver->driver.owner);
+ if (sd) {
+ /* We return errors from v4l2_subdev_call only if we have the
+ callback, since .s_config is not mandatory */
+ int err = v4l2_subdev_call(sd, core, s_config, 0, NULL);
+
+ if (err && err != -ENOIOCTLCMD) {
+ v4l2_device_unregister_subdev(sd);
+ sd = NULL;
+ }
+ }
+
error:
/* If we have a client but no subdev, then something went wrong and
we must unregister the client. */
@@ -852,6 +863,17 @@
/* Decrease the module use count to match the first try_module_get. */
module_put(client->driver->driver.owner);
+ if (sd) {
+ /* We return errors from v4l2_subdev_call only if we have the
+ callback, since .s_config is not mandatory */
+ int err = v4l2_subdev_call(sd, core, s_config, 0, NULL);
+
+ if (err && err != -ENOIOCTLCMD) {
+ v4l2_device_unregister_subdev(sd);
+ sd = NULL;
+ }
+ }
+
error:
/* If we have a client but no subdev, then something went wrong and
we must unregister the client. */
@@ -872,6 +894,89 @@
}
EXPORT_SYMBOL_GPL(v4l2_i2c_new_probed_subdev_addr);
+/* Load an i2c sub-device. */
+struct v4l2_subdev *v4l2_i2c_new_subdev_board(struct v4l2_device *v4l2_dev,
+ struct i2c_adapter *adapter, const char *module_name,
+ struct i2c_board_info *info, const unsigned short *probe_addrs)
+{
+ struct v4l2_subdev *sd = NULL;
+ struct i2c_client *client;
+
+ BUG_ON(!v4l2_dev);
+
+ if (module_name)
+ request_module(module_name);
+
+ /* Create the i2c client */
+ if (info->addr == 0 && probe_addrs)
+ client = i2c_new_probed_device(adapter, info, probe_addrs);
+ else
+ client = i2c_new_device(adapter, info);
+
+ /* Note: by loading the module first we are certain that c->driver
+ will be set if the driver was found. If the module was not loaded
+ first, then the i2c core tries to delay-load the module for us,
+ and then c->driver is still NULL until the module is finally
+ loaded. This delay-load mechanism doesn't work if other drivers
+ want to use the i2c device, so explicitly loading the module
+ is the best alternative. */
+ if (client == NULL || client->driver == NULL)
+ goto error;
+
+ /* Lock the module so we can safely get the v4l2_subdev pointer */
+ if (!try_module_get(client->driver->driver.owner))
+ goto error;
+ sd = i2c_get_clientdata(client);
+
+ /* Register with the v4l2_device which increases the module's
+ use count as well. */
+ if (v4l2_device_register_subdev(v4l2_dev, sd))
+ sd = NULL;
+ /* Decrease the module use count to match the first try_module_get. */
+ module_put(client->driver->driver.owner);
+
+ if (sd) {
+ /* We return errors from v4l2_subdev_call only if we have the
+ callback, since .s_config is not mandatory */
+ int err = v4l2_subdev_call(sd, core, s_config,
+ info->irq, info->platform_data);
+
+ if (err && err != -ENOIOCTLCMD) {
+ v4l2_device_unregister_subdev(sd);
+ sd = NULL;
+ }
+ }
+
+error:
+ /* If we have a client but no subdev, then something went wrong and
+ we must unregister the client. */
+ if (client && sd == NULL)
+ i2c_unregister_device(client);
+ return sd;
+}
+EXPORT_SYMBOL_GPL(v4l2_i2c_new_subdev_board);
+
+struct v4l2_subdev *v4l2_i2c_new_subdev_cfg(struct v4l2_device *v4l2_dev,
+ struct i2c_adapter *adapter,
+ const char *module_name, const char *client_type,
+ int irq, void *platform_data,
+ u8 addr, const unsigned short *probe_addrs)
+{
+ struct i2c_board_info info;
+
+ /* Setup the i2c board info with the device type and
+ the device address. */
+ memset(&info, 0, sizeof(info));
+ strlcpy(info.type, client_type, sizeof(info.type));
+ info.addr = addr;
+ info.irq = irq;
+ info.platform_data = platform_data;
+
+ return v4l2_i2c_new_subdev_board(v4l2_dev, adapter, module_name,
+ &info, probe_addrs);
+}
+EXPORT_SYMBOL_GPL(v4l2_i2c_new_subdev_cfg);
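+
+/*
+ * Illustrative sketch only (not a caller added by this patch): a bridge
+ * driver could load a decoder sub-device and hand it an IRQ and platform
+ * data through the new helper.  The "saa7115" names, the 0x21 address and
+ * the my_irq/my_pdata variables below are hypothetical.
+ *
+ *	struct v4l2_subdev *sd;
+ *
+ *	sd = v4l2_i2c_new_subdev_cfg(v4l2_dev, adapter, "saa7115", "saa7115",
+ *				     my_irq, my_pdata, 0x21, NULL);
+ *	if (sd == NULL)
+ *		return -ENODEV;
+ */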
+
/* Return i2c client address of v4l2_subdev. */
unsigned short v4l2_i2c_subdev_addr(struct v4l2_subdev *sd)
{
@@ -916,4 +1021,78 @@
}
EXPORT_SYMBOL_GPL(v4l2_i2c_tuner_addrs);
-#endif
+#endif /* defined(CONFIG_I2C) */
+
+/* Clamp x to be between min and max, aligned to a multiple of 2^align. min
+ * and max don't have to be aligned, but there must be at least one valid
+ * value. E.g., min=17,max=31,align=4 is not allowed as there are no multiples
+ * of 16 between 17 and 31. */
+static unsigned int clamp_align(unsigned int x, unsigned int min,
+ unsigned int max, unsigned int align)
+{
+ /* Bits that must be zero to be aligned */
+ unsigned int mask = ~((1 << align) - 1);
+
+ /* Round to nearest aligned value */
+ if (align)
+ x = (x + (1 << (align - 1))) & mask;
+
+ /* Clamp to aligned value of min and max */
+ if (x < min)
+ x = (min + ~mask) & mask;
+ else if (x > max)
+ x = max & mask;
+
+ return x;
+}
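+
+/*
+ * Worked example: clamp_align(60, 32, 2048, 4) rounds 60 to the nearest
+ * multiple of 2^4 = 16, giving 64, which already lies within [32, 2048]
+ * and is therefore returned unchanged.
+ */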
+
+/* Bound an image to have a width between wmin and wmax, and height between
+ * hmin and hmax, inclusive. Additionally, the width will be a multiple of
+ * 2^walign, the height will be a multiple of 2^halign, and the overall size
+ * (width*height) will be a multiple of 2^salign. The image may be shrunk
+ * or enlarged to fit the alignment constraints.
+ *
+ * The width or height maximum must not be smaller than the corresponding
+ * minimum. The alignments must not be so high there are no possible image
+ * sizes within the allowed bounds. wmin and hmin must be at least 1
+ * (don't use 0). If you don't care about a certain alignment, specify 0,
+ * as 2^0 is 1 and one byte alignment is equivalent to no alignment. If
+ * you only want to adjust downward, specify a maximum that's the same as
+ * the initial value.
+ */
+void v4l_bound_align_image(u32 *w, unsigned int wmin, unsigned int wmax,
+ unsigned int walign,
+ u32 *h, unsigned int hmin, unsigned int hmax,
+ unsigned int halign, unsigned int salign)
+{
+ *w = clamp_align(*w, wmin, wmax, walign);
+ *h = clamp_align(*h, hmin, hmax, halign);
+
+ /* Usually we don't need to align the size and are done now. */
+ if (!salign)
+ return;
+
+ /* How much alignment do we have? */
+ walign = __ffs(*w);
+ halign = __ffs(*h);
+ /* Enough to satisfy the image alignment? */
+ if (walign + halign < salign) {
+ /* Max walign where there is still a valid width */
+ unsigned int wmaxa = __fls(wmax ^ (wmin - 1));
+ /* Max halign where there is still a valid height */
+ unsigned int hmaxa = __fls(hmax ^ (hmin - 1));
+
+ /* up the smaller alignment until we have enough */
+ do {
+ if (halign >= hmaxa ||
+ (walign <= halign && walign < wmaxa)) {
+ *w = clamp_align(*w, wmin, wmax, walign + 1);
+ walign = __ffs(*w);
+ } else {
+ *h = clamp_align(*h, hmin, hmax, halign + 1);
+ halign = __ffs(*h);
+ }
+ } while (halign + walign < salign);
+ }
+}
+EXPORT_SYMBOL_GPL(v4l_bound_align_image);
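+
+/*
+ * Minimal usage sketch, mirroring the pxa_camera conversion above: bound a
+ * frame to 48x32..2048x2048 with an even width and, for YUV422P, an overall
+ * size that is a multiple of 16 bytes (is_yuv422p is a hypothetical flag):
+ *
+ *	v4l_bound_align_image(&pix->width, 48, 2048, 1,
+ *			      &pix->height, 32, 2048, 0,
+ *			      is_yuv422p ? 4 : 0);
+ *
+ * walign = 1 keeps the width a multiple of 2, halign = 0 leaves the height
+ * alignment alone, and salign = 4 forces width * height onto a multiple of
+ * 2^4 = 16.
+ */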
diff --git a/drivers/media/video/vivi.c b/drivers/media/video/vivi.c
index fbfefae..cd72668 100644
--- a/drivers/media/video/vivi.c
+++ b/drivers/media/video/vivi.c
@@ -883,15 +883,8 @@
maxh = norm_maxh();
f->fmt.pix.field = field;
- if (f->fmt.pix.height < 32)
- f->fmt.pix.height = 32;
- if (f->fmt.pix.height > maxh)
- f->fmt.pix.height = maxh;
- if (f->fmt.pix.width < 48)
- f->fmt.pix.width = 48;
- if (f->fmt.pix.width > maxw)
- f->fmt.pix.width = maxw;
- f->fmt.pix.width &= ~0x03;
+ v4l_bound_align_image(&f->fmt.pix.width, 48, maxw, 2,
+ &f->fmt.pix.height, 32, maxh, 0, 0);
f->fmt.pix.bytesperline =
(f->fmt.pix.width * fmt->depth) >> 3;
f->fmt.pix.sizeimage =
diff --git a/drivers/media/video/w9968cf.c b/drivers/media/video/w9968cf.c
index f59b2bd..6c3f23e 100644
--- a/drivers/media/video/w9968cf.c
+++ b/drivers/media/video/w9968cf.c
@@ -460,7 +460,7 @@
static int w9968cf_set_window(struct w9968cf_device*, struct video_window);
static int w9968cf_postprocess_frame(struct w9968cf_device*,
struct w9968cf_frame_t*);
-static int w9968cf_adjust_window_size(struct w9968cf_device*, u16* w, u16* h);
+static int w9968cf_adjust_window_size(struct w9968cf_device*, u32 *w, u32 *h);
static void w9968cf_init_framelist(struct w9968cf_device*);
static void w9968cf_push_frame(struct w9968cf_device*, u8 f_num);
static void w9968cf_pop_frame(struct w9968cf_device*,struct w9968cf_frame_t**);
@@ -1763,8 +1763,7 @@
#define UNSC(x) ((x) >> 10)
/* Make sure we are using a supported resolution */
- if ((err = w9968cf_adjust_window_size(cam, (u16*)&win.width,
- (u16*)&win.height)))
+ if ((err = w9968cf_adjust_window_size(cam, &win.width, &win.height)))
goto error;
/* Scaling factors */
@@ -1914,12 +1913,9 @@
Return 0 on success, -1 otherwise.
--------------------------------------------------------------------------*/
static int
-w9968cf_adjust_window_size(struct w9968cf_device* cam, u16* width, u16* height)
+w9968cf_adjust_window_size(struct w9968cf_device *cam, u32 *width, u32 *height)
{
- u16 maxw, maxh;
-
- if ((*width < cam->minwidth) || (*height < cam->minheight))
- return -ERANGE;
+ unsigned int maxw, maxh, align;
maxw = cam->upscaling && !(cam->vpp_flag & VPP_DECOMPRESSION) &&
w9968cf_vpp ? max((u16)W9968CF_MAX_WIDTH, cam->maxwidth)
@@ -1927,16 +1923,10 @@
maxh = cam->upscaling && !(cam->vpp_flag & VPP_DECOMPRESSION) &&
w9968cf_vpp ? max((u16)W9968CF_MAX_HEIGHT, cam->maxheight)
: cam->maxheight;
+ align = (cam->vpp_flag & VPP_DECOMPRESSION) ? 4 : 0;
- if (*width > maxw)
- *width = maxw;
- if (*height > maxh)
- *height = maxh;
-
- if (cam->vpp_flag & VPP_DECOMPRESSION) {
- *width &= ~15L; /* multiple of 16 */
- *height &= ~15L;
- }
+ v4l_bound_align_image(width, cam->minwidth, maxw, align,
+ height, cam->minheight, maxh, align, 0);
PDBGG("Window size adjusted w=%u, h=%u ", *width, *height)
@@ -3043,8 +3033,8 @@
if (win.clipcount != 0 || win.flags != 0)
return -EINVAL;
- if ((err = w9968cf_adjust_window_size(cam, (u16*)&win.width,
- (u16*)&win.height))) {
+ if ((err = w9968cf_adjust_window_size(cam, &win.width,
+ &win.height))) {
DBG(4, "Resolution not supported (%ux%u). "
"VIDIOCSWIN failed", win.width, win.height)
return err;
@@ -3116,6 +3106,7 @@
{
struct video_mmap mmap;
struct w9968cf_frame_t* fr;
+ u32 w, h;
int err = 0;
if (copy_from_user(&mmap, arg, sizeof(mmap)))
@@ -3164,8 +3155,10 @@
}
}
- if ((err = w9968cf_adjust_window_size(cam, (u16*)&mmap.width,
- (u16*)&mmap.height))) {
+ w = mmap.width; h = mmap.height;
+ err = w9968cf_adjust_window_size(cam, &w, &h);
+ mmap.width = w; mmap.height = h;
+ if (err) {
DBG(4, "Resolution not supported (%dx%d). "
"VIDIOCMCAPTURE failed",
mmap.width, mmap.height)
diff --git a/drivers/media/video/zoran/zoran_driver.c b/drivers/media/video/zoran/zoran_driver.c
index 643ccca..3d7df32 100644
--- a/drivers/media/video/zoran/zoran_driver.c
+++ b/drivers/media/video/zoran/zoran_driver.c
@@ -2088,16 +2088,10 @@
return -EINVAL;
}
- bpp = (zoran_formats[i].depth + 7) / 8;
- fmt->fmt.pix.width &= ~((bpp == 2) ? 1 : 3);
- if (fmt->fmt.pix.width > BUZ_MAX_WIDTH)
- fmt->fmt.pix.width = BUZ_MAX_WIDTH;
- if (fmt->fmt.pix.width < BUZ_MIN_WIDTH)
- fmt->fmt.pix.width = BUZ_MIN_WIDTH;
- if (fmt->fmt.pix.height > BUZ_MAX_HEIGHT)
- fmt->fmt.pix.height = BUZ_MAX_HEIGHT;
- if (fmt->fmt.pix.height < BUZ_MIN_HEIGHT)
- fmt->fmt.pix.height = BUZ_MIN_HEIGHT;
+ bpp = DIV_ROUND_UP(zoran_formats[i].depth, 8);
+ v4l_bound_align_image(
+ &fmt->fmt.pix.width, BUZ_MIN_WIDTH, BUZ_MAX_WIDTH, bpp == 2 ? 1 : 2,
+ &fmt->fmt.pix.height, BUZ_MIN_HEIGHT, BUZ_MAX_HEIGHT, 0, 0);
mutex_unlock(&zr->resource_lock);
return 0;
diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
index 40111a6..891ef18 100644
--- a/drivers/mmc/host/Kconfig
+++ b/drivers/mmc/host/Kconfig
@@ -94,6 +94,31 @@
If unsure, say N.
+config MMC_SDHCI_S3C
+ tristate "SDHCI support on Samsung S3C SoC"
+ depends on MMC_SDHCI && (PLAT_S3C24XX || PLAT_S3C64XX)
+ help
+ This selects the Secure Digital Host Controller Interface (SDHCI)
+ often referrered to as the HSMMC block in some of the Samsung S3C
+ range of SoC.
+
+ Note, due to the problems with DMA, the DMA support is only
+ available with CONFIG_EXPERIMENTAL is selected.
+
+ If you have a controller with this interface, say Y or M here.
+
+ If unsure, say N.
+
+config MMC_SDHCI_S3C_DMA
+ bool "DMA support on S3C SDHCI"
+ depends on MMC_SDHCI_S3C && EXPERIMENTAL
+ help
+ Enable DMA support on the Samsung S3C SDHCI glue. The DMA
+ has proved to be problematic if the controller encounters
+ certain errors, and thus should be treated with care.
+
+ YMMV.
+
config MMC_OMAP
tristate "TI OMAP Multimedia Card Interface support"
depends on ARCH_OMAP
@@ -265,3 +290,14 @@
This driver can also be built as a module. If so, the module
will be called cb710-mmc.
+config MMC_VIA_SDMMC
+ tristate "VIA SD/MMC Card Reader Driver"
+ depends on PCI
+ help
+ This selects the VIA SD/MMC Card Reader driver.
+ VIA provides a multi-functional card reader which is integrated into
+ some motherboards manufactured by VIA. This card reader supports
+ SD/MMC/SDHC.
+ If you have a controller with this interface, say Y or M here.
+
+ If unsure, say N.
diff --git a/drivers/mmc/host/Makefile b/drivers/mmc/host/Makefile
index 79da397..cf153f6 100644
--- a/drivers/mmc/host/Makefile
+++ b/drivers/mmc/host/Makefile
@@ -15,6 +15,7 @@
obj-$(CONFIG_MMC_RICOH_MMC) += ricoh_mmc.o
obj-$(CONFIG_MMC_SDHCI_OF) += sdhci-of.o
obj-$(CONFIG_MMC_SDHCI_PLTFM) += sdhci-pltfm.o
+obj-$(CONFIG_MMC_SDHCI_S3C) += sdhci-s3c.o
obj-$(CONFIG_MMC_WBSD) += wbsd.o
obj-$(CONFIG_MMC_AU1X) += au1xmmc.o
obj-$(CONFIG_MMC_OMAP) += omap.o
@@ -31,6 +32,7 @@
obj-$(CONFIG_MMC_SDRICOH_CS) += sdricoh_cs.o
obj-$(CONFIG_MMC_TMIO) += tmio_mmc.o
obj-$(CONFIG_MMC_CB710) += cb710-mmc.o
+obj-$(CONFIG_MMC_VIA_SDMMC) += via-sdmmc.o
ifeq ($(CONFIG_CB710_DEBUG),y)
CFLAGS-cb710-mmc += -DDEBUG
diff --git a/drivers/mmc/host/s3cmci.c b/drivers/mmc/host/s3cmci.c
index 4eb4f37..8c08cd7 100644
--- a/drivers/mmc/host/s3cmci.c
+++ b/drivers/mmc/host/s3cmci.c
@@ -794,7 +794,7 @@
host->mem->start + host->sdidata);
if (!setup_ok) {
- s3c2410_dma_config(host->dma, 4, 0);
+ s3c2410_dma_config(host->dma, 4);
s3c2410_dma_set_buffdone_fn(host->dma,
s3cmci_dma_done_callback);
s3c2410_dma_setflags(host->dma, S3C2410_DMAF_AUTOSTART);
diff --git a/drivers/mmc/host/sdhci-of.c b/drivers/mmc/host/sdhci-of.c
index 128c614..d79fa55 100644
--- a/drivers/mmc/host/sdhci-of.c
+++ b/drivers/mmc/host/sdhci-of.c
@@ -250,6 +250,9 @@
host->ops = &sdhci_of_data->ops;
}
+ if (of_get_property(np, "sdhci,1-bit-only", NULL))
+ host->quirks |= SDHCI_QUIRK_FORCE_1_BIT_DATA;
+
clk = of_get_property(np, "clock-frequency", &size);
if (clk && size == sizeof(*clk) && *clk)
of_host->clock = *clk;
diff --git a/drivers/mmc/host/sdhci-pci.c b/drivers/mmc/host/sdhci-pci.c
index 65be279..2f15cc1 100644
--- a/drivers/mmc/host/sdhci-pci.c
+++ b/drivers/mmc/host/sdhci-pci.c
@@ -284,6 +284,18 @@
.resume = jmicron_resume,
};
+static int via_probe(struct sdhci_pci_chip *chip)
+{
+ if (chip->pdev->revision == 0x10)
+ chip->quirks |= SDHCI_QUIRK_DELAY_AFTER_POWER;
+
+ return 0;
+}
+
+static const struct sdhci_pci_fixes sdhci_via = {
+ .probe = via_probe,
+};
+
static const struct pci_device_id pci_ids[] __devinitdata = {
{
.vendor = PCI_VENDOR_ID_RICOH,
@@ -349,6 +361,14 @@
.driver_data = (kernel_ulong_t)&sdhci_jmicron,
},
+ {
+ .vendor = PCI_VENDOR_ID_VIA,
+ .device = 0x95d0,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .driver_data = (kernel_ulong_t)&sdhci_via,
+ },
+
{ /* Generic SD host controller */
PCI_DEVICE_CLASS((PCI_CLASS_SYSTEM_SDHCI << 8), 0xFFFF00)
},
diff --git a/drivers/mmc/host/sdhci-s3c.c b/drivers/mmc/host/sdhci-s3c.c
new file mode 100644
index 0000000..50997d2
--- /dev/null
+++ b/drivers/mmc/host/sdhci-s3c.c
@@ -0,0 +1,428 @@
+/* linux/drivers/mmc/host/sdhci-s3c.c
+ *
+ * Copyright 2008 Openmoko Inc.
+ * Copyright 2008 Simtec Electronics
+ * Ben Dooks <ben@simtec.co.uk>
+ * http://armlinux.simtec.co.uk/
+ *
+ * SDHCI (HSMMC) support for Samsung SoC
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+
+#include <linux/mmc/host.h>
+
+#include <plat/sdhci.h>
+#include <plat/regs-sdhci.h>
+
+#include "sdhci.h"
+
+#define MAX_BUS_CLK (4)
+
+/**
+ * struct sdhci_s3c - S3C SDHCI instance
+ * @host: The SDHCI host created
+ * @pdev: The platform device we were created from.
+ * @ioarea: The resource created when we claimed the IO area.
+ * @pdata: The platform data for this controller.
+ * @cur_clk: The index of the current bus clock.
+ * @clk_io: The clock for the internal bus interface.
+ * @clk_bus: The clocks that are available for the SD/MMC bus clock.
+ */
+struct sdhci_s3c {
+ struct sdhci_host *host;
+ struct platform_device *pdev;
+ struct resource *ioarea;
+ struct s3c_sdhci_platdata *pdata;
+ unsigned int cur_clk;
+
+ struct clk *clk_io;
+ struct clk *clk_bus[MAX_BUS_CLK];
+};
+
+static inline struct sdhci_s3c *to_s3c(struct sdhci_host *host)
+{
+ return sdhci_priv(host);
+}
+
+/**
+ * get_curclk - convert ctrl2 register to clock source number
+ * @ctrl2: Control2 register value.
+ */
+static u32 get_curclk(u32 ctrl2)
+{
+ ctrl2 &= S3C_SDHCI_CTRL2_SELBASECLK_MASK;
+ ctrl2 >>= S3C_SDHCI_CTRL2_SELBASECLK_SHIFT;
+
+ return ctrl2;
+}
+
+static void sdhci_s3c_check_sclk(struct sdhci_host *host)
+{
+ struct sdhci_s3c *ourhost = to_s3c(host);
+ u32 tmp = readl(host->ioaddr + S3C_SDHCI_CONTROL2);
+
+ if (get_curclk(tmp) != ourhost->cur_clk) {
+ dev_dbg(&ourhost->pdev->dev, "restored ctrl2 clock setting\n");
+
+ tmp &= ~S3C_SDHCI_CTRL2_SELBASECLK_MASK;
+ tmp |= ourhost->cur_clk << S3C_SDHCI_CTRL2_SELBASECLK_SHIFT;
+ writel(tmp, host->ioaddr + 0x80);
+ }
+}
+
+/**
+ * sdhci_s3c_get_max_clk - callback to get maximum clock frequency.
+ * @host: The SDHCI host instance.
+ *
+ * Callback to return the maximum clock rate achievable by the controller.
+*/
+static unsigned int sdhci_s3c_get_max_clk(struct sdhci_host *host)
+{
+ struct sdhci_s3c *ourhost = to_s3c(host);
+ struct clk *busclk;
+ unsigned int rate, max;
+ int clk;
+
+ /* note, a reset will reset the clock source */
+
+ sdhci_s3c_check_sclk(host);
+
+ for (max = 0, clk = 0; clk < MAX_BUS_CLK; clk++) {
+ busclk = ourhost->clk_bus[clk];
+ if (!busclk)
+ continue;
+
+ rate = clk_get_rate(busclk);
+ if (rate > max)
+ max = rate;
+ }
+
+ return max;
+}
+
+static unsigned int sdhci_s3c_get_timeout_clk(struct sdhci_host *host)
+{
+ return sdhci_s3c_get_max_clk(host) / 1000000;
+}
+
+/**
+ * sdhci_s3c_consider_clock - consider one of the bus clocks for the current setting
+ * @ourhost: Our SDHCI instance.
+ * @src: The source clock index.
+ * @wanted: The clock frequency wanted.
+ */
+static unsigned int sdhci_s3c_consider_clock(struct sdhci_s3c *ourhost,
+ unsigned int src,
+ unsigned int wanted)
+{
+ unsigned long rate;
+ struct clk *clksrc = ourhost->clk_bus[src];
+ int div;
+
+ if (!clksrc)
+ return UINT_MAX;
+
+ rate = clk_get_rate(clksrc);
+
+ for (div = 1; div < 256; div *= 2) {
+ if ((rate / div) <= wanted)
+ break;
+ }
+
+ dev_dbg(&ourhost->pdev->dev, "clk %d: rate %ld, want %d, got %ld\n",
+ src, rate, wanted, rate / div);
+
+ return (wanted - (rate / div));
+}
+
+/**
+ * sdhci_s3c_set_clock - callback on clock change
+ * @host: The SDHCI host being changed
+ * @clock: The clock rate being requested.
+ *
+ * When the card's clock is going to be changed, look at the new frequency
+ * and find the best clock source to go with it.
+*/
+static void sdhci_s3c_set_clock(struct sdhci_host *host, unsigned int clock)
+{
+ struct sdhci_s3c *ourhost = to_s3c(host);
+ unsigned int best = UINT_MAX;
+ unsigned int delta;
+ int best_src = 0;
+ int src;
+ u32 ctrl;
+
+ /* don't bother if the clock is going off. */
+ if (clock == 0)
+ return;
+
+ for (src = 0; src < MAX_BUS_CLK; src++) {
+ delta = sdhci_s3c_consider_clock(ourhost, src, clock);
+ if (delta < best) {
+ best = delta;
+ best_src = src;
+ }
+ }
+
+ dev_dbg(&ourhost->pdev->dev,
+ "selected source %d, clock %d, delta %d\n",
+ best_src, clock, best);
+
+ /* select the new clock source */
+
+ if (ourhost->cur_clk != best_src) {
+ struct clk *clk = ourhost->clk_bus[best_src];
+
+ /* turn clock off to card before changing clock source */
+ writew(0, host->ioaddr + SDHCI_CLOCK_CONTROL);
+
+ ourhost->cur_clk = best_src;
+ host->max_clk = clk_get_rate(clk);
+ host->timeout_clk = sdhci_s3c_get_timeout_clk(host);
+
+ ctrl = readl(host->ioaddr + S3C_SDHCI_CONTROL2);
+ ctrl &= ~S3C_SDHCI_CTRL2_SELBASECLK_MASK;
+ ctrl |= best_src << S3C_SDHCI_CTRL2_SELBASECLK_SHIFT;
+ writel(ctrl, host->ioaddr + S3C_SDHCI_CONTROL2);
+ }
+
+ /* reconfigure the hardware for new clock rate */
+
+ {
+ struct mmc_ios ios;
+
+ ios.clock = clock;
+
+ if (ourhost->pdata->cfg_card)
+ (ourhost->pdata->cfg_card)(ourhost->pdev, host->ioaddr,
+ &ios, NULL);
+ }
+}
+
+static struct sdhci_ops sdhci_s3c_ops = {
+ .get_max_clock = sdhci_s3c_get_max_clk,
+ .get_timeout_clock = sdhci_s3c_get_timeout_clk,
+ .set_clock = sdhci_s3c_set_clock,
+};
+
+static int __devinit sdhci_s3c_probe(struct platform_device *pdev)
+{
+ struct s3c_sdhci_platdata *pdata = pdev->dev.platform_data;
+ struct device *dev = &pdev->dev;
+ struct sdhci_host *host;
+ struct sdhci_s3c *sc;
+ struct resource *res;
+ int ret, irq, ptr, clks;
+
+ if (!pdata) {
+ dev_err(dev, "no device data specified\n");
+ return -ENOENT;
+ }
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(dev, "no irq specified\n");
+ return irq;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(dev, "no memory specified\n");
+ return -ENOENT;
+ }
+
+ host = sdhci_alloc_host(dev, sizeof(struct sdhci_s3c));
+ if (IS_ERR(host)) {
+ dev_err(dev, "sdhci_alloc_host() failed\n");
+ return PTR_ERR(host);
+ }
+
+ sc = sdhci_priv(host);
+
+ sc->host = host;
+ sc->pdev = pdev;
+ sc->pdata = pdata;
+
+ platform_set_drvdata(pdev, host);
+
+ sc->clk_io = clk_get(dev, "hsmmc");
+ if (IS_ERR(sc->clk_io)) {
+ dev_err(dev, "failed to get io clock\n");
+ ret = PTR_ERR(sc->clk_io);
+ goto err_io_clk;
+ }
+
+ /* enable the local io clock and keep it running for the moment. */
+ clk_enable(sc->clk_io);
+
+ for (clks = 0, ptr = 0; ptr < MAX_BUS_CLK; ptr++) {
+ struct clk *clk;
+ char *name = pdata->clocks[ptr];
+
+ if (name == NULL)
+ continue;
+
+ clk = clk_get(dev, name);
+ if (IS_ERR(clk)) {
+ dev_err(dev, "failed to get clock %s\n", name);
+ continue;
+ }
+
+ clks++;
+ sc->clk_bus[ptr] = clk;
+ clk_enable(clk);
+
+ dev_info(dev, "clock source %d: %s (%ld Hz)\n",
+ ptr, name, clk_get_rate(clk));
+ }
+
+ if (clks == 0) {
+ dev_err(dev, "failed to find any bus clocks\n");
+ ret = -ENOENT;
+ goto err_no_busclks;
+ }
+
+ sc->ioarea = request_mem_region(res->start, resource_size(res),
+ mmc_hostname(host->mmc));
+ if (!sc->ioarea) {
+ dev_err(dev, "failed to reserve register area\n");
+ ret = -ENXIO;
+ goto err_req_regs;
+ }
+
+ host->ioaddr = ioremap_nocache(res->start, resource_size(res));
+ if (!host->ioaddr) {
+ dev_err(dev, "failed to map registers\n");
+ ret = -ENXIO;
+ goto err_req_regs;
+ }
+
+ /* Ensure we have at least the minimal GPIOs selected for CMD/CLK/Detect */
+ if (pdata->cfg_gpio)
+ pdata->cfg_gpio(pdev, pdata->max_width);
+
+ host->hw_name = "samsung-hsmmc";
+ host->ops = &sdhci_s3c_ops;
+ host->quirks = 0;
+ host->irq = irq;
+
+ /* Setup quirks for the controller */
+
+ /* Currently with ADMA enabled we are getting some length
+ * interrupts that are not being dealt with, so disable
+ * ADMA until this is sorted out. */
+ host->quirks |= SDHCI_QUIRK_BROKEN_ADMA;
+ host->quirks |= SDHCI_QUIRK_32BIT_ADMA_SIZE;
+
+#ifndef CONFIG_MMC_SDHCI_S3C_DMA
+
+ /* we currently see overruns on errors, so disable the SDMA
+ * support as well. */
+ host->quirks |= SDHCI_QUIRK_BROKEN_DMA;
+
+ /* PIO currently has problems with multi-block IO */
+ host->quirks |= SDHCI_QUIRK_NO_MULTIBLOCK;
+
+#endif /* CONFIG_MMC_SDHCI_S3C_DMA */
+
+ /* It seems we do not get a DATA transfer complete on non-busy
+ * transfers, not sure if this is a problem with this specific
+ * SDHCI block, or a missing configuration that needs to be set. */
+ host->quirks |= SDHCI_QUIRK_NO_BUSY_IRQ;
+
+ host->quirks |= (SDHCI_QUIRK_32BIT_DMA_ADDR |
+ SDHCI_QUIRK_32BIT_DMA_SIZE);
+
+ ret = sdhci_add_host(host);
+ if (ret) {
+ dev_err(dev, "sdhci_add_host() failed\n");
+ goto err_add_host;
+ }
+
+ return 0;
+
+ err_add_host:
+ release_resource(sc->ioarea);
+ kfree(sc->ioarea);
+
+ err_req_regs:
+ for (ptr = 0; ptr < MAX_BUS_CLK; ptr++) {
+ clk_disable(sc->clk_bus[ptr]);
+ clk_put(sc->clk_bus[ptr]);
+ }
+
+ err_no_busclks:
+ clk_disable(sc->clk_io);
+ clk_put(sc->clk_io);
+
+ err_io_clk:
+ sdhci_free_host(host);
+
+ return ret;
+}
+
+static int __devexit sdhci_s3c_remove(struct platform_device *pdev)
+{
+ return 0;
+}
+
+#ifdef CONFIG_PM
+
+static int sdhci_s3c_suspend(struct platform_device *dev, pm_message_t pm)
+{
+ struct sdhci_host *host = platform_get_drvdata(dev);
+
+ sdhci_suspend_host(host, pm);
+ return 0;
+}
+
+static int sdhci_s3c_resume(struct platform_device *dev)
+{
+ struct sdhci_host *host = platform_get_drvdata(dev);
+
+ sdhci_resume_host(host);
+ return 0;
+}
+
+#else
+#define sdhci_s3c_suspend NULL
+#define sdhci_s3c_resume NULL
+#endif
+
+static struct platform_driver sdhci_s3c_driver = {
+ .probe = sdhci_s3c_probe,
+ .remove = __devexit_p(sdhci_s3c_remove),
+ .suspend = sdhci_s3c_suspend,
+ .resume = sdhci_s3c_resume,
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "s3c-sdhci",
+ },
+};
+
+static int __init sdhci_s3c_init(void)
+{
+ return platform_driver_register(&sdhci_s3c_driver);
+}
+
+static void __exit sdhci_s3c_exit(void)
+{
+ platform_driver_unregister(&sdhci_s3c_driver);
+}
+
+module_init(sdhci_s3c_init);
+module_exit(sdhci_s3c_exit);
+
+MODULE_DESCRIPTION("Samsung SDHCI (HSMMC) glue");
+MODULE_AUTHOR("Ben Dooks, <ben@simtec.co.uk>");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:s3c-sdhci");
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index 35789c6..6779b4e 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -584,7 +584,7 @@
* longer to time out, but that's much better than having a too-short
* timeout value.
*/
- if ((host->quirks & SDHCI_QUIRK_BROKEN_TIMEOUT_VAL))
+ if (host->quirks & SDHCI_QUIRK_BROKEN_TIMEOUT_VAL)
return 0xE;
/* timeout in us */
@@ -1051,12 +1051,19 @@
* At least the Marvell CaFe chip gets confused if we set the voltage
* and set turn on power at the same time, so set the voltage first.
*/
- if ((host->quirks & SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER))
+ if (host->quirks & SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER)
sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);
pwr |= SDHCI_POWER_ON;
sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);
+
+ /*
+ * Some controllers need an extra 10ms delay before they
+ * can apply clock after applying power.
+ */
+ if (host->quirks & SDHCI_QUIRK_DELAY_AFTER_POWER)
+ mdelay(10);
}
/*****************************************************************************\
@@ -1382,6 +1389,35 @@
sdhci_finish_command(host);
}
+#ifdef DEBUG
+static void sdhci_show_adma_error(struct sdhci_host *host)
+{
+ const char *name = mmc_hostname(host->mmc);
+ u8 *desc = host->adma_desc;
+ __le32 *dma;
+ __le16 *len;
+ u8 attr;
+
+ sdhci_dumpregs(host);
+
+ while (true) {
+ dma = (__le32 *)(desc + 4);
+ len = (__le16 *)(desc + 2);
+ attr = *desc;
+
+ DBG("%s: %p: DMA 0x%08x, LEN 0x%04x, Attr=0x%02x\n",
+ name, desc, le32_to_cpu(*dma), le16_to_cpu(*len), attr);
+
+ desc += 8;
+
+ if (attr & 2)
+ break;
+ }
+}
+#else
+static void sdhci_show_adma_error(struct sdhci_host *host) { }
+#endif
+
static void sdhci_data_irq(struct sdhci_host *host, u32 intmask)
{
BUG_ON(intmask == 0);
@@ -1411,8 +1447,11 @@
host->data->error = -ETIMEDOUT;
else if (intmask & (SDHCI_INT_DATA_CRC | SDHCI_INT_DATA_END_BIT))
host->data->error = -EILSEQ;
- else if (intmask & SDHCI_INT_ADMA_ERROR)
+ else if (intmask & SDHCI_INT_ADMA_ERROR) {
+ printk(KERN_ERR "%s: ADMA error\n", mmc_hostname(host->mmc));
+ sdhci_show_adma_error(host);
host->data->error = -EIO;
+ }
if (host->data->error)
sdhci_finish_data(host);
@@ -1729,7 +1768,10 @@
mmc->ops = &sdhci_ops;
mmc->f_min = host->max_clk / 256;
mmc->f_max = host->max_clk;
- mmc->caps = MMC_CAP_4_BIT_DATA | MMC_CAP_SDIO_IRQ;
+ mmc->caps = MMC_CAP_SDIO_IRQ;
+
+ if (!(host->quirks & SDHCI_QUIRK_FORCE_1_BIT_DATA))
+ mmc->caps |= MMC_CAP_4_BIT_DATA;
if (caps & SDHCI_CAN_DO_HISPD)
mmc->caps |= MMC_CAP_SD_HIGHSPEED;
@@ -1802,7 +1844,7 @@
/*
* Maximum block count.
*/
- mmc->max_blk_count = 65535;
+ mmc->max_blk_count = (host->quirks & SDHCI_QUIRK_NO_MULTIBLOCK) ? 1 : 65535;
/*
* Init tasklets.
diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
index 2de0834..831ddf7 100644
--- a/drivers/mmc/host/sdhci.h
+++ b/drivers/mmc/host/sdhci.h
@@ -226,6 +226,12 @@
#define SDHCI_QUIRK_RESTORE_IRQS_AFTER_RESET (1<<19)
/* Controller has to be forced to use block size of 2048 bytes */
#define SDHCI_QUIRK_FORCE_BLK_SZ_2048 (1<<20)
+/* Controller cannot do multi-block transfers */
+#define SDHCI_QUIRK_NO_MULTIBLOCK (1<<21)
+/* Controller can only handle 1-bit data transfers */
+#define SDHCI_QUIRK_FORCE_1_BIT_DATA (1<<22)
+/* Controller needs 10ms delay between applying power and clock */
+#define SDHCI_QUIRK_DELAY_AFTER_POWER (1<<23)
int irq; /* Device IRQ */
void __iomem * ioaddr; /* Mapped address */
diff --git a/drivers/mmc/host/via-sdmmc.c b/drivers/mmc/host/via-sdmmc.c
new file mode 100644
index 0000000..632858a
--- /dev/null
+++ b/drivers/mmc/host/via-sdmmc.c
@@ -0,0 +1,1362 @@
+/*
+ * drivers/mmc/host/via-sdmmc.c - VIA SD/MMC Card Reader driver
+ * Copyright (c) 2008, VIA Technologies Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ */
+
+#include <linux/pci.h>
+#include <linux/dma-mapping.h>
+#include <linux/highmem.h>
+#include <linux/delay.h>
+
+#include <linux/mmc/host.h>
+
+#define DRV_NAME "via_sdmmc"
+
+#define PCI_DEVICE_ID_VIA_9530 0x9530
+
+#define VIA_CRDR_SDC_OFF 0x200
+#define VIA_CRDR_DDMA_OFF 0x400
+#define VIA_CRDR_PCICTRL_OFF 0x600
+
+#define VIA_CRDR_MIN_CLOCK 375000
+#define VIA_CRDR_MAX_CLOCK 48000000
+
+/*
+ * PCI registers
+ */
+
+#define VIA_CRDR_PCI_WORK_MODE 0x40
+#define VIA_CRDR_PCI_DBG_MODE 0x41
+
+/*
+ * SDC MMIO Registers
+ */
+
+#define VIA_CRDR_SDCTRL 0x0
+#define VIA_CRDR_SDCTRL_START 0x01
+#define VIA_CRDR_SDCTRL_WRITE 0x04
+#define VIA_CRDR_SDCTRL_SINGLE_WR 0x10
+#define VIA_CRDR_SDCTRL_SINGLE_RD 0x20
+#define VIA_CRDR_SDCTRL_MULTI_WR 0x30
+#define VIA_CRDR_SDCTRL_MULTI_RD 0x40
+#define VIA_CRDR_SDCTRL_STOP 0x70
+
+#define VIA_CRDR_SDCTRL_RSP_NONE 0x0
+#define VIA_CRDR_SDCTRL_RSP_R1 0x10000
+#define VIA_CRDR_SDCTRL_RSP_R2 0x20000
+#define VIA_CRDR_SDCTRL_RSP_R3 0x30000
+#define VIA_CRDR_SDCTRL_RSP_R1B 0x90000
+
+#define VIA_CRDR_SDCARG 0x4
+
+#define VIA_CRDR_SDBUSMODE 0x8
+#define VIA_CRDR_SDMODE_4BIT 0x02
+#define VIA_CRDR_SDMODE_CLK_ON 0x40
+
+#define VIA_CRDR_SDBLKLEN 0xc
+/*
+ * Bit 0 - Bit 10 : Block length. So the maximum block length is 2048.
+ * Bit 11 - Bit 13 : Reserved.
+ * GPIDET : Select GPI pin to detect card, GPI means CR_CD# in top design.
+ * INTEN : Enable SD host interrupt.
+ * Bit 16 - Bit 31 : Block count. So the maximum block count is 65536.
+ */
+#define VIA_CRDR_SDBLKLEN_GPIDET 0x2000
+#define VIA_CRDR_SDBLKLEN_INTEN 0x8000
+#define VIA_CRDR_MAX_BLOCK_COUNT 65536
+#define VIA_CRDR_MAX_BLOCK_LENGTH 2048
+
+#define VIA_CRDR_SDRESP0 0x10
+#define VIA_CRDR_SDRESP1 0x14
+#define VIA_CRDR_SDRESP2 0x18
+#define VIA_CRDR_SDRESP3 0x1c
+
+#define VIA_CRDR_SDCURBLKCNT 0x20
+
+#define VIA_CRDR_SDINTMASK 0x24
+/*
+ * MBDIE : Multiple Blocks transfer Done Interrupt Enable
+ * BDDIE : Block Data transfer Done Interrupt Enable
+ * CIRIE : Card Insertion or Removal Interrupt Enable
+ * CRDIE : Command-Response transfer Done Interrupt Enable
+ * CRTOIE : Command-Response response TimeOut Interrupt Enable
+ * ASCRDIE : Auto Stop Command-Response transfer Done Interrupt Enable
+ * DTIE : Data access Timeout Interrupt Enable
+ * SCIE : reSponse CRC error Interrupt Enable
+ * RCIE : Read data CRC error Interrupt Enable
+ * WCIE : Write data CRC error Interrupt Enable
+ */
+#define VIA_CRDR_SDINTMASK_MBDIE 0x10
+#define VIA_CRDR_SDINTMASK_BDDIE 0x20
+#define VIA_CRDR_SDINTMASK_CIRIE 0x80
+#define VIA_CRDR_SDINTMASK_CRDIE 0x200
+#define VIA_CRDR_SDINTMASK_CRTOIE 0x400
+#define VIA_CRDR_SDINTMASK_ASCRDIE 0x800
+#define VIA_CRDR_SDINTMASK_DTIE 0x1000
+#define VIA_CRDR_SDINTMASK_SCIE 0x2000
+#define VIA_CRDR_SDINTMASK_RCIE 0x4000
+#define VIA_CRDR_SDINTMASK_WCIE 0x8000
+
+#define VIA_CRDR_SDACTIVE_INTMASK \
+ (VIA_CRDR_SDINTMASK_MBDIE | VIA_CRDR_SDINTMASK_CIRIE \
+ | VIA_CRDR_SDINTMASK_CRDIE | VIA_CRDR_SDINTMASK_CRTOIE \
+ | VIA_CRDR_SDINTMASK_DTIE | VIA_CRDR_SDINTMASK_SCIE \
+ | VIA_CRDR_SDINTMASK_RCIE | VIA_CRDR_SDINTMASK_WCIE)
+
+#define VIA_CRDR_SDSTATUS 0x28
+/*
+ * CECC : Reserved
+ * WP : SD card Write Protect status
+ * SLOTD : Reserved
+ * SLOTG : SD SLOT status(Gpi pin status)
+ * MBD : Multiple Blocks transfer Done interrupt status
+ * BDD : Block Data transfer Done interrupt status
+ * CD : Reserved
+ * CIR : Card Insertion or Removal interrupt detected on GPI pin
+ * IO : Reserved
+ * CRD : Command-Response transfer Done interrupt status
+ * CRTO : Command-Response response TimeOut interrupt status
+ * ASCRDIE : Auto Stop Command-Response transfer Done interrupt status
+ * DT : Data access Timeout interrupt status
+ * SC : reSponse CRC error interrupt status
+ * RC : Read data CRC error interrupt status
+ * WC : Write data CRC error interrupt status
+ */
+#define VIA_CRDR_SDSTS_CECC 0x01
+#define VIA_CRDR_SDSTS_WP 0x02
+#define VIA_CRDR_SDSTS_SLOTD 0x04
+#define VIA_CRDR_SDSTS_SLOTG 0x08
+#define VIA_CRDR_SDSTS_MBD 0x10
+#define VIA_CRDR_SDSTS_BDD 0x20
+#define VIA_CRDR_SDSTS_CD 0x40
+#define VIA_CRDR_SDSTS_CIR 0x80
+#define VIA_CRDR_SDSTS_IO 0x100
+#define VIA_CRDR_SDSTS_CRD 0x200
+#define VIA_CRDR_SDSTS_CRTO 0x400
+#define VIA_CRDR_SDSTS_ASCRDIE 0x800
+#define VIA_CRDR_SDSTS_DT 0x1000
+#define VIA_CRDR_SDSTS_SC 0x2000
+#define VIA_CRDR_SDSTS_RC 0x4000
+#define VIA_CRDR_SDSTS_WC 0x8000
+
+#define VIA_CRDR_SDSTS_IGN_MASK\
+ (VIA_CRDR_SDSTS_BDD | VIA_CRDR_SDSTS_ASCRDIE | VIA_CRDR_SDSTS_IO)
+#define VIA_CRDR_SDSTS_INT_MASK \
+ (VIA_CRDR_SDSTS_MBD | VIA_CRDR_SDSTS_BDD | VIA_CRDR_SDSTS_CD \
+ | VIA_CRDR_SDSTS_CIR | VIA_CRDR_SDSTS_IO | VIA_CRDR_SDSTS_CRD \
+ | VIA_CRDR_SDSTS_CRTO | VIA_CRDR_SDSTS_ASCRDIE | VIA_CRDR_SDSTS_DT \
+ | VIA_CRDR_SDSTS_SC | VIA_CRDR_SDSTS_RC | VIA_CRDR_SDSTS_WC)
+#define VIA_CRDR_SDSTS_W1C_MASK \
+ (VIA_CRDR_SDSTS_CECC | VIA_CRDR_SDSTS_MBD | VIA_CRDR_SDSTS_BDD \
+ | VIA_CRDR_SDSTS_CD | VIA_CRDR_SDSTS_CIR | VIA_CRDR_SDSTS_CRD \
+ | VIA_CRDR_SDSTS_CRTO | VIA_CRDR_SDSTS_ASCRDIE | VIA_CRDR_SDSTS_DT \
+ | VIA_CRDR_SDSTS_SC | VIA_CRDR_SDSTS_RC | VIA_CRDR_SDSTS_WC)
+#define VIA_CRDR_SDSTS_CMD_MASK \
+ (VIA_CRDR_SDSTS_CRD | VIA_CRDR_SDSTS_CRTO | VIA_CRDR_SDSTS_SC)
+#define VIA_CRDR_SDSTS_DATA_MASK\
+ (VIA_CRDR_SDSTS_MBD | VIA_CRDR_SDSTS_DT \
+ | VIA_CRDR_SDSTS_RC | VIA_CRDR_SDSTS_WC)
+
+#define VIA_CRDR_SDSTATUS2 0x2a
+/*
+ * CFE : Enable SD host automatic Clock FReezing
+ */
+#define VIA_CRDR_SDSTS_CFE 0x80
+
+#define VIA_CRDR_SDRSPTMO 0x2C
+
+#define VIA_CRDR_SDCLKSEL 0x30
+
+#define VIA_CRDR_SDEXTCTRL 0x34
+#define VIA_CRDR_SDEXTCTRL_AUTOSTOP_SD 0x01
+#define VIA_CRDR_SDEXTCTRL_SHIFT_9 0x02
+#define VIA_CRDR_SDEXTCTRL_MMC_8BIT 0x04
+#define VIA_CRDR_SDEXTCTRL_RELD_BLK 0x08
+#define VIA_CRDR_SDEXTCTRL_BAD_CMDA 0x10
+#define VIA_CRDR_SDEXTCTRL_BAD_DATA 0x20
+#define VIA_CRDR_SDEXTCTRL_AUTOSTOP_SPI 0x40
+#define VIA_CRDR_SDEXTCTRL_HISPD 0x80
+/* 0x38-0xFF reserved */
+
+/*
+ * Data DMA Control Registers
+ */
+
+#define VIA_CRDR_DMABASEADD 0x0
+#define VIA_CRDR_DMACOUNTER 0x4
+
+#define VIA_CRDR_DMACTRL 0x8
+/*
+ * DIR : Transaction Direction
+ * 0 : From card to memory
+ * 1 : From memory to card
+ */
+#define VIA_CRDR_DMACTRL_DIR 0x100
+#define VIA_CRDR_DMACTRL_ENIRQ 0x10000
+#define VIA_CRDR_DMACTRL_SFTRST 0x1000000
+
+#define VIA_CRDR_DMASTS 0xc
+
+#define VIA_CRDR_DMASTART 0x10
+/*0x14-0xFF reserved*/
+
+/*
+ * PCI Control Registers
+ */
+
+/*0x0 - 0x1 reserved*/
+#define VIA_CRDR_PCICLKGATT 0x2
+/*
+ * SFTRST :
+ * 0 : Soft reset the whole controller; the reset is de-asserted automatically
+ * 1 : Soft reset is de-asserted
+ */
+#define VIA_CRDR_PCICLKGATT_SFTRST 0x01
+/*
+ * 3V3 : Pad power select
+ * 0 : 1.8V
+ * 1 : 3.3V
+ * NOTE : No matter what the actual value is, this bit always
+ * reads as 0. This is a hardware bug.
+ */
+#define VIA_CRDR_PCICLKGATT_3V3 0x10
+/*
+ * PAD_PWRON : Pad Power on/off select
+ * 0 : Power off
+ * 1 : Power on
+ * NOTE : No matter what the actual value is, this bit always
+ * reads as 0. This is a hardware bug.
+ */
+#define VIA_CRDR_PCICLKGATT_PAD_PWRON 0x20
+
+#define VIA_CRDR_PCISDCCLK 0x5
+
+#define VIA_CRDR_PCIDMACLK 0x7
+#define VIA_CRDR_PCIDMACLK_SDC 0x2
+
+#define VIA_CRDR_PCIINTCTRL 0x8
+#define VIA_CRDR_PCIINTCTRL_SDCIRQEN 0x04
+
+#define VIA_CRDR_PCIINTSTATUS 0x9
+#define VIA_CRDR_PCIINTSTATUS_SDC 0x04
+
+#define VIA_CRDR_PCITMOCTRL 0xa
+#define VIA_CRDR_PCITMOCTRL_NO 0x0
+#define VIA_CRDR_PCITMOCTRL_32US 0x1
+#define VIA_CRDR_PCITMOCTRL_256US 0x2
+#define VIA_CRDR_PCITMOCTRL_1024US 0x3
+#define VIA_CRDR_PCITMOCTRL_256MS 0x4
+#define VIA_CRDR_PCITMOCTRL_512MS 0x5
+#define VIA_CRDR_PCITMOCTRL_1024MS 0x6
+
+/*0xB-0xFF reserved*/
+
+enum PCI_HOST_CLK_CONTROL {
+ PCI_CLK_375K = 0x03,
+ PCI_CLK_8M = 0x04,
+ PCI_CLK_12M = 0x00,
+ PCI_CLK_16M = 0x05,
+ PCI_CLK_24M = 0x01,
+ PCI_CLK_33M = 0x06,
+ PCI_CLK_48M = 0x02
+};
+
+struct sdhcreg {
+ u32 sdcontrol_reg;
+ u32 sdcmdarg_reg;
+ u32 sdbusmode_reg;
+ u32 sdblklen_reg;
+ u32 sdresp_reg[4];
+ u32 sdcurblkcnt_reg;
+ u32 sdintmask_reg;
+ u32 sdstatus_reg;
+ u32 sdrsptmo_reg;
+ u32 sdclksel_reg;
+ u32 sdextctrl_reg;
+};
+
+struct pcictrlreg {
+ u8 reserve[2];
+ u8 pciclkgat_reg;
+ u8 pcinfcclk_reg;
+ u8 pcimscclk_reg;
+ u8 pcisdclk_reg;
+ u8 pcicaclk_reg;
+ u8 pcidmaclk_reg;
+ u8 pciintctrl_reg;
+ u8 pciintstatus_reg;
+ u8 pcitmoctrl_reg;
+ u8 Resv;
+};
+
+struct via_crdr_mmc_host {
+ struct mmc_host *mmc;
+ struct mmc_request *mrq;
+ struct mmc_command *cmd;
+ struct mmc_data *data;
+
+ void __iomem *mmiobase;
+ void __iomem *sdhc_mmiobase;
+ void __iomem *ddma_mmiobase;
+ void __iomem *pcictrl_mmiobase;
+
+ struct pcictrlreg pm_pcictrl_reg;
+ struct sdhcreg pm_sdhc_reg;
+
+ struct work_struct carddet_work;
+ struct tasklet_struct finish_tasklet;
+
+ struct timer_list timer;
+ spinlock_t lock;
+ u8 power;
+ int reject;
+ unsigned int quirks;
+};
+
+/* some devices need a very long delay for power to stabilize */
+#define VIA_CRDR_QUIRK_300MS_PWRDELAY 0x0001
+
+static struct pci_device_id via_ids[] = {
+ {PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_9530,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0,},
+ {0,}
+};
+
+MODULE_DEVICE_TABLE(pci, via_ids);
+
+static void via_print_sdchc(struct via_crdr_mmc_host *host)
+{
+ void __iomem *addrbase = host->sdhc_mmiobase;
+
+ pr_debug("SDC MMIO Registers:\n");
+ pr_debug("SDCONTROL=%08x, SDCMDARG=%08x, SDBUSMODE=%08x\n",
+ readl(addrbase + VIA_CRDR_SDCTRL),
+ readl(addrbase + VIA_CRDR_SDCARG),
+ readl(addrbase + VIA_CRDR_SDBUSMODE));
+ pr_debug("SDBLKLEN=%08x, SDCURBLKCNT=%08x, SDINTMASK=%08x\n",
+ readl(addrbase + VIA_CRDR_SDBLKLEN),
+ readl(addrbase + VIA_CRDR_SDCURBLKCNT),
+ readl(addrbase + VIA_CRDR_SDINTMASK));
+ pr_debug("SDSTATUS=%08x, SDCLKSEL=%08x, SDEXTCTRL=%08x\n",
+ readl(addrbase + VIA_CRDR_SDSTATUS),
+ readl(addrbase + VIA_CRDR_SDCLKSEL),
+ readl(addrbase + VIA_CRDR_SDEXTCTRL));
+}
+
+static void via_print_pcictrl(struct via_crdr_mmc_host *host)
+{
+ void __iomem *addrbase = host->pcictrl_mmiobase;
+
+ pr_debug("PCI Control Registers:\n");
+ pr_debug("PCICLKGATT=%02x, PCISDCCLK=%02x, PCIDMACLK=%02x\n",
+ readb(addrbase + VIA_CRDR_PCICLKGATT),
+ readb(addrbase + VIA_CRDR_PCISDCCLK),
+ readb(addrbase + VIA_CRDR_PCIDMACLK));
+ pr_debug("PCIINTCTRL=%02x, PCIINTSTATUS=%02x\n",
+ readb(addrbase + VIA_CRDR_PCIINTCTRL),
+ readb(addrbase + VIA_CRDR_PCIINTSTATUS));
+}
+
+static void via_save_pcictrlreg(struct via_crdr_mmc_host *host)
+{
+ struct pcictrlreg *pm_pcictrl_reg;
+ void __iomem *addrbase;
+
+ pm_pcictrl_reg = &(host->pm_pcictrl_reg);
+ addrbase = host->pcictrl_mmiobase;
+
+ pm_pcictrl_reg->pciclkgat_reg = readb(addrbase + VIA_CRDR_PCICLKGATT);
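+ /*
+ * The 3V3 and PAD_PWRON bits always read back as 0 (hardware bug,
+ * see the register description above), so force them on in the
+ * saved copy.
+ */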
+ pm_pcictrl_reg->pciclkgat_reg |=
+ VIA_CRDR_PCICLKGATT_3V3 | VIA_CRDR_PCICLKGATT_PAD_PWRON;
+ pm_pcictrl_reg->pcisdclk_reg = readb(addrbase + VIA_CRDR_PCISDCCLK);
+ pm_pcictrl_reg->pcidmaclk_reg = readb(addrbase + VIA_CRDR_PCIDMACLK);
+ pm_pcictrl_reg->pciintctrl_reg = readb(addrbase + VIA_CRDR_PCIINTCTRL);
+ pm_pcictrl_reg->pciintstatus_reg =
+ readb(addrbase + VIA_CRDR_PCIINTSTATUS);
+ pm_pcictrl_reg->pcitmoctrl_reg = readb(addrbase + VIA_CRDR_PCITMOCTRL);
+}
+
+static void via_restore_pcictrlreg(struct via_crdr_mmc_host *host)
+{
+ struct pcictrlreg *pm_pcictrl_reg;
+ void __iomem *addrbase;
+
+ pm_pcictrl_reg = &(host->pm_pcictrl_reg);
+ addrbase = host->pcictrl_mmiobase;
+
+ writeb(pm_pcictrl_reg->pciclkgat_reg, addrbase + VIA_CRDR_PCICLKGATT);
+ writeb(pm_pcictrl_reg->pcisdclk_reg, addrbase + VIA_CRDR_PCISDCCLK);
+ writeb(pm_pcictrl_reg->pcidmaclk_reg, addrbase + VIA_CRDR_PCIDMACLK);
+ writeb(pm_pcictrl_reg->pciintctrl_reg, addrbase + VIA_CRDR_PCIINTCTRL);
+ writeb(pm_pcictrl_reg->pciintstatus_reg,
+ addrbase + VIA_CRDR_PCIINTSTATUS);
+ writeb(pm_pcictrl_reg->pcitmoctrl_reg, addrbase + VIA_CRDR_PCITMOCTRL);
+}
+
+static void via_save_sdcreg(struct via_crdr_mmc_host *host)
+{
+ struct sdhcreg *pm_sdhc_reg;
+ void __iomem *addrbase;
+
+ pm_sdhc_reg = &(host->pm_sdhc_reg);
+ addrbase = host->sdhc_mmiobase;
+
+ pm_sdhc_reg->sdcontrol_reg = readl(addrbase + VIA_CRDR_SDCTRL);
+ pm_sdhc_reg->sdcmdarg_reg = readl(addrbase + VIA_CRDR_SDCARG);
+ pm_sdhc_reg->sdbusmode_reg = readl(addrbase + VIA_CRDR_SDBUSMODE);
+ pm_sdhc_reg->sdblklen_reg = readl(addrbase + VIA_CRDR_SDBLKLEN);
+ pm_sdhc_reg->sdcurblkcnt_reg = readl(addrbase + VIA_CRDR_SDCURBLKCNT);
+ pm_sdhc_reg->sdintmask_reg = readl(addrbase + VIA_CRDR_SDINTMASK);
+ pm_sdhc_reg->sdstatus_reg = readl(addrbase + VIA_CRDR_SDSTATUS);
+ pm_sdhc_reg->sdrsptmo_reg = readl(addrbase + VIA_CRDR_SDRSPTMO);
+ pm_sdhc_reg->sdclksel_reg = readl(addrbase + VIA_CRDR_SDCLKSEL);
+ pm_sdhc_reg->sdextctrl_reg = readl(addrbase + VIA_CRDR_SDEXTCTRL);
+}
+
+static void via_restore_sdcreg(struct via_crdr_mmc_host *host)
+{
+ struct sdhcreg *pm_sdhc_reg;
+ void __iomem *addrbase;
+
+ pm_sdhc_reg = &(host->pm_sdhc_reg);
+ addrbase = host->sdhc_mmiobase;
+
+ writel(pm_sdhc_reg->sdcontrol_reg, addrbase + VIA_CRDR_SDCTRL);
+ writel(pm_sdhc_reg->sdcmdarg_reg, addrbase + VIA_CRDR_SDCARG);
+ writel(pm_sdhc_reg->sdbusmode_reg, addrbase + VIA_CRDR_SDBUSMODE);
+ writel(pm_sdhc_reg->sdblklen_reg, addrbase + VIA_CRDR_SDBLKLEN);
+ writel(pm_sdhc_reg->sdcurblkcnt_reg, addrbase + VIA_CRDR_SDCURBLKCNT);
+ writel(pm_sdhc_reg->sdintmask_reg, addrbase + VIA_CRDR_SDINTMASK);
+ writel(pm_sdhc_reg->sdstatus_reg, addrbase + VIA_CRDR_SDSTATUS);
+ writel(pm_sdhc_reg->sdrsptmo_reg, addrbase + VIA_CRDR_SDRSPTMO);
+ writel(pm_sdhc_reg->sdclksel_reg, addrbase + VIA_CRDR_SDCLKSEL);
+ writel(pm_sdhc_reg->sdextctrl_reg, addrbase + VIA_CRDR_SDEXTCTRL);
+}
+
+static void via_pwron_sleep(struct via_crdr_mmc_host *sdhost)
+{
+ if (sdhost->quirks & VIA_CRDR_QUIRK_300MS_PWRDELAY)
+ msleep(300);
+ else
+ msleep(3);
+}
+
+static void via_set_ddma(struct via_crdr_mmc_host *host,
+ dma_addr_t dmaaddr, u32 count, int dir, int enirq)
+{
+ void __iomem *addrbase;
+ u32 ctrl_data = 0;
+
+ if (enirq)
+ ctrl_data |= VIA_CRDR_DMACTRL_ENIRQ;
+
+ if (dir)
+ ctrl_data |= VIA_CRDR_DMACTRL_DIR;
+
+ addrbase = host->ddma_mmiobase;
+
+ writel(dmaaddr, addrbase + VIA_CRDR_DMABASEADD);
+ writel(count, addrbase + VIA_CRDR_DMACOUNTER);
+ writel(ctrl_data, addrbase + VIA_CRDR_DMACTRL);
+ writel(0x01, addrbase + VIA_CRDR_DMASTART);
+
+ /* It seems that our DMA cannot work normally with a 375 kHz clock */
+ /* FIXME: don't brute-force 8MHz but use PIO at 375kHz !! */
+ addrbase = host->pcictrl_mmiobase;
+ if (readb(addrbase + VIA_CRDR_PCISDCCLK) == PCI_CLK_375K) {
+ dev_info(host->mmc->parent, "forcing card speed to 8MHz\n");
+ writeb(PCI_CLK_8M, addrbase + VIA_CRDR_PCISDCCLK);
+ }
+}
+
+static void via_sdc_preparedata(struct via_crdr_mmc_host *host,
+ struct mmc_data *data)
+{
+ void __iomem *addrbase;
+ u32 blk_reg;
+ int count;
+
+ WARN_ON(host->data);
+
+ /* Sanity checks */
+ BUG_ON(data->blksz > host->mmc->max_blk_size);
+ BUG_ON(data->blocks > host->mmc->max_blk_count);
+
+ host->data = data;
+
+ count = dma_map_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
+ ((data->flags & MMC_DATA_READ) ?
+ PCI_DMA_FROMDEVICE : PCI_DMA_TODEVICE));
+ BUG_ON(count != 1);
+
+ via_set_ddma(host, sg_dma_address(data->sg), sg_dma_len(data->sg),
+ (data->flags & MMC_DATA_WRITE) ? 1 : 0, 1);
+
+ addrbase = host->sdhc_mmiobase;
+
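+ /*
+ * SDBLKLEN: program (block length - 1) into bits 0-10 and the block
+ * count into bits 16-31; keep GPI card detect and the host interrupt
+ * enabled.
+ */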
+ blk_reg = data->blksz - 1;
+ blk_reg |= VIA_CRDR_SDBLKLEN_GPIDET | VIA_CRDR_SDBLKLEN_INTEN;
+ blk_reg |= (data->blocks) << 16;
+
+ writel(blk_reg, addrbase + VIA_CRDR_SDBLKLEN);
+}
+
+static void via_sdc_get_response(struct via_crdr_mmc_host *host,
+ struct mmc_command *cmd)
+{
+ void __iomem *addrbase = host->sdhc_mmiobase;
+ u32 dwdata0 = readl(addrbase + VIA_CRDR_SDRESP0);
+ u32 dwdata1 = readl(addrbase + VIA_CRDR_SDRESP1);
+ u32 dwdata2 = readl(addrbase + VIA_CRDR_SDRESP2);
+ u32 dwdata3 = readl(addrbase + VIA_CRDR_SDRESP3);
+
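+ /*
+ * The response registers hold the bytes byte-swapped and shifted by
+ * one byte across SDRESP0..3; reassemble them into the order the MMC
+ * core expects.
+ */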
+ if (cmd->flags & MMC_RSP_136) {
+ cmd->resp[0] = ((u8) (dwdata1)) |
+ (((u8) (dwdata0 >> 24)) << 8) |
+ (((u8) (dwdata0 >> 16)) << 16) |
+ (((u8) (dwdata0 >> 8)) << 24);
+
+ cmd->resp[1] = ((u8) (dwdata2)) |
+ (((u8) (dwdata1 >> 24)) << 8) |
+ (((u8) (dwdata1 >> 16)) << 16) |
+ (((u8) (dwdata1 >> 8)) << 24);
+
+ cmd->resp[2] = ((u8) (dwdata3)) |
+ (((u8) (dwdata2 >> 24)) << 8) |
+ (((u8) (dwdata2 >> 16)) << 16) |
+ (((u8) (dwdata2 >> 8)) << 24);
+
+ cmd->resp[3] = 0xff |
+ ((((u8) (dwdata3 >> 24))) << 8) |
+ (((u8) (dwdata3 >> 16)) << 16) |
+ (((u8) (dwdata3 >> 8)) << 24);
+ } else {
+ dwdata0 >>= 8;
+ cmd->resp[0] = ((dwdata0 & 0xff) << 24) |
+ (((dwdata0 >> 8) & 0xff) << 16) |
+ (((dwdata0 >> 16) & 0xff) << 8) | (dwdata1 & 0xff);
+
+ dwdata1 >>= 8;
+ cmd->resp[1] = ((dwdata1 & 0xff) << 24) |
+ (((dwdata1 >> 8) & 0xff) << 16) |
+ (((dwdata1 >> 16) & 0xff) << 8);
+ }
+}
+
+static void via_sdc_send_command(struct via_crdr_mmc_host *host,
+ struct mmc_command *cmd)
+{
+ void __iomem *addrbase;
+ struct mmc_data *data;
+ u32 cmdctrl = 0;
+
+ WARN_ON(host->cmd);
+
+ data = cmd->data;
+ mod_timer(&host->timer, jiffies + HZ);
+ host->cmd = cmd;
+
+ /*Command index*/
+ cmdctrl = cmd->opcode << 8;
+
+ /*Response type*/
+ switch (mmc_resp_type(cmd)) {
+ case MMC_RSP_NONE:
+ cmdctrl |= VIA_CRDR_SDCTRL_RSP_NONE;
+ break;
+ case MMC_RSP_R1:
+ cmdctrl |= VIA_CRDR_SDCTRL_RSP_R1;
+ break;
+ case MMC_RSP_R1B:
+ cmdctrl |= VIA_CRDR_SDCTRL_RSP_R1B;
+ break;
+ case MMC_RSP_R2:
+ cmdctrl |= VIA_CRDR_SDCTRL_RSP_R2;
+ break;
+ case MMC_RSP_R3:
+ cmdctrl |= VIA_CRDR_SDCTRL_RSP_R3;
+ break;
+ default:
+ pr_err("%s: cmd->flags is not valid\n", mmc_hostname(host->mmc));
+ break;
+ }
+
+ if (!(cmd->data))
+ goto nodata;
+
+ via_sdc_preparedata(host, data);
+
+ /*Command control*/
+ if (data->blocks > 1) {
+ if (data->flags & MMC_DATA_WRITE) {
+ cmdctrl |= VIA_CRDR_SDCTRL_WRITE;
+ cmdctrl |= VIA_CRDR_SDCTRL_MULTI_WR;
+ } else {
+ cmdctrl |= VIA_CRDR_SDCTRL_MULTI_RD;
+ }
+ } else {
+ if (data->flags & MMC_DATA_WRITE) {
+ cmdctrl |= VIA_CRDR_SDCTRL_WRITE;
+ cmdctrl |= VIA_CRDR_SDCTRL_SINGLE_WR;
+ } else {
+ cmdctrl |= VIA_CRDR_SDCTRL_SINGLE_RD;
+ }
+ }
+
+nodata:
+ if (cmd == host->mrq->stop)
+ cmdctrl |= VIA_CRDR_SDCTRL_STOP;
+
+ cmdctrl |= VIA_CRDR_SDCTRL_START;
+
+ addrbase = host->sdhc_mmiobase;
+ writel(cmd->arg, addrbase + VIA_CRDR_SDCARG);
+ writel(cmdctrl, addrbase + VIA_CRDR_SDCTRL);
+}
+
+static void via_sdc_finish_data(struct via_crdr_mmc_host *host)
+{
+ struct mmc_data *data;
+
+ BUG_ON(!host->data);
+
+ data = host->data;
+ host->data = NULL;
+
+ if (data->error)
+ data->bytes_xfered = 0;
+ else
+ data->bytes_xfered = data->blocks * data->blksz;
+
+ dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
+ ((data->flags & MMC_DATA_READ) ?
+ PCI_DMA_FROMDEVICE : PCI_DMA_TODEVICE));
+
+ if (data->stop)
+ via_sdc_send_command(host, data->stop);
+ else
+ tasklet_schedule(&host->finish_tasklet);
+}
+
+static void via_sdc_finish_command(struct via_crdr_mmc_host *host)
+{
+ via_sdc_get_response(host, host->cmd);
+
+ host->cmd->error = 0;
+
+ if (!host->cmd->data)
+ tasklet_schedule(&host->finish_tasklet);
+
+ host->cmd = NULL;
+}
+
+static void via_sdc_request(struct mmc_host *mmc, struct mmc_request *mrq)
+{
+ void __iomem *addrbase;
+ struct via_crdr_mmc_host *host;
+ unsigned long flags;
+ u16 status;
+
+ host = mmc_priv(mmc);
+
+ spin_lock_irqsave(&host->lock, flags);
+
+ addrbase = host->pcictrl_mmiobase;
+ writeb(VIA_CRDR_PCIDMACLK_SDC, addrbase + VIA_CRDR_PCIDMACLK);
+
+ status = readw(host->sdhc_mmiobase + VIA_CRDR_SDSTATUS);
+ status &= VIA_CRDR_SDSTS_W1C_MASK;
+ writew(status, host->sdhc_mmiobase + VIA_CRDR_SDSTATUS);
+
+ WARN_ON(host->mrq != NULL);
+ host->mrq = mrq;
+
+ status = readw(host->sdhc_mmiobase + VIA_CRDR_SDSTATUS);
+ if (!(status & VIA_CRDR_SDSTS_SLOTG) || host->reject) {
+ host->mrq->cmd->error = -ENOMEDIUM;
+ tasklet_schedule(&host->finish_tasklet);
+ } else {
+ via_sdc_send_command(host, mrq->cmd);
+ }
+
+ mmiowb();
+ spin_unlock_irqrestore(&host->lock, flags);
+}
+
+static void via_sdc_set_power(struct via_crdr_mmc_host *host,
+ unsigned short power, unsigned int on)
+{
+ unsigned long flags;
+ u8 gatt;
+
+ spin_lock_irqsave(&host->lock, flags);
+
+ host->power = (1 << power);
+
+ gatt = readb(host->pcictrl_mmiobase + VIA_CRDR_PCICLKGATT);
+ if (host->power == MMC_VDD_165_195)
+ gatt &= ~VIA_CRDR_PCICLKGATT_3V3;
+ else
+ gatt |= VIA_CRDR_PCICLKGATT_3V3;
+ if (on)
+ gatt |= VIA_CRDR_PCICLKGATT_PAD_PWRON;
+ else
+ gatt &= ~VIA_CRDR_PCICLKGATT_PAD_PWRON;
+ writeb(gatt, host->pcictrl_mmiobase + VIA_CRDR_PCICLKGATT);
+
+ mmiowb();
+ spin_unlock_irqrestore(&host->lock, flags);
+
+ via_pwron_sleep(host);
+}
+
+static void via_sdc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
+{
+ struct via_crdr_mmc_host *host;
+ unsigned long flags;
+ void __iomem *addrbase;
+ u32 org_data, sdextctrl;
+ u8 clock;
+
+ host = mmc_priv(mmc);
+
+ spin_lock_irqsave(&host->lock, flags);
+
+ addrbase = host->sdhc_mmiobase;
+ org_data = readl(addrbase + VIA_CRDR_SDBUSMODE);
+ sdextctrl = readl(addrbase + VIA_CRDR_SDEXTCTRL);
+
+ if (ios->bus_width == MMC_BUS_WIDTH_1)
+ org_data &= ~VIA_CRDR_SDMODE_4BIT;
+ else
+ org_data |= VIA_CRDR_SDMODE_4BIT;
+
+ if (ios->power_mode == MMC_POWER_OFF)
+ org_data &= ~VIA_CRDR_SDMODE_CLK_ON;
+ else
+ org_data |= VIA_CRDR_SDMODE_CLK_ON;
+
+ if (ios->timing == MMC_TIMING_SD_HS)
+ sdextctrl |= VIA_CRDR_SDEXTCTRL_HISPD;
+ else
+ sdextctrl &= ~VIA_CRDR_SDEXTCTRL_HISPD;
+
+ writel(org_data, addrbase + VIA_CRDR_SDBUSMODE);
+ writel(sdextctrl, addrbase + VIA_CRDR_SDEXTCTRL);
+
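+ /*
+ * Pick the highest supported controller clock that does not exceed
+ * the requested rate, falling back to 375 kHz.
+ */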
+ if (ios->clock >= 48000000)
+ clock = PCI_CLK_48M;
+ else if (ios->clock >= 33000000)
+ clock = PCI_CLK_33M;
+ else if (ios->clock >= 24000000)
+ clock = PCI_CLK_24M;
+ else if (ios->clock >= 16000000)
+ clock = PCI_CLK_16M;
+ else if (ios->clock >= 12000000)
+ clock = PCI_CLK_12M;
+ else if (ios->clock >= 8000000)
+ clock = PCI_CLK_8M;
+ else
+ clock = PCI_CLK_375K;
+
+ addrbase = host->pcictrl_mmiobase;
+ if (readb(addrbase + VIA_CRDR_PCISDCCLK) != clock)
+ writeb(clock, addrbase + VIA_CRDR_PCISDCCLK);
+
+ mmiowb();
+ spin_unlock_irqrestore(&host->lock, flags);
+
+ if (ios->power_mode != MMC_POWER_OFF)
+ via_sdc_set_power(host, ios->vdd, 1);
+ else
+ via_sdc_set_power(host, ios->vdd, 0);
+}
+
+static int via_sdc_get_ro(struct mmc_host *mmc)
+{
+ struct via_crdr_mmc_host *host;
+ unsigned long flags;
+ u16 status;
+
+ host = mmc_priv(mmc);
+
+ spin_lock_irqsave(&host->lock, flags);
+
+ status = readw(host->sdhc_mmiobase + VIA_CRDR_SDSTATUS);
+
+ spin_unlock_irqrestore(&host->lock, flags);
+
+ return !(status & VIA_CRDR_SDSTS_WP);
+}
+
+static const struct mmc_host_ops via_sdc_ops = {
+ .request = via_sdc_request,
+ .set_ios = via_sdc_set_ios,
+ .get_ro = via_sdc_get_ro,
+};
+
+static void via_reset_pcictrl(struct via_crdr_mmc_host *host)
+{
+ void __iomem *addrbase;
+ unsigned long flags;
+ u8 gatt;
+
+ addrbase = host->pcictrl_mmiobase;
+
+ spin_lock_irqsave(&host->lock, flags);
+
+ via_save_pcictrlreg(host);
+ via_save_sdcreg(host);
+
+ spin_unlock_irqrestore(&host->lock, flags);
+
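+ /*
+ * Re-apply pad power at the requested voltage, wait for it to
+ * stabilize, then de-assert the soft reset before restoring the
+ * saved registers.
+ */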
+ gatt = VIA_CRDR_PCICLKGATT_PAD_PWRON;
+ if (host->power == MMC_VDD_165_195)
+ gatt &= ~VIA_CRDR_PCICLKGATT_3V3;
+ else
+ gatt |= VIA_CRDR_PCICLKGATT_3V3;
+ writeb(gatt, host->pcictrl_mmiobase + VIA_CRDR_PCICLKGATT);
+ via_pwron_sleep(host);
+ gatt |= VIA_CRDR_PCICLKGATT_SFTRST;
+ writeb(gatt, host->pcictrl_mmiobase + VIA_CRDR_PCICLKGATT);
+ msleep(3);
+
+ spin_lock_irqsave(&host->lock, flags);
+
+ via_restore_pcictrlreg(host);
+ via_restore_sdcreg(host);
+
+ mmiowb();
+ spin_unlock_irqrestore(&host->lock, flags);
+}
+
+static void via_sdc_cmd_isr(struct via_crdr_mmc_host *host, u16 intmask)
+{
+ BUG_ON(intmask == 0);
+
+ if (!host->cmd) {
+ pr_err("%s: Got command interrupt 0x%x even "
+ "though no command operation was in progress.\n",
+ mmc_hostname(host->mmc), intmask);
+ return;
+ }
+
+ if (intmask & VIA_CRDR_SDSTS_CRTO)
+ host->cmd->error = -ETIMEDOUT;
+ else if (intmask & VIA_CRDR_SDSTS_SC)
+ host->cmd->error = -EILSEQ;
+
+ if (host->cmd->error)
+ tasklet_schedule(&host->finish_tasklet);
+ else if (intmask & VIA_CRDR_SDSTS_CRD)
+ via_sdc_finish_command(host);
+}
+
+static void via_sdc_data_isr(struct via_crdr_mmc_host *host, u16 intmask)
+{
+ BUG_ON(intmask == 0);
+
+ if (intmask & VIA_CRDR_SDSTS_DT)
+ host->data->error = -ETIMEDOUT;
+ else if (intmask & (VIA_CRDR_SDSTS_RC | VIA_CRDR_SDSTS_WC))
+ host->data->error = -EILSEQ;
+
+ via_sdc_finish_data(host);
+}
+
+static irqreturn_t via_sdc_isr(int irq, void *dev_id)
+{
+ struct via_crdr_mmc_host *sdhost = dev_id;
+ void __iomem *addrbase;
+ u8 pci_status;
+ u16 sd_status;
+ irqreturn_t result;
+
+ if (!sdhost)
+ return IRQ_NONE;
+
+ spin_lock(&sdhost->lock);
+
+ addrbase = sdhost->pcictrl_mmiobase;
+ pci_status = readb(addrbase + VIA_CRDR_PCIINTSTATUS);
+ if (!(pci_status & VIA_CRDR_PCIINTSTATUS_SDC)) {
+ result = IRQ_NONE;
+ goto out;
+ }
+
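+ /*
+ * Mask down to the interrupts we actually service; card insertion
+ * and removal is handed off to the card-detect work below.
+ */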
+ addrbase = sdhost->sdhc_mmiobase;
+ sd_status = readw(addrbase + VIA_CRDR_SDSTATUS);
+ sd_status &= VIA_CRDR_SDSTS_INT_MASK;
+ sd_status &= ~VIA_CRDR_SDSTS_IGN_MASK;
+ if (!sd_status) {
+ result = IRQ_NONE;
+ goto out;
+ }
+
+ if (sd_status & VIA_CRDR_SDSTS_CIR) {
+ writew(sd_status & VIA_CRDR_SDSTS_CIR,
+ addrbase + VIA_CRDR_SDSTATUS);
+
+ schedule_work(&sdhost->carddet_work);
+ }
+
+ sd_status &= ~VIA_CRDR_SDSTS_CIR;
+ if (sd_status & VIA_CRDR_SDSTS_CMD_MASK) {
+ writew(sd_status & VIA_CRDR_SDSTS_CMD_MASK,
+ addrbase + VIA_CRDR_SDSTATUS);
+ via_sdc_cmd_isr(sdhost, sd_status & VIA_CRDR_SDSTS_CMD_MASK);
+ }
+ if (sd_status & VIA_CRDR_SDSTS_DATA_MASK) {
+ writew(sd_status & VIA_CRDR_SDSTS_DATA_MASK,
+ addrbase + VIA_CRDR_SDSTATUS);
+ via_sdc_data_isr(sdhost, sd_status & VIA_CRDR_SDSTS_DATA_MASK);
+ }
+
+ sd_status &= ~(VIA_CRDR_SDSTS_CMD_MASK | VIA_CRDR_SDSTS_DATA_MASK);
+ if (sd_status) {
+ pr_err("%s: Unexpected interrupt 0x%x\n",
+ mmc_hostname(sdhost->mmc), sd_status);
+ writew(sd_status, addrbase + VIA_CRDR_SDSTATUS);
+ }
+
+ result = IRQ_HANDLED;
+
+ mmiowb();
+out:
+ spin_unlock(&sdhost->lock);
+
+ return result;
+}
+
+static void via_sdc_timeout(unsigned long ulongdata)
+{
+ struct via_crdr_mmc_host *sdhost;
+ unsigned long flags;
+
+ sdhost = (struct via_crdr_mmc_host *)ulongdata;
+
+ spin_lock_irqsave(&sdhost->lock, flags);
+
+ if (sdhost->mrq) {
+ pr_err("%s: Timeout waiting for hardware interrupt. "
+ "cmd: 0x%x\n", mmc_hostname(sdhost->mmc),
+ sdhost->mrq->cmd->opcode);
+
+ if (sdhost->data) {
+ writel(VIA_CRDR_DMACTRL_SFTRST,
+ sdhost->ddma_mmiobase + VIA_CRDR_DMACTRL);
+ sdhost->data->error = -ETIMEDOUT;
+ via_sdc_finish_data(sdhost);
+ } else {
+ if (sdhost->cmd)
+ sdhost->cmd->error = -ETIMEDOUT;
+ else
+ sdhost->mrq->cmd->error = -ETIMEDOUT;
+ tasklet_schedule(&sdhost->finish_tasklet);
+ }
+ }
+
+ mmiowb();
+ spin_unlock_irqrestore(&sdhost->lock, flags);
+}
+
+static void via_sdc_tasklet_finish(unsigned long param)
+{
+ struct via_crdr_mmc_host *host;
+ unsigned long flags;
+ struct mmc_request *mrq;
+
+ host = (struct via_crdr_mmc_host *)param;
+
+ spin_lock_irqsave(&host->lock, flags);
+
+ del_timer(&host->timer);
+ mrq = host->mrq;
+ host->mrq = NULL;
+ host->cmd = NULL;
+ host->data = NULL;
+
+ spin_unlock_irqrestore(&host->lock, flags);
+
+ mmc_request_done(host->mmc, mrq);
+}
+
+static void via_sdc_card_detect(struct work_struct *work)
+{
+ struct via_crdr_mmc_host *host;
+ void __iomem *addrbase;
+ unsigned long flags;
+ u16 status;
+
+ host = container_of(work, struct via_crdr_mmc_host, carddet_work);
+
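+ /*
+ * Quiesce DMA before looking at the slot; if the card is gone, fail
+ * any request in flight and reset the controller.
+ */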
+ addrbase = host->ddma_mmiobase;
+ writel(VIA_CRDR_DMACTRL_SFTRST, addrbase + VIA_CRDR_DMACTRL);
+
+ spin_lock_irqsave(&host->lock, flags);
+
+ addrbase = host->pcictrl_mmiobase;
+ writeb(VIA_CRDR_PCIDMACLK_SDC, addrbase + VIA_CRDR_PCIDMACLK);
+
+ addrbase = host->sdhc_mmiobase;
+ status = readw(addrbase + VIA_CRDR_SDSTATUS);
+ if (!(status & VIA_CRDR_SDSTS_SLOTG)) {
+ if (host->mrq) {
+ pr_err("%s: Card removed during transfer!\n",
+ mmc_hostname(host->mmc));
+ host->mrq->cmd->error = -ENOMEDIUM;
+ tasklet_schedule(&host->finish_tasklet);
+ }
+
+ mmiowb();
+ spin_unlock_irqrestore(&host->lock, flags);
+
+ via_reset_pcictrl(host);
+
+ spin_lock_irqsave(&host->lock, flags);
+ }
+
+ mmiowb();
+ spin_unlock_irqrestore(&host->lock, flags);
+
+ via_print_pcictrl(host);
+ via_print_sdchc(host);
+
+ mmc_detect_change(host->mmc, msecs_to_jiffies(500));
+}
+
+static void via_init_mmc_host(struct via_crdr_mmc_host *host)
+{
+ struct mmc_host *mmc = host->mmc;
+ void __iomem *addrbase;
+ u32 lenreg;
+ u32 status;
+
+ init_timer(&host->timer);
+ host->timer.data = (unsigned long)host;
+ host->timer.function = via_sdc_timeout;
+
+ spin_lock_init(&host->lock);
+
+ mmc->f_min = VIA_CRDR_MIN_CLOCK;
+ mmc->f_max = VIA_CRDR_MAX_CLOCK;
+ mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34 | MMC_VDD_165_195;
+ mmc->caps = MMC_CAP_4_BIT_DATA | MMC_CAP_SD_HIGHSPEED;
+ mmc->ops = &via_sdc_ops;
+
+ /*Hardware cannot do scatter lists*/
+ mmc->max_hw_segs = 1;
+ mmc->max_phys_segs = 1;
+
+ mmc->max_blk_size = VIA_CRDR_MAX_BLOCK_LENGTH;
+ mmc->max_blk_count = VIA_CRDR_MAX_BLOCK_COUNT;
+
+ mmc->max_seg_size = mmc->max_blk_size * mmc->max_blk_count;
+ mmc->max_req_size = mmc->max_seg_size;
+
+ INIT_WORK(&host->carddet_work, via_sdc_card_detect);
+
+ tasklet_init(&host->finish_tasklet, via_sdc_tasklet_finish,
+ (unsigned long)host);
+
+ addrbase = host->sdhc_mmiobase;
+ writel(0x0, addrbase + VIA_CRDR_SDINTMASK);
+ msleep(1);
+
+ lenreg = VIA_CRDR_SDBLKLEN_GPIDET | VIA_CRDR_SDBLKLEN_INTEN;
+ writel(lenreg, addrbase + VIA_CRDR_SDBLKLEN);
+
+ status = readw(addrbase + VIA_CRDR_SDSTATUS);
+ status &= VIA_CRDR_SDSTS_W1C_MASK;
+ writew(status, addrbase + VIA_CRDR_SDSTATUS);
+
+ status = readw(addrbase + VIA_CRDR_SDSTATUS2);
+ status |= VIA_CRDR_SDSTS_CFE;
+ writew(status, addrbase + VIA_CRDR_SDSTATUS2);
+
+ writeb(0x0, addrbase + VIA_CRDR_SDEXTCTRL);
+
+ writel(VIA_CRDR_SDACTIVE_INTMASK, addrbase + VIA_CRDR_SDINTMASK);
+ msleep(1);
+}
+
+static int __devinit via_sd_probe(struct pci_dev *pcidev,
+ const struct pci_device_id *id)
+{
+ struct mmc_host *mmc;
+ struct via_crdr_mmc_host *sdhost;
+ u32 base, len;
+ u8 rev, gatt;
+ int ret;
+
+ pci_read_config_byte(pcidev, PCI_CLASS_REVISION, &rev);
+ pr_info(DRV_NAME
+ ": VIA SDMMC controller found at %s [%04x:%04x] (rev %x)\n",
+ pci_name(pcidev), (int)pcidev->vendor, (int)pcidev->device,
+ (int)rev);
+
+ ret = pci_enable_device(pcidev);
+ if (ret)
+ return ret;
+
+ ret = pci_request_regions(pcidev, DRV_NAME);
+ if (ret)
+ goto disable;
+
+ pci_write_config_byte(pcidev, VIA_CRDR_PCI_WORK_MODE, 0);
+ pci_write_config_byte(pcidev, VIA_CRDR_PCI_DBG_MODE, 0);
+
+ mmc = mmc_alloc_host(sizeof(struct via_crdr_mmc_host), &pcidev->dev);
+ if (!mmc) {
+ ret = -ENOMEM;
+ goto release;
+ }
+
+ sdhost = mmc_priv(mmc);
+ sdhost->mmc = mmc;
+ dev_set_drvdata(&pcidev->dev, sdhost);
+
+ len = pci_resource_len(pcidev, 0);
+ base = pci_resource_start(pcidev, 0);
+ sdhost->mmiobase = ioremap_nocache(base, len);
+ if (!sdhost->mmiobase) {
+ ret = -ENOMEM;
+ goto free_mmc_host;
+ }
+
+ sdhost->sdhc_mmiobase =
+ sdhost->mmiobase + VIA_CRDR_SDC_OFF;
+ sdhost->ddma_mmiobase =
+ sdhost->mmiobase + VIA_CRDR_DDMA_OFF;
+ sdhost->pcictrl_mmiobase =
+ sdhost->mmiobase + VIA_CRDR_PCICTRL_OFF;
+
+ sdhost->power = MMC_VDD_165_195;
+
+ gatt = VIA_CRDR_PCICLKGATT_3V3 | VIA_CRDR_PCICLKGATT_PAD_PWRON;
+ writeb(gatt, sdhost->pcictrl_mmiobase + VIA_CRDR_PCICLKGATT);
+ via_pwron_sleep(sdhost);
+ gatt |= VIA_CRDR_PCICLKGATT_SFTRST;
+ writeb(gatt, sdhost->pcictrl_mmiobase + VIA_CRDR_PCICLKGATT);
+ msleep(3);
+
+ via_init_mmc_host(sdhost);
+
+ ret =
+ request_irq(pcidev->irq, via_sdc_isr, IRQF_SHARED, DRV_NAME,
+ sdhost);
+ if (ret)
+ goto unmap;
+
+ writeb(VIA_CRDR_PCIINTCTRL_SDCIRQEN,
+ sdhost->pcictrl_mmiobase + VIA_CRDR_PCIINTCTRL);
+ writeb(VIA_CRDR_PCITMOCTRL_1024MS,
+ sdhost->pcictrl_mmiobase + VIA_CRDR_PCITMOCTRL);
+
+ /* device-specific quirks */
+ if (pcidev->subsystem_vendor == PCI_VENDOR_ID_LENOVO &&
+ pcidev->subsystem_device == 0x3891)
+ sdhost->quirks = VIA_CRDR_QUIRK_300MS_PWRDELAY;
+
+ mmc_add_host(mmc);
+
+ return 0;
+
+unmap:
+ iounmap(sdhost->mmiobase);
+free_mmc_host:
+ dev_set_drvdata(&pcidev->dev, NULL);
+ mmc_free_host(mmc);
+release:
+ pci_release_regions(pcidev);
+disable:
+ pci_disable_device(pcidev);
+
+ return ret;
+}
+
+static void __devexit via_sd_remove(struct pci_dev *pcidev)
+{
+ struct via_crdr_mmc_host *sdhost = pci_get_drvdata(pcidev);
+ unsigned long flags;
+ u8 gatt;
+
+ spin_lock_irqsave(&sdhost->lock, flags);
+
+ /* Ensure we don't accept more commands from mmc layer */
+ sdhost->reject = 1;
+
+ /* Disable generating further interrupts */
+ writeb(0x0, sdhost->pcictrl_mmiobase + VIA_CRDR_PCIINTCTRL);
+ mmiowb();
+
+ if (sdhost->mrq) {
+ printk(KERN_ERR "%s: Controller removed during "
+ "transfer\n", mmc_hostname(sdhost->mmc));
+
+ /* make sure all DMA is stopped */
+ writel(VIA_CRDR_DMACTRL_SFTRST,
+ sdhost->ddma_mmiobase + VIA_CRDR_DMACTRL);
+ mmiowb();
+ sdhost->mrq->cmd->error = -ENOMEDIUM;
+ if (sdhost->mrq->stop)
+ sdhost->mrq->stop->error = -ENOMEDIUM;
+ tasklet_schedule(&sdhost->finish_tasklet);
+ }
+ spin_unlock_irqrestore(&sdhost->lock, flags);
+
+ mmc_remove_host(sdhost->mmc);
+
+ free_irq(pcidev->irq, sdhost);
+
+ del_timer_sync(&sdhost->timer);
+
+ tasklet_kill(&sdhost->finish_tasklet);
+
+ /* switch off power */
+ gatt = readb(sdhost->pcictrl_mmiobase + VIA_CRDR_PCICLKGATT);
+ gatt &= ~VIA_CRDR_PCICLKGATT_PAD_PWRON;
+ writeb(gatt, sdhost->pcictrl_mmiobase + VIA_CRDR_PCICLKGATT);
+
+ iounmap(sdhost->mmiobase);
+ dev_set_drvdata(&pcidev->dev, NULL);
+ mmc_free_host(sdhost->mmc);
+ pci_release_regions(pcidev);
+ pci_disable_device(pcidev);
+
+ pr_info(DRV_NAME
+ ": VIA SDMMC controller at %s [%04x:%04x] has been removed\n",
+ pci_name(pcidev), (int)pcidev->vendor, (int)pcidev->device);
+}
+
+#ifdef CONFIG_PM
+
+static void via_init_sdc_pm(struct via_crdr_mmc_host *host)
+{
+ struct sdhcreg *pm_sdhcreg;
+ void __iomem *addrbase;
+ u32 lenreg;
+ u16 status;
+
+ pm_sdhcreg = &(host->pm_sdhc_reg);
+ addrbase = host->sdhc_mmiobase;
+
+ writel(0x0, addrbase + VIA_CRDR_SDINTMASK);
+
+ lenreg = VIA_CRDR_SDBLKLEN_GPIDET | VIA_CRDR_SDBLKLEN_INTEN;
+ writel(lenreg, addrbase + VIA_CRDR_SDBLKLEN);
+
+ status = readw(addrbase + VIA_CRDR_SDSTATUS);
+ status &= VIA_CRDR_SDSTS_W1C_MASK;
+ writew(status, addrbase + VIA_CRDR_SDSTATUS);
+
+ status = readw(addrbase + VIA_CRDR_SDSTATUS2);
+ status |= VIA_CRDR_SDSTS_CFE;
+ writew(status, addrbase + VIA_CRDR_SDSTATUS2);
+
+ writel(pm_sdhcreg->sdcontrol_reg, addrbase + VIA_CRDR_SDCTRL);
+ writel(pm_sdhcreg->sdcmdarg_reg, addrbase + VIA_CRDR_SDCARG);
+ writel(pm_sdhcreg->sdintmask_reg, addrbase + VIA_CRDR_SDINTMASK);
+ writel(pm_sdhcreg->sdrsptmo_reg, addrbase + VIA_CRDR_SDRSPTMO);
+ writel(pm_sdhcreg->sdclksel_reg, addrbase + VIA_CRDR_SDCLKSEL);
+ writel(pm_sdhcreg->sdextctrl_reg, addrbase + VIA_CRDR_SDEXTCTRL);
+
+ via_print_pcictrl(host);
+ via_print_sdchc(host);
+}
+
+static int via_sd_suspend(struct pci_dev *pcidev, pm_message_t state)
+{
+ struct via_crdr_mmc_host *host;
+ int ret = 0;
+
+ host = pci_get_drvdata(pcidev);
+
+ via_save_pcictrlreg(host);
+ via_save_sdcreg(host);
+
+ ret = mmc_suspend_host(host->mmc, state);
+
+ pci_save_state(pcidev);
+ pci_enable_wake(pcidev, pci_choose_state(pcidev, state), 0);
+ pci_disable_device(pcidev);
+ pci_set_power_state(pcidev, pci_choose_state(pcidev, state));
+
+ return ret;
+}
+
+static int via_sd_resume(struct pci_dev *pcidev)
+{
+ struct via_crdr_mmc_host *sdhost;
+ int ret = 0;
+ u8 gatt;
+
+ sdhost = pci_get_drvdata(pcidev);
+
+ gatt = VIA_CRDR_PCICLKGATT_PAD_PWRON;
+ if (sdhost->power == MMC_VDD_165_195)
+ gatt &= ~VIA_CRDR_PCICLKGATT_3V3;
+ else
+ gatt |= VIA_CRDR_PCICLKGATT_3V3;
+ writeb(gatt, sdhost->pcictrl_mmiobase + VIA_CRDR_PCICLKGATT);
+ via_pwron_sleep(sdhost);
+ gatt |= VIA_CRDR_PCICLKGATT_SFTRST;
+ writeb(gatt, sdhost->pcictrl_mmiobase + VIA_CRDR_PCICLKGATT);
+ msleep(3);
+
+ msleep(100);
+
+ pci_set_power_state(pcidev, PCI_D0);
+ pci_restore_state(pcidev);
+ ret = pci_enable_device(pcidev);
+ if (ret)
+ return ret;
+
+ via_restore_pcictrlreg(sdhost);
+ via_init_sdc_pm(sdhost);
+
+ ret = mmc_resume_host(sdhost->mmc);
+
+ return ret;
+}
+
+#else /* CONFIG_PM */
+
+#define via_sd_suspend NULL
+#define via_sd_resume NULL
+
+#endif /* CONFIG_PM */
+
+static struct pci_driver via_sd_driver = {
+ .name = DRV_NAME,
+ .id_table = via_ids,
+ .probe = via_sd_probe,
+ .remove = __devexit_p(via_sd_remove),
+ .suspend = via_sd_suspend,
+ .resume = via_sd_resume,
+};
+
+static int __init via_sd_drv_init(void)
+{
+ pr_info(DRV_NAME ": VIA SD/MMC Card Reader driver "
+ "(C) 2008 VIA Technologies, Inc.\n");
+
+ return pci_register_driver(&via_sd_driver);
+}
+
+static void __exit via_sd_drv_exit(void)
+{
+ pci_unregister_driver(&via_sd_driver);
+}
+
+module_init(via_sd_drv_init);
+module_exit(via_sd_drv_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("VIA Technologies Inc.");
+MODULE_DESCRIPTION("VIA SD/MMC Card Interface driver");
diff --git a/drivers/mtd/chips/cfi_cmdset_0001.c b/drivers/mtd/chips/cfi_cmdset_0001.c
index c240454..8664fee 100644
--- a/drivers/mtd/chips/cfi_cmdset_0001.c
+++ b/drivers/mtd/chips/cfi_cmdset_0001.c
@@ -46,6 +46,7 @@
#define MANUFACTURER_INTEL 0x0089
#define I82802AB 0x00ad
#define I82802AC 0x00ac
+#define PF38F4476 0x881c
#define MANUFACTURER_ST 0x0020
#define M50LPW080 0x002F
#define M50FLW080A 0x0080
@@ -315,10 +316,20 @@
{ 0, 0, NULL, NULL }
};
+static void cfi_fixup_major_minor(struct cfi_private *cfi,
+ struct cfi_pri_intelext *extp)
+{
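+ /*
+ * The Intel PF38F4476 reports extended-query minor version '3';
+ * treat it as '1' so the extension table is parsed with the '1'
+ * layout.
+ */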
+ if (cfi->mfr == MANUFACTURER_INTEL &&
+ cfi->id == PF38F4476 && extp->MinorVersion == '3')
+ extp->MinorVersion = '1';
+}
+
static inline struct cfi_pri_intelext *
read_pri_intelext(struct map_info *map, __u16 adr)
{
+ struct cfi_private *cfi = map->fldrv_priv;
struct cfi_pri_intelext *extp;
+ unsigned int extra_size = 0;
unsigned int extp_size = sizeof(*extp);
again:
@@ -326,6 +337,8 @@
if (!extp)
return NULL;
+ cfi_fixup_major_minor(cfi, extp);
+
if (extp->MajorVersion != '1' ||
(extp->MinorVersion < '0' || extp->MinorVersion > '5')) {
printk(KERN_ERR " Unknown Intel/Sharp Extended Query "
@@ -340,19 +353,24 @@
extp->BlkStatusRegMask = le16_to_cpu(extp->BlkStatusRegMask);
extp->ProtRegAddr = le16_to_cpu(extp->ProtRegAddr);
- if (extp->MajorVersion == '1' && extp->MinorVersion >= '3') {
- unsigned int extra_size = 0;
- int nb_parts, i;
+ if (extp->MinorVersion >= '0') {
+ extra_size = 0;
/* Protection Register info */
extra_size += (extp->NumProtectionFields - 1) *
sizeof(struct cfi_intelext_otpinfo);
+ }
+ if (extp->MinorVersion >= '1') {
/* Burst Read info */
extra_size += 2;
if (extp_size < sizeof(*extp) + extra_size)
goto need_more;
- extra_size += extp->extra[extra_size-1];
+ extra_size += extp->extra[extra_size - 1];
+ }
+
+ if (extp->MinorVersion >= '3') {
+ int nb_parts, i;
/* Number of hardware-partitions */
extra_size += 1;
diff --git a/drivers/mtd/chips/jedec_probe.c b/drivers/mtd/chips/jedec_probe.c
index e824b9b..ccc4cfc 100644
--- a/drivers/mtd/chips/jedec_probe.c
+++ b/drivers/mtd/chips/jedec_probe.c
@@ -166,6 +166,7 @@
#define SST39LF040 0x00D7
#define SST39SF010A 0x00B5
#define SST39SF020A 0x00B6
+#define SST39SF040 0x00B7
#define SST49LF004B 0x0060
#define SST49LF040B 0x0050
#define SST49LF008A 0x005a
@@ -1393,6 +1394,18 @@
}
}, {
.mfr_id = MANUFACTURER_SST,
+ .dev_id = SST39SF040,
+ .name = "SST 39SF040",
+ .devtypes = CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x5555_0x2AAA,
+ .dev_size = SIZE_512KiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 1,
+ .regions = {
+ ERASEINFO(0x01000,128),
+ }
+ }, {
+ .mfr_id = MANUFACTURER_SST,
.dev_id = SST49LF040B,
.name = "SST 49LF040B",
.devtypes = CFI_DEVICETYPE_X8,
diff --git a/drivers/mtd/devices/m25p80.c b/drivers/mtd/devices/m25p80.c
index cc6369e..59c4612 100644
--- a/drivers/mtd/devices/m25p80.c
+++ b/drivers/mtd/devices/m25p80.c
@@ -500,6 +500,9 @@
{ "at26df161a", 0x1f4601, 0, 64 * 1024, 32, SECT_4K, },
{ "at26df321", 0x1f4701, 0, 64 * 1024, 64, SECT_4K, },
+ /* Macronix */
+ { "mx25l12805d", 0xc22018, 0, 64 * 1024, 256, },
+
/* Spansion -- single (large) sector size only, at least
* for the chips listed here (without boot sectors).
*/
@@ -528,6 +531,7 @@
{ "m25p64", 0x202017, 0, 64 * 1024, 128, },
{ "m25p128", 0x202018, 0, 256 * 1024, 64, },
+ { "m45pe10", 0x204011, 0, 64 * 1024, 2, },
{ "m45pe80", 0x204014, 0, 64 * 1024, 16, },
{ "m45pe16", 0x204015, 0, 64 * 1024, 32, },
diff --git a/drivers/mtd/maps/Kconfig b/drivers/mtd/maps/Kconfig
index 82923bd..0b98654 100644
--- a/drivers/mtd/maps/Kconfig
+++ b/drivers/mtd/maps/Kconfig
@@ -105,15 +105,6 @@
default "0x02000000"
depends on MSP_FLASH_MAP_LIMIT_32M
-config MTD_PMC_MSP_RAMROOT
- tristate "Embedded RAM block device for root on PMC-Sierra MSP"
- depends on PMC_MSP_EMBEDDED_ROOTFS && \
- (MTD_BLOCK || MTD_BLOCK_RO) && \
- MTD_RAM
- help
- This provides support for the embedded root file system
- on PMC MSP devices. This memory is mapped as a MTD block device.
-
config MTD_SUN_UFLASH
tristate "Sun Microsystems userflash support"
depends on SPARC && MTD_CFI && PCI
@@ -270,7 +261,7 @@
config MTD_DILNETPC
tristate "CFI Flash device mapped on DIL/Net PC"
- depends on X86 && MTD_CONCAT && MTD_PARTITIONS && MTD_CFI_INTELEXT
+ depends on X86 && MTD_CONCAT && MTD_PARTITIONS && MTD_CFI_INTELEXT && BROKEN
help
MTD map driver for SSV DIL/Net PC Boards "DNP" and "ADNP".
For details, see <http://www.ssv-embedded.de/ssv/pc104/p169.htm>
@@ -501,7 +492,7 @@
If compiled as a module, it will be called bfin-async-flash.
config MTD_UCLINUX
- tristate "Generic uClinux RAM/ROM filesystem support"
+ bool "Generic uClinux RAM/ROM filesystem support"
depends on MTD_PARTITIONS && MTD_RAM && !MMU
help
Map driver to support image based filesystems for uClinux.
diff --git a/drivers/mtd/maps/Makefile b/drivers/mtd/maps/Makefile
index 2dbc1be..8bae7f9 100644
--- a/drivers/mtd/maps/Makefile
+++ b/drivers/mtd/maps/Makefile
@@ -25,7 +25,6 @@
obj-$(CONFIG_MTD_PHYSMAP) += physmap.o
obj-$(CONFIG_MTD_PHYSMAP_OF) += physmap_of.o
obj-$(CONFIG_MTD_PMC_MSP_EVM) += pmcmsp-flash.o
-obj-$(CONFIG_MTD_PMC_MSP_RAMROOT)+= pmcmsp-ramroot.o
obj-$(CONFIG_MTD_PCMCIA) += pcmciamtd.o
obj-$(CONFIG_MTD_RPXLITE) += rpxlite.o
obj-$(CONFIG_MTD_TQM8XXL) += tqm8xxl.o
diff --git a/drivers/mtd/maps/bfin-async-flash.c b/drivers/mtd/maps/bfin-async-flash.c
index 576611f..365c77b 100644
--- a/drivers/mtd/maps/bfin-async-flash.c
+++ b/drivers/mtd/maps/bfin-async-flash.c
@@ -40,6 +40,9 @@
uint32_t flash_ambctl0, flash_ambctl1;
uint32_t save_ambctl0, save_ambctl1;
unsigned long irq_flags;
+#ifdef CONFIG_MTD_PARTITIONS
+ struct mtd_partition *parts;
+#endif
};
static void switch_to_flash(struct async_state *state)
@@ -170,6 +173,7 @@
if (ret > 0) {
pr_devinit(KERN_NOTICE DRIVER_NAME ": Using commandline partition definition\n");
add_mtd_partitions(state->mtd, pdata->parts, ret);
+ state->parts = pdata->parts;
} else if (pdata->nr_parts) {
pr_devinit(KERN_NOTICE DRIVER_NAME ": Using board partition definition\n");
@@ -193,6 +197,7 @@
gpio_free(state->enet_flash_pin);
#ifdef CONFIG_MTD_PARTITIONS
del_mtd_partitions(state->mtd);
+ kfree(state->parts);
#endif
map_destroy(state->mtd);
kfree(state);
diff --git a/drivers/mtd/maps/integrator-flash.c b/drivers/mtd/maps/integrator-flash.c
index c9681a3..b08a798 100644
--- a/drivers/mtd/maps/integrator-flash.c
+++ b/drivers/mtd/maps/integrator-flash.c
@@ -36,27 +36,33 @@
#include <linux/mtd/mtd.h>
#include <linux/mtd/map.h>
#include <linux/mtd/partitions.h>
+#include <linux/mtd/concat.h>
#include <asm/mach/flash.h>
#include <mach/hardware.h>
#include <asm/system.h>
-#ifdef CONFIG_ARCH_P720T
-#define FLASH_BASE (0x04000000)
-#define FLASH_SIZE (64*1024*1024)
-#endif
+#define SUBDEV_NAME_SIZE (BUS_ID_SIZE + 2)
+
+struct armflash_subdev_info {
+ char name[SUBDEV_NAME_SIZE];
+ struct mtd_info *mtd;
+ struct map_info map;
+ struct flash_platform_data *plat;
+};
struct armflash_info {
- struct flash_platform_data *plat;
struct resource *res;
struct mtd_partition *parts;
struct mtd_info *mtd;
- struct map_info map;
+ int nr_subdev;
+ struct armflash_subdev_info subdev[0];
};
static void armflash_set_vpp(struct map_info *map, int on)
{
- struct armflash_info *info = container_of(map, struct armflash_info, map);
+ struct armflash_subdev_info *info =
+ container_of(map, struct armflash_subdev_info, map);
if (info->plat && info->plat->set_vpp)
info->plat->set_vpp(on);
@@ -64,32 +70,17 @@
static const char *probes[] = { "cmdlinepart", "RedBoot", "afs", NULL };
-static int armflash_probe(struct platform_device *dev)
+static int armflash_subdev_probe(struct armflash_subdev_info *subdev,
+ struct resource *res)
{
- struct flash_platform_data *plat = dev->dev.platform_data;
- struct resource *res = dev->resource;
- unsigned int size = res->end - res->start + 1;
- struct armflash_info *info;
- int err;
+ struct flash_platform_data *plat = subdev->plat;
+ resource_size_t size = res->end - res->start + 1;
void __iomem *base;
+ int err = 0;
- info = kzalloc(sizeof(struct armflash_info), GFP_KERNEL);
- if (!info) {
- err = -ENOMEM;
- goto out;
- }
-
- info->plat = plat;
- if (plat && plat->init) {
- err = plat->init();
- if (err)
- goto no_resource;
- }
-
- info->res = request_mem_region(res->start, size, "armflash");
- if (!info->res) {
+ if (!request_mem_region(res->start, size, subdev->name)) {
err = -EBUSY;
- goto no_resource;
+ goto out;
}
base = ioremap(res->start, size);
@@ -101,27 +92,132 @@
/*
* look for CFI based flash parts fitted to this board
*/
- info->map.size = size;
- info->map.bankwidth = plat->width;
- info->map.phys = res->start;
- info->map.virt = base;
- info->map.name = dev_name(&dev->dev);
- info->map.set_vpp = armflash_set_vpp;
+ subdev->map.size = size;
+ subdev->map.bankwidth = plat->width;
+ subdev->map.phys = res->start;
+ subdev->map.virt = base;
+ subdev->map.name = subdev->name;
+ subdev->map.set_vpp = armflash_set_vpp;
- simple_map_init(&info->map);
+ simple_map_init(&subdev->map);
/*
* Also, the CFI layer automatically works out what size
* of chips we have, and does the necessary identification
* for us automatically.
*/
- info->mtd = do_map_probe(plat->map_name, &info->map);
- if (!info->mtd) {
+ subdev->mtd = do_map_probe(plat->map_name, &subdev->map);
+ if (!subdev->mtd) {
err = -ENXIO;
goto no_device;
}
- info->mtd->owner = THIS_MODULE;
+ subdev->mtd->owner = THIS_MODULE;
+
+ /* Successful? */
+ if (err == 0)
+ return err;
+
+ if (subdev->mtd)
+ map_destroy(subdev->mtd);
+ no_device:
+ iounmap(base);
+ no_mem:
+ release_mem_region(res->start, size);
+ out:
+ return err;
+}
+
+static void armflash_subdev_remove(struct armflash_subdev_info *subdev)
+{
+ if (subdev->mtd)
+ map_destroy(subdev->mtd);
+ if (subdev->map.virt)
+ iounmap(subdev->map.virt);
+ release_mem_region(subdev->map.phys, subdev->map.size);
+}
+
+static int armflash_probe(struct platform_device *dev)
+{
+ struct flash_platform_data *plat = dev->dev.platform_data;
+ unsigned int size;
+ struct armflash_info *info;
+ int i, nr, err;
+
+ /* Count the number of devices */
+ for (nr = 0; ; nr++)
+ if (!platform_get_resource(dev, IORESOURCE_MEM, nr))
+ break;
+ if (nr == 0) {
+ err = -ENODEV;
+ goto out;
+ }
+
+ size = sizeof(struct armflash_info) +
+ sizeof(struct armflash_subdev_info) * nr;
+ info = kzalloc(size, GFP_KERNEL);
+ if (!info) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ if (plat && plat->init) {
+ err = plat->init();
+ if (err)
+ goto no_resource;
+ }
+
+ for (i = 0; i < nr; i++) {
+ struct armflash_subdev_info *subdev = &info->subdev[i];
+ struct resource *res;
+
+ res = platform_get_resource(dev, IORESOURCE_MEM, i);
+ if (!res)
+ break;
+
+ if (nr == 1)
+ /* No MTD concatenation, just use the default name */
+ snprintf(subdev->name, SUBDEV_NAME_SIZE, "%s",
+ dev_name(&dev->dev));
+ else
+ snprintf(subdev->name, SUBDEV_NAME_SIZE, "%s-%d",
+ dev_name(&dev->dev), i);
+ subdev->plat = plat;
+
+ err = armflash_subdev_probe(subdev, res);
+ if (err)
+ break;
+ }
+ info->nr_subdev = i;
+
+ if (err)
+ goto subdev_err;
+
+ if (info->nr_subdev == 1)
+ info->mtd = info->subdev[0].mtd;
+ else if (info->nr_subdev > 1) {
+#ifdef CONFIG_MTD_CONCAT
+ struct mtd_info *cdev[info->nr_subdev];
+
+ /*
+ * We detected multiple devices. Concatenate them together.
+ */
+ for (i = 0; i < info->nr_subdev; i++)
+ cdev[i] = info->subdev[i].mtd;
+
+ info->mtd = mtd_concat_create(cdev, info->nr_subdev,
+ dev_name(&dev->dev));
+ if (info->mtd == NULL)
+ err = -ENXIO;
+#else
+ printk(KERN_ERR "armflash: multiple devices found but "
+ "MTD concat support disabled.\n");
+ err = -ENXIO;
+#endif
+ }
+
+ if (err < 0)
+ goto cleanup;
err = parse_mtd_partitions(info->mtd, probes, &info->parts, 0);
if (err > 0) {
@@ -131,28 +227,30 @@
"mtd partition registration failed: %d\n", err);
}
- if (err == 0)
+ if (err == 0) {
platform_set_drvdata(dev, info);
+ return err;
+ }
/*
- * If we got an error, free all resources.
+ * We got an error, free all resources.
*/
- if (err < 0) {
- if (info->mtd) {
- del_mtd_partitions(info->mtd);
- map_destroy(info->mtd);
- }
- kfree(info->parts);
-
- no_device:
- iounmap(base);
- no_mem:
- release_mem_region(res->start, size);
- no_resource:
- if (plat && plat->exit)
- plat->exit();
- kfree(info);
+ cleanup:
+ if (info->mtd) {
+ del_mtd_partitions(info->mtd);
+#ifdef CONFIG_MTD_CONCAT
+ if (info->mtd != info->subdev[0].mtd)
+ mtd_concat_destroy(info->mtd);
+#endif
}
+ kfree(info->parts);
+ subdev_err:
+ for (i = info->nr_subdev - 1; i >= 0; i--)
+ armflash_subdev_remove(&info->subdev[i]);
+ no_resource:
+ if (plat && plat->exit)
+ plat->exit();
+ kfree(info);
out:
return err;
}
@@ -160,22 +258,26 @@
static int armflash_remove(struct platform_device *dev)
{
struct armflash_info *info = platform_get_drvdata(dev);
+ struct flash_platform_data *plat = dev->dev.platform_data;
+ int i;
platform_set_drvdata(dev, NULL);
if (info) {
if (info->mtd) {
del_mtd_partitions(info->mtd);
- map_destroy(info->mtd);
+#ifdef CONFIG_MTD_CONCAT
+ if (info->mtd != info->subdev[0].mtd)
+ mtd_concat_destroy(info->mtd);
+#endif
}
kfree(info->parts);
- iounmap(info->map.virt);
- release_resource(info->res);
- kfree(info->res);
+ for (i = info->nr_subdev - 1; i >= 0; i--)
+ armflash_subdev_remove(&info->subdev[i]);
- if (info->plat && info->plat->exit)
- info->plat->exit();
+ if (plat && plat->exit)
+ plat->exit();
kfree(info);
}
diff --git a/drivers/mtd/maps/physmap.c b/drivers/mtd/maps/physmap.c
index 29a9011..380648e 100644
--- a/drivers/mtd/maps/physmap.c
+++ b/drivers/mtd/maps/physmap.c
@@ -195,42 +195,6 @@
}
#ifdef CONFIG_PM
-static int physmap_flash_suspend(struct platform_device *dev, pm_message_t state)
-{
- struct physmap_flash_info *info = platform_get_drvdata(dev);
- int ret = 0;
- int i;
-
- for (i = 0; i < MAX_RESOURCES && info->mtd[i]; i++)
- if (info->mtd[i]->suspend) {
- ret = info->mtd[i]->suspend(info->mtd[i]);
- if (ret)
- goto fail;
- }
-
- return 0;
-fail:
- for (--i; i >= 0; --i)
- if (info->mtd[i]->suspend) {
- BUG_ON(!info->mtd[i]->resume);
- info->mtd[i]->resume(info->mtd[i]);
- }
-
- return ret;
-}
-
-static int physmap_flash_resume(struct platform_device *dev)
-{
- struct physmap_flash_info *info = platform_get_drvdata(dev);
- int i;
-
- for (i = 0; i < MAX_RESOURCES && info->mtd[i]; i++)
- if (info->mtd[i]->resume)
- info->mtd[i]->resume(info->mtd[i]);
-
- return 0;
-}
-
static void physmap_flash_shutdown(struct platform_device *dev)
{
struct physmap_flash_info *info = platform_get_drvdata(dev);
@@ -242,16 +206,12 @@
info->mtd[i]->resume(info->mtd[i]);
}
#else
-#define physmap_flash_suspend NULL
-#define physmap_flash_resume NULL
#define physmap_flash_shutdown NULL
#endif
static struct platform_driver physmap_flash_driver = {
.probe = physmap_flash_probe,
.remove = physmap_flash_remove,
- .suspend = physmap_flash_suspend,
- .resume = physmap_flash_resume,
.shutdown = physmap_flash_shutdown,
.driver = {
.name = "physmap-flash",
diff --git a/drivers/mtd/maps/physmap_of.c b/drivers/mtd/maps/physmap_of.c
index c83a60f..39d357b 100644
--- a/drivers/mtd/maps/physmap_of.c
+++ b/drivers/mtd/maps/physmap_of.c
@@ -20,16 +20,23 @@
#include <linux/mtd/mtd.h>
#include <linux/mtd/map.h>
#include <linux/mtd/partitions.h>
+#include <linux/mtd/concat.h>
#include <linux/of.h>
#include <linux/of_platform.h>
+struct of_flash_list {
+ struct mtd_info *mtd;
+ struct map_info map;
+ struct resource *res;
+};
+
struct of_flash {
- struct mtd_info *mtd;
- struct map_info map;
- struct resource *res;
+ struct mtd_info *cmtd;
#ifdef CONFIG_MTD_PARTITIONS
struct mtd_partition *parts;
#endif
+ int list_size; /* number of elements in of_flash_list */
+ struct of_flash_list list[0];
};
#ifdef CONFIG_MTD_PARTITIONS
@@ -88,30 +95,44 @@
static int of_flash_remove(struct of_device *dev)
{
struct of_flash *info;
+ int i;
info = dev_get_drvdata(&dev->dev);
if (!info)
return 0;
dev_set_drvdata(&dev->dev, NULL);
- if (info->mtd) {
+#ifdef CONFIG_MTD_CONCAT
+ if (info->cmtd != info->list[0].mtd) {
+ del_mtd_device(info->cmtd);
+ mtd_concat_destroy(info->cmtd);
+ }
+#endif
+
+ if (info->cmtd) {
if (OF_FLASH_PARTS(info)) {
- del_mtd_partitions(info->mtd);
+ del_mtd_partitions(info->cmtd);
kfree(OF_FLASH_PARTS(info));
} else {
- del_mtd_device(info->mtd);
+ del_mtd_device(info->cmtd);
}
- map_destroy(info->mtd);
}
- if (info->map.virt)
- iounmap(info->map.virt);
+ for (i = 0; i < info->list_size; i++) {
+ if (info->list[i].mtd)
+ map_destroy(info->list[i].mtd);
- if (info->res) {
- release_resource(info->res);
- kfree(info->res);
+ if (info->list[i].map.virt)
+ iounmap(info->list[i].map.virt);
+
+ if (info->list[i].res) {
+ release_resource(info->list[i].res);
+ kfree(info->list[i].res);
+ }
}
+ kfree(info);
+
return 0;
}
@@ -164,68 +185,130 @@
const char *probe_type = match->data;
const u32 *width;
int err;
+ int i;
+ int count;
+ const u32 *p;
+ int reg_tuple_size;
+ struct mtd_info **mtd_list = NULL;
- err = -ENXIO;
- if (of_address_to_resource(dp, 0, &res)) {
- dev_err(&dev->dev, "Can't get IO address from device tree\n");
+ reg_tuple_size = (of_n_addr_cells(dp) + of_n_size_cells(dp)) * sizeof(u32);
+
+ /*
+ * Get the number of "reg" tuples. Scan for MTD devices in the areas
+ * described by each "reg" region. This makes it possible (together with
+ * the concat support) to handle e.g. the Intel P30 48F4400 chip, which
+ * internally consists of 2 non-identical NOR chips on one die.
+ */
+ p = of_get_property(dp, "reg", &count);
+ if (count % reg_tuple_size != 0) {
+ dev_err(&dev->dev, "Malformed reg property on %s\n",
+ dev->node->full_name);
+ err = -EINVAL;
goto err_out;
}
-
- dev_dbg(&dev->dev, "of_flash device: %.8llx-%.8llx\n",
- (unsigned long long)res.start, (unsigned long long)res.end);
+ count /= reg_tuple_size;
err = -ENOMEM;
- info = kzalloc(sizeof(*info), GFP_KERNEL);
+ info = kzalloc(sizeof(struct of_flash) +
+ sizeof(struct of_flash_list) * count, GFP_KERNEL);
+ if (!info)
+ goto err_out;
+
+ mtd_list = kzalloc(sizeof(struct mtd_info *) * count, GFP_KERNEL);
if (!info)
goto err_out;
dev_set_drvdata(&dev->dev, info);
- err = -EBUSY;
- info->res = request_mem_region(res.start, res.end - res.start + 1,
- dev_name(&dev->dev));
- if (!info->res)
- goto err_out;
+ for (i = 0; i < count; i++) {
+ err = -ENXIO;
+ if (of_address_to_resource(dp, i, &res)) {
+ dev_err(&dev->dev, "Can't get IO address from device"
+ " tree\n");
+ goto err_out;
+ }
- err = -ENXIO;
- width = of_get_property(dp, "bank-width", NULL);
- if (!width) {
- dev_err(&dev->dev, "Can't get bank width from device tree\n");
- goto err_out;
+ dev_dbg(&dev->dev, "of_flash device: %.8llx-%.8llx\n",
+ (unsigned long long)res.start,
+ (unsigned long long)res.end);
+
+ err = -EBUSY;
+ info->list[i].res = request_mem_region(res.start, res.end -
+ res.start + 1,
+ dev_name(&dev->dev));
+ if (!info->list[i].res)
+ goto err_out;
+
+ err = -ENXIO;
+ width = of_get_property(dp, "bank-width", NULL);
+ if (!width) {
+ dev_err(&dev->dev, "Can't get bank width from device"
+ " tree\n");
+ goto err_out;
+ }
+
+ info->list[i].map.name = dev_name(&dev->dev);
+ info->list[i].map.phys = res.start;
+ info->list[i].map.size = res.end - res.start + 1;
+ info->list[i].map.bankwidth = *width;
+
+ err = -ENOMEM;
+ info->list[i].map.virt = ioremap(info->list[i].map.phys,
+ info->list[i].map.size);
+ if (!info->list[i].map.virt) {
+ dev_err(&dev->dev, "Failed to ioremap() flash"
+ " region\n");
+ goto err_out;
+ }
+
+ simple_map_init(&info->list[i].map);
+
+ if (probe_type) {
+ info->list[i].mtd = do_map_probe(probe_type,
+ &info->list[i].map);
+ } else {
+ info->list[i].mtd = obsolete_probe(dev,
+ &info->list[i].map);
+ }
+ mtd_list[i] = info->list[i].mtd;
+
+ err = -ENXIO;
+ if (!info->list[i].mtd) {
+ dev_err(&dev->dev, "do_map_probe() failed\n");
+ goto err_out;
+ } else {
+ info->list_size++;
+ }
+ info->list[i].mtd->owner = THIS_MODULE;
+ info->list[i].mtd->dev.parent = &dev->dev;
}
- info->map.name = dev_name(&dev->dev);
- info->map.phys = res.start;
- info->map.size = res.end - res.start + 1;
- info->map.bankwidth = *width;
-
- err = -ENOMEM;
- info->map.virt = ioremap(info->map.phys, info->map.size);
- if (!info->map.virt) {
- dev_err(&dev->dev, "Failed to ioremap() flash region\n");
- goto err_out;
+ err = 0;
+ if (info->list_size == 1) {
+ info->cmtd = info->list[0].mtd;
+ } else if (info->list_size > 1) {
+ /*
+ * We detected multiple devices. Concatenate them together.
+ */
+#ifdef CONFIG_MTD_CONCAT
+ info->cmtd = mtd_concat_create(mtd_list, info->list_size,
+ dev_name(&dev->dev));
+ if (info->cmtd == NULL)
+ err = -ENXIO;
+#else
+ printk(KERN_ERR "physmap_of: multiple devices "
+ "found but MTD concat support disabled.\n");
+ err = -ENXIO;
+#endif
}
-
- simple_map_init(&info->map);
-
- if (probe_type)
- info->mtd = do_map_probe(probe_type, &info->map);
- else
- info->mtd = obsolete_probe(dev, &info->map);
-
- err = -ENXIO;
- if (!info->mtd) {
- dev_err(&dev->dev, "do_map_probe() failed\n");
+ if (err)
goto err_out;
- }
- info->mtd->owner = THIS_MODULE;
- info->mtd->dev.parent = &dev->dev;
#ifdef CONFIG_MTD_PARTITIONS
/* First look for RedBoot table or partitions on the command
* line, these take precedence over device tree information */
- err = parse_mtd_partitions(info->mtd, part_probe_types,
- &info->parts, 0);
+ err = parse_mtd_partitions(info->cmtd, part_probe_types,
+ &info->parts, 0);
if (err < 0)
return err;
@@ -244,15 +327,19 @@
}
if (err > 0)
- add_mtd_partitions(info->mtd, info->parts, err);
+ add_mtd_partitions(info->cmtd, info->parts, err);
else
#endif
- add_mtd_device(info->mtd);
+ add_mtd_device(info->cmtd);
+
+ kfree(mtd_list);
return 0;
err_out:
+ kfree(mtd_list);
of_flash_remove(dev);
+
return err;
}
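
For readers following the physmap_of changes above: once every "reg" tuple has been probed into its own map, the multi-device case is stitched together with mtd_concat_create() from the MTD concat layer. A minimal sketch of that call, outside the driver and with invented names (chip_a/chip_b stand for mtd_info pointers returned by do_map_probe(); error handling is elided):

	#include <linux/mtd/mtd.h>
	#include <linux/mtd/concat.h>

	/* Hypothetical helper: join two already-probed chips into one MTD.
	 * Only usable when CONFIG_MTD_CONCAT is enabled, mirroring the
	 * #ifdef in the probe code above.
	 */
	static struct mtd_info *join_two_chips(struct mtd_info *chip_a,
					       struct mtd_info *chip_b)
	{
		struct mtd_info *parts[2] = { chip_a, chip_b };

		return mtd_concat_create(parts, 2, "example-concat");
	}

The returned mtd_info is what the probe code stores in info->cmtd and then hands to the partition parser and add_mtd_device().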
diff --git a/drivers/mtd/maps/pmcmsp-ramroot.c b/drivers/mtd/maps/pmcmsp-ramroot.c
deleted file mode 100644
index 30de5c0..0000000
--- a/drivers/mtd/maps/pmcmsp-ramroot.c
+++ /dev/null
@@ -1,104 +0,0 @@
-/*
- * Mapping of the rootfs in a physical region of memory
- *
- * Copyright (C) 2005-2007 PMC-Sierra Inc.
- * Author: Andrew Hughes, Andrew_Hughes@pmc-sierra.com
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
- * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
- * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
- * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
- * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 675 Mass Ave, Cambridge, MA 02139, USA.
- */
-
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/slab.h>
-#include <linux/fs.h>
-#include <linux/root_dev.h>
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/map.h>
-
-#include <asm/io.h>
-
-#include <msp_prom.h>
-
-static struct mtd_info *rr_mtd;
-
-struct map_info rr_map = {
- .name = "ramroot",
- .bankwidth = 4,
-};
-
-static int __init init_rrmap(void)
-{
- void *ramroot_start;
- unsigned long ramroot_size;
-
- /* Check for supported rootfs types */
- if (get_ramroot(&ramroot_start, &ramroot_size)) {
- rr_map.phys = CPHYSADDR(ramroot_start);
- rr_map.size = ramroot_size;
-
- printk(KERN_NOTICE
- "PMC embedded root device: 0x%08lx @ 0x%08lx\n",
- rr_map.size, (unsigned long)rr_map.phys);
- } else {
- printk(KERN_ERR
- "init_rrmap: no supported embedded rootfs detected!\n");
- return -ENXIO;
- }
-
- /* Map rootfs to I/O space for block device driver */
- rr_map.virt = ioremap(rr_map.phys, rr_map.size);
- if (!rr_map.virt) {
- printk(KERN_ERR "Failed to ioremap\n");
- return -EIO;
- }
-
- simple_map_init(&rr_map);
-
- rr_mtd = do_map_probe("map_ram", &rr_map);
- if (rr_mtd) {
- rr_mtd->owner = THIS_MODULE;
-
- add_mtd_device(rr_mtd);
-
- return 0;
- }
-
- iounmap(rr_map.virt);
- return -ENXIO;
-}
-
-static void __exit cleanup_rrmap(void)
-{
- del_mtd_device(rr_mtd);
- map_destroy(rr_mtd);
-
- iounmap(rr_map.virt);
- rr_map.virt = NULL;
-}
-
-MODULE_AUTHOR("PMC-Sierra, Inc");
-MODULE_DESCRIPTION("MTD map driver for embedded PMC-Sierra MSP filesystem");
-MODULE_LICENSE("GPL");
-
-module_init(init_rrmap);
-module_exit(cleanup_rrmap);
diff --git a/drivers/mtd/maps/pxa2xx-flash.c b/drivers/mtd/maps/pxa2xx-flash.c
index 572d32f..643aa06 100644
--- a/drivers/mtd/maps/pxa2xx-flash.c
+++ b/drivers/mtd/maps/pxa2xx-flash.c
@@ -140,24 +140,6 @@
}
#ifdef CONFIG_PM
-static int pxa2xx_flash_suspend(struct platform_device *dev, pm_message_t state)
-{
- struct pxa2xx_flash_info *info = platform_get_drvdata(dev);
- int ret = 0;
-
- if (info->mtd && info->mtd->suspend)
- ret = info->mtd->suspend(info->mtd);
- return ret;
-}
-
-static int pxa2xx_flash_resume(struct platform_device *dev)
-{
- struct pxa2xx_flash_info *info = platform_get_drvdata(dev);
-
- if (info->mtd && info->mtd->resume)
- info->mtd->resume(info->mtd);
- return 0;
-}
static void pxa2xx_flash_shutdown(struct platform_device *dev)
{
struct pxa2xx_flash_info *info = platform_get_drvdata(dev);
@@ -166,8 +148,6 @@
info->mtd->resume(info->mtd);
}
#else
-#define pxa2xx_flash_suspend NULL
-#define pxa2xx_flash_resume NULL
#define pxa2xx_flash_shutdown NULL
#endif
@@ -178,8 +158,6 @@
},
.probe = pxa2xx_flash_probe,
.remove = __devexit_p(pxa2xx_flash_remove),
- .suspend = pxa2xx_flash_suspend,
- .resume = pxa2xx_flash_resume,
.shutdown = pxa2xx_flash_shutdown,
};
diff --git a/drivers/mtd/maps/rbtx4939-flash.c b/drivers/mtd/maps/rbtx4939-flash.c
index d39f0ad..83ed645 100644
--- a/drivers/mtd/maps/rbtx4939-flash.c
+++ b/drivers/mtd/maps/rbtx4939-flash.c
@@ -145,25 +145,6 @@
}
#ifdef CONFIG_PM
-static int rbtx4939_flash_suspend(struct platform_device *dev,
- pm_message_t state)
-{
- struct rbtx4939_flash_info *info = platform_get_drvdata(dev);
-
- if (info->mtd->suspend)
- return info->mtd->suspend(info->mtd);
- return 0;
-}
-
-static int rbtx4939_flash_resume(struct platform_device *dev)
-{
- struct rbtx4939_flash_info *info = platform_get_drvdata(dev);
-
- if (info->mtd->resume)
- info->mtd->resume(info->mtd);
- return 0;
-}
-
static void rbtx4939_flash_shutdown(struct platform_device *dev)
{
struct rbtx4939_flash_info *info = platform_get_drvdata(dev);
@@ -173,16 +154,12 @@
info->mtd->resume(info->mtd);
}
#else
-#define rbtx4939_flash_suspend NULL
-#define rbtx4939_flash_resume NULL
#define rbtx4939_flash_shutdown NULL
#endif
static struct platform_driver rbtx4939_flash_driver = {
.probe = rbtx4939_flash_probe,
.remove = rbtx4939_flash_remove,
- .suspend = rbtx4939_flash_suspend,
- .resume = rbtx4939_flash_resume,
.shutdown = rbtx4939_flash_shutdown,
.driver = {
.name = "rbtx4939-flash",
diff --git a/drivers/mtd/maps/sa1100-flash.c b/drivers/mtd/maps/sa1100-flash.c
index 05e9362..c6210f5 100644
--- a/drivers/mtd/maps/sa1100-flash.c
+++ b/drivers/mtd/maps/sa1100-flash.c
@@ -415,25 +415,6 @@
}
#ifdef CONFIG_PM
-static int sa1100_mtd_suspend(struct platform_device *dev, pm_message_t state)
-{
- struct sa_info *info = platform_get_drvdata(dev);
- int ret = 0;
-
- if (info)
- ret = info->mtd->suspend(info->mtd);
-
- return ret;
-}
-
-static int sa1100_mtd_resume(struct platform_device *dev)
-{
- struct sa_info *info = platform_get_drvdata(dev);
- if (info)
- info->mtd->resume(info->mtd);
- return 0;
-}
-
static void sa1100_mtd_shutdown(struct platform_device *dev)
{
struct sa_info *info = platform_get_drvdata(dev);
@@ -441,16 +422,12 @@
info->mtd->resume(info->mtd);
}
#else
-#define sa1100_mtd_suspend NULL
-#define sa1100_mtd_resume NULL
#define sa1100_mtd_shutdown NULL
#endif
static struct platform_driver sa1100_mtd_driver = {
.probe = sa1100_mtd_probe,
.remove = __exit_p(sa1100_mtd_remove),
- .suspend = sa1100_mtd_suspend,
- .resume = sa1100_mtd_resume,
.shutdown = sa1100_mtd_shutdown,
.driver = {
.name = "sa1100-mtd",
diff --git a/drivers/mtd/maps/uclinux.c b/drivers/mtd/maps/uclinux.c
index 81756e3..d4314fb 100644
--- a/drivers/mtd/maps/uclinux.c
+++ b/drivers/mtd/maps/uclinux.c
@@ -22,15 +22,19 @@
/****************************************************************************/
+extern char _ebss;
+
struct map_info uclinux_ram_map = {
.name = "RAM",
+ .phys = (unsigned long)&_ebss,
+ .size = 0,
};
-struct mtd_info *uclinux_ram_mtdinfo;
+static struct mtd_info *uclinux_ram_mtdinfo;
/****************************************************************************/
-struct mtd_partition uclinux_romfs[] = {
+static struct mtd_partition uclinux_romfs[] = {
{ .name = "ROMfs" }
};
@@ -38,7 +42,7 @@
/****************************************************************************/
-int uclinux_point(struct mtd_info *mtd, loff_t from, size_t len,
+static int uclinux_point(struct mtd_info *mtd, loff_t from, size_t len,
size_t *retlen, void **virt, resource_size_t *phys)
{
struct map_info *map = mtd->priv;
@@ -55,12 +59,10 @@
{
struct mtd_info *mtd;
struct map_info *mapp;
- extern char _ebss;
- unsigned long addr = (unsigned long) &_ebss;
mapp = &uclinux_ram_map;
- mapp->phys = addr;
- mapp->size = PAGE_ALIGN(ntohl(*((unsigned long *)(addr + 8))));
+ if (!mapp->size)
+ mapp->size = PAGE_ALIGN(ntohl(*((unsigned long *)(mapp->phys + 8))));
mapp->bankwidth = 4;
printk("uclinux[mtd]: RAM probe address=0x%x size=0x%x\n",
diff --git a/drivers/mtd/mtd_blkdevs.c b/drivers/mtd/mtd_blkdevs.c
index aaac3b6..c3f6265 100644
--- a/drivers/mtd/mtd_blkdevs.c
+++ b/drivers/mtd/mtd_blkdevs.c
@@ -291,7 +291,7 @@
gd->private_data = new;
new->blkcore_priv = gd;
gd->queue = tr->blkcore_priv->rq;
- gd->driverfs_dev = new->mtd->dev.parent;
+ gd->driverfs_dev = &new->mtd->dev;
if (new->readonly)
set_disk_ro(gd, 1);
diff --git a/drivers/mtd/mtdchar.c b/drivers/mtd/mtdchar.c
index 763d3f0..5b081cb 100644
--- a/drivers/mtd/mtdchar.c
+++ b/drivers/mtd/mtdchar.c
@@ -14,6 +14,7 @@
#include <linux/sched.h>
#include <linux/smp_lock.h>
#include <linux/backing-dev.h>
+#include <linux/compat.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/compatmac.h>
@@ -355,6 +356,100 @@
# define otp_select_filemode(f,m) -EOPNOTSUPP
#endif
+static int mtd_do_writeoob(struct file *file, struct mtd_info *mtd,
+ uint64_t start, uint32_t length, void __user *ptr,
+ uint32_t __user *retp)
+{
+ struct mtd_oob_ops ops;
+ uint32_t retlen;
+ int ret = 0;
+
+ if (!(file->f_mode & FMODE_WRITE))
+ return -EPERM;
+
+ if (length > 4096)
+ return -EINVAL;
+
+ if (!mtd->write_oob)
+ ret = -EOPNOTSUPP;
+ else
+ ret = access_ok(VERIFY_READ, ptr, length) ? 0 : -EFAULT;
+
+ if (ret)
+ return ret;
+
+ ops.ooblen = length;
+ ops.ooboffs = start & (mtd->oobsize - 1);
+ ops.datbuf = NULL;
+ ops.mode = MTD_OOB_PLACE;
+
+ if (ops.ooboffs && ops.ooblen > (mtd->oobsize - ops.ooboffs))
+ return -EINVAL;
+
+ ops.oobbuf = kmalloc(length, GFP_KERNEL);
+ if (!ops.oobbuf)
+ return -ENOMEM;
+
+ if (copy_from_user(ops.oobbuf, ptr, length)) {
+ kfree(ops.oobbuf);
+ return -EFAULT;
+ }
+
+ start &= ~((uint64_t)mtd->oobsize - 1);
+ ret = mtd->write_oob(mtd, start, &ops);
+
+ if (ops.oobretlen > 0xFFFFFFFFU)
+ ret = -EOVERFLOW;
+ retlen = ops.oobretlen;
+ if (copy_to_user(retp, &retlen, sizeof(length)))
+ ret = -EFAULT;
+
+ kfree(ops.oobbuf);
+ return ret;
+}
+
+static int mtd_do_readoob(struct mtd_info *mtd, uint64_t start,
+ uint32_t length, void __user *ptr, uint32_t __user *retp)
+{
+ struct mtd_oob_ops ops;
+ int ret = 0;
+
+ if (length > 4096)
+ return -EINVAL;
+
+ if (!mtd->read_oob)
+ ret = -EOPNOTSUPP;
+ else
+ ret = access_ok(VERIFY_WRITE, ptr,
+ length) ? 0 : -EFAULT;
+ if (ret)
+ return ret;
+
+ ops.ooblen = length;
+ ops.ooboffs = start & (mtd->oobsize - 1);
+ ops.datbuf = NULL;
+ ops.mode = MTD_OOB_PLACE;
+
+ if (ops.ooboffs && ops.ooblen > (mtd->oobsize - ops.ooboffs))
+ return -EINVAL;
+
+ ops.oobbuf = kmalloc(length, GFP_KERNEL);
+ if (!ops.oobbuf)
+ return -ENOMEM;
+
+ start &= ~((uint64_t)mtd->oobsize - 1);
+ ret = mtd->read_oob(mtd, start, &ops);
+
+ if (put_user(ops.oobretlen, retp))
+ ret = -EFAULT;
+ else if (ops.oobretlen && copy_to_user(ptr, ops.oobbuf,
+ ops.oobretlen))
+ ret = -EFAULT;
+
+ kfree(ops.oobbuf);
+ return ret;
+}
+
static int mtd_ioctl(struct inode *inode, struct file *file,
u_int cmd, u_long arg)
{
@@ -417,6 +512,7 @@
break;
case MEMERASE:
+ case MEMERASE64:
{
struct erase_info *erase;
@@ -427,20 +523,32 @@
if (!erase)
ret = -ENOMEM;
else {
- struct erase_info_user einfo;
-
wait_queue_head_t waitq;
DECLARE_WAITQUEUE(wait, current);
init_waitqueue_head(&waitq);
- if (copy_from_user(&einfo, argp,
- sizeof(struct erase_info_user))) {
- kfree(erase);
- return -EFAULT;
+ if (cmd == MEMERASE64) {
+ struct erase_info_user64 einfo64;
+
+ if (copy_from_user(&einfo64, argp,
+ sizeof(struct erase_info_user64))) {
+ kfree(erase);
+ return -EFAULT;
+ }
+ erase->addr = einfo64.start;
+ erase->len = einfo64.length;
+ } else {
+ struct erase_info_user einfo32;
+
+ if (copy_from_user(&einfo32, argp,
+ sizeof(struct erase_info_user))) {
+ kfree(erase);
+ return -EFAULT;
+ }
+ erase->addr = einfo32.start;
+ erase->len = einfo32.length;
}
- erase->addr = einfo.start;
- erase->len = einfo.length;
erase->mtd = mtd;
erase->callback = mtdchar_erase_callback;
erase->priv = (unsigned long)&waitq;
@@ -474,100 +582,56 @@
case MEMWRITEOOB:
{
struct mtd_oob_buf buf;
- struct mtd_oob_ops ops;
- struct mtd_oob_buf __user *user_buf = argp;
- uint32_t retlen;
+ struct mtd_oob_buf __user *buf_user = argp;
- if(!(file->f_mode & FMODE_WRITE))
- return -EPERM;
-
- if (copy_from_user(&buf, argp, sizeof(struct mtd_oob_buf)))
- return -EFAULT;
-
- if (buf.length > 4096)
- return -EINVAL;
-
- if (!mtd->write_oob)
- ret = -EOPNOTSUPP;
- else
- ret = access_ok(VERIFY_READ, buf.ptr,
- buf.length) ? 0 : EFAULT;
-
- if (ret)
- return ret;
-
- ops.ooblen = buf.length;
- ops.ooboffs = buf.start & (mtd->oobsize - 1);
- ops.datbuf = NULL;
- ops.mode = MTD_OOB_PLACE;
-
- if (ops.ooboffs && ops.ooblen > (mtd->oobsize - ops.ooboffs))
- return -EINVAL;
-
- ops.oobbuf = kmalloc(buf.length, GFP_KERNEL);
- if (!ops.oobbuf)
- return -ENOMEM;
-
- if (copy_from_user(ops.oobbuf, buf.ptr, buf.length)) {
- kfree(ops.oobbuf);
- return -EFAULT;
- }
-
- buf.start &= ~(mtd->oobsize - 1);
- ret = mtd->write_oob(mtd, buf.start, &ops);
-
- if (ops.oobretlen > 0xFFFFFFFFU)
- ret = -EOVERFLOW;
- retlen = ops.oobretlen;
- if (copy_to_user(&user_buf->length, &retlen, sizeof(buf.length)))
+ /* NOTE: writes return length to buf_user->length */
+ if (copy_from_user(&buf, argp, sizeof(buf)))
ret = -EFAULT;
-
- kfree(ops.oobbuf);
+ else
+ ret = mtd_do_writeoob(file, mtd, buf.start, buf.length,
+ buf.ptr, &buf_user->length);
break;
-
}
case MEMREADOOB:
{
struct mtd_oob_buf buf;
- struct mtd_oob_ops ops;
+ struct mtd_oob_buf __user *buf_user = argp;
- if (copy_from_user(&buf, argp, sizeof(struct mtd_oob_buf)))
- return -EFAULT;
-
- if (buf.length > 4096)
- return -EINVAL;
-
- if (!mtd->read_oob)
- ret = -EOPNOTSUPP;
+ /* NOTE: writes return length to buf_user->start */
+ if (copy_from_user(&buf, argp, sizeof(buf)))
+ ret = -EFAULT;
else
- ret = access_ok(VERIFY_WRITE, buf.ptr,
- buf.length) ? 0 : -EFAULT;
- if (ret)
- return ret;
+ ret = mtd_do_readoob(mtd, buf.start, buf.length,
+ buf.ptr, &buf_user->start);
+ break;
+ }
- ops.ooblen = buf.length;
- ops.ooboffs = buf.start & (mtd->oobsize - 1);
- ops.datbuf = NULL;
- ops.mode = MTD_OOB_PLACE;
+ case MEMWRITEOOB64:
+ {
+ struct mtd_oob_buf64 buf;
+ struct mtd_oob_buf64 __user *buf_user = argp;
- if (ops.ooboffs && ops.ooblen > (mtd->oobsize - ops.ooboffs))
- return -EINVAL;
-
- ops.oobbuf = kmalloc(buf.length, GFP_KERNEL);
- if (!ops.oobbuf)
- return -ENOMEM;
-
- buf.start &= ~(mtd->oobsize - 1);
- ret = mtd->read_oob(mtd, buf.start, &ops);
-
- if (put_user(ops.oobretlen, (uint32_t __user *)argp))
+ if (copy_from_user(&buf, argp, sizeof(buf)))
ret = -EFAULT;
- else if (ops.oobretlen && copy_to_user(buf.ptr, ops.oobbuf,
- ops.oobretlen))
- ret = -EFAULT;
+ else
+ ret = mtd_do_writeoob(file, mtd, buf.start, buf.length,
+ (void __user *)(uintptr_t)buf.usr_ptr,
+ &buf_user->length);
+ break;
+ }
- kfree(ops.oobbuf);
+ case MEMREADOOB64:
+ {
+ struct mtd_oob_buf64 buf;
+ struct mtd_oob_buf64 __user *buf_user = argp;
+
+ if (copy_from_user(&buf, argp, sizeof(buf)))
+ ret = -EFAULT;
+ else
+ ret = mtd_do_readoob(mtd, buf.start, buf.length,
+ (void __user *)(uintptr_t)buf.usr_ptr,
+ &buf_user->length);
break;
}
@@ -758,6 +822,68 @@
return ret;
} /* memory_ioctl */
+#ifdef CONFIG_COMPAT
+
+struct mtd_oob_buf32 {
+ u_int32_t start;
+ u_int32_t length;
+ compat_caddr_t ptr; /* unsigned char* */
+};
+
+#define MEMWRITEOOB32 _IOWR('M', 3, struct mtd_oob_buf32)
+#define MEMREADOOB32 _IOWR('M', 4, struct mtd_oob_buf32)
+
+static long mtd_compat_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ struct inode *inode = file->f_path.dentry->d_inode;
+ struct mtd_file_info *mfi = file->private_data;
+ struct mtd_info *mtd = mfi->mtd;
+ void __user *argp = compat_ptr(arg);
+ int ret = 0;
+
+ lock_kernel();
+
+ switch (cmd) {
+ case MEMWRITEOOB32:
+ {
+ struct mtd_oob_buf32 buf;
+ struct mtd_oob_buf32 __user *buf_user = argp;
+
+ if (copy_from_user(&buf, argp, sizeof(buf)))
+ ret = -EFAULT;
+ else
+ ret = mtd_do_writeoob(file, mtd, buf.start,
+ buf.length, compat_ptr(buf.ptr),
+ &buf_user->length);
+ break;
+ }
+
+ case MEMREADOOB32:
+ {
+ struct mtd_oob_buf32 buf;
+ struct mtd_oob_buf32 __user *buf_user = argp;
+
+ /* NOTE: writes return length to buf->start */
+ if (copy_from_user(&buf, argp, sizeof(buf)))
+ ret = -EFAULT;
+ else
+ ret = mtd_do_readoob(mtd, buf.start,
+ buf.length, compat_ptr(buf.ptr),
+ &buf_user->start);
+ break;
+ }
+ default:
+ ret = mtd_ioctl(inode, file, cmd, (unsigned long)argp);
+ }
+
+ unlock_kernel();
+
+ return ret;
+}
+
+#endif /* CONFIG_COMPAT */
+
/*
* try to determine where a shared mapping can be made
* - only supported for NOMMU at the moment (MMU can't doesn't copy private
@@ -817,6 +943,9 @@
.read = mtd_read,
.write = mtd_write,
.ioctl = mtd_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = mtd_compat_ioctl,
+#endif
.open = mtd_open,
.release = mtd_close,
.mmap = mtd_mmap,
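
Taken together, the mtdchar.c changes factor the OOB paths into mtd_do_readoob()/mtd_do_writeoob() and add 64-bit-offset variants of the erase and OOB ioctls, plus a compat path for 32-bit userspace on 64-bit kernels. A rough userspace sketch of the new calls (the device node, offsets and sizes are examples only; the header is the usual exported mtd-user.h):

	#include <stdio.h>
	#include <stdint.h>
	#include <fcntl.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <mtd/mtd-user.h>	/* MEMERASE64, MEMREADOOB64, struct mtd_oob_buf64 */

	int main(void)
	{
		struct erase_info_user64 ei = {
			.start  = 5ULL << 30,		/* example block beyond 4 GiB */
			.length = 128 * 1024,		/* example eraseblock size */
		};
		unsigned char oob[64];
		struct mtd_oob_buf64 ob = {
			.start   = 5ULL << 30,		/* example page offset */
			.length  = sizeof(oob),
			.usr_ptr = (uintptr_t)oob,
		};
		int fd = open("/dev/mtd0", O_RDWR);	/* example device node */

		if (fd < 0) {
			perror("open");
			return 1;
		}
		if (ioctl(fd, MEMERASE64, &ei) < 0)
			perror("MEMERASE64");
		if (ioctl(fd, MEMREADOOB64, &ob) < 0)
			perror("MEMREADOOB64");
		close(fd);
		return 0;
	}

On return from MEMREADOOB64 the kernel writes the transferred byte count back into ob.length; the older 32-bit MEMREADOOB reports it through the start field instead, as the NOTE comments in the switch above point out.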
diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c
index bccb4b1..fac54a3 100644
--- a/drivers/mtd/mtdcore.c
+++ b/drivers/mtd/mtdcore.c
@@ -23,8 +23,15 @@
#include "mtdcore.h"
+static int mtd_cls_suspend(struct device *dev, pm_message_t state);
+static int mtd_cls_resume(struct device *dev);
-static struct class *mtd_class;
+static struct class mtd_class = {
+ .name = "mtd",
+ .owner = THIS_MODULE,
+ .suspend = mtd_cls_suspend,
+ .resume = mtd_cls_resume,
+};
/* These are exported solely for the purpose of mtd_blkdevs.c. You
should not use them for _anything_ else */
@@ -52,7 +59,26 @@
/* remove /dev/mtdXro node if needed */
if (index)
- device_destroy(mtd_class, index + 1);
+ device_destroy(&mtd_class, index + 1);
+}
+
+static int mtd_cls_suspend(struct device *dev, pm_message_t state)
+{
+ struct mtd_info *mtd = dev_to_mtd(dev);
+
+ if (mtd->suspend)
+ return mtd->suspend(mtd);
+ else
+ return 0;
+}
+
+static int mtd_cls_resume(struct device *dev)
+{
+ struct mtd_info *mtd = dev_to_mtd(dev);
+
+ if (mtd->resume)
+ mtd->resume(mtd);
+ return 0;
}
static ssize_t mtd_type_show(struct device *dev,
@@ -269,7 +295,7 @@
* physical device.
*/
mtd->dev.type = &mtd_devtype;
- mtd->dev.class = mtd_class;
+ mtd->dev.class = &mtd_class;
mtd->dev.devt = MTD_DEVT(i);
dev_set_name(&mtd->dev, "mtd%d", i);
if (device_register(&mtd->dev) != 0) {
@@ -278,7 +304,7 @@
}
if (MTD_DEVT(i))
- device_create(mtd_class, mtd->dev.parent,
+ device_create(&mtd_class, mtd->dev.parent,
MTD_DEVT(i) + 1,
NULL, "mtd%dro", i);
@@ -604,11 +630,12 @@
static int __init init_mtd(void)
{
- mtd_class = class_create(THIS_MODULE, "mtd");
+ int ret;
+ ret = class_register(&mtd_class);
- if (IS_ERR(mtd_class)) {
- pr_err("Error creating mtd class.\n");
- return PTR_ERR(mtd_class);
+ if (ret) {
+ pr_err("Error registering mtd class: %d\n", ret);
+ return ret;
}
#ifdef CONFIG_PROC_FS
if ((proc_mtd = create_proc_entry( "mtd", 0, NULL )))
@@ -623,7 +650,7 @@
if (proc_mtd)
remove_proc_entry( "mtd", NULL);
#endif /* CONFIG_PROC_FS */
- class_destroy(mtd_class);
+ class_unregister(&mtd_class);
}
module_init(init_mtd);
diff --git a/drivers/mtd/mtdpart.c b/drivers/mtd/mtdpart.c
index 29675ed..349fcbe 100644
--- a/drivers/mtd/mtdpart.c
+++ b/drivers/mtd/mtdpart.c
@@ -27,9 +27,7 @@
struct mtd_info mtd;
struct mtd_info *master;
uint64_t offset;
- int index;
struct list_head list;
- int registered;
};
/*
@@ -321,8 +319,7 @@
list_for_each_entry_safe(slave, next, &mtd_partitions, list)
if (slave->master == master) {
list_del(&slave->list);
- if (slave->registered)
- del_mtd_device(&slave->mtd);
+ del_mtd_device(&slave->mtd);
kfree(slave);
}
@@ -395,7 +392,7 @@
slave->mtd.get_fact_prot_info = part_get_fact_prot_info;
if (master->sync)
slave->mtd.sync = part_sync;
- if (!partno && master->suspend && master->resume) {
+ if (!partno && !master->dev.class && master->suspend && master->resume) {
slave->mtd.suspend = part_suspend;
slave->mtd.resume = part_resume;
}
@@ -412,7 +409,6 @@
slave->mtd.erase = part_erase;
slave->master = master;
slave->offset = part->offset;
- slave->index = partno;
if (slave->offset == MTDPART_OFS_APPEND)
slave->offset = cur_offset;
@@ -500,15 +496,9 @@
}
out_register:
- if (part->mtdp) {
- /* store the object pointer (caller may or may not register it*/
- *part->mtdp = &slave->mtd;
- slave->registered = 0;
- } else {
- /* register our partition */
- add_mtd_device(&slave->mtd);
- slave->registered = 1;
- }
+ /* register our partition */
+ add_mtd_device(&slave->mtd);
+
return slave;
}
diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig
index f327689..ce96c09 100644
--- a/drivers/mtd/nand/Kconfig
+++ b/drivers/mtd/nand/Kconfig
@@ -74,6 +74,12 @@
help
Support for NAND flash on Amstrad E3 (Delta).
+config MTD_NAND_OMAP2
+ tristate "NAND Flash device on OMAP2 and OMAP3"
+ depends on ARM && MTD_NAND && (ARCH_OMAP2 || ARCH_OMAP3)
+ help
+ Support for NAND flash on Texas Instruments OMAP2 and OMAP3 platforms.
+
config MTD_NAND_TS7250
tristate "NAND Flash device on TS-7250 board"
depends on MACH_TS72XX
@@ -139,27 +145,27 @@
This enables the NAND flash driver on the PPChameleon EVB Board.
config MTD_NAND_S3C2410
- tristate "NAND Flash support for S3C2410/S3C2440 SoC"
- depends on ARCH_S3C2410
+ tristate "NAND Flash support for Samsung S3C SoCs"
+ depends on ARCH_S3C2410 || ARCH_S3C64XX
help
- This enables the NAND flash controller on the S3C2410 and S3C2440
+ This enables the NAND flash controller on the S3C24xx and S3C64xx
SoCs
No board specific support is done by this driver, each board
must advertise a platform_device for the driver to attach.
config MTD_NAND_S3C2410_DEBUG
- bool "S3C2410 NAND driver debug"
+ bool "Samsung S3C NAND driver debug"
depends on MTD_NAND_S3C2410
help
- Enable debugging of the S3C2410 NAND driver
+ Enable debugging of the S3C NAND driver
config MTD_NAND_S3C2410_HWECC
- bool "S3C2410 NAND Hardware ECC"
+ bool "Samsung S3C NAND Hardware ECC"
depends on MTD_NAND_S3C2410
help
- Enable the use of the S3C2410's internal ECC generator when
- using NAND. Early versions of the chip have had problems with
+ Enable the use of the controller's internal ECC generator when
+ using NAND. Early versions of the chips have had problems with
incorrect ECC generation, and if using these, the default of
software ECC is preferable.
@@ -171,7 +177,7 @@
NDFC Nand Flash Controllers are integrated in IBM/AMCC's 4xx SoCs
config MTD_NAND_S3C2410_CLKSTOP
- bool "S3C2410 NAND IDLE clock stop"
+ bool "Samsung S3C NAND IDLE clock stop"
depends on MTD_NAND_S3C2410
default n
help
diff --git a/drivers/mtd/nand/Makefile b/drivers/mtd/nand/Makefile
index d33860a..f3a786b 100644
--- a/drivers/mtd/nand/Makefile
+++ b/drivers/mtd/nand/Makefile
@@ -25,6 +25,7 @@
obj-$(CONFIG_MTD_NAND_NDFC) += ndfc.o
obj-$(CONFIG_MTD_NAND_ATMEL) += atmel_nand.o
obj-$(CONFIG_MTD_NAND_GPIO) += gpio.o
+obj-$(CONFIG_MTD_NAND_OMAP2) += omap2.o
obj-$(CONFIG_MTD_NAND_CM_X270) += cmx270_nand.o
obj-$(CONFIG_MTD_NAND_BASLER_EXCITE) += excite_nandflash.o
obj-$(CONFIG_MTD_NAND_PXA3xx) += pxa3xx_nand.o
diff --git a/drivers/mtd/nand/atmel_nand.c b/drivers/mtd/nand/atmel_nand.c
index 47a33ce..2802992 100644
--- a/drivers/mtd/nand/atmel_nand.c
+++ b/drivers/mtd/nand/atmel_nand.c
@@ -24,6 +24,7 @@
#include <linux/slab.h>
#include <linux/module.h>
+#include <linux/moduleparam.h>
#include <linux/platform_device.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/nand.h>
@@ -47,6 +48,9 @@
#define no_ecc 0
#endif
+static int on_flash_bbt = 0;
+module_param(on_flash_bbt, int, 0);
+
/* Register access macros */
#define ecc_readl(add, reg) \
__raw_readl(add + ATMEL_ECC_##reg)
@@ -459,12 +463,17 @@
if (host->board->det_pin) {
if (gpio_get_value(host->board->det_pin)) {
- printk("No SmartMedia card inserted.\n");
+ printk(KERN_INFO "No SmartMedia card inserted.\n");
res = ENXIO;
goto err_no_card;
}
}
+ if (on_flash_bbt) {
+ printk(KERN_INFO "atmel_nand: Use On Flash BBT\n");
+ nand_chip->options |= NAND_USE_FLASH_BBT;
+ }
+
/* first scan to find the device and get the page size */
if (nand_scan_ident(mtd, 1)) {
res = -ENXIO;
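
A brief usage note on the new atmel_nand parameter above (standard module-parameter handling, nothing driver-specific assumed): loading the driver with

	modprobe atmel_nand on_flash_bbt=1

or booting with atmel_nand.on_flash_bbt=1 when the driver is built in, sets NAND_USE_FLASH_BBT so the bad-block table is kept in flash rather than rebuilt by scanning markers on every boot.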
diff --git a/drivers/mtd/nand/bf5xx_nand.c b/drivers/mtd/nand/bf5xx_nand.c
index 4c2a67c..8506e7e 100644
--- a/drivers/mtd/nand/bf5xx_nand.c
+++ b/drivers/mtd/nand/bf5xx_nand.c
@@ -458,7 +458,7 @@
return IRQ_HANDLED;
}
-static int bf5xx_nand_dma_rw(struct mtd_info *mtd,
+static void bf5xx_nand_dma_rw(struct mtd_info *mtd,
uint8_t *buf, int is_read)
{
struct bf5xx_nand_info *info = mtd_to_nand_info(mtd);
@@ -496,11 +496,20 @@
/* setup DMA register with Blackfin DMA API */
set_dma_config(CH_NFC, 0x0);
set_dma_start_addr(CH_NFC, (unsigned long) buf);
+
+/* The DMAs have different size on BF52x and BF54x */
+#ifdef CONFIG_BF52x
+ set_dma_x_count(CH_NFC, (page_size >> 1));
+ set_dma_x_modify(CH_NFC, 2);
+ val = DI_EN | WDSIZE_16;
+#endif
+
+#ifdef CONFIG_BF54x
set_dma_x_count(CH_NFC, (page_size >> 2));
set_dma_x_modify(CH_NFC, 4);
-
- /* setup write or read operation */
val = DI_EN | WDSIZE_32;
+#endif
+ /* setup write or read operation */
if (is_read)
val |= WNR;
set_dma_config(CH_NFC, val);
@@ -512,8 +521,6 @@
else
bfin_write_NFC_PGCTL(0x2);
wait_for_completion(&info->dma_completion);
-
- return 0;
}
static void bf5xx_nand_dma_read_buf(struct mtd_info *mtd,
diff --git a/drivers/mtd/nand/davinci_nand.c b/drivers/mtd/nand/davinci_nand.c
index 02700f7..0fad648 100644
--- a/drivers/mtd/nand/davinci_nand.c
+++ b/drivers/mtd/nand/davinci_nand.c
@@ -44,7 +44,7 @@
* and some flavors of secondary chipselect (e.g. based on A12) as used
* with multichip packages.
*
- * The 1-bit ECC hardware is supported, but not yet the newer 4-bit ECC
+ * The 1-bit ECC hardware is supported, as well as the newer 4-bit ECC
* available on chips like the DM355 and OMAP-L137 and needed with the
* more error-prone MLC NAND chips.
*
@@ -54,11 +54,14 @@
struct davinci_nand_info {
struct mtd_info mtd;
struct nand_chip chip;
+ struct nand_ecclayout ecclayout;
struct device *dev;
struct clk *clk;
bool partitioned;
+ bool is_readmode;
+
void __iomem *base;
void __iomem *vaddr;
@@ -73,6 +76,7 @@
};
static DEFINE_SPINLOCK(davinci_nand_lock);
+static bool ecc4_busy;
#define to_davinci_nand(m) container_of(m, struct davinci_nand_info, mtd)
@@ -218,6 +222,192 @@
/*----------------------------------------------------------------------*/
/*
+ * 4-bit hardware ECC ... context maintained over entire AEMIF
+ *
+ * This is a syndrome engine, but we avoid NAND_ECC_HW_SYNDROME
+ * since that forces use of a problematic "infix OOB" layout.
+ * Among other things, it trashes manufacturer bad block markers.
+ * Also, and specific to this hardware, it ECC-protects the "prepad"
+ * in the OOB ... while having ECC protection for parts of OOB would
+ * seem useful, the current MTD stack sometimes wants to update the
+ * OOB without recomputing ECC.
+ */
+
+static void nand_davinci_hwctl_4bit(struct mtd_info *mtd, int mode)
+{
+ struct davinci_nand_info *info = to_davinci_nand(mtd);
+ unsigned long flags;
+ u32 val;
+
+ spin_lock_irqsave(&davinci_nand_lock, flags);
+
+ /* Start 4-bit ECC calculation for read/write */
+ val = davinci_nand_readl(info, NANDFCR_OFFSET);
+ val &= ~(0x03 << 4);
+ val |= (info->core_chipsel << 4) | BIT(12);
+ davinci_nand_writel(info, NANDFCR_OFFSET, val);
+
+ info->is_readmode = (mode == NAND_ECC_READ);
+
+ spin_unlock_irqrestore(&davinci_nand_lock, flags);
+}
+
+/* Read raw ECC code after writing to NAND. */
+static void
+nand_davinci_readecc_4bit(struct davinci_nand_info *info, u32 code[4])
+{
+ const u32 mask = 0x03ff03ff;
+
+ code[0] = davinci_nand_readl(info, NAND_4BIT_ECC1_OFFSET) & mask;
+ code[1] = davinci_nand_readl(info, NAND_4BIT_ECC2_OFFSET) & mask;
+ code[2] = davinci_nand_readl(info, NAND_4BIT_ECC3_OFFSET) & mask;
+ code[3] = davinci_nand_readl(info, NAND_4BIT_ECC4_OFFSET) & mask;
+}
+
+/* Terminate read ECC; or return ECC (as bytes) of data written to NAND. */
+static int nand_davinci_calculate_4bit(struct mtd_info *mtd,
+ const u_char *dat, u_char *ecc_code)
+{
+ struct davinci_nand_info *info = to_davinci_nand(mtd);
+ u32 raw_ecc[4], *p;
+ unsigned i;
+
+ /* After a read, terminate ECC calculation by a dummy read
+ * of some 4-bit ECC register. ECC covers everything that
+ * was read; correct() just uses the hardware state, so
+ * ecc_code is not needed.
+ */
+ if (info->is_readmode) {
+ davinci_nand_readl(info, NAND_4BIT_ECC1_OFFSET);
+ return 0;
+ }
+
+ /* Pack eight raw 10-bit ecc values into ten bytes, making
+ * two passes which each convert four values (in upper and
+ * lower halves of two 32-bit words) into five bytes. The
+ * ROM boot loader uses this same packing scheme.
+ */
+ nand_davinci_readecc_4bit(info, raw_ecc);
+ for (i = 0, p = raw_ecc; i < 2; i++, p += 2) {
+ *ecc_code++ = p[0] & 0xff;
+ *ecc_code++ = ((p[0] >> 8) & 0x03) | ((p[0] >> 14) & 0xfc);
+ *ecc_code++ = ((p[0] >> 22) & 0x0f) | ((p[1] << 4) & 0xf0);
+ *ecc_code++ = ((p[1] >> 4) & 0x3f) | ((p[1] >> 10) & 0xc0);
+ *ecc_code++ = (p[1] >> 18) & 0xff;
+ }
+
+ return 0;
+}
+
+/* Correct up to 4 bits in data we just read, using state left in the
+ * hardware plus the ecc_code computed when it was first written.
+ */
+static int nand_davinci_correct_4bit(struct mtd_info *mtd,
+ u_char *data, u_char *ecc_code, u_char *null)
+{
+ int i;
+ struct davinci_nand_info *info = to_davinci_nand(mtd);
+ unsigned short ecc10[8];
+ unsigned short *ecc16;
+ u32 syndrome[4];
+ unsigned num_errors, corrected;
+
+ /* All bytes 0xff? It's an erased page; ignore its ECC. */
+ for (i = 0; i < 10; i++) {
+ if (ecc_code[i] != 0xff)
+ goto compare;
+ }
+ return 0;
+
+compare:
+ /* Unpack ten bytes into eight 10 bit values. We know we're
+ * little-endian, and use type punning for less shifting/masking.
+ */
+ if (WARN_ON(0x01 & (unsigned) ecc_code))
+ return -EINVAL;
+ ecc16 = (unsigned short *)ecc_code;
+
+ ecc10[0] = (ecc16[0] >> 0) & 0x3ff;
+ ecc10[1] = ((ecc16[0] >> 10) & 0x3f) | ((ecc16[1] << 6) & 0x3c0);
+ ecc10[2] = (ecc16[1] >> 4) & 0x3ff;
+ ecc10[3] = ((ecc16[1] >> 14) & 0x3) | ((ecc16[2] << 2) & 0x3fc);
+ ecc10[4] = (ecc16[2] >> 8) | ((ecc16[3] << 8) & 0x300);
+ ecc10[5] = (ecc16[3] >> 2) & 0x3ff;
+ ecc10[6] = ((ecc16[3] >> 12) & 0xf) | ((ecc16[4] << 4) & 0x3f0);
+ ecc10[7] = (ecc16[4] >> 6) & 0x3ff;
+
+ /* Tell ECC controller about the expected ECC codes. */
+ for (i = 7; i >= 0; i--)
+ davinci_nand_writel(info, NAND_4BIT_ECC_LOAD_OFFSET, ecc10[i]);
+
+ /* Allow time for syndrome calculation ... then read it.
+ * A syndrome of all zeroes means no detected errors.
+ */
+ davinci_nand_readl(info, NANDFSR_OFFSET);
+ nand_davinci_readecc_4bit(info, syndrome);
+ if (!(syndrome[0] | syndrome[1] | syndrome[2] | syndrome[3]))
+ return 0;
+
+ /* Start address calculation, and wait for it to complete.
+ * We _could_ start reading more data while this is working,
+ * to speed up the overall page read.
+ */
+ davinci_nand_writel(info, NANDFCR_OFFSET,
+ davinci_nand_readl(info, NANDFCR_OFFSET) | BIT(13));
+ for (;;) {
+ u32 fsr = davinci_nand_readl(info, NANDFSR_OFFSET);
+
+ switch ((fsr >> 8) & 0x0f) {
+ case 0: /* no error, should not happen */
+ return 0;
+ case 1: /* five or more errors detected */
+ return -EIO;
+ case 2: /* error addresses computed */
+ case 3:
+ num_errors = 1 + ((fsr >> 16) & 0x03);
+ goto correct;
+ default: /* still working on it */
+ cpu_relax();
+ continue;
+ }
+ }
+
+correct:
+ /* correct each error */
+ for (i = 0, corrected = 0; i < num_errors; i++) {
+ int error_address, error_value;
+
+ if (i > 1) {
+ error_address = davinci_nand_readl(info,
+ NAND_ERR_ADD2_OFFSET);
+ error_value = davinci_nand_readl(info,
+ NAND_ERR_ERRVAL2_OFFSET);
+ } else {
+ error_address = davinci_nand_readl(info,
+ NAND_ERR_ADD1_OFFSET);
+ error_value = davinci_nand_readl(info,
+ NAND_ERR_ERRVAL1_OFFSET);
+ }
+
+ if (i & 1) {
+ error_address >>= 16;
+ error_value >>= 16;
+ }
+ error_address &= 0x3ff;
+ error_address = (512 + 7) - error_address;
+
+ if (error_address < 512) {
+ data[error_address] ^= error_value;
+ corrected++;
+ }
+ }
+
+ return corrected;
+}
+
+/*----------------------------------------------------------------------*/
+
+/*
* NOTE: NAND boot requires ALE == EM_A[1], CLE == EM_A[2], so that's
* how these chips are normally wired. This translates to both 8 and 16
* bit busses using ALE == BIT(3) in byte addresses, and CLE == BIT(4).
@@ -294,6 +484,23 @@
/*----------------------------------------------------------------------*/
+/* An ECC layout for using 4-bit ECC with small-page flash, storing
+ * ten ECC bytes plus the manufacturer's bad block marker byte, and
+ * and not overlapping the default BBT markers.
+ */
+static struct nand_ecclayout hwecc4_small __initconst = {
+ .eccbytes = 10,
+ .eccpos = { 0, 1, 2, 3, 4,
+ /* offset 5 holds the badblock marker */
+ 6, 7,
+ 13, 14, 15, },
+ .oobfree = {
+ {.offset = 8, .length = 5, },
+ {.offset = 16, },
+ },
+};
+
+
static int __init nand_davinci_probe(struct platform_device *pdev)
{
struct davinci_nand_pdata *pdata = pdev->dev.platform_data;
@@ -306,6 +513,10 @@
uint32_t val;
nand_ecc_modes_t ecc_mode;
+ /* insist on board-specific configuration */
+ if (!pdata)
+ return -ENODEV;
+
/* which external chipselect will we be managing? */
if (pdev->id < 0 || pdev->id > 3)
return -ENODEV;
@@ -351,7 +562,7 @@
info->chip.select_chip = nand_davinci_select_chip;
/* options such as NAND_USE_FLASH_BBT or 16-bit widths */
- info->chip.options = pdata ? pdata->options : 0;
+ info->chip.options = pdata->options;
info->ioaddr = (uint32_t __force) vaddr;
@@ -360,14 +571,8 @@
info->mask_chipsel = pdata->mask_chipsel;
/* use nandboot-capable ALE/CLE masks by default */
- if (pdata && pdata->mask_ale)
- info->mask_ale = pdata->mask_cle;
- else
- info->mask_ale = MASK_ALE;
- if (pdata && pdata->mask_cle)
- info->mask_cle = pdata->mask_cle;
- else
- info->mask_cle = MASK_CLE;
+ info->mask_ale = pdata->mask_ale ? : MASK_ALE;
+ info->mask_cle = pdata->mask_cle ? : MASK_CLE;
/* Set address of hardware control function */
info->chip.cmd_ctrl = nand_davinci_hwcontrol;
@@ -377,30 +582,44 @@
info->chip.read_buf = nand_davinci_read_buf;
info->chip.write_buf = nand_davinci_write_buf;
- /* use board-specific ECC config; else, the best available */
- if (pdata)
- ecc_mode = pdata->ecc_mode;
- else
- ecc_mode = NAND_ECC_HW;
+ /* Use board-specific ECC config */
+ ecc_mode = pdata->ecc_mode;
+ ret = -EINVAL;
switch (ecc_mode) {
case NAND_ECC_NONE:
case NAND_ECC_SOFT:
+ pdata->ecc_bits = 0;
break;
case NAND_ECC_HW:
- info->chip.ecc.calculate = nand_davinci_calculate_1bit;
- info->chip.ecc.correct = nand_davinci_correct_1bit;
- info->chip.ecc.hwctl = nand_davinci_hwctl_1bit;
- info->chip.ecc.size = 512;
- info->chip.ecc.bytes = 3;
- break;
- case NAND_ECC_HW_SYNDROME:
- /* FIXME implement */
- info->chip.ecc.size = 512;
- info->chip.ecc.bytes = 10;
+ if (pdata->ecc_bits == 4) {
+ /* No sanity checks: CPUs must support this,
+ * and the chips may not use NAND_BUSWIDTH_16.
+ */
- dev_warn(&pdev->dev, "4-bit ECC nyet supported\n");
- /* FALL THROUGH */
+ /* No sharing 4-bit hardware between chipselects yet */
+ spin_lock_irq(&davinci_nand_lock);
+ if (ecc4_busy)
+ ret = -EBUSY;
+ else
+ ecc4_busy = true;
+ spin_unlock_irq(&davinci_nand_lock);
+
+ if (ret == -EBUSY)
+ goto err_ecc;
+
+ info->chip.ecc.calculate = nand_davinci_calculate_4bit;
+ info->chip.ecc.correct = nand_davinci_correct_4bit;
+ info->chip.ecc.hwctl = nand_davinci_hwctl_4bit;
+ info->chip.ecc.bytes = 10;
+ } else {
+ info->chip.ecc.calculate = nand_davinci_calculate_1bit;
+ info->chip.ecc.correct = nand_davinci_correct_1bit;
+ info->chip.ecc.hwctl = nand_davinci_hwctl_1bit;
+ info->chip.ecc.bytes = 3;
+ }
+ info->chip.ecc.size = 512;
+ break;
default:
ret = -EINVAL;
goto err_ecc;
@@ -441,12 +660,56 @@
spin_unlock_irq(&davinci_nand_lock);
/* Scan to find existence of the device(s) */
- ret = nand_scan(&info->mtd, pdata->mask_chipsel ? 2 : 1);
+ ret = nand_scan_ident(&info->mtd, pdata->mask_chipsel ? 2 : 1);
if (ret < 0) {
dev_dbg(&pdev->dev, "no NAND chip(s) found\n");
goto err_scan;
}
+ /* Update ECC layout if needed ... for 1-bit HW ECC, the default
+ * is OK, but it allocates 6 bytes when only 3 are needed (for
+ * each 512 bytes). For the 4-bit HW ECC, that default is not
+ * usable: 10 bytes are needed, not 6.
+ */
+ if (pdata->ecc_bits == 4) {
+ int chunks = info->mtd.writesize / 512;
+
+ if (!chunks || info->mtd.oobsize < 16) {
+ dev_dbg(&pdev->dev, "too small\n");
+ ret = -EINVAL;
+ goto err_scan;
+ }
+
+ /* For small page chips, preserve the manufacturer's
+ * badblock marking data ... and make sure a flash BBT
+ * table marker fits in the free bytes.
+ */
+ if (chunks == 1) {
+ info->ecclayout = hwecc4_small;
+ info->ecclayout.oobfree[1].length =
+ info->mtd.oobsize - 16;
+ goto syndrome_done;
+ }
+
+ /* For large page chips we'll be wanting to use a
+ * not-yet-implemented mode that reads OOB data
+ * before reading the body of the page, to avoid
+ * the "infix OOB" model of NAND_ECC_HW_SYNDROME
+ * (and preserve manufacturer badblock markings).
+ */
+ dev_warn(&pdev->dev, "no 4-bit ECC support yet "
+ "for large page NAND\n");
+ ret = -EIO;
+ goto err_scan;
+
+syndrome_done:
+ info->chip.ecc.layout = &info->ecclayout;
+ }
+
+ ret = nand_scan_tail(&info->mtd);
+ if (ret < 0)
+ goto err_scan;
+
if (mtd_has_partitions()) {
struct mtd_partition *mtd_parts = NULL;
int mtd_parts_nb = 0;
@@ -455,22 +718,11 @@
static const char *probes[] __initconst =
{ "cmdlinepart", NULL };
- const char *master_name;
-
- /* Set info->mtd.name = 0 temporarily */
- master_name = info->mtd.name;
- info->mtd.name = (char *)0;
-
- /* info->mtd.name == 0, means: don't bother checking
- <mtd-id> */
mtd_parts_nb = parse_mtd_partitions(&info->mtd, probes,
&mtd_parts, 0);
-
- /* Restore info->mtd.name */
- info->mtd.name = master_name;
}
- if (mtd_parts_nb <= 0 && pdata) {
+ if (mtd_parts_nb <= 0) {
mtd_parts = pdata->parts;
mtd_parts_nb = pdata->nr_parts;
}
@@ -483,7 +735,7 @@
info->partitioned = true;
}
- } else if (pdata && pdata->nr_parts) {
+ } else if (pdata->nr_parts) {
dev_warn(&pdev->dev, "ignoring %d default partitions on %s\n",
pdata->nr_parts, info->mtd.name);
}
@@ -509,6 +761,11 @@
err_clk_enable:
clk_put(info->clk);
+ spin_lock_irq(&davinci_nand_lock);
+ if (ecc_mode == NAND_ECC_HW_SYNDROME)
+ ecc4_busy = false;
+ spin_unlock_irq(&davinci_nand_lock);
+
err_ecc:
err_clk:
err_ioremap:
@@ -532,6 +789,11 @@
else
status = del_mtd_device(&info->mtd);
+ spin_lock_irq(&davinci_nand_lock);
+ if (info->chip.ecc.mode == NAND_ECC_HW_SYNDROME)
+ ecc4_busy = false;
+ spin_unlock_irq(&davinci_nand_lock);
+
iounmap(info->base);
iounmap(info->vaddr);
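
The least obvious part of the davinci 4-bit ECC support above is the 10-bit packing: eight 10-bit values are squeezed into the ten ECC bytes of hwecc4_small on write, then unpacked again (via little-endian 16-bit reads) and loaded back into the controller for correction. The following host-side sketch replays both shift patterns and checks that they round-trip; it is an illustration of the byte layout only, not driver code:

	#include <assert.h>
	#include <stdint.h>
	#include <stdio.h>

	/* Pack eight 10-bit values into ten bytes, two passes of
	 * "four values -> five bytes", using the same shifts as
	 * nand_davinci_calculate_4bit().
	 */
	static void pack10(const uint16_t v[8], uint8_t out[10])
	{
		int pass;

		for (pass = 0; pass < 2; pass++) {
			uint32_t w0 = v[4 * pass] | (uint32_t)v[4 * pass + 1] << 16;
			uint32_t w1 = v[4 * pass + 2] | (uint32_t)v[4 * pass + 3] << 16;
			uint8_t *o = out + 5 * pass;

			o[0] = w0 & 0xff;
			o[1] = ((w0 >> 8) & 0x03) | ((w0 >> 14) & 0xfc);
			o[2] = ((w0 >> 22) & 0x0f) | ((w1 << 4) & 0xf0);
			o[3] = ((w1 >> 4) & 0x3f) | ((w1 >> 10) & 0xc0);
			o[4] = (w1 >> 18) & 0xff;
		}
	}

	/* Undo the packing with the shifts used by nand_davinci_correct_4bit().
	 * The 16-bit words are assembled by hand instead of type punning, so
	 * this sketch does not depend on host endianness the way the driver
	 * (deliberately) does.
	 */
	static void unpack10(const uint8_t in[10], uint16_t v[8])
	{
		uint16_t w[5];
		int i;

		for (i = 0; i < 5; i++)
			w[i] = in[2 * i] | (uint16_t)in[2 * i + 1] << 8;

		v[0] = w[0] & 0x3ff;
		v[1] = ((w[0] >> 10) & 0x3f) | ((w[1] << 6) & 0x3c0);
		v[2] = (w[1] >> 4) & 0x3ff;
		v[3] = ((w[1] >> 14) & 0x3) | ((w[2] << 2) & 0x3fc);
		v[4] = (w[2] >> 8) | ((w[3] << 8) & 0x300);
		v[5] = (w[3] >> 2) & 0x3ff;
		v[6] = ((w[3] >> 12) & 0xf) | ((w[4] << 4) & 0x3f0);
		v[7] = (w[4] >> 6) & 0x3ff;
	}

	int main(void)
	{
		const uint16_t in[8] = { 0x155, 0x2aa, 0x001, 0x3ff,
					 0x0f0, 0x10f, 0x200, 0x07b };
		uint16_t back[8];
		uint8_t bytes[10];
		int i;

		pack10(in, bytes);
		unpack10(bytes, back);
		for (i = 0; i < 8; i++)
			assert(in[i] == back[i]);
		printf("10-bit ECC pack/unpack round trip OK\n");
		return 0;
	}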
diff --git a/drivers/mtd/nand/mxc_nand.c b/drivers/mtd/nand/mxc_nand.c
index 40c2608..76beea4 100644
--- a/drivers/mtd/nand/mxc_nand.c
+++ b/drivers/mtd/nand/mxc_nand.c
@@ -138,7 +138,14 @@
static struct nand_ecclayout nand_hw_eccoob_16 = {
.eccbytes = 5,
.eccpos = {6, 7, 8, 9, 10},
- .oobfree = {{0, 6}, {12, 4}, }
+ .oobfree = {{0, 5}, {11, 5}, }
+};
+
+static struct nand_ecclayout nand_hw_eccoob_64 = {
+ .eccbytes = 20,
+ .eccpos = {6, 7, 8, 9, 10, 22, 23, 24, 25, 26,
+ 38, 39, 40, 41, 42, 54, 55, 56, 57, 58},
+ .oobfree = {{2, 4}, {11, 10}, {27, 10}, {43, 10}, {59, 5}, }
};
#ifdef CONFIG_MTD_PARTITIONS
@@ -192,7 +199,7 @@
}
udelay(1);
}
- if (max_retries <= 0)
+ if (max_retries < 0)
DEBUG(MTD_DEBUG_LEVEL0, "%s(%d): INT not set\n",
__func__, param);
}
@@ -795,9 +802,13 @@
send_addr(host, (page_addr & 0xff), false);
if (host->pagesize_2k) {
- send_addr(host, (page_addr >> 8) & 0xFF, false);
- if (mtd->size >= 0x40000000)
+ if (mtd->size >= 0x10000000) {
+ /* paddr_8 - paddr_15 */
+ send_addr(host, (page_addr >> 8) & 0xff, false);
send_addr(host, (page_addr >> 16) & 0xff, true);
+ } else
+ /* paddr_8 - paddr_15 */
+ send_addr(host, (page_addr >> 8) & 0xff, true);
} else {
/* One more address cycle for higher density devices */
if (mtd->size >= 0x4000000) {
@@ -923,7 +934,6 @@
this->ecc.mode = NAND_ECC_HW;
this->ecc.size = 512;
this->ecc.bytes = 3;
- this->ecc.layout = &nand_hw_eccoob_8;
tmp = readw(host->regs + NFC_CONFIG1);
tmp |= NFC_ECC_EN;
writew(tmp, host->regs + NFC_CONFIG1);
@@ -957,12 +967,44 @@
this->ecc.layout = &nand_hw_eccoob_16;
}
- host->pagesize_2k = 0;
+ /* first scan to find the device and get the page size */
+ if (nand_scan_ident(mtd, 1)) {
+ err = -ENXIO;
+ goto escan;
+ }
- /* Scan to find existence of the device */
- if (nand_scan(mtd, 1)) {
- DEBUG(MTD_DEBUG_LEVEL0,
- "MXC_ND: Unable to find any NAND device.\n");
+ host->pagesize_2k = (mtd->writesize == 2048) ? 1 : 0;
+
+ if (this->ecc.mode == NAND_ECC_HW) {
+ switch (mtd->oobsize) {
+ case 8:
+ this->ecc.layout = &nand_hw_eccoob_8;
+ break;
+ case 16:
+ this->ecc.layout = &nand_hw_eccoob_16;
+ break;
+ case 64:
+ this->ecc.layout = &nand_hw_eccoob_64;
+ break;
+ default:
+ /* page size not handled by HW ECC */
+ /* switching back to soft ECC */
+ this->ecc.size = 512;
+ this->ecc.bytes = 3;
+ this->ecc.layout = &nand_hw_eccoob_8;
+ this->ecc.mode = NAND_ECC_SOFT;
+ this->ecc.calculate = NULL;
+ this->ecc.correct = NULL;
+ this->ecc.hwctl = NULL;
+ tmp = readw(host->regs + NFC_CONFIG1);
+ tmp &= ~NFC_ECC_EN;
+ writew(tmp, host->regs + NFC_CONFIG1);
+ break;
+ }
+ }
+
+ /* second phase scan */
+ if (nand_scan_tail(mtd)) {
err = -ENXIO;
goto escan;
}
@@ -985,7 +1027,7 @@
return 0;
escan:
- free_irq(host->irq, NULL);
+ free_irq(host->irq, host);
eirq:
iounmap(host->regs);
eres:
@@ -1005,7 +1047,7 @@
platform_set_drvdata(pdev, NULL);
nand_release(&host->mtd);
- free_irq(host->irq, NULL);
+ free_irq(host->irq, host);
iounmap(host->regs);
kfree(host);
diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
index 3d7ed43..8c21b89 100644
--- a/drivers/mtd/nand/nand_base.c
+++ b/drivers/mtd/nand/nand_base.c
@@ -2756,7 +2756,8 @@
* the out of band area
*/
chip->ecc.layout->oobavail = 0;
- for (i = 0; chip->ecc.layout->oobfree[i].length; i++)
+ for (i = 0; i < ARRAY_SIZE(chip->ecc.layout->oobfree)
+ && chip->ecc.layout->oobfree[i].length; i++)
chip->ecc.layout->oobavail +=
chip->ecc.layout->oobfree[i].length;
mtd->oobavail = chip->ecc.layout->oobavail;
diff --git a/drivers/mtd/nand/nand_ecc.c b/drivers/mtd/nand/nand_ecc.c
index 868147a..c0cb87d 100644
--- a/drivers/mtd/nand/nand_ecc.c
+++ b/drivers/mtd/nand/nand_ecc.c
@@ -428,8 +428,8 @@
int nand_correct_data(struct mtd_info *mtd, unsigned char *buf,
unsigned char *read_ecc, unsigned char *calc_ecc)
{
- unsigned char b0, b1, b2;
- unsigned char byte_addr, bit_addr;
+ unsigned char b0, b1, b2, bit_addr;
+ unsigned int byte_addr;
/* 256 or 512 bytes/ecc */
const uint32_t eccsize_mult =
(((struct nand_chip *)mtd->priv)->ecc.size) >> 8;
diff --git a/drivers/mtd/nand/omap2.c b/drivers/mtd/nand/omap2.c
new file mode 100644
index 0000000..0cd76f8
--- /dev/null
+++ b/drivers/mtd/nand/omap2.c
@@ -0,0 +1,776 @@
+/*
+ * Copyright © 2004 Texas Instruments, Jian Zhang <jzhang@ti.com>
+ * Copyright © 2004 Micron Technology Inc.
+ * Copyright © 2004 David Brownell
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <linux/delay.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/nand.h>
+#include <linux/mtd/partitions.h>
+#include <linux/io.h>
+
+#include <asm/dma.h>
+
+#include <mach/gpmc.h>
+#include <mach/nand.h>
+
+#define GPMC_IRQ_STATUS 0x18
+#define GPMC_ECC_CONFIG 0x1F4
+#define GPMC_ECC_CONTROL 0x1F8
+#define GPMC_ECC_SIZE_CONFIG 0x1FC
+#define GPMC_ECC1_RESULT 0x200
+
+#define DRIVER_NAME "omap2-nand"
+
+/* size (4 KiB) for IO mapping */
+#define NAND_IO_SIZE SZ_4K
+
+#define NAND_WP_OFF 0
+#define NAND_WP_BIT 0x00000010
+#define WR_RD_PIN_MONITORING 0x00600000
+
+#define GPMC_BUF_FULL 0x00000001
+#define GPMC_BUF_EMPTY 0x00000000
+
+#define NAND_Ecc_P1e (1 << 0)
+#define NAND_Ecc_P2e (1 << 1)
+#define NAND_Ecc_P4e (1 << 2)
+#define NAND_Ecc_P8e (1 << 3)
+#define NAND_Ecc_P16e (1 << 4)
+#define NAND_Ecc_P32e (1 << 5)
+#define NAND_Ecc_P64e (1 << 6)
+#define NAND_Ecc_P128e (1 << 7)
+#define NAND_Ecc_P256e (1 << 8)
+#define NAND_Ecc_P512e (1 << 9)
+#define NAND_Ecc_P1024e (1 << 10)
+#define NAND_Ecc_P2048e (1 << 11)
+
+#define NAND_Ecc_P1o (1 << 16)
+#define NAND_Ecc_P2o (1 << 17)
+#define NAND_Ecc_P4o (1 << 18)
+#define NAND_Ecc_P8o (1 << 19)
+#define NAND_Ecc_P16o (1 << 20)
+#define NAND_Ecc_P32o (1 << 21)
+#define NAND_Ecc_P64o (1 << 22)
+#define NAND_Ecc_P128o (1 << 23)
+#define NAND_Ecc_P256o (1 << 24)
+#define NAND_Ecc_P512o (1 << 25)
+#define NAND_Ecc_P1024o (1 << 26)
+#define NAND_Ecc_P2048o (1 << 27)
+
+#define TF(value) (value ? 1 : 0)
+
+#define P2048e(a) (TF(a & NAND_Ecc_P2048e) << 0)
+#define P2048o(a) (TF(a & NAND_Ecc_P2048o) << 1)
+#define P1e(a) (TF(a & NAND_Ecc_P1e) << 2)
+#define P1o(a) (TF(a & NAND_Ecc_P1o) << 3)
+#define P2e(a) (TF(a & NAND_Ecc_P2e) << 4)
+#define P2o(a) (TF(a & NAND_Ecc_P2o) << 5)
+#define P4e(a) (TF(a & NAND_Ecc_P4e) << 6)
+#define P4o(a) (TF(a & NAND_Ecc_P4o) << 7)
+
+#define P8e(a) (TF(a & NAND_Ecc_P8e) << 0)
+#define P8o(a) (TF(a & NAND_Ecc_P8o) << 1)
+#define P16e(a) (TF(a & NAND_Ecc_P16e) << 2)
+#define P16o(a) (TF(a & NAND_Ecc_P16o) << 3)
+#define P32e(a) (TF(a & NAND_Ecc_P32e) << 4)
+#define P32o(a) (TF(a & NAND_Ecc_P32o) << 5)
+#define P64e(a) (TF(a & NAND_Ecc_P64e) << 6)
+#define P64o(a) (TF(a & NAND_Ecc_P64o) << 7)
+
+#define P128e(a) (TF(a & NAND_Ecc_P128e) << 0)
+#define P128o(a) (TF(a & NAND_Ecc_P128o) << 1)
+#define P256e(a) (TF(a & NAND_Ecc_P256e) << 2)
+#define P256o(a) (TF(a & NAND_Ecc_P256o) << 3)
+#define P512e(a) (TF(a & NAND_Ecc_P512e) << 4)
+#define P512o(a) (TF(a & NAND_Ecc_P512o) << 5)
+#define P1024e(a) (TF(a & NAND_Ecc_P1024e) << 6)
+#define P1024o(a) (TF(a & NAND_Ecc_P1024o) << 7)
+
+#define P8e_s(a) (TF(a & NAND_Ecc_P8e) << 0)
+#define P8o_s(a) (TF(a & NAND_Ecc_P8o) << 1)
+#define P16e_s(a) (TF(a & NAND_Ecc_P16e) << 2)
+#define P16o_s(a) (TF(a & NAND_Ecc_P16o) << 3)
+#define P1e_s(a) (TF(a & NAND_Ecc_P1e) << 4)
+#define P1o_s(a) (TF(a & NAND_Ecc_P1o) << 5)
+#define P2e_s(a) (TF(a & NAND_Ecc_P2e) << 6)
+#define P2o_s(a) (TF(a & NAND_Ecc_P2o) << 7)
+
+#define P4e_s(a) (TF(a & NAND_Ecc_P4e) << 0)
+#define P4o_s(a) (TF(a & NAND_Ecc_P4o) << 1)
+
+#ifdef CONFIG_MTD_PARTITIONS
+static const char *part_probes[] = { "cmdlinepart", NULL };
+#endif
+
+struct omap_nand_info {
+ struct nand_hw_control controller;
+ struct omap_nand_platform_data *pdata;
+ struct mtd_info mtd;
+ struct mtd_partition *parts;
+ struct nand_chip nand;
+ struct platform_device *pdev;
+
+ int gpmc_cs;
+ unsigned long phys_base;
+ void __iomem *gpmc_cs_baseaddr;
+ void __iomem *gpmc_baseaddr;
+};
+
+/**
+ * omap_nand_wp - This function enables or disables the Write Protect feature
+ * @mtd: MTD device structure
+ * @mode: WP ON/OFF
+ */
+static void omap_nand_wp(struct mtd_info *mtd, int mode)
+{
+ struct omap_nand_info *info = container_of(mtd,
+ struct omap_nand_info, mtd);
+
+ unsigned long config = __raw_readl(info->gpmc_baseaddr + GPMC_CONFIG);
+
+ if (mode)
+ config &= ~(NAND_WP_BIT); /* WP is ON */
+ else
+ config |= (NAND_WP_BIT); /* WP is OFF */
+
+ __raw_writel(config, (info->gpmc_baseaddr + GPMC_CONFIG));
+}
+
+/**
+ * omap_hwcontrol - hardware specific access to control-lines
+ * @mtd: MTD device structure
+ * @cmd: command to device
+ * @ctrl:
+ * NAND_NCE: bit 0 -> don't care
+ * NAND_CLE: bit 1 -> Command Latch
+ * NAND_ALE: bit 2 -> Address Latch
+ *
+ * NOTE: boards may use different bits for these!!
+ */
+static void omap_hwcontrol(struct mtd_info *mtd, int cmd, unsigned int ctrl)
+{
+ struct omap_nand_info *info = container_of(mtd,
+ struct omap_nand_info, mtd);
+ switch (ctrl) {
+ case NAND_CTRL_CHANGE | NAND_CTRL_CLE:
+ info->nand.IO_ADDR_W = info->gpmc_cs_baseaddr +
+ GPMC_CS_NAND_COMMAND;
+ info->nand.IO_ADDR_R = info->gpmc_cs_baseaddr +
+ GPMC_CS_NAND_DATA;
+ break;
+
+ case NAND_CTRL_CHANGE | NAND_CTRL_ALE:
+ info->nand.IO_ADDR_W = info->gpmc_cs_baseaddr +
+ GPMC_CS_NAND_ADDRESS;
+ info->nand.IO_ADDR_R = info->gpmc_cs_baseaddr +
+ GPMC_CS_NAND_DATA;
+ break;
+
+ case NAND_CTRL_CHANGE | NAND_NCE:
+ info->nand.IO_ADDR_W = info->gpmc_cs_baseaddr +
+ GPMC_CS_NAND_DATA;
+ info->nand.IO_ADDR_R = info->gpmc_cs_baseaddr +
+ GPMC_CS_NAND_DATA;
+ break;
+ }
+
+ if (cmd != NAND_CMD_NONE)
+ __raw_writeb(cmd, info->nand.IO_ADDR_W);
+}
+
+/**
+ * omap_read_buf16 - read data from NAND controller into buffer
+ * @mtd: MTD device structure
+ * @buf: buffer to store data
+ * @len: number of bytes to read
+ */
+static void omap_read_buf16(struct mtd_info *mtd, u_char *buf, int len)
+{
+ struct nand_chip *nand = mtd->priv;
+
+ __raw_readsw(nand->IO_ADDR_R, buf, len / 2);
+}
+
+/**
+ * omap_write_buf16 - write buffer to NAND controller
+ * @mtd: MTD device structure
+ * @buf: data buffer
+ * @len: number of bytes to write
+ */
+static void omap_write_buf16(struct mtd_info *mtd, const u_char * buf, int len)
+{
+ struct omap_nand_info *info = container_of(mtd,
+ struct omap_nand_info, mtd);
+ u16 *p = (u16 *) buf;
+
+ /* FIXME try bursts of writesw() or DMA ... */
+ len >>= 1;
+
+ while (len--) {
+ writew(*p++, info->nand.IO_ADDR_W);
+
+ while (GPMC_BUF_EMPTY == (readl(info->gpmc_baseaddr +
+ GPMC_STATUS) & GPMC_BUF_FULL))
+ ;
+ }
+}
+/**
+ * omap_verify_buf - Verify chip data against buffer
+ * @mtd: MTD device structure
+ * @buf: buffer containing the data to compare
+ * @len: number of bytes to compare
+ */
+static int omap_verify_buf(struct mtd_info *mtd, const u_char * buf, int len)
+{
+ struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
+ mtd);
+ u16 *p = (u16 *) buf;
+
+ len >>= 1;
+ while (len--) {
+ if (*p++ != cpu_to_le16(readw(info->nand.IO_ADDR_R)))
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+#ifdef CONFIG_MTD_NAND_OMAP_HWECC
+/**
+ * omap_hwecc_init - Initialize the HW ECC for NAND flash in GPMC controller
+ * @mtd: MTD device structure
+ */
+static void omap_hwecc_init(struct mtd_info *mtd)
+{
+ struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
+ mtd);
+ struct nand_chip *chip = mtd->priv;
+ unsigned long val = 0x0;
+
+ /* Read from ECC Control Register */
+ val = __raw_readl(info->gpmc_baseaddr + GPMC_ECC_CONTROL);
+ /* Clear all ECC | Enable Reg1 */
+ val = ((0x00000001<<8) | 0x00000001);
+ __raw_writel(val, info->gpmc_baseaddr + GPMC_ECC_CONTROL);
+
+ /* Read from ECC Size Config Register */
+ val = __raw_readl(info->gpmc_baseaddr + GPMC_ECC_SIZE_CONFIG);
+ /* ECCSIZE1=512 | Select eccResultsize[0-3] */
+ val = ((((chip->ecc.size >> 1) - 1) << 22) | (0x0000000F));
+ __raw_writel(val, info->gpmc_baseaddr + GPMC_ECC_SIZE_CONFIG);
+}
+
+/**
+ * gen_true_ecc - This function will generate the true ECC value
+ * @ecc_buf: buffer to store ecc code
+ *
+ * This generated true ECC value can be used when correcting
+ * data read from NAND flash memory core
+ */
+static void gen_true_ecc(u8 *ecc_buf)
+{
+ u32 tmp = ecc_buf[0] | (ecc_buf[1] << 16) |
+ ((ecc_buf[2] & 0xF0) << 20) | ((ecc_buf[2] & 0x0F) << 8);
+
+ ecc_buf[0] = ~(P64o(tmp) | P64e(tmp) | P32o(tmp) | P32e(tmp) |
+ P16o(tmp) | P16e(tmp) | P8o(tmp) | P8e(tmp));
+ ecc_buf[1] = ~(P1024o(tmp) | P1024e(tmp) | P512o(tmp) | P512e(tmp) |
+ P256o(tmp) | P256e(tmp) | P128o(tmp) | P128e(tmp));
+ ecc_buf[2] = ~(P4o(tmp) | P4e(tmp) | P2o(tmp) | P2e(tmp) | P1o(tmp) |
+ P1e(tmp) | P2048o(tmp) | P2048e(tmp));
+}
+
+/**
+ * omap_compare_ecc - Detect (2 bits) and correct (1 bit) error in data
+ * @ecc_data1: ecc code from nand spare area
+ * @ecc_data2: ecc code from hardware register obtained from hardware ecc
+ * @page_data: page data
+ *
+ * This function compares two ECCs and indicates if there is an error.
+ * If the error can be corrected, it is corrected in the buffer.
+ */
+static int omap_compare_ecc(u8 *ecc_data1, /* read from NAND memory */
+ u8 *ecc_data2, /* read from register */
+ u8 *page_data)
+{
+ uint i;
+ u8 tmp0_bit[8], tmp1_bit[8], tmp2_bit[8];
+ u8 comp0_bit[8], comp1_bit[8], comp2_bit[8];
+ u8 ecc_bit[24];
+ u8 ecc_sum = 0;
+ u8 find_bit = 0;
+ uint find_byte = 0;
+ int isEccFF;
+
+ isEccFF = ((*(u32 *)ecc_data1 & 0xFFFFFF) == 0xFFFFFF);
+
+ gen_true_ecc(ecc_data1);
+ gen_true_ecc(ecc_data2);
+
+ for (i = 0; i <= 2; i++) {
+ *(ecc_data1 + i) = ~(*(ecc_data1 + i));
+ *(ecc_data2 + i) = ~(*(ecc_data2 + i));
+ }
+
+ for (i = 0; i < 8; i++) {
+ tmp0_bit[i] = *ecc_data1 % 2;
+ *ecc_data1 = *ecc_data1 / 2;
+ }
+
+ for (i = 0; i < 8; i++) {
+ tmp1_bit[i] = *(ecc_data1 + 1) % 2;
+ *(ecc_data1 + 1) = *(ecc_data1 + 1) / 2;
+ }
+
+ for (i = 0; i < 8; i++) {
+ tmp2_bit[i] = *(ecc_data1 + 2) % 2;
+ *(ecc_data1 + 2) = *(ecc_data1 + 2) / 2;
+ }
+
+ for (i = 0; i < 8; i++) {
+ comp0_bit[i] = *ecc_data2 % 2;
+ *ecc_data2 = *ecc_data2 / 2;
+ }
+
+ for (i = 0; i < 8; i++) {
+ comp1_bit[i] = *(ecc_data2 + 1) % 2;
+ *(ecc_data2 + 1) = *(ecc_data2 + 1) / 2;
+ }
+
+ for (i = 0; i < 8; i++) {
+ comp2_bit[i] = *(ecc_data2 + 2) % 2;
+ *(ecc_data2 + 2) = *(ecc_data2 + 2) / 2;
+ }
+
+ for (i = 0; i < 6; i++)
+ ecc_bit[i] = tmp2_bit[i + 2] ^ comp2_bit[i + 2];
+
+ for (i = 0; i < 8; i++)
+ ecc_bit[i + 6] = tmp0_bit[i] ^ comp0_bit[i];
+
+ for (i = 0; i < 8; i++)
+ ecc_bit[i + 14] = tmp1_bit[i] ^ comp1_bit[i];
+
+ ecc_bit[22] = tmp2_bit[0] ^ comp2_bit[0];
+ ecc_bit[23] = tmp2_bit[1] ^ comp2_bit[1];
+
+ for (i = 0; i < 24; i++)
+ ecc_sum += ecc_bit[i];
+
+ switch (ecc_sum) {
+ case 0:
+ /* Not reached because this function is not called if
+ * ECC values are equal
+ */
+ return 0;
+
+ case 1:
+ /* Uncorrectable error */
+ DEBUG(MTD_DEBUG_LEVEL0, "ECC UNCORRECTED_ERROR 1\n");
+ return -1;
+
+ case 11:
+ /* UN-Correctable error */
+ DEBUG(MTD_DEBUG_LEVEL0, "ECC UNCORRECTED_ERROR B\n");
+ return -1;
+
+ case 12:
+ /* Correctable error */
+ find_byte = (ecc_bit[23] << 8) +
+ (ecc_bit[21] << 7) +
+ (ecc_bit[19] << 6) +
+ (ecc_bit[17] << 5) +
+ (ecc_bit[15] << 4) +
+ (ecc_bit[13] << 3) +
+ (ecc_bit[11] << 2) +
+ (ecc_bit[9] << 1) +
+ ecc_bit[7];
+
+ find_bit = (ecc_bit[5] << 2) + (ecc_bit[3] << 1) + ecc_bit[1];
+
+ DEBUG(MTD_DEBUG_LEVEL0, "Correcting single bit ECC error at "
+ "offset: %d, bit: %d\n", find_byte, find_bit);
+
+ page_data[find_byte] ^= (1 << find_bit);
+
+ return 0;
+ default:
+ if (isEccFF) {
+ if (ecc_data2[0] == 0 &&
+ ecc_data2[1] == 0 &&
+ ecc_data2[2] == 0)
+ return 0;
+ }
+ DEBUG(MTD_DEBUG_LEVEL0, "UNCORRECTED_ERROR default\n");
+ return -1;
+ }
+}
+
+/**
+ * omap_correct_data - Compares the ECC read with HW generated ECC
+ * @mtd: MTD device structure
+ * @dat: page data
+ * @read_ecc: ecc read from nand flash
+ * @calc_ecc: ecc read from HW ECC registers
+ *
+ * Compares the ECC read from the NAND spare area with the ECC register
+ * values and, if they mismatch, calls 'omap_compare_ecc' for error
+ * detection and correction.
+ */
+static int omap_correct_data(struct mtd_info *mtd, u_char *dat,
+ u_char *read_ecc, u_char *calc_ecc)
+{
+ struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
+ mtd);
+ int blockCnt = 0, i = 0, ret = 0;
+
+ /* Ex NAND_ECC_HW12_2048 */
+ if ((info->nand.ecc.mode == NAND_ECC_HW) &&
+ (info->nand.ecc.size == 2048))
+ blockCnt = 4;
+ else
+ blockCnt = 1;
+
+ for (i = 0; i < blockCnt; i++) {
+ if (memcmp(read_ecc, calc_ecc, 3) != 0) {
+ ret = omap_compare_ecc(read_ecc, calc_ecc, dat);
+ if (ret < 0)
+ return ret;
+ }
+ read_ecc += 3;
+ calc_ecc += 3;
+ dat += 512;
+ }
+ return 0;
+}
+
+/**
+ * omap_calculate_ecc - Generate non-inverted ECC bytes.
+ * @mtd: MTD device structure
+ * @dat: The pointer to data on which ecc is computed
+ * @ecc_code: The ecc_code buffer
+ *
+ * Using noninverted ECC can be considered ugly since writing a blank
+ * page, i.e. padding, will clear the ECC bytes. This is no problem as
+ * long as nobody is trying to write data on the seemingly unused page. Reading
+ * an erased page will produce an ECC mismatch between generated and read
+ * ECC bytes that has to be dealt with separately.
+ */
+static int omap_calculate_ecc(struct mtd_info *mtd, const u_char *dat,
+ u_char *ecc_code)
+{
+ struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
+ mtd);
+ unsigned long val = 0x0;
+ unsigned long reg;
+
+ /* Start Reading from HW ECC1_Result = 0x200 */
+ reg = (unsigned long)(info->gpmc_baseaddr + GPMC_ECC1_RESULT);
+ val = __raw_readl(reg);
+ *ecc_code++ = val; /* P128e, ..., P1e */
+ *ecc_code++ = val >> 16; /* P128o, ..., P1o */
+ /* P2048o, P1024o, P512o, P256o, P2048e, P1024e, P512e, P256e */
+ *ecc_code++ = ((val >> 8) & 0x0f) | ((val >> 20) & 0xf0);
+ reg += 4;
+
+ return 0;
+}
+
+/**
+ * omap_enable_hwecc - This function enables the hardware ecc functionality
+ * @mtd: MTD device structure
+ * @mode: Read/Write mode
+ */
+static void omap_enable_hwecc(struct mtd_info *mtd, int mode)
+{
+ struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
+ mtd);
+ struct nand_chip *chip = mtd->priv;
+ unsigned int dev_width = (chip->options & NAND_BUSWIDTH_16) ? 1 : 0;
+ unsigned long val = __raw_readl(info->gpmc_baseaddr + GPMC_ECC_CONFIG);
+
+ switch (mode) {
+ case NAND_ECC_READ:
+ __raw_writel(0x101, info->gpmc_baseaddr + GPMC_ECC_CONTROL);
+ /* (ECC 16 or 8 bit col) | ( CS ) | ECC Enable */
+ val = (dev_width << 7) | (info->gpmc_cs << 1) | (0x1);
+ break;
+ case NAND_ECC_READSYN:
+ __raw_writel(0x100, info->gpmc_baseaddr + GPMC_ECC_CONTROL);
+ /* (ECC 16 or 8 bit col) | ( CS ) | ECC Enable */
+ val = (dev_width << 7) | (info->gpmc_cs << 1) | (0x1);
+ break;
+ case NAND_ECC_WRITE:
+ __raw_writel(0x101, info->gpmc_baseaddr + GPMC_ECC_CONTROL);
+ /* (ECC 16 or 8 bit col) | ( CS ) | ECC Enable */
+ val = (dev_width << 7) | (info->gpmc_cs << 1) | (0x1);
+ break;
+ default:
+ DEBUG(MTD_DEBUG_LEVEL0, "Error: Unrecognized Mode[%d]!\n",
+ mode);
+ break;
+ }
+
+ __raw_writel(val, info->gpmc_baseaddr + GPMC_ECC_CONFIG);
+}
+#endif
+
+/**
+ * omap_wait - wait until the command is done
+ * @mtd: MTD device structure
+ * @chip: NAND Chip structure
+ *
+ * Wait function is called during Program and erase operations and,
+ * the way it is called from the MTD layer, we should wait until the NAND
+ * chip is ready after the programming/erase operation has completed.
+ *
+ * Erase can take up to 400ms and program up to 20ms according to
+ * general NAND and SmartMedia specs
+ */
+static int omap_wait(struct mtd_info *mtd, struct nand_chip *chip)
+{
+ struct nand_chip *this = mtd->priv;
+ struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
+ mtd);
+ unsigned long timeo = jiffies;
+ int status, state = this->state;
+
+ if (state == FL_ERASING)
+ timeo += (HZ * 400) / 1000;
+ else
+ timeo += (HZ * 20) / 1000;
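+ /* e.g. with HZ=100 this allows 40 jiffies (400ms) for an erase and
+ * 2 jiffies (20ms) for a program before we stop polling the status
+ */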
+
+ this->IO_ADDR_W = (void *) info->gpmc_cs_baseaddr +
+ GPMC_CS_NAND_COMMAND;
+ this->IO_ADDR_R = (void *) info->gpmc_cs_baseaddr + GPMC_CS_NAND_DATA;
+
+ __raw_writeb(NAND_CMD_STATUS & 0xFF, this->IO_ADDR_W);
+
+ while (time_before(jiffies, timeo)) {
+ status = __raw_readb(this->IO_ADDR_R);
+ if (!(status & 0x40))
+ break;
+ }
+ return status;
+}
+
+/**
+ * omap_dev_ready - checks device ready/busy status via the GPMC IRQ status
+ * @mtd: MTD device structure
+ */
+static int omap_dev_ready(struct mtd_info *mtd)
+{
+ struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
+ mtd);
+ unsigned int val = __raw_readl(info->gpmc_baseaddr + GPMC_IRQ_STATUS);
+
+ if ((val & 0x100) == 0x100) {
+ /* Clear IRQ Interrupt */
+ val |= 0x100;
+ val &= ~(0x0);
+ __raw_writel(val, info->gpmc_baseaddr + GPMC_IRQ_STATUS);
+ } else {
+ unsigned int cnt = 0;
+ while (cnt++ < 0x1FF) {
+ if ((val & 0x100) == 0x100)
+ return 0;
+ val = __raw_readl(info->gpmc_baseaddr +
+ GPMC_IRQ_STATUS);
+ }
+ }
+
+ return 1;
+}
+
+static int __devinit omap_nand_probe(struct platform_device *pdev)
+{
+ struct omap_nand_info *info;
+ struct omap_nand_platform_data *pdata;
+ int err;
+ unsigned long val;
+
+
+ pdata = pdev->dev.platform_data;
+ if (pdata == NULL) {
+ dev_err(&pdev->dev, "platform data missing\n");
+ return -ENODEV;
+ }
+
+ info = kzalloc(sizeof(struct omap_nand_info), GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, info);
+
+ spin_lock_init(&info->controller.lock);
+ init_waitqueue_head(&info->controller.wq);
+
+ info->pdev = pdev;
+
+ info->gpmc_cs = pdata->cs;
+ info->gpmc_baseaddr = pdata->gpmc_baseaddr;
+ info->gpmc_cs_baseaddr = pdata->gpmc_cs_baseaddr;
+
+ info->mtd.priv = &info->nand;
+ info->mtd.name = dev_name(&pdev->dev);
+ info->mtd.owner = THIS_MODULE;
+
+ err = gpmc_cs_request(info->gpmc_cs, NAND_IO_SIZE, &info->phys_base);
+ if (err < 0) {
+ dev_err(&pdev->dev, "Cannot request GPMC CS\n");
+ goto out_free_info;
+ }
+
+ /* Enable RD PIN Monitoring Reg */
+ if (pdata->dev_ready) {
+ val = gpmc_cs_read_reg(info->gpmc_cs, GPMC_CS_CONFIG1);
+ val |= WR_RD_PIN_MONITORING;
+ gpmc_cs_write_reg(info->gpmc_cs, GPMC_CS_CONFIG1, val);
+ }
+
+ val = gpmc_cs_read_reg(info->gpmc_cs, GPMC_CS_CONFIG7);
+ val &= ~(0xf << 8);
+ val |= (0xc & 0xf) << 8;
+ gpmc_cs_write_reg(info->gpmc_cs, GPMC_CS_CONFIG7, val);
+
+ /* NAND write protect off */
+ omap_nand_wp(&info->mtd, NAND_WP_OFF);
+
+ if (!request_mem_region(info->phys_base, NAND_IO_SIZE,
+ pdev->dev.driver->name)) {
+ err = -EBUSY;
+ goto out_free_cs;
+ }
+
+ info->nand.IO_ADDR_R = ioremap(info->phys_base, NAND_IO_SIZE);
+ if (!info->nand.IO_ADDR_R) {
+ err = -ENOMEM;
+ goto out_release_mem_region;
+ }
+ info->nand.controller = &info->controller;
+
+ info->nand.IO_ADDR_W = info->nand.IO_ADDR_R;
+ info->nand.cmd_ctrl = omap_hwcontrol;
+
+ /* REVISIT: only supports 16-bit NAND flash */
+
+ info->nand.read_buf = omap_read_buf16;
+ info->nand.write_buf = omap_write_buf16;
+ info->nand.verify_buf = omap_verify_buf;
+
+ /*
+ * If RDY/BSY line is connected to OMAP then use the omap ready
+ * function and the generic nand_wait function which reads the status
+ * register after monitoring the RDY/BSY line. Otherwise use a standard
+ * chip delay which is slightly more than tR (AC Timing) of the NAND
+ * device and read the status register until you get a failure or success.
+ */
+ if (pdata->dev_ready) {
+ info->nand.dev_ready = omap_dev_ready;
+ info->nand.chip_delay = 0;
+ } else {
+ info->nand.waitfunc = omap_wait;
+ info->nand.chip_delay = 50;
+ }
+
+ info->nand.options |= NAND_SKIP_BBTSCAN;
+ if ((gpmc_cs_read_reg(info->gpmc_cs, GPMC_CS_CONFIG1) & 0x3000)
+ == 0x1000)
+ info->nand.options |= NAND_BUSWIDTH_16;
+
+#ifdef CONFIG_MTD_NAND_OMAP_HWECC
+ info->nand.ecc.bytes = 3;
+ info->nand.ecc.size = 512;
+ info->nand.ecc.calculate = omap_calculate_ecc;
+ info->nand.ecc.hwctl = omap_enable_hwecc;
+ info->nand.ecc.correct = omap_correct_data;
+ info->nand.ecc.mode = NAND_ECC_HW;
+
+ /* init HW ECC */
+ omap_hwecc_init(&info->mtd);
+#else
+ info->nand.ecc.mode = NAND_ECC_SOFT;
+#endif
+
+ /* DIP switches on some boards change between 8 and 16 bit
+ * bus widths for flash. Try the other width if the first try fails.
+ */
+ if (nand_scan(&info->mtd, 1)) {
+ info->nand.options ^= NAND_BUSWIDTH_16;
+ if (nand_scan(&info->mtd, 1)) {
+ err = -ENXIO;
+ goto out_release_mem_region;
+ }
+ }
+
+#ifdef CONFIG_MTD_PARTITIONS
+ err = parse_mtd_partitions(&info->mtd, part_probes, &info->parts, 0);
+ if (err > 0)
+ add_mtd_partitions(&info->mtd, info->parts, err);
+ else if (pdata->parts)
+ add_mtd_partitions(&info->mtd, pdata->parts, pdata->nr_parts);
+ else
+#endif
+ add_mtd_device(&info->mtd);
+
+ platform_set_drvdata(pdev, &info->mtd);
+
+ return 0;
+
+out_release_mem_region:
+ release_mem_region(info->phys_base, NAND_IO_SIZE);
+out_free_cs:
+ gpmc_cs_free(info->gpmc_cs);
+out_free_info:
+ kfree(info);
+
+ return err;
+}
+
+static int omap_nand_remove(struct platform_device *pdev)
+{
+ struct mtd_info *mtd = platform_get_drvdata(pdev);
+ struct omap_nand_info *info = mtd->priv;
+
+ platform_set_drvdata(pdev, NULL);
+ /* Release NAND device, its internal structures and partitions */
+ nand_release(&info->mtd);
+ iounmap(info->nand.IO_ADDR_R);
+ kfree(&info->mtd);
+ return 0;
+}
+
+static struct platform_driver omap_nand_driver = {
+ .probe = omap_nand_probe,
+ .remove = omap_nand_remove,
+ .driver = {
+ .name = DRIVER_NAME,
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init omap_nand_init(void)
+{
+ printk(KERN_INFO "%s driver initializing\n", DRIVER_NAME);
+ return platform_driver_register(&omap_nand_driver);
+}
+
+static void __exit omap_nand_exit(void)
+{
+ platform_driver_unregister(&omap_nand_driver);
+}
+
+module_init(omap_nand_init);
+module_exit(omap_nand_exit);
+
+MODULE_ALIAS(DRIVER_NAME);
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Glue layer for NAND flash on TI OMAP boards");
diff --git a/drivers/mtd/nand/orion_nand.c b/drivers/mtd/nand/orion_nand.c
index c2dfd3e..7ad9722 100644
--- a/drivers/mtd/nand/orion_nand.c
+++ b/drivers/mtd/nand/orion_nand.c
@@ -47,6 +47,28 @@
writeb(cmd, nc->IO_ADDR_W + offs);
}
+static void orion_nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
+{
+ struct nand_chip *chip = mtd->priv;
+ void __iomem *io_base = chip->IO_ADDR_R;
+ uint64_t *buf64;
+ int i = 0;
+
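+ /* Read byte-wise until the destination is 64-bit aligned, then pull
+ * eight bytes at a time from the data register with ldrd, and mop up
+ * any remaining bytes one at a time.
+ */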
+ while (len && (unsigned long)buf & 7) {
+ *buf++ = readb(io_base);
+ len--;
+ }
+ buf64 = (uint64_t *)buf;
+ while (i < len/8) {
+ uint64_t x;
+ asm ("ldrd\t%0, [%1]" : "=r" (x) : "r" (io_base));
+ buf64[i++] = x;
+ }
+ i *= 8;
+ while (i < len)
+ buf[i++] = readb(io_base);
+}
+
static int __init orion_nand_probe(struct platform_device *pdev)
{
struct mtd_info *mtd;
@@ -83,6 +105,7 @@
nc->priv = board;
nc->IO_ADDR_R = nc->IO_ADDR_W = io_base;
nc->cmd_ctrl = orion_nand_cmd_ctrl;
+ nc->read_buf = orion_nand_read_buf;
nc->ecc.mode = NAND_ECC_SOFT;
if (board->chip_delay)
diff --git a/drivers/mtd/nand/plat_nand.c b/drivers/mtd/nand/plat_nand.c
index 86e1d08..4e16c6f 100644
--- a/drivers/mtd/nand/plat_nand.c
+++ b/drivers/mtd/nand/plat_nand.c
@@ -61,6 +61,8 @@
data->chip.cmd_ctrl = pdata->ctrl.cmd_ctrl;
data->chip.dev_ready = pdata->ctrl.dev_ready;
data->chip.select_chip = pdata->ctrl.select_chip;
+ data->chip.write_buf = pdata->ctrl.write_buf;
+ data->chip.read_buf = pdata->ctrl.read_buf;
data->chip.chip_delay = pdata->chip.chip_delay;
data->chip.options |= pdata->chip.options;
@@ -70,6 +72,13 @@
platform_set_drvdata(pdev, data);
+ /* Handle any platform specific setup */
+ if (pdata->ctrl.probe) {
+ res = pdata->ctrl.probe(pdev);
+ if (res)
+ goto out;
+ }
+
/* Scan to find existance of the device */
if (nand_scan(&data->mtd, 1)) {
res = -ENXIO;
@@ -86,6 +95,8 @@
return 0;
}
}
+ if (pdata->chip.set_parts)
+ pdata->chip.set_parts(data->mtd.size, &pdata->chip);
if (pdata->chip.partitions) {
data->parts = pdata->chip.partitions;
res = add_mtd_partitions(&data->mtd, data->parts,
@@ -99,6 +110,8 @@
nand_release(&data->mtd);
out:
+ if (pdata->ctrl.remove)
+ pdata->ctrl.remove(pdev);
platform_set_drvdata(pdev, NULL);
iounmap(data->io_base);
kfree(data);
@@ -111,15 +124,15 @@
static int __devexit plat_nand_remove(struct platform_device *pdev)
{
struct plat_nand_data *data = platform_get_drvdata(pdev);
-#ifdef CONFIG_MTD_PARTITIONS
struct platform_nand_data *pdata = pdev->dev.platform_data;
-#endif
nand_release(&data->mtd);
#ifdef CONFIG_MTD_PARTITIONS
if (data->parts && data->parts != pdata->chip.partitions)
kfree(data->parts);
#endif
+ if (pdata->ctrl.remove)
+ pdata->ctrl.remove(pdev);
iounmap(data->io_base);
kfree(data);
@@ -128,7 +141,7 @@
static struct platform_driver plat_nand_driver = {
.probe = plat_nand_probe,
- .remove = plat_nand_remove,
+ .remove = __devexit_p(plat_nand_remove),
.driver = {
.name = "gen_nand",
.owner = THIS_MODULE,
diff --git a/drivers/mtd/nand/s3c2410.c b/drivers/mtd/nand/s3c2410.c
index 8e375d5..11dc7e6 100644
--- a/drivers/mtd/nand/s3c2410.c
+++ b/drivers/mtd/nand/s3c2410.c
@@ -74,6 +74,14 @@
struct s3c2410_nand_info;
+/**
+ * struct s3c2410_nand_mtd - driver MTD structure
+ * @mtd: The MTD instance to pass to the MTD layer.
+ * @chip: The NAND chip information.
+ * @set: The platform information supplied for this set of NAND chips.
+ * @info: Link back to the hardware information.
+ * @scan_res: The result from calling nand_scan_ident().
+*/
struct s3c2410_nand_mtd {
struct mtd_info mtd;
struct nand_chip chip;
@@ -90,6 +98,21 @@
/* overview of the s3c2410 nand state */
+/**
+ * struct s3c2410_nand_info - NAND controller state.
+ * @mtds: An array of MTD instances on this controller.
+ * @platform: The platform data for this board.
+ * @device: The platform device we bound to.
+ * @area: The IO area resource that came from request_mem_region().
+ * @clk: The clock resource for this controller.
+ * @regs: The area mapped for the hardware registers described by @area.
+ * @sel_reg: Pointer to the register controlling the NAND selection.
+ * @sel_bit: The bit in @sel_reg to select the NAND chip.
+ * @mtd_count: The number of MTDs created from this controller.
+ * @save_sel: The contents of @sel_reg to be saved over suspend.
+ * @clk_rate: The clock rate from @clk.
+ * @cpu_type: The exact type of this controller.
+ */
struct s3c2410_nand_info {
/* mtd info */
struct nand_hw_control controller;
@@ -145,12 +168,19 @@
#define NS_IN_KHZ 1000000
+/**
+ * s3c_nand_calc_rate - calculate timing data.
+ * @wanted: The cycle time in nanoseconds.
+ * @clk: The clock rate in kHz.
+ * @max: The maximum divider value.
+ *
+ * Calculate the timing value from the given parameters.
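+ *
+ * For example, a wanted cycle time of 40ns from a 133000kHz clock works
+ * out to DIV_ROUND_UP(40 * 133000, 1000000) = 6 cycles.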
+ */
static int s3c_nand_calc_rate(int wanted, unsigned long clk, int max)
{
int result;
- result = (wanted * clk) / NS_IN_KHZ;
- result++;
+ result = DIV_ROUND_UP((wanted * clk), NS_IN_KHZ);
pr_debug("result %d from %ld, %d\n", result, clk, wanted);
@@ -169,13 +199,21 @@
/* controller setup */
+/**
+ * s3c2410_nand_setrate - setup controller timing information.
+ * @info: The controller instance.
+ *
+ * Given the information supplied by the platform, calculate and set
+ * the necessary timing registers in the hardware to generate the
+ * required timing cycles.
+ */
static int s3c2410_nand_setrate(struct s3c2410_nand_info *info)
{
struct s3c2410_platform_nand *plat = info->platform;
int tacls_max = (info->cpu_type == TYPE_S3C2412) ? 8 : 4;
int tacls, twrph0, twrph1;
unsigned long clkrate = clk_get_rate(info->clk);
- unsigned long set, cfg, mask;
+ unsigned long uninitialized_var(set), cfg, uninitialized_var(mask);
unsigned long flags;
/* calculate the timing information for the controller */
@@ -215,9 +253,9 @@
case TYPE_S3C2440:
case TYPE_S3C2412:
- mask = (S3C2410_NFCONF_TACLS(tacls_max - 1) |
- S3C2410_NFCONF_TWRPH0(7) |
- S3C2410_NFCONF_TWRPH1(7));
+ mask = (S3C2440_NFCONF_TACLS(tacls_max - 1) |
+ S3C2440_NFCONF_TWRPH0(7) |
+ S3C2440_NFCONF_TWRPH1(7));
set = S3C2440_NFCONF_TACLS(tacls - 1);
set |= S3C2440_NFCONF_TWRPH0(twrph0 - 1);
@@ -225,14 +263,9 @@
break;
default:
- /* keep compiler happy */
- mask = 0;
- set = 0;
BUG();
}
- dev_dbg(info->device, "NF_CONF is 0x%lx\n", cfg);
-
local_irq_save(flags);
cfg = readl(info->regs + S3C2410_NFCONF);
@@ -242,9 +275,18 @@
local_irq_restore(flags);
+ dev_dbg(info->device, "NF_CONF is 0x%lx\n", cfg);
+
return 0;
}
+/**
+ * s3c2410_nand_inithw - basic hardware initialisation
+ * @info: The hardware state.
+ *
+ * Do the basic initialisation of the hardware, using s3c2410_nand_setrate()
+ * to set up the hardware access speeds and set the controller to be enabled.
+*/
static int s3c2410_nand_inithw(struct s3c2410_nand_info *info)
{
int ret;
@@ -268,8 +310,19 @@
return 0;
}
-/* select chip */
-
+/**
+ * s3c2410_nand_select_chip - select the given nand chip
+ * @mtd: The MTD instance for this chip.
+ * @chip: The chip number.
+ *
+ * This is called by the MTD layer to either select a given chip for the
+ * @mtd instance, or to indicate that the access has finished and the
+ * chip can be de-selected.
+ *
+ * The routine ensures that the nFCE line is correctly set up, and any
+ * platform specific selection code is called to route nFCE to the specific
+ * chip.
+ */
static void s3c2410_nand_select_chip(struct mtd_info *mtd, int chip)
{
struct s3c2410_nand_info *info;
@@ -530,7 +583,16 @@
static void s3c2440_nand_read_buf(struct mtd_info *mtd, u_char *buf, int len)
{
struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd);
- readsl(info->regs + S3C2440_NFDATA, buf, len / 4);
+
+ readsl(info->regs + S3C2440_NFDATA, buf, len >> 2);
+
+ /* cleanup if we've got less than a word to do */
+ if (len & 3) {
+ buf += len & ~3;
+
+ for (; len & 3; len--)
+ *buf++ = readb(info->regs + S3C2440_NFDATA);
+ }
}
static void s3c2410_nand_write_buf(struct mtd_info *mtd, const u_char *buf, int len)
@@ -542,7 +604,16 @@
static void s3c2440_nand_write_buf(struct mtd_info *mtd, const u_char *buf, int len)
{
struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd);
- writesl(info->regs + S3C2440_NFDATA, buf, len / 4);
+
+ writesl(info->regs + S3C2440_NFDATA, buf, len >> 2);
+
+ /* cleanup any fractional write */
+ if (len & 3) {
+ buf += len & ~3;
+
+ for (; len & 3; len--, buf++)
+ writeb(*buf, info->regs + S3C2440_NFDATA);
+ }
}
/* cpufreq driver support */
@@ -593,7 +664,7 @@
/* device management functions */
-static int s3c2410_nand_remove(struct platform_device *pdev)
+static int s3c24xx_nand_remove(struct platform_device *pdev)
{
struct s3c2410_nand_info *info = to_nand_info(pdev);
@@ -645,17 +716,31 @@
}
#ifdef CONFIG_MTD_PARTITIONS
+const char *part_probes[] = { "cmdlinepart", NULL };
static int s3c2410_nand_add_partition(struct s3c2410_nand_info *info,
struct s3c2410_nand_mtd *mtd,
struct s3c2410_nand_set *set)
{
+ struct mtd_partition *part_info;
+ int nr_part = 0;
+
if (set == NULL)
return add_mtd_device(&mtd->mtd);
- if (set->nr_partitions > 0 && set->partitions != NULL) {
- return add_mtd_partitions(&mtd->mtd, set->partitions, set->nr_partitions);
+ if (set->nr_partitions == 0) {
+ mtd->mtd.name = set->name;
+ nr_part = parse_mtd_partitions(&mtd->mtd, part_probes,
+ &part_info, 0);
+ } else {
+ if (set->nr_partitions > 0 && set->partitions != NULL) {
+ nr_part = set->nr_partitions;
+ part_info = set->partitions;
+ }
}
+ if (nr_part > 0 && part_info)
+ return add_mtd_partitions(&mtd->mtd, part_info, nr_part);
+
return add_mtd_device(&mtd->mtd);
}
#else
@@ -667,11 +752,16 @@
}
#endif
-/* s3c2410_nand_init_chip
+/**
+ * s3c2410_nand_init_chip - initialise a single instance of a chip
+ * @info: The base NAND controller the chip is on.
+ * @nmtd: The new controller MTD instance to fill in.
+ * @set: The information passed from the board specific platform data.
*
- * init a single instance of an chip
-*/
-
+ * Initialise the given @nmtd from the information in @info and @set. This
+ * readies the structure for use with the MTD layer functions by ensuring
+ * all pointers are set up and the necessary control routines selected.
+ */
static void s3c2410_nand_init_chip(struct s3c2410_nand_info *info,
struct s3c2410_nand_mtd *nmtd,
struct s3c2410_nand_set *set)
@@ -757,14 +847,40 @@
if (set->disable_ecc)
chip->ecc.mode = NAND_ECC_NONE;
+
+ switch (chip->ecc.mode) {
+ case NAND_ECC_NONE:
+ dev_info(info->device, "NAND ECC disabled\n");
+ break;
+ case NAND_ECC_SOFT:
+ dev_info(info->device, "NAND soft ECC\n");
+ break;
+ case NAND_ECC_HW:
+ dev_info(info->device, "NAND hardware ECC\n");
+ break;
+ default:
+ dev_info(info->device, "NAND ECC UNKNOWN\n");
+ break;
+ }
+
+ /* If you use u-boot BBT creation code, specifying this flag will
+ * let the kernel fish out the BBT from the NAND, and also skip the
+ * full NAND scan that can take 1/2s or so. Little things... */
+ if (set->flash_bbt)
+ chip->options |= NAND_USE_FLASH_BBT | NAND_SKIP_BBTSCAN;
}
-/* s3c2410_nand_update_chip
+/**
+ * s3c2410_nand_update_chip - post probe update
+ * @info: The controller instance.
+ * @nmtd: The driver version of the MTD instance.
*
- * post-probe chip update, to change any items, such as the
- * layout for large page nand
- */
-
+ * This routine is called after the chip probe has successfully completed
+ * and the relevant per-chip information updated. This call ensures that
+ * we update the internal state accordingly.
+ *
+ * The internal state is currently limited to the ECC state information.
+*/
static void s3c2410_nand_update_chip(struct s3c2410_nand_info *info,
struct s3c2410_nand_mtd *nmtd)
{
@@ -773,33 +889,33 @@
dev_dbg(info->device, "chip %p => page shift %d\n",
chip, chip->page_shift);
- if (hardware_ecc) {
+ if (chip->ecc.mode != NAND_ECC_HW)
+ return;
+
/* change the behaviour depending on wether we are using
* the large or small page nand device */
- if (chip->page_shift > 10) {
- chip->ecc.size = 256;
- chip->ecc.bytes = 3;
- } else {
- chip->ecc.size = 512;
- chip->ecc.bytes = 3;
- chip->ecc.layout = &nand_hw_eccoob;
- }
+ if (chip->page_shift > 10) {
+ chip->ecc.size = 256;
+ chip->ecc.bytes = 3;
+ } else {
+ chip->ecc.size = 512;
+ chip->ecc.bytes = 3;
+ chip->ecc.layout = &nand_hw_eccoob;
}
}
-/* s3c2410_nand_probe
+/* s3c24xx_nand_probe
*
* called by device layer when it finds a device matching
* one our driver can handled. This code checks to see if
* it can allocate all necessary resources then calls the
* nand layer to look for devices
*/
-
-static int s3c24xx_nand_probe(struct platform_device *pdev,
- enum s3c_cpu_type cpu_type)
+static int s3c24xx_nand_probe(struct platform_device *pdev)
{
struct s3c2410_platform_nand *plat = to_nand_plat(pdev);
+ enum s3c_cpu_type cpu_type;
struct s3c2410_nand_info *info;
struct s3c2410_nand_mtd *nmtd;
struct s3c2410_nand_set *sets;
@@ -809,6 +925,8 @@
int nr_sets;
int setno;
+ cpu_type = platform_get_device_id(pdev)->driver_data;
+
pr_debug("s3c2410_nand_probe(%p)\n", pdev);
info = kmalloc(sizeof(*info), GFP_KERNEL);
@@ -922,7 +1040,7 @@
return 0;
exit_error:
- s3c2410_nand_remove(pdev);
+ s3c24xx_nand_remove(pdev);
if (err == 0)
err = -EINVAL;
@@ -983,50 +1101,33 @@
/* driver device registration */
-static int s3c2410_nand_probe(struct platform_device *dev)
-{
- return s3c24xx_nand_probe(dev, TYPE_S3C2410);
-}
-
-static int s3c2440_nand_probe(struct platform_device *dev)
-{
- return s3c24xx_nand_probe(dev, TYPE_S3C2440);
-}
-
-static int s3c2412_nand_probe(struct platform_device *dev)
-{
- return s3c24xx_nand_probe(dev, TYPE_S3C2412);
-}
-
-static struct platform_driver s3c2410_nand_driver = {
- .probe = s3c2410_nand_probe,
- .remove = s3c2410_nand_remove,
- .suspend = s3c24xx_nand_suspend,
- .resume = s3c24xx_nand_resume,
- .driver = {
- .name = "s3c2410-nand",
- .owner = THIS_MODULE,
+static struct platform_device_id s3c24xx_driver_ids[] = {
+ {
+ .name = "s3c2410-nand",
+ .driver_data = TYPE_S3C2410,
+ }, {
+ .name = "s3c2440-nand",
+ .driver_data = TYPE_S3C2440,
+ }, {
+ .name = "s3c2412-nand",
+ .driver_data = TYPE_S3C2412,
+ }, {
+ .name = "s3c6400-nand",
+ .driver_data = TYPE_S3C2412, /* compatible with 2412 */
},
+ { }
};
-static struct platform_driver s3c2440_nand_driver = {
- .probe = s3c2440_nand_probe,
- .remove = s3c2410_nand_remove,
- .suspend = s3c24xx_nand_suspend,
- .resume = s3c24xx_nand_resume,
- .driver = {
- .name = "s3c2440-nand",
- .owner = THIS_MODULE,
- },
-};
+MODULE_DEVICE_TABLE(platform, s3c24xx_driver_ids);
-static struct platform_driver s3c2412_nand_driver = {
- .probe = s3c2412_nand_probe,
- .remove = s3c2410_nand_remove,
+static struct platform_driver s3c24xx_nand_driver = {
+ .probe = s3c24xx_nand_probe,
+ .remove = s3c24xx_nand_remove,
.suspend = s3c24xx_nand_suspend,
.resume = s3c24xx_nand_resume,
+ .id_table = s3c24xx_driver_ids,
.driver = {
- .name = "s3c2412-nand",
+ .name = "s3c24xx-nand",
.owner = THIS_MODULE,
},
};
@@ -1035,16 +1136,12 @@
{
printk("S3C24XX NAND Driver, (c) 2004 Simtec Electronics\n");
- platform_driver_register(&s3c2412_nand_driver);
- platform_driver_register(&s3c2440_nand_driver);
- return platform_driver_register(&s3c2410_nand_driver);
+ return platform_driver_register(&s3c24xx_nand_driver);
}
static void __exit s3c2410_nand_exit(void)
{
- platform_driver_unregister(&s3c2412_nand_driver);
- platform_driver_unregister(&s3c2440_nand_driver);
- platform_driver_unregister(&s3c2410_nand_driver);
+ platform_driver_unregister(&s3c24xx_nand_driver);
}
module_init(s3c2410_nand_init);
@@ -1053,6 +1150,3 @@
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>");
MODULE_DESCRIPTION("S3C24XX MTD NAND driver");
-MODULE_ALIAS("platform:s3c2410-nand");
-MODULE_ALIAS("platform:s3c2412-nand");
-MODULE_ALIAS("platform:s3c2440-nand");
diff --git a/drivers/mtd/nand/txx9ndfmc.c b/drivers/mtd/nand/txx9ndfmc.c
index 8124792..488088e 100644
--- a/drivers/mtd/nand/txx9ndfmc.c
+++ b/drivers/mtd/nand/txx9ndfmc.c
@@ -64,7 +64,7 @@
struct nand_chip chip;
struct mtd_info mtd;
int cs;
- char mtdname[BUS_ID_SIZE + 2];
+ const char *mtdname;
};
#define MAX_TXX9NDFMC_DEV 4
@@ -334,16 +334,23 @@
if (plat->ch_mask != 1) {
txx9_priv->cs = i;
- sprintf(txx9_priv->mtdname, "%s.%u",
- dev_name(&dev->dev), i);
+ txx9_priv->mtdname = kasprintf(GFP_KERNEL, "%s.%u",
+ dev_name(&dev->dev), i);
} else {
txx9_priv->cs = -1;
- strcpy(txx9_priv->mtdname, dev_name(&dev->dev));
+ txx9_priv->mtdname = kstrdup(dev_name(&dev->dev),
+ GFP_KERNEL);
+ }
+ if (!txx9_priv->mtdname) {
+ kfree(txx9_priv);
+ dev_err(&dev->dev, "Unable to allocate MTD name.\n");
+ continue;
}
if (plat->wide_mask & (1 << i))
chip->options |= NAND_BUSWIDTH_16;
if (nand_scan(mtd, 1)) {
+ kfree(txx9_priv->mtdname);
kfree(txx9_priv);
continue;
}
@@ -385,6 +392,7 @@
kfree(drvdata->parts[i]);
#endif
del_mtd_device(mtd);
+ kfree(txx9_priv->mtdname);
kfree(txx9_priv);
}
return 0;
diff --git a/drivers/mtd/onenand/omap2.c b/drivers/mtd/onenand/omap2.c
index 6391e3d..38d656b 100644
--- a/drivers/mtd/onenand/omap2.c
+++ b/drivers/mtd/onenand/omap2.c
@@ -565,7 +565,7 @@
NULL, __adjust_timing);
}
-static void __devexit omap2_onenand_shutdown(struct platform_device *pdev)
+static void omap2_onenand_shutdown(struct platform_device *pdev)
{
struct omap2_onenand *c = dev_get_drvdata(&pdev->dev);
@@ -777,7 +777,7 @@
static struct platform_driver omap2_onenand_driver = {
.probe = omap2_onenand_probe,
- .remove = omap2_onenand_remove,
+ .remove = __devexit_p(omap2_onenand_remove),
.shutdown = omap2_onenand_shutdown,
.driver = {
.name = DRIVER_NAME,
diff --git a/drivers/mtd/onenand/onenand_base.c b/drivers/mtd/onenand/onenand_base.c
index 30d6999..6e82909 100644
--- a/drivers/mtd/onenand/onenand_base.c
+++ b/drivers/mtd/onenand/onenand_base.c
@@ -9,6 +9,10 @@
* auto-placement support, read-while load support, various fixes
* Copyright (C) Nokia Corporation, 2007
*
+ * Vishak G <vishak.g at samsung.com>, Rohit Hagargundgi <h.rohit at samsung.com>
+ * Flex-OneNAND support
+ * Copyright (C) Samsung Electronics, 2008
+ *
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
@@ -16,6 +20,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/delay.h>
@@ -27,6 +32,38 @@
#include <asm/io.h>
+/* Default Flex-OneNAND boundary and lock respectively */
+static int flex_bdry[MAX_DIES * 2] = { -1, 0, -1, 0 };
+
+module_param_array(flex_bdry, int, NULL, 0400);
+MODULE_PARM_DESC(flex_bdry, "SLC Boundary information for Flex-OneNAND"
+ "Syntax:flex_bdry=DIE_BDRY,LOCK,..."
+ "DIE_BDRY: SLC boundary of the die"
+ "LOCK: Locking information for SLC boundary"
+ " : 0->Set boundary in unlocked status"
+ " : 1->Set boundary in locked status");
+
+/**
+ * onenand_oob_128 - oob info for Flex-OneNAND with 4KB page
+ * For now, we expose only 64 out of 80 ecc bytes
+ */
+static struct nand_ecclayout onenand_oob_128 = {
+ .eccbytes = 64,
+ .eccpos = {
+ 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
+ 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+ 38, 39, 40, 41, 42, 43, 44, 45, 46, 47,
+ 54, 55, 56, 57, 58, 59, 60, 61, 62, 63,
+ 70, 71, 72, 73, 74, 75, 76, 77, 78, 79,
+ 86, 87, 88, 89, 90, 91, 92, 93, 94, 95,
+ 102, 103, 104, 105
+ },
+ .oobfree = {
+ {2, 4}, {18, 4}, {34, 4}, {50, 4},
+ {66, 4}, {82, 4}, {98, 4}, {114, 4}
+ }
+};
+
/**
* onenand_oob_64 - oob info for large (2KB) page
*/
@@ -65,6 +102,14 @@
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 48 */
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 64 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 80 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 96 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 112 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 128 */
};
/**
@@ -171,6 +216,70 @@
}
/**
+ * flexonenand_block - Return the block number for a given address
+ * @param this - OneNAND device structure
+ * @param addr - Address for which block number is needed
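+ *
+ * Blocks below the SLC boundary are half the MLC erase size, so the
+ * address is first counted in SLC-sized units and then folded into
+ * MLC-sized blocks for the region above the boundary.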
+ */
+static unsigned flexonenand_block(struct onenand_chip *this, loff_t addr)
+{
+ unsigned boundary, blk, die = 0;
+
+ if (ONENAND_IS_DDP(this) && addr >= this->diesize[0]) {
+ die = 1;
+ addr -= this->diesize[0];
+ }
+
+ boundary = this->boundary[die];
+
+ blk = addr >> (this->erase_shift - 1);
+ if (blk > boundary)
+ blk = (blk + boundary + 1) >> 1;
+
+ blk += die ? this->density_mask : 0;
+ return blk;
+}
+
+inline unsigned onenand_block(struct onenand_chip *this, loff_t addr)
+{
+ if (!FLEXONENAND(this))
+ return addr >> this->erase_shift;
+ return flexonenand_block(this, addr);
+}
+
+/**
+ * flexonenand_addr - Return address of the block
+ * @this: OneNAND device structure
+ * @block: Block number on Flex-OneNAND
+ *
+ * Return address of the block
+ */
+static loff_t flexonenand_addr(struct onenand_chip *this, int block)
+{
+ loff_t ofs = 0;
+ int die = 0, boundary;
+
+ if (ONENAND_IS_DDP(this) && block >= this->density_mask) {
+ block -= this->density_mask;
+ die = 1;
+ ofs = this->diesize[0];
+ }
+
+ boundary = this->boundary[die];
+ ofs += (loff_t)block << (this->erase_shift - 1);
+ if (block > (boundary + 1))
+ ofs += (loff_t)(block - boundary - 1) << (this->erase_shift - 1);
+ return ofs;
+}
+
+loff_t onenand_addr(struct onenand_chip *this, int block)
+{
+ if (!FLEXONENAND(this))
+ return (loff_t)block << this->erase_shift;
+ return flexonenand_addr(this, block);
+}
+EXPORT_SYMBOL(onenand_addr);
+
+/**
* onenand_get_density - [DEFAULT] Get OneNAND density
* @param dev_id OneNAND device ID
*
@@ -183,6 +292,22 @@
}
/**
+ * flexonenand_region - [Flex-OneNAND] Return erase region of addr
+ * @param mtd MTD device structure
+ * @param addr address whose erase region needs to be identified
+ */
+int flexonenand_region(struct mtd_info *mtd, loff_t addr)
+{
+ int i;
+
+ for (i = 0; i < mtd->numeraseregions; i++)
+ if (addr < mtd->eraseregions[i].offset)
+ break;
+ return i - 1;
+}
+EXPORT_SYMBOL(flexonenand_region);
+
+/**
* onenand_command - [DEFAULT] Send command to OneNAND device
* @param mtd MTD device structure
* @param cmd the command to be sent
@@ -207,16 +332,28 @@
page = -1;
break;
- case ONENAND_CMD_ERASE:
- case ONENAND_CMD_BUFFERRAM:
- case ONENAND_CMD_OTP_ACCESS:
- block = (int) (addr >> this->erase_shift);
+ case FLEXONENAND_CMD_PI_ACCESS:
+ /* addr contains die index */
+ block = addr * this->density_mask;
page = -1;
break;
+ case ONENAND_CMD_ERASE:
+ case ONENAND_CMD_BUFFERRAM:
+ case ONENAND_CMD_OTP_ACCESS:
+ block = onenand_block(this, addr);
+ page = -1;
+ break;
+
+ case FLEXONENAND_CMD_READ_PI:
+ cmd = ONENAND_CMD_READ;
+ block = addr * this->density_mask;
+ page = 0;
+ break;
+
default:
- block = (int) (addr >> this->erase_shift);
- page = (int) (addr >> this->page_shift);
+ block = onenand_block(this, addr);
+ page = (int) (addr - onenand_addr(this, block)) >> this->page_shift;
if (ONENAND_IS_2PLANE(this)) {
/* Make the even block number */
@@ -236,7 +373,7 @@
value = onenand_bufferram_address(this, block);
this->write_word(value, this->base + ONENAND_REG_START_ADDRESS2);
- if (ONENAND_IS_2PLANE(this))
+ if (ONENAND_IS_MLC(this) || ONENAND_IS_2PLANE(this))
/* It is always BufferRAM0 */
ONENAND_SET_BUFFERRAM0(this);
else
@@ -258,13 +395,18 @@
if (page != -1) {
/* Now we use page size operation */
- int sectors = 4, count = 4;
+ int sectors = 0, count = 0;
int dataram;
switch (cmd) {
+ case FLEXONENAND_CMD_RECOVER_LSB:
case ONENAND_CMD_READ:
case ONENAND_CMD_READOOB:
- dataram = ONENAND_SET_NEXT_BUFFERRAM(this);
+ if (ONENAND_IS_MLC(this))
+ /* It is always BufferRAM0 */
+ dataram = ONENAND_SET_BUFFERRAM0(this);
+ else
+ dataram = ONENAND_SET_NEXT_BUFFERRAM(this);
break;
default:
@@ -293,6 +435,30 @@
}
/**
+ * onenand_read_ecc - return ecc status
+ * @param this onenand chip structure
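+ *
+ * Flex-OneNAND reports ECC status in four consecutive registers; collapse
+ * them into one value, with an uncorrectable error taking precedence over
+ * a correctable one.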
+ */
+static inline int onenand_read_ecc(struct onenand_chip *this)
+{
+ int ecc, i, result = 0;
+
+ if (!FLEXONENAND(this))
+ return this->read_word(this->base + ONENAND_REG_ECC_STATUS);
+
+ for (i = 0; i < 4; i++) {
+ ecc = this->read_word(this->base + ONENAND_REG_ECC_STATUS + i);
+ if (likely(!ecc))
+ continue;
+ if (ecc & FLEXONENAND_UNCORRECTABLE_ERROR)
+ return ONENAND_ECC_2BIT_ALL;
+ else
+ result = ONENAND_ECC_1BIT_ALL;
+ }
+
+ return result;
+}
+
+/**
* onenand_wait - [DEFAULT] wait until the command is done
* @param mtd MTD device structure
* @param state state to select the max. timeout value
@@ -331,14 +497,14 @@
* power off recovery (POR) test, it should read ECC status first
*/
if (interrupt & ONENAND_INT_READ) {
- int ecc = this->read_word(this->base + ONENAND_REG_ECC_STATUS);
+ int ecc = onenand_read_ecc(this);
if (ecc) {
if (ecc & ONENAND_ECC_2BIT_ALL) {
printk(KERN_ERR "onenand_wait: ECC error = 0x%04x\n", ecc);
mtd->ecc_stats.failed++;
return -EBADMSG;
} else if (ecc & ONENAND_ECC_1BIT_ALL) {
- printk(KERN_INFO "onenand_wait: correctable ECC error = 0x%04x\n", ecc);
+ printk(KERN_DEBUG "onenand_wait: correctable ECC error = 0x%04x\n", ecc);
mtd->ecc_stats.corrected++;
}
}
@@ -656,7 +822,7 @@
if (found && ONENAND_IS_DDP(this)) {
/* Select DataRAM for DDP */
- int block = (int) (addr >> this->erase_shift);
+ int block = onenand_block(this, addr);
int value = onenand_bufferram_address(this, block);
this->write_word(value, this->base + ONENAND_REG_START_ADDRESS2);
}
@@ -816,6 +982,149 @@
}
/**
+ * onenand_recover_lsb - [Flex-OneNAND] Recover LSB page data
+ * @param mtd MTD device structure
+ * @param addr address to recover
+ * @param status return value from onenand_wait / onenand_bbt_wait
+ *
+ * MLC NAND Flash cell has paired pages - LSB page and MSB page. LSB page has
+ * lower page address and MSB page has higher page address in paired pages.
+ * If power off occurs during MSB page program, the paired LSB page data can
+ * become corrupt. LSB page recovery read is a way to read the LSB page even
+ * though its data is corrupted. When an uncorrectable error occurs on an LSB
+ * page read after power up, issue an LSB page recovery read.
+ */
+static int onenand_recover_lsb(struct mtd_info *mtd, loff_t addr, int status)
+{
+ struct onenand_chip *this = mtd->priv;
+ int i;
+
+ /* Recovery is only for Flex-OneNAND */
+ if (!FLEXONENAND(this))
+ return status;
+
+ /* check if we failed due to uncorrectable error */
+ if (status != -EBADMSG && status != ONENAND_BBT_READ_ECC_ERROR)
+ return status;
+
+ /* check if address lies in MLC region */
+ i = flexonenand_region(mtd, addr);
+ if (mtd->eraseregions[i].erasesize < (1 << this->erase_shift))
+ return status;
+
+ /* We are attempting to reread, so decrement stats.failed
+ * which was incremented by onenand_wait due to read failure
+ */
+ printk(KERN_INFO "onenand_recover_lsb: Attempting to recover from uncorrectable read\n");
+ mtd->ecc_stats.failed--;
+
+ /* Issue the LSB page recovery command */
+ this->command(mtd, FLEXONENAND_CMD_RECOVER_LSB, addr, this->writesize);
+ return this->wait(mtd, FL_READING);
+}
+
+/**
+ * onenand_mlc_read_ops_nolock - MLC OneNAND read main and/or out-of-band
+ * @param mtd MTD device structure
+ * @param from offset to read from
+ * @param ops: oob operation description structure
+ *
+ * MLC OneNAND / Flex-OneNAND has 4KB page size and 4KB dataram.
+ * So, read-while-load is not present.
+ */
+static int onenand_mlc_read_ops_nolock(struct mtd_info *mtd, loff_t from,
+ struct mtd_oob_ops *ops)
+{
+ struct onenand_chip *this = mtd->priv;
+ struct mtd_ecc_stats stats;
+ size_t len = ops->len;
+ size_t ooblen = ops->ooblen;
+ u_char *buf = ops->datbuf;
+ u_char *oobbuf = ops->oobbuf;
+ int read = 0, column, thislen;
+ int oobread = 0, oobcolumn, thisooblen, oobsize;
+ int ret = 0;
+ int writesize = this->writesize;
+
+ DEBUG(MTD_DEBUG_LEVEL3, "onenand_mlc_read_ops_nolock: from = 0x%08x, len = %i\n", (unsigned int) from, (int) len);
+
+ if (ops->mode == MTD_OOB_AUTO)
+ oobsize = this->ecclayout->oobavail;
+ else
+ oobsize = mtd->oobsize;
+
+ oobcolumn = from & (mtd->oobsize - 1);
+
+ /* Do not allow reads past end of device */
+ if (from + len > mtd->size) {
+ printk(KERN_ERR "onenand_mlc_read_ops_nolock: Attempt read beyond end of device\n");
+ ops->retlen = 0;
+ ops->oobretlen = 0;
+ return -EINVAL;
+ }
+
+ stats = mtd->ecc_stats;
+
+ while (read < len) {
+ cond_resched();
+
+ thislen = min_t(int, writesize, len - read);
+
+ column = from & (writesize - 1);
+ if (column + thislen > writesize)
+ thislen = writesize - column;
+
+ if (!onenand_check_bufferram(mtd, from)) {
+ this->command(mtd, ONENAND_CMD_READ, from, writesize);
+
+ ret = this->wait(mtd, FL_READING);
+ if (unlikely(ret))
+ ret = onenand_recover_lsb(mtd, from, ret);
+ onenand_update_bufferram(mtd, from, !ret);
+ if (ret == -EBADMSG)
+ ret = 0;
+ }
+
+ this->read_bufferram(mtd, ONENAND_DATARAM, buf, column, thislen);
+ if (oobbuf) {
+ thisooblen = oobsize - oobcolumn;
+ thisooblen = min_t(int, thisooblen, ooblen - oobread);
+
+ if (ops->mode == MTD_OOB_AUTO)
+ onenand_transfer_auto_oob(mtd, oobbuf, oobcolumn, thisooblen);
+ else
+ this->read_bufferram(mtd, ONENAND_SPARERAM, oobbuf, oobcolumn, thisooblen);
+ oobread += thisooblen;
+ oobbuf += thisooblen;
+ oobcolumn = 0;
+ }
+
+ read += thislen;
+ if (read == len)
+ break;
+
+ from += thislen;
+ buf += thislen;
+ }
+
+ /*
+ * Return success, if no ECC failures, else -EBADMSG
+ * fs driver will take care of that, because
+ * retlen == desired len and result == -EBADMSG
+ */
+ ops->retlen = read;
+ ops->oobretlen = oobread;
+
+ if (ret)
+ return ret;
+
+ if (mtd->ecc_stats.failed - stats.failed)
+ return -EBADMSG;
+
+ return mtd->ecc_stats.corrected - stats.corrected ? -EUCLEAN : 0;
+}
+
+/**
* onenand_read_ops_nolock - [OneNAND Interface] OneNAND read main and/or out-of-band
* @param mtd MTD device structure
* @param from offset to read from
@@ -962,7 +1271,7 @@
size_t len = ops->ooblen;
mtd_oob_mode_t mode = ops->mode;
u_char *buf = ops->oobbuf;
- int ret = 0;
+ int ret = 0, readcmd;
from += ops->ooboffs;
@@ -993,17 +1302,22 @@
stats = mtd->ecc_stats;
+ readcmd = ONENAND_IS_MLC(this) ? ONENAND_CMD_READ : ONENAND_CMD_READOOB;
+
while (read < len) {
cond_resched();
thislen = oobsize - column;
thislen = min_t(int, thislen, len);
- this->command(mtd, ONENAND_CMD_READOOB, from, mtd->oobsize);
+ this->command(mtd, readcmd, from, mtd->oobsize);
onenand_update_bufferram(mtd, from, 0);
ret = this->wait(mtd, FL_READING);
+ if (unlikely(ret))
+ ret = onenand_recover_lsb(mtd, from, ret);
+
if (ret && ret != -EBADMSG) {
printk(KERN_ERR "onenand_read_oob_nolock: read failed = 0x%x\n", ret);
break;
@@ -1053,6 +1367,7 @@
static int onenand_read(struct mtd_info *mtd, loff_t from, size_t len,
size_t *retlen, u_char *buf)
{
+ struct onenand_chip *this = mtd->priv;
struct mtd_oob_ops ops = {
.len = len,
.ooblen = 0,
@@ -1062,7 +1377,9 @@
int ret;
onenand_get_device(mtd, FL_READING);
- ret = onenand_read_ops_nolock(mtd, from, &ops);
+ ret = ONENAND_IS_MLC(this) ?
+ onenand_mlc_read_ops_nolock(mtd, from, &ops) :
+ onenand_read_ops_nolock(mtd, from, &ops);
onenand_release_device(mtd);
*retlen = ops.retlen;
@@ -1080,6 +1397,7 @@
static int onenand_read_oob(struct mtd_info *mtd, loff_t from,
struct mtd_oob_ops *ops)
{
+ struct onenand_chip *this = mtd->priv;
int ret;
switch (ops->mode) {
@@ -1094,7 +1412,9 @@
onenand_get_device(mtd, FL_READING);
if (ops->datbuf)
- ret = onenand_read_ops_nolock(mtd, from, ops);
+ ret = ONENAND_IS_MLC(this) ?
+ onenand_mlc_read_ops_nolock(mtd, from, ops) :
+ onenand_read_ops_nolock(mtd, from, ops);
else
ret = onenand_read_oob_nolock(mtd, from, ops);
onenand_release_device(mtd);
@@ -1128,11 +1448,11 @@
ctrl = this->read_word(this->base + ONENAND_REG_CTRL_STATUS);
if (interrupt & ONENAND_INT_READ) {
- int ecc = this->read_word(this->base + ONENAND_REG_ECC_STATUS);
+ int ecc = onenand_read_ecc(this);
if (ecc & ONENAND_ECC_2BIT_ALL) {
printk(KERN_INFO "onenand_bbt_wait: ecc error = 0x%04x"
", controller error 0x%04x\n", ecc, ctrl);
- return ONENAND_BBT_READ_ERROR;
+ return ONENAND_BBT_READ_ECC_ERROR;
}
} else {
printk(KERN_ERR "onenand_bbt_wait: read timeout!"
@@ -1163,7 +1483,7 @@
{
struct onenand_chip *this = mtd->priv;
int read = 0, thislen, column;
- int ret = 0;
+ int ret = 0, readcmd;
size_t len = ops->ooblen;
u_char *buf = ops->oobbuf;
@@ -1183,17 +1503,22 @@
column = from & (mtd->oobsize - 1);
+ readcmd = ONENAND_IS_MLC(this) ? ONENAND_CMD_READ : ONENAND_CMD_READOOB;
+
while (read < len) {
cond_resched();
thislen = mtd->oobsize - column;
thislen = min_t(int, thislen, len);
- this->command(mtd, ONENAND_CMD_READOOB, from, mtd->oobsize);
+ this->command(mtd, readcmd, from, mtd->oobsize);
onenand_update_bufferram(mtd, from, 0);
- ret = onenand_bbt_wait(mtd, FL_READING);
+ ret = this->bbt_wait(mtd, FL_READING);
+ if (unlikely(ret))
+ ret = onenand_recover_lsb(mtd, from, ret);
+
if (ret)
break;
@@ -1230,9 +1555,11 @@
{
struct onenand_chip *this = mtd->priv;
u_char *oob_buf = this->oob_buf;
- int status, i;
+ int status, i, readcmd;
- this->command(mtd, ONENAND_CMD_READOOB, to, mtd->oobsize);
+ readcmd = ONENAND_IS_MLC(this) ? ONENAND_CMD_READ : ONENAND_CMD_READOOB;
+
+ this->command(mtd, readcmd, to, mtd->oobsize);
onenand_update_bufferram(mtd, to, 0);
status = this->wait(mtd, FL_READING);
if (status)
@@ -1633,7 +1960,7 @@
{
struct onenand_chip *this = mtd->priv;
int column, ret = 0, oobsize;
- int written = 0;
+ int written = 0, oobcmd;
u_char *oobbuf;
size_t len = ops->ooblen;
const u_char *buf = ops->oobbuf;
@@ -1675,6 +2002,8 @@
oobbuf = this->oob_buf;
+ oobcmd = ONENAND_IS_MLC(this) ? ONENAND_CMD_PROG : ONENAND_CMD_PROGOOB;
+
/* Loop until all data write */
while (written < len) {
int thislen = min_t(int, oobsize, len - written);
@@ -1692,7 +2021,14 @@
memcpy(oobbuf + column, buf, thislen);
this->write_bufferram(mtd, ONENAND_SPARERAM, oobbuf, 0, mtd->oobsize);
- this->command(mtd, ONENAND_CMD_PROGOOB, to, mtd->oobsize);
+ if (ONENAND_IS_MLC(this)) {
+ /* Set main area of DataRAM to 0xff*/
+ memset(this->page_buf, 0xff, mtd->writesize);
+ this->write_bufferram(mtd, ONENAND_DATARAM,
+ this->page_buf, 0, mtd->writesize);
+ }
+
+ this->command(mtd, oobcmd, to, mtd->oobsize);
onenand_update_bufferram(mtd, to, 0);
if (ONENAND_IS_2PLANE(this)) {
@@ -1815,41 +2151,57 @@
{
struct onenand_chip *this = mtd->priv;
unsigned int block_size;
- loff_t addr;
- int len;
- int ret = 0;
+ loff_t addr = instr->addr;
+ loff_t len = instr->len;
+ int ret = 0, i;
+ struct mtd_erase_region_info *region = NULL;
+ loff_t region_end = 0;
DEBUG(MTD_DEBUG_LEVEL3, "onenand_erase: start = 0x%012llx, len = %llu\n", (unsigned long long) instr->addr, (unsigned long long) instr->len);
- block_size = (1 << this->erase_shift);
-
- /* Start address must align on block boundary */
- if (unlikely(instr->addr & (block_size - 1))) {
- printk(KERN_ERR "onenand_erase: Unaligned address\n");
+ /* Do not allow erase past end of device */
+ if (unlikely((len + addr) > mtd->size)) {
+ printk(KERN_ERR "onenand_erase: Erase past end of device\n");
return -EINVAL;
}
+ if (FLEXONENAND(this)) {
+ /* Find the eraseregion of this address */
+ i = flexonenand_region(mtd, addr);
+ region = &mtd->eraseregions[i];
+
+ block_size = region->erasesize;
+ region_end = region->offset + region->erasesize * region->numblocks;
+
+ /* Start address within region must align on block boundary.
+ * Erase region's start offset is always block start address.
+ */
+ if (unlikely((addr - region->offset) & (block_size - 1))) {
+ printk(KERN_ERR "onenand_erase: Unaligned address\n");
+ return -EINVAL;
+ }
+ } else {
+ block_size = 1 << this->erase_shift;
+
+ /* Start address must align on block boundary */
+ if (unlikely(addr & (block_size - 1))) {
+ printk(KERN_ERR "onenand_erase: Unaligned address\n");
+ return -EINVAL;
+ }
+ }
+
/* Length must align on block boundary */
- if (unlikely(instr->len & (block_size - 1))) {
+ if (unlikely(len & (block_size - 1))) {
printk(KERN_ERR "onenand_erase: Length not block aligned\n");
return -EINVAL;
}
- /* Do not allow erase past end of device */
- if (unlikely((instr->len + instr->addr) > mtd->size)) {
- printk(KERN_ERR "onenand_erase: Erase past end of device\n");
- return -EINVAL;
- }
-
instr->fail_addr = MTD_FAIL_ADDR_UNKNOWN;
/* Grab the lock and see if the device is available */
onenand_get_device(mtd, FL_ERASING);
/* Loop throught the pages */
- len = instr->len;
- addr = instr->addr;
-
instr->state = MTD_ERASING;
while (len) {
@@ -1869,7 +2221,8 @@
ret = this->wait(mtd, FL_ERASING);
/* Check, if it is write protected */
if (ret) {
- printk(KERN_ERR "onenand_erase: Failed erase, block %d\n", (unsigned) (addr >> this->erase_shift));
+ printk(KERN_ERR "onenand_erase: Failed erase, block %d\n",
+ onenand_block(this, addr));
instr->state = MTD_ERASE_FAILED;
instr->fail_addr = addr;
goto erase_exit;
@@ -1877,6 +2230,22 @@
len -= block_size;
addr += block_size;
+
+ if (addr == region_end) {
+ if (!len)
+ break;
+ region++;
+
+ block_size = region->erasesize;
+ region_end = region->offset + region->erasesize * region->numblocks;
+
+ if (len & (block_size - 1)) {
+ /* FIXME: This should be handled at MTD partitioning level. */
+ printk(KERN_ERR "onenand_erase: Unaligned address\n");
+ goto erase_exit;
+ }
+ }
+
}
instr->state = MTD_ERASE_DONE;
@@ -1955,13 +2324,17 @@
int block;
/* Get block number */
- block = ((int) ofs) >> bbm->bbt_erase_shift;
+ block = onenand_block(this, ofs);
if (bbm->bbt)
bbm->bbt[block >> 2] |= 0x01 << ((block & 0x03) << 1);
/* We write two bytes, so we dont have to mess with 16 bit access */
ofs += mtd->oobsize + (bbm->badblockpos & ~0x01);
- return onenand_write_oob_nolock(mtd, ofs, &ops);
+ /* FIXME : What to do when marking SLC block in partition
+ * with MLC erasesize? For now, it is not advisable to
+ * create partitions containing both SLC and MLC regions.
+ */
+ return onenand_write_oob_nolock(mtd, ofs, &ops);
}
/**
@@ -2005,8 +2378,8 @@
int start, end, block, value, status;
int wp_status_mask;
- start = ofs >> this->erase_shift;
- end = len >> this->erase_shift;
+ start = onenand_block(this, ofs);
+ end = onenand_block(this, ofs + len) - 1;
if (cmd == ONENAND_CMD_LOCK)
wp_status_mask = ONENAND_WP_LS;
@@ -2018,7 +2391,7 @@
/* Set start block address */
this->write_word(start, this->base + ONENAND_REG_START_BLOCK_ADDRESS);
/* Set end block address */
- this->write_word(start + end - 1, this->base + ONENAND_REG_END_BLOCK_ADDRESS);
+ this->write_word(end, this->base + ONENAND_REG_END_BLOCK_ADDRESS);
/* Write lock command */
this->command(mtd, cmd, 0, 0);
@@ -2039,7 +2412,7 @@
}
/* Block lock scheme */
- for (block = start; block < start + end; block++) {
+ for (block = start; block < end + 1; block++) {
/* Set block address */
value = onenand_block_address(this, block);
this->write_word(value, this->base + ONENAND_REG_START_ADDRESS1);
@@ -2147,7 +2520,7 @@
{
struct onenand_chip *this = mtd->priv;
loff_t ofs = 0;
- size_t len = this->chipsize;
+ loff_t len = mtd->size;
if (this->options & ONENAND_HAS_UNLOCK_ALL) {
/* Set start block address */
@@ -2163,12 +2536,16 @@
& ONENAND_CTRL_ONGO)
continue;
+ /* Don't check lock status */
+ if (this->options & ONENAND_SKIP_UNLOCK_CHECK)
+ return;
+
/* Check lock status */
if (onenand_check_lock_status(this))
return;
/* Workaround for all block unlock in DDP */
- if (ONENAND_IS_DDP(this)) {
+ if (ONENAND_IS_DDP(this) && !FLEXONENAND(this)) {
/* All blocks on another chip */
ofs = this->chipsize >> 1;
len = this->chipsize >> 1;
@@ -2210,7 +2587,9 @@
this->command(mtd, ONENAND_CMD_OTP_ACCESS, 0, 0);
this->wait(mtd, FL_OTPING);
- ret = onenand_read_ops_nolock(mtd, from, &ops);
+ ret = ONENAND_IS_MLC(this) ?
+ onenand_mlc_read_ops_nolock(mtd, from, &ops) :
+ onenand_read_ops_nolock(mtd, from, &ops);
/* Exit OTP access mode */
this->command(mtd, ONENAND_CMD_RESET, 0, 0);
@@ -2277,21 +2656,32 @@
size_t *retlen, u_char *buf)
{
struct onenand_chip *this = mtd->priv;
- struct mtd_oob_ops ops = {
- .mode = MTD_OOB_PLACE,
- .ooblen = len,
- .oobbuf = buf,
- .ooboffs = 0,
- };
+ struct mtd_oob_ops ops;
int ret;
/* Enter OTP access mode */
this->command(mtd, ONENAND_CMD_OTP_ACCESS, 0, 0);
this->wait(mtd, FL_OTPING);
- ret = onenand_write_oob_nolock(mtd, from, &ops);
-
- *retlen = ops.oobretlen;
+ if (FLEXONENAND(this)) {
+ /*
+ * For Flex-OneNAND, we write lock mark to 1st word of sector 4 of
+ * main area of page 49.
+ */
+ ops.len = mtd->writesize;
+ ops.ooblen = 0;
+ ops.datbuf = buf;
+ ops.oobbuf = NULL;
+ ret = onenand_write_ops_nolock(mtd, mtd->writesize * 49, &ops);
+ *retlen = ops.retlen;
+ } else {
+ ops.mode = MTD_OOB_PLACE;
+ ops.ooblen = len;
+ ops.oobbuf = buf;
+ ops.ooboffs = 0;
+ ret = onenand_write_oob_nolock(mtd, from, &ops);
+ *retlen = ops.oobretlen;
+ }
/* Exit OTP access mode */
this->command(mtd, ONENAND_CMD_RESET, 0, 0);
@@ -2475,27 +2865,34 @@
size_t len)
{
struct onenand_chip *this = mtd->priv;
- u_char *oob_buf = this->oob_buf;
+ u_char *buf = FLEXONENAND(this) ? this->page_buf : this->oob_buf;
size_t retlen;
int ret;
- memset(oob_buf, 0xff, mtd->oobsize);
+ memset(buf, 0xff, FLEXONENAND(this) ? this->writesize
+ : mtd->oobsize);
/*
* Note: OTP lock operation
* OTP block : 0xXXFC
* 1st block : 0xXXF3 (If chip support)
* Both : 0xXXF0 (If chip support)
*/
- oob_buf[ONENAND_OTP_LOCK_OFFSET] = 0xFC;
+ if (FLEXONENAND(this))
+ buf[FLEXONENAND_OTP_LOCK_OFFSET] = 0xFC;
+ else
+ buf[ONENAND_OTP_LOCK_OFFSET] = 0xFC;
/*
* Write lock mark to 8th word of sector0 of page0 of the spare0.
* We write 16 bytes spare area instead of 2 bytes.
+ * For Flex-OneNAND, we write lock mark to 1st word of sector 4 of
+ * main area of page 49.
*/
- from = 0;
- len = 16;
- ret = onenand_otp_walk(mtd, from, len, &retlen, oob_buf, do_otp_lock, MTD_OTP_USER);
+ from = 0;
+ len = FLEXONENAND(this) ? mtd->writesize : 16;
+
+ ret = onenand_otp_walk(mtd, from, len, &retlen, buf, do_otp_lock, MTD_OTP_USER);
return ret ? : retlen;
}
@@ -2542,6 +2939,14 @@
break;
}
+ if (ONENAND_IS_MLC(this))
+ this->options &= ~ONENAND_HAS_2PLANE;
+
+ if (FLEXONENAND(this)) {
+ this->options &= ~ONENAND_HAS_CONT_LOCK;
+ this->options |= ONENAND_HAS_UNLOCK_ALL;
+ }
+
if (this->options & ONENAND_HAS_CONT_LOCK)
printk(KERN_DEBUG "Lock scheme is Continuous Lock\n");
if (this->options & ONENAND_HAS_UNLOCK_ALL)
@@ -2559,14 +2964,16 @@
*/
static void onenand_print_device_info(int device, int version)
{
- int vcc, demuxed, ddp, density;
+ int vcc, demuxed, ddp, density, flexonenand;
vcc = device & ONENAND_DEVICE_VCC_MASK;
demuxed = device & ONENAND_DEVICE_IS_DEMUX;
ddp = device & ONENAND_DEVICE_IS_DDP;
density = onenand_get_density(device);
- printk(KERN_INFO "%sOneNAND%s %dMB %sV 16-bit (0x%02x)\n",
- demuxed ? "" : "Muxed ",
+ flexonenand = device & DEVICE_IS_FLEXONENAND;
+ printk(KERN_INFO "%s%sOneNAND%s %dMB %sV 16-bit (0x%02x)\n",
+ demuxed ? "" : "Muxed ",
+ flexonenand ? "Flex-" : "",
ddp ? "(DDP)" : "",
(16 << density),
vcc ? "2.65/3.3" : "1.8",
@@ -2576,6 +2983,7 @@
static const struct onenand_manufacturers onenand_manuf_ids[] = {
{ONENAND_MFR_SAMSUNG, "Samsung"},
+ {ONENAND_MFR_NUMONYX, "Numonyx"},
};
/**
@@ -2605,6 +3013,261 @@
}
/**
+ * flexonenand_get_boundary - Reads the SLC boundary
+ * @param mtd - MTD device structure
+ */
+static int flexonenand_get_boundary(struct mtd_info *mtd)
+{
+ struct onenand_chip *this = mtd->priv;
+ unsigned die, bdry;
+ int ret, syscfg, locked;
+
+ /* Disable ECC */
+ syscfg = this->read_word(this->base + ONENAND_REG_SYS_CFG1);
+ this->write_word((syscfg | 0x0100), this->base + ONENAND_REG_SYS_CFG1);
+
+ for (die = 0; die < this->dies; die++) {
+ this->command(mtd, FLEXONENAND_CMD_PI_ACCESS, die, 0);
+ this->wait(mtd, FL_SYNCING);
+
+ this->command(mtd, FLEXONENAND_CMD_READ_PI, die, 0);
+ ret = this->wait(mtd, FL_READING);
+
+ bdry = this->read_word(this->base + ONENAND_DATARAM);
+ if ((bdry >> FLEXONENAND_PI_UNLOCK_SHIFT) == 3)
+ locked = 0;
+ else
+ locked = 1;
+ this->boundary[die] = bdry & FLEXONENAND_PI_MASK;
+
+ this->command(mtd, ONENAND_CMD_RESET, 0, 0);
+ ret = this->wait(mtd, FL_RESETING);
+
+ printk(KERN_INFO "Die %d boundary: %d%s\n", die,
+ this->boundary[die], locked ? "(Locked)" : "(Unlocked)");
+ }
+
+ /* Enable ECC */
+ this->write_word(syscfg, this->base + ONENAND_REG_SYS_CFG1);
+ return 0;
+}
+
+/**
+ * flexonenand_get_size - Fill up fields in onenand_chip and mtd_info
+ * boundary[], diesize[], mtd->size, mtd->erasesize
+ * @param mtd - MTD device structure
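+ *
+ * Each die contributes up to two erase regions: an SLC region of half-size
+ * blocks up to the boundary and an MLC region above it. Adjacent SLC
+ * regions that span a die boundary are merged into one region.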
+ */
+static void flexonenand_get_size(struct mtd_info *mtd)
+{
+ struct onenand_chip *this = mtd->priv;
+ int die, i, eraseshift, density;
+ int blksperdie, maxbdry;
+ loff_t ofs;
+
+ density = onenand_get_density(this->device_id);
+ blksperdie = ((loff_t)(16 << density) << 20) >> (this->erase_shift);
+ blksperdie >>= ONENAND_IS_DDP(this) ? 1 : 0;
+ maxbdry = blksperdie - 1;
+ eraseshift = this->erase_shift - 1;
+
+ mtd->numeraseregions = this->dies << 1;
+
+ /* This fills up the device boundary */
+ flexonenand_get_boundary(mtd);
+ die = ofs = 0;
+ i = -1;
+ for (; die < this->dies; die++) {
+ if (!die || this->boundary[die-1] != maxbdry) {
+ i++;
+ mtd->eraseregions[i].offset = ofs;
+ mtd->eraseregions[i].erasesize = 1 << eraseshift;
+ mtd->eraseregions[i].numblocks =
+ this->boundary[die] + 1;
+ ofs += mtd->eraseregions[i].numblocks << eraseshift;
+ eraseshift++;
+ } else {
+ mtd->numeraseregions -= 1;
+ mtd->eraseregions[i].numblocks +=
+ this->boundary[die] + 1;
+ ofs += (this->boundary[die] + 1) << (eraseshift - 1);
+ }
+ if (this->boundary[die] != maxbdry) {
+ i++;
+ mtd->eraseregions[i].offset = ofs;
+ mtd->eraseregions[i].erasesize = 1 << eraseshift;
+ mtd->eraseregions[i].numblocks = maxbdry ^
+ this->boundary[die];
+ ofs += mtd->eraseregions[i].numblocks << eraseshift;
+ eraseshift--;
+ } else
+ mtd->numeraseregions -= 1;
+ }
+
+ /* Expose MLC erase size except when all blocks are SLC */
+ mtd->erasesize = 1 << this->erase_shift;
+ if (mtd->numeraseregions == 1)
+ mtd->erasesize >>= 1;
+
+ printk(KERN_INFO "Device has %d eraseregions\n", mtd->numeraseregions);
+ for (i = 0; i < mtd->numeraseregions; i++)
+ printk(KERN_INFO "[offset: 0x%08x, erasesize: 0x%05x,"
+ " numblocks: %04u]\n",
+ (unsigned int) mtd->eraseregions[i].offset,
+ mtd->eraseregions[i].erasesize,
+ mtd->eraseregions[i].numblocks);
+
+ for (die = 0, mtd->size = 0; die < this->dies; die++) {
+ this->diesize[die] = (loff_t)blksperdie << this->erase_shift;
+ this->diesize[die] -= (loff_t)(this->boundary[die] + 1)
+ << (this->erase_shift - 1);
+ mtd->size += this->diesize[die];
+ }
+}
+
+/**
+ * flexonenand_check_blocks_erased - Check if blocks are erased
+ * @param mtd_info - mtd info structure
+ * @param start - first erase block to check
+ * @param end - last erase block to check
+ *
+ * Converting an unerased block from MLC to SLC
+ * causes byte values to change. Since both data and its ECC
+ * have changed, reads on the block give uncorrectable error.
+ * This might lead to the block being detected as bad.
+ *
+ * Avoid this by ensuring that the block to be converted is
+ * erased.
+ */
+static int flexonenand_check_blocks_erased(struct mtd_info *mtd, int start, int end)
+{
+ struct onenand_chip *this = mtd->priv;
+ int i, ret;
+ int block;
+ struct mtd_oob_ops ops = {
+ .mode = MTD_OOB_PLACE,
+ .ooboffs = 0,
+ .ooblen = mtd->oobsize,
+ .datbuf = NULL,
+ .oobbuf = this->oob_buf,
+ };
+ loff_t addr;
+
+ printk(KERN_DEBUG "Check blocks from %d to %d\n", start, end);
+
+ for (block = start; block <= end; block++) {
+ addr = flexonenand_addr(this, block);
+ if (onenand_block_isbad_nolock(mtd, addr, 0))
+ continue;
+
+ /*
+ * Since main area write results in ECC write to spare,
+ * it is sufficient to check only ECC bytes for change.
+ */
+ ret = onenand_read_oob_nolock(mtd, addr, &ops);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < mtd->oobsize; i++)
+ if (this->oob_buf[i] != 0xff)
+ break;
+
+ if (i != mtd->oobsize) {
+ printk(KERN_WARNING "Block %d not erased.\n", block);
+ return 1;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * flexonenand_set_boundary - Writes the SLC boundary
+ * @param mtd - mtd info structure
+ * @param die - die whose boundary is to be changed
+ * @param boundary - new SLC boundary block, or -1 for no change
+ * @param lock - non-zero to leave the new boundary locked
+ */
+int flexonenand_set_boundary(struct mtd_info *mtd, int die,
+ int boundary, int lock)
+{
+ struct onenand_chip *this = mtd->priv;
+ int ret, density, blksperdie, old, new, thisboundary;
+ loff_t addr;
+
+ /* Change only once for SDP Flex-OneNAND */
+ if (die && (!ONENAND_IS_DDP(this)))
+ return 0;
+
+ /* boundary value of -1 indicates no required change */
+ if (boundary < 0 || boundary == this->boundary[die])
+ return 0;
+
+ density = onenand_get_density(this->device_id);
+ blksperdie = ((16 << density) << 20) >> this->erase_shift;
+ blksperdie >>= ONENAND_IS_DDP(this) ? 1 : 0;
+
+ if (boundary >= blksperdie) {
+ printk(KERN_ERR "flexonenand_set_boundary: Invalid boundary value. "
+ "Boundary not changed.\n");
+ return -EINVAL;
+ }
+
+ /* Check if the blocks being converted are erased */
+ old = this->boundary[die] + (die * this->density_mask);
+ new = boundary + (die * this->density_mask);
+ ret = flexonenand_check_blocks_erased(mtd, min(old, new) + 1, max(old, new));
+ if (ret) {
+ printk(KERN_ERR "flexonenand_set_boundary: Please erase blocks before boundary change\n");
+ return ret;
+ }
+
+ this->command(mtd, FLEXONENAND_CMD_PI_ACCESS, die, 0);
+ this->wait(mtd, FL_SYNCING);
+
+ /* Check if the boundary is locked */
+ this->command(mtd, FLEXONENAND_CMD_READ_PI, die, 0);
+ ret = this->wait(mtd, FL_READING);
+
+ thisboundary = this->read_word(this->base + ONENAND_DATARAM);
+ if ((thisboundary >> FLEXONENAND_PI_UNLOCK_SHIFT) != 3) {
+ printk(KERN_ERR "flexonenand_set_boundary: boundary locked\n");
+ ret = 1;
+ goto out;
+ }
+
+ printk(KERN_INFO "flexonenand_set_boundary: Changing die %d boundary: %d%s\n",
+ die, boundary, lock ? "(Locked)" : "(Unlocked)");
+
+ addr = die ? this->diesize[0] : 0;
+
+ boundary &= FLEXONENAND_PI_MASK;
+ boundary |= lock ? 0 : (3 << FLEXONENAND_PI_UNLOCK_SHIFT);
+
+ this->command(mtd, ONENAND_CMD_ERASE, addr, 0);
+ ret = this->wait(mtd, FL_ERASING);
+ if (ret) {
+ printk(KERN_ERR "flexonenand_set_boundary: Failed PI erase for Die %d\n", die);
+ goto out;
+ }
+
+ this->write_word(boundary, this->base + ONENAND_DATARAM);
+ this->command(mtd, ONENAND_CMD_PROG, addr, 0);
+ ret = this->wait(mtd, FL_WRITING);
+ if (ret) {
+ printk(KERN_ERR "flexonenand_set_boundary: Failed PI write for Die %d\n", die);
+ goto out;
+ }
+
+ this->command(mtd, FLEXONENAND_CMD_PI_UPDATE, die, 0);
+ ret = this->wait(mtd, FL_WRITING);
+out:
+ this->write_word(ONENAND_CMD_RESET, this->base + ONENAND_REG_COMMAND);
+ this->wait(mtd, FL_RESETING);
+ if (!ret)
+ /* Recalculate device size on boundary change */
+ flexonenand_get_size(mtd);
+
+ return ret;
+}
+
+/**
* onenand_probe - [OneNAND Interface] Probe the OneNAND device
* @param mtd MTD device structure
*
@@ -2621,7 +3284,7 @@
/* Save system configuration 1 */
syscfg = this->read_word(this->base + ONENAND_REG_SYS_CFG1);
/* Clear Sync. Burst Read mode to read BootRAM */
- this->write_word((syscfg & ~ONENAND_SYS_CFG1_SYNC_READ), this->base + ONENAND_REG_SYS_CFG1);
+ this->write_word((syscfg & ~ONENAND_SYS_CFG1_SYNC_READ & ~ONENAND_SYS_CFG1_SYNC_WRITE), this->base + ONENAND_REG_SYS_CFG1);
/* Send the command for reading device ID from BootRAM */
this->write_word(ONENAND_CMD_READID, this->base + ONENAND_BOOTRAM);
@@ -2646,6 +3309,7 @@
maf_id = this->read_word(this->base + ONENAND_REG_MANUFACTURER_ID);
dev_id = this->read_word(this->base + ONENAND_REG_DEVICE_ID);
ver_id = this->read_word(this->base + ONENAND_REG_VERSION_ID);
+ this->technology = this->read_word(this->base + ONENAND_REG_TECHNOLOGY);
/* Check OneNAND device */
if (maf_id != bram_maf_id || dev_id != bram_dev_id)
@@ -2657,29 +3321,55 @@
this->version_id = ver_id;
density = onenand_get_density(dev_id);
+ if (FLEXONENAND(this)) {
+ this->dies = ONENAND_IS_DDP(this) ? 2 : 1;
+ /* Maximum possible erase regions */
+ mtd->numeraseregions = this->dies << 1;
+ mtd->eraseregions = kzalloc(sizeof(struct mtd_erase_region_info)
+ * (this->dies << 1), GFP_KERNEL);
+ if (!mtd->eraseregions)
+ return -ENOMEM;
+ }
+
+ /*
+ * For Flex-OneNAND, chipsize represents maximum possible device size.
+ * mtd->size represents the actual device size.
+ */
this->chipsize = (16 << density) << 20;
- /* Set density mask. it is used for DDP */
- if (ONENAND_IS_DDP(this))
- this->density_mask = (1 << (density + 6));
- else
- this->density_mask = 0;
/* OneNAND page size & block size */
/* The data buffer size is equal to page size */
mtd->writesize = this->read_word(this->base + ONENAND_REG_DATA_BUFFER_SIZE);
+ /* We use the full BufferRAM */
+ if (ONENAND_IS_MLC(this))
+ mtd->writesize <<= 1;
+
mtd->oobsize = mtd->writesize >> 5;
/* Pages per a block are always 64 in OneNAND */
mtd->erasesize = mtd->writesize << 6;
+ /*
+ * Flex-OneNAND SLC area has 64 pages per block.
+ * Flex-OneNAND MLC area has 128 pages per block.
+ * Expose MLC erase size to find erase_shift and page_mask.
+ */
+ if (FLEXONENAND(this))
+ mtd->erasesize <<= 1;
this->erase_shift = ffs(mtd->erasesize) - 1;
this->page_shift = ffs(mtd->writesize) - 1;
this->page_mask = (1 << (this->erase_shift - this->page_shift)) - 1;
+ /* Set density mask. it is used for DDP */
+ if (ONENAND_IS_DDP(this))
+ this->density_mask = this->chipsize >> (this->erase_shift + 1);
/* It's real page size */
this->writesize = mtd->writesize;
/* REVISIT: Multichip handling */
- mtd->size = this->chipsize;
+ if (FLEXONENAND(this))
+ flexonenand_get_size(mtd);
+ else
+ mtd->size = this->chipsize;
/* Check OneNAND features */
onenand_check_features(mtd);
@@ -2734,7 +3424,7 @@
*/
int onenand_scan(struct mtd_info *mtd, int maxchips)
{
- int i;
+ int i, ret;
struct onenand_chip *this = mtd->priv;
if (!this->read_word)
@@ -2746,6 +3436,10 @@
this->command = onenand_command;
if (!this->wait)
onenand_setup_wait(mtd);
+ if (!this->bbt_wait)
+ this->bbt_wait = onenand_bbt_wait;
+ if (!this->unlock_all)
+ this->unlock_all = onenand_unlock_all;
if (!this->read_bufferram)
this->read_bufferram = onenand_read_bufferram;
@@ -2796,6 +3490,10 @@
* Allow subpage writes up to oobsize.
*/
switch (mtd->oobsize) {
+ case 128:
+ this->ecclayout = &onenand_oob_128;
+ mtd->subpage_sft = 0;
+ break;
case 64:
this->ecclayout = &onenand_oob_64;
mtd->subpage_sft = 2;
@@ -2859,9 +3557,18 @@
mtd->owner = THIS_MODULE;
/* Unlock whole block */
- onenand_unlock_all(mtd);
+ this->unlock_all(mtd);
- return this->scan_bbt(mtd);
+ ret = this->scan_bbt(mtd);
+ if ((!FLEXONENAND(this)) || ret)
+ return ret;
+
+ /* Change Flex-OneNAND boundaries if required */
+ for (i = 0; i < MAX_DIES; i++)
+ flexonenand_set_boundary(mtd, i, flex_bdry[2 * i],
+ flex_bdry[(2 * i) + 1]);
+
+ return 0;
}
/**
@@ -2890,6 +3597,7 @@
kfree(this->page_buf);
if (this->options & ONENAND_OOBBUF_ALLOC)
kfree(this->oob_buf);
+ kfree(mtd->eraseregions);
}
EXPORT_SYMBOL_GPL(onenand_scan);
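The die-size arithmetic in flexonenand_get_size() above is easier to see with a
standalone sketch. The values below (an erase_shift of 19 and 1024 MLC-sized
blocks per die) are assumed for illustration only and are not taken from the
driver; the point is simply that every block converted to SLC costs half an MLC
block of capacity.

/* Minimal sketch (assumed values, not driver state): die capacity as a
 * function of the SLC boundary, mirroring the diesize calculation in
 * flexonenand_get_size().  boundary == -1 means the whole die is MLC.
 */
#include <stdio.h>

int main(void)
{
	const unsigned int erase_shift = 19;	/* 512KiB MLC erase block (assumed) */
	const unsigned int blksperdie = 1024;	/* MLC-sized blocks per die (assumed) */
	int boundary;

	for (boundary = -1; boundary < 4; boundary++) {
		unsigned long long size = (unsigned long long)blksperdie << erase_shift;

		/* each SLC block stores half as much data as an MLC block */
		size -= (unsigned long long)(boundary + 1) << (erase_shift - 1);
		printf("boundary %2d -> die size %llu KiB\n", boundary, size >> 10);
	}
	return 0;
}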
diff --git a/drivers/mtd/onenand/onenand_bbt.c b/drivers/mtd/onenand/onenand_bbt.c
index 2f53b51..a91fcac 100644
--- a/drivers/mtd/onenand/onenand_bbt.c
+++ b/drivers/mtd/onenand/onenand_bbt.c
@@ -63,6 +63,7 @@
loff_t from;
size_t readlen, ooblen;
struct mtd_oob_ops ops;
+ int rgn;
printk(KERN_INFO "Scanning device for bad blocks\n");
@@ -76,7 +77,7 @@
/* Note that numblocks is 2 * (real numblocks) here;
* see i += 2 below as it makes shifting and masking less painful
*/
- numblocks = mtd->size >> (bbm->bbt_erase_shift - 1);
+ numblocks = this->chipsize >> (bbm->bbt_erase_shift - 1);
startblock = 0;
from = 0;
@@ -106,7 +107,12 @@
}
}
i += 2;
- from += (1 << bbm->bbt_erase_shift);
+
+ if (FLEXONENAND(this)) {
+ rgn = flexonenand_region(mtd, from);
+ from += mtd->eraseregions[rgn].erasesize;
+ } else
+ from += (1 << bbm->bbt_erase_shift);
}
return 0;
@@ -143,7 +149,7 @@
uint8_t res;
/* Get block number * 2 */
- block = (int) (offs >> (bbm->bbt_erase_shift - 1));
+ block = (int) (onenand_block(this, offs) << 1);
res = (bbm->bbt[block >> 3] >> (block & 0x06)) & 0x03;
DEBUG(MTD_DEBUG_LEVEL2, "onenand_isbad_bbt: bbt info for offs 0x%08x: (block %d) 0x%02x\n",
@@ -178,7 +184,7 @@
struct bbm_info *bbm = this->bbm;
int len, ret = 0;
- len = mtd->size >> (this->erase_shift + 2);
+ len = this->chipsize >> (this->erase_shift + 2);
/* Allocate memory (2bit per block) and clear the memory bad block table */
bbm->bbt = kzalloc(len, GFP_KERNEL);
if (!bbm->bbt) {
diff --git a/drivers/mtd/onenand/onenand_sim.c b/drivers/mtd/onenand/onenand_sim.c
index d64200b..f6e3c8a 100644
--- a/drivers/mtd/onenand/onenand_sim.c
+++ b/drivers/mtd/onenand/onenand_sim.c
@@ -6,6 +6,10 @@
* Copyright © 2005-2007 Samsung Electronics
* Kyungmin Park <kyungmin.park@samsung.com>
*
+ * Vishak G <vishak.g at samsung.com>, Rohit Hagargundgi <h.rohit at samsung.com>
+ * Flex-OneNAND simulator support
+ * Copyright (C) Samsung Electronics, 2008
+ *
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
@@ -24,16 +28,38 @@
#ifndef CONFIG_ONENAND_SIM_MANUFACTURER
#define CONFIG_ONENAND_SIM_MANUFACTURER 0xec
#endif
+
#ifndef CONFIG_ONENAND_SIM_DEVICE_ID
#define CONFIG_ONENAND_SIM_DEVICE_ID 0x04
#endif
+
+#define CONFIG_FLEXONENAND ((CONFIG_ONENAND_SIM_DEVICE_ID >> 9) & 1)
+
#ifndef CONFIG_ONENAND_SIM_VERSION_ID
#define CONFIG_ONENAND_SIM_VERSION_ID 0x1e
#endif
+#ifndef CONFIG_ONENAND_SIM_TECHNOLOGY_ID
+#define CONFIG_ONENAND_SIM_TECHNOLOGY_ID CONFIG_FLEXONENAND
+#endif
+
+/* Initial boundary values for Flex-OneNAND Simulator */
+#ifndef CONFIG_FLEXONENAND_SIM_DIE0_BOUNDARY
+#define CONFIG_FLEXONENAND_SIM_DIE0_BOUNDARY 0x01
+#endif
+
+#ifndef CONFIG_FLEXONENAND_SIM_DIE1_BOUNDARY
+#define CONFIG_FLEXONENAND_SIM_DIE1_BOUNDARY 0x01
+#endif
+
static int manuf_id = CONFIG_ONENAND_SIM_MANUFACTURER;
static int device_id = CONFIG_ONENAND_SIM_DEVICE_ID;
static int version_id = CONFIG_ONENAND_SIM_VERSION_ID;
+static int technology_id = CONFIG_ONENAND_SIM_TECHNOLOGY_ID;
+static int boundary[] = {
+ CONFIG_FLEXONENAND_SIM_DIE0_BOUNDARY,
+ CONFIG_FLEXONENAND_SIM_DIE1_BOUNDARY,
+};
struct onenand_flash {
void __iomem *base;
@@ -57,12 +83,18 @@
(writew(v, this->base + ONENAND_REG_WP_STATUS))
/* It has all 0xff chars */
-#define MAX_ONENAND_PAGESIZE (2048 + 64)
+#define MAX_ONENAND_PAGESIZE (4096 + 128)
static unsigned char *ffchars;
+#if CONFIG_FLEXONENAND
+#define PARTITION_NAME "Flex-OneNAND simulator partition"
+#else
+#define PARTITION_NAME "OneNAND simulator partition"
+#endif
+
static struct mtd_partition os_partitions[] = {
{
- .name = "OneNAND simulator partition",
+ .name = PARTITION_NAME,
.offset = 0,
.size = MTDPART_SIZ_FULL,
},
@@ -104,6 +136,7 @@
switch (cmd) {
case ONENAND_CMD_UNLOCK:
+ case ONENAND_CMD_UNLOCK_ALL:
if (block_lock_scheme)
ONENAND_SET_WP_STATUS(ONENAND_WP_US, this);
else
@@ -228,10 +261,12 @@
{
struct mtd_info *mtd = &info->mtd;
struct onenand_flash *flash = this->priv;
- int main_offset, spare_offset;
+ int main_offset, spare_offset, die = 0;
void __iomem *src;
void __iomem *dest;
unsigned int i;
+ static int pi_operation;
+ int erasesize, rgn;
if (dataram) {
main_offset = mtd->writesize;
@@ -241,10 +276,27 @@
spare_offset = 0;
}
+ if (pi_operation) {
+ die = readw(this->base + ONENAND_REG_START_ADDRESS2);
+ die >>= ONENAND_DDP_SHIFT;
+ }
+
switch (cmd) {
+ case FLEXONENAND_CMD_PI_ACCESS:
+ pi_operation = 1;
+ break;
+
+ case ONENAND_CMD_RESET:
+ pi_operation = 0;
+ break;
+
case ONENAND_CMD_READ:
src = ONENAND_CORE(flash) + offset;
dest = ONENAND_MAIN_AREA(this, main_offset);
+ if (pi_operation) {
+ writew(boundary[die], this->base + ONENAND_DATARAM);
+ break;
+ }
memcpy(dest, src, mtd->writesize);
/* Fall through */
@@ -257,6 +309,10 @@
case ONENAND_CMD_PROG:
src = ONENAND_MAIN_AREA(this, main_offset);
dest = ONENAND_CORE(flash) + offset;
+ if (pi_operation) {
+ boundary[die] = readw(this->base + ONENAND_DATARAM);
+ break;
+ }
/* To handle partial write */
for (i = 0; i < (1 << mtd->subpage_sft); i++) {
int off = i * this->subpagesize;
@@ -284,9 +340,18 @@
break;
case ONENAND_CMD_ERASE:
- memset(ONENAND_CORE(flash) + offset, 0xff, mtd->erasesize);
+ if (pi_operation)
+ break;
+
+ if (FLEXONENAND(this)) {
+ rgn = flexonenand_region(mtd, offset);
+ erasesize = mtd->eraseregions[rgn].erasesize;
+ } else
+ erasesize = mtd->erasesize;
+
+ memset(ONENAND_CORE(flash) + offset, 0xff, erasesize);
memset(ONENAND_CORE_SPARE(flash, this, offset), 0xff,
- (mtd->erasesize >> 5));
+ (erasesize >> 5));
break;
default:
@@ -339,7 +404,7 @@
}
if (block != -1)
- offset += block << this->erase_shift;
+ offset = onenand_addr(this, block);
if (page != -1)
offset += page << this->page_shift;
@@ -390,6 +455,7 @@
}
density = device_id >> ONENAND_DEVICE_DENSITY_SHIFT;
+ density &= ONENAND_DEVICE_DENSITY_MASK;
size = ((16 << 20) << density);
ONENAND_CORE(flash) = vmalloc(size + (size >> 5));
@@ -405,8 +471,9 @@
writew(manuf_id, flash->base + ONENAND_REG_MANUFACTURER_ID);
writew(device_id, flash->base + ONENAND_REG_DEVICE_ID);
writew(version_id, flash->base + ONENAND_REG_VERSION_ID);
+ writew(technology_id, flash->base + ONENAND_REG_TECHNOLOGY);
- if (density < 2)
+ if (density < 2 && (!CONFIG_FLEXONENAND))
buffer_size = 0x0400; /* 1KiB page */
else
buffer_size = 0x0800; /* 2KiB page */
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 892a9e4..1dc7215 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -2443,6 +2443,17 @@
To compile this driver as a module, choose M here. The module
will be called jme.
+config S6GMAC
+ tristate "S6105 GMAC ethernet support"
+ depends on XTENSA_VARIANT_S6000
+ select PHYLIB
+ help
+ This driver supports the on-chip ethernet device on the
+ S6105 xtensa processor.
+
+ To compile this driver as a module, choose M here. The module
+ will be called s6gmac.
+
endif # NETDEV_1000
#
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index d366fb2..4b58a59 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -245,6 +245,7 @@
obj-$(CONFIG_DNET) += dnet.o
obj-$(CONFIG_MACB) += macb.o
+obj-$(CONFIG_S6GMAC) += s6gmac.o
obj-$(CONFIG_ARM) += arm/
obj-$(CONFIG_DEV_APPLETALK) += appletalk/
diff --git a/drivers/net/benet/be.h b/drivers/net/benet/be.h
index b4bb06f..f703758 100644
--- a/drivers/net/benet/be.h
+++ b/drivers/net/benet/be.h
@@ -65,7 +65,7 @@
#define TX_CQ_LEN 1024
#define RX_Q_LEN 1024 /* Does not support any other value */
#define RX_CQ_LEN 1024
-#define MCC_Q_LEN 64 /* total size not to exceed 8 pages */
+#define MCC_Q_LEN 128 /* total size not to exceed 8 pages */
#define MCC_CQ_LEN 256
#define BE_NAPI_WEIGHT 64
@@ -91,6 +91,61 @@
atomic_t used; /* Number of valid elements in the queue */
};
+static inline u32 MODULO(u16 val, u16 limit)
+{
+ BUG_ON(limit & (limit - 1));
+ return val & (limit - 1);
+}
+
+static inline void index_adv(u16 *index, u16 val, u16 limit)
+{
+ *index = MODULO((*index + val), limit);
+}
+
+static inline void index_inc(u16 *index, u16 limit)
+{
+ *index = MODULO((*index + 1), limit);
+}
+
+static inline void *queue_head_node(struct be_queue_info *q)
+{
+ return q->dma_mem.va + q->head * q->entry_size;
+}
+
+static inline void *queue_tail_node(struct be_queue_info *q)
+{
+ return q->dma_mem.va + q->tail * q->entry_size;
+}
+
+static inline void queue_head_inc(struct be_queue_info *q)
+{
+ index_inc(&q->head, q->len);
+}
+
+static inline void queue_tail_inc(struct be_queue_info *q)
+{
+ index_inc(&q->tail, q->len);
+}
+
+
+struct be_eq_obj {
+ struct be_queue_info q;
+ char desc[32];
+
+ /* Adaptive interrupt coalescing (AIC) info */
+ bool enable_aic;
+ u16 min_eqd; /* in usecs */
+ u16 max_eqd; /* in usecs */
+ u16 cur_eqd; /* in usecs */
+
+ struct napi_struct napi;
+};
+
+struct be_mcc_obj {
+ struct be_queue_info q;
+ struct be_queue_info cq;
+};
+
struct be_ctrl_info {
u8 __iomem *csr;
u8 __iomem *db; /* Door Bell */
@@ -98,11 +153,20 @@
int pci_func;
/* Mbox used for cmd request/response */
- spinlock_t cmd_lock; /* For serializing cmds to BE card */
+ spinlock_t mbox_lock; /* For serializing mbox cmds to BE card */
struct be_dma_mem mbox_mem;
/* Mbox mem is adjusted to align to 16 bytes. The allocated addr
* is stored for freeing purpose */
struct be_dma_mem mbox_mem_alloced;
+
+ /* MCC Rings */
+ struct be_mcc_obj mcc_obj;
+ spinlock_t mcc_lock; /* For serializing mcc cmds to BE card */
+ spinlock_t mcc_cq_lock;
+
+ /* MCC Async callback */
+ void (*async_cb)(void *adapter, bool link_up);
+ void *adapter_ctxt;
};
#include "be_cmds.h"
@@ -150,19 +214,6 @@
struct be_dma_mem cmd;
};
-struct be_eq_obj {
- struct be_queue_info q;
- char desc[32];
-
- /* Adaptive interrupt coalescing (AIC) info */
- bool enable_aic;
- u16 min_eqd; /* in usecs */
- u16 max_eqd; /* in usecs */
- u16 cur_eqd; /* in usecs */
-
- struct napi_struct napi;
-};
-
struct be_tx_obj {
struct be_queue_info q;
struct be_queue_info cq;
@@ -225,8 +276,9 @@
u32 if_handle; /* Used to configure filtering */
u32 pmac_id; /* MAC addr handle used by BE card */
- struct be_link_info link;
+ bool link_up;
u32 port_num;
+ bool promiscuous;
};
extern struct ethtool_ops be_ethtool_ops;
@@ -235,22 +287,6 @@
#define BE_SET_NETDEV_OPS(netdev, ops) (netdev->netdev_ops = ops)
-static inline u32 MODULO(u16 val, u16 limit)
-{
- BUG_ON(limit & (limit - 1));
- return val & (limit - 1);
-}
-
-static inline void index_adv(u16 *index, u16 val, u16 limit)
-{
- *index = MODULO((*index + val), limit);
-}
-
-static inline void index_inc(u16 *index, u16 limit)
-{
- *index = MODULO((*index + 1), limit);
-}
-
#define PAGE_SHIFT_4K 12
#define PAGE_SIZE_4K (1 << PAGE_SHIFT_4K)
@@ -339,4 +375,6 @@
return val;
}
+extern void be_cq_notify(struct be_ctrl_info *ctrl, u16 qid, bool arm,
+ u16 num_popped);
#endif /* BE_H */
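The queue helpers moved into be.h above (MODULO, index_inc, queue_head_node and
friends) assume the ring length is a power of two, which is why MODULO asserts
on it. A small standalone sketch, with an 8-entry ring assumed purely for
illustration, shows the wrap-around behaviour the shared MCC/TX code relies on:

/* Illustrative only: power-of-two ring index arithmetic, as used by the
 * be_queue_info helpers above.  The 8-entry ring is an assumed example.
 */
#include <assert.h>
#include <stdio.h>

static unsigned short ring_modulo(unsigned short val, unsigned short limit)
{
	assert((limit & (limit - 1)) == 0);	/* limit must be a power of two */
	return val & (limit - 1);
}

int main(void)
{
	unsigned short head = 0, len = 8;
	int i;

	for (i = 0; i < 10; i++) {
		printf("post at slot %u\n", head);
		head = ring_modulo(head + 1, len);	/* same idea as index_inc() */
	}
	return 0;	/* slots cycle 0..7 and wrap back to 0 */
}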
diff --git a/drivers/net/benet/be_cmds.c b/drivers/net/benet/be_cmds.c
index d444aed..583517e 100644
--- a/drivers/net/benet/be_cmds.c
+++ b/drivers/net/benet/be_cmds.c
@@ -17,6 +17,133 @@
#include "be.h"
+static void be_mcc_notify(struct be_ctrl_info *ctrl)
+{
+ struct be_queue_info *mccq = &ctrl->mcc_obj.q;
+ u32 val = 0;
+
+ val |= mccq->id & DB_MCCQ_RING_ID_MASK;
+ val |= 1 << DB_MCCQ_NUM_POSTED_SHIFT;
+ iowrite32(val, ctrl->db + DB_MCCQ_OFFSET);
+}
+
+/* To check if valid bit is set, check the entire word as we don't know
+ * the endianness of the data (old entry is host endian while a new entry is
+ * little endian) */
+static inline bool be_mcc_compl_is_new(struct be_mcc_cq_entry *compl)
+{
+ if (compl->flags != 0) {
+ compl->flags = le32_to_cpu(compl->flags);
+ BUG_ON((compl->flags & CQE_FLAGS_VALID_MASK) == 0);
+ return true;
+ } else {
+ return false;
+ }
+}
+
+/* Need to reset the entire word that houses the valid bit */
+static inline void be_mcc_compl_use(struct be_mcc_cq_entry *compl)
+{
+ compl->flags = 0;
+}
+
+static int be_mcc_compl_process(struct be_ctrl_info *ctrl,
+ struct be_mcc_cq_entry *compl)
+{
+ u16 compl_status, extd_status;
+
+ /* Just swap the status to host endian; mcc tag is opaquely copied
+ * from mcc_wrb */
+ be_dws_le_to_cpu(compl, 4);
+
+ compl_status = (compl->status >> CQE_STATUS_COMPL_SHIFT) &
+ CQE_STATUS_COMPL_MASK;
+ if (compl_status != MCC_STATUS_SUCCESS) {
+ extd_status = (compl->status >> CQE_STATUS_EXTD_SHIFT) &
+ CQE_STATUS_EXTD_MASK;
+ printk(KERN_WARNING DRV_NAME
+ " error in cmd completion: status(compl/extd)=%d/%d\n",
+ compl_status, extd_status);
+ return -1;
+ }
+ return 0;
+}
+
+/* Link state evt is a string of bytes; no need for endian swapping */
+static void be_async_link_state_process(struct be_ctrl_info *ctrl,
+ struct be_async_event_link_state *evt)
+{
+ ctrl->async_cb(ctrl->adapter_ctxt,
+ evt->port_link_status == ASYNC_EVENT_LINK_UP ? true : false);
+}
+
+static inline bool is_link_state_evt(u32 trailer)
+{
+ return (((trailer >> ASYNC_TRAILER_EVENT_CODE_SHIFT) &
+ ASYNC_TRAILER_EVENT_CODE_MASK) ==
+ ASYNC_EVENT_CODE_LINK_STATE);
+}
+
+static struct be_mcc_cq_entry *be_mcc_compl_get(struct be_ctrl_info *ctrl)
+{
+ struct be_queue_info *mcc_cq = &ctrl->mcc_obj.cq;
+ struct be_mcc_cq_entry *compl = queue_tail_node(mcc_cq);
+
+ if (be_mcc_compl_is_new(compl)) {
+ queue_tail_inc(mcc_cq);
+ return compl;
+ }
+ return NULL;
+}
+
+void be_process_mcc(struct be_ctrl_info *ctrl)
+{
+ struct be_mcc_cq_entry *compl;
+ int num = 0;
+
+ spin_lock_bh(&ctrl->mcc_cq_lock);
+ while ((compl = be_mcc_compl_get(ctrl))) {
+ if (compl->flags & CQE_FLAGS_ASYNC_MASK) {
+ /* Interpret flags as an async trailer */
+ BUG_ON(!is_link_state_evt(compl->flags));
+
+ /* Interpret compl as an async link evt */
+ be_async_link_state_process(ctrl,
+ (struct be_async_event_link_state *) compl);
+ } else {
+ be_mcc_compl_process(ctrl, compl);
+ atomic_dec(&ctrl->mcc_obj.q.used);
+ }
+ be_mcc_compl_use(compl);
+ num++;
+ }
+ if (num)
+ be_cq_notify(ctrl, ctrl->mcc_obj.cq.id, true, num);
+ spin_unlock_bh(&ctrl->mcc_cq_lock);
+}
+
+/* Wait till no more pending mcc requests are present */
+static void be_mcc_wait_compl(struct be_ctrl_info *ctrl)
+{
+#define mcc_timeout 50000 /* 5s timeout */
+ int i;
+ for (i = 0; i < mcc_timeout; i++) {
+ be_process_mcc(ctrl);
+ if (atomic_read(&ctrl->mcc_obj.q.used) == 0)
+ break;
+ udelay(100);
+ }
+ if (i == mcc_timeout)
+ printk(KERN_WARNING DRV_NAME " mcc poll timed out\n");
+}
+
+/* Notify MCC requests and wait for completion */
+static void be_mcc_notify_wait(struct be_ctrl_info *ctrl)
+{
+ be_mcc_notify(ctrl);
+ be_mcc_wait_compl(ctrl);
+}
+
static int be_mbox_db_ready_wait(void __iomem *db)
{
int cnt = 0, wait = 5;
@@ -44,11 +171,11 @@
/*
* Insert the mailbox address into the doorbell in two steps
+ * Polls on the mbox doorbell till a command completion (or a timeout) occurs
*/
static int be_mbox_db_ring(struct be_ctrl_info *ctrl)
{
int status;
- u16 compl_status, extd_status;
u32 val = 0;
void __iomem *db = ctrl->db + MPU_MAILBOX_DB_OFFSET;
struct be_dma_mem *mbox_mem = &ctrl->mbox_mem;
@@ -79,24 +206,17 @@
if (status != 0)
return status;
- /* compl entry has been made now */
- be_dws_le_to_cpu(cqe, sizeof(*cqe));
- if (!(cqe->flags & CQE_FLAGS_VALID_MASK)) {
- printk(KERN_WARNING DRV_NAME ": ERROR invalid mbox compl\n");
+ /* A cq entry has been made now */
+ if (be_mcc_compl_is_new(cqe)) {
+ status = be_mcc_compl_process(ctrl, &mbox->cqe);
+ be_mcc_compl_use(cqe);
+ if (status)
+ return status;
+ } else {
+ printk(KERN_WARNING DRV_NAME " invalid mailbox completion\n");
return -1;
}
-
- compl_status = (cqe->status >> CQE_STATUS_COMPL_SHIFT) &
- CQE_STATUS_COMPL_MASK;
- if (compl_status != MCC_STATUS_SUCCESS) {
- extd_status = (cqe->status >> CQE_STATUS_EXTD_SHIFT) &
- CQE_STATUS_EXTD_MASK;
- printk(KERN_WARNING DRV_NAME
- ": ERROR in cmd compl. status(compl/extd)=%d/%d\n",
- compl_status, extd_status);
- }
-
- return compl_status;
+ return 0;
}
static int be_POST_stage_get(struct be_ctrl_info *ctrl, u16 *stage)
@@ -235,6 +355,18 @@
return &((struct be_mcc_mailbox *)(mbox_mem->va))->wrb;
}
+static inline struct be_mcc_wrb *wrb_from_mcc(struct be_queue_info *mccq)
+{
+ struct be_mcc_wrb *wrb = NULL;
+ if (atomic_read(&mccq->used) < mccq->len) {
+ wrb = queue_head_node(mccq);
+ queue_head_inc(mccq);
+ atomic_inc(&mccq->used);
+ memset(wrb, 0, sizeof(*wrb));
+ }
+ return wrb;
+}
+
int be_cmd_eq_create(struct be_ctrl_info *ctrl,
struct be_queue_info *eq, int eq_delay)
{
@@ -244,7 +376,7 @@
struct be_dma_mem *q_mem = &eq->dma_mem;
int status;
- spin_lock(&ctrl->cmd_lock);
+ spin_lock(&ctrl->mbox_lock);
memset(wrb, 0, sizeof(*wrb));
be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
@@ -272,7 +404,7 @@
eq->id = le16_to_cpu(resp->eq_id);
eq->created = true;
}
- spin_unlock(&ctrl->cmd_lock);
+ spin_unlock(&ctrl->mbox_lock);
return status;
}
@@ -284,7 +416,7 @@
struct be_cmd_resp_mac_query *resp = embedded_payload(wrb);
int status;
- spin_lock(&ctrl->cmd_lock);
+ spin_lock(&ctrl->mbox_lock);
memset(wrb, 0, sizeof(*wrb));
be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
@@ -304,7 +436,7 @@
if (!status)
memcpy(mac_addr, resp->mac.addr, ETH_ALEN);
- spin_unlock(&ctrl->cmd_lock);
+ spin_unlock(&ctrl->mbox_lock);
return status;
}
@@ -315,7 +447,7 @@
struct be_cmd_req_pmac_add *req = embedded_payload(wrb);
int status;
- spin_lock(&ctrl->cmd_lock);
+ spin_lock(&ctrl->mbox_lock);
memset(wrb, 0, sizeof(*wrb));
be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
@@ -332,7 +464,7 @@
*pmac_id = le32_to_cpu(resp->pmac_id);
}
- spin_unlock(&ctrl->cmd_lock);
+ spin_unlock(&ctrl->mbox_lock);
return status;
}
@@ -342,7 +474,7 @@
struct be_cmd_req_pmac_del *req = embedded_payload(wrb);
int status;
- spin_lock(&ctrl->cmd_lock);
+ spin_lock(&ctrl->mbox_lock);
memset(wrb, 0, sizeof(*wrb));
be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
@@ -354,7 +486,7 @@
req->pmac_id = cpu_to_le32(pmac_id);
status = be_mbox_db_ring(ctrl);
- spin_unlock(&ctrl->cmd_lock);
+ spin_unlock(&ctrl->mbox_lock);
return status;
}
@@ -370,7 +502,7 @@
void *ctxt = &req->context;
int status;
- spin_lock(&ctrl->cmd_lock);
+ spin_lock(&ctrl->mbox_lock);
memset(wrb, 0, sizeof(*wrb));
be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
@@ -388,7 +520,7 @@
AMAP_SET_BITS(struct amap_cq_context, solevent, ctxt, sol_evts);
AMAP_SET_BITS(struct amap_cq_context, eventable, ctxt, 1);
AMAP_SET_BITS(struct amap_cq_context, eqid, ctxt, eq->id);
- AMAP_SET_BITS(struct amap_cq_context, armed, ctxt, 0);
+ AMAP_SET_BITS(struct amap_cq_context, armed, ctxt, 1);
AMAP_SET_BITS(struct amap_cq_context, func, ctxt, ctrl->pci_func);
be_dws_cpu_to_le(ctxt, sizeof(req->context));
@@ -399,7 +531,56 @@
cq->id = le16_to_cpu(resp->cq_id);
cq->created = true;
}
- spin_unlock(&ctrl->cmd_lock);
+ spin_unlock(&ctrl->mbox_lock);
+
+ return status;
+}
+
+static u32 be_encoded_q_len(int q_len)
+{
+ u32 len_encoded = fls(q_len); /* log2(len) + 1 */
+ if (len_encoded == 16)
+ len_encoded = 0;
+ return len_encoded;
+}
+
+int be_cmd_mccq_create(struct be_ctrl_info *ctrl,
+ struct be_queue_info *mccq,
+ struct be_queue_info *cq)
+{
+ struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
+ struct be_cmd_req_mcc_create *req = embedded_payload(wrb);
+ struct be_dma_mem *q_mem = &mccq->dma_mem;
+ void *ctxt = &req->context;
+ int status;
+
+ spin_lock(&ctrl->mbox_lock);
+ memset(wrb, 0, sizeof(*wrb));
+
+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
+
+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+ OPCODE_COMMON_MCC_CREATE, sizeof(*req));
+
+ req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
+
+ AMAP_SET_BITS(struct amap_mcc_context, fid, ctxt, ctrl->pci_func);
+ AMAP_SET_BITS(struct amap_mcc_context, valid, ctxt, 1);
+ AMAP_SET_BITS(struct amap_mcc_context, ring_size, ctxt,
+ be_encoded_q_len(mccq->len));
+ AMAP_SET_BITS(struct amap_mcc_context, cq_id, ctxt, cq->id);
+
+ be_dws_cpu_to_le(ctxt, sizeof(req->context));
+
+ be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
+
+ status = be_mbox_db_ring(ctrl);
+ if (!status) {
+ struct be_cmd_resp_mcc_create *resp = embedded_payload(wrb);
+ mccq->id = le16_to_cpu(resp->id);
+ mccq->created = true;
+ }
+ spin_unlock(&ctrl->mbox_lock);
return status;
}
@@ -415,7 +596,7 @@
int status;
u32 len_encoded;
- spin_lock(&ctrl->cmd_lock);
+ spin_lock(&ctrl->mbox_lock);
memset(wrb, 0, sizeof(*wrb));
be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
@@ -446,7 +627,7 @@
txq->id = le16_to_cpu(resp->cid);
txq->created = true;
}
- spin_unlock(&ctrl->cmd_lock);
+ spin_unlock(&ctrl->mbox_lock);
return status;
}
@@ -460,7 +641,7 @@
struct be_dma_mem *q_mem = &rxq->dma_mem;
int status;
- spin_lock(&ctrl->cmd_lock);
+ spin_lock(&ctrl->mbox_lock);
memset(wrb, 0, sizeof(*wrb));
be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
@@ -482,7 +663,7 @@
rxq->id = le16_to_cpu(resp->id);
rxq->created = true;
}
- spin_unlock(&ctrl->cmd_lock);
+ spin_unlock(&ctrl->mbox_lock);
return status;
}
@@ -496,7 +677,7 @@
u8 subsys = 0, opcode = 0;
int status;
- spin_lock(&ctrl->cmd_lock);
+ spin_lock(&ctrl->mbox_lock);
memset(wrb, 0, sizeof(*wrb));
be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
@@ -518,6 +699,10 @@
subsys = CMD_SUBSYSTEM_ETH;
opcode = OPCODE_ETH_RX_DESTROY;
break;
+ case QTYPE_MCCQ:
+ subsys = CMD_SUBSYSTEM_COMMON;
+ opcode = OPCODE_COMMON_MCC_DESTROY;
+ break;
default:
printk(KERN_WARNING DRV_NAME ":bad Q type in Q destroy cmd\n");
status = -1;
@@ -528,7 +713,7 @@
status = be_mbox_db_ring(ctrl);
err:
- spin_unlock(&ctrl->cmd_lock);
+ spin_unlock(&ctrl->mbox_lock);
return status;
}
@@ -541,7 +726,7 @@
struct be_cmd_req_if_create *req = embedded_payload(wrb);
int status;
- spin_lock(&ctrl->cmd_lock);
+ spin_lock(&ctrl->mbox_lock);
memset(wrb, 0, sizeof(*wrb));
be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
@@ -562,7 +747,7 @@
*pmac_id = le32_to_cpu(resp->pmac_id);
}
- spin_unlock(&ctrl->cmd_lock);
+ spin_unlock(&ctrl->mbox_lock);
return status;
}
@@ -572,7 +757,7 @@
struct be_cmd_req_if_destroy *req = embedded_payload(wrb);
int status;
- spin_lock(&ctrl->cmd_lock);
+ spin_lock(&ctrl->mbox_lock);
memset(wrb, 0, sizeof(*wrb));
be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
@@ -583,7 +768,7 @@
req->interface_id = cpu_to_le32(interface_id);
status = be_mbox_db_ring(ctrl);
- spin_unlock(&ctrl->cmd_lock);
+ spin_unlock(&ctrl->mbox_lock);
return status;
}
@@ -598,7 +783,7 @@
struct be_sge *sge = nonembedded_sgl(wrb);
int status;
- spin_lock(&ctrl->cmd_lock);
+ spin_lock(&ctrl->mbox_lock);
memset(wrb, 0, sizeof(*wrb));
memset(req, 0, sizeof(*req));
@@ -617,18 +802,20 @@
be_dws_le_to_cpu(&resp->hw_stats, sizeof(resp->hw_stats));
}
- spin_unlock(&ctrl->cmd_lock);
+ spin_unlock(&ctrl->mbox_lock);
return status;
}
int be_cmd_link_status_query(struct be_ctrl_info *ctrl,
- struct be_link_info *link)
+ bool *link_up)
{
struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
struct be_cmd_req_link_status *req = embedded_payload(wrb);
int status;
- spin_lock(&ctrl->cmd_lock);
+ spin_lock(&ctrl->mbox_lock);
+
+ *link_up = false;
memset(wrb, 0, sizeof(*wrb));
be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
@@ -639,14 +826,11 @@
status = be_mbox_db_ring(ctrl);
if (!status) {
struct be_cmd_resp_link_status *resp = embedded_payload(wrb);
- link->speed = resp->mac_speed;
- link->duplex = resp->mac_duplex;
- link->fault = resp->mac_fault;
- } else {
- link->speed = PHY_LINK_SPEED_ZERO;
+ if (resp->mac_speed != PHY_LINK_SPEED_ZERO)
+ *link_up = true;
}
- spin_unlock(&ctrl->cmd_lock);
+ spin_unlock(&ctrl->mbox_lock);
return status;
}
@@ -656,7 +840,7 @@
struct be_cmd_req_get_fw_version *req = embedded_payload(wrb);
int status;
- spin_lock(&ctrl->cmd_lock);
+ spin_lock(&ctrl->mbox_lock);
memset(wrb, 0, sizeof(*wrb));
be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
@@ -670,7 +854,7 @@
strncpy(fw_ver, resp->firmware_version_string, FW_VER_LEN);
}
- spin_unlock(&ctrl->cmd_lock);
+ spin_unlock(&ctrl->mbox_lock);
return status;
}
@@ -681,7 +865,7 @@
struct be_cmd_req_modify_eq_delay *req = embedded_payload(wrb);
int status;
- spin_lock(&ctrl->cmd_lock);
+ spin_lock(&ctrl->mbox_lock);
memset(wrb, 0, sizeof(*wrb));
be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
@@ -696,7 +880,7 @@
status = be_mbox_db_ring(ctrl);
- spin_unlock(&ctrl->cmd_lock);
+ spin_unlock(&ctrl->mbox_lock);
return status;
}
@@ -707,7 +891,7 @@
struct be_cmd_req_vlan_config *req = embedded_payload(wrb);
int status;
- spin_lock(&ctrl->cmd_lock);
+ spin_lock(&ctrl->mbox_lock);
memset(wrb, 0, sizeof(*wrb));
be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
@@ -726,18 +910,22 @@
status = be_mbox_db_ring(ctrl);
- spin_unlock(&ctrl->cmd_lock);
+ spin_unlock(&ctrl->mbox_lock);
return status;
}
+/* Use MCC for this command as it may be called in BH context */
int be_cmd_promiscuous_config(struct be_ctrl_info *ctrl, u8 port_num, bool en)
{
- struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
- struct be_cmd_req_promiscuous_config *req = embedded_payload(wrb);
- int status;
+ struct be_mcc_wrb *wrb;
+ struct be_cmd_req_promiscuous_config *req;
- spin_lock(&ctrl->cmd_lock);
- memset(wrb, 0, sizeof(*wrb));
+ spin_lock_bh(&ctrl->mcc_lock);
+
+ wrb = wrb_from_mcc(&ctrl->mcc_obj.q);
+ BUG_ON(!wrb);
+
+ req = embedded_payload(wrb);
be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
@@ -749,21 +937,29 @@
else
req->port0_promiscuous = en;
- status = be_mbox_db_ring(ctrl);
+ be_mcc_notify_wait(ctrl);
- spin_unlock(&ctrl->cmd_lock);
- return status;
+ spin_unlock_bh(&ctrl->mcc_lock);
+ return 0;
}
-int be_cmd_mcast_mac_set(struct be_ctrl_info *ctrl, u32 if_id, u8 *mac_table,
- u32 num, bool promiscuous)
+/*
+ * Use MCC for this command as it may be called in BH context
+ * (mc == NULL) => multicast promiscuous
+ */
+int be_cmd_multicast_set(struct be_ctrl_info *ctrl, u32 if_id,
+ struct dev_mc_list *mc_list, u32 mc_count)
{
- struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
- struct be_cmd_req_mcast_mac_config *req = embedded_payload(wrb);
- int status;
+#define BE_MAX_MC 32 /* set mcast promisc if > 32 */
+ struct be_mcc_wrb *wrb;
+ struct be_cmd_req_mcast_mac_config *req;
- spin_lock(&ctrl->cmd_lock);
- memset(wrb, 0, sizeof(*wrb));
+ spin_lock_bh(&ctrl->mcc_lock);
+
+ wrb = wrb_from_mcc(&ctrl->mcc_obj.q);
+ BUG_ON(!wrb);
+
+ req = embedded_payload(wrb);
be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
@@ -771,17 +967,23 @@
OPCODE_COMMON_NTWK_MULTICAST_SET, sizeof(*req));
req->interface_id = if_id;
- req->promiscuous = promiscuous;
- if (!promiscuous) {
- req->num_mac = cpu_to_le16(num);
- if (num)
- memcpy(req->mac, mac_table, ETH_ALEN * num);
+ if (mc_list && mc_count <= BE_MAX_MC) {
+ int i;
+ struct dev_mc_list *mc;
+
+ req->num_mac = cpu_to_le16(mc_count);
+
+ for (mc = mc_list, i = 0; mc; mc = mc->next, i++)
+ memcpy(req->mac[i].byte, mc->dmi_addr, ETH_ALEN);
+ } else {
+ req->promiscuous = 1;
}
- status = be_mbox_db_ring(ctrl);
+ be_mcc_notify_wait(ctrl);
- spin_unlock(&ctrl->cmd_lock);
- return status;
+ spin_unlock_bh(&ctrl->mcc_lock);
+
+ return 0;
}
int be_cmd_set_flow_control(struct be_ctrl_info *ctrl, u32 tx_fc, u32 rx_fc)
@@ -790,7 +992,7 @@
struct be_cmd_req_set_flow_control *req = embedded_payload(wrb);
int status;
- spin_lock(&ctrl->cmd_lock);
+ spin_lock(&ctrl->mbox_lock);
memset(wrb, 0, sizeof(*wrb));
@@ -804,7 +1006,7 @@
status = be_mbox_db_ring(ctrl);
- spin_unlock(&ctrl->cmd_lock);
+ spin_unlock(&ctrl->mbox_lock);
return status;
}
@@ -814,7 +1016,7 @@
struct be_cmd_req_get_flow_control *req = embedded_payload(wrb);
int status;
- spin_lock(&ctrl->cmd_lock);
+ spin_lock(&ctrl->mbox_lock);
memset(wrb, 0, sizeof(*wrb));
@@ -831,7 +1033,7 @@
*rx_fc = le16_to_cpu(resp->rx_flow_control);
}
- spin_unlock(&ctrl->cmd_lock);
+ spin_unlock(&ctrl->mbox_lock);
return status;
}
@@ -841,7 +1043,7 @@
struct be_cmd_req_query_fw_cfg *req = embedded_payload(wrb);
int status;
- spin_lock(&ctrl->cmd_lock);
+ spin_lock(&ctrl->mbox_lock);
memset(wrb, 0, sizeof(*wrb));
@@ -856,6 +1058,6 @@
*port_num = le32_to_cpu(resp->phys_port);
}
- spin_unlock(&ctrl->cmd_lock);
+ spin_unlock(&ctrl->mbox_lock);
return status;
}
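The ring_size field programmed in be_cmd_mccq_create() above comes from
be_encoded_q_len(), which stores log2(len) + 1 and wraps the maximum encoding
(16) to 0. The quick standalone check below, with fls() open-coded so it builds
outside the kernel and with the ring lengths taken from the MCC_Q_LEN/MCC_CQ_LEN
defines, illustrates the encoding:

/* Standalone sketch of the queue-length encoding used above (lengths are
 * illustrative).  fls() is open-coded so this compiles in userspace.
 */
#include <stdio.h>

static unsigned int encoded_q_len(unsigned int q_len)
{
	unsigned int len_encoded = 0;

	while (q_len) {			/* highest set bit, 1-based: fls() */
		len_encoded++;
		q_len >>= 1;
	}
	if (len_encoded == 16)		/* 32768 entries encode as 0 */
		len_encoded = 0;
	return len_encoded;
}

int main(void)
{
	/* MCC_Q_LEN (128) -> 8, MCC_CQ_LEN (256) -> 9, 32768 -> 0 */
	printf("%u %u %u\n", encoded_q_len(128), encoded_q_len(256),
	       encoded_q_len(32768));
	return 0;
}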
diff --git a/drivers/net/benet/be_cmds.h b/drivers/net/benet/be_cmds.h
index e499e2d..747626d 100644
--- a/drivers/net/benet/be_cmds.h
+++ b/drivers/net/benet/be_cmds.h
@@ -76,6 +76,34 @@
u32 flags; /* dword 3 */
};
+/* When the async bit of mcc_compl is set, the last 4 bytes of
+ * mcc_compl are interpreted as follows:
+ */
+#define ASYNC_TRAILER_EVENT_CODE_SHIFT 8 /* bits 8 - 15 */
+#define ASYNC_TRAILER_EVENT_CODE_MASK 0xFF
+#define ASYNC_EVENT_CODE_LINK_STATE 0x1
+struct be_async_event_trailer {
+ u32 code;
+};
+
+enum {
+ ASYNC_EVENT_LINK_DOWN = 0x0,
+ ASYNC_EVENT_LINK_UP = 0x1
+};
+
+/* When the event code of an async trailer is link-state, the mcc_compl
+ * must be interpreted as follows
+ */
+struct be_async_event_link_state {
+ u8 physical_port;
+ u8 port_link_status;
+ u8 port_duplex;
+ u8 port_speed;
+ u8 port_fault;
+ u8 rsvd0[7];
+ struct be_async_event_trailer trailer;
+} __packed;
+
struct be_mcc_mailbox {
struct be_mcc_wrb wrb;
struct be_mcc_cq_entry cqe;
@@ -101,6 +129,7 @@
#define OPCODE_COMMON_FIRMWARE_CONFIG 42
#define OPCODE_COMMON_NTWK_INTERFACE_CREATE 50
#define OPCODE_COMMON_NTWK_INTERFACE_DESTROY 51
+#define OPCODE_COMMON_MCC_DESTROY 53
#define OPCODE_COMMON_CQ_DESTROY 54
#define OPCODE_COMMON_EQ_DESTROY 55
#define OPCODE_COMMON_QUERY_FIRMWARE_CONFIG 58
@@ -269,6 +298,38 @@
u16 rsvd0;
} __packed;
+/******************** Create MCCQ ***************************/
+/* Pseudo amap definition in which each bit of the actual structure is defined
+ * as a byte: used to calculate offset/shift/mask of each field */
+struct amap_mcc_context {
+ u8 con_index[14];
+ u8 rsvd0[2];
+ u8 ring_size[4];
+ u8 fetch_wrb;
+ u8 fetch_r2t;
+ u8 cq_id[10];
+ u8 prod_index[14];
+ u8 fid[8];
+ u8 pdid[9];
+ u8 valid;
+ u8 rsvd1[32];
+ u8 rsvd2[32];
+} __packed;
+
+struct be_cmd_req_mcc_create {
+ struct be_cmd_req_hdr hdr;
+ u16 num_pages;
+ u16 rsvd0;
+ u8 context[sizeof(struct amap_mcc_context) / 8];
+ struct phys_addr pages[8];
+} __packed;
+
+struct be_cmd_resp_mcc_create {
+ struct be_cmd_resp_hdr hdr;
+ u16 id;
+ u16 rsvd0;
+} __packed;
+
/******************** Create TxQ ***************************/
#define BE_ETH_TX_RING_TYPE_STANDARD 2
#define BE_ULP1_NUM 1
@@ -341,7 +402,8 @@
QTYPE_EQ = 1,
QTYPE_CQ,
QTYPE_TXQ,
- QTYPE_RXQ
+ QTYPE_RXQ,
+ QTYPE_MCCQ
};
struct be_cmd_req_q_destroy {
@@ -546,12 +608,6 @@
u32 rsvd;
};
-struct be_link_info {
- u8 duplex;
- u8 speed;
- u8 fault;
-};
-
enum {
PHY_LINK_DUPLEX_NONE = 0x0,
PHY_LINK_DUPLEX_HALF = 0x1,
@@ -657,6 +713,9 @@
struct be_queue_info *cq, struct be_queue_info *eq,
bool sol_evts, bool no_delay,
int num_cqe_dma_coalesce);
+extern int be_cmd_mccq_create(struct be_ctrl_info *ctrl,
+ struct be_queue_info *mccq,
+ struct be_queue_info *cq);
extern int be_cmd_txq_create(struct be_ctrl_info *ctrl,
struct be_queue_info *txq,
struct be_queue_info *cq);
@@ -667,7 +726,7 @@
extern int be_cmd_q_destroy(struct be_ctrl_info *ctrl, struct be_queue_info *q,
int type);
extern int be_cmd_link_status_query(struct be_ctrl_info *ctrl,
- struct be_link_info *link);
+ bool *link_up);
extern int be_cmd_reset(struct be_ctrl_info *ctrl);
extern int be_cmd_get_stats(struct be_ctrl_info *ctrl,
struct be_dma_mem *nonemb_cmd);
@@ -679,10 +738,11 @@
bool promiscuous);
extern int be_cmd_promiscuous_config(struct be_ctrl_info *ctrl,
u8 port_num, bool en);
-extern int be_cmd_mcast_mac_set(struct be_ctrl_info *ctrl, u32 if_id,
- u8 *mac_table, u32 num, bool promiscuous);
+extern int be_cmd_multicast_set(struct be_ctrl_info *ctrl, u32 if_id,
+ struct dev_mc_list *mc_list, u32 mc_count);
extern int be_cmd_set_flow_control(struct be_ctrl_info *ctrl,
u32 tx_fc, u32 rx_fc);
extern int be_cmd_get_flow_control(struct be_ctrl_info *ctrl,
u32 *tx_fc, u32 *rx_fc);
extern int be_cmd_query_fw_cfg(struct be_ctrl_info *ctrl, u32 *port_num);
+extern void be_process_mcc(struct be_ctrl_info *ctrl);
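The async link-state path added in be_cmds.h keys off the completion trailer
word: bits 8-15 carry the event code, and code 0x1 means a link-state event.
A minimal userspace sketch, with a hand-built trailer value assumed for
illustration, decodes it the same way is_link_state_evt() does:

/* Illustrative decode of the async trailer defined above; the trailer
 * value is assumed, not read from hardware.
 */
#include <stdint.h>
#include <stdio.h>

#define ASYNC_TRAILER_EVENT_CODE_SHIFT	8	/* bits 8 - 15 */
#define ASYNC_TRAILER_EVENT_CODE_MASK	0xFF
#define ASYNC_EVENT_CODE_LINK_STATE	0x1

static int is_link_state_evt(uint32_t trailer)
{
	return ((trailer >> ASYNC_TRAILER_EVENT_CODE_SHIFT) &
		ASYNC_TRAILER_EVENT_CODE_MASK) == ASYNC_EVENT_CODE_LINK_STATE;
}

int main(void)
{
	uint32_t trailer = ASYNC_EVENT_CODE_LINK_STATE << ASYNC_TRAILER_EVENT_CODE_SHIFT;

	printf("link state event: %s\n", is_link_state_evt(trailer) ? "yes" : "no");
	return 0;	/* prints "yes" */
}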
diff --git a/drivers/net/benet/be_hw.h b/drivers/net/benet/be_hw.h
index b132aa4..b02e805 100644
--- a/drivers/net/benet/be_hw.h
+++ b/drivers/net/benet/be_hw.h
@@ -61,7 +61,7 @@
/* Clear the interrupt for this eq */
#define DB_EQ_CLR_SHIFT (9) /* bit 9 */
/* Must be 1 */
-#define DB_EQ_EVNT_SHIFT (10) /* bit 10 */
+#define DB_EQ_EVNT_SHIFT (10) /* bit 10 */
/* Number of event entries processed */
#define DB_EQ_NUM_POPPED_SHIFT (16) /* bits 16 - 28 */
/* Rearm bit */
@@ -88,6 +88,12 @@
/* Number of rx frags posted */
#define DB_RQ_NUM_POSTED_SHIFT (24) /* bits 24 - 31 */
+/********** MCC door bell ************/
+#define DB_MCCQ_OFFSET 0x140
+#define DB_MCCQ_RING_ID_MASK 0x7FF /* bits 0 - 10 */
+/* Number of entries posted */
+#define DB_MCCQ_NUM_POSTED_SHIFT (16) /* bits 16 - 29 */
+
/*
* BE descriptors: host memory data structures whose formats
* are hardwired in BE silicon.
diff --git a/drivers/net/benet/be_main.c b/drivers/net/benet/be_main.c
index 66bb568..66c10c8 100644
--- a/drivers/net/benet/be_main.c
+++ b/drivers/net/benet/be_main.c
@@ -60,26 +60,6 @@
return 0;
}
-static inline void *queue_head_node(struct be_queue_info *q)
-{
- return q->dma_mem.va + q->head * q->entry_size;
-}
-
-static inline void *queue_tail_node(struct be_queue_info *q)
-{
- return q->dma_mem.va + q->tail * q->entry_size;
-}
-
-static inline void queue_head_inc(struct be_queue_info *q)
-{
- index_inc(&q->head, q->len);
-}
-
-static inline void queue_tail_inc(struct be_queue_info *q)
-{
- index_inc(&q->tail, q->len);
-}
-
static void be_intr_set(struct be_ctrl_info *ctrl, bool enable)
{
u8 __iomem *addr = ctrl->pcicfg + PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET;
@@ -127,7 +107,7 @@
iowrite32(val, ctrl->db + DB_EQ_OFFSET);
}
-static void be_cq_notify(struct be_ctrl_info *ctrl, u16 qid,
+void be_cq_notify(struct be_ctrl_info *ctrl, u16 qid,
bool arm, u16 num_popped)
{
u32 val = 0;
@@ -234,28 +214,24 @@
dev_stats->tx_window_errors = 0;
}
-static void be_link_status_update(struct be_adapter *adapter)
+void be_link_status_update(void *ctxt, bool link_up)
{
- struct be_link_info *prev = &adapter->link;
- struct be_link_info now = { 0 };
+ struct be_adapter *adapter = ctxt;
struct net_device *netdev = adapter->netdev;
- be_cmd_link_status_query(&adapter->ctrl, &now);
-
/* If link came up or went down */
- if (now.speed != prev->speed && (now.speed == PHY_LINK_SPEED_ZERO ||
- prev->speed == PHY_LINK_SPEED_ZERO)) {
- if (now.speed == PHY_LINK_SPEED_ZERO) {
- netif_stop_queue(netdev);
- netif_carrier_off(netdev);
- printk(KERN_INFO "%s: Link down\n", netdev->name);
- } else {
+ if (adapter->link_up != link_up) {
+ if (link_up) {
netif_start_queue(netdev);
netif_carrier_on(netdev);
printk(KERN_INFO "%s: Link up\n", netdev->name);
+ } else {
+ netif_stop_queue(netdev);
+ netif_carrier_off(netdev);
+ printk(KERN_INFO "%s: Link down\n", netdev->name);
}
+ adapter->link_up = link_up;
}
- *prev = now;
}
/* Update the EQ delay n BE based on the RX frags consumed / sec */
@@ -569,47 +545,32 @@
be_vid_config(netdev);
}
-static void be_set_multicast_filter(struct net_device *netdev)
-{
- struct be_adapter *adapter = netdev_priv(netdev);
- struct dev_mc_list *mc_ptr;
- u8 mac_addr[32][ETH_ALEN];
- int i = 0;
-
- if (netdev->flags & IFF_ALLMULTI) {
- /* set BE in Multicast promiscuous */
- be_cmd_mcast_mac_set(&adapter->ctrl,
- adapter->if_handle, NULL, 0, true);
- return;
- }
-
- for (mc_ptr = netdev->mc_list; mc_ptr; mc_ptr = mc_ptr->next) {
- memcpy(&mac_addr[i][0], mc_ptr->dmi_addr, ETH_ALEN);
- if (++i >= 32) {
- be_cmd_mcast_mac_set(&adapter->ctrl,
- adapter->if_handle, &mac_addr[0][0], i, false);
- i = 0;
- }
-
- }
-
- if (i) {
- /* reset the promiscuous mode also. */
- be_cmd_mcast_mac_set(&adapter->ctrl,
- adapter->if_handle, &mac_addr[0][0], i, false);
- }
-}
-
static void be_set_multicast_list(struct net_device *netdev)
{
struct be_adapter *adapter = netdev_priv(netdev);
+ struct be_ctrl_info *ctrl = &adapter->ctrl;
if (netdev->flags & IFF_PROMISC) {
- be_cmd_promiscuous_config(&adapter->ctrl, adapter->port_num, 1);
- } else {
- be_cmd_promiscuous_config(&adapter->ctrl, adapter->port_num, 0);
- be_set_multicast_filter(netdev);
+ be_cmd_promiscuous_config(ctrl, adapter->port_num, 1);
+ adapter->promiscuous = true;
+ goto done;
}
+
+ /* BE was previously in promiscuous mode; disable it */
+ if (adapter->promiscuous) {
+ adapter->promiscuous = false;
+ be_cmd_promiscuous_config(ctrl, adapter->port_num, 0);
+ }
+
+ if (netdev->flags & IFF_ALLMULTI) {
+ be_cmd_multicast_set(ctrl, adapter->if_handle, NULL, 0);
+ goto done;
+ }
+
+ be_cmd_multicast_set(ctrl, adapter->if_handle, netdev->mc_list,
+ netdev->mc_count);
+done:
+ return;
}
static void be_rx_rate_update(struct be_adapter *adapter)
@@ -960,10 +921,8 @@
return;
}
-static struct be_eth_tx_compl *
-be_tx_compl_get(struct be_adapter *adapter)
+static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
{
- struct be_queue_info *tx_cq = &adapter->tx_obj.cq;
struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);
if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
@@ -1051,6 +1010,59 @@
}
}
+static void be_mcc_queues_destroy(struct be_adapter *adapter)
+{
+ struct be_queue_info *q;
+ struct be_ctrl_info *ctrl = &adapter->ctrl;
+
+ q = &ctrl->mcc_obj.q;
+ if (q->created)
+ be_cmd_q_destroy(ctrl, q, QTYPE_MCCQ);
+ be_queue_free(adapter, q);
+
+ q = &ctrl->mcc_obj.cq;
+ if (q->created)
+ be_cmd_q_destroy(ctrl, q, QTYPE_CQ);
+ be_queue_free(adapter, q);
+}
+
+/* Must be called only after TX qs are created as MCC shares TX EQ */
+static int be_mcc_queues_create(struct be_adapter *adapter)
+{
+ struct be_queue_info *q, *cq;
+ struct be_ctrl_info *ctrl = &adapter->ctrl;
+
+ /* Alloc MCC compl queue */
+ cq = &ctrl->mcc_obj.cq;
+ if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
+ sizeof(struct be_mcc_cq_entry)))
+ goto err;
+
+ /* Ask BE to create MCC compl queue; share TX's eq */
+ if (be_cmd_cq_create(ctrl, cq, &adapter->tx_eq.q, false, true, 0))
+ goto mcc_cq_free;
+
+ /* Alloc MCC queue */
+ q = &ctrl->mcc_obj.q;
+ if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
+ goto mcc_cq_destroy;
+
+ /* Ask BE to create MCC queue */
+ if (be_cmd_mccq_create(ctrl, q, cq))
+ goto mcc_q_free;
+
+ return 0;
+
+mcc_q_free:
+ be_queue_free(adapter, q);
+mcc_cq_destroy:
+ be_cmd_q_destroy(ctrl, cq, QTYPE_CQ);
+mcc_cq_free:
+ be_queue_free(adapter, cq);
+err:
+ return -1;
+}
+
static void be_tx_queues_destroy(struct be_adapter *adapter)
{
struct be_queue_info *q;
@@ -1263,7 +1275,7 @@
return IRQ_HANDLED;
}
-static irqreturn_t be_msix_tx(int irq, void *dev)
+static irqreturn_t be_msix_tx_mcc(int irq, void *dev)
{
struct be_adapter *adapter = dev;
@@ -1324,40 +1336,51 @@
return work_done;
}
-/* For TX we don't honour budget; consume everything */
-int be_poll_tx(struct napi_struct *napi, int budget)
+void be_process_tx(struct be_adapter *adapter)
{
- struct be_eq_obj *tx_eq = container_of(napi, struct be_eq_obj, napi);
- struct be_adapter *adapter =
- container_of(tx_eq, struct be_adapter, tx_eq);
- struct be_tx_obj *tx_obj = &adapter->tx_obj;
- struct be_queue_info *tx_cq = &tx_obj->cq;
- struct be_queue_info *txq = &tx_obj->q;
+ struct be_queue_info *txq = &adapter->tx_obj.q;
+ struct be_queue_info *tx_cq = &adapter->tx_obj.cq;
struct be_eth_tx_compl *txcp;
u32 num_cmpl = 0;
u16 end_idx;
- while ((txcp = be_tx_compl_get(adapter))) {
+ while ((txcp = be_tx_compl_get(tx_cq))) {
end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
wrb_index, txcp);
be_tx_compl_process(adapter, end_idx);
num_cmpl++;
}
- /* As Tx wrbs have been freed up, wake up netdev queue if
- * it was stopped due to lack of tx wrbs.
- */
- if (netif_queue_stopped(adapter->netdev) &&
+ if (num_cmpl) {
+ be_cq_notify(&adapter->ctrl, tx_cq->id, true, num_cmpl);
+
+ /* As Tx wrbs have been freed up, wake up netdev queue if
+ * it was stopped due to lack of tx wrbs.
+ */
+ if (netif_queue_stopped(adapter->netdev) &&
atomic_read(&txq->used) < txq->len / 2) {
- netif_wake_queue(adapter->netdev);
+ netif_wake_queue(adapter->netdev);
+ }
+
+ drvr_stats(adapter)->be_tx_events++;
+ drvr_stats(adapter)->be_tx_compl += num_cmpl;
}
+}
+
+/* As TX and MCC share the same EQ, check for both TX and MCC completions.
+ * For TX/MCC we don't honour budget; consume everything
+ */
+static int be_poll_tx_mcc(struct napi_struct *napi, int budget)
+{
+ struct be_eq_obj *tx_eq = container_of(napi, struct be_eq_obj, napi);
+ struct be_adapter *adapter =
+ container_of(tx_eq, struct be_adapter, tx_eq);
napi_complete(napi);
- be_cq_notify(&adapter->ctrl, tx_cq->id, true, num_cmpl);
+ be_process_tx(adapter);
- drvr_stats(adapter)->be_tx_events++;
- drvr_stats(adapter)->be_tx_compl += num_cmpl;
+ be_process_mcc(&adapter->ctrl);
return 1;
}
@@ -1368,9 +1391,6 @@
container_of(work, struct be_adapter, work.work);
int status;
- /* Check link */
- be_link_status_update(adapter);
-
/* Get Stats */
status = be_cmd_get_stats(&adapter->ctrl, &adapter->stats.cmd);
if (!status)
@@ -1419,7 +1439,7 @@
sprintf(tx_eq->desc, "%s-tx", netdev->name);
vec = be_msix_vec_get(adapter, tx_eq->q.id);
- status = request_irq(vec, be_msix_tx, 0, tx_eq->desc, adapter);
+ status = request_irq(vec, be_msix_tx_mcc, 0, tx_eq->desc, adapter);
if (status)
goto err;
@@ -1495,6 +1515,39 @@
struct be_ctrl_info *ctrl = &adapter->ctrl;
struct be_eq_obj *rx_eq = &adapter->rx_eq;
struct be_eq_obj *tx_eq = &adapter->tx_eq;
+ bool link_up;
+ int status;
+
+ /* First time posting */
+ be_post_rx_frags(adapter);
+
+ napi_enable(&rx_eq->napi);
+ napi_enable(&tx_eq->napi);
+
+ be_irq_register(adapter);
+
+ be_intr_set(ctrl, true);
+
+ /* The evt queues are created in unarmed state; arm them */
+ be_eq_notify(ctrl, rx_eq->q.id, true, false, 0);
+ be_eq_notify(ctrl, tx_eq->q.id, true, false, 0);
+
+ /* Rx compl queue may be in unarmed state; rearm it */
+ be_cq_notify(ctrl, adapter->rx_obj.cq.id, true, 0);
+
+ status = be_cmd_link_status_query(ctrl, &link_up);
+ if (status)
+ return status;
+ be_link_status_update(adapter, link_up);
+
+ schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
+ return 0;
+}
+
+static int be_setup(struct be_adapter *adapter)
+{
+ struct be_ctrl_info *ctrl = &adapter->ctrl;
+ struct net_device *netdev = adapter->netdev;
u32 if_flags;
int status;
@@ -1521,29 +1574,14 @@
if (status != 0)
goto tx_qs_destroy;
- /* First time posting */
- be_post_rx_frags(adapter);
+ status = be_mcc_queues_create(adapter);
+ if (status != 0)
+ goto rx_qs_destroy;
- napi_enable(&rx_eq->napi);
- napi_enable(&tx_eq->napi);
-
- be_irq_register(adapter);
-
- be_intr_set(ctrl, true);
-
- /* The evt queues are created in the unarmed state; arm them */
- be_eq_notify(ctrl, rx_eq->q.id, true, false, 0);
- be_eq_notify(ctrl, tx_eq->q.id, true, false, 0);
-
- /* The compl queues are created in the unarmed state; arm them */
- be_cq_notify(ctrl, adapter->rx_obj.cq.id, true, 0);
- be_cq_notify(ctrl, adapter->tx_obj.cq.id, true, 0);
-
- be_link_status_update(adapter);
-
- schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
return 0;
+rx_qs_destroy:
+ be_rx_queues_destroy(adapter);
tx_qs_destroy:
be_tx_queues_destroy(adapter);
if_destroy:
@@ -1552,6 +1590,19 @@
return status;
}
+static int be_clear(struct be_adapter *adapter)
+{
+ struct be_ctrl_info *ctrl = &adapter->ctrl;
+
+ be_rx_queues_destroy(adapter);
+ be_tx_queues_destroy(adapter);
+
+ be_cmd_if_destroy(ctrl, adapter->if_handle);
+
+ be_mcc_queues_destroy(adapter);
+ return 0;
+}
+
static int be_close(struct net_device *netdev)
{
struct be_adapter *adapter = netdev_priv(netdev);
@@ -1564,7 +1615,7 @@
netif_stop_queue(netdev);
netif_carrier_off(netdev);
- adapter->link.speed = PHY_LINK_SPEED_ZERO;
+ adapter->link_up = false;
be_intr_set(ctrl, false);
@@ -1581,10 +1632,6 @@
napi_disable(&rx_eq->napi);
napi_disable(&tx_eq->napi);
- be_rx_queues_destroy(adapter);
- be_tx_queues_destroy(adapter);
-
- be_cmd_if_destroy(ctrl, adapter->if_handle);
return 0;
}
@@ -1673,7 +1720,7 @@
netif_napi_add(netdev, &adapter->rx_eq.napi, be_poll_rx,
BE_NAPI_WEIGHT);
- netif_napi_add(netdev, &adapter->tx_eq.napi, be_poll_tx,
+ netif_napi_add(netdev, &adapter->tx_eq.napi, be_poll_tx_mcc,
BE_NAPI_WEIGHT);
netif_carrier_off(netdev);
@@ -1755,7 +1802,12 @@
mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
- spin_lock_init(&ctrl->cmd_lock);
+ spin_lock_init(&ctrl->mbox_lock);
+ spin_lock_init(&ctrl->mcc_lock);
+ spin_lock_init(&ctrl->mcc_cq_lock);
+
+ ctrl->async_cb = be_link_status_update;
+ ctrl->adapter_ctxt = adapter;
val = ioread32(ctrl->pcicfg + PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET);
ctrl->pci_func = (val >> MEMBAR_CTRL_INT_CTRL_PFUNC_SHIFT) &
@@ -1793,6 +1845,8 @@
unregister_netdev(adapter->netdev);
+ be_clear(adapter);
+
be_stats_cleanup(adapter);
be_ctrl_cleanup(adapter);
@@ -1890,13 +1944,18 @@
be_netdev_init(netdev);
SET_NETDEV_DEV(netdev, &adapter->pdev->dev);
+ status = be_setup(adapter);
+ if (status)
+ goto stats_clean;
status = register_netdev(netdev);
if (status != 0)
- goto stats_clean;
+ goto unsetup;
dev_info(&pdev->dev, "%s port %d\n", nic_name(pdev), adapter->port_num);
return 0;
+unsetup:
+ be_clear(adapter);
stats_clean:
be_stats_cleanup(adapter);
ctrl_clean:
@@ -1921,6 +1980,7 @@
if (netif_running(netdev)) {
rtnl_lock();
be_close(netdev);
+ be_clear(adapter);
rtnl_unlock();
}
@@ -1947,6 +2007,7 @@
if (netif_running(netdev)) {
rtnl_lock();
+ be_setup(adapter);
be_open(netdev);
rtnl_unlock();
}
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c
index 677f604..679885a 100644
--- a/drivers/net/e1000e/netdev.c
+++ b/drivers/net/e1000e/netdev.c
@@ -1997,7 +1997,7 @@
struct e1000_adapter *adapter = container_of(napi, struct e1000_adapter, napi);
struct e1000_hw *hw = &adapter->hw;
struct net_device *poll_dev = adapter->netdev;
- int tx_cleaned = 0, work_done = 0;
+ int tx_cleaned = 1, work_done = 0;
adapter = netdev_priv(poll_dev);
diff --git a/drivers/net/mlx4/en_netdev.c b/drivers/net/mlx4/en_netdev.c
index e02bafd..93f4abd 100644
--- a/drivers/net/mlx4/en_netdev.c
+++ b/drivers/net/mlx4/en_netdev.c
@@ -668,7 +668,7 @@
queue_work(mdev->workqueue, &priv->mcast_task);
priv->port_up = true;
- netif_start_queue(dev);
+ netif_tx_start_all_queues(dev);
return 0;
mac_err:
@@ -700,14 +700,14 @@
en_dbg(DRV, priv, "stop port called while port already down\n");
return;
}
- netif_stop_queue(dev);
/* Synchronize with tx routine */
netif_tx_lock_bh(dev);
- priv->port_up = false;
+ netif_tx_stop_all_queues(dev);
netif_tx_unlock_bh(dev);
/* close port */
+ priv->port_up = false;
mlx4_CLOSE_PORT(mdev->dev, priv->port);
/* Unregister Mac address for the port */
@@ -881,7 +881,6 @@
mlx4_free_hwq_res(mdev->dev, &priv->res, MLX4_EN_PAGE_SIZE);
cancel_delayed_work(&priv->stats_task);
- cancel_delayed_work(&priv->refill_task);
/* flush any pending task for this netdev */
flush_workqueue(mdev->workqueue);
@@ -986,7 +985,6 @@
spin_lock_init(&priv->stats_lock);
INIT_WORK(&priv->mcast_task, mlx4_en_do_set_multicast);
INIT_WORK(&priv->mac_task, mlx4_en_do_set_mac);
- INIT_DELAYED_WORK(&priv->refill_task, mlx4_en_rx_refill);
INIT_WORK(&priv->watchdog_task, mlx4_en_restart);
INIT_WORK(&priv->linkstate_task, mlx4_en_linkstate);
INIT_DELAYED_WORK(&priv->stats_task, mlx4_en_do_get_stats);
diff --git a/drivers/net/mlx4/en_rx.c b/drivers/net/mlx4/en_rx.c
index 5a14899..91bdfdf 100644
--- a/drivers/net/mlx4/en_rx.c
+++ b/drivers/net/mlx4/en_rx.c
@@ -269,31 +269,6 @@
return 0;
}
-static int mlx4_en_fill_rx_buf(struct net_device *dev,
- struct mlx4_en_rx_ring *ring)
-{
- struct mlx4_en_priv *priv = netdev_priv(dev);
- int num = 0;
- int err;
-
- while ((u32) (ring->prod - ring->cons) < ring->actual_size) {
- err = mlx4_en_prepare_rx_desc(priv, ring, ring->prod &
- ring->size_mask);
- if (err) {
- if (netif_msg_rx_err(priv))
- en_warn(priv, "Failed preparing rx descriptor\n");
- priv->port_stats.rx_alloc_failed++;
- break;
- }
- ++num;
- ++ring->prod;
- }
- if ((u32) (ring->prod - ring->cons) == ring->actual_size)
- ring->full = 1;
-
- return num;
-}
-
static void mlx4_en_free_rx_buf(struct mlx4_en_priv *priv,
struct mlx4_en_rx_ring *ring)
{
@@ -312,42 +287,6 @@
}
}
-
-void mlx4_en_rx_refill(struct work_struct *work)
-{
- struct delayed_work *delay = to_delayed_work(work);
- struct mlx4_en_priv *priv = container_of(delay, struct mlx4_en_priv,
- refill_task);
- struct mlx4_en_dev *mdev = priv->mdev;
- struct net_device *dev = priv->dev;
- struct mlx4_en_rx_ring *ring;
- int need_refill = 0;
- int i;
-
- mutex_lock(&mdev->state_lock);
- if (!mdev->device_up || !priv->port_up)
- goto out;
-
- /* We only get here if there are no receive buffers, so we can't race
- * with Rx interrupts while filling buffers */
- for (i = 0; i < priv->rx_ring_num; i++) {
- ring = &priv->rx_ring[i];
- if (ring->need_refill) {
- if (mlx4_en_fill_rx_buf(dev, ring)) {
- ring->need_refill = 0;
- mlx4_en_update_rx_prod_db(ring);
- } else
- need_refill = 1;
- }
- }
- if (need_refill)
- queue_delayed_work(mdev->workqueue, &priv->refill_task, HZ);
-
-out:
- mutex_unlock(&mdev->state_lock);
-}
-
-
int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
struct mlx4_en_rx_ring *ring, u32 size, u16 stride)
{
@@ -457,9 +396,6 @@
ring_ind--;
goto err_allocator;
}
-
- /* Fill Rx buffers */
- ring->full = 0;
}
err = mlx4_en_fill_rx_buffers(priv);
if (err)
@@ -647,33 +583,6 @@
return skb;
}
-static void mlx4_en_copy_desc(struct mlx4_en_priv *priv,
- struct mlx4_en_rx_ring *ring,
- int from, int to, int num)
-{
- struct skb_frag_struct *skb_frags_from;
- struct skb_frag_struct *skb_frags_to;
- struct mlx4_en_rx_desc *rx_desc_from;
- struct mlx4_en_rx_desc *rx_desc_to;
- int from_index, to_index;
- int nr, i;
-
- for (i = 0; i < num; i++) {
- from_index = (from + i) & ring->size_mask;
- to_index = (to + i) & ring->size_mask;
- skb_frags_from = ring->rx_info + (from_index << priv->log_rx_info);
- skb_frags_to = ring->rx_info + (to_index << priv->log_rx_info);
- rx_desc_from = ring->buf + (from_index << ring->log_stride);
- rx_desc_to = ring->buf + (to_index << ring->log_stride);
-
- for (nr = 0; nr < priv->num_frags; nr++) {
- skb_frags_to[nr].page = skb_frags_from[nr].page;
- skb_frags_to[nr].page_offset = skb_frags_from[nr].page_offset;
- rx_desc_to->data[nr].addr = rx_desc_from->data[nr].addr;
- }
- }
-}
-
int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int budget)
{
@@ -821,11 +730,6 @@
wmb(); /* ensure HW sees CQ consumer before we post new buffers */
ring->cons = cq->mcq.cons_index;
ring->prod += polled; /* Polled descriptors were reallocated in place */
- if (unlikely(!ring->full)) {
- mlx4_en_copy_desc(priv, ring, ring->cons - polled,
- ring->prod - polled, polled);
- mlx4_en_fill_rx_buf(dev, ring);
- }
mlx4_en_update_rx_prod_db(ring);
return polled;
}
diff --git a/drivers/net/mlx4/en_tx.c b/drivers/net/mlx4/en_tx.c
index 5dc7466..08c43f2 100644
--- a/drivers/net/mlx4/en_tx.c
+++ b/drivers/net/mlx4/en_tx.c
@@ -515,16 +515,9 @@
else {
if (netif_msg_tx_err(priv))
en_warn(priv, "Non-linear headers\n");
- dev_kfree_skb_any(skb);
return 0;
}
}
- if (unlikely(*lso_header_size > MAX_LSO_HDR_SIZE)) {
- if (netif_msg_tx_err(priv))
- en_warn(priv, "LSO header size too big\n");
- dev_kfree_skb_any(skb);
- return 0;
- }
} else {
*lso_header_size = 0;
if (!is_inline(skb, NULL))
@@ -616,13 +609,9 @@
int lso_header_size;
void *fragptr;
- if (unlikely(!skb->len)) {
- dev_kfree_skb_any(skb);
- return NETDEV_TX_OK;
- }
real_size = get_real_size(skb, dev, &lso_header_size);
if (unlikely(!real_size))
- return NETDEV_TX_OK;
+ goto tx_drop;
/* Align descriptor to TXBB size */
desc_size = ALIGN(real_size, TXBB_SIZE);
@@ -630,8 +619,7 @@
if (unlikely(nr_txbb > MAX_DESC_TXBBS)) {
if (netif_msg_tx_err(priv))
en_warn(priv, "Oversized header or SG list\n");
- dev_kfree_skb_any(skb);
- return NETDEV_TX_OK;
+ goto tx_drop;
}
tx_ind = skb->queue_mapping;
@@ -653,14 +641,6 @@
return NETDEV_TX_BUSY;
}
- /* Now that we know what Tx ring to use */
- if (unlikely(!priv->port_up)) {
- if (netif_msg_tx_err(priv))
- en_warn(priv, "xmit: port down!\n");
- dev_kfree_skb_any(skb);
- return NETDEV_TX_OK;
- }
-
/* Track current inflight packets for performance analysis */
AVG_PERF_COUNTER(priv->pstats.inflight_avg,
(u32) (ring->prod - ring->cons - 1));
@@ -785,5 +765,10 @@
mlx4_en_xmit_poll(priv, tx_ind);
return 0;
+
+tx_drop:
+ dev_kfree_skb_any(skb);
+ priv->stats.tx_dropped++;
+ return NETDEV_TX_OK;
}
diff --git a/drivers/net/mlx4/mlx4_en.h b/drivers/net/mlx4/mlx4_en.h
index d43a9e4..c7c5e86 100644
--- a/drivers/net/mlx4/mlx4_en.h
+++ b/drivers/net/mlx4/mlx4_en.h
@@ -99,7 +99,6 @@
#define RSS_FACTOR 2
#define TXBB_SIZE 64
#define HEADROOM (2048 / TXBB_SIZE + 1)
-#define MAX_LSO_HDR_SIZE 92
#define STAMP_STRIDE 64
#define STAMP_DWORDS (STAMP_STRIDE / 4)
#define STAMP_SHIFT 31
@@ -296,8 +295,6 @@
u32 prod;
u32 cons;
u32 buf_size;
- int need_refill;
- int full;
void *buf;
void *rx_info;
unsigned long bytes;
@@ -495,7 +492,6 @@
struct mlx4_en_cq rx_cq[MAX_RX_RINGS];
struct work_struct mcast_task;
struct work_struct mac_task;
- struct delayed_work refill_task;
struct work_struct watchdog_task;
struct work_struct linkstate_task;
struct delayed_work stats_task;
@@ -565,7 +561,6 @@
int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv);
void mlx4_en_release_rss_steer(struct mlx4_en_priv *priv);
int mlx4_en_free_tx_buf(struct net_device *dev, struct mlx4_en_tx_ring *ring);
-void mlx4_en_rx_refill(struct work_struct *work);
void mlx4_en_rx_irq(struct mlx4_cq *mcq);
int mlx4_SET_MCAST_FLTR(struct mlx4_dev *dev, u8 port, u64 mac, u64 clear, u8 mode);
diff --git a/drivers/net/mv643xx_eth.c b/drivers/net/mv643xx_eth.c
index 745ae8b4..0f32db3 100644
--- a/drivers/net/mv643xx_eth.c
+++ b/drivers/net/mv643xx_eth.c
@@ -1750,12 +1750,12 @@
uc_addr_set(mp, dev->dev_addr);
- port_config = rdlp(mp, PORT_CONFIG);
+ port_config = rdlp(mp, PORT_CONFIG) & ~UNICAST_PROMISCUOUS_MODE;
+
nibbles = uc_addr_filter_mask(dev);
if (!nibbles) {
port_config |= UNICAST_PROMISCUOUS_MODE;
- wrlp(mp, PORT_CONFIG, port_config);
- return;
+ nibbles = 0xffff;
}
for (i = 0; i < 16; i += 4) {
@@ -1776,7 +1776,6 @@
wrl(mp, off, v);
}
- port_config &= ~UNICAST_PROMISCUOUS_MODE;
wrlp(mp, PORT_CONFIG, port_config);
}
diff --git a/drivers/net/ppp_async.c b/drivers/net/ppp_async.c
index 6de8399..17c116b 100644
--- a/drivers/net/ppp_async.c
+++ b/drivers/net/ppp_async.c
@@ -356,7 +356,6 @@
if (!skb_queue_empty(&ap->rqueue))
tasklet_schedule(&ap->tsk);
ap_put(ap);
- tty_unthrottle(tty);
}
static void
diff --git a/drivers/net/ppp_synctty.c b/drivers/net/ppp_synctty.c
index d2fa2db..aa3d39f 100644
--- a/drivers/net/ppp_synctty.c
+++ b/drivers/net/ppp_synctty.c
@@ -397,7 +397,6 @@
if (!skb_queue_empty(&ap->rqueue))
tasklet_schedule(&ap->tsk);
sp_put(ap);
- tty_unthrottle(tty);
}
static void
diff --git a/drivers/net/qla3xxx.c b/drivers/net/qla3xxx.c
index 8a823ec..bbc6d4d 100644
--- a/drivers/net/qla3xxx.c
+++ b/drivers/net/qla3xxx.c
@@ -3837,7 +3837,9 @@
16) | ISP_CONTROL_RI));
}
+ spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
ssleep(1);
+ spin_lock_irqsave(&qdev->hw_lock, hw_flags);
} while (--max_wait_time);
spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
index 4e22462..4b53b58 100644
--- a/drivers/net/r8169.c
+++ b/drivers/net/r8169.c
@@ -51,9 +51,6 @@
#define TX_BUFFS_AVAIL(tp) \
(tp->dirty_tx + NUM_TX_DESC - tp->cur_tx - 1)
-/* Maximum events (Rx packets, etc.) to handle at each interrupt. */
-static const int max_interrupt_work = 20;
-
/* Maximum number of multicast addresses to filter (vs. Rx-all-multicast).
The RTL chips use a 64 element hash table based on the Ethernet CRC. */
static const int multicast_filter_limit = 32;
diff --git a/drivers/net/s6gmac.c b/drivers/net/s6gmac.c
new file mode 100644
index 0000000..5345e47
--- /dev/null
+++ b/drivers/net/s6gmac.c
@@ -0,0 +1,1073 @@
+/*
+ * Ethernet driver for S6105 on chip network device
+ * (c)2008 emlix GmbH http://www.emlix.com
+ * Authors: Oskar Schirmer <os@emlix.com>
+ * Daniel Gloeckner <dg@emlix.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/types.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/spinlock.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/if.h>
+#include <linux/stddef.h>
+#include <linux/mii.h>
+#include <linux/phy.h>
+#include <linux/platform_device.h>
+#include <variant/hardware.h>
+#include <variant/dmac.h>
+
+#define DRV_NAME "s6gmac"
+#define DRV_PRMT DRV_NAME ": "
+
+
+/* register declarations */
+
+#define S6_GMAC_MACCONF1 0x000
+#define S6_GMAC_MACCONF1_TXENA 0
+#define S6_GMAC_MACCONF1_SYNCTX 1
+#define S6_GMAC_MACCONF1_RXENA 2
+#define S6_GMAC_MACCONF1_SYNCRX 3
+#define S6_GMAC_MACCONF1_TXFLOWCTRL 4
+#define S6_GMAC_MACCONF1_RXFLOWCTRL 5
+#define S6_GMAC_MACCONF1_LOOPBACK 8
+#define S6_GMAC_MACCONF1_RESTXFUNC 16
+#define S6_GMAC_MACCONF1_RESRXFUNC 17
+#define S6_GMAC_MACCONF1_RESTXMACCTRL 18
+#define S6_GMAC_MACCONF1_RESRXMACCTRL 19
+#define S6_GMAC_MACCONF1_SIMULRES 30
+#define S6_GMAC_MACCONF1_SOFTRES 31
+#define S6_GMAC_MACCONF2 0x004
+#define S6_GMAC_MACCONF2_FULL 0
+#define S6_GMAC_MACCONF2_CRCENA 1
+#define S6_GMAC_MACCONF2_PADCRCENA 2
+#define S6_GMAC_MACCONF2_LENGTHFCHK 4
+#define S6_GMAC_MACCONF2_HUGEFRAMENA 5
+#define S6_GMAC_MACCONF2_IFMODE 8
+#define S6_GMAC_MACCONF2_IFMODE_NIBBLE 1
+#define S6_GMAC_MACCONF2_IFMODE_BYTE 2
+#define S6_GMAC_MACCONF2_IFMODE_MASK 3
+#define S6_GMAC_MACCONF2_PREAMBLELEN 12
+#define S6_GMAC_MACCONF2_PREAMBLELEN_MASK 0x0F
+#define S6_GMAC_MACIPGIFG 0x008
+#define S6_GMAC_MACIPGIFG_B2BINTERPGAP 0
+#define S6_GMAC_MACIPGIFG_B2BINTERPGAP_MASK 0x7F
+#define S6_GMAC_MACIPGIFG_MINIFGENFORCE 8
+#define S6_GMAC_MACIPGIFG_B2BINTERPGAP2 16
+#define S6_GMAC_MACIPGIFG_B2BINTERPGAP1 24
+#define S6_GMAC_MACHALFDUPLEX 0x00C
+#define S6_GMAC_MACHALFDUPLEX_COLLISWIN 0
+#define S6_GMAC_MACHALFDUPLEX_COLLISWIN_MASK 0x3F
+#define S6_GMAC_MACHALFDUPLEX_RETXMAX 12
+#define S6_GMAC_MACHALFDUPLEX_RETXMAX_MASK 0x0F
+#define S6_GMAC_MACHALFDUPLEX_EXCESSDEF 16
+#define S6_GMAC_MACHALFDUPLEX_NOBACKOFF 17
+#define S6_GMAC_MACHALFDUPLEX_BPNOBCKOF 18
+#define S6_GMAC_MACHALFDUPLEX_ALTBEBENA 19
+#define S6_GMAC_MACHALFDUPLEX_ALTBEBTRN 20
+#define S6_GMAC_MACHALFDUPLEX_ALTBEBTR_MASK 0x0F
+#define S6_GMAC_MACMAXFRAMELEN 0x010
+#define S6_GMAC_MACMIICONF 0x020
+#define S6_GMAC_MACMIICONF_CSEL 0
+#define S6_GMAC_MACMIICONF_CSEL_DIV10 0
+#define S6_GMAC_MACMIICONF_CSEL_DIV12 1
+#define S6_GMAC_MACMIICONF_CSEL_DIV14 2
+#define S6_GMAC_MACMIICONF_CSEL_DIV18 3
+#define S6_GMAC_MACMIICONF_CSEL_DIV24 4
+#define S6_GMAC_MACMIICONF_CSEL_DIV34 5
+#define S6_GMAC_MACMIICONF_CSEL_DIV68 6
+#define S6_GMAC_MACMIICONF_CSEL_DIV168 7
+#define S6_GMAC_MACMIICONF_CSEL_MASK 7
+#define S6_GMAC_MACMIICONF_PREAMBLESUPR 4
+#define S6_GMAC_MACMIICONF_SCANAUTOINCR 5
+#define S6_GMAC_MACMIICMD 0x024
+#define S6_GMAC_MACMIICMD_READ 0
+#define S6_GMAC_MACMIICMD_SCAN 1
+#define S6_GMAC_MACMIIADDR 0x028
+#define S6_GMAC_MACMIIADDR_REG 0
+#define S6_GMAC_MACMIIADDR_REG_MASK 0x1F
+#define S6_GMAC_MACMIIADDR_PHY 8
+#define S6_GMAC_MACMIIADDR_PHY_MASK 0x1F
+#define S6_GMAC_MACMIICTRL 0x02C
+#define S6_GMAC_MACMIISTAT 0x030
+#define S6_GMAC_MACMIIINDI 0x034
+#define S6_GMAC_MACMIIINDI_BUSY 0
+#define S6_GMAC_MACMIIINDI_SCAN 1
+#define S6_GMAC_MACMIIINDI_INVAL 2
+#define S6_GMAC_MACINTERFSTAT 0x03C
+#define S6_GMAC_MACINTERFSTAT_LINKFAIL 3
+#define S6_GMAC_MACINTERFSTAT_EXCESSDEF 9
+#define S6_GMAC_MACSTATADDR1 0x040
+#define S6_GMAC_MACSTATADDR2 0x044
+
+#define S6_GMAC_FIFOCONF0 0x048
+#define S6_GMAC_FIFOCONF0_HSTRSTWT 0
+#define S6_GMAC_FIFOCONF0_HSTRSTSR 1
+#define S6_GMAC_FIFOCONF0_HSTRSTFR 2
+#define S6_GMAC_FIFOCONF0_HSTRSTST 3
+#define S6_GMAC_FIFOCONF0_HSTRSTFT 4
+#define S6_GMAC_FIFOCONF0_WTMENREQ 8
+#define S6_GMAC_FIFOCONF0_SRFENREQ 9
+#define S6_GMAC_FIFOCONF0_FRFENREQ 10
+#define S6_GMAC_FIFOCONF0_STFENREQ 11
+#define S6_GMAC_FIFOCONF0_FTFENREQ 12
+#define S6_GMAC_FIFOCONF0_WTMENRPLY 16
+#define S6_GMAC_FIFOCONF0_SRFENRPLY 17
+#define S6_GMAC_FIFOCONF0_FRFENRPLY 18
+#define S6_GMAC_FIFOCONF0_STFENRPLY 19
+#define S6_GMAC_FIFOCONF0_FTFENRPLY 20
+#define S6_GMAC_FIFOCONF1 0x04C
+#define S6_GMAC_FIFOCONF2 0x050
+#define S6_GMAC_FIFOCONF2_CFGLWM 0
+#define S6_GMAC_FIFOCONF2_CFGHWM 16
+#define S6_GMAC_FIFOCONF3 0x054
+#define S6_GMAC_FIFOCONF3_CFGFTTH 0
+#define S6_GMAC_FIFOCONF3_CFGHWMFT 16
+#define S6_GMAC_FIFOCONF4 0x058
+#define S6_GMAC_FIFOCONF_RSV_PREVDROP 0
+#define S6_GMAC_FIFOCONF_RSV_RUNT 1
+#define S6_GMAC_FIFOCONF_RSV_FALSECAR 2
+#define S6_GMAC_FIFOCONF_RSV_CODEERR 3
+#define S6_GMAC_FIFOCONF_RSV_CRCERR 4
+#define S6_GMAC_FIFOCONF_RSV_LENGTHERR 5
+#define S6_GMAC_FIFOCONF_RSV_LENRANGE 6
+#define S6_GMAC_FIFOCONF_RSV_OK 7
+#define S6_GMAC_FIFOCONF_RSV_MULTICAST 8
+#define S6_GMAC_FIFOCONF_RSV_BROADCAST 9
+#define S6_GMAC_FIFOCONF_RSV_DRIBBLE 10
+#define S6_GMAC_FIFOCONF_RSV_CTRLFRAME 11
+#define S6_GMAC_FIFOCONF_RSV_PAUSECTRL 12
+#define S6_GMAC_FIFOCONF_RSV_UNOPCODE 13
+#define S6_GMAC_FIFOCONF_RSV_VLANTAG 14
+#define S6_GMAC_FIFOCONF_RSV_LONGEVENT 15
+#define S6_GMAC_FIFOCONF_RSV_TRUNCATED 16
+#define S6_GMAC_FIFOCONF_RSV_MASK 0x3FFFF
+#define S6_GMAC_FIFOCONF5 0x05C
+#define S6_GMAC_FIFOCONF5_DROPLT64 18
+#define S6_GMAC_FIFOCONF5_CFGBYTM 19
+#define S6_GMAC_FIFOCONF5_RXDROPSIZE 20
+#define S6_GMAC_FIFOCONF5_RXDROPSIZE_MASK 0xF
+
+#define S6_GMAC_STAT_REGS 0x080
+#define S6_GMAC_STAT_SIZE_MIN 12
+#define S6_GMAC_STATTR64 0x080
+#define S6_GMAC_STATTR64_SIZE 18
+#define S6_GMAC_STATTR127 0x084
+#define S6_GMAC_STATTR127_SIZE 18
+#define S6_GMAC_STATTR255 0x088
+#define S6_GMAC_STATTR255_SIZE 18
+#define S6_GMAC_STATTR511 0x08C
+#define S6_GMAC_STATTR511_SIZE 18
+#define S6_GMAC_STATTR1K 0x090
+#define S6_GMAC_STATTR1K_SIZE 18
+#define S6_GMAC_STATTRMAX 0x094
+#define S6_GMAC_STATTRMAX_SIZE 18
+#define S6_GMAC_STATTRMGV 0x098
+#define S6_GMAC_STATTRMGV_SIZE 18
+#define S6_GMAC_STATRBYT 0x09C
+#define S6_GMAC_STATRBYT_SIZE 24
+#define S6_GMAC_STATRPKT 0x0A0
+#define S6_GMAC_STATRPKT_SIZE 18
+#define S6_GMAC_STATRFCS 0x0A4
+#define S6_GMAC_STATRFCS_SIZE 12
+#define S6_GMAC_STATRMCA 0x0A8
+#define S6_GMAC_STATRMCA_SIZE 18
+#define S6_GMAC_STATRBCA 0x0AC
+#define S6_GMAC_STATRBCA_SIZE 22
+#define S6_GMAC_STATRXCF 0x0B0
+#define S6_GMAC_STATRXCF_SIZE 18
+#define S6_GMAC_STATRXPF 0x0B4
+#define S6_GMAC_STATRXPF_SIZE 12
+#define S6_GMAC_STATRXUO 0x0B8
+#define S6_GMAC_STATRXUO_SIZE 12
+#define S6_GMAC_STATRALN 0x0BC
+#define S6_GMAC_STATRALN_SIZE 12
+#define S6_GMAC_STATRFLR 0x0C0
+#define S6_GMAC_STATRFLR_SIZE 16
+#define S6_GMAC_STATRCDE 0x0C4
+#define S6_GMAC_STATRCDE_SIZE 12
+#define S6_GMAC_STATRCSE 0x0C8
+#define S6_GMAC_STATRCSE_SIZE 12
+#define S6_GMAC_STATRUND 0x0CC
+#define S6_GMAC_STATRUND_SIZE 12
+#define S6_GMAC_STATROVR 0x0D0
+#define S6_GMAC_STATROVR_SIZE 12
+#define S6_GMAC_STATRFRG 0x0D4
+#define S6_GMAC_STATRFRG_SIZE 12
+#define S6_GMAC_STATRJBR 0x0D8
+#define S6_GMAC_STATRJBR_SIZE 12
+#define S6_GMAC_STATRDRP 0x0DC
+#define S6_GMAC_STATRDRP_SIZE 12
+#define S6_GMAC_STATTBYT 0x0E0
+#define S6_GMAC_STATTBYT_SIZE 24
+#define S6_GMAC_STATTPKT 0x0E4
+#define S6_GMAC_STATTPKT_SIZE 18
+#define S6_GMAC_STATTMCA 0x0E8
+#define S6_GMAC_STATTMCA_SIZE 18
+#define S6_GMAC_STATTBCA 0x0EC
+#define S6_GMAC_STATTBCA_SIZE 18
+#define S6_GMAC_STATTXPF 0x0F0
+#define S6_GMAC_STATTXPF_SIZE 12
+#define S6_GMAC_STATTDFR 0x0F4
+#define S6_GMAC_STATTDFR_SIZE 12
+#define S6_GMAC_STATTEDF 0x0F8
+#define S6_GMAC_STATTEDF_SIZE 12
+#define S6_GMAC_STATTSCL 0x0FC
+#define S6_GMAC_STATTSCL_SIZE 12
+#define S6_GMAC_STATTMCL 0x100
+#define S6_GMAC_STATTMCL_SIZE 12
+#define S6_GMAC_STATTLCL 0x104
+#define S6_GMAC_STATTLCL_SIZE 12
+#define S6_GMAC_STATTXCL 0x108
+#define S6_GMAC_STATTXCL_SIZE 12
+#define S6_GMAC_STATTNCL 0x10C
+#define S6_GMAC_STATTNCL_SIZE 13
+#define S6_GMAC_STATTPFH 0x110
+#define S6_GMAC_STATTPFH_SIZE 12
+#define S6_GMAC_STATTDRP 0x114
+#define S6_GMAC_STATTDRP_SIZE 12
+#define S6_GMAC_STATTJBR 0x118
+#define S6_GMAC_STATTJBR_SIZE 12
+#define S6_GMAC_STATTFCS 0x11C
+#define S6_GMAC_STATTFCS_SIZE 12
+#define S6_GMAC_STATTXCF 0x120
+#define S6_GMAC_STATTXCF_SIZE 12
+#define S6_GMAC_STATTOVR 0x124
+#define S6_GMAC_STATTOVR_SIZE 12
+#define S6_GMAC_STATTUND 0x128
+#define S6_GMAC_STATTUND_SIZE 12
+#define S6_GMAC_STATTFRG 0x12C
+#define S6_GMAC_STATTFRG_SIZE 12
+#define S6_GMAC_STATCARRY(n) (0x130 + 4*(n))
+#define S6_GMAC_STATCARRYMSK(n) (0x138 + 4*(n))
+#define S6_GMAC_STATCARRY1_RDRP 0
+#define S6_GMAC_STATCARRY1_RJBR 1
+#define S6_GMAC_STATCARRY1_RFRG 2
+#define S6_GMAC_STATCARRY1_ROVR 3
+#define S6_GMAC_STATCARRY1_RUND 4
+#define S6_GMAC_STATCARRY1_RCSE 5
+#define S6_GMAC_STATCARRY1_RCDE 6
+#define S6_GMAC_STATCARRY1_RFLR 7
+#define S6_GMAC_STATCARRY1_RALN 8
+#define S6_GMAC_STATCARRY1_RXUO 9
+#define S6_GMAC_STATCARRY1_RXPF 10
+#define S6_GMAC_STATCARRY1_RXCF 11
+#define S6_GMAC_STATCARRY1_RBCA 12
+#define S6_GMAC_STATCARRY1_RMCA 13
+#define S6_GMAC_STATCARRY1_RFCS 14
+#define S6_GMAC_STATCARRY1_RPKT 15
+#define S6_GMAC_STATCARRY1_RBYT 16
+#define S6_GMAC_STATCARRY1_TRMGV 25
+#define S6_GMAC_STATCARRY1_TRMAX 26
+#define S6_GMAC_STATCARRY1_TR1K 27
+#define S6_GMAC_STATCARRY1_TR511 28
+#define S6_GMAC_STATCARRY1_TR255 29
+#define S6_GMAC_STATCARRY1_TR127 30
+#define S6_GMAC_STATCARRY1_TR64 31
+#define S6_GMAC_STATCARRY2_TDRP 0
+#define S6_GMAC_STATCARRY2_TPFH 1
+#define S6_GMAC_STATCARRY2_TNCL 2
+#define S6_GMAC_STATCARRY2_TXCL 3
+#define S6_GMAC_STATCARRY2_TLCL 4
+#define S6_GMAC_STATCARRY2_TMCL 5
+#define S6_GMAC_STATCARRY2_TSCL 6
+#define S6_GMAC_STATCARRY2_TEDF 7
+#define S6_GMAC_STATCARRY2_TDFR 8
+#define S6_GMAC_STATCARRY2_TXPF 9
+#define S6_GMAC_STATCARRY2_TBCA 10
+#define S6_GMAC_STATCARRY2_TMCA 11
+#define S6_GMAC_STATCARRY2_TPKT 12
+#define S6_GMAC_STATCARRY2_TBYT 13
+#define S6_GMAC_STATCARRY2_TFRG 14
+#define S6_GMAC_STATCARRY2_TUND 15
+#define S6_GMAC_STATCARRY2_TOVR 16
+#define S6_GMAC_STATCARRY2_TXCF 17
+#define S6_GMAC_STATCARRY2_TFCS 18
+#define S6_GMAC_STATCARRY2_TJBR 19
+
+#define S6_GMAC_HOST_PBLKCTRL 0x140
+#define S6_GMAC_HOST_PBLKCTRL_TXENA 0
+#define S6_GMAC_HOST_PBLKCTRL_RXENA 1
+#define S6_GMAC_HOST_PBLKCTRL_TXSRES 2
+#define S6_GMAC_HOST_PBLKCTRL_RXSRES 3
+#define S6_GMAC_HOST_PBLKCTRL_TXBSIZ 8
+#define S6_GMAC_HOST_PBLKCTRL_RXBSIZ 12
+#define S6_GMAC_HOST_PBLKCTRL_SIZ_16 4
+#define S6_GMAC_HOST_PBLKCTRL_SIZ_32 5
+#define S6_GMAC_HOST_PBLKCTRL_SIZ_64 6
+#define S6_GMAC_HOST_PBLKCTRL_SIZ_128 7
+#define S6_GMAC_HOST_PBLKCTRL_SIZ_MASK 0xF
+#define S6_GMAC_HOST_PBLKCTRL_STATENA 16
+#define S6_GMAC_HOST_PBLKCTRL_STATAUTOZ 17
+#define S6_GMAC_HOST_PBLKCTRL_STATCLEAR 18
+#define S6_GMAC_HOST_PBLKCTRL_RGMII 19
+#define S6_GMAC_HOST_INTMASK 0x144
+#define S6_GMAC_HOST_INTSTAT 0x148
+#define S6_GMAC_HOST_INT_TXBURSTOVER 3
+#define S6_GMAC_HOST_INT_TXPREWOVER 4
+#define S6_GMAC_HOST_INT_RXBURSTUNDER 5
+#define S6_GMAC_HOST_INT_RXPOSTRFULL 6
+#define S6_GMAC_HOST_INT_RXPOSTRUNDER 7
+#define S6_GMAC_HOST_RXFIFOHWM 0x14C
+#define S6_GMAC_HOST_CTRLFRAMXP 0x150
+#define S6_GMAC_HOST_DSTADDRLO(n) (0x160 + 8*(n))
+#define S6_GMAC_HOST_DSTADDRHI(n) (0x164 + 8*(n))
+#define S6_GMAC_HOST_DSTMASKLO(n) (0x180 + 8*(n))
+#define S6_GMAC_HOST_DSTMASKHI(n) (0x184 + 8*(n))
+
+#define S6_GMAC_BURST_PREWR 0x1B0
+#define S6_GMAC_BURST_PREWR_LEN 0
+#define S6_GMAC_BURST_PREWR_LEN_MASK ((1 << 20) - 1)
+#define S6_GMAC_BURST_PREWR_CFE 20
+#define S6_GMAC_BURST_PREWR_PPE 21
+#define S6_GMAC_BURST_PREWR_FCS 22
+#define S6_GMAC_BURST_PREWR_PAD 23
+#define S6_GMAC_BURST_POSTRD 0x1D0
+#define S6_GMAC_BURST_POSTRD_LEN 0
+#define S6_GMAC_BURST_POSTRD_LEN_MASK ((1 << 20) - 1)
+#define S6_GMAC_BURST_POSTRD_DROP 20
+
+
+/* data handling */
+
+#define S6_NUM_TX_SKB 8 /* must be larger than TX fifo size */
+#define S6_NUM_RX_SKB 16
+#define S6_MAX_FRLEN 1536
+
+struct s6gmac {
+ u32 reg;
+ u32 tx_dma;
+ u32 rx_dma;
+ u32 io;
+ u8 tx_chan;
+ u8 rx_chan;
+ spinlock_t lock;
+ u8 tx_skb_i, tx_skb_o;
+ u8 rx_skb_i, rx_skb_o;
+ struct sk_buff *tx_skb[S6_NUM_TX_SKB];
+ struct sk_buff *rx_skb[S6_NUM_RX_SKB];
+ unsigned long carry[sizeof(struct net_device_stats) / sizeof(long)];
+ unsigned long stats[sizeof(struct net_device_stats) / sizeof(long)];
+ struct phy_device *phydev;
+ struct {
+ struct mii_bus *bus;
+ int irq[PHY_MAX_ADDR];
+ } mii;
+ struct {
+ unsigned int mbit;
+ u8 giga;
+ u8 isup;
+ u8 full;
+ } link;
+};
+
+static void s6gmac_rx_fillfifo(struct s6gmac *pd)
+{
+ struct sk_buff *skb;
+ while ((((u8)(pd->rx_skb_i - pd->rx_skb_o)) < S6_NUM_RX_SKB)
+ && (!s6dmac_fifo_full(pd->rx_dma, pd->rx_chan))
+ && (skb = dev_alloc_skb(S6_MAX_FRLEN + 2))) {
+ pd->rx_skb[(pd->rx_skb_i++) % S6_NUM_RX_SKB] = skb;
+ s6dmac_put_fifo_cache(pd->rx_dma, pd->rx_chan,
+ pd->io, (u32)skb->data, S6_MAX_FRLEN);
+ }
+}
+
+static void s6gmac_rx_interrupt(struct net_device *dev)
+{
+ struct s6gmac *pd = netdev_priv(dev);
+ u32 pfx;
+ struct sk_buff *skb;
+ while (((u8)(pd->rx_skb_i - pd->rx_skb_o)) >
+ s6dmac_pending_count(pd->rx_dma, pd->rx_chan)) {
+ skb = pd->rx_skb[(pd->rx_skb_o++) % S6_NUM_RX_SKB];
+ pfx = readl(pd->reg + S6_GMAC_BURST_POSTRD);
+ if (pfx & (1 << S6_GMAC_BURST_POSTRD_DROP)) {
+ dev_kfree_skb_irq(skb);
+ } else {
+ skb_put(skb, (pfx >> S6_GMAC_BURST_POSTRD_LEN)
+ & S6_GMAC_BURST_POSTRD_LEN_MASK);
+ skb->dev = dev;
+ skb->protocol = eth_type_trans(skb, dev);
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ netif_rx(skb);
+ }
+ }
+}
+
+static void s6gmac_tx_interrupt(struct net_device *dev)
+{
+ struct s6gmac *pd = netdev_priv(dev);
+ while (((u8)(pd->tx_skb_i - pd->tx_skb_o)) >
+ s6dmac_pending_count(pd->tx_dma, pd->tx_chan)) {
+ dev_kfree_skb_irq(pd->tx_skb[(pd->tx_skb_o++) % S6_NUM_TX_SKB]);
+ }
+ if (!s6dmac_fifo_full(pd->tx_dma, pd->tx_chan))
+ netif_wake_queue(dev);
+}
+
+struct s6gmac_statinf {
+ unsigned reg_size : 4; /* 0: unused */
+ unsigned reg_off : 6;
+ unsigned net_index : 6;
+};
+
+#define S6_STATS_B (8 * sizeof(u32))
+#define S6_STATS_C(b, r, f) [b] = { \
+ BUILD_BUG_ON_ZERO(r##_SIZE < S6_GMAC_STAT_SIZE_MIN) + \
+ BUILD_BUG_ON_ZERO((r##_SIZE - (S6_GMAC_STAT_SIZE_MIN - 1)) \
+ >= (1<<4)) + \
+ r##_SIZE - (S6_GMAC_STAT_SIZE_MIN - 1), \
+ BUILD_BUG_ON_ZERO(((unsigned)((r - S6_GMAC_STAT_REGS) / sizeof(u32))) \
+ >= ((1<<6)-1)) + \
+ (r - S6_GMAC_STAT_REGS) / sizeof(u32), \
+ BUILD_BUG_ON_ZERO((offsetof(struct net_device_stats, f)) \
+ % sizeof(unsigned long)) + \
+ BUILD_BUG_ON_ZERO((((unsigned)(offsetof(struct net_device_stats, f)) \
+ / sizeof(unsigned long)) >= (1<<6))) + \
+ BUILD_BUG_ON_ZERO((sizeof(((struct net_device_stats *)0)->f) \
+ != sizeof(unsigned long))) + \
+ (offsetof(struct net_device_stats, f)) / sizeof(unsigned long)},
+
+static const struct s6gmac_statinf statinf[2][S6_STATS_B] = { {
+ S6_STATS_C(S6_GMAC_STATCARRY1_RBYT, S6_GMAC_STATRBYT, rx_bytes)
+ S6_STATS_C(S6_GMAC_STATCARRY1_RPKT, S6_GMAC_STATRPKT, rx_packets)
+ S6_STATS_C(S6_GMAC_STATCARRY1_RFCS, S6_GMAC_STATRFCS, rx_crc_errors)
+ S6_STATS_C(S6_GMAC_STATCARRY1_RMCA, S6_GMAC_STATRMCA, multicast)
+ S6_STATS_C(S6_GMAC_STATCARRY1_RALN, S6_GMAC_STATRALN, rx_frame_errors)
+ S6_STATS_C(S6_GMAC_STATCARRY1_RFLR, S6_GMAC_STATRFLR, rx_length_errors)
+ S6_STATS_C(S6_GMAC_STATCARRY1_RCDE, S6_GMAC_STATRCDE, rx_missed_errors)
+ S6_STATS_C(S6_GMAC_STATCARRY1_RUND, S6_GMAC_STATRUND, rx_length_errors)
+ S6_STATS_C(S6_GMAC_STATCARRY1_ROVR, S6_GMAC_STATROVR, rx_length_errors)
+ S6_STATS_C(S6_GMAC_STATCARRY1_RFRG, S6_GMAC_STATRFRG, rx_crc_errors)
+ S6_STATS_C(S6_GMAC_STATCARRY1_RJBR, S6_GMAC_STATRJBR, rx_crc_errors)
+ S6_STATS_C(S6_GMAC_STATCARRY1_RDRP, S6_GMAC_STATRDRP, rx_dropped)
+}, {
+ S6_STATS_C(S6_GMAC_STATCARRY2_TBYT, S6_GMAC_STATTBYT, tx_bytes)
+ S6_STATS_C(S6_GMAC_STATCARRY2_TPKT, S6_GMAC_STATTPKT, tx_packets)
+ S6_STATS_C(S6_GMAC_STATCARRY2_TEDF, S6_GMAC_STATTEDF, tx_aborted_errors)
+ S6_STATS_C(S6_GMAC_STATCARRY2_TXCL, S6_GMAC_STATTXCL, tx_aborted_errors)
+ S6_STATS_C(S6_GMAC_STATCARRY2_TNCL, S6_GMAC_STATTNCL, collisions)
+ S6_STATS_C(S6_GMAC_STATCARRY2_TDRP, S6_GMAC_STATTDRP, tx_dropped)
+ S6_STATS_C(S6_GMAC_STATCARRY2_TJBR, S6_GMAC_STATTJBR, tx_errors)
+ S6_STATS_C(S6_GMAC_STATCARRY2_TFCS, S6_GMAC_STATTFCS, tx_errors)
+ S6_STATS_C(S6_GMAC_STATCARRY2_TOVR, S6_GMAC_STATTOVR, tx_errors)
+ S6_STATS_C(S6_GMAC_STATCARRY2_TUND, S6_GMAC_STATTUND, tx_errors)
+ S6_STATS_C(S6_GMAC_STATCARRY2_TFRG, S6_GMAC_STATTFRG, tx_errors)
+} };
+
+static void s6gmac_stats_collect(struct s6gmac *pd,
+ const struct s6gmac_statinf *inf)
+{
+ int b;
+ for (b = 0; b < S6_STATS_B; b++) {
+ if (inf[b].reg_size) {
+ pd->stats[inf[b].net_index] +=
+ readl(pd->reg + S6_GMAC_STAT_REGS
+ + sizeof(u32) * inf[b].reg_off);
+ }
+ }
+}
+
+static void s6gmac_stats_carry(struct s6gmac *pd,
+ const struct s6gmac_statinf *inf, u32 mask)
+{
+ int b;
+ while (mask) {
+ b = fls(mask) - 1;
+ mask &= ~(1 << b);
+ pd->carry[inf[b].net_index] += (1 << inf[b].reg_size);
+ }
+}
+
+static inline u32 s6gmac_stats_pending(struct s6gmac *pd, int carry)
+{
+ int r = readl(pd->reg + S6_GMAC_STATCARRY(carry)) &
+ ~readl(pd->reg + S6_GMAC_STATCARRYMSK(carry));
+ return r;
+}
+
+static inline void s6gmac_stats_interrupt(struct s6gmac *pd, int carry)
+{
+ u32 mask;
+ mask = s6gmac_stats_pending(pd, carry);
+ if (mask) {
+ writel(mask, pd->reg + S6_GMAC_STATCARRY(carry));
+ s6gmac_stats_carry(pd, &statinf[carry][0], mask);
+ }
+}
+
+static irqreturn_t s6gmac_interrupt(int irq, void *dev_id)
+{
+ struct net_device *dev = (struct net_device *)dev_id;
+ struct s6gmac *pd = netdev_priv(dev);
+ if (!dev)
+ return IRQ_NONE;
+ spin_lock(&pd->lock);
+ if (s6dmac_termcnt_irq(pd->rx_dma, pd->rx_chan))
+ s6gmac_rx_interrupt(dev);
+ s6gmac_rx_fillfifo(pd);
+ if (s6dmac_termcnt_irq(pd->tx_dma, pd->tx_chan))
+ s6gmac_tx_interrupt(dev);
+ s6gmac_stats_interrupt(pd, 0);
+ s6gmac_stats_interrupt(pd, 1);
+ spin_unlock(&pd->lock);
+ return IRQ_HANDLED;
+}
+
+static inline void s6gmac_set_dstaddr(struct s6gmac *pd, int n,
+ u32 addrlo, u32 addrhi, u32 masklo, u32 maskhi)
+{
+ writel(addrlo, pd->reg + S6_GMAC_HOST_DSTADDRLO(n));
+ writel(addrhi, pd->reg + S6_GMAC_HOST_DSTADDRHI(n));
+ writel(masklo, pd->reg + S6_GMAC_HOST_DSTMASKLO(n));
+ writel(maskhi, pd->reg + S6_GMAC_HOST_DSTMASKHI(n));
+}
+
+static inline void s6gmac_stop_device(struct net_device *dev)
+{
+ struct s6gmac *pd = netdev_priv(dev);
+ writel(0, pd->reg + S6_GMAC_MACCONF1);
+}
+
+static inline void s6gmac_init_device(struct net_device *dev)
+{
+ struct s6gmac *pd = netdev_priv(dev);
+ int is_rgmii = !!(pd->phydev->supported
+ & (SUPPORTED_1000baseT_Full | SUPPORTED_1000baseT_Half));
+#if 0
+ writel(1 << S6_GMAC_MACCONF1_SYNCTX |
+ 1 << S6_GMAC_MACCONF1_SYNCRX |
+ 1 << S6_GMAC_MACCONF1_TXFLOWCTRL |
+ 1 << S6_GMAC_MACCONF1_RXFLOWCTRL |
+ 1 << S6_GMAC_MACCONF1_RESTXFUNC |
+ 1 << S6_GMAC_MACCONF1_RESRXFUNC |
+ 1 << S6_GMAC_MACCONF1_RESTXMACCTRL |
+ 1 << S6_GMAC_MACCONF1_RESRXMACCTRL,
+ pd->reg + S6_GMAC_MACCONF1);
+#endif
+ writel(1 << S6_GMAC_MACCONF1_SOFTRES, pd->reg + S6_GMAC_MACCONF1);
+ udelay(1000);
+ writel(1 << S6_GMAC_MACCONF1_TXENA | 1 << S6_GMAC_MACCONF1_RXENA,
+ pd->reg + S6_GMAC_MACCONF1);
+ writel(1 << S6_GMAC_HOST_PBLKCTRL_TXSRES |
+ 1 << S6_GMAC_HOST_PBLKCTRL_RXSRES,
+ pd->reg + S6_GMAC_HOST_PBLKCTRL);
+ writel(S6_GMAC_HOST_PBLKCTRL_SIZ_128 << S6_GMAC_HOST_PBLKCTRL_TXBSIZ |
+ S6_GMAC_HOST_PBLKCTRL_SIZ_128 << S6_GMAC_HOST_PBLKCTRL_RXBSIZ |
+ 1 << S6_GMAC_HOST_PBLKCTRL_STATENA |
+ 1 << S6_GMAC_HOST_PBLKCTRL_STATCLEAR |
+ is_rgmii << S6_GMAC_HOST_PBLKCTRL_RGMII,
+ pd->reg + S6_GMAC_HOST_PBLKCTRL);
+ writel(1 << S6_GMAC_MACCONF1_TXENA |
+ 1 << S6_GMAC_MACCONF1_RXENA |
+ (dev->flags & IFF_LOOPBACK ? 1 : 0)
+ << S6_GMAC_MACCONF1_LOOPBACK,
+ pd->reg + S6_GMAC_MACCONF1);
+ writel(dev->mtu && (dev->mtu < (S6_MAX_FRLEN - ETH_HLEN-ETH_FCS_LEN)) ?
+ dev->mtu+ETH_HLEN+ETH_FCS_LEN : S6_MAX_FRLEN,
+ pd->reg + S6_GMAC_MACMAXFRAMELEN);
+ writel((pd->link.full ? 1 : 0) << S6_GMAC_MACCONF2_FULL |
+ 1 << S6_GMAC_MACCONF2_PADCRCENA |
+ 1 << S6_GMAC_MACCONF2_LENGTHFCHK |
+ (pd->link.giga ?
+ S6_GMAC_MACCONF2_IFMODE_BYTE :
+ S6_GMAC_MACCONF2_IFMODE_NIBBLE)
+ << S6_GMAC_MACCONF2_IFMODE |
+ 7 << S6_GMAC_MACCONF2_PREAMBLELEN,
+ pd->reg + S6_GMAC_MACCONF2);
+ writel(0, pd->reg + S6_GMAC_MACSTATADDR1);
+ writel(0, pd->reg + S6_GMAC_MACSTATADDR2);
+ writel(1 << S6_GMAC_FIFOCONF0_WTMENREQ |
+ 1 << S6_GMAC_FIFOCONF0_SRFENREQ |
+ 1 << S6_GMAC_FIFOCONF0_FRFENREQ |
+ 1 << S6_GMAC_FIFOCONF0_STFENREQ |
+ 1 << S6_GMAC_FIFOCONF0_FTFENREQ,
+ pd->reg + S6_GMAC_FIFOCONF0);
+ writel(128 << S6_GMAC_FIFOCONF3_CFGFTTH |
+ 128 << S6_GMAC_FIFOCONF3_CFGHWMFT,
+ pd->reg + S6_GMAC_FIFOCONF3);
+ writel((S6_GMAC_FIFOCONF_RSV_MASK & ~(
+ 1 << S6_GMAC_FIFOCONF_RSV_RUNT |
+ 1 << S6_GMAC_FIFOCONF_RSV_CRCERR |
+ 1 << S6_GMAC_FIFOCONF_RSV_OK |
+ 1 << S6_GMAC_FIFOCONF_RSV_DRIBBLE |
+ 1 << S6_GMAC_FIFOCONF_RSV_CTRLFRAME |
+ 1 << S6_GMAC_FIFOCONF_RSV_PAUSECTRL |
+ 1 << S6_GMAC_FIFOCONF_RSV_UNOPCODE |
+ 1 << S6_GMAC_FIFOCONF_RSV_TRUNCATED)) |
+ 1 << S6_GMAC_FIFOCONF5_DROPLT64 |
+ pd->link.giga << S6_GMAC_FIFOCONF5_CFGBYTM |
+ 1 << S6_GMAC_FIFOCONF5_RXDROPSIZE,
+ pd->reg + S6_GMAC_FIFOCONF5);
+ writel(1 << S6_GMAC_FIFOCONF_RSV_RUNT |
+ 1 << S6_GMAC_FIFOCONF_RSV_CRCERR |
+ 1 << S6_GMAC_FIFOCONF_RSV_DRIBBLE |
+ 1 << S6_GMAC_FIFOCONF_RSV_CTRLFRAME |
+ 1 << S6_GMAC_FIFOCONF_RSV_PAUSECTRL |
+ 1 << S6_GMAC_FIFOCONF_RSV_UNOPCODE |
+ 1 << S6_GMAC_FIFOCONF_RSV_TRUNCATED,
+ pd->reg + S6_GMAC_FIFOCONF4);
+ s6gmac_set_dstaddr(pd, 0,
+ 0xFFFFFFFF, 0x0000FFFF, 0xFFFFFFFF, 0x0000FFFF);
+ s6gmac_set_dstaddr(pd, 1,
+ dev->dev_addr[5] |
+ dev->dev_addr[4] << 8 |
+ dev->dev_addr[3] << 16 |
+ dev->dev_addr[2] << 24,
+ dev->dev_addr[1] |
+ dev->dev_addr[0] << 8,
+ 0xFFFFFFFF, 0x0000FFFF);
+ s6gmac_set_dstaddr(pd, 2,
+ 0x00000000, 0x00000100, 0x00000000, 0x00000100);
+ s6gmac_set_dstaddr(pd, 3,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000);
+ writel(1 << S6_GMAC_HOST_PBLKCTRL_TXENA |
+ 1 << S6_GMAC_HOST_PBLKCTRL_RXENA |
+ S6_GMAC_HOST_PBLKCTRL_SIZ_128 << S6_GMAC_HOST_PBLKCTRL_TXBSIZ |
+ S6_GMAC_HOST_PBLKCTRL_SIZ_128 << S6_GMAC_HOST_PBLKCTRL_RXBSIZ |
+ 1 << S6_GMAC_HOST_PBLKCTRL_STATENA |
+ 1 << S6_GMAC_HOST_PBLKCTRL_STATCLEAR |
+ is_rgmii << S6_GMAC_HOST_PBLKCTRL_RGMII,
+ pd->reg + S6_GMAC_HOST_PBLKCTRL);
+}
+
+static void s6mii_enable(struct s6gmac *pd)
+{
+ writel(readl(pd->reg + S6_GMAC_MACCONF1) &
+ ~(1 << S6_GMAC_MACCONF1_SOFTRES),
+ pd->reg + S6_GMAC_MACCONF1);
+ writel((readl(pd->reg + S6_GMAC_MACMIICONF)
+ & ~(S6_GMAC_MACMIICONF_CSEL_MASK << S6_GMAC_MACMIICONF_CSEL))
+ | (S6_GMAC_MACMIICONF_CSEL_DIV168 << S6_GMAC_MACMIICONF_CSEL),
+ pd->reg + S6_GMAC_MACMIICONF);
+}
+
+static int s6mii_busy(struct s6gmac *pd, int tmo)
+{
+ while (readl(pd->reg + S6_GMAC_MACMIIINDI)) {
+ if (--tmo == 0)
+ return -ETIME;
+ udelay(64);
+ }
+ return 0;
+}
+
+static int s6mii_read(struct mii_bus *bus, int phy_addr, int regnum)
+{
+ struct s6gmac *pd = bus->priv;
+ s6mii_enable(pd);
+ if (s6mii_busy(pd, 256))
+ return -ETIME;
+ writel(phy_addr << S6_GMAC_MACMIIADDR_PHY |
+ regnum << S6_GMAC_MACMIIADDR_REG,
+ pd->reg + S6_GMAC_MACMIIADDR);
+ writel(1 << S6_GMAC_MACMIICMD_READ, pd->reg + S6_GMAC_MACMIICMD);
+ writel(0, pd->reg + S6_GMAC_MACMIICMD);
+ if (s6mii_busy(pd, 256))
+ return -ETIME;
+ return (u16)readl(pd->reg + S6_GMAC_MACMIISTAT);
+}
+
+static int s6mii_write(struct mii_bus *bus, int phy_addr, int regnum, u16 value)
+{
+ struct s6gmac *pd = bus->priv;
+ s6mii_enable(pd);
+ if (s6mii_busy(pd, 256))
+ return -ETIME;
+ writel(phy_addr << S6_GMAC_MACMIIADDR_PHY |
+ regnum << S6_GMAC_MACMIIADDR_REG,
+ pd->reg + S6_GMAC_MACMIIADDR);
+ writel(value, pd->reg + S6_GMAC_MACMIICTRL);
+ if (s6mii_busy(pd, 256))
+ return -ETIME;
+ return 0;
+}
+
+static int s6mii_reset(struct mii_bus *bus)
+{
+ struct s6gmac *pd = bus->priv;
+ s6mii_enable(pd);
+ if (s6mii_busy(pd, PHY_INIT_TIMEOUT))
+ return -ETIME;
+ return 0;
+}
+
+static void s6gmac_set_rgmii_txclock(struct s6gmac *pd)
+{
+ u32 pllsel = readl(S6_REG_GREG1 + S6_GREG1_PLLSEL);
+ pllsel &= ~(S6_GREG1_PLLSEL_GMAC_MASK << S6_GREG1_PLLSEL_GMAC);
+ switch (pd->link.mbit) {
+ case 10:
+ pllsel |= S6_GREG1_PLLSEL_GMAC_2500KHZ << S6_GREG1_PLLSEL_GMAC;
+ break;
+ case 100:
+ pllsel |= S6_GREG1_PLLSEL_GMAC_25MHZ << S6_GREG1_PLLSEL_GMAC;
+ break;
+ case 1000:
+ pllsel |= S6_GREG1_PLLSEL_GMAC_125MHZ << S6_GREG1_PLLSEL_GMAC;
+ break;
+ default:
+ return;
+ }
+ writel(pllsel, S6_REG_GREG1 + S6_GREG1_PLLSEL);
+}
+
+static inline void s6gmac_linkisup(struct net_device *dev, int isup)
+{
+ struct s6gmac *pd = netdev_priv(dev);
+ struct phy_device *phydev = pd->phydev;
+
+ pd->link.full = phydev->duplex;
+ pd->link.giga = (phydev->speed == 1000);
+ if (pd->link.mbit != phydev->speed) {
+ pd->link.mbit = phydev->speed;
+ s6gmac_set_rgmii_txclock(pd);
+ }
+ pd->link.isup = isup;
+ if (isup)
+ netif_carrier_on(dev);
+ phy_print_status(phydev);
+}
+
+static void s6gmac_adjust_link(struct net_device *dev)
+{
+ struct s6gmac *pd = netdev_priv(dev);
+ struct phy_device *phydev = pd->phydev;
+ if (pd->link.isup &&
+ (!phydev->link ||
+ (pd->link.mbit != phydev->speed) ||
+ (pd->link.full != phydev->duplex))) {
+ pd->link.isup = 0;
+ netif_tx_disable(dev);
+ if (!phydev->link) {
+ netif_carrier_off(dev);
+ phy_print_status(phydev);
+ }
+ }
+ if (!pd->link.isup && phydev->link) {
+ if (pd->link.full != phydev->duplex) {
+ u32 maccfg = readl(pd->reg + S6_GMAC_MACCONF2);
+ if (phydev->duplex)
+ maccfg |= 1 << S6_GMAC_MACCONF2_FULL;
+ else
+ maccfg &= ~(1 << S6_GMAC_MACCONF2_FULL);
+ writel(maccfg, pd->reg + S6_GMAC_MACCONF2);
+ }
+
+ if (pd->link.giga != (phydev->speed == 1000)) {
+ u32 fifocfg = readl(pd->reg + S6_GMAC_FIFOCONF5);
+ u32 maccfg = readl(pd->reg + S6_GMAC_MACCONF2);
+ maccfg &= ~(S6_GMAC_MACCONF2_IFMODE_MASK
+ << S6_GMAC_MACCONF2_IFMODE);
+ if (phydev->speed == 1000) {
+ fifocfg |= 1 << S6_GMAC_FIFOCONF5_CFGBYTM;
+ maccfg |= S6_GMAC_MACCONF2_IFMODE_BYTE
+ << S6_GMAC_MACCONF2_IFMODE;
+ } else {
+ fifocfg &= ~(1 << S6_GMAC_FIFOCONF5_CFGBYTM);
+ maccfg |= S6_GMAC_MACCONF2_IFMODE_NIBBLE
+ << S6_GMAC_MACCONF2_IFMODE;
+ }
+ writel(fifocfg, pd->reg + S6_GMAC_FIFOCONF5);
+ writel(maccfg, pd->reg + S6_GMAC_MACCONF2);
+ }
+
+ if (!s6dmac_fifo_full(pd->tx_dma, pd->tx_chan))
+ netif_wake_queue(dev);
+ s6gmac_linkisup(dev, 1);
+ }
+}
+
+static inline int s6gmac_phy_start(struct net_device *dev)
+{
+ struct s6gmac *pd = netdev_priv(dev);
+ int i = 0;
+ struct phy_device *p = NULL;
+ while ((!(p = pd->mii.bus->phy_map[i])) && (i < PHY_MAX_ADDR))
+ i++;
+ p = phy_connect(dev, dev_name(&p->dev), &s6gmac_adjust_link, 0,
+ PHY_INTERFACE_MODE_RGMII);
+ if (IS_ERR(p)) {
+ printk(KERN_ERR "%s: Could not attach to PHY\n", dev->name);
+ return PTR_ERR(p);
+ }
+ p->supported &= PHY_GBIT_FEATURES;
+ p->advertising = p->supported;
+ pd->phydev = p;
+ return 0;
+}
+
+static inline void s6gmac_init_stats(struct net_device *dev)
+{
+ struct s6gmac *pd = netdev_priv(dev);
+ u32 mask;
+ mask = 1 << S6_GMAC_STATCARRY1_RDRP |
+ 1 << S6_GMAC_STATCARRY1_RJBR |
+ 1 << S6_GMAC_STATCARRY1_RFRG |
+ 1 << S6_GMAC_STATCARRY1_ROVR |
+ 1 << S6_GMAC_STATCARRY1_RUND |
+ 1 << S6_GMAC_STATCARRY1_RCDE |
+ 1 << S6_GMAC_STATCARRY1_RFLR |
+ 1 << S6_GMAC_STATCARRY1_RALN |
+ 1 << S6_GMAC_STATCARRY1_RMCA |
+ 1 << S6_GMAC_STATCARRY1_RFCS |
+ 1 << S6_GMAC_STATCARRY1_RPKT |
+ 1 << S6_GMAC_STATCARRY1_RBYT;
+ writel(mask, pd->reg + S6_GMAC_STATCARRY(0));
+ writel(~mask, pd->reg + S6_GMAC_STATCARRYMSK(0));
+ mask = 1 << S6_GMAC_STATCARRY2_TDRP |
+ 1 << S6_GMAC_STATCARRY2_TNCL |
+ 1 << S6_GMAC_STATCARRY2_TXCL |
+ 1 << S6_GMAC_STATCARRY2_TEDF |
+ 1 << S6_GMAC_STATCARRY2_TPKT |
+ 1 << S6_GMAC_STATCARRY2_TBYT |
+ 1 << S6_GMAC_STATCARRY2_TFRG |
+ 1 << S6_GMAC_STATCARRY2_TUND |
+ 1 << S6_GMAC_STATCARRY2_TOVR |
+ 1 << S6_GMAC_STATCARRY2_TFCS |
+ 1 << S6_GMAC_STATCARRY2_TJBR;
+ writel(mask, pd->reg + S6_GMAC_STATCARRY(1));
+ writel(~mask, pd->reg + S6_GMAC_STATCARRYMSK(1));
+}
+
+static inline void s6gmac_init_dmac(struct net_device *dev)
+{
+ struct s6gmac *pd = netdev_priv(dev);
+ s6dmac_disable_chan(pd->tx_dma, pd->tx_chan);
+ s6dmac_disable_chan(pd->rx_dma, pd->rx_chan);
+ s6dmac_disable_error_irqs(pd->tx_dma, 1 << S6_HIFDMA_GMACTX);
+ s6dmac_disable_error_irqs(pd->rx_dma, 1 << S6_HIFDMA_GMACRX);
+}
+
+static int s6gmac_tx(struct sk_buff *skb, struct net_device *dev)
+{
+ struct s6gmac *pd = netdev_priv(dev);
+ unsigned long flags;
+ spin_lock_irqsave(&pd->lock, flags);
+ dev->trans_start = jiffies;
+ writel(skb->len << S6_GMAC_BURST_PREWR_LEN |
+ 0 << S6_GMAC_BURST_PREWR_CFE |
+ 1 << S6_GMAC_BURST_PREWR_PPE |
+ 1 << S6_GMAC_BURST_PREWR_FCS |
+ ((skb->len < ETH_ZLEN) ? 1 : 0) << S6_GMAC_BURST_PREWR_PAD,
+ pd->reg + S6_GMAC_BURST_PREWR);
+ s6dmac_put_fifo_cache(pd->tx_dma, pd->tx_chan,
+ (u32)skb->data, pd->io, skb->len);
+ if (s6dmac_fifo_full(pd->tx_dma, pd->tx_chan))
+ netif_stop_queue(dev);
+ if (((u8)(pd->tx_skb_i - pd->tx_skb_o)) >= S6_NUM_TX_SKB) {
+ printk(KERN_ERR "GMAC BUG: skb tx ring overflow [%x, %x]\n",
+ pd->tx_skb_o, pd->tx_skb_i);
+ BUG();
+ }
+ pd->tx_skb[(pd->tx_skb_i++) % S6_NUM_TX_SKB] = skb;
+ spin_unlock_irqrestore(&pd->lock, flags);
+ return 0;
+}
+
+static void s6gmac_tx_timeout(struct net_device *dev)
+{
+ struct s6gmac *pd = netdev_priv(dev);
+ unsigned long flags;
+ spin_lock_irqsave(&pd->lock, flags);
+ s6gmac_tx_interrupt(dev);
+ spin_unlock_irqrestore(&pd->lock, flags);
+}
+
+static int s6gmac_open(struct net_device *dev)
+{
+ struct s6gmac *pd = netdev_priv(dev);
+ unsigned long flags;
+ phy_read_status(pd->phydev);
+ spin_lock_irqsave(&pd->lock, flags);
+ pd->link.mbit = 0;
+ s6gmac_linkisup(dev, pd->phydev->link);
+ s6gmac_init_device(dev);
+ s6gmac_init_stats(dev);
+ s6gmac_init_dmac(dev);
+ s6gmac_rx_fillfifo(pd);
+ s6dmac_enable_chan(pd->rx_dma, pd->rx_chan,
+ 2, 1, 0, 1, 0, 0, 0, 7, -1, 2, 0, 1);
+ s6dmac_enable_chan(pd->tx_dma, pd->tx_chan,
+ 2, 0, 1, 0, 0, 0, 0, 7, -1, 2, 0, 1);
+ writel(0 << S6_GMAC_HOST_INT_TXBURSTOVER |
+ 0 << S6_GMAC_HOST_INT_TXPREWOVER |
+ 0 << S6_GMAC_HOST_INT_RXBURSTUNDER |
+ 0 << S6_GMAC_HOST_INT_RXPOSTRFULL |
+ 0 << S6_GMAC_HOST_INT_RXPOSTRUNDER,
+ pd->reg + S6_GMAC_HOST_INTMASK);
+ spin_unlock_irqrestore(&pd->lock, flags);
+ phy_start(pd->phydev);
+ netif_start_queue(dev);
+ return 0;
+}
+
+static int s6gmac_stop(struct net_device *dev)
+{
+ struct s6gmac *pd = netdev_priv(dev);
+ unsigned long flags;
+ netif_stop_queue(dev);
+ phy_stop(pd->phydev);
+ spin_lock_irqsave(&pd->lock, flags);
+ s6gmac_init_dmac(dev);
+ s6gmac_stop_device(dev);
+ while (pd->tx_skb_i != pd->tx_skb_o)
+ dev_kfree_skb(pd->tx_skb[(pd->tx_skb_o++) % S6_NUM_TX_SKB]);
+ while (pd->rx_skb_i != pd->rx_skb_o)
+ dev_kfree_skb(pd->rx_skb[(pd->rx_skb_o++) % S6_NUM_RX_SKB]);
+ spin_unlock_irqrestore(&pd->lock, flags);
+ return 0;
+}
+
+static struct net_device_stats *s6gmac_stats(struct net_device *dev)
+{
+ struct s6gmac *pd = netdev_priv(dev);
+ struct net_device_stats *st = (struct net_device_stats *)&pd->stats;
+ int i;
+ do {
+ unsigned long flags;
+ spin_lock_irqsave(&pd->lock, flags);
+ for (i = 0; i < sizeof(pd->stats) / sizeof(unsigned long); i++)
+ pd->stats[i] =
+ pd->carry[i] << (S6_GMAC_STAT_SIZE_MIN - 1);
+ s6gmac_stats_collect(pd, &statinf[0][0]);
+ s6gmac_stats_collect(pd, &statinf[1][0]);
+ i = s6gmac_stats_pending(pd, 0) |
+ s6gmac_stats_pending(pd, 1);
+ spin_unlock_irqrestore(&pd->lock, flags);
+ } while (i);
+ st->rx_errors = st->rx_crc_errors +
+ st->rx_frame_errors +
+ st->rx_length_errors +
+ st->rx_missed_errors;
+ st->tx_errors += st->tx_aborted_errors;
+ return st;
+}
+
+static int __devinit s6gmac_probe(struct platform_device *pdev)
+{
+ struct net_device *dev;
+ struct s6gmac *pd;
+ int res;
+ unsigned long i;
+ struct mii_bus *mb;
+ dev = alloc_etherdev(sizeof(*pd));
+ if (!dev) {
+ printk(KERN_ERR DRV_PRMT "etherdev alloc failed, aborting.\n");
+ return -ENOMEM;
+ }
+ dev->open = s6gmac_open;
+ dev->stop = s6gmac_stop;
+ dev->hard_start_xmit = s6gmac_tx;
+ dev->tx_timeout = s6gmac_tx_timeout;
+ dev->watchdog_timeo = HZ;
+ dev->get_stats = s6gmac_stats;
+ dev->irq = platform_get_irq(pdev, 0);
+ pd = netdev_priv(dev);
+ memset(pd, 0, sizeof(*pd));
+ spin_lock_init(&pd->lock);
+ pd->reg = platform_get_resource(pdev, IORESOURCE_MEM, 0)->start;
+ i = platform_get_resource(pdev, IORESOURCE_DMA, 0)->start;
+ pd->tx_dma = DMA_MASK_DMAC(i);
+ pd->tx_chan = DMA_INDEX_CHNL(i);
+ i = platform_get_resource(pdev, IORESOURCE_DMA, 1)->start;
+ pd->rx_dma = DMA_MASK_DMAC(i);
+ pd->rx_chan = DMA_INDEX_CHNL(i);
+ pd->io = platform_get_resource(pdev, IORESOURCE_IO, 0)->start;
+ res = request_irq(dev->irq, &s6gmac_interrupt, 0, dev->name, dev);
+ if (res) {
+ printk(KERN_ERR DRV_PRMT "irq request failed: %d\n", dev->irq);
+ goto errirq;
+ }
+ res = register_netdev(dev);
+ if (res) {
+ printk(KERN_ERR DRV_PRMT "error registering device %s\n",
+ dev->name);
+ goto errdev;
+ }
+ mb = mdiobus_alloc();
+ if (!mb) {
+ printk(KERN_ERR DRV_PRMT "error allocating mii bus\n");
+ goto errmii;
+ }
+ mb->name = "s6gmac_mii";
+ mb->read = s6mii_read;
+ mb->write = s6mii_write;
+ mb->reset = s6mii_reset;
+ mb->priv = pd;
+ snprintf(mb->id, MII_BUS_ID_SIZE, "0");
+ mb->phy_mask = ~(1 << 0);
+ mb->irq = &pd->mii.irq[0];
+ for (i = 0; i < PHY_MAX_ADDR; i++) {
+ int n = platform_get_irq(pdev, i + 1);
+ if (n < 0)
+ n = PHY_POLL;
+ pd->mii.irq[i] = n;
+ }
+ mdiobus_register(mb);
+ pd->mii.bus = mb;
+ res = s6gmac_phy_start(dev);
+ if (res)
+ return res;
+ platform_set_drvdata(pdev, dev);
+ return 0;
+errmii:
+ unregister_netdev(dev);
+errdev:
+ free_irq(dev->irq, dev);
+errirq:
+ free_netdev(dev);
+ return res;
+}
+
+static int __devexit s6gmac_remove(struct platform_device *pdev)
+{
+ struct net_device *dev = platform_get_drvdata(pdev);
+ if (dev) {
+ struct s6gmac *pd = netdev_priv(dev);
+ mdiobus_unregister(pd->mii.bus);
+ unregister_netdev(dev);
+ free_irq(dev->irq, dev);
+ free_netdev(dev);
+ platform_set_drvdata(pdev, NULL);
+ }
+ return 0;
+}
+
+static struct platform_driver s6gmac_driver = {
+ .probe = s6gmac_probe,
+ .remove = __devexit_p(s6gmac_remove),
+ .driver = {
+ .name = "s6gmac",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init s6gmac_init(void)
+{
+ printk(KERN_INFO DRV_PRMT "S6 GMAC ethernet driver\n");
+ return platform_driver_register(&s6gmac_driver);
+}
+
+
+static void __exit s6gmac_exit(void)
+{
+ platform_driver_unregister(&s6gmac_driver);
+}
+
+module_init(s6gmac_init);
+module_exit(s6gmac_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("S6105 on chip Ethernet driver");
+MODULE_AUTHOR("Oskar Schirmer <os@emlix.com>");
diff --git a/drivers/net/usb/Kconfig b/drivers/net/usb/Kconfig
index 3717569..a906d39 100644
--- a/drivers/net/usb/Kconfig
+++ b/drivers/net/usb/Kconfig
@@ -169,10 +169,12 @@
The Linux-USB CDC Ethernet Gadget driver is an open implementation.
This driver should work with at least the following devices:
+ * Dell Wireless 5530 HSPA
* Ericsson PipeRider (all variants)
+ * Ericsson Mobile Broadband Module (all variants)
* Motorola (DM100 and SB4100)
* Broadcom Cable Modem (reference design)
- * Toshiba PCX1100U
+ * Toshiba (PCX1100U and F3507g)
* ...
This driver creates an interface named "ethX", where X depends on
diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
index 01fd528..4a6aff5 100644
--- a/drivers/net/usb/cdc_ether.c
+++ b/drivers/net/usb/cdc_ether.c
@@ -533,6 +533,31 @@
USB_DEVICE_AND_INTERFACE_INFO(0x0bdb, 0x1900, USB_CLASS_COMM,
USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
.driver_info = (unsigned long) &cdc_info,
+}, {
+ /* Ericsson F3507g ver. 2 */
+ USB_DEVICE_AND_INTERFACE_INFO(0x0bdb, 0x1902, USB_CLASS_COMM,
+ USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
+ .driver_info = (unsigned long) &cdc_info,
+}, {
+ /* Ericsson F3607gw */
+ USB_DEVICE_AND_INTERFACE_INFO(0x0bdb, 0x1904, USB_CLASS_COMM,
+ USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
+ .driver_info = (unsigned long) &cdc_info,
+}, {
+ /* Ericsson F3307 */
+ USB_DEVICE_AND_INTERFACE_INFO(0x0bdb, 0x1906, USB_CLASS_COMM,
+ USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
+ .driver_info = (unsigned long) &cdc_info,
+}, {
+ /* Toshiba F3507g */
+ USB_DEVICE_AND_INTERFACE_INFO(0x0930, 0x130b, USB_CLASS_COMM,
+ USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
+ .driver_info = (unsigned long) &cdc_info,
+}, {
+ /* Dell F3507g */
+ USB_DEVICE_AND_INTERFACE_INFO(0x413c, 0x8147, USB_CLASS_COMM,
+ USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
+ .driver_info = (unsigned long) &cdc_info,
},
{ }, // END
};
diff --git a/drivers/net/usb/cdc_subset.c b/drivers/net/usb/cdc_subset.c
index c66b9c3..ca39ace 100644
--- a/drivers/net/usb/cdc_subset.c
+++ b/drivers/net/usb/cdc_subset.c
@@ -307,9 +307,10 @@
USB_DEVICE (0x1286, 0x8001), // "blob" bootloader
.driver_info = (unsigned long) &blob_info,
}, {
- // Linux Ethernet/RNDIS gadget on pxa210/25x/26x, second config
- // e.g. Gumstix, current OpenZaurus, ...
- USB_DEVICE_VER (0x0525, 0xa4a2, 0x0203, 0x0203),
+ // Linux Ethernet/RNDIS gadget, mostly on PXA, second config
+ // e.g. Gumstix, current OpenZaurus, ... or anything else
+ // that just enables this gadget option.
+ USB_DEVICE (0x0525, 0xa4a2),
.driver_info = (unsigned long) &linuxdev_info,
},
#endif
diff --git a/drivers/net/usb/pegasus.c b/drivers/net/usb/pegasus.c
index 2138535..73acbd2 100644
--- a/drivers/net/usb/pegasus.c
+++ b/drivers/net/usb/pegasus.c
@@ -297,7 +297,7 @@
pegasus->dr.bRequestType = PEGASUS_REQT_WRITE;
pegasus->dr.bRequest = PEGASUS_REQ_SET_REGS;
- pegasus->dr.wValue = 0;
+ pegasus->dr.wValue = cpu_to_le16(0);
pegasus->dr.wIndex = cpu_to_le16(EthCtrl0);
pegasus->dr.wLength = cpu_to_le16(3);
pegasus->ctrl_urb->transfer_buffer_length = 3;
@@ -446,11 +446,12 @@
int i;
__u8 tmp, d[4] = { 0x3f, 0, 0, EPROM_WRITE };
int ret;
+ __le16 le_data = cpu_to_le16(data);
set_registers(pegasus, EpromOffset, 4, d);
enable_eprom_write(pegasus);
set_register(pegasus, EpromOffset, index);
- set_registers(pegasus, EpromData, 2, &data);
+ set_registers(pegasus, EpromData, 2, &le_data);
set_register(pegasus, EpromCtrl, EPROM_WRITE);
for (i = 0; i < REG_TIMEOUT; i++) {
@@ -923,29 +924,32 @@
static inline void disable_net_traffic(pegasus_t * pegasus)
{
- int tmp = 0;
+ __le16 tmp = cpu_to_le16(0);
- set_registers(pegasus, EthCtrl0, 2, &tmp);
+ set_registers(pegasus, EthCtrl0, sizeof(tmp), &tmp);
}
static inline void get_interrupt_interval(pegasus_t * pegasus)
{
- __u8 data[2];
+ u16 data;
+ u8 interval;
- read_eprom_word(pegasus, 4, (__u16 *) data);
+ read_eprom_word(pegasus, 4, &data);
+ interval = data >> 8;
if (pegasus->usb->speed != USB_SPEED_HIGH) {
- if (data[1] < 0x80) {
+ if (interval < 0x80) {
if (netif_msg_timer(pegasus))
dev_info(&pegasus->intf->dev, "intr interval "
"changed from %ums to %ums\n",
- data[1], 0x80);
- data[1] = 0x80;
+ interval, 0x80);
+ interval = 0x80;
+ data = (data & 0x00FF) | ((u16)interval << 8);
#ifdef PEGASUS_WRITE_EEPROM
- write_eprom_word(pegasus, 4, *(__u16 *) data);
+ write_eprom_word(pegasus, 4, data);
#endif
}
}
- pegasus->intr_interval = data[1];
+ pegasus->intr_interval = interval;
}
static void set_carrier(struct net_device *net)
@@ -1299,7 +1303,8 @@
/* Special quirk to keep the driver from handling the Belkin Bluetooth
* dongle which happens to have the same ID.
*/
- if ((udd->idVendor == VENDOR_BELKIN && udd->idProduct == 0x0121) &&
+ if ((udd->idVendor == cpu_to_le16(VENDOR_BELKIN)) &&
+ (udd->idProduct == cpu_to_le16(0x0121)) &&
(udd->bDeviceClass == USB_CLASS_WIRELESS_CONTROLLER) &&
(udd->bDeviceProtocol == 1))
return 1;
diff --git a/drivers/net/via-velocity.c b/drivers/net/via-velocity.c
index b02f7ad..3ba3595 100644
--- a/drivers/net/via-velocity.c
+++ b/drivers/net/via-velocity.c
@@ -1847,7 +1847,7 @@
*/
if (tdinfo->skb_dma) {
- pktlen = (skb->len > ETH_ZLEN ? : ETH_ZLEN);
+ pktlen = max_t(unsigned int, skb->len, ETH_ZLEN);
for (i = 0; i < tdinfo->nskb_dma; i++) {
#ifdef VELOCITY_ZERO_COPY_SUPPORT
pci_unmap_single(vptr->pdev, tdinfo->skb_dma[i], le16_to_cpu(td->tdesc1.len), PCI_DMA_TODEVICE);
diff --git a/drivers/net/wireless/ath/ath5k/base.c b/drivers/net/wireless/ath/ath5k/base.c
index 55f7de0..ea04515 100644
--- a/drivers/net/wireless/ath/ath5k/base.c
+++ b/drivers/net/wireless/ath/ath5k/base.c
@@ -538,6 +538,7 @@
sc->iobase = mem; /* So we can unmap it on detach */
sc->cachelsz = csz * sizeof(u32); /* convert to bytes */
sc->opmode = NL80211_IFTYPE_STATION;
+ sc->bintval = 1000;
mutex_init(&sc->lock);
spin_lock_init(&sc->rxbuflock);
spin_lock_init(&sc->txbuflock);
@@ -686,6 +687,13 @@
if (err)
return err;
+ /*
+ * Suspend/Resume resets the PCI configuration space, so we have to
+ * re-disable the RETRY_TIMEOUT register (0x41) to keep
+ * PCI Tx retries from interfering with C3 CPU state
+ */
+ pci_write_config_byte(pdev, 0x41, 0);
+
err = request_irq(pdev->irq, ath5k_intr, IRQF_SHARED, "ath", sc);
if (err) {
ATH5K_ERR(sc, "request_irq failed\n");
@@ -2748,9 +2756,6 @@
goto end;
}
- /* Set to a reasonable value. Note that this will
- * be set to mac80211's value at ath5k_config(). */
- sc->bintval = 1000;
ath5k_hw_set_lladdr(sc->ah, conf->mac_addr);
ret = 0;
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index 9f49a32..66a6c1f 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -1196,8 +1196,8 @@
ath9k_hw_phy_disable(ah);
ath9k_hw_configpcipowersave(ah, 1);
- ath9k_hw_setpower(ah, ATH9K_PM_FULL_SLEEP);
ath9k_ps_restore(sc);
+ ath9k_hw_setpower(ah, ATH9K_PM_FULL_SLEEP);
}
/*******************/
diff --git a/drivers/net/wireless/ath/ath9k/pci.c b/drivers/net/wireless/ath/ath9k/pci.c
index ccdf20a..170c5b3 100644
--- a/drivers/net/wireless/ath/ath9k/pci.c
+++ b/drivers/net/wireless/ath/ath9k/pci.c
@@ -87,6 +87,7 @@
struct ath_softc *sc;
struct ieee80211_hw *hw;
u8 csz;
+ u32 val;
int ret = 0;
struct ath_hw *ah;
@@ -133,6 +134,14 @@
pci_set_master(pdev);
+ /*
+ * Disable the RETRY_TIMEOUT register (0x41) to keep
+ * PCI Tx retries from interfering with C3 CPU state.
+ */
+ pci_read_config_dword(pdev, 0x40, &val);
+ if ((val & 0x0000ff00) != 0)
+ pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
+
ret = pci_request_region(pdev, 0, "ath9k");
if (ret) {
dev_err(&pdev->dev, "PCI memory region reserve error\n");
@@ -239,12 +248,21 @@
struct ieee80211_hw *hw = pci_get_drvdata(pdev);
struct ath_wiphy *aphy = hw->priv;
struct ath_softc *sc = aphy->sc;
+ u32 val;
int err;
err = pci_enable_device(pdev);
if (err)
return err;
pci_restore_state(pdev);
+ /*
+ * Suspend/Resume resets the PCI configuration space, so we have to
+ * re-disable the RETRY_TIMEOUT register (0x41) to keep
+ * PCI Tx retries from interfering with C3 CPU state
+ */
+ pci_read_config_dword(pdev, 0x40, &val);
+ if ((val & 0x0000ff00) != 0)
+ pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
/* Enable LED */
ath9k_hw_cfg_output(sc->sc_ah, ATH_LED_PIN,
diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c
index f99f3a7..cece1c4 100644
--- a/drivers/net/wireless/ath/ath9k/recv.c
+++ b/drivers/net/wireless/ath/ath9k/recv.c
@@ -539,11 +539,14 @@
if (ath_beacon_dtim_pending_cab(skb)) {
/*
* Remain awake waiting for buffered broadcast/multicast
- * frames.
+ * frames. If the last broadcast/multicast frame is not
+ * received properly, the next beacon frame will work as
+ * a backup trigger for returning into NETWORK SLEEP state,
+ * so we are waiting for it as well.
*/
DPRINTF(sc, ATH_DBG_PS, "Received DTIM beacon indicating "
"buffered broadcast/multicast frame(s)\n");
- sc->sc_flags |= SC_OP_WAIT_FOR_CAB;
+ sc->sc_flags |= SC_OP_WAIT_FOR_CAB | SC_OP_WAIT_FOR_BEACON;
return;
}
diff --git a/drivers/net/wireless/iwmc3200wifi/iwm.h b/drivers/net/wireless/iwmc3200wifi/iwm.h
index 635c16e..77c339f 100644
--- a/drivers/net/wireless/iwmc3200wifi/iwm.h
+++ b/drivers/net/wireless/iwmc3200wifi/iwm.h
@@ -288,6 +288,7 @@
u8 *eeprom;
struct timer_list watchdog;
struct work_struct reset_worker;
+ struct mutex mutex;
struct rfkill *rfkill;
char private[0] __attribute__((__aligned__(NETDEV_ALIGN)));
@@ -315,8 +316,11 @@
void *iwm_if_alloc(int sizeof_bus, struct device *dev,
struct iwm_if_ops *if_ops);
void iwm_if_free(struct iwm_priv *iwm);
+int iwm_if_add(struct iwm_priv *iwm);
+void iwm_if_remove(struct iwm_priv *iwm);
int iwm_mode_to_nl80211_iftype(int mode);
int iwm_priv_init(struct iwm_priv *iwm);
+void iwm_priv_deinit(struct iwm_priv *iwm);
void iwm_reset(struct iwm_priv *iwm);
void iwm_tx_credit_init_pools(struct iwm_priv *iwm,
struct iwm_umac_notif_alive *alive);
diff --git a/drivers/net/wireless/iwmc3200wifi/main.c b/drivers/net/wireless/iwmc3200wifi/main.c
index 6a2640f..8be206d 100644
--- a/drivers/net/wireless/iwmc3200wifi/main.c
+++ b/drivers/net/wireless/iwmc3200wifi/main.c
@@ -112,6 +112,9 @@
iwm_send_umac_stats_req(iwm, 0);
}
+int __iwm_up(struct iwm_priv *iwm);
+int __iwm_down(struct iwm_priv *iwm);
+
static void iwm_reset_worker(struct work_struct *work)
{
struct iwm_priv *iwm;
@@ -120,6 +123,19 @@
iwm = container_of(work, struct iwm_priv, reset_worker);
+ /*
+ * XXX: The iwm->mutex is introduced purely for this reset work,
+ * because the other users for iwm_up and iwm_down are only netdev
+ * ndo_open and ndo_stop which are already protected by rtnl.
+ * Please remove iwm->mutex together if iwm_reset_worker() is not
+ * required in the future.
+ */
+ if (!mutex_trylock(&iwm->mutex)) {
+ IWM_WARN(iwm, "We are in the middle of interface bringing "
+ "UP/DOWN. Skip driver resetting.\n");
+ return;
+ }
+
if (iwm->umac_profile_active) {
profile = kmalloc(sizeof(struct iwm_umac_profile), GFP_KERNEL);
if (profile)
@@ -128,10 +144,10 @@
IWM_ERR(iwm, "Couldn't alloc memory for profile\n");
}
- iwm_down(iwm);
+ __iwm_down(iwm);
while (retry++ < 3) {
- ret = iwm_up(iwm);
+ ret = __iwm_up(iwm);
if (!ret)
break;
@@ -142,7 +158,7 @@
IWM_WARN(iwm, "iwm_up() failed: %d\n", ret);
kfree(profile);
- return;
+ goto out;
}
if (profile) {
@@ -151,6 +167,9 @@
iwm_send_mlme_profile(iwm);
kfree(profile);
}
+
+ out:
+ mutex_unlock(&iwm->mutex);
}
static void iwm_watchdog(unsigned long data)
@@ -215,10 +234,21 @@
init_timer(&iwm->watchdog);
iwm->watchdog.function = iwm_watchdog;
iwm->watchdog.data = (unsigned long)iwm;
+ mutex_init(&iwm->mutex);
return 0;
}
+void iwm_priv_deinit(struct iwm_priv *iwm)
+{
+ int i;
+
+ for (i = 0; i < IWM_TX_QUEUES; i++)
+ destroy_workqueue(iwm->txq[i].wq);
+
+ destroy_workqueue(iwm->rx_wq);
+}
+
/*
* We reset all the structures, and we reset the UMAC.
* After calling this routine, you're expected to reload
@@ -466,7 +496,7 @@
iwm_rx_free(iwm);
- cancel_delayed_work(&iwm->stats_request);
+ cancel_delayed_work_sync(&iwm->stats_request);
memset(wstats, 0, sizeof(struct iw_statistics));
wstats->qual.updated = IW_QUAL_ALL_INVALID;
@@ -511,7 +541,7 @@
return 0;
}
-int iwm_up(struct iwm_priv *iwm)
+int __iwm_up(struct iwm_priv *iwm)
{
int ret;
struct iwm_notif *notif_reboot, *notif_ack = NULL;
@@ -647,7 +677,18 @@
return -EIO;
}
-int iwm_down(struct iwm_priv *iwm)
+int iwm_up(struct iwm_priv *iwm)
+{
+ int ret;
+
+ mutex_lock(&iwm->mutex);
+ ret = __iwm_up(iwm);
+ mutex_unlock(&iwm->mutex);
+
+ return ret;
+}
+
+int __iwm_down(struct iwm_priv *iwm)
{
int ret;
@@ -678,3 +719,14 @@
return 0;
}
+
+int iwm_down(struct iwm_priv *iwm)
+{
+ int ret;
+
+ mutex_lock(&iwm->mutex);
+ ret = __iwm_down(iwm);
+ mutex_unlock(&iwm->mutex);
+
+ return ret;
+}
diff --git a/drivers/net/wireless/iwmc3200wifi/netdev.c b/drivers/net/wireless/iwmc3200wifi/netdev.c
index 68e2c3b..aaa20c6 100644
--- a/drivers/net/wireless/iwmc3200wifi/netdev.c
+++ b/drivers/net/wireless/iwmc3200wifi/netdev.c
@@ -114,32 +114,31 @@
iwm = wdev_to_iwm(wdev);
iwm->bus_ops = if_ops;
iwm->wdev = wdev;
- iwm_priv_init(iwm);
+
+ ret = iwm_priv_init(iwm);
+ if (ret) {
+ dev_err(dev, "failed to init iwm_priv\n");
+ goto out_wdev;
+ }
+
wdev->iftype = iwm_mode_to_nl80211_iftype(iwm->conf.mode);
- ndev = alloc_netdev_mq(0, "wlan%d", ether_setup,
- IWM_TX_QUEUES);
+ ndev = alloc_netdev_mq(0, "wlan%d", ether_setup, IWM_TX_QUEUES);
if (!ndev) {
dev_err(dev, "no memory for network device instance\n");
- goto out_wdev;
+ goto out_priv;
}
ndev->netdev_ops = &iwm_netdev_ops;
ndev->wireless_handlers = &iwm_iw_handler_def;
ndev->ieee80211_ptr = wdev;
SET_NETDEV_DEV(ndev, wiphy_dev(wdev->wiphy));
- ret = register_netdev(ndev);
- if (ret < 0) {
- dev_err(dev, "Failed to register netdev: %d\n", ret);
- goto out_ndev;
- }
-
wdev->netdev = ndev;
return iwm;
- out_ndev:
- free_netdev(ndev);
+ out_priv:
+ iwm_priv_deinit(iwm);
out_wdev:
iwm_wdev_free(iwm);
@@ -148,15 +147,29 @@
void iwm_if_free(struct iwm_priv *iwm)
{
- int i;
-
if (!iwm_to_ndev(iwm))
return;
- unregister_netdev(iwm_to_ndev(iwm));
free_netdev(iwm_to_ndev(iwm));
iwm_wdev_free(iwm);
- destroy_workqueue(iwm->rx_wq);
- for (i = 0; i < IWM_TX_QUEUES; i++)
- destroy_workqueue(iwm->txq[i].wq);
+ iwm_priv_deinit(iwm);
+}
+
+int iwm_if_add(struct iwm_priv *iwm)
+{
+ struct net_device *ndev = iwm_to_ndev(iwm);
+ int ret;
+
+ ret = register_netdev(ndev);
+ if (ret < 0) {
+ dev_err(&ndev->dev, "Failed to register netdev: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+void iwm_if_remove(struct iwm_priv *iwm)
+{
+ unregister_netdev(iwm_to_ndev(iwm));
}
diff --git a/drivers/net/wireless/iwmc3200wifi/sdio.c b/drivers/net/wireless/iwmc3200wifi/sdio.c
index b54da67..9166818 100644
--- a/drivers/net/wireless/iwmc3200wifi/sdio.c
+++ b/drivers/net/wireless/iwmc3200wifi/sdio.c
@@ -454,10 +454,18 @@
INIT_WORK(&hw->isr_worker, iwm_sdio_isr_worker);
+ ret = iwm_if_add(iwm);
+ if (ret) {
+ dev_err(dev, "add SDIO interface failed\n");
+ goto destroy_wq;
+ }
+
dev_info(dev, "IWM SDIO probe\n");
return 0;
+ destroy_wq:
+ destroy_workqueue(hw->isr_wq);
debugfs_exit:
iwm_debugfs_exit(iwm);
if_free:
@@ -471,9 +479,10 @@
struct iwm_priv *iwm = hw_to_iwm(hw);
struct device *dev = &func->dev;
+ iwm_if_remove(iwm);
+ destroy_workqueue(hw->isr_wq);
iwm_debugfs_exit(iwm);
iwm_if_free(iwm);
- destroy_workqueue(hw->isr_wq);
sdio_set_drvdata(func, NULL);
diff --git a/drivers/net/wireless/zd1211rw/zd_usb.c b/drivers/net/wireless/zd1211rw/zd_usb.c
index f0e5e94..14a19ba 100644
--- a/drivers/net/wireless/zd1211rw/zd_usb.c
+++ b/drivers/net/wireless/zd1211rw/zd_usb.c
@@ -67,6 +67,7 @@
{ USB_DEVICE(0x079b, 0x0062), .driver_info = DEVICE_ZD1211B },
{ USB_DEVICE(0x1582, 0x6003), .driver_info = DEVICE_ZD1211B },
{ USB_DEVICE(0x050d, 0x705c), .driver_info = DEVICE_ZD1211B },
+ { USB_DEVICE(0x083a, 0xe503), .driver_info = DEVICE_ZD1211B },
{ USB_DEVICE(0x083a, 0xe506), .driver_info = DEVICE_ZD1211B },
{ USB_DEVICE(0x083a, 0x4505), .driver_info = DEVICE_ZD1211B },
{ USB_DEVICE(0x0471, 0x1236), .driver_info = DEVICE_ZD1211B },
diff --git a/drivers/parport/parport_pc.c b/drivers/parport/parport_pc.c
index 151bf5b..1032d5f 100644
--- a/drivers/parport/parport_pc.c
+++ b/drivers/parport/parport_pc.c
@@ -1471,11 +1471,13 @@
static void __devinit winbond_check(int io, int key)
{
- int devid, devrev, oldid, x_devid, x_devrev, x_oldid;
+ int origval, devid, devrev, oldid, x_devid, x_devrev, x_oldid;
if (!request_region(io, 3, __func__))
return;
+ origval = inb(io); /* Save original value */
+
/* First probe without key */
outb(0x20, io);
x_devid = inb(io + 1);
@@ -1495,6 +1497,8 @@
oldid = inb(io + 1);
outb(0xaa, io); /* Magic Seal */
+ outb(origval, io); /* in case we poked some entirely different hardware */
+
if ((x_devid == devid) && (x_devrev == devrev) && (x_oldid == oldid))
goto out; /* protection against false positives */
@@ -1505,11 +1509,15 @@
static void __devinit winbond_check2(int io, int key)
{
- int devid, devrev, oldid, x_devid, x_devrev, x_oldid;
+ int origval[3], devid, devrev, oldid, x_devid, x_devrev, x_oldid;
if (!request_region(io, 3, __func__))
return;
+ origval[0] = inb(io); /* Save original values */
+ origval[1] = inb(io + 1);
+ origval[2] = inb(io + 2);
+
/* First probe without the key */
outb(0x20, io + 2);
x_devid = inb(io + 2);
@@ -1528,6 +1536,10 @@
oldid = inb(io + 2);
outb(0xaa, io); /* Magic Seal */
+ outb(origval[0], io); /* in case we poked some entirely different hardware */
+ outb(origval[1], io + 1);
+ outb(origval[2], io + 2);
+
if (x_devid == devid && x_devrev == devrev && x_oldid == oldid)
goto out; /* protection against false positives */
@@ -1538,11 +1550,13 @@
static void __devinit smsc_check(int io, int key)
{
- int id, rev, oldid, oldrev, x_id, x_rev, x_oldid, x_oldrev;
+ int origval, id, rev, oldid, oldrev, x_id, x_rev, x_oldid, x_oldrev;
if (!request_region(io, 3, __func__))
return;
+ origval = inb(io); /* Save original value */
+
/* First probe without the key */
outb(0x0d, io);
x_oldid = inb(io + 1);
@@ -1566,6 +1580,8 @@
rev = inb(io + 1);
outb(0xaa, io); /* Magic Seal */
+ outb(origval, io); /* in case we poked some entirely different hardware */
+
if (x_id == id && x_oldrev == oldrev &&
x_oldid == oldid && x_rev == rev)
goto out; /* protection against false positives */
@@ -1602,11 +1618,12 @@
static void __devinit detect_and_report_it87(void)
{
u16 dev;
- u8 r;
+ u8 origval, r;
if (verbose_probing)
printk(KERN_DEBUG "IT8705 Super-IO detection, now testing port 2E ...\n");
- if (!request_region(0x2e, 1, __func__))
+ if (!request_region(0x2e, 2, __func__))
return;
+ origval = inb(0x2e); /* Save original value */
outb(0x87, 0x2e);
outb(0x01, 0x2e);
outb(0x55, 0x2e);
@@ -1626,8 +1643,10 @@
outb(r | 8, 0x2F);
outb(0x02, 0x2E); /* Lock */
outb(0x02, 0x2F);
+ } else {
+ outb(origval, 0x2e); /* Oops, sorry to disturb */
}
- release_region(0x2e, 1);
+ release_region(0x2e, 2);
}
#endif /* CONFIG_PARPORT_PC_SUPERIO */
@@ -2271,6 +2290,9 @@
if (IS_ERR(pdev))
return NULL;
dev = &pdev->dev;
+
+ dev->coherent_dma_mask = DMA_BIT_MASK(24);
+ dev->dma_mask = &dev->coherent_dma_mask;
}
ops = kmalloc(sizeof(struct parport_operations), GFP_KERNEL);
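
The Super-IO probes above now follow one save/restore discipline: snapshot the config/index
port(s) before running the probe sequence and write the snapshot back afterwards, so unrelated
hardware that happens to decode the same I/O address is left exactly as it was found (the
platform-device branch also picks up a 24-bit DMA mask, presumably so ISA-style DMA buffers
land below 16 MB). A stripped-down sketch of the probe pattern; the region name, the 0x20
index and EXPECTED_ID are stand-ins only:

	#define EXPECTED_ID 0x87	/* stand-in device id */

	static void __devinit probe_superio(int io)
	{
		int origval, devid;

		if (!request_region(io, 3, "superio-probe"))
			return;
		origval = inb(io);	/* remember what was there */
		outb(0x20, io);		/* run the probe sequence */
		devid = inb(io + 1);
		outb(origval, io);	/* restore before deciding anything */
		if (devid == EXPECTED_ID) {
			/* found a known chip, go configure it */
		}
		release_region(io, 3);
	}
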
diff --git a/drivers/pci/Makefile b/drivers/pci/Makefile
index b77ae67..1ebd6b4 100644
--- a/drivers/pci/Makefile
+++ b/drivers/pci/Makefile
@@ -2,10 +2,11 @@
# Makefile for the PCI bus specific drivers.
#
-obj-y += access.o bus.o probe.o remove.o pci.o quirks.o slot.o \
+obj-y += access.o bus.o probe.o remove.o pci.o quirks.o \
pci-driver.o search.o pci-sysfs.o rom.o setup-res.o \
irq.o
obj-$(CONFIG_PROC_FS) += proc.o
+obj-$(CONFIG_SYSFS) += slot.o
# Build PCI Express stuff if needed
obj-$(CONFIG_PCIEPORTBUS) += pcie/
diff --git a/drivers/pci/access.c b/drivers/pci/access.c
index 0f37065..db23200 100644
--- a/drivers/pci/access.c
+++ b/drivers/pci/access.c
@@ -66,6 +66,25 @@
EXPORT_SYMBOL(pci_bus_write_config_word);
EXPORT_SYMBOL(pci_bus_write_config_dword);
+/**
+ * pci_bus_set_ops - Set raw operations of pci bus
+ * @bus: pci bus struct
+ * @ops: new raw operations
+ *
+ * Return previous raw operations
+ */
+struct pci_ops *pci_bus_set_ops(struct pci_bus *bus, struct pci_ops *ops)
+{
+ struct pci_ops *old_ops;
+ unsigned long flags;
+
+ spin_lock_irqsave(&pci_lock, flags);
+ old_ops = bus->ops;
+ bus->ops = ops;
+ spin_unlock_irqrestore(&pci_lock, flags);
+ return old_ops;
+}
+EXPORT_SYMBOL(pci_bus_set_ops);
/**
* pci_read_vpd - Read one entry from Vital Product Data
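
pci_bus_set_ops() swaps the config-space accessors of a bus and hands back the previous ones,
which is the hook error-injection or config-snooping code needs so the bus can be restored
later. A rough usage sketch; intercept_ops and its read/write hooks are hypothetical:

	static struct pci_ops *orig_ops;	/* ops saved at interception time */

	static int my_config_read(struct pci_bus *bus, unsigned int devfn,
				  int where, int size, u32 *val)
	{
		/* hypothetical hook: observe (or fuzz) the access, then defer */
		return orig_ops->read(bus, devfn, where, size, val);
	}

	static int my_config_write(struct pci_bus *bus, unsigned int devfn,
				   int where, int size, u32 val)
	{
		return orig_ops->write(bus, devfn, where, size, val);
	}

	static struct pci_ops intercept_ops = {
		.read  = my_config_read,
		.write = my_config_write,
	};

	void start_intercept(struct pci_bus *bus)
	{
		orig_ops = pci_bus_set_ops(bus, &intercept_ops);
	}

	void stop_intercept(struct pci_bus *bus)
	{
		pci_bus_set_ops(bus, orig_ops);	/* restore the real accessors */
	}
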
diff --git a/drivers/pci/bus.c b/drivers/pci/bus.c
index 97a8194..cef28a7 100644
--- a/drivers/pci/bus.c
+++ b/drivers/pci/bus.c
@@ -41,9 +41,14 @@
void *alignf_data)
{
int i, ret = -ENOMEM;
+ resource_size_t max = -1;
type_mask |= IORESOURCE_IO | IORESOURCE_MEM;
+ /* don't allocate too high if the pref mem doesn't support 64bit*/
+ if (!(res->flags & IORESOURCE_MEM_64))
+ max = PCIBIOS_MAX_MEM_32;
+
for (i = 0; i < PCI_BUS_NUM_RESOURCES; i++) {
struct resource *r = bus->resource[i];
if (!r)
@@ -62,7 +67,7 @@
/* Ok, try it out.. */
ret = allocate_resource(r, res, size,
r->start ? : min,
- -1, align,
+ max, align,
alignf, alignf_data);
if (ret == 0)
break;
@@ -201,13 +206,18 @@
* Walk the given bus, including any bridged devices
* on buses under this bus. Call the provided callback
* on each device found.
+ *
+ * We check the return of @cb each time. If it returns anything
+ * other than 0, we break out.
+ *
*/
-void pci_walk_bus(struct pci_bus *top, void (*cb)(struct pci_dev *, void *),
+void pci_walk_bus(struct pci_bus *top, int (*cb)(struct pci_dev *, void *),
void *userdata)
{
struct pci_dev *dev;
struct pci_bus *bus;
struct list_head *next;
+ int retval;
bus = top;
down_read(&pci_bus_sem);
@@ -231,8 +241,10 @@
/* Run device routines with the device locked */
down(&dev->dev.sem);
- cb(dev, userdata);
+ retval = cb(dev, userdata);
up(&dev->dev.sem);
+ if (retval)
+ break;
}
up_read(&pci_bus_sem);
}
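
Since the callback now returns int, a walk can be cut short as soon as it has found what it
was after. A minimal sketch; the callback name and match condition are illustrative only (a
real user would also take a reference with pci_dev_get() before publishing the pointer):

	static int find_first_intel_dev(struct pci_dev *dev, void *data)
	{
		if (dev->vendor == PCI_VENDOR_ID_INTEL) {
			*(struct pci_dev **)data = dev;
			return 1;	/* non-zero stops the walk */
		}
		return 0;		/* zero keeps walking */
	}

	static struct pci_dev *find_intel_below(struct pci_bus *bus)
	{
		struct pci_dev *found = NULL;

		pci_walk_bus(bus, find_first_intel_dev, &found);
		return found;	/* NULL if nothing matched */
	}
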
diff --git a/drivers/pci/dmar.c b/drivers/pci/dmar.c
index fa3a113..7b287cb 100644
--- a/drivers/pci/dmar.c
+++ b/drivers/pci/dmar.c
@@ -267,6 +267,84 @@
}
return ret;
}
+
+static LIST_HEAD(dmar_atsr_units);
+
+static int __init dmar_parse_one_atsr(struct acpi_dmar_header *hdr)
+{
+ struct acpi_dmar_atsr *atsr;
+ struct dmar_atsr_unit *atsru;
+
+ atsr = container_of(hdr, struct acpi_dmar_atsr, header);
+ atsru = kzalloc(sizeof(*atsru), GFP_KERNEL);
+ if (!atsru)
+ return -ENOMEM;
+
+ atsru->hdr = hdr;
+ atsru->include_all = atsr->flags & 0x1;
+
+ list_add(&atsru->list, &dmar_atsr_units);
+
+ return 0;
+}
+
+static int __init atsr_parse_dev(struct dmar_atsr_unit *atsru)
+{
+ int rc;
+ struct acpi_dmar_atsr *atsr;
+
+ if (atsru->include_all)
+ return 0;
+
+ atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
+ rc = dmar_parse_dev_scope((void *)(atsr + 1),
+ (void *)atsr + atsr->header.length,
+ &atsru->devices_cnt, &atsru->devices,
+ atsr->segment);
+ if (rc || !atsru->devices_cnt) {
+ list_del(&atsru->list);
+ kfree(atsru);
+ }
+
+ return rc;
+}
+
+int dmar_find_matched_atsr_unit(struct pci_dev *dev)
+{
+ int i;
+ struct pci_bus *bus;
+ struct acpi_dmar_atsr *atsr;
+ struct dmar_atsr_unit *atsru;
+
+ list_for_each_entry(atsru, &dmar_atsr_units, list) {
+ atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
+ if (atsr->segment == pci_domain_nr(dev->bus))
+ goto found;
+ }
+
+ return 0;
+
+found:
+ for (bus = dev->bus; bus; bus = bus->parent) {
+ struct pci_dev *bridge = bus->self;
+
+ if (!bridge || !bridge->is_pcie ||
+ bridge->pcie_type == PCI_EXP_TYPE_PCI_BRIDGE)
+ return 0;
+
+ if (bridge->pcie_type == PCI_EXP_TYPE_ROOT_PORT) {
+ for (i = 0; i < atsru->devices_cnt; i++)
+ if (atsru->devices[i] == bridge)
+ return 1;
+ break;
+ }
+ }
+
+ if (atsru->include_all)
+ return 1;
+
+ return 0;
+}
#endif
static void __init
@@ -274,22 +352,28 @@
{
struct acpi_dmar_hardware_unit *drhd;
struct acpi_dmar_reserved_memory *rmrr;
+ struct acpi_dmar_atsr *atsr;
switch (header->type) {
case ACPI_DMAR_TYPE_HARDWARE_UNIT:
- drhd = (struct acpi_dmar_hardware_unit *)header;
+ drhd = container_of(header, struct acpi_dmar_hardware_unit,
+ header);
printk (KERN_INFO PREFIX
- "DRHD (flags: 0x%08x)base: 0x%016Lx\n",
- drhd->flags, (unsigned long long)drhd->address);
+ "DRHD base: %#016Lx flags: %#x\n",
+ (unsigned long long)drhd->address, drhd->flags);
break;
case ACPI_DMAR_TYPE_RESERVED_MEMORY:
- rmrr = (struct acpi_dmar_reserved_memory *)header;
-
+ rmrr = container_of(header, struct acpi_dmar_reserved_memory,
+ header);
printk (KERN_INFO PREFIX
- "RMRR base: 0x%016Lx end: 0x%016Lx\n",
+ "RMRR base: %#016Lx end: %#016Lx\n",
(unsigned long long)rmrr->base_address,
(unsigned long long)rmrr->end_address);
break;
+ case ACPI_DMAR_TYPE_ATSR:
+ atsr = container_of(header, struct acpi_dmar_atsr, header);
+ printk(KERN_INFO PREFIX "ATSR flags: %#x\n", atsr->flags);
+ break;
}
}
@@ -363,6 +447,11 @@
ret = dmar_parse_one_rmrr(entry_header);
#endif
break;
+ case ACPI_DMAR_TYPE_ATSR:
+#ifdef CONFIG_DMAR
+ ret = dmar_parse_one_atsr(entry_header);
+#endif
+ break;
default:
printk(KERN_WARNING PREFIX
"Unknown DMAR structure type\n");
@@ -431,11 +520,19 @@
#ifdef CONFIG_DMAR
{
struct dmar_rmrr_unit *rmrr, *rmrr_n;
+ struct dmar_atsr_unit *atsr, *atsr_n;
+
list_for_each_entry_safe(rmrr, rmrr_n, &dmar_rmrr_units, list) {
ret = rmrr_parse_dev(rmrr);
if (ret)
return ret;
}
+
+ list_for_each_entry_safe(atsr, atsr_n, &dmar_atsr_units, list) {
+ ret = atsr_parse_dev(atsr);
+ if (ret)
+ return ret;
+ }
}
#endif
@@ -468,6 +565,9 @@
#ifdef CONFIG_DMAR
if (list_empty(&dmar_rmrr_units))
printk(KERN_INFO PREFIX "No RMRR found\n");
+
+ if (list_empty(&dmar_atsr_units))
+ printk(KERN_INFO PREFIX "No ATSR found\n");
#endif
#ifdef CONFIG_INTR_REMAP
@@ -515,6 +615,7 @@
u32 ver;
static int iommu_allocated = 0;
int agaw = 0;
+ int msagaw = 0;
iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
if (!iommu)
@@ -535,12 +636,20 @@
agaw = iommu_calculate_agaw(iommu);
if (agaw < 0) {
printk(KERN_ERR
- "Cannot get a valid agaw for iommu (seq_id = %d)\n",
+ "Cannot get a valid agaw for iommu (seq_id = %d)\n",
+ iommu->seq_id);
+ goto error;
+ }
+ msagaw = iommu_calculate_max_sagaw(iommu);
+ if (msagaw < 0) {
+ printk(KERN_ERR
+ "Cannot get a valid max agaw for iommu (seq_id = %d)\n",
iommu->seq_id);
goto error;
}
#endif
iommu->agaw = agaw;
+ iommu->msagaw = msagaw;
/* the registers might be more than one page */
map_size = max_t(int, ecap_max_iotlb_offset(iommu->ecap),
@@ -590,7 +699,8 @@
*/
static inline void reclaim_free_desc(struct q_inval *qi)
{
- while (qi->desc_status[qi->free_tail] == QI_DONE) {
+ while (qi->desc_status[qi->free_tail] == QI_DONE ||
+ qi->desc_status[qi->free_tail] == QI_ABORT) {
qi->desc_status[qi->free_tail] = QI_FREE;
qi->free_tail = (qi->free_tail + 1) % QI_LENGTH;
qi->free_cnt++;
@@ -600,10 +710,13 @@
static int qi_check_fault(struct intel_iommu *iommu, int index)
{
u32 fault;
- int head;
+ int head, tail;
struct q_inval *qi = iommu->qi;
int wait_index = (index + 1) % QI_LENGTH;
+ if (qi->desc_status[wait_index] == QI_ABORT)
+ return -EAGAIN;
+
fault = readl(iommu->reg + DMAR_FSTS_REG);
/*
@@ -613,7 +726,11 @@
*/
if (fault & DMA_FSTS_IQE) {
head = readl(iommu->reg + DMAR_IQH_REG);
- if ((head >> 4) == index) {
+ if ((head >> DMAR_IQ_SHIFT) == index) {
+ printk(KERN_ERR "VT-d detected invalid descriptor: "
+ "low=%llx, high=%llx\n",
+ (unsigned long long)qi->desc[index].low,
+ (unsigned long long)qi->desc[index].high);
memcpy(&qi->desc[index], &qi->desc[wait_index],
sizeof(struct qi_desc));
__iommu_flush_cache(iommu, &qi->desc[index],
@@ -623,6 +740,32 @@
}
}
+ /*
+ * If ITE happens, all pending wait_desc commands are aborted.
+ * No new descriptors are fetched until the ITE is cleared.
+ */
+ if (fault & DMA_FSTS_ITE) {
+ head = readl(iommu->reg + DMAR_IQH_REG);
+ head = ((head >> DMAR_IQ_SHIFT) - 1 + QI_LENGTH) % QI_LENGTH;
+ head |= 1;
+ tail = readl(iommu->reg + DMAR_IQT_REG);
+ tail = ((tail >> DMAR_IQ_SHIFT) - 1 + QI_LENGTH) % QI_LENGTH;
+
+ writel(DMA_FSTS_ITE, iommu->reg + DMAR_FSTS_REG);
+
+ do {
+ if (qi->desc_status[head] == QI_IN_USE)
+ qi->desc_status[head] = QI_ABORT;
+ head = (head - 2 + QI_LENGTH) % QI_LENGTH;
+ } while (head != tail);
+
+ if (qi->desc_status[wait_index] == QI_ABORT)
+ return -EAGAIN;
+ }
+
+ if (fault & DMA_FSTS_ICE)
+ writel(DMA_FSTS_ICE, iommu->reg + DMAR_FSTS_REG);
+
return 0;
}
@@ -632,7 +775,7 @@
*/
int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
{
- int rc = 0;
+ int rc;
struct q_inval *qi = iommu->qi;
struct qi_desc *hw, wait_desc;
int wait_index, index;
@@ -643,6 +786,9 @@
hw = qi->desc;
+restart:
+ rc = 0;
+
spin_lock_irqsave(&qi->q_lock, flags);
while (qi->free_cnt < 3) {
spin_unlock_irqrestore(&qi->q_lock, flags);
@@ -673,7 +819,7 @@
* update the HW tail register indicating the presence of
* new descriptors.
*/
- writel(qi->free_head << 4, iommu->reg + DMAR_IQT_REG);
+ writel(qi->free_head << DMAR_IQ_SHIFT, iommu->reg + DMAR_IQT_REG);
while (qi->desc_status[wait_index] != QI_DONE) {
/*
@@ -685,18 +831,21 @@
*/
rc = qi_check_fault(iommu, index);
if (rc)
- goto out;
+ break;
spin_unlock(&qi->q_lock);
cpu_relax();
spin_lock(&qi->q_lock);
}
-out:
- qi->desc_status[index] = qi->desc_status[wait_index] = QI_DONE;
+
+ qi->desc_status[index] = QI_DONE;
reclaim_free_desc(qi);
spin_unlock_irqrestore(&qi->q_lock, flags);
+ if (rc == -EAGAIN)
+ goto restart;
+
return rc;
}
@@ -714,41 +863,26 @@
qi_submit_sync(&desc, iommu);
}
-int qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid, u8 fm,
- u64 type, int non_present_entry_flush)
+void qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid, u8 fm,
+ u64 type)
{
struct qi_desc desc;
- if (non_present_entry_flush) {
- if (!cap_caching_mode(iommu->cap))
- return 1;
- else
- did = 0;
- }
-
desc.low = QI_CC_FM(fm) | QI_CC_SID(sid) | QI_CC_DID(did)
| QI_CC_GRAN(type) | QI_CC_TYPE;
desc.high = 0;
- return qi_submit_sync(&desc, iommu);
+ qi_submit_sync(&desc, iommu);
}
-int qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
- unsigned int size_order, u64 type,
- int non_present_entry_flush)
+void qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
+ unsigned int size_order, u64 type)
{
u8 dw = 0, dr = 0;
struct qi_desc desc;
int ih = 0;
- if (non_present_entry_flush) {
- if (!cap_caching_mode(iommu->cap))
- return 1;
- else
- did = 0;
- }
-
if (cap_write_drain(iommu->cap))
dw = 1;
@@ -760,7 +894,28 @@
desc.high = QI_IOTLB_ADDR(addr) | QI_IOTLB_IH(ih)
| QI_IOTLB_AM(size_order);
- return qi_submit_sync(&desc, iommu);
+ qi_submit_sync(&desc, iommu);
+}
+
+void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 qdep,
+ u64 addr, unsigned mask)
+{
+ struct qi_desc desc;
+
+ if (mask) {
+ BUG_ON(addr & ((1 << (VTD_PAGE_SHIFT + mask)) - 1));
+ addr |= (1 << (VTD_PAGE_SHIFT + mask - 1)) - 1;
+ desc.high = QI_DEV_IOTLB_ADDR(addr) | QI_DEV_IOTLB_SIZE;
+ } else
+ desc.high = QI_DEV_IOTLB_ADDR(addr);
+
+ if (qdep >= QI_DEV_IOTLB_MAX_INVS)
+ qdep = 0;
+
+ desc.low = QI_DEV_IOTLB_SID(sid) | QI_DEV_IOTLB_QDEP(qdep) |
+ QI_DIOTLB_TYPE;
+
+ qi_submit_sync(&desc, iommu);
}
/*
@@ -790,7 +945,6 @@
cpu_relax();
iommu->gcmd &= ~DMA_GCMD_QIE;
-
writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl,
@@ -804,7 +958,7 @@
*/
static void __dmar_enable_qi(struct intel_iommu *iommu)
{
- u32 cmd, sts;
+ u32 sts;
unsigned long flags;
struct q_inval *qi = iommu->qi;
@@ -818,9 +972,8 @@
dmar_writeq(iommu->reg + DMAR_IQA_REG, virt_to_phys(qi->desc));
- cmd = iommu->gcmd | DMA_GCMD_QIE;
iommu->gcmd |= DMA_GCMD_QIE;
- writel(cmd, iommu->reg + DMAR_GCMD_REG);
+ writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
/* Make sure hardware complete it */
IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl, (sts & DMA_GSTS_QIES), sts);
@@ -1096,7 +1249,7 @@
set_irq_data(irq, NULL);
iommu->irq = 0;
destroy_irq(irq);
- return 0;
+ return ret;
}
ret = request_irq(irq, dmar_fault, 0, iommu->name, iommu);
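
In the qi_flush_dev_iotlb() hunk above, the invalidation size is folded into the address
itself: the caller passes an address aligned to 2^(VTD_PAGE_SHIFT + mask), the low bits are
then forced to 1, and the lowest zero bit marks how far the invalidation reaches. A rough
worked example, assuming VTD_PAGE_SHIFT is 12 and mask is 2:

	addr = 0x40000;				/* 16 KiB aligned, BUG_ON passes */
	addr |= (1 << (12 + 2 - 1)) - 1;	/* addr is now 0x41fff */
	/* bits 12..0 are 1, bit 13 is 0: with QI_DEV_IOTLB_SIZE set, that
	 * lowest zero bit marks the extent, i.e. 2^2 = 4 pages. */
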
diff --git a/drivers/pci/hotplug/Kconfig b/drivers/pci/hotplug/Kconfig
index 9aa4fe1..66f29bc 100644
--- a/drivers/pci/hotplug/Kconfig
+++ b/drivers/pci/hotplug/Kconfig
@@ -4,7 +4,7 @@
menuconfig HOTPLUG_PCI
tristate "Support for PCI Hotplug"
- depends on PCI && HOTPLUG
+ depends on PCI && HOTPLUG && SYSFS
---help---
Say Y here if you have a motherboard with a PCI Hotplug controller.
This allows you to add and remove PCI cards while the machine is
@@ -41,7 +41,7 @@
config HOTPLUG_PCI_COMPAQ
tristate "Compaq PCI Hotplug driver"
- depends on X86 && PCI_BIOS && PCI_LEGACY
+ depends on X86 && PCI_BIOS
help
Say Y here if you have a motherboard with a Compaq PCI Hotplug
controller.
diff --git a/drivers/pci/hotplug/acpiphp_core.c b/drivers/pci/hotplug/acpiphp_core.c
index 43c10bd..4dd7114 100644
--- a/drivers/pci/hotplug/acpiphp_core.c
+++ b/drivers/pci/hotplug/acpiphp_core.c
@@ -77,7 +77,6 @@
static int get_adapter_status (struct hotplug_slot *slot, u8 *value);
static struct hotplug_slot_ops acpi_hotplug_slot_ops = {
- .owner = THIS_MODULE,
.enable_slot = enable_slot,
.disable_slot = disable_slot,
.set_attention_status = set_attention_status,
diff --git a/drivers/pci/hotplug/cpci_hotplug_core.c b/drivers/pci/hotplug/cpci_hotplug_core.c
index de94f4f..a5b9f6a 100644
--- a/drivers/pci/hotplug/cpci_hotplug_core.c
+++ b/drivers/pci/hotplug/cpci_hotplug_core.c
@@ -72,7 +72,6 @@
static int get_latch_status(struct hotplug_slot *slot, u8 * value);
static struct hotplug_slot_ops cpci_hotplug_slot_ops = {
- .owner = THIS_MODULE,
.enable_slot = enable_slot,
.disable_slot = disable_slot,
.set_attention_status = set_attention_status,
diff --git a/drivers/pci/hotplug/cpqphp.h b/drivers/pci/hotplug/cpqphp.h
index afaf8f6..5383600 100644
--- a/drivers/pci/hotplug/cpqphp.h
+++ b/drivers/pci/hotplug/cpqphp.h
@@ -150,25 +150,25 @@
/* offsets to the controller registers based on the above structure layout */
enum ctrl_offsets {
- SLOT_RST = offsetof(struct ctrl_reg, slot_RST),
+ SLOT_RST = offsetof(struct ctrl_reg, slot_RST),
SLOT_ENABLE = offsetof(struct ctrl_reg, slot_enable),
MISC = offsetof(struct ctrl_reg, misc),
LED_CONTROL = offsetof(struct ctrl_reg, led_control),
INT_INPUT_CLEAR = offsetof(struct ctrl_reg, int_input_clear),
- INT_MASK = offsetof(struct ctrl_reg, int_mask),
- CTRL_RESERVED0 = offsetof(struct ctrl_reg, reserved0),
+ INT_MASK = offsetof(struct ctrl_reg, int_mask),
+ CTRL_RESERVED0 = offsetof(struct ctrl_reg, reserved0),
CTRL_RESERVED1 = offsetof(struct ctrl_reg, reserved1),
CTRL_RESERVED2 = offsetof(struct ctrl_reg, reserved1),
- GEN_OUTPUT_AB = offsetof(struct ctrl_reg, gen_output_AB),
- NON_INT_INPUT = offsetof(struct ctrl_reg, non_int_input),
+ GEN_OUTPUT_AB = offsetof(struct ctrl_reg, gen_output_AB),
+ NON_INT_INPUT = offsetof(struct ctrl_reg, non_int_input),
CTRL_RESERVED3 = offsetof(struct ctrl_reg, reserved3),
CTRL_RESERVED4 = offsetof(struct ctrl_reg, reserved4),
CTRL_RESERVED5 = offsetof(struct ctrl_reg, reserved5),
CTRL_RESERVED6 = offsetof(struct ctrl_reg, reserved6),
CTRL_RESERVED7 = offsetof(struct ctrl_reg, reserved7),
CTRL_RESERVED8 = offsetof(struct ctrl_reg, reserved8),
- SLOT_MASK = offsetof(struct ctrl_reg, slot_mask),
- CTRL_RESERVED9 = offsetof(struct ctrl_reg, reserved9),
+ SLOT_MASK = offsetof(struct ctrl_reg, slot_mask),
+ CTRL_RESERVED9 = offsetof(struct ctrl_reg, reserved9),
CTRL_RESERVED10 = offsetof(struct ctrl_reg, reserved10),
CTRL_RESERVED11 = offsetof(struct ctrl_reg, reserved11),
SLOT_SERR = offsetof(struct ctrl_reg, slot_SERR),
@@ -190,7 +190,9 @@
u32 reserved2;
} __attribute__ ((packed));
-/* offsets to the hotplug resource table registers based on the above structure layout */
+/* offsets to the hotplug resource table registers based on the above
+ * structure layout
+ */
enum hrt_offsets {
SIG0 = offsetof(struct hrt, sig0),
SIG1 = offsetof(struct hrt, sig1),
@@ -217,18 +219,20 @@
u16 pre_mem_length;
} __attribute__ ((packed));
-/* offsets to the hotplug slot resource table registers based on the above structure layout */
+/* offsets to the hotplug slot resource table registers based on the above
+ * structure layout
+ */
enum slot_rt_offsets {
DEV_FUNC = offsetof(struct slot_rt, dev_func),
- PRIMARY_BUS = offsetof(struct slot_rt, primary_bus),
- SECONDARY_BUS = offsetof(struct slot_rt, secondary_bus),
- MAX_BUS = offsetof(struct slot_rt, max_bus),
- IO_BASE = offsetof(struct slot_rt, io_base),
- IO_LENGTH = offsetof(struct slot_rt, io_length),
- MEM_BASE = offsetof(struct slot_rt, mem_base),
- MEM_LENGTH = offsetof(struct slot_rt, mem_length),
- PRE_MEM_BASE = offsetof(struct slot_rt, pre_mem_base),
- PRE_MEM_LENGTH = offsetof(struct slot_rt, pre_mem_length),
+ PRIMARY_BUS = offsetof(struct slot_rt, primary_bus),
+ SECONDARY_BUS = offsetof(struct slot_rt, secondary_bus),
+ MAX_BUS = offsetof(struct slot_rt, max_bus),
+ IO_BASE = offsetof(struct slot_rt, io_base),
+ IO_LENGTH = offsetof(struct slot_rt, io_length),
+ MEM_BASE = offsetof(struct slot_rt, mem_base),
+ MEM_LENGTH = offsetof(struct slot_rt, mem_length),
+ PRE_MEM_BASE = offsetof(struct slot_rt, pre_mem_base),
+ PRE_MEM_LENGTH = offsetof(struct slot_rt, pre_mem_length),
};
struct pci_func {
@@ -286,8 +290,8 @@
struct controller {
struct controller *next;
u32 ctrl_int_comp;
- struct mutex crit_sect; /* critical section mutex */
- void __iomem *hpc_reg; /* cookie for our pci controller location */
+ struct mutex crit_sect; /* critical section mutex */
+ void __iomem *hpc_reg; /* cookie for our pci controller location */
struct pci_resource *mem_head;
struct pci_resource *p_mem_head;
struct pci_resource *io_head;
@@ -299,7 +303,7 @@
u8 next_event;
u8 interrupt;
u8 cfgspc_irq;
- u8 bus; /* bus number for the pci hotplug controller */
+ u8 bus; /* bus number for the pci hotplug controller */
u8 rev;
u8 slot_device_offset;
u8 first_slot;
@@ -401,46 +405,57 @@
/* debugfs functions for the hotplug controller info */
-extern void cpqhp_initialize_debugfs (void);
-extern void cpqhp_shutdown_debugfs (void);
-extern void cpqhp_create_debugfs_files (struct controller *ctrl);
-extern void cpqhp_remove_debugfs_files (struct controller *ctrl);
+extern void cpqhp_initialize_debugfs(void);
+extern void cpqhp_shutdown_debugfs(void);
+extern void cpqhp_create_debugfs_files(struct controller *ctrl);
+extern void cpqhp_remove_debugfs_files(struct controller *ctrl);
/* controller functions */
-extern void cpqhp_pushbutton_thread (unsigned long event_pointer);
-extern irqreturn_t cpqhp_ctrl_intr (int IRQ, void *data);
-extern int cpqhp_find_available_resources (struct controller *ctrl, void __iomem *rom_start);
-extern int cpqhp_event_start_thread (void);
-extern void cpqhp_event_stop_thread (void);
-extern struct pci_func *cpqhp_slot_create (unsigned char busnumber);
-extern struct pci_func *cpqhp_slot_find (unsigned char bus, unsigned char device, unsigned char index);
-extern int cpqhp_process_SI (struct controller *ctrl, struct pci_func *func);
-extern int cpqhp_process_SS (struct controller *ctrl, struct pci_func *func);
-extern int cpqhp_hardware_test (struct controller *ctrl, int test_num);
+extern void cpqhp_pushbutton_thread(unsigned long event_pointer);
+extern irqreturn_t cpqhp_ctrl_intr(int IRQ, void *data);
+extern int cpqhp_find_available_resources(struct controller *ctrl,
+ void __iomem *rom_start);
+extern int cpqhp_event_start_thread(void);
+extern void cpqhp_event_stop_thread(void);
+extern struct pci_func *cpqhp_slot_create(unsigned char busnumber);
+extern struct pci_func *cpqhp_slot_find(unsigned char bus, unsigned char device,
+ unsigned char index);
+extern int cpqhp_process_SI(struct controller *ctrl, struct pci_func *func);
+extern int cpqhp_process_SS(struct controller *ctrl, struct pci_func *func);
+extern int cpqhp_hardware_test(struct controller *ctrl, int test_num);
/* resource functions */
extern int cpqhp_resource_sort_and_combine (struct pci_resource **head);
/* pci functions */
-extern int cpqhp_set_irq (u8 bus_num, u8 dev_num, u8 int_pin, u8 irq_num);
-extern int cpqhp_get_bus_dev (struct controller *ctrl, u8 *bus_num, u8 *dev_num, u8 slot);
-extern int cpqhp_save_config (struct controller *ctrl, int busnumber, int is_hot_plug);
-extern int cpqhp_save_base_addr_length (struct controller *ctrl, struct pci_func * func);
-extern int cpqhp_save_used_resources (struct controller *ctrl, struct pci_func * func);
-extern int cpqhp_configure_board (struct controller *ctrl, struct pci_func * func);
-extern int cpqhp_save_slot_config (struct controller *ctrl, struct pci_func * new_slot);
-extern int cpqhp_valid_replace (struct controller *ctrl, struct pci_func * func);
-extern void cpqhp_destroy_board_resources (struct pci_func * func);
-extern int cpqhp_return_board_resources (struct pci_func * func, struct resource_lists * resources);
-extern void cpqhp_destroy_resource_list (struct resource_lists * resources);
-extern int cpqhp_configure_device (struct controller* ctrl, struct pci_func* func);
-extern int cpqhp_unconfigure_device (struct pci_func* func);
+extern int cpqhp_set_irq(u8 bus_num, u8 dev_num, u8 int_pin, u8 irq_num);
+extern int cpqhp_get_bus_dev(struct controller *ctrl, u8 *bus_num, u8 *dev_num,
+ u8 slot);
+extern int cpqhp_save_config(struct controller *ctrl, int busnumber,
+ int is_hot_plug);
+extern int cpqhp_save_base_addr_length(struct controller *ctrl,
+ struct pci_func *func);
+extern int cpqhp_save_used_resources(struct controller *ctrl,
+ struct pci_func *func);
+extern int cpqhp_configure_board(struct controller *ctrl,
+ struct pci_func *func);
+extern int cpqhp_save_slot_config(struct controller *ctrl,
+ struct pci_func *new_slot);
+extern int cpqhp_valid_replace(struct controller *ctrl, struct pci_func *func);
+extern void cpqhp_destroy_board_resources(struct pci_func *func);
+extern int cpqhp_return_board_resources(struct pci_func *func,
+ struct resource_lists *resources);
+extern void cpqhp_destroy_resource_list(struct resource_lists *resources);
+extern int cpqhp_configure_device(struct controller *ctrl,
+ struct pci_func *func);
+extern int cpqhp_unconfigure_device(struct pci_func *func);
/* Global variables */
extern int cpqhp_debug;
extern int cpqhp_legacy_mode;
extern struct controller *cpqhp_ctrl_list;
extern struct pci_func *cpqhp_slot_list[256];
+extern struct irq_routing_table *cpqhp_routing_table;
/* these can be gotten rid of, but for debugging they are purty */
extern u8 cpqhp_nic_irq;
@@ -449,7 +464,7 @@
/* inline functions */
-static inline char *slot_name(struct slot *slot)
+static inline const char *slot_name(struct slot *slot)
{
return hotplug_slot_name(slot->hotplug_slot);
}
@@ -458,9 +473,9 @@
* return_resource
*
* Puts node back in the resource list pointed to by head
- *
*/
-static inline void return_resource(struct pci_resource **head, struct pci_resource *node)
+static inline void return_resource(struct pci_resource **head,
+ struct pci_resource *node)
{
if (!node || !head)
return;
@@ -471,7 +486,7 @@
static inline void set_SOGO(struct controller *ctrl)
{
u16 misc;
-
+
misc = readw(ctrl->hpc_reg + MISC);
misc = (misc | 0x0001) & 0xFFFB;
writew(misc, ctrl->hpc_reg + MISC);
@@ -481,7 +496,7 @@
static inline void amber_LED_on(struct controller *ctrl, u8 slot)
{
u32 led_control;
-
+
led_control = readl(ctrl->hpc_reg + LED_CONTROL);
led_control |= (0x01010000L << slot);
writel(led_control, ctrl->hpc_reg + LED_CONTROL);
@@ -491,7 +506,7 @@
static inline void amber_LED_off(struct controller *ctrl, u8 slot)
{
u32 led_control;
-
+
led_control = readl(ctrl->hpc_reg + LED_CONTROL);
led_control &= ~(0x01010000L << slot);
writel(led_control, ctrl->hpc_reg + LED_CONTROL);
@@ -504,7 +519,7 @@
led_control = readl(ctrl->hpc_reg + LED_CONTROL);
led_control &= (0x01010000L << slot);
-
+
return led_control ? 1 : 0;
}
@@ -512,7 +527,7 @@
static inline void green_LED_on(struct controller *ctrl, u8 slot)
{
u32 led_control;
-
+
led_control = readl(ctrl->hpc_reg + LED_CONTROL);
led_control |= 0x0101L << slot;
writel(led_control, ctrl->hpc_reg + LED_CONTROL);
@@ -521,7 +536,7 @@
static inline void green_LED_off(struct controller *ctrl, u8 slot)
{
u32 led_control;
-
+
led_control = readl(ctrl->hpc_reg + LED_CONTROL);
led_control &= ~(0x0101L << slot);
writel(led_control, ctrl->hpc_reg + LED_CONTROL);
@@ -531,7 +546,7 @@
static inline void green_LED_blink(struct controller *ctrl, u8 slot)
{
u32 led_control;
-
+
led_control = readl(ctrl->hpc_reg + LED_CONTROL);
led_control &= ~(0x0101L << slot);
led_control |= (0x0001L << slot);
@@ -575,22 +590,21 @@
}
-/*
+/**
* get_controller_speed - find the current frequency/mode of controller.
*
* @ctrl: controller to get frequency/mode for.
*
* Returns controller speed.
- *
*/
static inline u8 get_controller_speed(struct controller *ctrl)
{
u8 curr_freq;
- u16 misc;
-
+ u16 misc;
+
if (ctrl->pcix_support) {
curr_freq = readb(ctrl->hpc_reg + NEXT_CURR_FREQ);
- if ((curr_freq & 0xB0) == 0xB0)
+ if ((curr_freq & 0xB0) == 0xB0)
return PCI_SPEED_133MHz_PCIX;
if ((curr_freq & 0xA0) == 0xA0)
return PCI_SPEED_100MHz_PCIX;
@@ -602,19 +616,18 @@
return PCI_SPEED_33MHz;
}
- misc = readw(ctrl->hpc_reg + MISC);
- return (misc & 0x0800) ? PCI_SPEED_66MHz : PCI_SPEED_33MHz;
+ misc = readw(ctrl->hpc_reg + MISC);
+ return (misc & 0x0800) ? PCI_SPEED_66MHz : PCI_SPEED_33MHz;
}
-
-/*
+
+/**
* get_adapter_speed - find the max supported frequency/mode of adapter.
*
* @ctrl: hotplug controller.
* @hp_slot: hotplug slot where adapter is installed.
*
* Returns adapter speed.
- *
*/
static inline u8 get_adapter_speed(struct controller *ctrl, u8 hp_slot)
{
@@ -672,7 +685,8 @@
}
-static inline int cpq_get_latch_status(struct controller *ctrl, struct slot *slot)
+static inline int cpq_get_latch_status(struct controller *ctrl,
+ struct slot *slot)
{
u32 status;
u8 hp_slot;
@@ -687,7 +701,8 @@
}
-static inline int get_presence_status(struct controller *ctrl, struct slot *slot)
+static inline int get_presence_status(struct controller *ctrl,
+ struct slot *slot)
{
int presence_save = 0;
u8 hp_slot;
@@ -696,7 +711,8 @@
hp_slot = slot->device - ctrl->slot_device_offset;
tempdword = readl(ctrl->hpc_reg + INT_INPUT_CLEAR);
- presence_save = (int) ((((~tempdword) >> 23) | ((~tempdword) >> 15)) >> hp_slot) & 0x02;
+ presence_save = (int) ((((~tempdword) >> 23) | ((~tempdword) >> 15))
+ >> hp_slot) & 0x02;
return presence_save;
}
@@ -718,5 +734,12 @@
return retval;
}
-#endif
+#include <asm/pci_x86.h>
+static inline int cpqhp_routing_table_length(void)
+{
+ BUG_ON(cpqhp_routing_table == NULL);
+ return ((cpqhp_routing_table->size - sizeof(struct irq_routing_table)) /
+ sizeof(struct irq_info));
+}
+#endif
diff --git a/drivers/pci/hotplug/cpqphp_core.c b/drivers/pci/hotplug/cpqphp_core.c
index c2e1bcb..075b4f4 100644
--- a/drivers/pci/hotplug/cpqphp_core.c
+++ b/drivers/pci/hotplug/cpqphp_core.c
@@ -25,8 +25,7 @@
* Send feedback to <greg@kroah.com>
*
* Jan 12, 2003 - Added 66/100/133MHz PCI-X support,
- * Torben Mathiasen <torben.mathiasen@hp.com>
- *
+ * Torben Mathiasen <torben.mathiasen@hp.com>
*/
#include <linux/module.h>
@@ -45,7 +44,6 @@
#include "cpqphp.h"
#include "cpqphp_nvram.h"
-#include <asm/pci_x86.h>
/* Global variables */
@@ -53,6 +51,7 @@
int cpqhp_legacy_mode;
struct controller *cpqhp_ctrl_list; /* = NULL */
struct pci_func *cpqhp_slot_list[256];
+struct irq_routing_table *cpqhp_routing_table;
/* local variables */
static void __iomem *smbios_table;
@@ -78,33 +77,6 @@
#define CPQHPC_MODULE_MINOR 208
-static int one_time_init (void);
-static int set_attention_status (struct hotplug_slot *slot, u8 value);
-static int process_SI (struct hotplug_slot *slot);
-static int process_SS (struct hotplug_slot *slot);
-static int hardware_test (struct hotplug_slot *slot, u32 value);
-static int get_power_status (struct hotplug_slot *slot, u8 *value);
-static int get_attention_status (struct hotplug_slot *slot, u8 *value);
-static int get_latch_status (struct hotplug_slot *slot, u8 *value);
-static int get_adapter_status (struct hotplug_slot *slot, u8 *value);
-static int get_max_bus_speed (struct hotplug_slot *slot, enum pci_bus_speed *value);
-static int get_cur_bus_speed (struct hotplug_slot *slot, enum pci_bus_speed *value);
-
-static struct hotplug_slot_ops cpqphp_hotplug_slot_ops = {
- .owner = THIS_MODULE,
- .set_attention_status = set_attention_status,
- .enable_slot = process_SI,
- .disable_slot = process_SS,
- .hardware_test = hardware_test,
- .get_power_status = get_power_status,
- .get_attention_status = get_attention_status,
- .get_latch_status = get_latch_status,
- .get_adapter_status = get_adapter_status,
- .get_max_bus_speed = get_max_bus_speed,
- .get_cur_bus_speed = get_cur_bus_speed,
-};
-
-
static inline int is_slot64bit(struct slot *slot)
{
return (readb(slot->p_sm_slot + SMBIOS_SLOT_WIDTH) == 0x06) ? 1 : 0;
@@ -144,7 +116,7 @@
break;
}
}
-
+
if (!status)
fp = NULL;
@@ -171,7 +143,7 @@
tempdword = ctrl->first_slot;
number_of_slots = readb(ctrl->hpc_reg + SLOT_MASK) & 0x0F;
- // Loop through slots
+ /* Loop through slots */
while (number_of_slots) {
physical_slot = tempdword;
writeb(0, ctrl->hpc_reg + SLOT_SERR);
@@ -182,41 +154,42 @@
return 0;
}
-
-/* nice debugging output */
-static int pci_print_IRQ_route (void)
+static int init_cpqhp_routing_table(void)
{
- struct irq_routing_table *routing_table;
int len;
- int loop;
- u8 tbus, tdevice, tslot;
-
- routing_table = pcibios_get_irq_routing_table();
- if (routing_table == NULL) {
- err("No BIOS Routing Table??? Not good\n");
+ cpqhp_routing_table = pcibios_get_irq_routing_table();
+ if (cpqhp_routing_table == NULL)
return -ENOMEM;
- }
- len = (routing_table->size - sizeof(struct irq_routing_table)) /
- sizeof(struct irq_info);
- // Make sure I got at least one entry
+ len = cpqhp_routing_table_length();
if (len == 0) {
- kfree(routing_table);
+ kfree(cpqhp_routing_table);
+ cpqhp_routing_table = NULL;
return -1;
}
- dbg("bus dev func slot\n");
+ return 0;
+}
+/* nice debugging output */
+static void pci_print_IRQ_route(void)
+{
+ int len;
+ int loop;
+ u8 tbus, tdevice, tslot;
+
+ len = cpqhp_routing_table_length();
+
+ dbg("bus dev func slot\n");
for (loop = 0; loop < len; ++loop) {
- tbus = routing_table->slots[loop].bus;
- tdevice = routing_table->slots[loop].devfn;
- tslot = routing_table->slots[loop].slot;
+ tbus = cpqhp_routing_table->slots[loop].bus;
+ tdevice = cpqhp_routing_table->slots[loop].devfn;
+ tslot = cpqhp_routing_table->slots[loop].slot;
dbg("%d %d %d %d\n", tbus, tdevice >> 3, tdevice & 0x7, tslot);
}
- kfree(routing_table);
- return 0;
+ return;
}
@@ -242,9 +215,9 @@
void __iomem *p_max;
if (!smbios_table || !curr)
- return(NULL);
+ return NULL;
- // set p_max to the end of the table
+ /* set p_max to the end of the table */
p_max = smbios_start + readw(smbios_table + ST_LENGTH);
p_temp = curr;
@@ -253,20 +226,19 @@
while ((p_temp < p_max) && !bail) {
/* Look for the double NULL terminator
* The first condition is the previous byte
- * and the second is the curr */
- if (!previous_byte && !(readb(p_temp))) {
+ * and the second is the curr
+ */
+ if (!previous_byte && !(readb(p_temp)))
bail = 1;
- }
previous_byte = readb(p_temp);
p_temp++;
}
- if (p_temp < p_max) {
+ if (p_temp < p_max)
return p_temp;
- } else {
+ else
return NULL;
- }
}
@@ -292,21 +264,18 @@
if (!smbios_table)
return NULL;
- if (!previous) {
+ if (!previous)
previous = smbios_start;
- } else {
+ else
previous = get_subsequent_smbios_entry(smbios_start,
smbios_table, previous);
- }
- while (previous) {
- if (readb(previous + SMBIOS_GENERIC_TYPE) != type) {
+ while (previous)
+ if (readb(previous + SMBIOS_GENERIC_TYPE) != type)
previous = get_subsequent_smbios_entry(smbios_start,
smbios_table, previous);
- } else {
+ else
break;
- }
- }
return previous;
}
@@ -322,144 +291,6 @@
kfree(slot);
}
-#define SLOT_NAME_SIZE 10
-
-static int ctrl_slot_setup(struct controller *ctrl,
- void __iomem *smbios_start,
- void __iomem *smbios_table)
-{
- struct slot *slot;
- struct hotplug_slot *hotplug_slot;
- struct hotplug_slot_info *hotplug_slot_info;
- u8 number_of_slots;
- u8 slot_device;
- u8 slot_number;
- u8 ctrl_slot;
- u32 tempdword;
- char name[SLOT_NAME_SIZE];
- void __iomem *slot_entry= NULL;
- int result = -ENOMEM;
-
- dbg("%s\n", __func__);
-
- tempdword = readl(ctrl->hpc_reg + INT_INPUT_CLEAR);
-
- number_of_slots = readb(ctrl->hpc_reg + SLOT_MASK) & 0x0F;
- slot_device = readb(ctrl->hpc_reg + SLOT_MASK) >> 4;
- slot_number = ctrl->first_slot;
-
- while (number_of_slots) {
- slot = kzalloc(sizeof(*slot), GFP_KERNEL);
- if (!slot)
- goto error;
-
- slot->hotplug_slot = kzalloc(sizeof(*(slot->hotplug_slot)),
- GFP_KERNEL);
- if (!slot->hotplug_slot)
- goto error_slot;
- hotplug_slot = slot->hotplug_slot;
-
- hotplug_slot->info =
- kzalloc(sizeof(*(hotplug_slot->info)),
- GFP_KERNEL);
- if (!hotplug_slot->info)
- goto error_hpslot;
- hotplug_slot_info = hotplug_slot->info;
-
- slot->ctrl = ctrl;
- slot->bus = ctrl->bus;
- slot->device = slot_device;
- slot->number = slot_number;
- dbg("slot->number = %u\n", slot->number);
-
- slot_entry = get_SMBIOS_entry(smbios_start, smbios_table, 9,
- slot_entry);
-
- while (slot_entry && (readw(slot_entry + SMBIOS_SLOT_NUMBER) !=
- slot->number)) {
- slot_entry = get_SMBIOS_entry(smbios_start,
- smbios_table, 9, slot_entry);
- }
-
- slot->p_sm_slot = slot_entry;
-
- init_timer(&slot->task_event);
- slot->task_event.expires = jiffies + 5 * HZ;
- slot->task_event.function = cpqhp_pushbutton_thread;
-
- //FIXME: these capabilities aren't used but if they are
- // they need to be correctly implemented
- slot->capabilities |= PCISLOT_REPLACE_SUPPORTED;
- slot->capabilities |= PCISLOT_INTERLOCK_SUPPORTED;
-
- if (is_slot64bit(slot))
- slot->capabilities |= PCISLOT_64_BIT_SUPPORTED;
- if (is_slot66mhz(slot))
- slot->capabilities |= PCISLOT_66_MHZ_SUPPORTED;
- if (ctrl->speed == PCI_SPEED_66MHz)
- slot->capabilities |= PCISLOT_66_MHZ_OPERATION;
-
- ctrl_slot =
- slot_device - (readb(ctrl->hpc_reg + SLOT_MASK) >> 4);
-
- // Check presence
- slot->capabilities |=
- ((((~tempdword) >> 23) |
- ((~tempdword) >> 15)) >> ctrl_slot) & 0x02;
- // Check the switch state
- slot->capabilities |=
- ((~tempdword & 0xFF) >> ctrl_slot) & 0x01;
- // Check the slot enable
- slot->capabilities |=
- ((read_slot_enable(ctrl) << 2) >> ctrl_slot) & 0x04;
-
- /* register this slot with the hotplug pci core */
- hotplug_slot->release = &release_slot;
- hotplug_slot->private = slot;
- snprintf(name, SLOT_NAME_SIZE, "%u", slot->number);
- hotplug_slot->ops = &cpqphp_hotplug_slot_ops;
-
- hotplug_slot_info->power_status = get_slot_enabled(ctrl, slot);
- hotplug_slot_info->attention_status =
- cpq_get_attention_status(ctrl, slot);
- hotplug_slot_info->latch_status =
- cpq_get_latch_status(ctrl, slot);
- hotplug_slot_info->adapter_status =
- get_presence_status(ctrl, slot);
-
- dbg("registering bus %d, dev %d, number %d, "
- "ctrl->slot_device_offset %d, slot %d\n",
- slot->bus, slot->device,
- slot->number, ctrl->slot_device_offset,
- slot_number);
- result = pci_hp_register(hotplug_slot,
- ctrl->pci_dev->bus,
- slot->device,
- name);
- if (result) {
- err("pci_hp_register failed with error %d\n", result);
- goto error_info;
- }
-
- slot->next = ctrl->slot;
- ctrl->slot = slot;
-
- number_of_slots--;
- slot_device++;
- slot_number++;
- }
-
- return 0;
-error_info:
- kfree(hotplug_slot_info);
-error_hpslot:
- kfree(hotplug_slot);
-error_slot:
- kfree(slot);
-error:
- return result;
-}
-
static int ctrl_slot_cleanup (struct controller * ctrl)
{
struct slot *old_slot, *next_slot;
@@ -476,36 +307,32 @@
cpqhp_remove_debugfs_files(ctrl);
- //Free IRQ associated with hot plug device
+ /* Free IRQ associated with hot plug device */
free_irq(ctrl->interrupt, ctrl);
- //Unmap the memory
+ /* Unmap the memory */
iounmap(ctrl->hpc_reg);
- //Finally reclaim PCI mem
+ /* Finally reclaim PCI mem */
release_mem_region(pci_resource_start(ctrl->pci_dev, 0),
pci_resource_len(ctrl->pci_dev, 0));
- return(0);
+ return 0;
}
-//============================================================================
-// function: get_slot_mapping
-//
-// Description: Attempts to determine a logical slot mapping for a PCI
-// device. Won't work for more than one PCI-PCI bridge
-// in a slot.
-//
-// Input: u8 bus_num - bus number of PCI device
-// u8 dev_num - device number of PCI device
-// u8 *slot - Pointer to u8 where slot number will
-// be returned
-//
-// Output: SUCCESS or FAILURE
-//=============================================================================
+/**
+ * get_slot_mapping - determine logical slot mapping for PCI device
+ *
+ * Won't work for more than one PCI-PCI bridge in a slot.
+ *
+ * @bus_num - bus number of PCI device
+ * @dev_num - device number of PCI device
+ * @slot - Pointer to u8 where slot number will be returned
+ *
+ * Output: SUCCESS or FAILURE
+ */
static int
get_slot_mapping(struct pci_bus *bus, u8 bus_num, u8 dev_num, u8 *slot)
{
- struct irq_routing_table *PCIIRQRoutingInfoLength;
u32 work;
long len;
long loop;
@@ -516,36 +343,25 @@
bridgeSlot = 0xFF;
- PCIIRQRoutingInfoLength = pcibios_get_irq_routing_table();
- if (!PCIIRQRoutingInfoLength)
- return -1;
-
- len = (PCIIRQRoutingInfoLength->size -
- sizeof(struct irq_routing_table)) / sizeof(struct irq_info);
- // Make sure I got at least one entry
- if (len == 0) {
- kfree(PCIIRQRoutingInfoLength);
- return -1;
- }
-
+ len = cpqhp_routing_table_length();
for (loop = 0; loop < len; ++loop) {
- tbus = PCIIRQRoutingInfoLength->slots[loop].bus;
- tdevice = PCIIRQRoutingInfoLength->slots[loop].devfn >> 3;
- tslot = PCIIRQRoutingInfoLength->slots[loop].slot;
+ tbus = cpqhp_routing_table->slots[loop].bus;
+ tdevice = cpqhp_routing_table->slots[loop].devfn >> 3;
+ tslot = cpqhp_routing_table->slots[loop].slot;
if ((tbus == bus_num) && (tdevice == dev_num)) {
*slot = tslot;
- kfree(PCIIRQRoutingInfoLength);
return 0;
} else {
/* Did not get a match on the target PCI device. Check
- * if the current IRQ table entry is a PCI-to-PCI bridge
- * device. If so, and it's secondary bus matches the
- * bus number for the target device, I need to save the
- * bridge's slot number. If I can not find an entry for
- * the target device, I will have to assume it's on the
- * other side of the bridge, and assign it the bridge's
- * slot. */
+ * if the current IRQ table entry is a PCI-to-PCI
+ * bridge device. If so, and it's secondary bus
+			 * bridge device. If so, and its secondary bus
+ * to save the bridge's slot number. If I can not find
+ * an entry for the target device, I will have to
+ * assume it's on the other side of the bridge, and
+ * assign it the bridge's slot.
+ */
bus->number = tbus;
pci_bus_read_config_dword(bus, PCI_DEVFN(tdevice, 0),
PCI_CLASS_REVISION, &work);
@@ -555,25 +371,23 @@
PCI_DEVFN(tdevice, 0),
PCI_PRIMARY_BUS, &work);
// See if bridge's secondary bus matches target bus.
- if (((work >> 8) & 0x000000FF) == (long) bus_num) {
+ if (((work >> 8) & 0x000000FF) == (long) bus_num)
bridgeSlot = tslot;
- }
}
}
}
- // If we got here, we didn't find an entry in the IRQ mapping table
- // for the target PCI device. If we did determine that the target
- // device is on the other side of a PCI-to-PCI bridge, return the
- // slot number for the bridge.
+ /* If we got here, we didn't find an entry in the IRQ mapping table for
+ * the target PCI device. If we did determine that the target device
+ * is on the other side of a PCI-to-PCI bridge, return the slot number
+ * for the bridge.
+ */
if (bridgeSlot != 0xFF) {
*slot = bridgeSlot;
- kfree(PCIIRQRoutingInfoLength);
return 0;
}
- kfree(PCIIRQRoutingInfoLength);
- // Couldn't find an entry in the routing table for this PCI device
+ /* Couldn't find an entry in the routing table for this PCI device */
return -1;
}
@@ -591,32 +405,32 @@
u8 hp_slot;
if (func == NULL)
- return(1);
+ return 1;
hp_slot = func->device - ctrl->slot_device_offset;
- // Wait for exclusive access to hardware
+ /* Wait for exclusive access to hardware */
mutex_lock(&ctrl->crit_sect);
- if (status == 1) {
+ if (status == 1)
amber_LED_on (ctrl, hp_slot);
- } else if (status == 0) {
+ else if (status == 0)
amber_LED_off (ctrl, hp_slot);
- } else {
- // Done with exclusive hardware access
+ else {
+ /* Done with exclusive hardware access */
mutex_unlock(&ctrl->crit_sect);
- return(1);
+ return 1;
}
set_SOGO(ctrl);
- // Wait for SOBS to be unset
+ /* Wait for SOBS to be unset */
wait_for_ctrl_irq (ctrl);
- // Done with exclusive hardware access
+ /* Done with exclusive hardware access */
mutex_unlock(&ctrl->crit_sect);
- return(0);
+ return 0;
}
@@ -719,7 +533,7 @@
dbg("%s - physical_slot = %s\n", __func__, slot_name(slot));
- return cpqhp_hardware_test(ctrl, value);
+ return cpqhp_hardware_test(ctrl, value);
}
@@ -738,7 +552,7 @@
{
struct slot *slot = hotplug_slot->private;
struct controller *ctrl = slot->ctrl;
-
+
dbg("%s - physical_slot = %s\n", __func__, slot_name(slot));
*value = cpq_get_attention_status(ctrl, slot);
@@ -793,6 +607,230 @@
return 0;
}
+static struct hotplug_slot_ops cpqphp_hotplug_slot_ops = {
+ .set_attention_status = set_attention_status,
+ .enable_slot = process_SI,
+ .disable_slot = process_SS,
+ .hardware_test = hardware_test,
+ .get_power_status = get_power_status,
+ .get_attention_status = get_attention_status,
+ .get_latch_status = get_latch_status,
+ .get_adapter_status = get_adapter_status,
+ .get_max_bus_speed = get_max_bus_speed,
+ .get_cur_bus_speed = get_cur_bus_speed,
+};
+
+#define SLOT_NAME_SIZE 10
+
+static int ctrl_slot_setup(struct controller *ctrl,
+ void __iomem *smbios_start,
+ void __iomem *smbios_table)
+{
+ struct slot *slot;
+ struct hotplug_slot *hotplug_slot;
+ struct hotplug_slot_info *hotplug_slot_info;
+ u8 number_of_slots;
+ u8 slot_device;
+ u8 slot_number;
+ u8 ctrl_slot;
+ u32 tempdword;
+ char name[SLOT_NAME_SIZE];
+ void __iomem *slot_entry= NULL;
+ int result = -ENOMEM;
+
+ dbg("%s\n", __func__);
+
+ tempdword = readl(ctrl->hpc_reg + INT_INPUT_CLEAR);
+
+ number_of_slots = readb(ctrl->hpc_reg + SLOT_MASK) & 0x0F;
+ slot_device = readb(ctrl->hpc_reg + SLOT_MASK) >> 4;
+ slot_number = ctrl->first_slot;
+
+ while (number_of_slots) {
+ slot = kzalloc(sizeof(*slot), GFP_KERNEL);
+ if (!slot)
+ goto error;
+
+ slot->hotplug_slot = kzalloc(sizeof(*(slot->hotplug_slot)),
+ GFP_KERNEL);
+ if (!slot->hotplug_slot)
+ goto error_slot;
+ hotplug_slot = slot->hotplug_slot;
+
+ hotplug_slot->info = kzalloc(sizeof(*(hotplug_slot->info)),
+ GFP_KERNEL);
+ if (!hotplug_slot->info)
+ goto error_hpslot;
+ hotplug_slot_info = hotplug_slot->info;
+
+ slot->ctrl = ctrl;
+ slot->bus = ctrl->bus;
+ slot->device = slot_device;
+ slot->number = slot_number;
+ dbg("slot->number = %u\n", slot->number);
+
+ slot_entry = get_SMBIOS_entry(smbios_start, smbios_table, 9,
+ slot_entry);
+
+ while (slot_entry && (readw(slot_entry + SMBIOS_SLOT_NUMBER) !=
+ slot->number)) {
+ slot_entry = get_SMBIOS_entry(smbios_start,
+ smbios_table, 9, slot_entry);
+ }
+
+ slot->p_sm_slot = slot_entry;
+
+ init_timer(&slot->task_event);
+ slot->task_event.expires = jiffies + 5 * HZ;
+ slot->task_event.function = cpqhp_pushbutton_thread;
+
+ /*FIXME: these capabilities aren't used but if they are
+ * they need to be correctly implemented
+ */
+ slot->capabilities |= PCISLOT_REPLACE_SUPPORTED;
+ slot->capabilities |= PCISLOT_INTERLOCK_SUPPORTED;
+
+ if (is_slot64bit(slot))
+ slot->capabilities |= PCISLOT_64_BIT_SUPPORTED;
+ if (is_slot66mhz(slot))
+ slot->capabilities |= PCISLOT_66_MHZ_SUPPORTED;
+ if (ctrl->speed == PCI_SPEED_66MHz)
+ slot->capabilities |= PCISLOT_66_MHZ_OPERATION;
+
+ ctrl_slot =
+ slot_device - (readb(ctrl->hpc_reg + SLOT_MASK) >> 4);
+
+ /* Check presence */
+ slot->capabilities |=
+ ((((~tempdword) >> 23) |
+ ((~tempdword) >> 15)) >> ctrl_slot) & 0x02;
+ /* Check the switch state */
+ slot->capabilities |=
+ ((~tempdword & 0xFF) >> ctrl_slot) & 0x01;
+ /* Check the slot enable */
+ slot->capabilities |=
+ ((read_slot_enable(ctrl) << 2) >> ctrl_slot) & 0x04;
+
+ /* register this slot with the hotplug pci core */
+ hotplug_slot->release = &release_slot;
+ hotplug_slot->private = slot;
+ snprintf(name, SLOT_NAME_SIZE, "%u", slot->number);
+ hotplug_slot->ops = &cpqphp_hotplug_slot_ops;
+
+ hotplug_slot_info->power_status = get_slot_enabled(ctrl, slot);
+ hotplug_slot_info->attention_status =
+ cpq_get_attention_status(ctrl, slot);
+ hotplug_slot_info->latch_status =
+ cpq_get_latch_status(ctrl, slot);
+ hotplug_slot_info->adapter_status =
+ get_presence_status(ctrl, slot);
+
+ dbg("registering bus %d, dev %d, number %d, "
+ "ctrl->slot_device_offset %d, slot %d\n",
+ slot->bus, slot->device,
+ slot->number, ctrl->slot_device_offset,
+ slot_number);
+ result = pci_hp_register(hotplug_slot,
+ ctrl->pci_dev->bus,
+ slot->device,
+ name);
+ if (result) {
+ err("pci_hp_register failed with error %d\n", result);
+ goto error_info;
+ }
+
+ slot->next = ctrl->slot;
+ ctrl->slot = slot;
+
+ number_of_slots--;
+ slot_device++;
+ slot_number++;
+ }
+
+ return 0;
+error_info:
+ kfree(hotplug_slot_info);
+error_hpslot:
+ kfree(hotplug_slot);
+error_slot:
+ kfree(slot);
+error:
+ return result;
+}
+
+static int one_time_init(void)
+{
+ int loop;
+ int retval = 0;
+
+ if (initialized)
+ return 0;
+
+ power_mode = 0;
+
+ retval = init_cpqhp_routing_table();
+ if (retval)
+ goto error;
+
+ if (cpqhp_debug)
+ pci_print_IRQ_route();
+
+ dbg("Initialize + Start the notification mechanism \n");
+
+ retval = cpqhp_event_start_thread();
+ if (retval)
+ goto error;
+
+ dbg("Initialize slot lists\n");
+ for (loop = 0; loop < 256; loop++)
+ cpqhp_slot_list[loop] = NULL;
+
+ /* FIXME: We also need to hook the NMI handler eventually.
+ * this also needs to be worked with Christoph
+ * register_NMI_handler();
+ */
+ /* Map rom address */
+ cpqhp_rom_start = ioremap(ROM_PHY_ADDR, ROM_PHY_LEN);
+ if (!cpqhp_rom_start) {
+ err ("Could not ioremap memory region for ROM\n");
+ retval = -EIO;
+ goto error;
+ }
+
+ /* Now, map the int15 entry point if we are on compaq specific
+ * hardware
+ */
+ compaq_nvram_init(cpqhp_rom_start);
+
+ /* Map smbios table entry point structure */
+ smbios_table = detect_SMBIOS_pointer(cpqhp_rom_start,
+ cpqhp_rom_start + ROM_PHY_LEN);
+ if (!smbios_table) {
+ err ("Could not find the SMBIOS pointer in memory\n");
+ retval = -EIO;
+ goto error_rom_start;
+ }
+
+ smbios_start = ioremap(readl(smbios_table + ST_ADDRESS),
+ readw(smbios_table + ST_LENGTH));
+ if (!smbios_start) {
+ err ("Could not ioremap memory region taken from SMBIOS values\n");
+ retval = -EIO;
+ goto error_smbios_start;
+ }
+
+ initialized = 1;
+
+ return retval;
+
+error_smbios_start:
+ iounmap(smbios_start);
+error_rom_start:
+ iounmap(cpqhp_rom_start);
+error:
+ return retval;
+}
+
static int cpqhpc_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
u8 num_of_slots = 0;
@@ -815,7 +853,9 @@
return err;
}
- // Need to read VID early b/c it's used to differentiate CPQ and INTC discovery
+ /* Need to read VID early b/c it's used to differentiate CPQ and INTC
+ * discovery
+ */
rc = pci_read_config_word(pdev, PCI_VENDOR_ID, &vendor_id);
if (rc || ((vendor_id != PCI_VENDOR_ID_COMPAQ) && (vendor_id != PCI_VENDOR_ID_INTEL))) {
err(msg_HPC_non_compaq_or_intel);
@@ -832,217 +872,209 @@
}
 	/* Check for the proper subsystem IDs
- * Intel uses a different SSID programming model than Compaq.
+ * Intel uses a different SSID programming model than Compaq.
* For Intel, each SSID bit identifies a PHP capability.
* Also Intel HPC's may have RID=0.
*/
- if ((pdev->revision > 2) || (vendor_id == PCI_VENDOR_ID_INTEL)) {
- // TODO: This code can be made to support non-Compaq or Intel subsystem IDs
- rc = pci_read_config_word(pdev, PCI_SUBSYSTEM_VENDOR_ID, &subsystem_vid);
- if (rc) {
- err("%s : pci_read_config_word failed\n", __func__);
- goto err_disable_device;
- }
- dbg("Subsystem Vendor ID: %x\n", subsystem_vid);
- if ((subsystem_vid != PCI_VENDOR_ID_COMPAQ) && (subsystem_vid != PCI_VENDOR_ID_INTEL)) {
- err(msg_HPC_non_compaq_or_intel);
- rc = -ENODEV;
- goto err_disable_device;
- }
-
- ctrl = kzalloc(sizeof(struct controller), GFP_KERNEL);
- if (!ctrl) {
- err("%s : out of memory\n", __func__);
- rc = -ENOMEM;
- goto err_disable_device;
- }
-
- rc = pci_read_config_word(pdev, PCI_SUBSYSTEM_ID, &subsystem_deviceid);
- if (rc) {
- err("%s : pci_read_config_word failed\n", __func__);
- goto err_free_ctrl;
- }
-
- info("Hot Plug Subsystem Device ID: %x\n", subsystem_deviceid);
-
- /* Set Vendor ID, so it can be accessed later from other functions */
- ctrl->vendor_id = vendor_id;
-
- switch (subsystem_vid) {
- case PCI_VENDOR_ID_COMPAQ:
- if (pdev->revision >= 0x13) { /* CIOBX */
- ctrl->push_flag = 1;
- ctrl->slot_switch_type = 1;
- ctrl->push_button = 1;
- ctrl->pci_config_space = 1;
- ctrl->defeature_PHP = 1;
- ctrl->pcix_support = 1;
- ctrl->pcix_speed_capability = 1;
- pci_read_config_byte(pdev, 0x41, &bus_cap);
- if (bus_cap & 0x80) {
- dbg("bus max supports 133MHz PCI-X\n");
- ctrl->speed_capability = PCI_SPEED_133MHz_PCIX;
- break;
- }
- if (bus_cap & 0x40) {
- dbg("bus max supports 100MHz PCI-X\n");
- ctrl->speed_capability = PCI_SPEED_100MHz_PCIX;
- break;
- }
- if (bus_cap & 20) {
- dbg("bus max supports 66MHz PCI-X\n");
- ctrl->speed_capability = PCI_SPEED_66MHz_PCIX;
- break;
- }
- if (bus_cap & 10) {
- dbg("bus max supports 66MHz PCI\n");
- ctrl->speed_capability = PCI_SPEED_66MHz;
- break;
- }
-
- break;
- }
-
- switch (subsystem_deviceid) {
- case PCI_SUB_HPC_ID:
- /* Original 6500/7000 implementation */
- ctrl->slot_switch_type = 1;
- ctrl->speed_capability = PCI_SPEED_33MHz;
- ctrl->push_button = 0;
- ctrl->pci_config_space = 1;
- ctrl->defeature_PHP = 1;
- ctrl->pcix_support = 0;
- ctrl->pcix_speed_capability = 0;
- break;
- case PCI_SUB_HPC_ID2:
- /* First Pushbutton implementation */
- ctrl->push_flag = 1;
- ctrl->slot_switch_type = 1;
- ctrl->speed_capability = PCI_SPEED_33MHz;
- ctrl->push_button = 1;
- ctrl->pci_config_space = 1;
- ctrl->defeature_PHP = 1;
- ctrl->pcix_support = 0;
- ctrl->pcix_speed_capability = 0;
- break;
- case PCI_SUB_HPC_ID_INTC:
- /* Third party (6500/7000) */
- ctrl->slot_switch_type = 1;
- ctrl->speed_capability = PCI_SPEED_33MHz;
- ctrl->push_button = 0;
- ctrl->pci_config_space = 1;
- ctrl->defeature_PHP = 1;
- ctrl->pcix_support = 0;
- ctrl->pcix_speed_capability = 0;
- break;
- case PCI_SUB_HPC_ID3:
- /* First 66 Mhz implementation */
- ctrl->push_flag = 1;
- ctrl->slot_switch_type = 1;
- ctrl->speed_capability = PCI_SPEED_66MHz;
- ctrl->push_button = 1;
- ctrl->pci_config_space = 1;
- ctrl->defeature_PHP = 1;
- ctrl->pcix_support = 0;
- ctrl->pcix_speed_capability = 0;
- break;
- case PCI_SUB_HPC_ID4:
- /* First PCI-X implementation, 100MHz */
- ctrl->push_flag = 1;
- ctrl->slot_switch_type = 1;
- ctrl->speed_capability = PCI_SPEED_100MHz_PCIX;
- ctrl->push_button = 1;
- ctrl->pci_config_space = 1;
- ctrl->defeature_PHP = 1;
- ctrl->pcix_support = 1;
- ctrl->pcix_speed_capability = 0;
- break;
- default:
- err(msg_HPC_not_supported);
- rc = -ENODEV;
- goto err_free_ctrl;
- }
- break;
-
- case PCI_VENDOR_ID_INTEL:
- /* Check for speed capability (0=33, 1=66) */
- if (subsystem_deviceid & 0x0001) {
- ctrl->speed_capability = PCI_SPEED_66MHz;
- } else {
- ctrl->speed_capability = PCI_SPEED_33MHz;
- }
-
- /* Check for push button */
- if (subsystem_deviceid & 0x0002) {
- /* no push button */
- ctrl->push_button = 0;
- } else {
- /* push button supported */
- ctrl->push_button = 1;
- }
-
- /* Check for slot switch type (0=mechanical, 1=not mechanical) */
- if (subsystem_deviceid & 0x0004) {
- /* no switch */
- ctrl->slot_switch_type = 0;
- } else {
- /* switch */
- ctrl->slot_switch_type = 1;
- }
-
- /* PHP Status (0=De-feature PHP, 1=Normal operation) */
- if (subsystem_deviceid & 0x0008) {
- ctrl->defeature_PHP = 1; // PHP supported
- } else {
- ctrl->defeature_PHP = 0; // PHP not supported
- }
-
- /* Alternate Base Address Register Interface (0=not supported, 1=supported) */
- if (subsystem_deviceid & 0x0010) {
- ctrl->alternate_base_address = 1; // supported
- } else {
- ctrl->alternate_base_address = 0; // not supported
- }
-
- /* PCI Config Space Index (0=not supported, 1=supported) */
- if (subsystem_deviceid & 0x0020) {
- ctrl->pci_config_space = 1; // supported
- } else {
- ctrl->pci_config_space = 0; // not supported
- }
-
- /* PCI-X support */
- if (subsystem_deviceid & 0x0080) {
- /* PCI-X capable */
- ctrl->pcix_support = 1;
- /* Frequency of operation in PCI-X mode */
- if (subsystem_deviceid & 0x0040) {
- /* 133MHz PCI-X if bit 7 is 1 */
- ctrl->pcix_speed_capability = 1;
- } else {
- /* 100MHz PCI-X if bit 7 is 1 and bit 0 is 0, */
- /* 66MHz PCI-X if bit 7 is 1 and bit 0 is 1 */
- ctrl->pcix_speed_capability = 0;
- }
- } else {
- /* Conventional PCI */
- ctrl->pcix_support = 0;
- ctrl->pcix_speed_capability = 0;
- }
- break;
-
- default:
- err(msg_HPC_not_supported);
- rc = -ENODEV;
- goto err_free_ctrl;
- }
-
- } else {
+ if ((pdev->revision <= 2) && (vendor_id != PCI_VENDOR_ID_INTEL)) {
err(msg_HPC_not_supported);
return -ENODEV;
}
- // Tell the user that we found one.
+ /* TODO: This code can be made to support non-Compaq or Intel
+ * subsystem IDs
+ */
+ rc = pci_read_config_word(pdev, PCI_SUBSYSTEM_VENDOR_ID, &subsystem_vid);
+ if (rc) {
+ err("%s : pci_read_config_word failed\n", __func__);
+ goto err_disable_device;
+ }
+ dbg("Subsystem Vendor ID: %x\n", subsystem_vid);
+ if ((subsystem_vid != PCI_VENDOR_ID_COMPAQ) && (subsystem_vid != PCI_VENDOR_ID_INTEL)) {
+ err(msg_HPC_non_compaq_or_intel);
+ rc = -ENODEV;
+ goto err_disable_device;
+ }
+
+ ctrl = kzalloc(sizeof(struct controller), GFP_KERNEL);
+ if (!ctrl) {
+ err("%s : out of memory\n", __func__);
+ rc = -ENOMEM;
+ goto err_disable_device;
+ }
+
+ rc = pci_read_config_word(pdev, PCI_SUBSYSTEM_ID, &subsystem_deviceid);
+ if (rc) {
+ err("%s : pci_read_config_word failed\n", __func__);
+ goto err_free_ctrl;
+ }
+
+ info("Hot Plug Subsystem Device ID: %x\n", subsystem_deviceid);
+
+ /* Set Vendor ID, so it can be accessed later from other
+ * functions
+ */
+ ctrl->vendor_id = vendor_id;
+
+ switch (subsystem_vid) {
+ case PCI_VENDOR_ID_COMPAQ:
+ if (pdev->revision >= 0x13) { /* CIOBX */
+ ctrl->push_flag = 1;
+ ctrl->slot_switch_type = 1;
+ ctrl->push_button = 1;
+ ctrl->pci_config_space = 1;
+ ctrl->defeature_PHP = 1;
+ ctrl->pcix_support = 1;
+ ctrl->pcix_speed_capability = 1;
+ pci_read_config_byte(pdev, 0x41, &bus_cap);
+ if (bus_cap & 0x80) {
+ dbg("bus max supports 133MHz PCI-X\n");
+ ctrl->speed_capability = PCI_SPEED_133MHz_PCIX;
+ break;
+ }
+ if (bus_cap & 0x40) {
+ dbg("bus max supports 100MHz PCI-X\n");
+ ctrl->speed_capability = PCI_SPEED_100MHz_PCIX;
+ break;
+ }
+ if (bus_cap & 0x20) {
+ dbg("bus max supports 66MHz PCI-X\n");
+ ctrl->speed_capability = PCI_SPEED_66MHz_PCIX;
+ break;
+ }
+ if (bus_cap & 0x10) {
+ dbg("bus max supports 66MHz PCI\n");
+ ctrl->speed_capability = PCI_SPEED_66MHz;
+ break;
+ }
+
+ break;
+ }
+
+ switch (subsystem_deviceid) {
+ case PCI_SUB_HPC_ID:
+ /* Original 6500/7000 implementation */
+ ctrl->slot_switch_type = 1;
+ ctrl->speed_capability = PCI_SPEED_33MHz;
+ ctrl->push_button = 0;
+ ctrl->pci_config_space = 1;
+ ctrl->defeature_PHP = 1;
+ ctrl->pcix_support = 0;
+ ctrl->pcix_speed_capability = 0;
+ break;
+ case PCI_SUB_HPC_ID2:
+ /* First Pushbutton implementation */
+ ctrl->push_flag = 1;
+ ctrl->slot_switch_type = 1;
+ ctrl->speed_capability = PCI_SPEED_33MHz;
+ ctrl->push_button = 1;
+ ctrl->pci_config_space = 1;
+ ctrl->defeature_PHP = 1;
+ ctrl->pcix_support = 0;
+ ctrl->pcix_speed_capability = 0;
+ break;
+ case PCI_SUB_HPC_ID_INTC:
+ /* Third party (6500/7000) */
+ ctrl->slot_switch_type = 1;
+ ctrl->speed_capability = PCI_SPEED_33MHz;
+ ctrl->push_button = 0;
+ ctrl->pci_config_space = 1;
+ ctrl->defeature_PHP = 1;
+ ctrl->pcix_support = 0;
+ ctrl->pcix_speed_capability = 0;
+ break;
+ case PCI_SUB_HPC_ID3:
+ /* First 66 MHz implementation */
+ ctrl->push_flag = 1;
+ ctrl->slot_switch_type = 1;
+ ctrl->speed_capability = PCI_SPEED_66MHz;
+ ctrl->push_button = 1;
+ ctrl->pci_config_space = 1;
+ ctrl->defeature_PHP = 1;
+ ctrl->pcix_support = 0;
+ ctrl->pcix_speed_capability = 0;
+ break;
+ case PCI_SUB_HPC_ID4:
+ /* First PCI-X implementation, 100MHz */
+ ctrl->push_flag = 1;
+ ctrl->slot_switch_type = 1;
+ ctrl->speed_capability = PCI_SPEED_100MHz_PCIX;
+ ctrl->push_button = 1;
+ ctrl->pci_config_space = 1;
+ ctrl->defeature_PHP = 1;
+ ctrl->pcix_support = 1;
+ ctrl->pcix_speed_capability = 0;
+ break;
+ default:
+ err(msg_HPC_not_supported);
+ rc = -ENODEV;
+ goto err_free_ctrl;
+ }
+ break;
+
+ case PCI_VENDOR_ID_INTEL:
+ /* Check for speed capability (0=33, 1=66) */
+ if (subsystem_deviceid & 0x0001)
+ ctrl->speed_capability = PCI_SPEED_66MHz;
+ else
+ ctrl->speed_capability = PCI_SPEED_33MHz;
+
+ /* Check for push button */
+ if (subsystem_deviceid & 0x0002)
+ ctrl->push_button = 0;
+ else
+ ctrl->push_button = 1;
+
+ /* Check for slot switch type (0=mechanical, 1=not mechanical) */
+ if (subsystem_deviceid & 0x0004)
+ ctrl->slot_switch_type = 0;
+ else
+ ctrl->slot_switch_type = 1;
+
+ /* PHP Status (0=De-feature PHP, 1=Normal operation) */
+ if (subsystem_deviceid & 0x0008)
+ ctrl->defeature_PHP = 1; /* PHP supported */
+ else
+ ctrl->defeature_PHP = 0; /* PHP not supported */
+
+ /* Alternate Base Address Register Interface
+ * (0=not supported, 1=supported)
+ */
+ if (subsystem_deviceid & 0x0010)
+ ctrl->alternate_base_address = 1;
+ else
+ ctrl->alternate_base_address = 0;
+
+ /* PCI Config Space Index (0=not supported, 1=supported) */
+ if (subsystem_deviceid & 0x0020)
+ ctrl->pci_config_space = 1;
+ else
+ ctrl->pci_config_space = 0;
+
+ /* PCI-X support */
+ if (subsystem_deviceid & 0x0080) {
+ ctrl->pcix_support = 1;
+ if (subsystem_deviceid & 0x0040)
+ /* 133MHz PCI-X if bit 7 is 1 */
+ ctrl->pcix_speed_capability = 1;
+ else
+ /* 100MHz PCI-X if bit 7 is 1 and bit 0 is 0, */
+ /* 66MHz PCI-X if bit 7 is 1 and bit 0 is 1 */
+ ctrl->pcix_speed_capability = 0;
+ } else {
+ /* Conventional PCI */
+ ctrl->pcix_support = 0;
+ ctrl->pcix_speed_capability = 0;
+ }
+ break;
+
+ default:
+ err(msg_HPC_not_supported);
+ rc = -ENODEV;
+ goto err_free_ctrl;
+ }
+
+ /* Tell the user that we found one. */
info("Initializing the PCI hot plug controller residing on PCI bus %d\n",
pdev->bus->number);
@@ -1087,7 +1119,7 @@
if (rc) {
goto err_free_bus;
}
-
+
dbg("pdev = %p\n", pdev);
dbg("pci resource start %llx\n", (unsigned long long)pci_resource_start(pdev, 0));
dbg("pci resource len %llx\n", (unsigned long long)pci_resource_len(pdev, 0));
@@ -1109,7 +1141,7 @@
goto err_free_mem_region;
}
- // Check for 66Mhz operation
+ /* Check for 66MHz operation */
ctrl->speed = get_controller_speed(ctrl);
@@ -1120,7 +1152,7 @@
*
********************************************************/
- // find the physical slot number of the first hot plug slot
+ /* find the physical slot number of the first hot plug slot */
/* Get slot won't work for devices behind bridges, but
* in this case it will always be called for the "base"
@@ -1137,7 +1169,7 @@
goto err_iounmap;
}
- // Store PCI Config Space for all devices on this bus
+ /* Store PCI Config Space for all devices on this bus */
rc = cpqhp_save_config(ctrl, ctrl->bus, readb(ctrl->hpc_reg + SLOT_MASK));
if (rc) {
err("%s: unable to save PCI configuration data, error %d\n",
@@ -1148,7 +1180,7 @@
/*
* Get IO, memory, and IRQ resources for new devices
*/
- // The next line is required for cpqhp_find_available_resources
+ /* The next line is required for cpqhp_find_available_resources */
ctrl->interrupt = pdev->irq;
if (ctrl->interrupt < 0x10) {
cpqhp_legacy_mode = 1;
@@ -1182,7 +1214,7 @@
__func__, rc);
goto err_iounmap;
}
-
+
/* Mask all general input interrupts */
writel(0xFFFFFFFFL, ctrl->hpc_reg + INT_MASK);
@@ -1196,12 +1228,14 @@
goto err_iounmap;
}
- /* Enable Shift Out interrupt and clear it, also enable SERR on power fault */
+ /* Enable Shift Out interrupt and clear it, also enable SERR on power
+ * fault
+ */
temp_word = readw(ctrl->hpc_reg + MISC);
temp_word |= 0x4006;
writew(temp_word, ctrl->hpc_reg + MISC);
- // Changed 05/05/97 to clear all interrupts at start
+ /* Changed 05/05/97 to clear all interrupts at start */
writel(0xFFFFFFFFL, ctrl->hpc_reg + INT_INPUT_CLEAR);
ctrl->ctrl_int_comp = readl(ctrl->hpc_reg + INT_INPUT_CLEAR);
@@ -1216,13 +1250,14 @@
cpqhp_ctrl_list = ctrl;
}
- // turn off empty slots here unless command line option "ON" set
- // Wait for exclusive access to hardware
+ /* turn off empty slots here unless command line option "ON" set
+ * Wait for exclusive access to hardware
+ */
mutex_lock(&ctrl->crit_sect);
num_of_slots = readb(ctrl->hpc_reg + SLOT_MASK) & 0x0F;
- // find first device number for the ctrl
+ /* find first device number for the ctrl */
device = readb(ctrl->hpc_reg + SLOT_MASK) >> 4;
while (num_of_slots) {
@@ -1234,23 +1269,21 @@
hp_slot = func->device - ctrl->slot_device_offset;
dbg("hp_slot: %d\n", hp_slot);
- // We have to save the presence info for these slots
+ /* We have to save the presence info for these slots */
temp_word = ctrl->ctrl_int_comp >> 16;
func->presence_save = (temp_word >> hp_slot) & 0x01;
func->presence_save |= (temp_word >> (hp_slot + 7)) & 0x02;
- if (ctrl->ctrl_int_comp & (0x1L << hp_slot)) {
+ if (ctrl->ctrl_int_comp & (0x1L << hp_slot))
func->switch_save = 0;
- } else {
+ else
func->switch_save = 0x10;
- }
- if (!power_mode) {
+ if (!power_mode)
if (!func->is_a_board) {
green_LED_off(ctrl, hp_slot);
slot_disable(ctrl, hp_slot);
}
- }
device++;
num_of_slots--;
@@ -1258,7 +1291,7 @@
if (!power_mode) {
set_SOGO(ctrl);
- // Wait for SOBS to be unset
+ /* Wait for SOBS to be unset */
wait_for_ctrl_irq(ctrl);
}
@@ -1269,7 +1302,7 @@
goto err_free_irq;
}
- // Done with exclusive hardware access
+ /* Done with exclusive hardware access */
mutex_unlock(&ctrl->crit_sect);
cpqhp_create_debugfs_files(ctrl);
@@ -1291,77 +1324,6 @@
return rc;
}
-
-static int one_time_init(void)
-{
- int loop;
- int retval = 0;
-
- if (initialized)
- return 0;
-
- power_mode = 0;
-
- retval = pci_print_IRQ_route();
- if (retval)
- goto error;
-
- dbg("Initialize + Start the notification mechanism \n");
-
- retval = cpqhp_event_start_thread();
- if (retval)
- goto error;
-
- dbg("Initialize slot lists\n");
- for (loop = 0; loop < 256; loop++) {
- cpqhp_slot_list[loop] = NULL;
- }
-
- // FIXME: We also need to hook the NMI handler eventually.
- // this also needs to be worked with Christoph
- // register_NMI_handler();
-
- // Map rom address
- cpqhp_rom_start = ioremap(ROM_PHY_ADDR, ROM_PHY_LEN);
- if (!cpqhp_rom_start) {
- err ("Could not ioremap memory region for ROM\n");
- retval = -EIO;
- goto error;
- }
-
- /* Now, map the int15 entry point if we are on compaq specific hardware */
- compaq_nvram_init(cpqhp_rom_start);
-
- /* Map smbios table entry point structure */
- smbios_table = detect_SMBIOS_pointer(cpqhp_rom_start,
- cpqhp_rom_start + ROM_PHY_LEN);
- if (!smbios_table) {
- err ("Could not find the SMBIOS pointer in memory\n");
- retval = -EIO;
- goto error_rom_start;
- }
-
- smbios_start = ioremap(readl(smbios_table + ST_ADDRESS),
- readw(smbios_table + ST_LENGTH));
- if (!smbios_start) {
- err ("Could not ioremap memory region taken from SMBIOS values\n");
- retval = -EIO;
- goto error_smbios_start;
- }
-
- initialized = 1;
-
- return retval;
-
-error_smbios_start:
- iounmap(smbios_start);
-error_rom_start:
- iounmap(cpqhp_rom_start);
-error:
- return retval;
-}
-
-
static void __exit unload_cpqphpd(void)
{
struct pci_func *next;
@@ -1381,10 +1343,10 @@
if (ctrl->hpc_reg) {
u16 misc;
rc = read_slot_enable (ctrl);
-
+
writeb(0, ctrl->hpc_reg + SLOT_SERR);
writel(0xFFFFFFC0L | ~rc, ctrl->hpc_reg + INT_MASK);
-
+
misc = readw(ctrl->hpc_reg + MISC);
misc &= 0xFFFD;
writew(misc, ctrl->hpc_reg + MISC);
@@ -1464,38 +1426,34 @@
}
}
- // Stop the notification mechanism
+ /* Stop the notification mechanism */
if (initialized)
cpqhp_event_stop_thread();
- //unmap the rom address
+ /* unmap the rom address */
if (cpqhp_rom_start)
iounmap(cpqhp_rom_start);
if (smbios_start)
iounmap(smbios_start);
}
-
-
static struct pci_device_id hpcd_pci_tbl[] = {
{
/* handle any PCI Hotplug controller */
.class = ((PCI_CLASS_SYSTEM_PCI_HOTPLUG << 8) | 0x00),
.class_mask = ~0,
-
+
/* no matter who makes it */
.vendor = PCI_ANY_ID,
.device = PCI_ANY_ID,
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
-
+
}, { /* end: all zeroes */ }
};
MODULE_DEVICE_TABLE(pci, hpcd_pci_tbl);
-
-
static struct pci_driver cpqhpc_driver = {
.name = "compaq_pci_hotplug",
.id_table = hpcd_pci_tbl,
@@ -1503,8 +1461,6 @@
/* remove: cpqhpc_remove_one, */
};
-
-
static int __init cpqhpc_init(void)
{
int result;
@@ -1518,7 +1474,6 @@
return result;
}
-
static void __exit cpqhpc_cleanup(void)
{
dbg("unload_cpqphpd()\n");
@@ -1529,8 +1484,5 @@
cpqhp_shutdown_debugfs();
}
-
module_init(cpqhpc_init);
module_exit(cpqhpc_cleanup);
-
-
diff --git a/drivers/pci/hotplug/cpqphp_ctrl.c b/drivers/pci/hotplug/cpqphp_ctrl.c
index cc227a8..2fa47af 100644
--- a/drivers/pci/hotplug/cpqphp_ctrl.c
+++ b/drivers/pci/hotplug/cpqphp_ctrl.c
@@ -81,14 +81,15 @@
for (hp_slot = 0; hp_slot < 6; hp_slot++) {
if (change & (0x1L << hp_slot)) {
- /**********************************
+ /*
* this one changed.
- **********************************/
+ */
func = cpqhp_slot_find(ctrl->bus,
(hp_slot + ctrl->slot_device_offset), 0);
/* this is the structure that tells the worker thread
- *what to do */
+ * what to do
+ */
taskInfo = &(ctrl->event_queue[ctrl->next_event]);
ctrl->next_event = (ctrl->next_event + 1) % 10;
taskInfo->hp_slot = hp_slot;
@@ -100,17 +101,17 @@
func->presence_save |= (temp_word >> (hp_slot + 7)) & 0x02;
if (ctrl->ctrl_int_comp & (0x1L << hp_slot)) {
- /**********************************
+ /*
* Switch opened
- **********************************/
+ */
func->switch_save = 0;
taskInfo->event_type = INT_SWITCH_OPEN;
} else {
- /**********************************
+ /*
* Switch closed
- **********************************/
+ */
func->switch_save = 0x10;
@@ -131,9 +132,8 @@
{
struct slot *slot = ctrl->slot;
- while (slot && (slot->device != device)) {
+ while (slot && (slot->device != device))
slot = slot->next;
- }
return slot;
}
@@ -152,17 +152,17 @@
if (!change)
return 0;
- /**********************************
+ /*
* Presence Change
- **********************************/
+ */
dbg("cpqsbd: Presence/Notify input change.\n");
dbg(" Changed bits are 0x%4.4x\n", change );
for (hp_slot = 0; hp_slot < 6; hp_slot++) {
if (change & (0x0101 << hp_slot)) {
- /**********************************
+ /*
* this one changed.
- **********************************/
+ */
func = cpqhp_slot_find(ctrl->bus,
(hp_slot + ctrl->slot_device_offset), 0);
@@ -177,22 +177,23 @@
return 0;
/* If the switch closed, must be a button
- * If not in button mode, nevermind */
+ * If not in button mode, nevermind
+ */
if (func->switch_save && (ctrl->push_button == 1)) {
temp_word = ctrl->ctrl_int_comp >> 16;
temp_byte = (temp_word >> hp_slot) & 0x01;
temp_byte |= (temp_word >> (hp_slot + 7)) & 0x02;
if (temp_byte != func->presence_save) {
- /**************************************
+ /*
* button Pressed (doesn't do anything)
- **************************************/
+ */
dbg("hp_slot %d button pressed\n", hp_slot);
taskInfo->event_type = INT_BUTTON_PRESS;
} else {
- /**********************************
+ /*
* button Released - TAKE ACTION!!!!
- **********************************/
+ */
dbg("hp_slot %d button released\n", hp_slot);
taskInfo->event_type = INT_BUTTON_RELEASE;
@@ -210,7 +211,8 @@
}
} else {
/* Switch is open, assume a presence change
- * Save the presence state */
+ * Save the presence state
+ */
temp_word = ctrl->ctrl_int_comp >> 16;
func->presence_save = (temp_word >> hp_slot) & 0x01;
func->presence_save |= (temp_word >> (hp_slot + 7)) & 0x02;
@@ -241,17 +243,17 @@
if (!change)
return 0;
- /**********************************
+ /*
* power fault
- **********************************/
+ */
info("power fault interrupt\n");
for (hp_slot = 0; hp_slot < 6; hp_slot++) {
if (change & (0x01 << hp_slot)) {
- /**********************************
+ /*
* this one changed.
- **********************************/
+ */
func = cpqhp_slot_find(ctrl->bus,
(hp_slot + ctrl->slot_device_offset), 0);
@@ -262,16 +264,16 @@
rc++;
if (ctrl->ctrl_int_comp & (0x00000100 << hp_slot)) {
- /**********************************
+ /*
* power fault Cleared
- **********************************/
+ */
func->status = 0x00;
taskInfo->event_type = INT_POWER_FAULT_CLEAR;
} else {
- /**********************************
+ /*
* power fault
- **********************************/
+ */
taskInfo->event_type = INT_POWER_FAULT;
if (ctrl->rev < 4) {
@@ -432,13 +434,15 @@
/* If we got here, there the bridge requires some of the resource, but
- * we may be able to split some off of the front */
+ * we may be able to split some off of the front
+ */
node = *head;
if (node->length & (alignment -1)) {
/* this one isn't an aligned length, so we'll make a new entry
- * and split it up. */
+ * and split it up.
+ */
split_node = kmalloc(sizeof(*split_node), GFP_KERNEL);
if (!split_node)
@@ -544,10 +548,10 @@
if (!(*head))
return NULL;
- if ( cpqhp_resource_sort_and_combine(head) )
+ if (cpqhp_resource_sort_and_combine(head))
return NULL;
- if ( sort_by_size(head) )
+ if (sort_by_size(head))
return NULL;
for (node = *head; node; node = node->next) {
@@ -556,7 +560,8 @@
if (node->base & (size - 1)) {
/* this one isn't base aligned properly
- * so we'll make a new entry and split it up */
+ * so we'll make a new entry and split it up
+ */
temp_dword = (node->base | (size-1)) + 1;
/* Short circuit if adjusted size is too small */
@@ -581,7 +586,8 @@
/* Don't need to check if too small since we already did */
if (node->length > size) {
/* this one is longer than we need
- * so we'll make a new entry and split it up */
+ * so we'll make a new entry and split it up
+ */
split_node = kmalloc(sizeof(*split_node), GFP_KERNEL);
if (!split_node)
@@ -601,7 +607,8 @@
continue;
/* If we got here, then it is the right size
- * Now take it out of the list and break */
+ * Now take it out of the list and break
+ */
if (*head == node) {
*head = node->next;
} else {
@@ -642,14 +649,16 @@
return NULL;
for (max = *head; max; max = max->next) {
- /* If not big enough we could probably just bail,
- * instead we'll continue to the next. */
+ /* If not big enough we could probably just bail,
+ * instead we'll continue to the next.
+ */
if (max->length < size)
continue;
if (max->base & (size - 1)) {
/* this one isn't base aligned properly
- * so we'll make a new entry and split it up */
+ * so we'll make a new entry and split it up
+ */
temp_dword = (max->base | (size-1)) + 1;
/* Short circuit if adjusted size is too small */
@@ -672,7 +681,8 @@
if ((max->base + max->length) & (size - 1)) {
/* this one isn't end aligned properly at the top
- * so we'll make a new entry and split it up */
+ * so we'll make a new entry and split it up
+ */
split_node = kmalloc(sizeof(*split_node), GFP_KERNEL);
if (!split_node)
@@ -744,7 +754,8 @@
if (node->base & (size - 1)) {
dbg("%s: not aligned\n", __func__);
/* this one isn't base aligned properly
- * so we'll make a new entry and split it up */
+ * so we'll make a new entry and split it up
+ */
temp_dword = (node->base | (size-1)) + 1;
/* Short circuit if adjusted size is too small */
@@ -769,7 +780,8 @@
if (node->length > size) {
dbg("%s: too big\n", __func__);
/* this one is longer than we need
- * so we'll make a new entry and split it up */
+ * so we'll make a new entry and split it up
+ */
split_node = kmalloc(sizeof(*split_node), GFP_KERNEL);
if (!split_node)
@@ -886,19 +898,19 @@
u32 Diff;
u32 temp_dword;
-
+
misc = readw(ctrl->hpc_reg + MISC);
- /***************************************
+ /*
* Check to see if it was our interrupt
- ***************************************/
+ */
if (!(misc & 0x000C)) {
return IRQ_NONE;
}
if (misc & 0x0004) {
- /**********************************
+ /*
* Serial Output interrupt Pending
- **********************************/
+ */
/* Clear the interrupt */
misc |= 0x0004;
@@ -961,11 +973,8 @@
struct pci_func *next;
new_slot = kzalloc(sizeof(*new_slot), GFP_KERNEL);
- if (new_slot == NULL) {
- /* I'm not dead yet!
- * You will be. */
+ if (new_slot == NULL)
return new_slot;
- }
new_slot->next = NULL;
new_slot->configured = 1;
@@ -996,10 +1005,8 @@
return 1;
next = cpqhp_slot_list[old_slot->bus];
-
- if (next == NULL) {
+ if (next == NULL)
return 1;
- }
if (next == old_slot) {
cpqhp_slot_list[old_slot->bus] = old_slot->next;
@@ -1008,9 +1015,8 @@
return 0;
}
- while ((next->next != old_slot) && (next->next != NULL)) {
+ while ((next->next != old_slot) && (next->next != NULL))
next = next->next;
- }
if (next->next == old_slot) {
next->next = old_slot->next;
@@ -1040,9 +1046,8 @@
for (tempBus = secondaryBus; tempBus <= subordinateBus; tempBus++) {
next = cpqhp_slot_list[tempBus];
- while (!slot_remove(next)) {
+ while (!slot_remove(next))
next = cpqhp_slot_list[tempBus];
- }
}
next = cpqhp_slot_list[bridge->bus];
@@ -1130,39 +1135,43 @@
u8 slot_power = readb(ctrl->hpc_reg + SLOT_POWER);
u16 reg16;
u32 leds = readl(ctrl->hpc_reg + LED_CONTROL);
-
+
if (ctrl->speed == adapter_speed)
return 0;
-
+
/* We don't allow freq/mode changes if we find another adapter running
- * in another slot on this controller */
+ * in another slot on this controller
+ */
for(slot = ctrl->slot; slot; slot = slot->next) {
- if (slot->device == (hp_slot + ctrl->slot_device_offset))
+ if (slot->device == (hp_slot + ctrl->slot_device_offset))
continue;
if (!slot->hotplug_slot || !slot->hotplug_slot->info)
continue;
- if (slot->hotplug_slot->info->adapter_status == 0)
+ if (slot->hotplug_slot->info->adapter_status == 0)
continue;
/* If another adapter is running on the same segment but at a
* lower speed/mode, we allow the new adapter to function at
- * this rate if supported */
- if (ctrl->speed < adapter_speed)
+ * this rate if supported
+ */
+ if (ctrl->speed < adapter_speed)
return 0;
return 1;
}
-
+
/* If the controller doesn't support freq/mode changes and the
- * controller is running at a higher mode, we bail */
+ * controller is running at a higher mode, we bail
+ */
if ((ctrl->speed > adapter_speed) && (!ctrl->pcix_speed_capability))
return 1;
-
+
/* But we allow the adapter to run at a lower rate if possible */
if ((ctrl->speed < adapter_speed) && (!ctrl->pcix_speed_capability))
return 0;
/* We try to set the max speed supported by both the adapter and
- * controller */
+ * controller
+ */
if (ctrl->speed_capability < adapter_speed) {
if (ctrl->speed == ctrl->speed_capability)
return 0;
@@ -1171,22 +1180,22 @@
writel(0x0L, ctrl->hpc_reg + LED_CONTROL);
writeb(0x00, ctrl->hpc_reg + SLOT_ENABLE);
-
- set_SOGO(ctrl);
+
+ set_SOGO(ctrl);
wait_for_ctrl_irq(ctrl);
-
+
if (adapter_speed != PCI_SPEED_133MHz_PCIX)
reg = 0xF5;
else
- reg = 0xF4;
+ reg = 0xF4;
pci_write_config_byte(ctrl->pci_dev, 0x41, reg);
-
+
reg16 = readw(ctrl->hpc_reg + NEXT_CURR_FREQ);
reg16 &= ~0x000F;
switch(adapter_speed) {
- case(PCI_SPEED_133MHz_PCIX):
+ case(PCI_SPEED_133MHz_PCIX):
reg = 0x75;
- reg16 |= 0xB;
+ reg16 |= 0xB;
break;
case(PCI_SPEED_100MHz_PCIX):
reg = 0x74;
@@ -1203,48 +1212,48 @@
default: /* 33MHz PCI 2.2 */
reg = 0x71;
break;
-
+
}
reg16 |= 0xB << 12;
writew(reg16, ctrl->hpc_reg + NEXT_CURR_FREQ);
-
- mdelay(5);
-
+
+ mdelay(5);
+
/* Reenable interrupts */
writel(0, ctrl->hpc_reg + INT_MASK);
- pci_write_config_byte(ctrl->pci_dev, 0x41, reg);
-
+ pci_write_config_byte(ctrl->pci_dev, 0x41, reg);
+
/* Restart state machine */
reg = ~0xF;
pci_read_config_byte(ctrl->pci_dev, 0x43, &reg);
pci_write_config_byte(ctrl->pci_dev, 0x43, reg);
-
+
/* Only if mode change...*/
if (((ctrl->speed == PCI_SPEED_66MHz) && (adapter_speed == PCI_SPEED_66MHz_PCIX)) ||
((ctrl->speed == PCI_SPEED_66MHz_PCIX) && (adapter_speed == PCI_SPEED_66MHz)))
set_SOGO(ctrl);
-
+
wait_for_ctrl_irq(ctrl);
mdelay(1100);
-
+
/* Restore LED/Slot state */
writel(leds, ctrl->hpc_reg + LED_CONTROL);
writeb(slot_power, ctrl->hpc_reg + SLOT_ENABLE);
-
+
set_SOGO(ctrl);
wait_for_ctrl_irq(ctrl);
ctrl->speed = adapter_speed;
slot = cpqhp_find_slot(ctrl, hp_slot + ctrl->slot_device_offset);
- info("Successfully changed frequency/mode for adapter in slot %d\n",
+ info("Successfully changed frequency/mode for adapter in slot %d\n",
slot->number);
return 0;
}
-/* the following routines constitute the bulk of the
- hotplug controller logic
+/* the following routines constitute the bulk of the
+ * hotplug controller logic
*/
@@ -1268,17 +1277,17 @@
hp_slot = func->device - ctrl->slot_device_offset;
- if (readl(ctrl->hpc_reg + INT_INPUT_CLEAR) & (0x01L << hp_slot)) {
- /**********************************
- * The switch is open.
- **********************************/
+ /*
+ * The switch is open.
+ */
+ if (readl(ctrl->hpc_reg + INT_INPUT_CLEAR) & (0x01L << hp_slot))
rc = INTERLOCK_OPEN;
- } else if (is_slot_enabled (ctrl, hp_slot)) {
- /**********************************
- * The board is already on
- **********************************/
+ /*
+ * The board is already on
+ */
+ else if (is_slot_enabled (ctrl, hp_slot))
rc = CARD_FUNCTIONING;
- } else {
+ else {
mutex_lock(&ctrl->crit_sect);
/* turn on board without attaching to the bus */
@@ -1299,7 +1308,7 @@
/* Wait for SOBS to be unset */
wait_for_ctrl_irq (ctrl);
-
+
adapter_speed = get_adapter_speed(ctrl, hp_slot);
if (ctrl->speed != adapter_speed)
if (set_controller_speed(ctrl, adapter_speed, hp_slot))
@@ -1352,7 +1361,8 @@
* Get slot won't work for devices behind
* bridges, but in this case it will always be
* called for the "base" bus/dev/func of an
- * adapter. */
+ * adapter.
+ */
mutex_lock(&ctrl->crit_sect);
@@ -1377,7 +1387,8 @@
* Get slot won't work for devices behind bridges, but
* in this case it will always be called for the "base"
- * bus/dev/func of an adapter. */
+ * bus/dev/func of an adapter.
+ */
mutex_lock(&ctrl->crit_sect);
@@ -1434,7 +1445,8 @@
wait_for_ctrl_irq (ctrl);
/* Change bits in slot power register to force another shift out
- * NOTE: this is to work around the timer bug */
+ * NOTE: this is to work around the timer bug
+ */
temp_byte = readb(ctrl->hpc_reg + SLOT_POWER);
writeb(0x00, ctrl->hpc_reg + SLOT_POWER);
writeb(temp_byte, ctrl->hpc_reg + SLOT_POWER);
@@ -1443,12 +1455,12 @@
/* Wait for SOBS to be unset */
wait_for_ctrl_irq (ctrl);
-
+
adapter_speed = get_adapter_speed(ctrl, hp_slot);
if (ctrl->speed != adapter_speed)
if (set_controller_speed(ctrl, adapter_speed, hp_slot))
rc = WRONG_BUS_FREQUENCY;
-
+
/* turn off board without attaching to the bus */
disable_slot_power (ctrl, hp_slot);
@@ -1461,7 +1473,7 @@
if (rc)
return rc;
-
+
p_slot = cpqhp_find_slot(ctrl, hp_slot + ctrl->slot_device_offset);
/* turn on board and blink green LED */
@@ -1521,7 +1533,7 @@
}
/* All F's is an empty slot or an invalid board */
- if (temp_register != 0xFFFFFFFF) { /* Check for a board in the slot */
+ if (temp_register != 0xFFFFFFFF) {
res_lists.io_head = ctrl->io_head;
res_lists.mem_head = ctrl->mem_head;
res_lists.p_mem_head = ctrl->p_mem_head;
@@ -1570,9 +1582,8 @@
index = 0;
do {
new_slot = cpqhp_slot_find(ctrl->bus, func->device, index++);
- if (new_slot && !new_slot->pci_dev) {
+ if (new_slot && !new_slot->pci_dev)
cpqhp_configure_device(ctrl, new_slot);
- }
} while (new_slot);
mutex_lock(&ctrl->crit_sect);
@@ -1859,12 +1870,12 @@
info(msg_button_on, p_slot->number);
}
mutex_lock(&ctrl->crit_sect);
-
+
dbg("blink green LED and turn off amber\n");
-
+
amber_LED_off (ctrl, hp_slot);
green_LED_blink (ctrl, hp_slot);
-
+
set_SOGO(ctrl);
/* Wait for SOBS to be unset */
@@ -1958,7 +1969,7 @@
if (cpqhp_process_SI(ctrl, func) != 0) {
amber_LED_on(ctrl, hp_slot);
green_LED_off(ctrl, hp_slot);
-
+
set_SOGO(ctrl);
/* Wait for SOBS to be unset */
@@ -2079,7 +2090,7 @@
struct pci_bus *pci_bus = ctrl->pci_bus;
int physical_slot=0;
- device = func->device;
+ device = func->device;
func = cpqhp_slot_find(ctrl->bus, device, index++);
p_slot = cpqhp_find_slot(ctrl, device);
if (p_slot) {
@@ -2113,9 +2124,8 @@
/* If the VGA Enable bit is set, remove isn't
* supported */
- if (BCR & PCI_BRIDGE_CTL_VGA) {
+ if (BCR & PCI_BRIDGE_CTL_VGA)
rc = REMOVE_NOT_SUPPORTED;
- }
}
}
@@ -2183,67 +2193,67 @@
num_of_slots = readb(ctrl->hpc_reg + SLOT_MASK) & 0x0f;
switch (test_num) {
- case 1:
- /* Do stuff here! */
+ case 1:
+ /* Do stuff here! */
- /* Do that funky LED thing */
- /* so we can restore them later */
- save_LED = readl(ctrl->hpc_reg + LED_CONTROL);
- work_LED = 0x01010101;
- switch_leds(ctrl, num_of_slots, &work_LED, 0);
- switch_leds(ctrl, num_of_slots, &work_LED, 1);
- switch_leds(ctrl, num_of_slots, &work_LED, 0);
- switch_leds(ctrl, num_of_slots, &work_LED, 1);
+ /* Do that funky LED thing */
+ /* so we can restore them later */
+ save_LED = readl(ctrl->hpc_reg + LED_CONTROL);
+ work_LED = 0x01010101;
+ switch_leds(ctrl, num_of_slots, &work_LED, 0);
+ switch_leds(ctrl, num_of_slots, &work_LED, 1);
+ switch_leds(ctrl, num_of_slots, &work_LED, 0);
+ switch_leds(ctrl, num_of_slots, &work_LED, 1);
- work_LED = 0x01010000;
+ work_LED = 0x01010000;
+ writel(work_LED, ctrl->hpc_reg + LED_CONTROL);
+ switch_leds(ctrl, num_of_slots, &work_LED, 0);
+ switch_leds(ctrl, num_of_slots, &work_LED, 1);
+ work_LED = 0x00000101;
+ writel(work_LED, ctrl->hpc_reg + LED_CONTROL);
+ switch_leds(ctrl, num_of_slots, &work_LED, 0);
+ switch_leds(ctrl, num_of_slots, &work_LED, 1);
+
+ work_LED = 0x01010000;
+ writel(work_LED, ctrl->hpc_reg + LED_CONTROL);
+ for (loop = 0; loop < num_of_slots; loop++) {
+ set_SOGO(ctrl);
+
+ /* Wait for SOGO interrupt */
+ wait_for_ctrl_irq (ctrl);
+
+ /* Get ready for next iteration */
+ long_delay((3*HZ)/10);
+ work_LED = work_LED >> 16;
writel(work_LED, ctrl->hpc_reg + LED_CONTROL);
- switch_leds(ctrl, num_of_slots, &work_LED, 0);
- switch_leds(ctrl, num_of_slots, &work_LED, 1);
- work_LED = 0x00000101;
- writel(work_LED, ctrl->hpc_reg + LED_CONTROL);
- switch_leds(ctrl, num_of_slots, &work_LED, 0);
- switch_leds(ctrl, num_of_slots, &work_LED, 1);
-
- work_LED = 0x01010000;
- writel(work_LED, ctrl->hpc_reg + LED_CONTROL);
- for (loop = 0; loop < num_of_slots; loop++) {
- set_SOGO(ctrl);
-
- /* Wait for SOGO interrupt */
- wait_for_ctrl_irq (ctrl);
-
- /* Get ready for next iteration */
- long_delay((3*HZ)/10);
- work_LED = work_LED >> 16;
- writel(work_LED, ctrl->hpc_reg + LED_CONTROL);
-
- set_SOGO(ctrl);
-
- /* Wait for SOGO interrupt */
- wait_for_ctrl_irq (ctrl);
-
- /* Get ready for next iteration */
- long_delay((3*HZ)/10);
- work_LED = work_LED << 16;
- writel(work_LED, ctrl->hpc_reg + LED_CONTROL);
- work_LED = work_LED << 1;
- writel(work_LED, ctrl->hpc_reg + LED_CONTROL);
- }
-
- /* put it back the way it was */
- writel(save_LED, ctrl->hpc_reg + LED_CONTROL);
set_SOGO(ctrl);
- /* Wait for SOBS to be unset */
+ /* Wait for SOGO interrupt */
wait_for_ctrl_irq (ctrl);
- break;
- case 2:
- /* Do other stuff here! */
- break;
- case 3:
- /* and more... */
- break;
+
+ /* Get ready for next iteration */
+ long_delay((3*HZ)/10);
+ work_LED = work_LED << 16;
+ writel(work_LED, ctrl->hpc_reg + LED_CONTROL);
+ work_LED = work_LED << 1;
+ writel(work_LED, ctrl->hpc_reg + LED_CONTROL);
+ }
+
+ /* put it back the way it was */
+ writel(save_LED, ctrl->hpc_reg + LED_CONTROL);
+
+ set_SOGO(ctrl);
+
+ /* Wait for SOBS to be unset */
+ wait_for_ctrl_irq (ctrl);
+ break;
+ case 2:
+ /* Do other stuff here! */
+ break;
+ case 3:
+ /* and more... */
+ break;
}
return 0;
}
@@ -2312,9 +2322,9 @@
while ((function < max_functions) && (!stop_it)) {
pci_bus_read_config_dword (ctrl->pci_bus, PCI_DEVFN(func->device, function), 0x00, &ID);
- if (ID == 0xFFFFFFFF) { /* There's nothing there. */
+ if (ID == 0xFFFFFFFF) {
function++;
- } else { /* There's something there */
+ } else {
/* Setup slot structure. */
new_slot = cpqhp_slot_create(func->bus);
@@ -2339,8 +2349,8 @@
/*
- Configuration logic that involves the hotplug data structures and
- their bookkeeping
+ * Configuration logic that involves the hotplug data structures and
+ * their bookkeeping
*/
@@ -2393,7 +2403,7 @@
if (rc)
return rc;
- if ((temp_byte & 0x7F) == PCI_HEADER_TYPE_BRIDGE) { /* PCI-PCI Bridge */
+ if ((temp_byte & 0x7F) == PCI_HEADER_TYPE_BRIDGE) {
/* set Primary bus */
dbg("set Primary bus = %d\n", func->bus);
rc = pci_bus_write_config_byte(pci_bus, devfn, PCI_PRIMARY_BUS, func->bus);
@@ -2484,7 +2494,8 @@
temp_resources.irqs = &irqs;
/* Make copies of the nodes we are going to pass down so that
- * if there is a problem,we can just use these to free resources */
+ * if there is a problem, we can just use these to free resources
+ */
hold_bus_node = kmalloc(sizeof(*hold_bus_node), GFP_KERNEL);
hold_IO_node = kmalloc(sizeof(*hold_IO_node), GFP_KERNEL);
hold_mem_node = kmalloc(sizeof(*hold_mem_node), GFP_KERNEL);
@@ -2556,7 +2567,8 @@
temp_word = (p_mem_node->base + p_mem_node->length - 1) >> 16;
rc = pci_bus_write_config_word (pci_bus, devfn, PCI_PREF_MEMORY_LIMIT, temp_word);
- /* Adjust this to compensate for extra adjustment in first loop */
+ /* Adjust this to compensate for extra adjustment in first loop
+ */
irqs.barber_pole--;
rc = 0;
@@ -2917,27 +2929,26 @@
} /* End of base register loop */
if (cpqhp_legacy_mode) {
/* Figure out which interrupt pin this function uses */
- rc = pci_bus_read_config_byte (pci_bus, devfn,
+ rc = pci_bus_read_config_byte (pci_bus, devfn,
PCI_INTERRUPT_PIN, &temp_byte);
/* If this function needs an interrupt and we are behind
* a bridge and the pin is tied to something that's
* already mapped, set this one the same */
- if (temp_byte && resources->irqs &&
- (resources->irqs->valid_INT &
+ if (temp_byte && resources->irqs &&
+ (resources->irqs->valid_INT &
(0x01 << ((temp_byte + resources->irqs->barber_pole - 1) & 0x03)))) {
/* We have to share with something already set up */
- IRQ = resources->irqs->interrupt[(temp_byte +
+ IRQ = resources->irqs->interrupt[(temp_byte +
resources->irqs->barber_pole - 1) & 0x03];
} else {
/* Program IRQ based on card type */
rc = pci_bus_read_config_byte (pci_bus, devfn, 0x0B, &class_code);
- if (class_code == PCI_BASE_CLASS_STORAGE) {
+ if (class_code == PCI_BASE_CLASS_STORAGE)
IRQ = cpqhp_disk_irq;
- } else {
+ else
IRQ = cpqhp_nic_irq;
- }
}
/* IRQ Line */
diff --git a/drivers/pci/hotplug/cpqphp_nvram.c b/drivers/pci/hotplug/cpqphp_nvram.c
index cb17488..76ba8a1 100644
--- a/drivers/pci/hotplug/cpqphp_nvram.c
+++ b/drivers/pci/hotplug/cpqphp_nvram.c
@@ -94,12 +94,13 @@
static void __iomem *compaq_int15_entry_point;
-static spinlock_t int15_lock; /* lock for ordering int15_bios_call() */
+/* lock for ordering int15_bios_call() */
+static spinlock_t int15_lock;
/* This is a series of function that deals with
- setting & getting the hotplug resource table in some environment variable.
-*/
+ * setting & getting the hotplug resource table in some environment variable.
+ */
/*
* We really shouldn't be doing this unless there is a _very_ good reason to!!!
@@ -113,7 +114,7 @@
if ((*used + 1) > *avail)
return(1);
-
+
*((u8*)*p_buffer) = value;
tByte = (u8**)p_buffer;
(*tByte)++;
@@ -170,10 +171,10 @@
unsigned long flags;
int op = operation;
int ret_val;
-
+
if (!compaq_int15_entry_point)
return -ENODEV;
-
+
spin_lock_irqsave(&int15_lock, flags);
__asm__ (
"xorl %%ebx,%%ebx\n" \
@@ -187,7 +188,7 @@
"D" (buffer), "m" (compaq_int15_entry_point)
: "%ebx", "%edx");
spin_unlock_irqrestore(&int15_lock, flags);
-
+
return((ret_val & 0xFF00) >> 8);
}
@@ -210,14 +211,16 @@
available = 1024;
- // Now load the EV
+ /* Now load the EV */
temp_dword = available;
rc = access_EV(READ_EV, "CQTHPS", evbuffer, &temp_dword);
evbuffer_length = temp_dword;
- // We're maintaining the resource lists so write FF to invalidate old info
+ /* We're maintaining the resource lists so write FF to invalidate old
+ * info
+ */
temp_dword = 1;
rc = access_EV(WRITE_EV, "CQTHPS", &temp_byte, &temp_dword);
@@ -263,13 +266,13 @@
p_EV_header = (struct ev_hrt_header *) pFill;
ctrl = cpqhp_ctrl_list;
-
- // The revision of this structure
+
+ /* The revision of this structure */
rc = add_byte( &pFill, 1 + ctrl->push_flag, &usedbytes, &available);
if (rc)
return(rc);
- // The number of controllers
+ /* The number of controllers */
rc = add_byte( &pFill, 1, &usedbytes, &available);
if (rc)
return(rc);
@@ -279,27 +282,27 @@
numCtrl++;
- // The bus number
+ /* The bus number */
rc = add_byte( &pFill, ctrl->bus, &usedbytes, &available);
if (rc)
return(rc);
- // The device Number
+ /* The device Number */
rc = add_byte( &pFill, PCI_SLOT(ctrl->pci_dev->devfn), &usedbytes, &available);
if (rc)
return(rc);
- // The function Number
+ /* The function Number */
rc = add_byte( &pFill, PCI_FUNC(ctrl->pci_dev->devfn), &usedbytes, &available);
if (rc)
return(rc);
- // Skip the number of available entries
+ /* Skip the number of available entries */
rc = add_dword( &pFill, 0, &usedbytes, &available);
if (rc)
return(rc);
- // Figure out memory Available
+ /* Figure out memory Available */
resNode = ctrl->mem_head;
@@ -308,12 +311,12 @@
while (resNode) {
loop ++;
- // base
+ /* base */
rc = add_dword( &pFill, resNode->base, &usedbytes, &available);
if (rc)
return(rc);
- // length
+ /* length */
rc = add_dword( &pFill, resNode->length, &usedbytes, &available);
if (rc)
return(rc);
@@ -321,10 +324,10 @@
resNode = resNode->next;
}
- // Fill in the number of entries
+ /* Fill in the number of entries */
p_ev_ctrl->mem_avail = loop;
- // Figure out prefetchable memory Available
+ /* Figure out prefetchable memory Available */
resNode = ctrl->p_mem_head;
@@ -333,12 +336,12 @@
while (resNode) {
loop ++;
- // base
+ /* base */
rc = add_dword( &pFill, resNode->base, &usedbytes, &available);
if (rc)
return(rc);
- // length
+ /* length */
rc = add_dword( &pFill, resNode->length, &usedbytes, &available);
if (rc)
return(rc);
@@ -346,10 +349,10 @@
resNode = resNode->next;
}
- // Fill in the number of entries
+ /* Fill in the number of entries */
p_ev_ctrl->p_mem_avail = loop;
- // Figure out IO Available
+ /* Figure out IO Available */
resNode = ctrl->io_head;
@@ -358,12 +361,12 @@
while (resNode) {
loop ++;
- // base
+ /* base */
rc = add_dword( &pFill, resNode->base, &usedbytes, &available);
if (rc)
return(rc);
- // length
+ /* length */
rc = add_dword( &pFill, resNode->length, &usedbytes, &available);
if (rc)
return(rc);
@@ -371,10 +374,10 @@
resNode = resNode->next;
}
- // Fill in the number of entries
+ /* Fill in the number of entries */
p_ev_ctrl->io_avail = loop;
- // Figure out bus Available
+ /* Figure out bus Available */
resNode = ctrl->bus_head;
@@ -383,12 +386,12 @@
while (resNode) {
loop ++;
- // base
+ /* base */
rc = add_dword( &pFill, resNode->base, &usedbytes, &available);
if (rc)
return(rc);
- // length
+ /* length */
rc = add_dword( &pFill, resNode->length, &usedbytes, &available);
if (rc)
return(rc);
@@ -396,15 +399,15 @@
resNode = resNode->next;
}
- // Fill in the number of entries
+ /* Fill in the number of entries */
p_ev_ctrl->bus_avail = loop;
ctrl = ctrl->next;
}
-
+
p_EV_header->num_of_ctrl = numCtrl;
- // Now store the EV
+ /* Now store the EV */
temp_dword = usedbytes;
@@ -449,20 +452,21 @@
struct ev_hrt_header *p_EV_header;
if (!evbuffer_init) {
- // Read the resource list information in from NVRAM
+ /* Read the resource list information in from NVRAM */
if (load_HRT(rom_start))
memset (evbuffer, 0, 1024);
evbuffer_init = 1;
}
- // If we saved information in NVRAM, use it now
+ /* If we saved information in NVRAM, use it now */
p_EV_header = (struct ev_hrt_header *) evbuffer;
- // The following code is for systems where version 1.0 of this
- // driver has been loaded, but doesn't support the hardware.
- // In that case, the driver would incorrectly store something
- // in NVRAM.
+ /* The following code is for systems where version 1.0 of this
+ * driver has been loaded, but doesn't support the hardware.
+ * In that case, the driver would incorrectly store something
+ * in NVRAM.
+ */
if ((p_EV_header->Version == 2) ||
((p_EV_header->Version == 1) && !ctrl->push_flag)) {
p_byte = &(p_EV_header->next);
@@ -479,7 +483,7 @@
function = p_ev_ctrl->function;
while ((bus != ctrl->bus) ||
- (device != PCI_SLOT(ctrl->pci_dev->devfn)) ||
+ (device != PCI_SLOT(ctrl->pci_dev->devfn)) ||
(function != PCI_FUNC(ctrl->pci_dev->devfn))) {
nummem = p_ev_ctrl->mem_avail;
numpmem = p_ev_ctrl->p_mem_avail;
@@ -491,7 +495,7 @@
if (p_byte > ((u8*)p_EV_header + evbuffer_length))
return 2;
- // Skip forward to the next entry
+ /* Skip forward to the next entry */
p_byte += (nummem + numpmem + numio + numbus) * 8;
if (p_byte > ((u8*)p_EV_header + evbuffer_length))
@@ -629,8 +633,9 @@
ctrl->bus_head = bus_node;
}
- // If all of the following fail, we don't have any resources for
- // hot plug add
+ /* If all of the following fail, we don't have any resources for
+ * hot plug add
+ */
rc = 1;
rc &= cpqhp_resource_sort_and_combine(&(ctrl->mem_head));
rc &= cpqhp_resource_sort_and_combine(&(ctrl->p_mem_head));
@@ -640,14 +645,14 @@
if (rc)
return(rc);
} else {
- if ((evbuffer[0] != 0) && (!ctrl->push_flag))
+ if ((evbuffer[0] != 0) && (!ctrl->push_flag))
return 1;
}
return 0;
}
-
+
int compaq_nvram_store (void __iomem *rom_start)
{
int rc = 1;
diff --git a/drivers/pci/hotplug/cpqphp_pci.c b/drivers/pci/hotplug/cpqphp_pci.c
index 6c0ed0f..6173b9a 100644
--- a/drivers/pci/hotplug/cpqphp_pci.c
+++ b/drivers/pci/hotplug/cpqphp_pci.c
@@ -37,7 +37,6 @@
#include "../pci.h"
#include "cpqphp.h"
#include "cpqphp_nvram.h"
-#include <asm/pci_x86.h>
u8 cpqhp_nic_irq;
@@ -82,14 +81,14 @@
}
-int cpqhp_configure_device (struct controller* ctrl, struct pci_func* func)
+int cpqhp_configure_device (struct controller* ctrl, struct pci_func* func)
{
unsigned char bus;
struct pci_bus *child;
int num;
if (func->pci_dev == NULL)
- func->pci_dev = pci_find_slot(func->bus, PCI_DEVFN(func->device, func->function));
+ func->pci_dev = pci_get_bus_and_slot(func->bus, PCI_DEVFN(func->device, func->function));
/* No pci device, we need to create it then */
if (func->pci_dev == NULL) {
@@ -99,7 +98,7 @@
if (num)
pci_bus_add_devices(ctrl->pci_dev->bus);
- func->pci_dev = pci_find_slot(func->bus, PCI_DEVFN(func->device, func->function));
+ func->pci_dev = pci_get_bus_and_slot(func->bus, PCI_DEVFN(func->device, func->function));
if (func->pci_dev == NULL) {
dbg("ERROR: pci_dev still null\n");
return 0;
@@ -112,20 +111,24 @@
pci_do_scan_bus(child);
}
+ pci_dev_put(func->pci_dev);
+
return 0;
}
-int cpqhp_unconfigure_device(struct pci_func* func)
+int cpqhp_unconfigure_device(struct pci_func* func)
{
int j;
-
+
dbg("%s: bus/dev/func = %x/%x/%x\n", __func__, func->bus, func->device, func->function);
for (j=0; j<8 ; j++) {
- struct pci_dev* temp = pci_find_slot(func->bus, PCI_DEVFN(func->device, j));
- if (temp)
+ struct pci_dev* temp = pci_get_bus_and_slot(func->bus, PCI_DEVFN(func->device, j));
+ if (temp) {
+ pci_dev_put(temp);
pci_remove_bus_device(temp);
+ }
}
return 0;
}
@@ -178,32 +181,22 @@
if (!rc)
return !rc;
- // set the Edge Level Control Register (ELCR)
+ /* set the Edge Level Control Register (ELCR) */
temp_word = inb(0x4d0);
temp_word |= inb(0x4d1) << 8;
temp_word |= 0x01 << irq_num;
- // This should only be for x86 as it sets the Edge Level Control Register
- outb((u8) (temp_word & 0xFF), 0x4d0);
- outb((u8) ((temp_word & 0xFF00) >> 8), 0x4d1);
- rc = 0;
- }
+ /* This should only be for x86 as it sets the Edge Level
+ * Control Register
+ */
+ outb((u8) (temp_word & 0xFF), 0x4d0);
+ outb((u8) ((temp_word & 0xFF00) >> 8), 0x4d1);
+ rc = 0;
+ }
return rc;
}
-/*
- * WTF??? This function isn't in the code, yet a function calls it, but the
- * compiler optimizes it away? strange. Here as a placeholder to keep the
- * compiler happy.
- */
-static int PCI_ScanBusNonBridge (u8 bus, u8 device)
-{
- return 0;
-}
-
static int PCI_ScanBusForNonBridge(struct controller *ctrl, u8 bus_num, u8 * dev_num)
{
u16 tdevice;
@@ -213,11 +206,11 @@
ctrl->pci_bus->number = bus_num;
for (tdevice = 0; tdevice < 0xFF; tdevice++) {
- //Scan for access first
+ /* Scan for access first */
if (PCI_RefinedAccessConfig(ctrl->pci_bus, tdevice, 0x08, &work) == -1)
continue;
dbg("Looking for nonbridge bus_num %d dev_num %d\n", bus_num, tdevice);
- //Yep we got one. Not a bridge ?
+ /* Yep we got one. Not a bridge ? */
if ((work >> 8) != PCI_TO_PCI_BRIDGE_CLASS) {
*dev_num = tdevice;
dbg("found it !\n");
@@ -225,16 +218,16 @@
}
}
for (tdevice = 0; tdevice < 0xFF; tdevice++) {
- //Scan for access first
+ /* Scan for access first */
if (PCI_RefinedAccessConfig(ctrl->pci_bus, tdevice, 0x08, &work) == -1)
continue;
dbg("Looking for bridge bus_num %d dev_num %d\n", bus_num, tdevice);
- //Yep we got one. bridge ?
+ /* Yep we got one. bridge ? */
if ((work >> 8) == PCI_TO_PCI_BRIDGE_CLASS) {
pci_bus_read_config_byte (ctrl->pci_bus, PCI_DEVFN(tdevice, 0), PCI_SECONDARY_BUS, &tbus);
+ /* XXX: no recursion, wtf? */
dbg("Recurse on bus_num %d tdevice %d\n", tbus, tdevice);
- if (PCI_ScanBusNonBridge(tbus, tdevice) == 0)
- return 0;
+ return 0;
}
}
@@ -244,39 +237,23 @@
static int PCI_GetBusDevHelper(struct controller *ctrl, u8 *bus_num, u8 *dev_num, u8 slot, u8 nobridge)
{
- struct irq_routing_table *PCIIRQRoutingInfoLength;
- long len;
- long loop;
+ int loop, len;
u32 work;
-
u8 tbus, tdevice, tslot;
- PCIIRQRoutingInfoLength = pcibios_get_irq_routing_table();
- if (!PCIIRQRoutingInfoLength)
- return -1;
-
- len = (PCIIRQRoutingInfoLength->size -
- sizeof(struct irq_routing_table)) / sizeof(struct irq_info);
- // Make sure I got at least one entry
- if (len == 0) {
- kfree(PCIIRQRoutingInfoLength );
- return -1;
- }
-
+ len = cpqhp_routing_table_length();
for (loop = 0; loop < len; ++loop) {
- tbus = PCIIRQRoutingInfoLength->slots[loop].bus;
- tdevice = PCIIRQRoutingInfoLength->slots[loop].devfn;
- tslot = PCIIRQRoutingInfoLength->slots[loop].slot;
+ tbus = cpqhp_routing_table->slots[loop].bus;
+ tdevice = cpqhp_routing_table->slots[loop].devfn;
+ tslot = cpqhp_routing_table->slots[loop].slot;
if (tslot == slot) {
*bus_num = tbus;
*dev_num = tdevice;
ctrl->pci_bus->number = tbus;
pci_bus_read_config_dword (ctrl->pci_bus, *dev_num, PCI_VENDOR_ID, &work);
- if (!nobridge || (work == 0xffffffff)) {
- kfree(PCIIRQRoutingInfoLength );
+ if (!nobridge || (work == 0xffffffff))
return 0;
- }
dbg("bus_num %d devfn %d\n", *bus_num, *dev_num);
pci_bus_read_config_dword (ctrl->pci_bus, *dev_num, PCI_CLASS_REVISION, &work);
@@ -287,28 +264,26 @@
dbg("Scan bus for Non Bridge: bus %d\n", tbus);
if (PCI_ScanBusForNonBridge(ctrl, tbus, dev_num) == 0) {
*bus_num = tbus;
- kfree(PCIIRQRoutingInfoLength );
return 0;
}
- } else {
- kfree(PCIIRQRoutingInfoLength );
+ } else
return 0;
- }
-
}
}
- kfree(PCIIRQRoutingInfoLength );
return -1;
}
int cpqhp_get_bus_dev (struct controller *ctrl, u8 * bus_num, u8 * dev_num, u8 slot)
{
- return PCI_GetBusDevHelper(ctrl, bus_num, dev_num, slot, 0); //plain (bridges allowed)
+ /* plain (bridges allowed) */
+ return PCI_GetBusDevHelper(ctrl, bus_num, dev_num, slot, 0);
}
-/* More PCI configuration routines; this time centered around hotplug controller */
+/* More PCI configuration routines; this time centered around hotplug
+ * controller
+ */
/*
@@ -339,12 +314,12 @@
int stop_it;
int index;
- // Decide which slots are supported
+ /* Decide which slots are supported */
if (is_hot_plug) {
- //*********************************
- // is_hot_plug is the slot mask
- //*********************************
+ /*
+ * is_hot_plug is the slot mask
+ */
FirstSupported = is_hot_plug >> 4;
LastSupported = FirstSupported + (is_hot_plug & 0x0F) - 1;
} else {
@@ -352,123 +327,127 @@
LastSupported = 0x1F;
}
- // Save PCI configuration space for all devices in supported slots
+ /* Save PCI configuration space for all devices in supported slots */
ctrl->pci_bus->number = busnumber;
for (device = FirstSupported; device <= LastSupported; device++) {
ID = 0xFFFFFFFF;
- rc = pci_bus_read_config_dword (ctrl->pci_bus, PCI_DEVFN(device, 0), PCI_VENDOR_ID, &ID);
+ rc = pci_bus_read_config_dword(ctrl->pci_bus, PCI_DEVFN(device, 0), PCI_VENDOR_ID, &ID);
- if (ID != 0xFFFFFFFF) { // device in slot
- rc = pci_bus_read_config_byte (ctrl->pci_bus, PCI_DEVFN(device, 0), 0x0B, &class_code);
- if (rc)
- return rc;
-
- rc = pci_bus_read_config_byte (ctrl->pci_bus, PCI_DEVFN(device, 0), PCI_HEADER_TYPE, &header_type);
- if (rc)
- return rc;
-
- // If multi-function device, set max_functions to 8
- if (header_type & 0x80)
- max_functions = 8;
- else
- max_functions = 1;
-
- function = 0;
-
- do {
- DevError = 0;
-
- if ((header_type & 0x7F) == PCI_HEADER_TYPE_BRIDGE) { // P-P Bridge
- // Recurse the subordinate bus
- // get the subordinate bus number
- rc = pci_bus_read_config_byte (ctrl->pci_bus, PCI_DEVFN(device, function), PCI_SECONDARY_BUS, &secondary_bus);
- if (rc) {
- return rc;
- } else {
- sub_bus = (int) secondary_bus;
-
- // Save secondary bus cfg spc
- // with this recursive call.
- rc = cpqhp_save_config(ctrl, sub_bus, 0);
- if (rc)
- return rc;
- ctrl->pci_bus->number = busnumber;
- }
- }
-
- index = 0;
- new_slot = cpqhp_slot_find(busnumber, device, index++);
- while (new_slot &&
- (new_slot->function != (u8) function))
- new_slot = cpqhp_slot_find(busnumber, device, index++);
-
- if (!new_slot) {
- // Setup slot structure.
- new_slot = cpqhp_slot_create(busnumber);
-
- if (new_slot == NULL)
- return(1);
- }
+ if (ID == 0xFFFFFFFF) {
+ if (is_hot_plug) {
+ /* Setup slot structure with entry for empty
+ * slot
+ */
+ new_slot = cpqhp_slot_create(busnumber);
+ if (new_slot == NULL)
+ return 1;
new_slot->bus = (u8) busnumber;
new_slot->device = (u8) device;
- new_slot->function = (u8) function;
- new_slot->is_a_board = 1;
- new_slot->switch_save = 0x10;
- // In case of unsupported board
- new_slot->status = DevError;
- new_slot->pci_dev = pci_find_slot(new_slot->bus, (new_slot->device << 3) | new_slot->function);
+ new_slot->function = 0;
+ new_slot->is_a_board = 0;
+ new_slot->presence_save = 0;
+ new_slot->switch_save = 0;
+ }
+ continue;
+ }
- for (cloop = 0; cloop < 0x20; cloop++) {
- rc = pci_bus_read_config_dword (ctrl->pci_bus, PCI_DEVFN(device, function), cloop << 2, (u32 *) & (new_slot-> config_space [cloop]));
+ rc = pci_bus_read_config_byte(ctrl->pci_bus, PCI_DEVFN(device, 0), 0x0B, &class_code);
+ if (rc)
+ return rc;
+
+ rc = pci_bus_read_config_byte(ctrl->pci_bus, PCI_DEVFN(device, 0), PCI_HEADER_TYPE, &header_type);
+ if (rc)
+ return rc;
+
+ /* If multi-function device, set max_functions to 8 */
+ if (header_type & 0x80)
+ max_functions = 8;
+ else
+ max_functions = 1;
+
+ function = 0;
+
+ do {
+ DevError = 0;
+ if ((header_type & 0x7F) == PCI_HEADER_TYPE_BRIDGE) {
+ /* Recurse the subordinate bus
+ * get the subordinate bus number
+ */
+ rc = pci_bus_read_config_byte(ctrl->pci_bus, PCI_DEVFN(device, function), PCI_SECONDARY_BUS, &secondary_bus);
+ if (rc) {
+ return rc;
+ } else {
+ sub_bus = (int) secondary_bus;
+
+ /* Save secondary bus cfg spc
+ * with this recursive call.
+ */
+ rc = cpqhp_save_config(ctrl, sub_bus, 0);
if (rc)
return rc;
+ ctrl->pci_bus->number = busnumber;
}
+ }
- function++;
+ index = 0;
+ new_slot = cpqhp_slot_find(busnumber, device, index++);
+ while (new_slot &&
+ (new_slot->function != (u8) function))
+ new_slot = cpqhp_slot_find(busnumber, device, index++);
- stop_it = 0;
-
- // this loop skips to the next present function
- // reading in Class Code and Header type.
-
- while ((function < max_functions)&&(!stop_it)) {
- rc = pci_bus_read_config_dword (ctrl->pci_bus, PCI_DEVFN(device, function), PCI_VENDOR_ID, &ID);
- if (ID == 0xFFFFFFFF) { // nothing there.
- function++;
- } else { // Something there
- rc = pci_bus_read_config_byte (ctrl->pci_bus, PCI_DEVFN(device, function), 0x0B, &class_code);
- if (rc)
- return rc;
-
- rc = pci_bus_read_config_byte (ctrl->pci_bus, PCI_DEVFN(device, function), PCI_HEADER_TYPE, &header_type);
- if (rc)
- return rc;
-
- stop_it++;
- }
- }
-
- } while (function < max_functions);
- } // End of IF (device in slot?)
- else if (is_hot_plug) {
- // Setup slot structure with entry for empty slot
- new_slot = cpqhp_slot_create(busnumber);
-
- if (new_slot == NULL) {
- return(1);
+ if (!new_slot) {
+ /* Setup slot structure. */
+ new_slot = cpqhp_slot_create(busnumber);
+ if (new_slot == NULL)
+ return 1;
}
new_slot->bus = (u8) busnumber;
new_slot->device = (u8) device;
- new_slot->function = 0;
- new_slot->is_a_board = 0;
- new_slot->presence_save = 0;
- new_slot->switch_save = 0;
- }
- } // End of FOR loop
+ new_slot->function = (u8) function;
+ new_slot->is_a_board = 1;
+ new_slot->switch_save = 0x10;
+ /* In case of unsupported board */
+ new_slot->status = DevError;
+ new_slot->pci_dev = pci_get_bus_and_slot(new_slot->bus, (new_slot->device << 3) | new_slot->function);
- return(0);
+ for (cloop = 0; cloop < 0x20; cloop++) {
+ rc = pci_bus_read_config_dword(ctrl->pci_bus, PCI_DEVFN(device, function), cloop << 2, (u32 *) & (new_slot-> config_space [cloop]));
+ if (rc)
+ return rc;
+ }
+
+ pci_dev_put(new_slot->pci_dev);
+
+ function++;
+
+ stop_it = 0;
+
+ /* this loop skips to the next present function
+ * reading in Class Code and Header type.
+ */
+ while ((function < max_functions) && (!stop_it)) {
+ rc = pci_bus_read_config_dword(ctrl->pci_bus, PCI_DEVFN(device, function), PCI_VENDOR_ID, &ID);
+ if (ID == 0xFFFFFFFF) {
+ function++;
+ continue;
+ }
+ rc = pci_bus_read_config_byte(ctrl->pci_bus, PCI_DEVFN(device, function), 0x0B, &class_code);
+ if (rc)
+ return rc;
+
+ rc = pci_bus_read_config_byte(ctrl->pci_bus, PCI_DEVFN(device, function), PCI_HEADER_TYPE, &header_type);
+ if (rc)
+ return rc;
+
+ stop_it++;
+ }
+
+ } while (function < max_functions);
+ } /* End of FOR loop */
+
+ return 0;
}
@@ -489,7 +468,7 @@
u8 secondary_bus;
int sub_bus;
int max_functions;
- int function;
+ int function = 0;
int cloop = 0;
int stop_it;
@@ -498,63 +477,58 @@
ctrl->pci_bus->number = new_slot->bus;
pci_bus_read_config_dword (ctrl->pci_bus, PCI_DEVFN(new_slot->device, 0), PCI_VENDOR_ID, &ID);
- if (ID != 0xFFFFFFFF) { // device in slot
- pci_bus_read_config_byte (ctrl->pci_bus, PCI_DEVFN(new_slot->device, 0), 0x0B, &class_code);
- pci_bus_read_config_byte (ctrl->pci_bus, PCI_DEVFN(new_slot->device, 0), PCI_HEADER_TYPE, &header_type);
-
- if (header_type & 0x80) // Multi-function device
- max_functions = 8;
- else
- max_functions = 1;
-
- function = 0;
-
- do {
- if ((header_type & 0x7F) == PCI_HEADER_TYPE_BRIDGE) { // PCI-PCI Bridge
- // Recurse the subordinate bus
- pci_bus_read_config_byte (ctrl->pci_bus, PCI_DEVFN(new_slot->device, function), PCI_SECONDARY_BUS, &secondary_bus);
-
- sub_bus = (int) secondary_bus;
-
- // Save the config headers for the secondary bus.
- rc = cpqhp_save_config(ctrl, sub_bus, 0);
- if (rc)
- return(rc);
- ctrl->pci_bus->number = new_slot->bus;
-
- } // End of IF
-
- new_slot->status = 0;
-
- for (cloop = 0; cloop < 0x20; cloop++) {
- pci_bus_read_config_dword (ctrl->pci_bus, PCI_DEVFN(new_slot->device, function), cloop << 2, (u32 *) & (new_slot-> config_space [cloop]));
- }
-
- function++;
-
- stop_it = 0;
-
- // this loop skips to the next present function
- // reading in the Class Code and the Header type.
-
- while ((function < max_functions) && (!stop_it)) {
- pci_bus_read_config_dword (ctrl->pci_bus, PCI_DEVFN(new_slot->device, function), PCI_VENDOR_ID, &ID);
-
- if (ID == 0xFFFFFFFF) { // nothing there.
- function++;
- } else { // Something there
- pci_bus_read_config_byte (ctrl->pci_bus, PCI_DEVFN(new_slot->device, function), 0x0B, &class_code);
-
- pci_bus_read_config_byte (ctrl->pci_bus, PCI_DEVFN(new_slot->device, function), PCI_HEADER_TYPE, &header_type);
-
- stop_it++;
- }
- }
-
- } while (function < max_functions);
- } // End of IF (device in slot?)
- else {
+ if (ID == 0xFFFFFFFF)
return 2;
+
+ pci_bus_read_config_byte(ctrl->pci_bus, PCI_DEVFN(new_slot->device, 0), 0x0B, &class_code);
+ pci_bus_read_config_byte(ctrl->pci_bus, PCI_DEVFN(new_slot->device, 0), PCI_HEADER_TYPE, &header_type);
+
+ if (header_type & 0x80) /* Multi-function device */
+ max_functions = 8;
+ else
+ max_functions = 1;
+
+ while (function < max_functions) {
+ if ((header_type & 0x7F) == PCI_HEADER_TYPE_BRIDGE) {
+ /* Recurse the subordinate bus */
+ pci_bus_read_config_byte (ctrl->pci_bus, PCI_DEVFN(new_slot->device, function), PCI_SECONDARY_BUS, &secondary_bus);
+
+ sub_bus = (int) secondary_bus;
+
+ /* Save the config headers for the secondary
+ * bus.
+ */
+ rc = cpqhp_save_config(ctrl, sub_bus, 0);
+ if (rc)
+ return(rc);
+ ctrl->pci_bus->number = new_slot->bus;
+
+ }
+
+ new_slot->status = 0;
+
+ for (cloop = 0; cloop < 0x20; cloop++)
+ pci_bus_read_config_dword(ctrl->pci_bus, PCI_DEVFN(new_slot->device, function), cloop << 2, (u32 *) & (new_slot-> config_space [cloop]));
+
+ function++;
+
+ stop_it = 0;
+
+ /* this loop skips to the next present function
+ * reading in the Class Code and the Header type.
+ */
+ while ((function < max_functions) && (!stop_it)) {
+ pci_bus_read_config_dword(ctrl->pci_bus, PCI_DEVFN(new_slot->device, function), PCI_VENDOR_ID, &ID);
+
+ if (ID == 0xFFFFFFFF)
+ function++;
+ else {
+ pci_bus_read_config_byte(ctrl->pci_bus, PCI_DEVFN(new_slot->device, function), 0x0B, &class_code);
+ pci_bus_read_config_byte(ctrl->pci_bus, PCI_DEVFN(new_slot->device, function), PCI_HEADER_TYPE, &header_type);
+ stop_it++;
+ }
+ }
+
}
return 0;
@@ -590,11 +564,10 @@
pci_bus->number = func->bus;
devfn = PCI_DEVFN(func->device, func->function);
- // Check for Bridge
+ /* Check for Bridge */
pci_bus_read_config_byte (pci_bus, devfn, PCI_HEADER_TYPE, &header_type);
if ((header_type & 0x7F) == PCI_HEADER_TYPE_BRIDGE) {
- // PCI-PCI Bridge
pci_bus_read_config_byte (pci_bus, devfn, PCI_SECONDARY_BUS, &secondary_bus);
sub_bus = (int) secondary_bus;
@@ -610,23 +583,27 @@
}
pci_bus->number = func->bus;
- //FIXME: this loop is duplicated in the non-bridge case. The two could be rolled together
- // Figure out IO and memory base lengths
+ /* FIXME: this loop is duplicated in the non-bridge
+ * case. The two could be rolled together. Figure out
+ * IO and memory base lengths.
+ */
for (cloop = 0x10; cloop <= 0x14; cloop += 4) {
temp_register = 0xFFFFFFFF;
pci_bus_write_config_dword (pci_bus, devfn, cloop, temp_register);
pci_bus_read_config_dword (pci_bus, devfn, cloop, &base);
-
- if (base) { // If this register is implemented
+ /* If this register is implemented */
+ if (base) {
if (base & 0x01L) {
- // IO base
- // set base = amount of IO space requested
+ /* IO base
+ * set base = amount of IO space
+ * requested
+ */
base = base & 0xFFFFFFFE;
base = (~base) + 1;
type = 1;
} else {
- // memory base
+ /* memory base */
base = base & 0xFFFFFFF0;
base = (~base) + 1;
@@ -637,32 +614,36 @@
type = 0;
}
- // Save information in slot structure
+ /* Save information in slot structure */
func->base_length[(cloop - 0x10) >> 2] =
base;
func->base_type[(cloop - 0x10) >> 2] = type;
- } // End of base register loop
+ } /* End of base register loop */
-
- } else if ((header_type & 0x7F) == 0x00) { // PCI-PCI Bridge
- // Figure out IO and memory base lengths
+ } else if ((header_type & 0x7F) == 0x00) {
+ /* Figure out IO and memory base lengths */
for (cloop = 0x10; cloop <= 0x24; cloop += 4) {
temp_register = 0xFFFFFFFF;
pci_bus_write_config_dword (pci_bus, devfn, cloop, temp_register);
pci_bus_read_config_dword (pci_bus, devfn, cloop, &base);
- if (base) { // If this register is implemented
+ /* If this register is implemented */
+ if (base) {
if (base & 0x01L) {
- // IO base
- // base = amount of IO space requested
+ /* IO base
+ * base = amount of IO space
+ * requested
+ */
base = base & 0xFFFFFFFE;
base = (~base) + 1;
type = 1;
} else {
- // memory base
- // base = amount of memory space requested
+ /* memory base
+ * base = amount of memory
+ * space requested
+ */
base = base & 0xFFFFFFF0;
base = (~base) + 1;
@@ -673,16 +654,16 @@
type = 0;
}
- // Save information in slot structure
+ /* Save information in slot structure */
func->base_length[(cloop - 0x10) >> 2] = base;
func->base_type[(cloop - 0x10) >> 2] = type;
- } // End of base register loop
+ } /* End of base register loop */
- } else { // Some other unknown header type
+ } else { /* Some other unknown header type */
}
- // find the next device in this slot
+ /* find the next device in this slot */
func = cpqhp_slot_find(func->bus, func->device, index++);
}
@@ -728,18 +709,18 @@
pci_bus->number = func->bus;
devfn = PCI_DEVFN(func->device, func->function);
- // Save the command register
+ /* Save the command register */
pci_bus_read_config_word(pci_bus, devfn, PCI_COMMAND, &save_command);
- // disable card
+ /* disable card */
command = 0x00;
pci_bus_write_config_word(pci_bus, devfn, PCI_COMMAND, command);
- // Check for Bridge
+ /* Check for Bridge */
pci_bus_read_config_byte(pci_bus, devfn, PCI_HEADER_TYPE, &header_type);
- if ((header_type & 0x7F) == PCI_HEADER_TYPE_BRIDGE) { // PCI-PCI Bridge
- // Clear Bridge Control Register
+ if ((header_type & 0x7F) == PCI_HEADER_TYPE_BRIDGE) {
+ /* Clear Bridge Control Register */
command = 0x00;
pci_bus_write_config_word(pci_bus, devfn, PCI_BRIDGE_CONTROL, command);
pci_bus_read_config_byte(pci_bus, devfn, PCI_SECONDARY_BUS, &secondary_bus);
@@ -755,7 +736,7 @@
bus_node->next = func->bus_head;
func->bus_head = bus_node;
- // Save IO base and Limit registers
+ /* Save IO base and Limit registers */
pci_bus_read_config_byte(pci_bus, devfn, PCI_IO_BASE, &b_base);
pci_bus_read_config_byte(pci_bus, devfn, PCI_IO_LIMIT, &b_length);
@@ -771,7 +752,7 @@
func->io_head = io_node;
}
- // Save memory base and Limit registers
+ /* Save memory base and Limit registers */
pci_bus_read_config_word(pci_bus, devfn, PCI_MEMORY_BASE, &w_base);
pci_bus_read_config_word(pci_bus, devfn, PCI_MEMORY_LIMIT, &w_length);
@@ -787,7 +768,7 @@
func->mem_head = mem_node;
}
- // Save prefetchable memory base and Limit registers
+ /* Save prefetchable memory base and Limit registers */
pci_bus_read_config_word(pci_bus, devfn, PCI_PREF_MEMORY_BASE, &w_base);
pci_bus_read_config_word(pci_bus, devfn, PCI_PREF_MEMORY_LIMIT, &w_length);
@@ -802,7 +783,7 @@
p_mem_node->next = func->p_mem_head;
func->p_mem_head = p_mem_node;
}
- // Figure out IO and memory base lengths
+ /* Figure out IO and memory base lengths */
for (cloop = 0x10; cloop <= 0x14; cloop += 4) {
pci_bus_read_config_dword (pci_bus, devfn, cloop, &save_base);
@@ -812,11 +793,14 @@
temp_register = base;
- if (base) { // If this register is implemented
+ /* If this register is implemented */
+ if (base) {
if (((base & 0x03L) == 0x01)
&& (save_command & 0x01)) {
- // IO base
- // set temp_register = amount of IO space requested
+ /* IO base
+ * set temp_register = amount
+ * of IO space requested
+ */
temp_register = base & 0xFFFFFFFE;
temp_register = (~temp_register) + 1;
@@ -834,7 +818,7 @@
} else
if (((base & 0x0BL) == 0x08)
&& (save_command & 0x02)) {
- // prefetchable memory base
+ /* prefetchable memory base */
temp_register = base & 0xFFFFFFF0;
temp_register = (~temp_register) + 1;
@@ -851,7 +835,7 @@
} else
if (((base & 0x0BL) == 0x00)
&& (save_command & 0x02)) {
- // prefetchable memory base
+ /* prefetchable memory base */
temp_register = base & 0xFFFFFFF0;
temp_register = (~temp_register) + 1;
@@ -868,9 +852,10 @@
} else
return(1);
}
- } // End of base register loop
- } else if ((header_type & 0x7F) == 0x00) { // Standard header
- // Figure out IO and memory base lengths
+ } /* End of base register loop */
+ /* Standard header */
+ } else if ((header_type & 0x7F) == 0x00) {
+ /* Figure out IO and memory base lengths */
for (cloop = 0x10; cloop <= 0x24; cloop += 4) {
pci_bus_read_config_dword(pci_bus, devfn, cloop, &save_base);
@@ -880,11 +865,14 @@
temp_register = base;
- if (base) { // If this register is implemented
+ /* If this register is implemented */
+ if (base) {
if (((base & 0x03L) == 0x01)
&& (save_command & 0x01)) {
- // IO base
- // set temp_register = amount of IO space requested
+ /* IO base
+ * set temp_register = amount
+ * of IO space requested
+ */
temp_register = base & 0xFFFFFFFE;
temp_register = (~temp_register) + 1;
@@ -901,7 +889,7 @@
} else
if (((base & 0x0BL) == 0x08)
&& (save_command & 0x02)) {
- // prefetchable memory base
+ /* prefetchable memory base */
temp_register = base & 0xFFFFFFF0;
temp_register = (~temp_register) + 1;
@@ -918,7 +906,7 @@
} else
if (((base & 0x0BL) == 0x00)
&& (save_command & 0x02)) {
- // prefetchable memory base
+ /* prefetchable memory base */
temp_register = base & 0xFFFFFFF0;
temp_register = (~temp_register) + 1;
@@ -935,15 +923,14 @@
} else
return(1);
}
- } // End of base register loop
- } else { // Some other unknown header type
+ } /* End of base register loop */
}
- // find the next device in this slot
+ /* find the next device in this slot */
func = cpqhp_slot_find(func->bus, func->device, index++);
}
- return(0);
+ return 0;
}
@@ -975,16 +962,16 @@
pci_bus->number = func->bus;
devfn = PCI_DEVFN(func->device, func->function);
- // Start at the top of config space so that the control
- // registers are programmed last
- for (cloop = 0x3C; cloop > 0; cloop -= 4) {
+ /* Start at the top of config space so that the control
+ * registers are programmed last
+ */
+ for (cloop = 0x3C; cloop > 0; cloop -= 4)
pci_bus_write_config_dword (pci_bus, devfn, cloop, func->config_space[cloop >> 2]);
- }
pci_bus_read_config_byte (pci_bus, devfn, PCI_HEADER_TYPE, &header_type);
- // If this is a bridge device, restore subordinate devices
- if ((header_type & 0x7F) == PCI_HEADER_TYPE_BRIDGE) { // PCI-PCI Bridge
+ /* If this is a bridge device, restore subordinate devices */
+ if ((header_type & 0x7F) == PCI_HEADER_TYPE_BRIDGE) {
pci_bus_read_config_byte (pci_bus, devfn, PCI_SECONDARY_BUS, &secondary_bus);
sub_bus = (int) secondary_bus;
@@ -1000,8 +987,9 @@
}
} else {
- // Check all the base Address Registers to make sure
- // they are the same. If not, the board is different.
+ /* Check all the base Address Registers to make sure
+ * they are the same. If not, the board is different.
+ */
for (cloop = 16; cloop < 40; cloop += 4) {
pci_bus_read_config_dword (pci_bus, devfn, cloop, &temp);
@@ -1058,27 +1046,28 @@
pci_bus_read_config_dword (pci_bus, devfn, PCI_VENDOR_ID, &temp_register);
- // No adapter present
+ /* No adapter present */
if (temp_register == 0xFFFFFFFF)
return(NO_ADAPTER_PRESENT);
if (temp_register != func->config_space[0])
return(ADAPTER_NOT_SAME);
- // Check for same revision number and class code
+ /* Check for same revision number and class code */
pci_bus_read_config_dword (pci_bus, devfn, PCI_CLASS_REVISION, &temp_register);
- // Adapter not the same
+ /* Adapter not the same */
if (temp_register != func->config_space[0x08 >> 2])
return(ADAPTER_NOT_SAME);
- // Check for Bridge
+ /* Check for Bridge */
pci_bus_read_config_byte (pci_bus, devfn, PCI_HEADER_TYPE, &header_type);
- if ((header_type & 0x7F) == PCI_HEADER_TYPE_BRIDGE) { // PCI-PCI Bridge
- // In order to continue checking, we must program the
- // bus registers in the bridge to respond to accesses
- // for it's subordinate bus(es)
+ if ((header_type & 0x7F) == PCI_HEADER_TYPE_BRIDGE) {
+ /* In order to continue checking, we must program the
+ * bus registers in the bridge to respond to accesses
+ * for its subordinate bus(es)
+ */
temp_register = func->config_space[0x18 >> 2];
pci_bus_write_config_dword (pci_bus, devfn, PCI_PRIMARY_BUS, temp_register);
@@ -1096,35 +1085,39 @@
}
}
- // Check to see if it is a standard config header
+ /* Check to see if it is a standard config header */
else if ((header_type & 0x7F) == PCI_HEADER_TYPE_NORMAL) {
- // Check subsystem vendor and ID
+ /* Check subsystem vendor and ID */
pci_bus_read_config_dword (pci_bus, devfn, PCI_SUBSYSTEM_VENDOR_ID, &temp_register);
if (temp_register != func->config_space[0x2C >> 2]) {
- // If it's a SMART-2 and the register isn't filled
- // in, ignore the difference because
- // they just have an old rev of the firmware
-
+ /* If it's a SMART-2 and the register isn't
+ * filled in, ignore the difference because
+ * they just have an old rev of the firmware
+ */
if (!((func->config_space[0] == 0xAE100E11)
&& (temp_register == 0x00L)))
return(ADAPTER_NOT_SAME);
}
- // Figure out IO and memory base lengths
+ /* Figure out IO and memory base lengths */
for (cloop = 0x10; cloop <= 0x24; cloop += 4) {
temp_register = 0xFFFFFFFF;
pci_bus_write_config_dword (pci_bus, devfn, cloop, temp_register);
pci_bus_read_config_dword (pci_bus, devfn, cloop, &base);
- if (base) { // If this register is implemented
+
+ /* If this register is implemented */
+ if (base) {
if (base & 0x01L) {
- // IO base
- // set base = amount of IO space requested
+ /* IO base
+ * set base = amount of IO
+ * space requested
+ */
base = base & 0xFFFFFFFE;
base = (~base) + 1;
type = 1;
} else {
- // memory base
+ /* memory base */
base = base & 0xFFFFFFF0;
base = (~base) + 1;
@@ -1135,23 +1128,24 @@
type = 0;
}
- // Check information in slot structure
+ /* Check information in slot structure */
if (func->base_length[(cloop - 0x10) >> 2] != base)
return(ADAPTER_NOT_SAME);
if (func->base_type[(cloop - 0x10) >> 2] != type)
return(ADAPTER_NOT_SAME);
- } // End of base register loop
+ } /* End of base register loop */
- } // End of (type 0 config space) else
+ } /* End of (type 0 config space) else */
else {
- // this is not a type 0 or 1 config space header so
- // we don't know how to do it
+ /* this is not a type 0 or 1 config space header so
+ * we don't know how to do it
+ */
return(DEVICE_TYPE_NOT_SUPPORTED);
}
- // Get the next function
+ /* Get the next function */
func = cpqhp_slot_find(func->bus, func->device, index++);
}
@@ -1168,7 +1162,7 @@
* this function is for hot plug ADD!
*
* returns 0 if success
- */
+ */
int cpqhp_find_available_resources(struct controller *ctrl, void __iomem *rom_start)
{
u8 temp;
@@ -1187,10 +1181,10 @@
rom_resource_table = detect_HRT_floating_pointer(rom_start, rom_start+0xffff);
dbg("rom_resource_table = %p\n", rom_resource_table);
- if (rom_resource_table == NULL) {
+ if (rom_resource_table == NULL)
return -ENODEV;
- }
- // Sum all resources and setup resource maps
+
+ /* Sum all resources and setup resource maps */
unused_IRQ = readl(rom_resource_table + UNUSED_IRQ);
dbg("unused_IRQ = %x\n", unused_IRQ);
@@ -1222,13 +1216,11 @@
temp = 0;
- if (!cpqhp_nic_irq) {
+ if (!cpqhp_nic_irq)
cpqhp_nic_irq = ctrl->cfgspc_irq;
- }
- if (!cpqhp_disk_irq) {
+ if (!cpqhp_disk_irq)
cpqhp_disk_irq = ctrl->cfgspc_irq;
- }
dbg("cpqhp_disk_irq, cpqhp_nic_irq= %d, %d\n", cpqhp_disk_irq, cpqhp_nic_irq);
@@ -1262,13 +1254,13 @@
dev_func, io_base, io_length, mem_base, mem_length, pre_mem_base, pre_mem_length,
primary_bus, secondary_bus, max_bus);
- // If this entry isn't for our controller's bus, ignore it
+ /* If this entry isn't for our controller's bus, ignore it */
if (primary_bus != ctrl->bus) {
i--;
one_slot += sizeof (struct slot_rt);
continue;
}
- // find out if this entry is for an occupied slot
+ /* find out if this entry is for an occupied slot */
ctrl->pci_bus->number = primary_bus;
pci_bus_read_config_dword (ctrl->pci_bus, dev_func, PCI_VENDOR_ID, &temp_dword);
dbg("temp_D_word = %x\n", temp_dword);
@@ -1282,13 +1274,13 @@
func = cpqhp_slot_find(primary_bus, dev_func >> 3, index++);
}
- // If we can't find a match, skip this table entry
+ /* If we can't find a match, skip this table entry */
if (!func) {
i--;
one_slot += sizeof (struct slot_rt);
continue;
}
- // this may not work and shouldn't be used
+ /* this may not work and shouldn't be used */
if (secondary_bus != primary_bus)
bridged_slot = 1;
else
@@ -1301,7 +1293,7 @@
}
- // If we've got a valid IO base, use it
+ /* If we've got a valid IO base, use it */
temp_dword = io_base + io_length;
@@ -1325,7 +1317,7 @@
}
}
- // If we've got a valid memory base, use it
+ /* If we've got a valid memory base, use it */
temp_dword = mem_base + mem_length;
if ((mem_base) && (temp_dword < 0x10000)) {
mem_node = kmalloc(sizeof(*mem_node), GFP_KERNEL);
@@ -1348,8 +1340,9 @@
}
}
- // If we've got a valid prefetchable memory base, and
- // the base + length isn't greater than 0xFFFF
+ /* If we've got a valid prefetchable memory base, and
+ * the base + length isn't greater than 0xFFFF
+ */
temp_dword = pre_mem_base + pre_mem_length;
if ((pre_mem_base) && (temp_dword < 0x10000)) {
p_mem_node = kmalloc(sizeof(*p_mem_node), GFP_KERNEL);
@@ -1372,9 +1365,10 @@
}
}
- // If we've got a valid bus number, use it
- // The second condition is to ignore bus numbers on
- // populated slots that don't have PCI-PCI bridges
+ /* If we've got a valid bus number, use it
+ * The second condition is to ignore bus numbers on
+ * populated slots that don't have PCI-PCI bridges
+ */
if (secondary_bus && (secondary_bus != primary_bus)) {
bus_node = kmalloc(sizeof(*bus_node), GFP_KERNEL);
if (!bus_node)
@@ -1398,8 +1392,9 @@
one_slot += sizeof (struct slot_rt);
}
- // If all of the following fail, we don't have any resources for
- // hot plug add
+ /* If all of the following fail, we don't have any resources for
+ * hot plug add
+ */
rc = 1;
rc &= cpqhp_resource_sort_and_combine(&(ctrl->mem_head));
rc &= cpqhp_resource_sort_and_combine(&(ctrl->p_mem_head));
diff --git a/drivers/pci/hotplug/ibmphp_core.c b/drivers/pci/hotplug/ibmphp_core.c
index 42e4260..7485ffd 100644
--- a/drivers/pci/hotplug/ibmphp_core.c
+++ b/drivers/pci/hotplug/ibmphp_core.c
@@ -1318,7 +1318,6 @@
}
struct hotplug_slot_ops ibmphp_hotplug_slot_ops = {
- .owner = THIS_MODULE,
.set_attention_status = set_attention_status,
.enable_slot = enable_slot,
.disable_slot = ibmphp_disable_slot,
@@ -1421,3 +1420,4 @@
}
module_init(ibmphp_init);
+module_exit(ibmphp_exit);
diff --git a/drivers/pci/hotplug/pci_hotplug_core.c b/drivers/pci/hotplug/pci_hotplug_core.c
index 535fce0..8445804 100644
--- a/drivers/pci/hotplug/pci_hotplug_core.c
+++ b/drivers/pci/hotplug/pci_hotplug_core.c
@@ -347,125 +347,129 @@
.store = test_write_file
};
-static int has_power_file(struct pci_slot *pci_slot)
+static bool has_power_file(struct pci_slot *pci_slot)
{
struct hotplug_slot *slot = pci_slot->hotplug;
if ((!slot) || (!slot->ops))
- return -ENODEV;
+ return false;
if ((slot->ops->enable_slot) ||
(slot->ops->disable_slot) ||
(slot->ops->get_power_status))
- return 0;
- return -ENOENT;
+ return true;
+ return false;
}
-static int has_attention_file(struct pci_slot *pci_slot)
+static bool has_attention_file(struct pci_slot *pci_slot)
{
struct hotplug_slot *slot = pci_slot->hotplug;
if ((!slot) || (!slot->ops))
- return -ENODEV;
+ return false;
if ((slot->ops->set_attention_status) ||
(slot->ops->get_attention_status))
- return 0;
- return -ENOENT;
+ return true;
+ return false;
}
-static int has_latch_file(struct pci_slot *pci_slot)
+static bool has_latch_file(struct pci_slot *pci_slot)
{
struct hotplug_slot *slot = pci_slot->hotplug;
if ((!slot) || (!slot->ops))
- return -ENODEV;
+ return false;
if (slot->ops->get_latch_status)
- return 0;
- return -ENOENT;
+ return true;
+ return false;
}
-static int has_adapter_file(struct pci_slot *pci_slot)
+static bool has_adapter_file(struct pci_slot *pci_slot)
{
struct hotplug_slot *slot = pci_slot->hotplug;
if ((!slot) || (!slot->ops))
- return -ENODEV;
+ return false;
if (slot->ops->get_adapter_status)
- return 0;
- return -ENOENT;
+ return true;
+ return false;
}
-static int has_max_bus_speed_file(struct pci_slot *pci_slot)
+static bool has_max_bus_speed_file(struct pci_slot *pci_slot)
{
struct hotplug_slot *slot = pci_slot->hotplug;
if ((!slot) || (!slot->ops))
- return -ENODEV;
+ return false;
if (slot->ops->get_max_bus_speed)
- return 0;
- return -ENOENT;
+ return true;
+ return false;
}
-static int has_cur_bus_speed_file(struct pci_slot *pci_slot)
+static bool has_cur_bus_speed_file(struct pci_slot *pci_slot)
{
struct hotplug_slot *slot = pci_slot->hotplug;
if ((!slot) || (!slot->ops))
- return -ENODEV;
+ return false;
if (slot->ops->get_cur_bus_speed)
- return 0;
- return -ENOENT;
+ return true;
+ return false;
}
-static int has_test_file(struct pci_slot *pci_slot)
+static bool has_test_file(struct pci_slot *pci_slot)
{
struct hotplug_slot *slot = pci_slot->hotplug;
if ((!slot) || (!slot->ops))
- return -ENODEV;
+ return false;
if (slot->ops->hardware_test)
- return 0;
- return -ENOENT;
+ return true;
+ return false;
}
static int fs_add_slot(struct pci_slot *slot)
{
int retval = 0;
- if (has_power_file(slot) == 0) {
- retval = sysfs_create_file(&slot->kobj, &hotplug_slot_attr_power.attr);
+ /* Create symbolic link to the hotplug driver module */
+ pci_hp_create_module_link(slot);
+
+ if (has_power_file(slot)) {
+ retval = sysfs_create_file(&slot->kobj,
+ &hotplug_slot_attr_power.attr);
if (retval)
goto exit_power;
}
- if (has_attention_file(slot) == 0) {
+ if (has_attention_file(slot)) {
retval = sysfs_create_file(&slot->kobj,
&hotplug_slot_attr_attention.attr);
if (retval)
goto exit_attention;
}
- if (has_latch_file(slot) == 0) {
+ if (has_latch_file(slot)) {
retval = sysfs_create_file(&slot->kobj,
&hotplug_slot_attr_latch.attr);
if (retval)
goto exit_latch;
}
- if (has_adapter_file(slot) == 0) {
+ if (has_adapter_file(slot)) {
retval = sysfs_create_file(&slot->kobj,
&hotplug_slot_attr_presence.attr);
if (retval)
goto exit_adapter;
}
- if (has_max_bus_speed_file(slot) == 0) {
+ if (has_max_bus_speed_file(slot)) {
retval = sysfs_create_file(&slot->kobj,
- &hotplug_slot_attr_max_bus_speed.attr);
+ &hotplug_slot_attr_max_bus_speed.attr);
if (retval)
goto exit_max_speed;
}
- if (has_cur_bus_speed_file(slot) == 0) {
+ if (has_cur_bus_speed_file(slot)) {
retval = sysfs_create_file(&slot->kobj,
- &hotplug_slot_attr_cur_bus_speed.attr);
+ &hotplug_slot_attr_cur_bus_speed.attr);
if (retval)
goto exit_cur_speed;
}
- if (has_test_file(slot) == 0) {
+ if (has_test_file(slot)) {
retval = sysfs_create_file(&slot->kobj,
&hotplug_slot_attr_test.attr);
if (retval)
@@ -475,55 +479,61 @@
goto exit;
exit_test:
- if (has_cur_bus_speed_file(slot) == 0)
- sysfs_remove_file(&slot->kobj, &hotplug_slot_attr_cur_bus_speed.attr);
-
+ if (has_cur_bus_speed_file(slot))
+ sysfs_remove_file(&slot->kobj,
+ &hotplug_slot_attr_cur_bus_speed.attr);
exit_cur_speed:
- if (has_max_bus_speed_file(slot) == 0)
- sysfs_remove_file(&slot->kobj, &hotplug_slot_attr_max_bus_speed.attr);
-
+ if (has_max_bus_speed_file(slot))
+ sysfs_remove_file(&slot->kobj,
+ &hotplug_slot_attr_max_bus_speed.attr);
exit_max_speed:
- if (has_adapter_file(slot) == 0)
- sysfs_remove_file(&slot->kobj, &hotplug_slot_attr_presence.attr);
-
+ if (has_adapter_file(slot))
+ sysfs_remove_file(&slot->kobj,
+ &hotplug_slot_attr_presence.attr);
exit_adapter:
- if (has_latch_file(slot) == 0)
+ if (has_latch_file(slot))
sysfs_remove_file(&slot->kobj, &hotplug_slot_attr_latch.attr);
-
exit_latch:
- if (has_attention_file(slot) == 0)
- sysfs_remove_file(&slot->kobj, &hotplug_slot_attr_attention.attr);
-
+ if (has_attention_file(slot))
+ sysfs_remove_file(&slot->kobj,
+ &hotplug_slot_attr_attention.attr);
exit_attention:
- if (has_power_file(slot) == 0)
+ if (has_power_file(slot))
sysfs_remove_file(&slot->kobj, &hotplug_slot_attr_power.attr);
exit_power:
+ pci_hp_remove_module_link(slot);
exit:
return retval;
}
static void fs_remove_slot(struct pci_slot *slot)
{
- if (has_power_file(slot) == 0)
+ if (has_power_file(slot))
sysfs_remove_file(&slot->kobj, &hotplug_slot_attr_power.attr);
- if (has_attention_file(slot) == 0)
- sysfs_remove_file(&slot->kobj, &hotplug_slot_attr_attention.attr);
+ if (has_attention_file(slot))
+ sysfs_remove_file(&slot->kobj,
+ &hotplug_slot_attr_attention.attr);
- if (has_latch_file(slot) == 0)
+ if (has_latch_file(slot))
sysfs_remove_file(&slot->kobj, &hotplug_slot_attr_latch.attr);
- if (has_adapter_file(slot) == 0)
- sysfs_remove_file(&slot->kobj, &hotplug_slot_attr_presence.attr);
+ if (has_adapter_file(slot))
+ sysfs_remove_file(&slot->kobj,
+ &hotplug_slot_attr_presence.attr);
- if (has_max_bus_speed_file(slot) == 0)
- sysfs_remove_file(&slot->kobj, &hotplug_slot_attr_max_bus_speed.attr);
+ if (has_max_bus_speed_file(slot))
+ sysfs_remove_file(&slot->kobj,
+ &hotplug_slot_attr_max_bus_speed.attr);
- if (has_cur_bus_speed_file(slot) == 0)
- sysfs_remove_file(&slot->kobj, &hotplug_slot_attr_cur_bus_speed.attr);
+ if (has_cur_bus_speed_file(slot))
+ sysfs_remove_file(&slot->kobj,
+ &hotplug_slot_attr_cur_bus_speed.attr);
- if (has_test_file(slot) == 0)
+ if (has_test_file(slot))
sysfs_remove_file(&slot->kobj, &hotplug_slot_attr_test.attr);
+
+ pci_hp_remove_module_link(slot);
}
static struct hotplug_slot *get_slot_from_name (const char *name)
@@ -540,10 +550,10 @@
}
/**
- * pci_hp_register - register a hotplug_slot with the PCI hotplug subsystem
+ * __pci_hp_register - register a hotplug_slot with the PCI hotplug subsystem
* @bus: bus this slot is on
* @slot: pointer to the &struct hotplug_slot to register
- * @slot_nr: slot number
+ * @devnr: device number
* @name: name registered with kobject core
*
* Registers a hotplug slot with the pci hotplug subsystem, which will allow
@@ -551,8 +561,9 @@
*
* Returns 0 if successful, anything else for an error.
*/
-int pci_hp_register(struct hotplug_slot *slot, struct pci_bus *bus, int slot_nr,
- const char *name)
+int __pci_hp_register(struct hotplug_slot *slot, struct pci_bus *bus,
+ int devnr, const char *name,
+ struct module *owner, const char *mod_name)
{
int result;
struct pci_slot *pci_slot;
@@ -567,14 +578,16 @@
return -EINVAL;
}
- mutex_lock(&pci_hp_mutex);
+ slot->ops->owner = owner;
+ slot->ops->mod_name = mod_name;
+ mutex_lock(&pci_hp_mutex);
/*
* No problems if we call this interface from both ACPI_PCI_SLOT
* driver and call it here again. If we've already created the
* pci_slot, the interface will simply bump the refcount.
*/
- pci_slot = pci_create_slot(bus, slot_nr, name, slot);
+ pci_slot = pci_create_slot(bus, devnr, name, slot);
if (IS_ERR(pci_slot)) {
result = PTR_ERR(pci_slot);
goto out;
@@ -684,6 +697,6 @@
module_param(debug, bool, 0644);
MODULE_PARM_DESC(debug, "Debugging mode enabled or not");
-EXPORT_SYMBOL_GPL(pci_hp_register);
+EXPORT_SYMBOL_GPL(__pci_hp_register);
EXPORT_SYMBOL_GPL(pci_hp_deregister);
EXPORT_SYMBOL_GPL(pci_hp_change_slot_info);
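Since __pci_hp_register() now records ops->owner and ops->mod_name itself, the individual drivers in the hunks around this one simply drop their ".owner = THIS_MODULE" initializers. A minimal driver-side registration then looks roughly like the sketch below; it assumes pci_hp_register() survives as a header wrapper that passes THIS_MODULE/KBUILD_MODNAME on behalf of the caller, and every demo_* name is illustrative, not part of this patch.

	#include <linux/module.h>
	#include <linux/pci.h>
	#include <linux/pci_hotplug.h>

	static int demo_enable_slot(struct hotplug_slot *slot)  { return 0; }
	static int demo_disable_slot(struct hotplug_slot *slot) { return 0; }
	static void demo_release(struct hotplug_slot *slot) { }

	/* no .owner initializer: the core fills it in at registration time */
	static struct hotplug_slot_ops demo_slot_ops = {
		.enable_slot	= demo_enable_slot,
		.disable_slot	= demo_disable_slot,
	};

	static struct hotplug_slot_info demo_slot_info;

	static struct hotplug_slot demo_slot = {
		.ops		= &demo_slot_ops,
		.info		= &demo_slot_info,
		.release	= demo_release,
	};

	static int demo_register_slot(struct pci_bus *bus, int devnr)
	{
		/* fs_add_slot() also creates the "module" symlink for the
		 * registered slot via pci_hp_create_module_link()
		 */
		return pci_hp_register(&demo_slot, bus, devnr, "demo-slot");
	}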
diff --git a/drivers/pci/hotplug/pciehp.h b/drivers/pci/hotplug/pciehp.h
index 0a36854..e6cf096 100644
--- a/drivers/pci/hotplug/pciehp.h
+++ b/drivers/pci/hotplug/pciehp.h
@@ -81,7 +81,6 @@
struct hpc_ops *hpc_ops;
struct hotplug_slot *hotplug_slot;
struct list_head slot_list;
- unsigned long last_emi_toggle;
struct delayed_work work; /* work for button event */
struct mutex lock;
};
@@ -203,8 +202,6 @@
int (*set_attention_status)(struct slot *slot, u8 status);
int (*get_latch_status)(struct slot *slot, u8 *status);
int (*get_adapter_status)(struct slot *slot, u8 *status);
- int (*get_emi_status)(struct slot *slot, u8 *status);
- int (*toggle_emi)(struct slot *slot);
int (*get_max_bus_speed)(struct slot *slot, enum pci_bus_speed *speed);
int (*get_cur_bus_speed)(struct slot *slot, enum pci_bus_speed *speed);
int (*get_max_lnk_width)(struct slot *slot, enum pcie_link_width *val);
diff --git a/drivers/pci/hotplug/pciehp_core.c b/drivers/pci/hotplug/pciehp_core.c
index fb254b2..2317557 100644
--- a/drivers/pci/hotplug/pciehp_core.c
+++ b/drivers/pci/hotplug/pciehp_core.c
@@ -73,7 +73,6 @@
static int get_cur_bus_speed (struct hotplug_slot *slot, enum pci_bus_speed *value);
static struct hotplug_slot_ops pciehp_hotplug_slot_ops = {
- .owner = THIS_MODULE,
.set_attention_status = set_attention_status,
.enable_slot = enable_slot,
.disable_slot = disable_slot,
@@ -85,99 +84,6 @@
.get_cur_bus_speed = get_cur_bus_speed,
};
-/*
- * Check the status of the Electro Mechanical Interlock (EMI)
- */
-static int get_lock_status(struct hotplug_slot *hotplug_slot, u8 *value)
-{
- struct slot *slot = hotplug_slot->private;
- return (slot->hpc_ops->get_emi_status(slot, value));
-}
-
-/*
- * sysfs interface for the Electro Mechanical Interlock (EMI)
- * 1 == locked, 0 == unlocked
- */
-static ssize_t lock_read_file(struct hotplug_slot *slot, char *buf)
-{
- int retval;
- u8 value;
-
- retval = get_lock_status(slot, &value);
- if (retval)
- goto lock_read_exit;
- retval = sprintf (buf, "%d\n", value);
-
-lock_read_exit:
- return retval;
-}
-
-/*
- * Change the status of the Electro Mechanical Interlock (EMI)
- * This is a toggle - in addition there must be at least 1 second
- * in between toggles.
- */
-static int set_lock_status(struct hotplug_slot *hotplug_slot, u8 status)
-{
- struct slot *slot = hotplug_slot->private;
- int retval;
- u8 value;
-
- mutex_lock(&slot->ctrl->crit_sect);
-
- /* has it been >1 sec since our last toggle? */
- if ((get_seconds() - slot->last_emi_toggle) < 1) {
- mutex_unlock(&slot->ctrl->crit_sect);
- return -EINVAL;
- }
-
- /* see what our current state is */
- retval = get_lock_status(hotplug_slot, &value);
- if (retval || (value == status))
- goto set_lock_exit;
-
- slot->hpc_ops->toggle_emi(slot);
-set_lock_exit:
- mutex_unlock(&slot->ctrl->crit_sect);
- return 0;
-}
-
-/*
- * sysfs interface which allows the user to toggle the Electro Mechanical
- * Interlock. Valid values are either 0 or 1. 0 == unlock, 1 == lock
- */
-static ssize_t lock_write_file(struct hotplug_slot *hotplug_slot,
- const char *buf, size_t count)
-{
- struct slot *slot = hotplug_slot->private;
- unsigned long llock;
- u8 lock;
- int retval = 0;
-
- llock = simple_strtoul(buf, NULL, 10);
- lock = (u8)(llock & 0xff);
-
- switch (lock) {
- case 0:
- case 1:
- retval = set_lock_status(hotplug_slot, lock);
- break;
- default:
- ctrl_err(slot->ctrl, "%d is an invalid lock value\n",
- lock);
- retval = -EINVAL;
- }
- if (retval)
- return retval;
- return count;
-}
-
-static struct hotplug_slot_attribute hotplug_slot_attr_lock = {
- .attr = {.name = "lock", .mode = S_IFREG | S_IRUGO | S_IWUSR},
- .show = lock_read_file,
- .store = lock_write_file
-};
-
/**
* release_slot - free up the memory used by a slot
* @hotplug_slot: slot to free
@@ -236,17 +142,6 @@
get_attention_status(hotplug_slot, &info->attention_status);
get_latch_status(hotplug_slot, &info->latch_status);
get_adapter_status(hotplug_slot, &info->adapter_status);
- /* create additional sysfs entries */
- if (EMI(ctrl)) {
- retval = sysfs_create_file(&hotplug_slot->pci_slot->kobj,
- &hotplug_slot_attr_lock.attr);
- if (retval) {
- pci_hp_deregister(hotplug_slot);
- ctrl_err(ctrl, "Cannot create additional sysfs "
- "entries\n");
- goto error_info;
- }
- }
}
return 0;
@@ -261,13 +156,8 @@
static void cleanup_slots(struct controller *ctrl)
{
struct slot *slot;
-
- list_for_each_entry(slot, &ctrl->slot_list, slot_list) {
- if (EMI(ctrl))
- sysfs_remove_file(&slot->hotplug_slot->pci_slot->kobj,
- &hotplug_slot_attr_lock.attr);
+ list_for_each_entry(slot, &ctrl->slot_list, slot_list)
pci_hp_deregister(slot->hotplug_slot);
- }
}
/*
diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c
index 07bd321..5281325 100644
--- a/drivers/pci/hotplug/pciehp_hpc.c
+++ b/drivers/pci/hotplug/pciehp_hpc.c
@@ -422,35 +422,6 @@
return !!(slot_status & PCI_EXP_SLTSTA_PFD);
}
-static int hpc_get_emi_status(struct slot *slot, u8 *status)
-{
- struct controller *ctrl = slot->ctrl;
- u16 slot_status;
- int retval;
-
- retval = pciehp_readw(ctrl, PCI_EXP_SLTSTA, &slot_status);
- if (retval) {
- ctrl_err(ctrl, "Cannot check EMI status\n");
- return retval;
- }
- *status = !!(slot_status & PCI_EXP_SLTSTA_EIS);
- return retval;
-}
-
-static int hpc_toggle_emi(struct slot *slot)
-{
- u16 slot_cmd;
- u16 cmd_mask;
- int rc;
-
- slot_cmd = PCI_EXP_SLTCTL_EIC;
- cmd_mask = PCI_EXP_SLTCTL_EIC;
- rc = pcie_write_cmd(slot->ctrl, slot_cmd, cmd_mask);
- slot->last_emi_toggle = get_seconds();
-
- return rc;
-}
-
static int hpc_set_attention_status(struct slot *slot, u8 value)
{
struct controller *ctrl = slot->ctrl;
@@ -874,8 +845,6 @@
.get_attention_status = hpc_get_attention_status,
.get_latch_status = hpc_get_latch_status,
.get_adapter_status = hpc_get_adapter_status,
- .get_emi_status = hpc_get_emi_status,
- .toggle_emi = hpc_toggle_emi,
.get_max_bus_speed = hpc_get_max_lnk_speed,
.get_cur_bus_speed = hpc_get_cur_lnk_speed,
diff --git a/drivers/pci/hotplug/pcihp_skeleton.c b/drivers/pci/hotplug/pcihp_skeleton.c
index e3dd6cf..5175d9b 100644
--- a/drivers/pci/hotplug/pcihp_skeleton.c
+++ b/drivers/pci/hotplug/pcihp_skeleton.c
@@ -82,7 +82,6 @@
static int get_adapter_status (struct hotplug_slot *slot, u8 *value);
static struct hotplug_slot_ops skel_hotplug_slot_ops = {
- .owner = THIS_MODULE,
.enable_slot = enable_slot,
.disable_slot = disable_slot,
.set_attention_status = set_attention_status,
diff --git a/drivers/pci/hotplug/rpaphp_core.c b/drivers/pci/hotplug/rpaphp_core.c
index 95d02a0..c159223 100644
--- a/drivers/pci/hotplug/rpaphp_core.c
+++ b/drivers/pci/hotplug/rpaphp_core.c
@@ -423,7 +423,6 @@
}
struct hotplug_slot_ops rpaphp_hotplug_slot_ops = {
- .owner = THIS_MODULE,
.enable_slot = enable_slot,
.disable_slot = disable_slot,
.set_attention_status = set_attention_status,
diff --git a/drivers/pci/hotplug/sgi_hotplug.c b/drivers/pci/hotplug/sgi_hotplug.c
index 2d6da78..a4494d7 100644
--- a/drivers/pci/hotplug/sgi_hotplug.c
+++ b/drivers/pci/hotplug/sgi_hotplug.c
@@ -83,7 +83,6 @@
static inline int get_power_status(struct hotplug_slot *slot, u8 *value);
static struct hotplug_slot_ops sn_hotplug_slot_ops = {
- .owner = THIS_MODULE,
.enable_slot = enable_slot,
.disable_slot = disable_slot,
.get_power_status = get_power_status,
diff --git a/drivers/pci/hotplug/shpchp_core.c b/drivers/pci/hotplug/shpchp_core.c
index fe8d149..8a520a3 100644
--- a/drivers/pci/hotplug/shpchp_core.c
+++ b/drivers/pci/hotplug/shpchp_core.c
@@ -69,7 +69,6 @@
static int get_cur_bus_speed (struct hotplug_slot *slot, enum pci_bus_speed *value);
static struct hotplug_slot_ops shpchp_hotplug_slot_ops = {
- .owner = THIS_MODULE,
.set_attention_status = set_attention_status,
.enable_slot = enable_slot,
.disable_slot = disable_slot,
diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
index cd38916..178853a 100644
--- a/drivers/pci/intel-iommu.c
+++ b/drivers/pci/intel-iommu.c
@@ -53,6 +53,8 @@
#define DEFAULT_DOMAIN_ADDRESS_WIDTH 48
+#define MAX_AGAW_WIDTH 64
+
#define DOMAIN_MAX_ADDR(gaw) ((((u64)1) << gaw) - 1)
#define IOVA_PFN(addr) ((addr) >> PAGE_SHIFT)
@@ -131,8 +133,6 @@
context->lo &= (((u64)-1) << 2) | 1;
}
-#define CONTEXT_TT_MULTI_LEVEL 0
-
static inline void context_set_translation_type(struct context_entry *context,
unsigned long value)
{
@@ -256,6 +256,7 @@
u8 bus; /* PCI bus number */
u8 devfn; /* PCI devfn number */
struct pci_dev *dev; /* it's NULL for PCIE-to-PCI bridge */
+ struct intel_iommu *iommu; /* IOMMU used by this device */
struct dmar_domain *domain; /* pointer to domain */
};
@@ -401,17 +402,13 @@
static inline int width_to_agaw(int width);
-/* calculate agaw for each iommu.
- * "SAGAW" may be different across iommus, use a default agaw, and
- * get a supported less agaw for iommus that don't support the default agaw.
- */
-int iommu_calculate_agaw(struct intel_iommu *iommu)
+static int __iommu_calculate_agaw(struct intel_iommu *iommu, int max_gaw)
{
unsigned long sagaw;
int agaw = -1;
sagaw = cap_sagaw(iommu->cap);
- for (agaw = width_to_agaw(DEFAULT_DOMAIN_ADDRESS_WIDTH);
+ for (agaw = width_to_agaw(max_gaw);
agaw >= 0; agaw--) {
if (test_bit(agaw, &sagaw))
break;
@@ -420,6 +417,24 @@
return agaw;
}
+/*
+ * Calculate max SAGAW for each iommu.
+ */
+int iommu_calculate_max_sagaw(struct intel_iommu *iommu)
+{
+ return __iommu_calculate_agaw(iommu, MAX_AGAW_WIDTH);
+}
+
+/*
+ * calculate agaw for each iommu.
+ * "SAGAW" may be different across iommus, use a default agaw, and
+ * get a supported less agaw for iommus that don't support the default agaw.
+ */
+int iommu_calculate_agaw(struct intel_iommu *iommu)
+{
+ return __iommu_calculate_agaw(iommu, DEFAULT_DOMAIN_ADDRESS_WIDTH);
+}
+
/* in native case, each domain is related to only one iommu */
static struct intel_iommu *domain_get_iommu(struct dmar_domain *domain)
{
@@ -809,7 +824,7 @@
static void iommu_set_root_entry(struct intel_iommu *iommu)
{
void *addr;
- u32 cmd, sts;
+ u32 sts;
unsigned long flag;
addr = iommu->root_entry;
@@ -817,12 +832,11 @@
spin_lock_irqsave(&iommu->register_lock, flag);
dmar_writeq(iommu->reg + DMAR_RTADDR_REG, virt_to_phys(addr));
- cmd = iommu->gcmd | DMA_GCMD_SRTP;
- writel(cmd, iommu->reg + DMAR_GCMD_REG);
+ writel(iommu->gcmd | DMA_GCMD_SRTP, iommu->reg + DMAR_GCMD_REG);
/* Make sure hardware complete it */
IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
- readl, (sts & DMA_GSTS_RTPS), sts);
+ readl, (sts & DMA_GSTS_RTPS), sts);
spin_unlock_irqrestore(&iommu->register_lock, flag);
}
@@ -834,39 +848,25 @@
if (!rwbf_quirk && !cap_rwbf(iommu->cap))
return;
- val = iommu->gcmd | DMA_GCMD_WBF;
spin_lock_irqsave(&iommu->register_lock, flag);
- writel(val, iommu->reg + DMAR_GCMD_REG);
+ writel(iommu->gcmd | DMA_GCMD_WBF, iommu->reg + DMAR_GCMD_REG);
/* Make sure hardware complete it */
IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
- readl, (!(val & DMA_GSTS_WBFS)), val);
+ readl, (!(val & DMA_GSTS_WBFS)), val);
spin_unlock_irqrestore(&iommu->register_lock, flag);
}
/* return value determine if we need a write buffer flush */
-static int __iommu_flush_context(struct intel_iommu *iommu,
- u16 did, u16 source_id, u8 function_mask, u64 type,
- int non_present_entry_flush)
+static void __iommu_flush_context(struct intel_iommu *iommu,
+ u16 did, u16 source_id, u8 function_mask,
+ u64 type)
{
u64 val = 0;
unsigned long flag;
- /*
- * In the non-present entry flush case, if hardware doesn't cache
- * non-present entry we do nothing and if hardware cache non-present
- * entry, we flush entries of domain 0 (the domain id is used to cache
- * any non-present entries)
- */
- if (non_present_entry_flush) {
- if (!cap_caching_mode(iommu->cap))
- return 1;
- else
- did = 0;
- }
-
switch (type) {
case DMA_CCMD_GLOBAL_INVL:
val = DMA_CCMD_GLOBAL_INVL;
@@ -891,33 +891,16 @@
dmar_readq, (!(val & DMA_CCMD_ICC)), val);
spin_unlock_irqrestore(&iommu->register_lock, flag);
-
- /* flush context entry will implicitly flush write buffer */
- return 0;
}
/* return value determine if we need a write buffer flush */
-static int __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
- u64 addr, unsigned int size_order, u64 type,
- int non_present_entry_flush)
+static void __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
+ u64 addr, unsigned int size_order, u64 type)
{
int tlb_offset = ecap_iotlb_offset(iommu->ecap);
u64 val = 0, val_iva = 0;
unsigned long flag;
- /*
- * In the non-present entry flush case, if hardware doesn't cache
- * non-present entry we do nothing and if hardware cache non-present
- * entry, we flush entries of domain 0 (the domain id is used to cache
- * any non-present entries)
- */
- if (non_present_entry_flush) {
- if (!cap_caching_mode(iommu->cap))
- return 1;
- else
- did = 0;
- }
-
switch (type) {
case DMA_TLB_GLOBAL_FLUSH:
/* global flush doesn't need set IVA_REG */
@@ -965,37 +948,101 @@
pr_debug("IOMMU: tlb flush request %Lx, actual %Lx\n",
(unsigned long long)DMA_TLB_IIRG(type),
(unsigned long long)DMA_TLB_IAIG(val));
- /* flush iotlb entry will implicitly flush write buffer */
- return 0;
}
-static int iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did,
- u64 addr, unsigned int pages, int non_present_entry_flush)
+static struct device_domain_info *iommu_support_dev_iotlb(
+ struct dmar_domain *domain, int segment, u8 bus, u8 devfn)
{
- unsigned int mask;
+ int found = 0;
+ unsigned long flags;
+ struct device_domain_info *info;
+ struct intel_iommu *iommu = device_to_iommu(segment, bus, devfn);
+
+ if (!ecap_dev_iotlb_support(iommu->ecap))
+ return NULL;
+
+ if (!iommu->qi)
+ return NULL;
+
+ spin_lock_irqsave(&device_domain_lock, flags);
+ list_for_each_entry(info, &domain->devices, link)
+ if (info->bus == bus && info->devfn == devfn) {
+ found = 1;
+ break;
+ }
+ spin_unlock_irqrestore(&device_domain_lock, flags);
+
+ if (!found || !info->dev)
+ return NULL;
+
+ if (!pci_find_ext_capability(info->dev, PCI_EXT_CAP_ID_ATS))
+ return NULL;
+
+ if (!dmar_find_matched_atsr_unit(info->dev))
+ return NULL;
+
+ info->iommu = iommu;
+
+ return info;
+}
+
+static void iommu_enable_dev_iotlb(struct device_domain_info *info)
+{
+ if (!info)
+ return;
+
+ pci_enable_ats(info->dev, VTD_PAGE_SHIFT);
+}
+
+static void iommu_disable_dev_iotlb(struct device_domain_info *info)
+{
+ if (!info->dev || !pci_ats_enabled(info->dev))
+ return;
+
+ pci_disable_ats(info->dev);
+}
+
+static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
+ u64 addr, unsigned mask)
+{
+ u16 sid, qdep;
+ unsigned long flags;
+ struct device_domain_info *info;
+
+ spin_lock_irqsave(&device_domain_lock, flags);
+ list_for_each_entry(info, &domain->devices, link) {
+ if (!info->dev || !pci_ats_enabled(info->dev))
+ continue;
+
+ sid = info->bus << 8 | info->devfn;
+ qdep = pci_ats_queue_depth(info->dev);
+ qi_flush_dev_iotlb(info->iommu, sid, qdep, addr, mask);
+ }
+ spin_unlock_irqrestore(&device_domain_lock, flags);
+}
+
+static void iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did,
+ u64 addr, unsigned int pages)
+{
+ unsigned int mask = ilog2(__roundup_pow_of_two(pages));
BUG_ON(addr & (~VTD_PAGE_MASK));
BUG_ON(pages == 0);
- /* Fallback to domain selective flush if no PSI support */
- if (!cap_pgsel_inv(iommu->cap))
- return iommu->flush.flush_iotlb(iommu, did, 0, 0,
- DMA_TLB_DSI_FLUSH,
- non_present_entry_flush);
-
/*
+ * Fallback to domain selective flush if no PSI support or the size is
+ * too big.
* PSI requires page size to be 2 ^ x, and the base address is naturally
* aligned to the size
*/
- mask = ilog2(__roundup_pow_of_two(pages));
- /* Fallback to domain selective flush if size is too big */
- if (mask > cap_max_amask_val(iommu->cap))
- return iommu->flush.flush_iotlb(iommu, did, 0, 0,
- DMA_TLB_DSI_FLUSH, non_present_entry_flush);
-
- return iommu->flush.flush_iotlb(iommu, did, addr, mask,
- DMA_TLB_PSI_FLUSH,
- non_present_entry_flush);
+ if (!cap_pgsel_inv(iommu->cap) || mask > cap_max_amask_val(iommu->cap))
+ iommu->flush.flush_iotlb(iommu, did, 0, 0,
+ DMA_TLB_DSI_FLUSH);
+ else
+ iommu->flush.flush_iotlb(iommu, did, addr, mask,
+ DMA_TLB_PSI_FLUSH);
+ if (did)
+ iommu_flush_dev_iotlb(iommu->domains[did], addr, mask);
}
static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu)
@@ -1021,13 +1068,13 @@
unsigned long flags;
spin_lock_irqsave(&iommu->register_lock, flags);
- writel(iommu->gcmd|DMA_GCMD_TE, iommu->reg + DMAR_GCMD_REG);
+ iommu->gcmd |= DMA_GCMD_TE;
+ writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
/* Make sure hardware complete it */
IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
- readl, (sts & DMA_GSTS_TES), sts);
+ readl, (sts & DMA_GSTS_TES), sts);
- iommu->gcmd |= DMA_GCMD_TE;
spin_unlock_irqrestore(&iommu->register_lock, flags);
return 0;
}
@@ -1043,7 +1090,7 @@
/* Make sure hardware complete it */
IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
- readl, (!(sts & DMA_GSTS_TES)), sts);
+ readl, (!(sts & DMA_GSTS_TES)), sts);
spin_unlock_irqrestore(&iommu->register_lock, flag);
return 0;
@@ -1325,8 +1372,8 @@
free_domain_mem(domain);
}
-static int domain_context_mapping_one(struct dmar_domain *domain,
- int segment, u8 bus, u8 devfn)
+static int domain_context_mapping_one(struct dmar_domain *domain, int segment,
+ u8 bus, u8 devfn, int translation)
{
struct context_entry *context;
unsigned long flags;
@@ -1336,10 +1383,14 @@
unsigned long ndomains;
int id;
int agaw;
+ struct device_domain_info *info = NULL;
pr_debug("Set context mapping for %02x:%02x.%d\n",
bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
+
BUG_ON(!domain->pgd);
+ BUG_ON(translation != CONTEXT_TT_PASS_THROUGH &&
+ translation != CONTEXT_TT_MULTI_LEVEL);
iommu = device_to_iommu(segment, bus, devfn);
if (!iommu)
@@ -1399,21 +1450,44 @@
}
context_set_domain_id(context, id);
- context_set_address_width(context, iommu->agaw);
- context_set_address_root(context, virt_to_phys(pgd));
- context_set_translation_type(context, CONTEXT_TT_MULTI_LEVEL);
+
+ if (translation != CONTEXT_TT_PASS_THROUGH) {
+ info = iommu_support_dev_iotlb(domain, segment, bus, devfn);
+ translation = info ? CONTEXT_TT_DEV_IOTLB :
+ CONTEXT_TT_MULTI_LEVEL;
+ }
+ /*
+ * In pass through mode, AW must be programmed to indicate the largest
+ * AGAW value supported by hardware. And ASR is ignored by hardware.
+ */
+ if (unlikely(translation == CONTEXT_TT_PASS_THROUGH))
+ context_set_address_width(context, iommu->msagaw);
+ else {
+ context_set_address_root(context, virt_to_phys(pgd));
+ context_set_address_width(context, iommu->agaw);
+ }
+
+ context_set_translation_type(context, translation);
context_set_fault_enable(context);
context_set_present(context);
domain_flush_cache(domain, context, sizeof(*context));
- /* it's a non-present to present mapping */
- if (iommu->flush.flush_context(iommu, domain->id,
- (((u16)bus) << 8) | devfn, DMA_CCMD_MASK_NOBIT,
- DMA_CCMD_DEVICE_INVL, 1))
+ /*
+ * It's a non-present to present mapping. If hardware doesn't cache
+ * non-present entries we only need to flush the write-buffer. If the
+ * hardware _does_ cache non-present entries, then it does so in the special
+ * domain #0, which we have to flush:
+ */
+ if (cap_caching_mode(iommu->cap)) {
+ iommu->flush.flush_context(iommu, 0,
+ (((u16)bus) << 8) | devfn,
+ DMA_CCMD_MASK_NOBIT,
+ DMA_CCMD_DEVICE_INVL);
+ iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_DSI_FLUSH);
+ } else {
iommu_flush_write_buffer(iommu);
- else
- iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_DSI_FLUSH, 0);
-
+ }
+ iommu_enable_dev_iotlb(info);
spin_unlock_irqrestore(&iommu->lock, flags);
spin_lock_irqsave(&domain->iommu_lock, flags);
@@ -1426,13 +1500,15 @@
}
static int
-domain_context_mapping(struct dmar_domain *domain, struct pci_dev *pdev)
+domain_context_mapping(struct dmar_domain *domain, struct pci_dev *pdev,
+ int translation)
{
int ret;
struct pci_dev *tmp, *parent;
ret = domain_context_mapping_one(domain, pci_domain_nr(pdev->bus),
- pdev->bus->number, pdev->devfn);
+ pdev->bus->number, pdev->devfn,
+ translation);
if (ret)
return ret;
@@ -1446,7 +1522,7 @@
ret = domain_context_mapping_one(domain,
pci_domain_nr(parent->bus),
parent->bus->number,
- parent->devfn);
+ parent->devfn, translation);
if (ret)
return ret;
parent = parent->bus->self;
@@ -1454,12 +1530,14 @@
if (tmp->is_pcie) /* this is a PCIE-to-PCI bridge */
return domain_context_mapping_one(domain,
pci_domain_nr(tmp->subordinate),
- tmp->subordinate->number, 0);
+ tmp->subordinate->number, 0,
+ translation);
else /* this is a legacy PCI bridge */
return domain_context_mapping_one(domain,
pci_domain_nr(tmp->bus),
tmp->bus->number,
- tmp->devfn);
+ tmp->devfn,
+ translation);
}
static int domain_context_mapped(struct pci_dev *pdev)
@@ -1540,9 +1618,8 @@
clear_context_table(iommu, bus, devfn);
iommu->flush.flush_context(iommu, 0, 0, 0,
- DMA_CCMD_GLOBAL_INVL, 0);
- iommu->flush.flush_iotlb(iommu, 0, 0, 0,
- DMA_TLB_GLOBAL_FLUSH, 0);
+ DMA_CCMD_GLOBAL_INVL);
+ iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
}
static void domain_remove_dev_info(struct dmar_domain *domain)
@@ -1561,6 +1638,7 @@
info->dev->dev.archdata.iommu = NULL;
spin_unlock_irqrestore(&device_domain_lock, flags);
+ iommu_disable_dev_iotlb(info);
iommu = device_to_iommu(info->segment, info->bus, info->devfn);
iommu_detach_dev(iommu, info->bus, info->devfn);
free_devinfo_mem(info);
@@ -1756,7 +1834,7 @@
goto error;
/* context entry init */
- ret = domain_context_mapping(domain, pdev);
+ ret = domain_context_mapping(domain, pdev, CONTEXT_TT_MULTI_LEVEL);
if (!ret)
return 0;
error:
@@ -1857,6 +1935,23 @@
}
#endif /* !CONFIG_DMAR_FLPY_WA */
+/* Initialize each context entry as pass through. */
+static int __init init_context_pass_through(void)
+{
+ struct pci_dev *pdev = NULL;
+ struct dmar_domain *domain;
+ int ret;
+
+ for_each_pci_dev(pdev) {
+ domain = get_domain_for_dev(pdev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
+ ret = domain_context_mapping(domain, pdev,
+ CONTEXT_TT_PASS_THROUGH);
+ if (ret)
+ return ret;
+ }
+ return 0;
+}
+
static int __init init_dmars(void)
{
struct dmar_drhd_unit *drhd;
@@ -1864,6 +1959,7 @@
struct pci_dev *pdev;
struct intel_iommu *iommu;
int i, ret;
+ int pass_through = 1;
/*
* for each drhd
@@ -1917,7 +2013,15 @@
printk(KERN_ERR "IOMMU: allocate root entry failed\n");
goto error;
}
+ if (!ecap_pass_through(iommu->ecap))
+ pass_through = 0;
}
+ if (iommu_pass_through)
+ if (!pass_through) {
+ printk(KERN_INFO
+ "Pass Through is not supported by hardware.\n");
+ iommu_pass_through = 0;
+ }
/*
* Start from the sane iommu hardware state.
@@ -1973,35 +2077,56 @@
}
/*
- * For each rmrr
- * for each dev attached to rmrr
- * do
- * locate drhd for dev, alloc domain for dev
- * allocate free domain
- * allocate page table entries for rmrr
- * if context not allocated for bus
- * allocate and init context
- * set present in root table for this bus
- * init context with domain, translation etc
- * endfor
- * endfor
+ * If pass through is set and enabled, context entries of all pci
+ * devices are initialized with the pass through translation type.
*/
- for_each_rmrr_units(rmrr) {
- for (i = 0; i < rmrr->devices_cnt; i++) {
- pdev = rmrr->devices[i];
- /* some BIOS lists non-exist devices in DMAR table */
- if (!pdev)
- continue;
- ret = iommu_prepare_rmrr_dev(rmrr, pdev);
- if (ret)
- printk(KERN_ERR
- "IOMMU: mapping reserved region failed\n");
+ if (iommu_pass_through) {
+ ret = init_context_pass_through();
+ if (ret) {
+ printk(KERN_ERR "IOMMU: Pass through init failed.\n");
+ iommu_pass_through = 0;
}
}
- iommu_prepare_gfx_mapping();
+ /*
+ * If pass through is not set or not enabled, set up context entries for
+ * identity mappings for rmrr, gfx, and isa.
+ */
+ if (!iommu_pass_through) {
+ /*
+ * For each rmrr
+ * for each dev attached to rmrr
+ * do
+ * locate drhd for dev, alloc domain for dev
+ * allocate free domain
+ * allocate page table entries for rmrr
+ * if context not allocated for bus
+ * allocate and init context
+ * set present in root table for this bus
+ * init context with domain, translation etc
+ * endfor
+ * endfor
+ */
+ for_each_rmrr_units(rmrr) {
+ for (i = 0; i < rmrr->devices_cnt; i++) {
+ pdev = rmrr->devices[i];
+ /*
+ * some BIOSes list non-existent devices in the DMAR
+ * table.
+ */
+ if (!pdev)
+ continue;
+ ret = iommu_prepare_rmrr_dev(rmrr, pdev);
+ if (ret)
+ printk(KERN_ERR
+ "IOMMU: mapping reserved region failed\n");
+ }
+ }
- iommu_prepare_isa();
+ iommu_prepare_gfx_mapping();
+
+ iommu_prepare_isa();
+ }
/*
* for each drhd
@@ -2023,10 +2148,8 @@
iommu_set_root_entry(iommu);
- iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL,
- 0);
- iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH,
- 0);
+ iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
+ iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
iommu_disable_protect_mem_regions(iommu);
ret = iommu_enable_translation(iommu);
@@ -2112,7 +2235,8 @@
/* make sure context mapping is ok */
if (unlikely(!domain_context_mapped(pdev))) {
- ret = domain_context_mapping(domain, pdev);
+ ret = domain_context_mapping(domain, pdev,
+ CONTEXT_TT_MULTI_LEVEL);
if (ret) {
printk(KERN_ERR
"Domain context map for %s failed",
@@ -2173,10 +2297,11 @@
if (ret)
goto error;
- /* it's a non-present to present mapping */
- ret = iommu_flush_iotlb_psi(iommu, domain->id,
- start_paddr, size >> VTD_PAGE_SHIFT, 1);
- if (ret)
+ /* it's a non-present to present mapping. Only flush if caching mode */
+ if (cap_caching_mode(iommu->cap))
+ iommu_flush_iotlb_psi(iommu, 0, start_paddr,
+ size >> VTD_PAGE_SHIFT);
+ else
iommu_flush_write_buffer(iommu);
return start_paddr + ((u64)paddr & (~PAGE_MASK));
@@ -2210,15 +2335,22 @@
if (!iommu)
continue;
- if (deferred_flush[i].next) {
- iommu->flush.flush_iotlb(iommu, 0, 0, 0,
- DMA_TLB_GLOBAL_FLUSH, 0);
- for (j = 0; j < deferred_flush[i].next; j++) {
- __free_iova(&deferred_flush[i].domain[j]->iovad,
- deferred_flush[i].iova[j]);
- }
- deferred_flush[i].next = 0;
+ if (!deferred_flush[i].next)
+ continue;
+
+ iommu->flush.flush_iotlb(iommu, 0, 0, 0,
+ DMA_TLB_GLOBAL_FLUSH);
+ for (j = 0; j < deferred_flush[i].next; j++) {
+ unsigned long mask;
+ struct iova *iova = deferred_flush[i].iova[j];
+
+ mask = (iova->pfn_hi - iova->pfn_lo + 1) << PAGE_SHIFT;
+ mask = ilog2(mask >> VTD_PAGE_SHIFT);
+ iommu_flush_dev_iotlb(deferred_flush[i].domain[j],
+ iova->pfn_lo << PAGE_SHIFT, mask);
+ __free_iova(&deferred_flush[i].domain[j]->iovad, iova);
}
+ deferred_flush[i].next = 0;
}
list_size = 0;
@@ -2291,9 +2423,8 @@
/* free page tables */
dma_pte_free_pagetable(domain, start_addr, start_addr + size);
if (intel_iommu_strict) {
- if (iommu_flush_iotlb_psi(iommu,
- domain->id, start_addr, size >> VTD_PAGE_SHIFT, 0))
- iommu_flush_write_buffer(iommu);
+ iommu_flush_iotlb_psi(iommu, domain->id, start_addr,
+ size >> VTD_PAGE_SHIFT);
/* free iova */
__free_iova(&domain->iovad, iova);
} else {
@@ -2384,9 +2515,8 @@
/* free page tables */
dma_pte_free_pagetable(domain, start_addr, start_addr + size);
- if (iommu_flush_iotlb_psi(iommu, domain->id, start_addr,
- size >> VTD_PAGE_SHIFT, 0))
- iommu_flush_write_buffer(iommu);
+ iommu_flush_iotlb_psi(iommu, domain->id, start_addr,
+ size >> VTD_PAGE_SHIFT);
/* free iova */
__free_iova(&domain->iovad, iova);
@@ -2478,10 +2608,13 @@
offset += size;
}
- /* it's a non-present to present mapping */
- if (iommu_flush_iotlb_psi(iommu, domain->id,
- start_addr, offset >> VTD_PAGE_SHIFT, 1))
+ /* it's a non-present to present mapping. Only flush if caching mode */
+ if (cap_caching_mode(iommu->cap))
+ iommu_flush_iotlb_psi(iommu, 0, start_addr,
+ offset >> VTD_PAGE_SHIFT);
+ else
iommu_flush_write_buffer(iommu);
+
return nelems;
}
@@ -2640,9 +2773,9 @@
iommu_set_root_entry(iommu);
iommu->flush.flush_context(iommu, 0, 0, 0,
- DMA_CCMD_GLOBAL_INVL, 0);
+ DMA_CCMD_GLOBAL_INVL);
iommu->flush.flush_iotlb(iommu, 0, 0, 0,
- DMA_TLB_GLOBAL_FLUSH, 0);
+ DMA_TLB_GLOBAL_FLUSH);
iommu_disable_protect_mem_regions(iommu);
iommu_enable_translation(iommu);
}
@@ -2657,9 +2790,9 @@
for_each_active_iommu(iommu, drhd) {
iommu->flush.flush_context(iommu, 0, 0, 0,
- DMA_CCMD_GLOBAL_INVL, 0);
+ DMA_CCMD_GLOBAL_INVL);
iommu->flush.flush_iotlb(iommu, 0, 0, 0,
- DMA_TLB_GLOBAL_FLUSH, 0);
+ DMA_TLB_GLOBAL_FLUSH);
}
}
@@ -2782,7 +2915,7 @@
* Check the need for DMA-remapping initialization now.
* Above initialization will also be used by Interrupt-remapping.
*/
- if (no_iommu || swiotlb || dmar_disabled)
+ if (no_iommu || (swiotlb && !iommu_pass_through) || dmar_disabled)
return -ENODEV;
iommu_init_mempool();
@@ -2802,7 +2935,15 @@
init_timer(&unmap_timer);
force_iommu = 1;
- dma_ops = &intel_dma_ops;
+
+ if (!iommu_pass_through) {
+ printk(KERN_INFO
+ "Multi-level page-table translation for DMAR.\n");
+ dma_ops = &intel_dma_ops;
+ } else
+ printk(KERN_INFO
+ "DMAR: Pass through translation for DMAR.\n");
+
init_iommu_sysfs();
register_iommu(&intel_iommu_ops);
@@ -2888,6 +3029,7 @@
info->dev->dev.archdata.iommu = NULL;
spin_unlock_irqrestore(&device_domain_lock, flags);
+ iommu_disable_dev_iotlb(info);
iommu_detach_dev(iommu, info->bus, info->devfn);
iommu_detach_dependent_devices(iommu, pdev);
free_devinfo_mem(info);
@@ -2938,6 +3080,7 @@
spin_unlock_irqrestore(&device_domain_lock, flags1);
+ iommu_disable_dev_iotlb(info);
iommu = device_to_iommu(info->segment, info->bus, info->devfn);
iommu_detach_dev(iommu, info->bus, info->devfn);
iommu_detach_dependent_devices(iommu, info->dev);
@@ -3142,11 +3285,11 @@
return -EFAULT;
}
- ret = domain_context_mapping(dmar_domain, pdev);
+ ret = vm_domain_add_dev_info(dmar_domain, pdev);
if (ret)
return ret;
- ret = vm_domain_add_dev_info(dmar_domain, pdev);
+ ret = domain_context_mapping(dmar_domain, pdev, CONTEXT_TT_MULTI_LEVEL);
return ret;
}
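With the non_present_entry_flush argument and the int return gone from the flush_context()/flush_iotlb() callbacks, the caching-mode decision moves to the call sites, as in the mapping paths above. A rough file-local sketch of the resulting pattern (the demo_ name is illustrative only, and npages must be non-zero):

	/* flush after turning a non-present entry into a present one */
	static void demo_flush_after_map(struct intel_iommu *iommu,
					 u64 start_paddr, unsigned int npages)
	{
		if (cap_caching_mode(iommu->cap))
			/* caching-mode hardware may cache not-present entries
			 * (under domain id 0), so they must be invalidated
			 */
			iommu_flush_iotlb_psi(iommu, 0, start_paddr, npages);
		else
			/* otherwise flushing the write buffer is sufficient */
			iommu_flush_write_buffer(iommu);
	}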
diff --git a/drivers/pci/intr_remapping.c b/drivers/pci/intr_remapping.c
index 3a0cb0b..1e83c8c 100644
--- a/drivers/pci/intr_remapping.c
+++ b/drivers/pci/intr_remapping.c
@@ -409,7 +409,7 @@
static void iommu_set_intr_remapping(struct intel_iommu *iommu, int mode)
{
u64 addr;
- u32 cmd, sts;
+ u32 sts;
unsigned long flags;
addr = virt_to_phys((void *)iommu->ir_table->base);
@@ -420,9 +420,8 @@
(addr) | IR_X2APIC_MODE(mode) | INTR_REMAP_TABLE_REG_SIZE);
/* Set interrupt-remapping table pointer */
- cmd = iommu->gcmd | DMA_GCMD_SIRTP;
iommu->gcmd |= DMA_GCMD_SIRTP;
- writel(cmd, iommu->reg + DMAR_GCMD_REG);
+ writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
readl, (sts & DMA_GSTS_IRTPS), sts);
@@ -437,9 +436,8 @@
spin_lock_irqsave(&iommu->register_lock, flags);
/* Enable interrupt-remapping */
- cmd = iommu->gcmd | DMA_GCMD_IRE;
iommu->gcmd |= DMA_GCMD_IRE;
- writel(cmd, iommu->reg + DMAR_GCMD_REG);
+ writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
readl, (sts & DMA_GSTS_IRES), sts);
diff --git a/drivers/pci/iov.c b/drivers/pci/iov.c
index b497daa..e3a8721 100644
--- a/drivers/pci/iov.c
+++ b/drivers/pci/iov.c
@@ -5,6 +5,7 @@
*
* PCI Express I/O Virtualization (IOV) support.
* Single Root IOV 1.0
+ * Address Translation Service 1.0
*/
#include <linux/pci.h>
@@ -110,7 +111,7 @@
}
if (reset)
- pci_execute_reset_function(virtfn);
+ __pci_reset_function(virtfn);
pci_device_add(virtfn, virtfn->bus);
mutex_unlock(&iov->dev->sriov->lock);
@@ -164,7 +165,7 @@
if (reset) {
device_release_driver(&virtfn->dev);
- pci_execute_reset_function(virtfn);
+ __pci_reset_function(virtfn);
}
sprintf(buf, "virtfn%u", id);
@@ -487,13 +488,15 @@
iov->self = dev;
pci_read_config_dword(dev, pos + PCI_SRIOV_CAP, &iov->cap);
pci_read_config_byte(dev, pos + PCI_SRIOV_FUNC_LINK, &iov->link);
+ if (dev->pcie_type == PCI_EXP_TYPE_RC_END)
+ iov->link = PCI_DEVFN(PCI_SLOT(dev->devfn), iov->link);
if (pdev)
iov->dev = pci_dev_get(pdev);
- else {
+ else
iov->dev = dev;
- mutex_init(&iov->lock);
- }
+
+ mutex_init(&iov->lock);
dev->sriov = iov;
dev->is_physfn = 1;
@@ -513,11 +516,11 @@
{
BUG_ON(dev->sriov->nr_virtfn);
- if (dev == dev->sriov->dev)
- mutex_destroy(&dev->sriov->lock);
- else
+ if (dev != dev->sriov->dev)
pci_dev_put(dev->sriov->dev);
+ mutex_destroy(&dev->sriov->lock);
+
kfree(dev->sriov);
dev->sriov = NULL;
}
@@ -679,3 +682,145 @@
return sriov_migration(dev) ? IRQ_HANDLED : IRQ_NONE;
}
EXPORT_SYMBOL_GPL(pci_sriov_migration);
+
+static int ats_alloc_one(struct pci_dev *dev, int ps)
+{
+ int pos;
+ u16 cap;
+ struct pci_ats *ats;
+
+ pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ATS);
+ if (!pos)
+ return -ENODEV;
+
+ ats = kzalloc(sizeof(*ats), GFP_KERNEL);
+ if (!ats)
+ return -ENOMEM;
+
+ ats->pos = pos;
+ ats->stu = ps;
+ pci_read_config_word(dev, pos + PCI_ATS_CAP, &cap);
+ ats->qdep = PCI_ATS_CAP_QDEP(cap) ? PCI_ATS_CAP_QDEP(cap) :
+ PCI_ATS_MAX_QDEP;
+ dev->ats = ats;
+
+ return 0;
+}
+
+static void ats_free_one(struct pci_dev *dev)
+{
+ kfree(dev->ats);
+ dev->ats = NULL;
+}
+
+/**
+ * pci_enable_ats - enable the ATS capability
+ * @dev: the PCI device
+ * @ps: the IOMMU page shift
+ *
+ * Returns 0 on success, or negative on failure.
+ */
+int pci_enable_ats(struct pci_dev *dev, int ps)
+{
+ int rc;
+ u16 ctrl;
+
+ BUG_ON(dev->ats && dev->ats->is_enabled);
+
+ if (ps < PCI_ATS_MIN_STU)
+ return -EINVAL;
+
+ if (dev->is_physfn || dev->is_virtfn) {
+ struct pci_dev *pdev = dev->is_physfn ? dev : dev->physfn;
+
+ mutex_lock(&pdev->sriov->lock);
+ if (pdev->ats)
+ rc = pdev->ats->stu == ps ? 0 : -EINVAL;
+ else
+ rc = ats_alloc_one(pdev, ps);
+
+ if (!rc)
+ pdev->ats->ref_cnt++;
+ mutex_unlock(&pdev->sriov->lock);
+ if (rc)
+ return rc;
+ }
+
+ if (!dev->is_physfn) {
+ rc = ats_alloc_one(dev, ps);
+ if (rc)
+ return rc;
+ }
+
+ ctrl = PCI_ATS_CTRL_ENABLE;
+ if (!dev->is_virtfn)
+ ctrl |= PCI_ATS_CTRL_STU(ps - PCI_ATS_MIN_STU);
+ pci_write_config_word(dev, dev->ats->pos + PCI_ATS_CTRL, ctrl);
+
+ dev->ats->is_enabled = 1;
+
+ return 0;
+}
+
+/**
+ * pci_disable_ats - disable the ATS capability
+ * @dev: the PCI device
+ */
+void pci_disable_ats(struct pci_dev *dev)
+{
+ u16 ctrl;
+
+ BUG_ON(!dev->ats || !dev->ats->is_enabled);
+
+ pci_read_config_word(dev, dev->ats->pos + PCI_ATS_CTRL, &ctrl);
+ ctrl &= ~PCI_ATS_CTRL_ENABLE;
+ pci_write_config_word(dev, dev->ats->pos + PCI_ATS_CTRL, ctrl);
+
+ dev->ats->is_enabled = 0;
+
+ if (dev->is_physfn || dev->is_virtfn) {
+ struct pci_dev *pdev = dev->is_physfn ? dev : dev->physfn;
+
+ mutex_lock(&pdev->sriov->lock);
+ pdev->ats->ref_cnt--;
+ if (!pdev->ats->ref_cnt)
+ ats_free_one(pdev);
+ mutex_unlock(&pdev->sriov->lock);
+ }
+
+ if (!dev->is_physfn)
+ ats_free_one(dev);
+}
+
+/**
+ * pci_ats_queue_depth - query the ATS Invalidate Queue Depth
+ * @dev: the PCI device
+ *
+ * Returns the queue depth on success, or negative on failure.
+ *
+ * The ATS spec uses 0 in the Invalidate Queue Depth field to
+ * indicate that the function can accept 32 Invalidate Requests.
+ * But here we use the `real' values (i.e. 1~32) for the Queue
+ * Depth; and 0 indicates the function shares the Queue with
+ * other functions (doesn't exclusively own a Queue).
+ */
+int pci_ats_queue_depth(struct pci_dev *dev)
+{
+ int pos;
+ u16 cap;
+
+ if (dev->is_virtfn)
+ return 0;
+
+ if (dev->ats)
+ return dev->ats->qdep;
+
+ pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ATS);
+ if (!pos)
+ return -ENODEV;
+
+ pci_read_config_word(dev, pos + PCI_ATS_CAP, &cap);
+
+ return PCI_ATS_CAP_QDEP(cap) ? PCI_ATS_CAP_QDEP(cap) :
+ PCI_ATS_MAX_QDEP;
+}
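
For context on how these new interfaces fit together, here is a minimal, hypothetical caller sketch (not part of this patch; the example_* names are illustrative and PAGE_SHIFT is assumed to satisfy PCI_ATS_MIN_STU) showing an IOMMU driver enabling ATS, querying the Invalidate Queue Depth, and tearing it down:

static int example_attach(struct pci_dev *pdev)
{
	int qdep, rc;

	/* assumption: PAGE_SHIFT >= PCI_ATS_MIN_STU on this platform */
	rc = pci_enable_ats(pdev, PAGE_SHIFT);
	if (rc)
		return rc;	/* no ATS capability, or STU mismatch with the PF */

	qdep = pci_ats_queue_depth(pdev);	/* 1..32, or 0 if the queue is shared */
	if (qdep < 0) {
		pci_disable_ats(pdev);
		return qdep;
	}

	/* ... program the IOMMU context entry with qdep ... */
	return 0;
}

static void example_detach(struct pci_dev *pdev)
{
	if (pci_ats_enabled(pdev))
		pci_disable_ats(pdev);
}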
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
index 3627732..d9f06fb 100644
--- a/drivers/pci/msi.c
+++ b/drivers/pci/msi.c
@@ -75,22 +75,17 @@
}
#endif
-static void __msi_set_enable(struct pci_dev *dev, int pos, int enable)
+static void msi_set_enable(struct pci_dev *dev, int pos, int enable)
{
u16 control;
- if (pos) {
- pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &control);
- control &= ~PCI_MSI_FLAGS_ENABLE;
- if (enable)
- control |= PCI_MSI_FLAGS_ENABLE;
- pci_write_config_word(dev, pos + PCI_MSI_FLAGS, control);
- }
-}
+ BUG_ON(!pos);
-static void msi_set_enable(struct pci_dev *dev, int enable)
-{
- __msi_set_enable(dev, pci_find_capability(dev, PCI_CAP_ID_MSI), enable);
+ pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &control);
+ control &= ~PCI_MSI_FLAGS_ENABLE;
+ if (enable)
+ control |= PCI_MSI_FLAGS_ENABLE;
+ pci_write_config_word(dev, pos + PCI_MSI_FLAGS, control);
}
static void msix_set_enable(struct pci_dev *dev, int enable)
@@ -131,9 +126,6 @@
* mask all MSI interrupts by clearing the MSI enable bit does not work
* reliably as devices without an INTx disable bit will then generate a
* level IRQ which will never be cleared.
- *
- * Returns 1 if it succeeded in masking the interrupt and 0 if the device
- * doesn't support MSI masking.
*/
static void msi_mask_irq(struct msi_desc *desc, u32 mask, u32 flag)
{
@@ -303,7 +295,7 @@
pos = entry->msi_attrib.pos;
pci_intx_for_msi(dev, 0);
- msi_set_enable(dev, 0);
+ msi_set_enable(dev, pos, 0);
write_msi_msg(dev->irq, &entry->msg);
pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &control);
@@ -321,22 +313,22 @@
if (!dev->msix_enabled)
return;
+ BUG_ON(list_empty(&dev->msi_list));
+ entry = list_entry(dev->msi_list.next, struct msi_desc, list);
+ pos = entry->msi_attrib.pos;
+ pci_read_config_word(dev, pos + PCI_MSIX_FLAGS, &control);
/* route the table */
pci_intx_for_msi(dev, 0);
- msix_set_enable(dev, 0);
+ control |= PCI_MSIX_FLAGS_ENABLE | PCI_MSIX_FLAGS_MASKALL;
+ pci_write_config_word(dev, pos + PCI_MSIX_FLAGS, control);
list_for_each_entry(entry, &dev->msi_list, list) {
write_msi_msg(entry->irq, &entry->msg);
msix_mask_irq(entry, entry->masked);
}
- BUG_ON(list_empty(&dev->msi_list));
- entry = list_entry(dev->msi_list.next, struct msi_desc, list);
- pos = entry->msi_attrib.pos;
- pci_read_config_word(dev, pos + PCI_MSIX_FLAGS, &control);
control &= ~PCI_MSIX_FLAGS_MASKALL;
- control |= PCI_MSIX_FLAGS_ENABLE;
pci_write_config_word(dev, pos + PCI_MSIX_FLAGS, control);
}
@@ -365,9 +357,9 @@
u16 control;
unsigned mask;
- msi_set_enable(dev, 0); /* Ensure msi is disabled as I set it up */
-
pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
+ msi_set_enable(dev, pos, 0); /* Disable MSI during set up */
+
pci_read_config_word(dev, msi_control_reg(pos), &control);
/* MSI Entry Initialization */
entry = alloc_msi_entry(dev);
@@ -381,7 +373,7 @@
entry->msi_attrib.default_irq = dev->irq; /* Save IOAPIC IRQ */
entry->msi_attrib.pos = pos;
- entry->mask_pos = msi_mask_bits_reg(pos, entry->msi_attrib.is_64);
+ entry->mask_pos = msi_mask_reg(pos, entry->msi_attrib.is_64);
/* All MSIs are unmasked by default, Mask them all */
if (entry->msi_attrib.maskbit)
pci_read_config_dword(dev, entry->mask_pos, &entry->masked);
@@ -399,7 +391,7 @@
/* Set MSI enabled bits */
pci_intx_for_msi(dev, 0);
- msi_set_enable(dev, 1);
+ msi_set_enable(dev, pos, 1);
dev->msi_enabled = 1;
dev->irq = entry->irq;
@@ -427,11 +419,14 @@
u8 bir;
void __iomem *base;
- msix_set_enable(dev, 0);/* Ensure msix is disabled as I set it up */
-
pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
+ pci_read_config_word(dev, pos + PCI_MSIX_FLAGS, &control);
+
+ /* Ensure MSI-X is disabled while it is set up */
+ control &= ~PCI_MSIX_FLAGS_ENABLE;
+ pci_write_config_word(dev, pos + PCI_MSIX_FLAGS, control);
+
/* Request & Map MSI-X table region */
- pci_read_config_word(dev, msi_control_reg(pos), &control);
nr_entries = multi_msix_capable(control);
pci_read_config_dword(dev, msix_table_offset_reg(pos), &table_offset);
@@ -442,7 +437,6 @@
if (base == NULL)
return -ENOMEM;
- /* MSI-X Table Initialization */
for (i = 0; i < nvec; i++) {
entry = alloc_msi_entry(dev);
if (!entry)
@@ -455,7 +449,6 @@
entry->msi_attrib.default_irq = dev->irq;
entry->msi_attrib.pos = pos;
entry->mask_base = base;
- msix_mask_irq(entry, 1);
list_add_tail(&entry->list, &dev->msi_list);
}
@@ -480,22 +473,31 @@
return ret;
}
+ /*
+ * Some devices require MSI-X to be enabled before we can touch the
+ * MSI-X registers. We need to mask all the vectors to prevent
+ * interrupts coming in before they're fully set up.
+ */
+ control |= PCI_MSIX_FLAGS_MASKALL | PCI_MSIX_FLAGS_ENABLE;
+ pci_write_config_word(dev, pos + PCI_MSIX_FLAGS, control);
+
i = 0;
list_for_each_entry(entry, &dev->msi_list, list) {
entries[i].vector = entry->irq;
set_irq_msi(entry->irq, entry);
+ j = entries[i].entry;
+ entry->masked = readl(base + j * PCI_MSIX_ENTRY_SIZE +
+ PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET);
+ msix_mask_irq(entry, 1);
i++;
}
- /* Set MSI-X enabled bits */
+
+ /* Set MSI-X enabled bits and unmask the function */
pci_intx_for_msi(dev, 0);
- msix_set_enable(dev, 1);
dev->msix_enabled = 1;
- list_for_each_entry(entry, &dev->msi_list, list) {
- int vector = entry->msi_attrib.entry_nr;
- entry->masked = readl(base + vector * PCI_MSIX_ENTRY_SIZE +
- PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET);
- }
+ control &= ~PCI_MSIX_FLAGS_MASKALL;
+ pci_write_config_word(dev, pos + PCI_MSIX_FLAGS, control);
return 0;
}
@@ -596,17 +598,20 @@
struct msi_desc *desc;
u32 mask;
u16 ctrl;
+ unsigned pos;
if (!pci_msi_enable || !dev || !dev->msi_enabled)
return;
- msi_set_enable(dev, 0);
+ BUG_ON(list_empty(&dev->msi_list));
+ desc = list_first_entry(&dev->msi_list, struct msi_desc, list);
+ pos = desc->msi_attrib.pos;
+
+ msi_set_enable(dev, pos, 0);
pci_intx_for_msi(dev, 1);
dev->msi_enabled = 0;
- BUG_ON(list_empty(&dev->msi_list));
- desc = list_first_entry(&dev->msi_list, struct msi_desc, list);
- pci_read_config_word(dev, desc->msi_attrib.pos + PCI_MSI_FLAGS, &ctrl);
+ pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &ctrl);
mask = msi_capable_mask(ctrl);
msi_mask_irq(desc, mask, ~mask);
@@ -648,10 +653,7 @@
list_for_each_entry_safe(entry, tmp, &dev->msi_list, list) {
if (entry->msi_attrib.is_msix) {
- writel(1, entry->mask_base + entry->msi_attrib.entry_nr
- * PCI_MSIX_ENTRY_SIZE
- + PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET);
-
+ msix_mask_irq(entry, 1);
if (list_is_last(&entry->list, &dev->msi_list))
iounmap(entry->mask_base);
}
@@ -691,8 +693,8 @@
* indicates the successful configuration of MSI-X capability structure
* with new allocated MSI-X irqs. A return of < 0 indicates a failure.
* Or a return of > 0 indicates that driver request is exceeding the number
- * of irqs available. Driver should use the returned value to re-send
- * its request.
+ * of irqs or MSI-X vectors available. Driver should use the returned value to
+ * re-send its request.
**/
int pci_enable_msix(struct pci_dev* dev, struct msix_entry *entries, int nvec)
{
@@ -708,7 +710,7 @@
nr_entries = pci_msix_table_size(dev);
if (nvec > nr_entries)
- return -EINVAL;
+ return nr_entries;
/* Check for any invalid entries */
for (i = 0; i < nvec; i++) {
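
The changed pci_enable_msix() return convention is easiest to see from the caller's side. A hedged, hypothetical driver sketch (example_setup_msix and its retry loop are illustrative, not from this patch) that falls back to the number of vectors the function actually advertises:

static int example_setup_msix(struct pci_dev *pdev,
			      struct msix_entry *entries, int nvec)
{
	int rc;

	while (nvec > 0) {
		rc = pci_enable_msix(pdev, entries, nvec);
		if (rc == 0)
			return nvec;	/* got all nvec vectors */
		if (rc < 0)
			return rc;	/* hard failure: no MSI-X, bad entries, ... */
		nvec = rc;		/* > 0: only rc vectors/table entries available */
	}
	return -ENOSPC;
}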
diff --git a/drivers/pci/msi.h b/drivers/pci/msi.h
index 71f4df2..a066284 100644
--- a/drivers/pci/msi.h
+++ b/drivers/pci/msi.h
@@ -16,21 +16,15 @@
#define msi_lower_address_reg(base) (base + PCI_MSI_ADDRESS_LO)
#define msi_upper_address_reg(base) (base + PCI_MSI_ADDRESS_HI)
#define msi_data_reg(base, is64bit) \
- ( (is64bit == 1) ? base+PCI_MSI_DATA_64 : base+PCI_MSI_DATA_32 )
-#define msi_mask_bits_reg(base, is64bit) \
- ( (is64bit == 1) ? base+PCI_MSI_MASK_BIT : base+PCI_MSI_MASK_BIT-4)
-#define msi_disable(control) control &= ~PCI_MSI_FLAGS_ENABLE
+ (base + ((is64bit == 1) ? PCI_MSI_DATA_64 : PCI_MSI_DATA_32))
+#define msi_mask_reg(base, is64bit) \
+ (base + ((is64bit == 1) ? PCI_MSI_MASK_64 : PCI_MSI_MASK_32))
#define is_64bit_address(control) (!!(control & PCI_MSI_FLAGS_64BIT))
#define is_mask_bit_support(control) (!!(control & PCI_MSI_FLAGS_MASKBIT))
#define msix_table_offset_reg(base) (base + 0x04)
#define msix_pba_offset_reg(base) (base + 0x08)
-#define msix_enable(control) control |= PCI_MSIX_FLAGS_ENABLE
-#define msix_disable(control) control &= ~PCI_MSIX_FLAGS_ENABLE
#define msix_table_size(control) ((control & PCI_MSIX_FLAGS_QSIZE)+1)
-#define multi_msix_capable msix_table_size
-#define msix_unmask(address) (address & ~PCI_MSIX_FLAGS_BITMASK)
-#define msix_mask(address) (address | PCI_MSIX_FLAGS_BITMASK)
-#define msix_is_pending(address) (address & PCI_MSIX_FLAGS_PENDMASK)
+#define multi_msix_capable(control) msix_table_size((control))
#endif /* MSI_H */
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 07bbb9b..6c93af5 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -485,6 +485,8 @@
pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
pmcsr |= state;
break;
+ case PCI_D3hot:
+ case PCI_D3cold:
case PCI_UNKNOWN: /* Boot-up */
if ((pmcsr & PCI_PM_CTRL_STATE_MASK) == PCI_D3hot
&& !(pmcsr & PCI_PM_CTRL_NO_SOFT_RESET))
@@ -1208,7 +1210,7 @@
* Error code depending on the platform is returned if both the platform and
* the native mechanism fail to enable the generation of wake-up events
*/
-int pci_enable_wake(struct pci_dev *dev, pci_power_t state, int enable)
+int pci_enable_wake(struct pci_dev *dev, pci_power_t state, bool enable)
{
int error = 0;
bool pme_done = false;
@@ -1287,15 +1289,14 @@
default:
target_state = state;
}
+ } else if (!dev->pm_cap) {
+ target_state = PCI_D0;
} else if (device_may_wakeup(&dev->dev)) {
/*
* Find the deepest state from which the device can generate
* wake-up events, make it the target state and enable device
* to generate PME#.
*/
- if (!dev->pm_cap)
- return PCI_POWER_ERROR;
-
if (dev->pme_support) {
while (target_state
&& !(dev->pme_support & (1 << target_state)))
@@ -1532,7 +1533,7 @@
if (!pin)
return -1;
- while (dev->bus->parent) {
+ while (!pci_is_root_bus(dev->bus)) {
pin = pci_swizzle_interrupt_pin(dev, pin);
dev = dev->bus->self;
}
@@ -1552,7 +1553,7 @@
{
u8 pin = *pinp;
- while (dev->bus->parent) {
+ while (!pci_is_root_bus(dev->bus)) {
pin = pci_swizzle_interrupt_pin(dev, pin);
dev = dev->bus->self;
}
@@ -2058,111 +2059,177 @@
EXPORT_SYMBOL(pci_set_dma_seg_boundary);
#endif
-static int __pcie_flr(struct pci_dev *dev, int probe)
+static int pcie_flr(struct pci_dev *dev, int probe)
{
- u16 status;
+ int i;
+ int pos;
u32 cap;
- int exppos = pci_find_capability(dev, PCI_CAP_ID_EXP);
+ u16 status;
- if (!exppos)
+ pos = pci_find_capability(dev, PCI_CAP_ID_EXP);
+ if (!pos)
return -ENOTTY;
- pci_read_config_dword(dev, exppos + PCI_EXP_DEVCAP, &cap);
+
+ pci_read_config_dword(dev, pos + PCI_EXP_DEVCAP, &cap);
if (!(cap & PCI_EXP_DEVCAP_FLR))
return -ENOTTY;
if (probe)
return 0;
- pci_block_user_cfg_access(dev);
-
/* Wait for Transaction Pending bit clean */
- pci_read_config_word(dev, exppos + PCI_EXP_DEVSTA, &status);
- if (!(status & PCI_EXP_DEVSTA_TRPND))
- goto transaction_done;
+ for (i = 0; i < 4; i++) {
+ if (i)
+ msleep((1 << (i - 1)) * 100);
- msleep(100);
- pci_read_config_word(dev, exppos + PCI_EXP_DEVSTA, &status);
- if (!(status & PCI_EXP_DEVSTA_TRPND))
- goto transaction_done;
+ pci_read_config_word(dev, pos + PCI_EXP_DEVSTA, &status);
+ if (!(status & PCI_EXP_DEVSTA_TRPND))
+ goto clear;
+ }
- dev_info(&dev->dev, "Busy after 100ms while trying to reset; "
- "sleeping for 1 second\n");
- ssleep(1);
- pci_read_config_word(dev, exppos + PCI_EXP_DEVSTA, &status);
- if (status & PCI_EXP_DEVSTA_TRPND)
- dev_info(&dev->dev, "Still busy after 1s; "
- "proceeding with reset anyway\n");
+ dev_err(&dev->dev, "transaction is not cleared; "
+ "proceeding with reset anyway\n");
-transaction_done:
- pci_write_config_word(dev, exppos + PCI_EXP_DEVCTL,
+clear:
+ pci_write_config_word(dev, pos + PCI_EXP_DEVCTL,
PCI_EXP_DEVCTL_BCR_FLR);
- mdelay(100);
+ msleep(100);
- pci_unblock_user_cfg_access(dev);
return 0;
}
-static int __pci_af_flr(struct pci_dev *dev, int probe)
+static int pci_af_flr(struct pci_dev *dev, int probe)
{
- int cappos = pci_find_capability(dev, PCI_CAP_ID_AF);
- u8 status;
+ int i;
+ int pos;
u8 cap;
+ u8 status;
- if (!cappos)
+ pos = pci_find_capability(dev, PCI_CAP_ID_AF);
+ if (!pos)
return -ENOTTY;
- pci_read_config_byte(dev, cappos + PCI_AF_CAP, &cap);
+
+ pci_read_config_byte(dev, pos + PCI_AF_CAP, &cap);
if (!(cap & PCI_AF_CAP_TP) || !(cap & PCI_AF_CAP_FLR))
return -ENOTTY;
if (probe)
return 0;
- pci_block_user_cfg_access(dev);
-
/* Wait for Transaction Pending bit clean */
- pci_read_config_byte(dev, cappos + PCI_AF_STATUS, &status);
- if (!(status & PCI_AF_STATUS_TP))
- goto transaction_done;
+ for (i = 0; i < 4; i++) {
+ if (i)
+ msleep((1 << (i - 1)) * 100);
+ pci_read_config_byte(dev, pos + PCI_AF_STATUS, &status);
+ if (!(status & PCI_AF_STATUS_TP))
+ goto clear;
+ }
+
+ dev_err(&dev->dev, "transaction is not cleared; "
+ "proceeding with reset anyway\n");
+
+clear:
+ pci_write_config_byte(dev, pos + PCI_AF_CTRL, PCI_AF_CTRL_FLR);
msleep(100);
- pci_read_config_byte(dev, cappos + PCI_AF_STATUS, &status);
- if (!(status & PCI_AF_STATUS_TP))
- goto transaction_done;
- dev_info(&dev->dev, "Busy after 100ms while trying to"
- " reset; sleeping for 1 second\n");
- ssleep(1);
- pci_read_config_byte(dev, cappos + PCI_AF_STATUS, &status);
- if (status & PCI_AF_STATUS_TP)
- dev_info(&dev->dev, "Still busy after 1s; "
- "proceeding with reset anyway\n");
-
-transaction_done:
- pci_write_config_byte(dev, cappos + PCI_AF_CTRL, PCI_AF_CTRL_FLR);
- mdelay(100);
-
- pci_unblock_user_cfg_access(dev);
return 0;
}
-static int __pci_reset_function(struct pci_dev *pdev, int probe)
+static int pci_pm_reset(struct pci_dev *dev, int probe)
{
- int res;
+ u16 csr;
- res = __pcie_flr(pdev, probe);
- if (res != -ENOTTY)
- return res;
+ if (!dev->pm_cap)
+ return -ENOTTY;
- res = __pci_af_flr(pdev, probe);
- if (res != -ENOTTY)
- return res;
+ pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &csr);
+ if (csr & PCI_PM_CTRL_NO_SOFT_RESET)
+ return -ENOTTY;
- return res;
+ if (probe)
+ return 0;
+
+ if (dev->current_state != PCI_D0)
+ return -EINVAL;
+
+ csr &= ~PCI_PM_CTRL_STATE_MASK;
+ csr |= PCI_D3hot;
+ pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, csr);
+ msleep(pci_pm_d3_delay);
+
+ csr &= ~PCI_PM_CTRL_STATE_MASK;
+ csr |= PCI_D0;
+ pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, csr);
+ msleep(pci_pm_d3_delay);
+
+ return 0;
+}
+
+static int pci_parent_bus_reset(struct pci_dev *dev, int probe)
+{
+ u16 ctrl;
+ struct pci_dev *pdev;
+
+ if (dev->subordinate)
+ return -ENOTTY;
+
+ list_for_each_entry(pdev, &dev->bus->devices, bus_list)
+ if (pdev != dev)
+ return -ENOTTY;
+
+ if (probe)
+ return 0;
+
+ pci_read_config_word(dev->bus->self, PCI_BRIDGE_CONTROL, &ctrl);
+ ctrl |= PCI_BRIDGE_CTL_BUS_RESET;
+ pci_write_config_word(dev->bus->self, PCI_BRIDGE_CONTROL, ctrl);
+ msleep(100);
+
+ ctrl &= ~PCI_BRIDGE_CTL_BUS_RESET;
+ pci_write_config_word(dev->bus->self, PCI_BRIDGE_CONTROL, ctrl);
+ msleep(100);
+
+ return 0;
+}
+
+static int pci_dev_reset(struct pci_dev *dev, int probe)
+{
+ int rc;
+
+ might_sleep();
+
+ if (!probe) {
+ pci_block_user_cfg_access(dev);
+ /* block PM suspend, driver probe, etc. */
+ down(&dev->dev.sem);
+ }
+
+ rc = pcie_flr(dev, probe);
+ if (rc != -ENOTTY)
+ goto done;
+
+ rc = pci_af_flr(dev, probe);
+ if (rc != -ENOTTY)
+ goto done;
+
+ rc = pci_pm_reset(dev, probe);
+ if (rc != -ENOTTY)
+ goto done;
+
+ rc = pci_parent_bus_reset(dev, probe);
+done:
+ if (!probe) {
+ up(&dev->dev.sem);
+ pci_unblock_user_cfg_access(dev);
+ }
+
+ return rc;
}
/**
- * pci_execute_reset_function() - Reset a PCI device function
- * @dev: Device function to reset
+ * __pci_reset_function - reset a PCI device function
+ * @dev: PCI device to reset
*
* Some devices allow an individual function to be reset without affecting
* other functions in the same device. The PCI device must be responsive
@@ -2174,18 +2241,18 @@
* device including MSI, bus mastering, BARs, decoding IO and memory spaces,
* etc.
*
- * Returns 0 if the device function was successfully reset or -ENOTTY if the
+ * Returns 0 if the device function was successfully reset or negative if the
* device doesn't support resetting a single function.
*/
-int pci_execute_reset_function(struct pci_dev *dev)
+int __pci_reset_function(struct pci_dev *dev)
{
- return __pci_reset_function(dev, 0);
+ return pci_dev_reset(dev, 0);
}
-EXPORT_SYMBOL_GPL(pci_execute_reset_function);
+EXPORT_SYMBOL_GPL(__pci_reset_function);
/**
- * pci_reset_function() - quiesce and reset a PCI device function
- * @dev: Device function to reset
+ * pci_reset_function - quiesce and reset a PCI device function
+ * @dev: PCI device to reset
*
* Some devices allow an individual function to be reset without affecting
* other functions in the same device. The PCI device must be responsive
@@ -2193,32 +2260,33 @@
*
* This function does not just reset the PCI portion of a device, but
* clears all the state associated with the device. This function differs
- * from pci_execute_reset_function in that it saves and restores device state
+ * from __pci_reset_function in that it saves and restores device state
* over the reset.
*
- * Returns 0 if the device function was successfully reset or -ENOTTY if the
+ * Returns 0 if the device function was successfully reset or negative if the
* device doesn't support resetting a single function.
*/
int pci_reset_function(struct pci_dev *dev)
{
- int r = __pci_reset_function(dev, 1);
+ int rc;
- if (r < 0)
- return r;
+ rc = pci_dev_reset(dev, 1);
+ if (rc)
+ return rc;
- if (!dev->msi_enabled && !dev->msix_enabled && dev->irq != 0)
- disable_irq(dev->irq);
pci_save_state(dev);
+ /*
+ * both INTx and MSI are disabled after the Interrupt Disable bit
+ * is set and the Bus Master bit is cleared.
+ */
pci_write_config_word(dev, PCI_COMMAND, PCI_COMMAND_INTX_DISABLE);
- r = pci_execute_reset_function(dev);
+ rc = pci_dev_reset(dev, 0);
pci_restore_state(dev);
- if (!dev->msi_enabled && !dev->msix_enabled && dev->irq != 0)
- enable_irq(dev->irq);
- return r;
+ return rc;
}
EXPORT_SYMBOL_GPL(pci_reset_function);
@@ -2591,6 +2659,8 @@
} else if (!strncmp(str, "resource_alignment=", 19)) {
pci_set_resource_alignment_param(str + 19,
strlen(str + 19));
+ } else if (!strncmp(str, "ecrc=", 5)) {
+ pcie_ecrc_get_policy(str + 5);
} else {
printk(KERN_ERR "PCI: Unknown option `%s'\n",
str);
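
A rough illustration of the difference between the two exported reset entry points, as a hedged sketch assuming the caller has already quiesced its device (example_reset is not part of this patch):

static int example_reset(struct pci_dev *pdev)
{
	int rc;

	/*
	 * pci_reset_function() saves and restores config space around the
	 * reset; __pci_reset_function() performs the same reset but leaves
	 * saving/restoring device state entirely to the caller.
	 */
	rc = pci_reset_function(pdev);
	if (rc)
		dev_warn(&pdev->dev, "function reset not supported (%d)\n", rc);
	return rc;
}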
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index d03f6b9..f73bcbe 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -229,6 +229,15 @@
u8 __iomem *mstate; /* VF Migration State Array */
};
+/* Address Translation Service */
+struct pci_ats {
+ int pos; /* capability position */
+ int stu; /* Smallest Translation Unit */
+ int qdep; /* Invalidate Queue Depth */
+ int ref_cnt; /* Physical Function reference count */
+ int is_enabled:1; /* Enable bit is set */
+};
+
#ifdef CONFIG_PCI_IOV
extern int pci_iov_init(struct pci_dev *dev);
extern void pci_iov_release(struct pci_dev *dev);
@@ -236,6 +245,20 @@
enum pci_bar_type *type);
extern void pci_restore_iov_state(struct pci_dev *dev);
extern int pci_iov_bus_range(struct pci_bus *bus);
+
+extern int pci_enable_ats(struct pci_dev *dev, int ps);
+extern void pci_disable_ats(struct pci_dev *dev);
+extern int pci_ats_queue_depth(struct pci_dev *dev);
+/**
+ * pci_ats_enabled - query the ATS status
+ * @dev: the PCI device
+ *
+ * Returns 1 if ATS capability is enabled, or 0 if not.
+ */
+static inline int pci_ats_enabled(struct pci_dev *dev)
+{
+ return dev->ats && dev->ats->is_enabled;
+}
#else
static inline int pci_iov_init(struct pci_dev *dev)
{
@@ -257,6 +280,22 @@
{
return 0;
}
+
+static inline int pci_enable_ats(struct pci_dev *dev, int ps)
+{
+ return -ENODEV;
+}
+static inline void pci_disable_ats(struct pci_dev *dev)
+{
+}
+static inline int pci_ats_queue_depth(struct pci_dev *dev)
+{
+ return -ENODEV;
+}
+static inline int pci_ats_enabled(struct pci_dev *dev)
+{
+ return 0;
+}
#endif /* CONFIG_PCI_IOV */
#endif /* DRIVERS_PCI_H */
diff --git a/drivers/pci/pcie/aer/Kconfig b/drivers/pci/pcie/aer/Kconfig
index c3bde58..50e94e0 100644
--- a/drivers/pci/pcie/aer/Kconfig
+++ b/drivers/pci/pcie/aer/Kconfig
@@ -10,3 +10,18 @@
This enables PCI Express Root Port Advanced Error Reporting
(AER) driver support. Error reporting messages sent to Root
Port will be handled by PCI Express AER driver.
+
+
+#
+# PCI Express ECRC
+#
+config PCIE_ECRC
+ bool "PCI Express ECRC settings control"
+ depends on PCIEAER
+ help
+ Used to override firmware/bios settings for PCI Express ECRC
+ (transaction layer end-to-end CRC checking).
+
+ When in doubt, say N.
+
+source "drivers/pci/pcie/aer/Kconfig.debug"
diff --git a/drivers/pci/pcie/aer/Kconfig.debug b/drivers/pci/pcie/aer/Kconfig.debug
new file mode 100644
index 0000000..b8c925c
--- /dev/null
+++ b/drivers/pci/pcie/aer/Kconfig.debug
@@ -0,0 +1,18 @@
+#
+# PCI Express Root Port Device AER Debug Configuration
+#
+
+config PCIEAER_INJECT
+ tristate "PCIE AER error injector support"
+ depends on PCIEAER
+ default n
+ help
+ This enables PCI Express Root Port Advanced Error Reporting
+ (AER) software error injector.
+
+ Debugging PCIE AER code is quite difficult because it is hard
+ to trigger various real hardware errors. Software based
+ error injection can fake almost all kinds of errors with the
+ help of a user-space helper tool, aer-inject, which can be
+ obtained from:
+ http://www.kernel.org/pub/linux/utils/pci/aer-inject/
diff --git a/drivers/pci/pcie/aer/Makefile b/drivers/pci/pcie/aer/Makefile
index 8da3bd8..2cba675 100644
--- a/drivers/pci/pcie/aer/Makefile
+++ b/drivers/pci/pcie/aer/Makefile
@@ -4,6 +4,9 @@
obj-$(CONFIG_PCIEAER) += aerdriver.o
+obj-$(CONFIG_PCIE_ECRC) += ecrc.o
+
aerdriver-objs := aerdrv_errprint.o aerdrv_core.o aerdrv.o
aerdriver-$(CONFIG_ACPI) += aerdrv_acpi.o
+obj-$(CONFIG_PCIEAER_INJECT) += aer_inject.o
diff --git a/drivers/pci/pcie/aer/aer_inject.c b/drivers/pci/pcie/aer/aer_inject.c
new file mode 100644
index 0000000..d92ae21
--- /dev/null
+++ b/drivers/pci/pcie/aer/aer_inject.c
@@ -0,0 +1,473 @@
+/*
+ * PCIE AER software error injection support.
+ *
+ * Debugging PCIE AER code is quite difficult because it is hard to
+ * trigger various real hardware errors. Software based error
+ * injection can fake almost all kinds of errors with the help of a
+ * user-space helper tool, aer-inject, which can be obtained from:
+ * http://www.kernel.org/pub/linux/utils/pci/aer-inject/
+ *
+ * Copyright 2009 Intel Corporation.
+ * Huang Ying <ying.huang@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/miscdevice.h>
+#include <linux/pci.h>
+#include <linux/fs.h>
+#include <asm/uaccess.h>
+#include "aerdrv.h"
+
+struct aer_error_inj
+{
+ u8 bus;
+ u8 dev;
+ u8 fn;
+ u32 uncor_status;
+ u32 cor_status;
+ u32 header_log0;
+ u32 header_log1;
+ u32 header_log2;
+ u32 header_log3;
+};
+
+struct aer_error
+{
+ struct list_head list;
+ unsigned int bus;
+ unsigned int devfn;
+ int pos_cap_err;
+
+ u32 uncor_status;
+ u32 cor_status;
+ u32 header_log0;
+ u32 header_log1;
+ u32 header_log2;
+ u32 header_log3;
+ u32 root_status;
+ u32 source_id;
+};
+
+struct pci_bus_ops
+{
+ struct list_head list;
+ struct pci_bus *bus;
+ struct pci_ops *ops;
+};
+
+static LIST_HEAD(einjected);
+
+static LIST_HEAD(pci_bus_ops_list);
+
+/* Protect einjected and pci_bus_ops_list */
+static DEFINE_SPINLOCK(inject_lock);
+
+static void aer_error_init(struct aer_error *err, unsigned int bus,
+ unsigned int devfn, int pos_cap_err)
+{
+ INIT_LIST_HEAD(&err->list);
+ err->bus = bus;
+ err->devfn = devfn;
+ err->pos_cap_err = pos_cap_err;
+}
+
+/* inject_lock must be held before calling */
+static struct aer_error *__find_aer_error(unsigned int bus, unsigned int devfn)
+{
+ struct aer_error *err;
+
+ list_for_each_entry(err, &einjected, list) {
+ if (bus == err->bus && devfn == err->devfn)
+ return err;
+ }
+ return NULL;
+}
+
+/* inject_lock must be held before calling */
+static struct aer_error *__find_aer_error_by_dev(struct pci_dev *dev)
+{
+ return __find_aer_error(dev->bus->number, dev->devfn);
+}
+
+/* inject_lock must be held before calling */
+static struct pci_ops *__find_pci_bus_ops(struct pci_bus *bus)
+{
+ struct pci_bus_ops *bus_ops;
+
+ list_for_each_entry(bus_ops, &pci_bus_ops_list, list) {
+ if (bus_ops->bus == bus)
+ return bus_ops->ops;
+ }
+ return NULL;
+}
+
+static struct pci_bus_ops *pci_bus_ops_pop(void)
+{
+ unsigned long flags;
+ struct pci_bus_ops *bus_ops = NULL;
+
+ spin_lock_irqsave(&inject_lock, flags);
+ if (list_empty(&pci_bus_ops_list))
+ bus_ops = NULL;
+ else {
+ struct list_head *lh = pci_bus_ops_list.next;
+ list_del(lh);
+ bus_ops = list_entry(lh, struct pci_bus_ops, list);
+ }
+ spin_unlock_irqrestore(&inject_lock, flags);
+ return bus_ops;
+}
+
+static u32 *find_pci_config_dword(struct aer_error *err, int where,
+ int *prw1cs)
+{
+ int rw1cs = 0;
+ u32 *target = NULL;
+
+ if (err->pos_cap_err == -1)
+ return NULL;
+
+ switch (where - err->pos_cap_err) {
+ case PCI_ERR_UNCOR_STATUS:
+ target = &err->uncor_status;
+ rw1cs = 1;
+ break;
+ case PCI_ERR_COR_STATUS:
+ target = &err->cor_status;
+ rw1cs = 1;
+ break;
+ case PCI_ERR_HEADER_LOG:
+ target = &err->header_log0;
+ break;
+ case PCI_ERR_HEADER_LOG+4:
+ target = &err->header_log1;
+ break;
+ case PCI_ERR_HEADER_LOG+8:
+ target = &err->header_log2;
+ break;
+ case PCI_ERR_HEADER_LOG+12:
+ target = &err->header_log3;
+ break;
+ case PCI_ERR_ROOT_STATUS:
+ target = &err->root_status;
+ rw1cs = 1;
+ break;
+ case PCI_ERR_ROOT_COR_SRC:
+ target = &err->source_id;
+ break;
+ }
+ if (prw1cs)
+ *prw1cs = rw1cs;
+ return target;
+}
+
+static int pci_read_aer(struct pci_bus *bus, unsigned int devfn, int where,
+ int size, u32 *val)
+{
+ u32 *sim;
+ struct aer_error *err;
+ unsigned long flags;
+ struct pci_ops *ops;
+
+ spin_lock_irqsave(&inject_lock, flags);
+ if (size != sizeof(u32))
+ goto out;
+ err = __find_aer_error(bus->number, devfn);
+ if (!err)
+ goto out;
+
+ sim = find_pci_config_dword(err, where, NULL);
+ if (sim) {
+ *val = *sim;
+ spin_unlock_irqrestore(&inject_lock, flags);
+ return 0;
+ }
+out:
+ ops = __find_pci_bus_ops(bus);
+ spin_unlock_irqrestore(&inject_lock, flags);
+ return ops->read(bus, devfn, where, size, val);
+}
+
+int pci_write_aer(struct pci_bus *bus, unsigned int devfn, int where, int size,
+ u32 val)
+{
+ u32 *sim;
+ struct aer_error *err;
+ unsigned long flags;
+ int rw1cs;
+ struct pci_ops *ops;
+
+ spin_lock_irqsave(&inject_lock, flags);
+ if (size != sizeof(u32))
+ goto out;
+ err = __find_aer_error(bus->number, devfn);
+ if (!err)
+ goto out;
+
+ sim = find_pci_config_dword(err, where, &rw1cs);
+ if (sim) {
+ if (rw1cs)
+ *sim ^= val;
+ else
+ *sim = val;
+ spin_unlock_irqrestore(&inject_lock, flags);
+ return 0;
+ }
+out:
+ ops = __find_pci_bus_ops(bus);
+ spin_unlock_irqrestore(&inject_lock, flags);
+ return ops->write(bus, devfn, where, size, val);
+}
+
+static struct pci_ops pci_ops_aer = {
+ .read = pci_read_aer,
+ .write = pci_write_aer,
+};
+
+static void pci_bus_ops_init(struct pci_bus_ops *bus_ops,
+ struct pci_bus *bus,
+ struct pci_ops *ops)
+{
+ INIT_LIST_HEAD(&bus_ops->list);
+ bus_ops->bus = bus;
+ bus_ops->ops = ops;
+}
+
+static int pci_bus_set_aer_ops(struct pci_bus *bus)
+{
+ struct pci_ops *ops;
+ struct pci_bus_ops *bus_ops;
+ unsigned long flags;
+
+ bus_ops = kmalloc(sizeof(*bus_ops), GFP_KERNEL);
+ if (!bus_ops)
+ return -ENOMEM;
+ ops = pci_bus_set_ops(bus, &pci_ops_aer);
+ spin_lock_irqsave(&inject_lock, flags);
+ if (ops == &pci_ops_aer)
+ goto out;
+ pci_bus_ops_init(bus_ops, bus, ops);
+ list_add(&bus_ops->list, &pci_bus_ops_list);
+ bus_ops = NULL;
+out:
+ spin_unlock_irqrestore(&inject_lock, flags);
+ if (bus_ops)
+ kfree(bus_ops);
+ return 0;
+}
+
+static struct pci_dev *pcie_find_root_port(struct pci_dev *dev)
+{
+ while (1) {
+ if (!dev->is_pcie)
+ break;
+ if (dev->pcie_type == PCI_EXP_TYPE_ROOT_PORT)
+ return dev;
+ if (!dev->bus->self)
+ break;
+ dev = dev->bus->self;
+ }
+ return NULL;
+}
+
+static int find_aer_device_iter(struct device *device, void *data)
+{
+ struct pcie_device **result = data;
+ struct pcie_device *pcie_dev;
+
+ if (device->bus == &pcie_port_bus_type) {
+ pcie_dev = to_pcie_device(device);
+ if (pcie_dev->service & PCIE_PORT_SERVICE_AER) {
+ *result = pcie_dev;
+ return 1;
+ }
+ }
+ return 0;
+}
+
+static int find_aer_device(struct pci_dev *dev, struct pcie_device **result)
+{
+ return device_for_each_child(&dev->dev, result, find_aer_device_iter);
+}
+
+static int aer_inject(struct aer_error_inj *einj)
+{
+ struct aer_error *err, *rperr;
+ struct aer_error *err_alloc = NULL, *rperr_alloc = NULL;
+ struct pci_dev *dev, *rpdev;
+ struct pcie_device *edev;
+ unsigned long flags;
+ unsigned int devfn = PCI_DEVFN(einj->dev, einj->fn);
+ int pos_cap_err, rp_pos_cap_err;
+ u32 sever;
+ int ret = 0;
+
+ dev = pci_get_bus_and_slot(einj->bus, devfn);
+ if (!dev)
+ return -EINVAL;
+ rpdev = pcie_find_root_port(dev);
+ if (!rpdev) {
+ ret = -EINVAL;
+ goto out_put;
+ }
+
+ pos_cap_err = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
+ if (!pos_cap_err) {
+ ret = -EIO;
+ goto out_put;
+ }
+ pci_read_config_dword(dev, pos_cap_err + PCI_ERR_UNCOR_SEVER, &sever);
+
+ rp_pos_cap_err = pci_find_ext_capability(rpdev, PCI_EXT_CAP_ID_ERR);
+ if (!rp_pos_cap_err) {
+ ret = -EIO;
+ goto out_put;
+ }
+
+ err_alloc = kzalloc(sizeof(struct aer_error), GFP_KERNEL);
+ if (!err_alloc) {
+ ret = -ENOMEM;
+ goto out_put;
+ }
+ rperr_alloc = kzalloc(sizeof(struct aer_error), GFP_KERNEL);
+ if (!rperr_alloc) {
+ ret = -ENOMEM;
+ goto out_put;
+ }
+
+ spin_lock_irqsave(&inject_lock, flags);
+
+ err = __find_aer_error_by_dev(dev);
+ if (!err) {
+ err = err_alloc;
+ err_alloc = NULL;
+ aer_error_init(err, einj->bus, devfn, pos_cap_err);
+ list_add(&err->list, &einjected);
+ }
+ err->uncor_status |= einj->uncor_status;
+ err->cor_status |= einj->cor_status;
+ err->header_log0 = einj->header_log0;
+ err->header_log1 = einj->header_log1;
+ err->header_log2 = einj->header_log2;
+ err->header_log3 = einj->header_log3;
+
+ rperr = __find_aer_error_by_dev(rpdev);
+ if (!rperr) {
+ rperr = rperr_alloc;
+ rperr_alloc = NULL;
+ aer_error_init(rperr, rpdev->bus->number, rpdev->devfn,
+ rp_pos_cap_err);
+ list_add(&rperr->list, &einjected);
+ }
+ if (einj->cor_status) {
+ if (rperr->root_status & PCI_ERR_ROOT_COR_RCV)
+ rperr->root_status |= PCI_ERR_ROOT_MULTI_COR_RCV;
+ else
+ rperr->root_status |= PCI_ERR_ROOT_COR_RCV;
+ rperr->source_id &= 0xffff0000;
+ rperr->source_id |= (einj->bus << 8) | devfn;
+ }
+ if (einj->uncor_status) {
+ if (rperr->root_status & PCI_ERR_ROOT_UNCOR_RCV)
+ rperr->root_status |= PCI_ERR_ROOT_MULTI_UNCOR_RCV;
+ if (sever & einj->uncor_status) {
+ rperr->root_status |= PCI_ERR_ROOT_FATAL_RCV;
+ if (!(rperr->root_status & PCI_ERR_ROOT_UNCOR_RCV))
+ rperr->root_status |= PCI_ERR_ROOT_FIRST_FATAL;
+ } else
+ rperr->root_status |= PCI_ERR_ROOT_NONFATAL_RCV;
+ rperr->root_status |= PCI_ERR_ROOT_UNCOR_RCV;
+ rperr->source_id &= 0x0000ffff;
+ rperr->source_id |= ((einj->bus << 8) | devfn) << 16;
+ }
+ spin_unlock_irqrestore(&inject_lock, flags);
+
+ ret = pci_bus_set_aer_ops(dev->bus);
+ if (ret)
+ goto out_put;
+ ret = pci_bus_set_aer_ops(rpdev->bus);
+ if (ret)
+ goto out_put;
+
+ if (find_aer_device(rpdev, &edev))
+ aer_irq(-1, edev);
+ else
+ ret = -EINVAL;
+out_put:
+ if (err_alloc)
+ kfree(err_alloc);
+ if (rperr_alloc)
+ kfree(rperr_alloc);
+ pci_dev_put(dev);
+ return ret;
+}
+
+static ssize_t aer_inject_write(struct file *filp, const char __user *ubuf,
+ size_t usize, loff_t *off)
+{
+ struct aer_error_inj einj;
+ int ret;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ if (usize != sizeof(struct aer_error_inj))
+ return -EINVAL;
+
+ if (copy_from_user(&einj, ubuf, usize))
+ return -EFAULT;
+
+ ret = aer_inject(&einj);
+ return ret ? ret : usize;
+}
+
+static const struct file_operations aer_inject_fops = {
+ .write = aer_inject_write,
+ .owner = THIS_MODULE,
+};
+
+static struct miscdevice aer_inject_device = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "aer_inject",
+ .fops = &aer_inject_fops,
+};
+
+static int __init aer_inject_init(void)
+{
+ return misc_register(&aer_inject_device);
+}
+
+static void __exit aer_inject_exit(void)
+{
+ struct aer_error *err, *err_next;
+ unsigned long flags;
+ struct pci_bus_ops *bus_ops;
+
+ misc_deregister(&aer_inject_device);
+
+ while ((bus_ops = pci_bus_ops_pop())) {
+ pci_bus_set_ops(bus_ops->bus, bus_ops->ops);
+ kfree(bus_ops);
+ }
+
+ spin_lock_irqsave(&inject_lock, flags);
+ list_for_each_entry_safe(err, err_next,
+ &einjected, list) {
+ list_del(&err->list);
+ kfree(err);
+ }
+ spin_unlock_irqrestore(&inject_lock, flags);
+}
+
+module_init(aer_inject_init);
+module_exit(aer_inject_exit);
+
+MODULE_DESCRIPTION("PCIE AER software error injector");
+MODULE_LICENSE("GPL");
diff --git a/drivers/pci/pcie/aer/aerdrv.c b/drivers/pci/pcie/aer/aerdrv.c
index 32ade5a..4770f13 100644
--- a/drivers/pci/pcie/aer/aerdrv.c
+++ b/drivers/pci/pcie/aer/aerdrv.c
@@ -77,7 +77,7 @@
*
* Invoked when Root Port detects AER messages.
**/
-static irqreturn_t aer_irq(int irq, void *context)
+irqreturn_t aer_irq(int irq, void *context)
{
unsigned int status, id;
struct pcie_device *pdev = (struct pcie_device *)context;
@@ -126,6 +126,7 @@
return IRQ_HANDLED;
}
+EXPORT_SYMBOL_GPL(aer_irq);
/**
* aer_alloc_rpc - allocate Root Port data structure
diff --git a/drivers/pci/pcie/aer/aerdrv.h b/drivers/pci/pcie/aer/aerdrv.h
index aa14482..bbd7428 100644
--- a/drivers/pci/pcie/aer/aerdrv.h
+++ b/drivers/pci/pcie/aer/aerdrv.h
@@ -11,6 +11,7 @@
#include <linux/workqueue.h>
#include <linux/pcieport_if.h>
#include <linux/aer.h>
+#include <linux/interrupt.h>
#define AER_NONFATAL 0
#define AER_FATAL 1
@@ -56,7 +57,11 @@
unsigned int dw3;
};
+#define AER_MAX_MULTI_ERR_DEVICES 5 /* Not likely to have more */
struct aer_err_info {
+ struct pci_dev *dev[AER_MAX_MULTI_ERR_DEVICES];
+ int error_dev_num;
+ u16 id;
int severity; /* 0:NONFATAL | 1:FATAL | 2:COR */
int flags;
unsigned int status; /* COR/UNCOR Error Status */
@@ -120,6 +125,7 @@
extern int aer_init(struct pcie_device *dev);
extern void aer_isr(struct work_struct *work);
extern void aer_print_error(struct pci_dev *dev, struct aer_err_info *info);
+extern irqreturn_t aer_irq(int irq, void *context);
#ifdef CONFIG_ACPI
extern int aer_osc_setup(struct pcie_device *pciedev);
diff --git a/drivers/pci/pcie/aer/aerdrv_core.c b/drivers/pci/pcie/aer/aerdrv_core.c
index 307452f..3d88727 100644
--- a/drivers/pci/pcie/aer/aerdrv_core.c
+++ b/drivers/pci/pcie/aer/aerdrv_core.c
@@ -26,7 +26,9 @@
#include "aerdrv.h"
static int forceload;
+static int nosourceid;
module_param(forceload, bool, 0);
+module_param(nosourceid, bool, 0);
int pci_enable_pcie_error_reporting(struct pci_dev *dev)
{
@@ -109,19 +111,23 @@
#endif /* 0 */
-static void set_device_error_reporting(struct pci_dev *dev, void *data)
+static int set_device_error_reporting(struct pci_dev *dev, void *data)
{
bool enable = *((bool *)data);
- if (dev->pcie_type != PCIE_RC_PORT &&
- dev->pcie_type != PCIE_SW_UPSTREAM_PORT &&
- dev->pcie_type != PCIE_SW_DOWNSTREAM_PORT)
- return;
+ if (dev->pcie_type == PCIE_RC_PORT ||
+ dev->pcie_type == PCIE_SW_UPSTREAM_PORT ||
+ dev->pcie_type == PCIE_SW_DOWNSTREAM_PORT) {
+ if (enable)
+ pci_enable_pcie_error_reporting(dev);
+ else
+ pci_disable_pcie_error_reporting(dev);
+ }
if (enable)
- pci_enable_pcie_error_reporting(dev);
- else
- pci_disable_pcie_error_reporting(dev);
+ pcie_set_ecrc_checking(dev);
+
+ return 0;
}
/**
@@ -139,73 +145,148 @@
pci_walk_bus(dev->subordinate, set_device_error_reporting, &enable);
}
-static int find_device_iter(struct device *device, void *data)
+static inline int compare_device_id(struct pci_dev *dev,
+ struct aer_err_info *e_info)
{
- struct pci_dev *dev;
- u16 id = *(unsigned long *)data;
- u8 secondary, subordinate, d_bus = id >> 8;
-
- if (device->bus == &pci_bus_type) {
- dev = to_pci_dev(device);
- if (id == ((dev->bus->number << 8) | dev->devfn)) {
- /*
- * Device ID match
- */
- *(unsigned long*)data = (unsigned long)device;
- return 1;
- }
-
+ if (e_info->id == ((dev->bus->number << 8) | dev->devfn)) {
/*
- * If device is P2P, check if it is an upstream?
+ * Device ID match
*/
- if (dev->hdr_type & PCI_HEADER_TYPE_BRIDGE) {
- pci_read_config_byte(dev, PCI_SECONDARY_BUS,
- &secondary);
- pci_read_config_byte(dev, PCI_SUBORDINATE_BUS,
- &subordinate);
- if (d_bus >= secondary && d_bus <= subordinate) {
- *(unsigned long*)data = (unsigned long)device;
- return 1;
- }
- }
+ return 1;
}
return 0;
}
+static int add_error_device(struct aer_err_info *e_info, struct pci_dev *dev)
+{
+ if (e_info->error_dev_num < AER_MAX_MULTI_ERR_DEVICES) {
+ e_info->dev[e_info->error_dev_num] = dev;
+ e_info->error_dev_num++;
+ return 1;
+ } else
+ return 0;
+}
+
+
+#define PCI_BUS(x) (((x) >> 8) & 0xff)
+
+static int find_device_iter(struct pci_dev *dev, void *data)
+{
+ int pos;
+ u32 status;
+ u32 mask;
+ u16 reg16;
+ int result;
+ struct aer_err_info *e_info = (struct aer_err_info *)data;
+
+ /*
+ * When the bus id is 0, it might be a bogus id
+ * reported by the root port.
+ */
+ if (!nosourceid && (PCI_BUS(e_info->id) != 0)) {
+ result = compare_device_id(dev, e_info);
+ if (result)
+ add_error_device(e_info, dev);
+
+ /*
+ * If there is no multiple-error condition, we stop or
+ * continue based on the result of the id comparison.
+ */
+ if (!(e_info->flags & AER_MULTI_ERROR_VALID_FLAG))
+ return result;
+
+ /*
+ * If there are multiple errors and the id does match,
+ * we need to continue searching for other devices under
+ * the root port; returning 0 keeps the bus walk going.
+ */
+ if (result)
+ return 0;
+ }
+
+ /*
+ * When either
+ * 1) nosourceid==y;
+ * 2) bus id is equal to 0. Some ports might lose the bus
+ * id of error source id;
+ * 3) There are multiple errors and prior id comparing fails;
+ * We check AER status registers to find the initial reporter.
+ */
+ if (atomic_read(&dev->enable_cnt) == 0)
+ return 0;
+ pos = pci_find_capability(dev, PCI_CAP_ID_EXP);
+ if (!pos)
+ return 0;
+ /* Check if AER is enabled */
+ pci_read_config_word(dev, pos+PCI_EXP_DEVCTL, &reg16);
+ if (!(reg16 & (
+ PCI_EXP_DEVCTL_CERE |
+ PCI_EXP_DEVCTL_NFERE |
+ PCI_EXP_DEVCTL_FERE |
+ PCI_EXP_DEVCTL_URRE)))
+ return 0;
+ pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
+ if (!pos)
+ return 0;
+
+ status = 0;
+ mask = 0;
+ if (e_info->severity == AER_CORRECTABLE) {
+ pci_read_config_dword(dev,
+ pos + PCI_ERR_COR_STATUS,
+ &status);
+ pci_read_config_dword(dev,
+ pos + PCI_ERR_COR_MASK,
+ &mask);
+ if (status & ERR_CORRECTABLE_ERROR_MASK & ~mask) {
+ add_error_device(e_info, dev);
+ goto added;
+ }
+ } else {
+ pci_read_config_dword(dev,
+ pos + PCI_ERR_UNCOR_STATUS,
+ &status);
+ pci_read_config_dword(dev,
+ pos + PCI_ERR_UNCOR_MASK,
+ &mask);
+ if (status & ERR_UNCORRECTABLE_ERROR_MASK & ~mask) {
+ add_error_device(e_info, dev);
+ goto added;
+ }
+ }
+
+ return 0;
+
+added:
+ if (e_info->flags & AER_MULTI_ERROR_VALID_FLAG)
+ return 0;
+ else
+ return 1;
+}
+
/**
* find_source_device - search through device hierarchy for source device
* @parent: pointer to Root Port pci_dev data structure
- * @id: device ID of agent who sends an error message to this Root Port
+ * @e_info: comprehensive error information, such as the error source id
*
* Invoked when error is detected at the Root Port.
*/
-static struct device* find_source_device(struct pci_dev *parent, u16 id)
+static void find_source_device(struct pci_dev *parent,
+ struct aer_err_info *e_info)
{
struct pci_dev *dev = parent;
- struct device *device;
- unsigned long device_addr;
- int status;
+ int result;
/* Is Root Port an agent that sends error message? */
- if (id == ((dev->bus->number << 8) | dev->devfn))
- return &dev->dev;
+ result = find_device_iter(dev, e_info);
+ if (result)
+ return;
- do {
- device_addr = id;
- if ((status = device_for_each_child(&dev->dev,
- &device_addr, find_device_iter))) {
- device = (struct device*)device_addr;
- dev = to_pci_dev(device);
- if (id == ((dev->bus->number << 8) | dev->devfn))
- return device;
- }
- }while (status);
-
- return NULL;
+ pci_walk_bus(parent->subordinate, find_device_iter, e_info);
}
-static void report_error_detected(struct pci_dev *dev, void *data)
+static int report_error_detected(struct pci_dev *dev, void *data)
{
pci_ers_result_t vote;
struct pci_error_handlers *err_handler;
@@ -230,16 +311,16 @@
dev->driver ?
"no AER-aware driver" : "no driver");
}
- return;
+ return 0;
}
err_handler = dev->driver->err_handler;
vote = err_handler->error_detected(dev, result_data->state);
result_data->result = merge_result(result_data->result, vote);
- return;
+ return 0;
}
-static void report_mmio_enabled(struct pci_dev *dev, void *data)
+static int report_mmio_enabled(struct pci_dev *dev, void *data)
{
pci_ers_result_t vote;
struct pci_error_handlers *err_handler;
@@ -249,15 +330,15 @@
if (!dev->driver ||
!dev->driver->err_handler ||
!dev->driver->err_handler->mmio_enabled)
- return;
+ return 0;
err_handler = dev->driver->err_handler;
vote = err_handler->mmio_enabled(dev);
result_data->result = merge_result(result_data->result, vote);
- return;
+ return 0;
}
-static void report_slot_reset(struct pci_dev *dev, void *data)
+static int report_slot_reset(struct pci_dev *dev, void *data)
{
pci_ers_result_t vote;
struct pci_error_handlers *err_handler;
@@ -267,15 +348,15 @@
if (!dev->driver ||
!dev->driver->err_handler ||
!dev->driver->err_handler->slot_reset)
- return;
+ return 0;
err_handler = dev->driver->err_handler;
vote = err_handler->slot_reset(dev);
result_data->result = merge_result(result_data->result, vote);
- return;
+ return 0;
}
-static void report_resume(struct pci_dev *dev, void *data)
+static int report_resume(struct pci_dev *dev, void *data)
{
struct pci_error_handlers *err_handler;
@@ -284,11 +365,11 @@
if (!dev->driver ||
!dev->driver->err_handler ||
!dev->driver->err_handler->resume)
- return;
+ return 0;
err_handler = dev->driver->err_handler;
err_handler->resume(dev);
- return;
+ return 0;
}
/**
@@ -305,7 +386,7 @@
static pci_ers_result_t broadcast_error_message(struct pci_dev *dev,
enum pci_channel_state state,
char *error_mesg,
- void (*cb)(struct pci_dev *, void *))
+ int (*cb)(struct pci_dev *, void *))
{
struct aer_broadcast_data result_data;
@@ -497,12 +578,12 @@
*/
static void handle_error_source(struct pcie_device * aerdev,
struct pci_dev *dev,
- struct aer_err_info info)
+ struct aer_err_info *info)
{
pci_ers_result_t status = 0;
int pos;
- if (info.severity == AER_CORRECTABLE) {
+ if (info->severity == AER_CORRECTABLE) {
/*
* Correctable error does not need software intervention.
* No need to go through error recovery process.
@@ -510,9 +591,9 @@
pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
if (pos)
pci_write_config_dword(dev, pos + PCI_ERR_COR_STATUS,
- info.status);
+ info->status);
} else {
- status = do_recovery(aerdev, dev, info.severity);
+ status = do_recovery(aerdev, dev, info->severity);
if (status == PCI_ERS_RESULT_RECOVERED) {
dev_printk(KERN_DEBUG, &dev->dev, "AER driver "
"successfully recovered\n");
@@ -661,6 +742,28 @@
return AER_SUCCESS;
}
+static inline void aer_process_err_devices(struct pcie_device *p_device,
+ struct aer_err_info *e_info)
+{
+ int i;
+
+ if (!e_info->dev[0]) {
+ dev_printk(KERN_DEBUG, &p_device->port->dev,
+ "can't find device of ID%04x\n",
+ e_info->id);
+ }
+
+ for (i = 0; i < e_info->error_dev_num && e_info->dev[i]; i++) {
+ if (get_device_error_info(e_info->dev[i], e_info) ==
+ AER_SUCCESS) {
+ aer_print_error(e_info->dev[i], e_info);
+ handle_error_source(p_device,
+ e_info->dev[i],
+ e_info);
+ }
+ }
+}
+
/**
* aer_isr_one_error - consume an error detected by root port
* @p_device: pointer to error root port service device
@@ -669,10 +772,16 @@
static void aer_isr_one_error(struct pcie_device *p_device,
struct aer_err_source *e_src)
{
- struct device *s_device;
- struct aer_err_info e_info = {0, 0, 0,};
+ struct aer_err_info *e_info;
int i;
- u16 id;
+
+ /* struct aer_err_info might be big, so we allocate it with slab */
+ e_info = kmalloc(sizeof(struct aer_err_info), GFP_KERNEL);
+ if (e_info == NULL) {
+ dev_printk(KERN_DEBUG, &p_device->port->dev,
+ "Can't allocate mem when processing AER errors\n");
+ return;
+ }
/*
* There is a possibility that both correctable error and
@@ -684,31 +793,26 @@
if (!(e_src->status & i))
continue;
+ memset(e_info, 0, sizeof(struct aer_err_info));
+
/* Init comprehensive error information */
if (i & PCI_ERR_ROOT_COR_RCV) {
- id = ERR_COR_ID(e_src->id);
- e_info.severity = AER_CORRECTABLE;
+ e_info->id = ERR_COR_ID(e_src->id);
+ e_info->severity = AER_CORRECTABLE;
} else {
- id = ERR_UNCOR_ID(e_src->id);
- e_info.severity = ((e_src->status >> 6) & 1);
+ e_info->id = ERR_UNCOR_ID(e_src->id);
+ e_info->severity = ((e_src->status >> 6) & 1);
}
if (e_src->status &
(PCI_ERR_ROOT_MULTI_COR_RCV |
PCI_ERR_ROOT_MULTI_UNCOR_RCV))
- e_info.flags |= AER_MULTI_ERROR_VALID_FLAG;
- if (!(s_device = find_source_device(p_device->port, id))) {
- printk(KERN_DEBUG "%s->can't find device of ID%04x\n",
- __func__, id);
- continue;
- }
- if (get_device_error_info(to_pci_dev(s_device), &e_info) ==
- AER_SUCCESS) {
- aer_print_error(to_pci_dev(s_device), &e_info);
- handle_error_source(p_device,
- to_pci_dev(s_device),
- e_info);
- }
+ e_info->flags |= AER_MULTI_ERROR_VALID_FLAG;
+
+ find_source_device(p_device->port, e_info);
+ aer_process_err_devices(p_device, e_info);
}
+
+ kfree(e_info);
}
/**
diff --git a/drivers/pci/pcie/aer/ecrc.c b/drivers/pci/pcie/aer/ecrc.c
new file mode 100644
index 0000000..ece97df
--- /dev/null
+++ b/drivers/pci/pcie/aer/ecrc.c
@@ -0,0 +1,131 @@
+/*
+ * Enables/disables PCIe ECRC checking.
+ *
+ * (C) Copyright 2009 Hewlett-Packard Development Company, L.P.
+ * Andrew Patterson <andrew.patterson@hp.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ * 02111-1307, USA.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/pci.h>
+#include <linux/pci_regs.h>
+#include <linux/errno.h>
+#include "../../pci.h"
+
+#define ECRC_POLICY_DEFAULT 0 /* ECRC set by BIOS */
+#define ECRC_POLICY_OFF 1 /* ECRC off for performance */
+#define ECRC_POLICY_ON 2 /* ECRC on for data integrity */
+
+static int ecrc_policy = ECRC_POLICY_DEFAULT;
+
+static const char *ecrc_policy_str[] = {
+ [ECRC_POLICY_DEFAULT] = "bios",
+ [ECRC_POLICY_OFF] = "off",
+ [ECRC_POLICY_ON] = "on"
+};
+
+/**
+ * enable_ecrc_checking - enable PCIe ECRC checking for a device
+ * @dev: the PCI device
+ *
+ * Returns 0 on success, or negative on failure.
+ */
+static int enable_ecrc_checking(struct pci_dev *dev)
+{
+ int pos;
+ u32 reg32;
+
+ if (!dev->is_pcie)
+ return -ENODEV;
+
+ pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
+ if (!pos)
+ return -ENODEV;
+
+ pci_read_config_dword(dev, pos + PCI_ERR_CAP, &reg32);
+ if (reg32 & PCI_ERR_CAP_ECRC_GENC)
+ reg32 |= PCI_ERR_CAP_ECRC_GENE;
+ if (reg32 & PCI_ERR_CAP_ECRC_CHKC)
+ reg32 |= PCI_ERR_CAP_ECRC_CHKE;
+ pci_write_config_dword(dev, pos + PCI_ERR_CAP, reg32);
+
+ return 0;
+}
+
+/**
+ * disable_ecrc_checking - disable PCIe ECRC checking for a device
+ * @dev: the PCI device
+ *
+ * Returns 0 on success, or negative on failure.
+ */
+static int disable_ecrc_checking(struct pci_dev *dev)
+{
+ int pos;
+ u32 reg32;
+
+ if (!dev->is_pcie)
+ return -ENODEV;
+
+ pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
+ if (!pos)
+ return -ENODEV;
+
+ pci_read_config_dword(dev, pos + PCI_ERR_CAP, &reg32);
+ reg32 &= ~(PCI_ERR_CAP_ECRC_GENE | PCI_ERR_CAP_ECRC_CHKE);
+ pci_write_config_dword(dev, pos + PCI_ERR_CAP, reg32);
+
+ return 0;
+}
+
+/**
+ * pcie_set_ecrc_checking - set/unset PCIe ECRC checking for a device based on global policy
+ * @dev: the PCI device
+ */
+void pcie_set_ecrc_checking(struct pci_dev *dev)
+{
+ switch (ecrc_policy) {
+ case ECRC_POLICY_DEFAULT:
+ return;
+ case ECRC_POLICY_OFF:
+ disable_ecrc_checking(dev);
+ break;
+ case ECRC_POLICY_ON:
+ enable_ecrc_checking(dev);
+ break;
+ default:
+ return;
+ }
+}
+
+/**
+ * pcie_ecrc_get_policy - parse kernel command-line ecrc option
+ */
+void pcie_ecrc_get_policy(char *str)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(ecrc_policy_str); i++)
+ if (!strncmp(str, ecrc_policy_str[i],
+ strlen(ecrc_policy_str[i])))
+ break;
+ if (i >= ARRAY_SIZE(ecrc_policy_str))
+ return;
+
+ ecrc_policy = i;
+}
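
The policy selected with the new ecrc= option of the pci= boot parameter (e.g. pci=ecrc=on) only flips the GENE/CHKE bits where the corresponding capability bits allow it, so the effective state can differ per device. A hedged helper sketch (example_report_ecrc is illustrative, not part of this patch) that reports what actually took effect:

static void example_report_ecrc(struct pci_dev *dev)
{
	int pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
	u32 reg32;

	if (!pos)
		return;
	pci_read_config_dword(dev, pos + PCI_ERR_CAP, &reg32);
	dev_info(&dev->dev, "ECRC generation %s, checking %s\n",
		 (reg32 & PCI_ERR_CAP_ECRC_GENE) ? "on" : "off",
		 (reg32 & PCI_ERR_CAP_ECRC_CHKE) ? "on" : "off");
}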
diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
index b0367f1..3d27c97 100644
--- a/drivers/pci/pcie/aspm.c
+++ b/drivers/pci/pcie/aspm.c
@@ -26,40 +26,36 @@
#endif
#define MODULE_PARAM_PREFIX "pcie_aspm."
-struct endpoint_state {
- unsigned int l0s_acceptable_latency;
- unsigned int l1_acceptable_latency;
+struct aspm_latency {
+ u32 l0s; /* L0s latency (nsec) */
+ u32 l1; /* L1 latency (nsec) */
};
struct pcie_link_state {
- struct list_head sibiling;
- struct pci_dev *pdev;
- bool downstream_has_switch;
-
- struct pcie_link_state *parent;
- struct list_head children;
- struct list_head link;
+ struct pci_dev *pdev; /* Upstream component of the Link */
+ struct pcie_link_state *root; /* pointer to the root port link */
+ struct pcie_link_state *parent; /* pointer to the parent Link state */
+ struct list_head sibling; /* node in link_list */
+ struct list_head children; /* list of child link states */
+ struct list_head link; /* node in parent's children list */
/* ASPM state */
- unsigned int support_state;
- unsigned int enabled_state;
- unsigned int bios_aspm_state;
- /* upstream component */
- unsigned int l0s_upper_latency;
- unsigned int l1_upper_latency;
- /* downstream component */
- unsigned int l0s_down_latency;
- unsigned int l1_down_latency;
- /* Clock PM state*/
- unsigned int clk_pm_capable;
- unsigned int clk_pm_enabled;
- unsigned int bios_clk_state;
+ u32 aspm_support:2; /* Supported ASPM state */
+ u32 aspm_enabled:2; /* Enabled ASPM state */
+ u32 aspm_default:2; /* Default ASPM state by BIOS */
+ /* Clock PM state */
+ u32 clkpm_capable:1; /* Clock PM capable? */
+ u32 clkpm_enabled:1; /* Current Clock PM state */
+ u32 clkpm_default:1; /* Default Clock PM state by BIOS */
+
+ /* Latencies */
+ struct aspm_latency latency; /* Exit latency */
/*
- * A pcie downstream port only has one slot under it, so at most there
- * are 8 functions
+ * Endpoint acceptable latencies. A pcie downstream port only
+ * has one slot under it, so at most there are 8 functions.
*/
- struct endpoint_state endpoints[8];
+ struct aspm_latency acceptable[8];
};
static int aspm_disabled, aspm_force;
@@ -78,27 +74,23 @@
#define LINK_RETRAIN_TIMEOUT HZ
-static int policy_to_aspm_state(struct pci_dev *pdev)
+static int policy_to_aspm_state(struct pcie_link_state *link)
{
- struct pcie_link_state *link_state = pdev->link_state;
-
switch (aspm_policy) {
case POLICY_PERFORMANCE:
/* Disable ASPM and Clock PM */
return 0;
case POLICY_POWERSAVE:
/* Enable ASPM L0s/L1 */
- return PCIE_LINK_STATE_L0S|PCIE_LINK_STATE_L1;
+ return PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1;
case POLICY_DEFAULT:
- return link_state->bios_aspm_state;
+ return link->aspm_default;
}
return 0;
}
-static int policy_to_clkpm_state(struct pci_dev *pdev)
+static int policy_to_clkpm_state(struct pcie_link_state *link)
{
- struct pcie_link_state *link_state = pdev->link_state;
-
switch (aspm_policy) {
case POLICY_PERFORMANCE:
/* Disable ASPM and Clock PM */
@@ -107,73 +99,78 @@
/* Disable Clock PM */
return 1;
case POLICY_DEFAULT:
- return link_state->bios_clk_state;
+ return link->clkpm_default;
}
return 0;
}
-static void pcie_set_clock_pm(struct pci_dev *pdev, int enable)
+static void pcie_set_clkpm_nocheck(struct pcie_link_state *link, int enable)
{
- struct pci_dev *child_dev;
int pos;
u16 reg16;
- struct pcie_link_state *link_state = pdev->link_state;
+ struct pci_dev *child;
+ struct pci_bus *linkbus = link->pdev->subordinate;
- list_for_each_entry(child_dev, &pdev->subordinate->devices, bus_list) {
- pos = pci_find_capability(child_dev, PCI_CAP_ID_EXP);
+ list_for_each_entry(child, &linkbus->devices, bus_list) {
+ pos = pci_find_capability(child, PCI_CAP_ID_EXP);
if (!pos)
return;
- pci_read_config_word(child_dev, pos + PCI_EXP_LNKCTL, &reg16);
+ pci_read_config_word(child, pos + PCI_EXP_LNKCTL, &reg16);
if (enable)
reg16 |= PCI_EXP_LNKCTL_CLKREQ_EN;
else
reg16 &= ~PCI_EXP_LNKCTL_CLKREQ_EN;
- pci_write_config_word(child_dev, pos + PCI_EXP_LNKCTL, reg16);
+ pci_write_config_word(child, pos + PCI_EXP_LNKCTL, reg16);
}
- link_state->clk_pm_enabled = !!enable;
+ link->clkpm_enabled = !!enable;
}
-static void pcie_check_clock_pm(struct pci_dev *pdev, int blacklist)
+static void pcie_set_clkpm(struct pcie_link_state *link, int enable)
{
- int pos;
+ /* Don't enable Clock PM if the link is not Clock PM capable */
+ if (!link->clkpm_capable && enable)
+ return;
+ /* Need nothing if the specified equals to current state */
+ if (link->clkpm_enabled == enable)
+ return;
+ pcie_set_clkpm_nocheck(link, enable);
+}
+
+static void pcie_clkpm_cap_init(struct pcie_link_state *link, int blacklist)
+{
+ int pos, capable = 1, enabled = 1;
u32 reg32;
u16 reg16;
- int capable = 1, enabled = 1;
- struct pci_dev *child_dev;
- struct pcie_link_state *link_state = pdev->link_state;
+ struct pci_dev *child;
+ struct pci_bus *linkbus = link->pdev->subordinate;
/* All functions should have the same cap and state, take the worst */
- list_for_each_entry(child_dev, &pdev->subordinate->devices, bus_list) {
- pos = pci_find_capability(child_dev, PCI_CAP_ID_EXP);
+ list_for_each_entry(child, &linkbus->devices, bus_list) {
+ pos = pci_find_capability(child, PCI_CAP_ID_EXP);
if (!pos)
return;
- pci_read_config_dword(child_dev, pos + PCI_EXP_LNKCAP, &reg32);
+ pci_read_config_dword(child, pos + PCI_EXP_LNKCAP, &reg32);
if (!(reg32 & PCI_EXP_LNKCAP_CLKPM)) {
capable = 0;
enabled = 0;
break;
}
- pci_read_config_word(child_dev, pos + PCI_EXP_LNKCTL, &reg16);
+ pci_read_config_word(child, pos + PCI_EXP_LNKCTL, &reg16);
if (!(reg16 & PCI_EXP_LNKCTL_CLKREQ_EN))
enabled = 0;
}
- link_state->clk_pm_enabled = enabled;
- link_state->bios_clk_state = enabled;
- if (!blacklist) {
- link_state->clk_pm_capable = capable;
- pcie_set_clock_pm(pdev, policy_to_clkpm_state(pdev));
- } else {
- link_state->clk_pm_capable = 0;
- pcie_set_clock_pm(pdev, 0);
- }
+ link->clkpm_enabled = enabled;
+ link->clkpm_default = enabled;
+ link->clkpm_capable = (blacklist) ? 0 : capable;
}
-static bool pcie_aspm_downstream_has_switch(struct pci_dev *pdev)
+static bool pcie_aspm_downstream_has_switch(struct pcie_link_state *link)
{
- struct pci_dev *child_dev;
+ struct pci_dev *child;
+ struct pci_bus *linkbus = link->pdev->subordinate;
- list_for_each_entry(child_dev, &pdev->subordinate->devices, bus_list) {
- if (child_dev->pcie_type == PCI_EXP_TYPE_UPSTREAM)
+ list_for_each_entry(child, &linkbus->devices, bus_list) {
+ if (child->pcie_type == PCI_EXP_TYPE_UPSTREAM)
return true;
}
return false;
@@ -184,289 +181,263 @@
* could use common clock. If they are, configure them to use the
* common clock. That will reduce the ASPM state exit latency.
*/
-static void pcie_aspm_configure_common_clock(struct pci_dev *pdev)
+static void pcie_aspm_configure_common_clock(struct pcie_link_state *link)
{
- int pos, child_pos, i = 0;
- u16 reg16 = 0;
- struct pci_dev *child_dev;
- int same_clock = 1;
+ int ppos, cpos, same_clock = 1;
+ u16 reg16, parent_reg, child_reg[8];
unsigned long start_jiffies;
- u16 child_regs[8], parent_reg;
+ struct pci_dev *child, *parent = link->pdev;
+ struct pci_bus *linkbus = parent->subordinate;
/*
- * all functions of a slot should have the same Slot Clock
+ * All functions of a slot should have the same Slot Clock
* Configuration, so just check one function
- * */
- child_dev = list_entry(pdev->subordinate->devices.next, struct pci_dev,
- bus_list);
- BUG_ON(!child_dev->is_pcie);
+ */
+ child = list_entry(linkbus->devices.next, struct pci_dev, bus_list);
+ BUG_ON(!child->is_pcie);
/* Check downstream component if bit Slot Clock Configuration is 1 */
- child_pos = pci_find_capability(child_dev, PCI_CAP_ID_EXP);
- pci_read_config_word(child_dev, child_pos + PCI_EXP_LNKSTA, &reg16);
+ cpos = pci_find_capability(child, PCI_CAP_ID_EXP);
+ pci_read_config_word(child, cpos + PCI_EXP_LNKSTA, &reg16);
if (!(reg16 & PCI_EXP_LNKSTA_SLC))
same_clock = 0;
/* Check upstream component if bit Slot Clock Configuration is 1 */
- pos = pci_find_capability(pdev, PCI_CAP_ID_EXP);
- pci_read_config_word(pdev, pos + PCI_EXP_LNKSTA, &reg16);
+ ppos = pci_find_capability(parent, PCI_CAP_ID_EXP);
+ pci_read_config_word(parent, ppos + PCI_EXP_LNKSTA, &reg16);
if (!(reg16 & PCI_EXP_LNKSTA_SLC))
same_clock = 0;
/* Configure downstream component, all functions */
- list_for_each_entry(child_dev, &pdev->subordinate->devices, bus_list) {
- child_pos = pci_find_capability(child_dev, PCI_CAP_ID_EXP);
- pci_read_config_word(child_dev, child_pos + PCI_EXP_LNKCTL,
- &reg16);
- child_regs[i] = reg16;
+ list_for_each_entry(child, &linkbus->devices, bus_list) {
+ cpos = pci_find_capability(child, PCI_CAP_ID_EXP);
+ pci_read_config_word(child, cpos + PCI_EXP_LNKCTL, &reg16);
+ child_reg[PCI_FUNC(child->devfn)] = reg16;
if (same_clock)
reg16 |= PCI_EXP_LNKCTL_CCC;
else
reg16 &= ~PCI_EXP_LNKCTL_CCC;
- pci_write_config_word(child_dev, child_pos + PCI_EXP_LNKCTL,
- reg16);
- i++;
+ pci_write_config_word(child, cpos + PCI_EXP_LNKCTL, reg16);
}
/* Configure upstream component */
- pci_read_config_word(pdev, pos + PCI_EXP_LNKCTL, &reg16);
+ pci_read_config_word(parent, ppos + PCI_EXP_LNKCTL, &reg16);
parent_reg = reg16;
if (same_clock)
reg16 |= PCI_EXP_LNKCTL_CCC;
else
reg16 &= ~PCI_EXP_LNKCTL_CCC;
- pci_write_config_word(pdev, pos + PCI_EXP_LNKCTL, reg16);
+ pci_write_config_word(parent, ppos + PCI_EXP_LNKCTL, reg16);
- /* retrain link */
+ /* Retrain link */
reg16 |= PCI_EXP_LNKCTL_RL;
- pci_write_config_word(pdev, pos + PCI_EXP_LNKCTL, reg16);
+ pci_write_config_word(parent, ppos + PCI_EXP_LNKCTL, reg16);
- /* Wait for link training end */
- /* break out after waiting for timeout */
+ /* Wait for link training end. Break out after waiting for timeout */
start_jiffies = jiffies;
for (;;) {
- pci_read_config_word(pdev, pos + PCI_EXP_LNKSTA, &reg16);
+ pci_read_config_word(parent, ppos + PCI_EXP_LNKSTA, &reg16);
if (!(reg16 & PCI_EXP_LNKSTA_LT))
break;
if (time_after(jiffies, start_jiffies + LINK_RETRAIN_TIMEOUT))
break;
msleep(1);
}
- /* training failed -> recover */
- if (reg16 & PCI_EXP_LNKSTA_LT) {
- dev_printk (KERN_ERR, &pdev->dev, "ASPM: Could not configure"
- " common clock\n");
- i = 0;
- list_for_each_entry(child_dev, &pdev->subordinate->devices,
- bus_list) {
- child_pos = pci_find_capability(child_dev,
- PCI_CAP_ID_EXP);
- pci_write_config_word(child_dev,
- child_pos + PCI_EXP_LNKCTL,
- child_regs[i]);
- i++;
- }
- pci_write_config_word(pdev, pos + PCI_EXP_LNKCTL, parent_reg);
+ if (!(reg16 & PCI_EXP_LNKSTA_LT))
+ return;
+
+ /* Training failed. Restore common clock configurations */
+ dev_printk(KERN_ERR, &parent->dev,
+ "ASPM: Could not configure common clock\n");
+ list_for_each_entry(child, &linkbus->devices, bus_list) {
+ cpos = pci_find_capability(child, PCI_CAP_ID_EXP);
+ pci_write_config_word(child, cpos + PCI_EXP_LNKCTL,
+ child_reg[PCI_FUNC(child->devfn)]);
}
+ pci_write_config_word(parent, ppos + PCI_EXP_LNKCTL, parent_reg);
}
-/*
- * calc_L0S_latency: Convert L0s latency encoding to ns
- */
-static unsigned int calc_L0S_latency(unsigned int latency_encoding, int ac)
+/* Convert L0s latency encoding to ns */
+static u32 calc_l0s_latency(u32 encoding)
{
- unsigned int ns = 64;
-
- if (latency_encoding == 0x7) {
- if (ac)
- ns = -1U;
- else
- ns = 5*1000; /* > 4us */
- } else
- ns *= (1 << latency_encoding);
- return ns;
+ if (encoding == 0x7)
+ return (5 * 1000); /* > 4us */
+ return (64 << encoding);
}
-/*
- * calc_L1_latency: Convert L1 latency encoding to ns
- */
-static unsigned int calc_L1_latency(unsigned int latency_encoding, int ac)
+/* Convert L0s acceptable latency encoding to ns */
+static u32 calc_l0s_acceptable(u32 encoding)
{
- unsigned int ns = 1000;
+ if (encoding == 0x7)
+ return -1U;
+ return (64 << encoding);
+}
- if (latency_encoding == 0x7) {
- if (ac)
- ns = -1U;
- else
- ns = 65*1000; /* > 64us */
- } else
- ns *= (1 << latency_encoding);
- return ns;
+/* Convert L1 latency encoding to ns */
+static u32 calc_l1_latency(u32 encoding)
+{
+ if (encoding == 0x7)
+ return (65 * 1000); /* > 64us */
+ return (1000 << encoding);
+}
+
+/* Convert L1 acceptable latency encoding to ns */
+static u32 calc_l1_acceptable(u32 encoding)
+{
+ if (encoding == 0x7)
+ return -1U;
+ return (1000 << encoding);
}
static void pcie_aspm_get_cap_device(struct pci_dev *pdev, u32 *state,
- unsigned int *l0s, unsigned int *l1, unsigned int *enabled)
+ u32 *l0s, u32 *l1, u32 *enabled)
{
int pos;
u16 reg16;
- u32 reg32;
- unsigned int latency;
+ u32 reg32, encoding;
+ *l0s = *l1 = *enabled = 0;
pos = pci_find_capability(pdev, PCI_CAP_ID_EXP);
pci_read_config_dword(pdev, pos + PCI_EXP_LNKCAP, &reg32);
*state = (reg32 & PCI_EXP_LNKCAP_ASPMS) >> 10;
if (*state != PCIE_LINK_STATE_L0S &&
- *state != (PCIE_LINK_STATE_L1|PCIE_LINK_STATE_L0S))
+ *state != (PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_L0S))
*state = 0;
if (*state == 0)
return;
- latency = (reg32 & PCI_EXP_LNKCAP_L0SEL) >> 12;
- *l0s = calc_L0S_latency(latency, 0);
+ encoding = (reg32 & PCI_EXP_LNKCAP_L0SEL) >> 12;
+ *l0s = calc_l0s_latency(encoding);
if (*state & PCIE_LINK_STATE_L1) {
- latency = (reg32 & PCI_EXP_LNKCAP_L1EL) >> 15;
- *l1 = calc_L1_latency(latency, 0);
+ encoding = (reg32 & PCI_EXP_LNKCAP_L1EL) >> 15;
+ *l1 = calc_l1_latency(encoding);
}
pci_read_config_word(pdev, pos + PCI_EXP_LNKCTL, &reg16);
- *enabled = reg16 & (PCIE_LINK_STATE_L0S|PCIE_LINK_STATE_L1);
+ *enabled = reg16 & (PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1);
}
-static void pcie_aspm_cap_init(struct pci_dev *pdev)
+static void pcie_aspm_cap_init(struct pcie_link_state *link, int blacklist)
{
- struct pci_dev *child_dev;
- u32 state, tmp;
- struct pcie_link_state *link_state = pdev->link_state;
+ u32 support, l0s, l1, enabled;
+ struct pci_dev *child, *parent = link->pdev;
+ struct pci_bus *linkbus = parent->subordinate;
+
+ if (blacklist) {
+ /* Set support state to 0, so we will disable ASPM later */
+ link->aspm_support = 0;
+ link->aspm_default = 0;
+ link->aspm_enabled = PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1;
+ return;
+ }
+
+ /* Configure common clock before checking latencies */
+ pcie_aspm_configure_common_clock(link);
/* upstream component states */
- pcie_aspm_get_cap_device(pdev, &link_state->support_state,
- &link_state->l0s_upper_latency,
- &link_state->l1_upper_latency,
- &link_state->enabled_state);
+ pcie_aspm_get_cap_device(parent, &support, &l0s, &l1, &enabled);
+ link->aspm_support = support;
+ link->latency.l0s = l0s;
+ link->latency.l1 = l1;
+ link->aspm_enabled = enabled;
+
/* downstream component states, all functions have the same setting */
- child_dev = list_entry(pdev->subordinate->devices.next, struct pci_dev,
- bus_list);
- pcie_aspm_get_cap_device(child_dev, &state,
- &link_state->l0s_down_latency,
- &link_state->l1_down_latency,
- &tmp);
- link_state->support_state &= state;
- if (!link_state->support_state)
+ child = list_entry(linkbus->devices.next, struct pci_dev, bus_list);
+ pcie_aspm_get_cap_device(child, &support, &l0s, &l1, &enabled);
+ link->aspm_support &= support;
+ link->latency.l0s = max_t(u32, link->latency.l0s, l0s);
+ link->latency.l1 = max_t(u32, link->latency.l1, l1);
+
+ if (!link->aspm_support)
return;
- link_state->enabled_state &= link_state->support_state;
- link_state->bios_aspm_state = link_state->enabled_state;
+
+ link->aspm_enabled &= link->aspm_support;
+ link->aspm_default = link->aspm_enabled;
/* ENDPOINT states*/
- list_for_each_entry(child_dev, &pdev->subordinate->devices, bus_list) {
+ list_for_each_entry(child, &linkbus->devices, bus_list) {
int pos;
- u32 reg32;
- unsigned int latency;
- struct endpoint_state *ep_state =
- &link_state->endpoints[PCI_FUNC(child_dev->devfn)];
+ u32 reg32, encoding;
+ struct aspm_latency *acceptable =
+ &link->acceptable[PCI_FUNC(child->devfn)];
- if (child_dev->pcie_type != PCI_EXP_TYPE_ENDPOINT &&
- child_dev->pcie_type != PCI_EXP_TYPE_LEG_END)
+ if (child->pcie_type != PCI_EXP_TYPE_ENDPOINT &&
+ child->pcie_type != PCI_EXP_TYPE_LEG_END)
continue;
- pos = pci_find_capability(child_dev, PCI_CAP_ID_EXP);
- pci_read_config_dword(child_dev, pos + PCI_EXP_DEVCAP, &reg32);
- latency = (reg32 & PCI_EXP_DEVCAP_L0S) >> 6;
- latency = calc_L0S_latency(latency, 1);
- ep_state->l0s_acceptable_latency = latency;
- if (link_state->support_state & PCIE_LINK_STATE_L1) {
- latency = (reg32 & PCI_EXP_DEVCAP_L1) >> 9;
- latency = calc_L1_latency(latency, 1);
- ep_state->l1_acceptable_latency = latency;
+ pos = pci_find_capability(child, PCI_CAP_ID_EXP);
+ pci_read_config_dword(child, pos + PCI_EXP_DEVCAP, &reg32);
+ encoding = (reg32 & PCI_EXP_DEVCAP_L0S) >> 6;
+ acceptable->l0s = calc_l0s_acceptable(encoding);
+ if (link->aspm_support & PCIE_LINK_STATE_L1) {
+ encoding = (reg32 & PCI_EXP_DEVCAP_L1) >> 9;
+ acceptable->l1 = calc_l1_acceptable(encoding);
}
}
}
-static unsigned int __pcie_aspm_check_state_one(struct pci_dev *pdev,
- unsigned int state)
+/**
+ * __pcie_aspm_check_state_one - check latency for endpoint device.
+ * @endpoint: pointer to the struct pci_dev of endpoint device
+ *
+ * TBD: The latency from the endpoint to the root complex varies with the
+ * upstream link states of the switches above the device. Here we just do
+ * a simple check which assumes all links above the device can be in the
+ * L1 state, that is, we only consider the worst case. If a switch's
+ * upstream link can't be put into L0s/L1, then our check is too strict.
+ */
+static u32 __pcie_aspm_check_state_one(struct pci_dev *endpoint, u32 state)
{
- struct pci_dev *parent_dev, *tmp_dev;
- unsigned int latency, l1_latency = 0;
- struct pcie_link_state *link_state;
- struct endpoint_state *ep_state;
+ u32 l1_switch_latency = 0;
+ struct aspm_latency *acceptable;
+ struct pcie_link_state *link;
- parent_dev = pdev->bus->self;
- link_state = parent_dev->link_state;
- state &= link_state->support_state;
- if (state == 0)
- return 0;
- ep_state = &link_state->endpoints[PCI_FUNC(pdev->devfn)];
+ link = endpoint->bus->self->link_state;
+ state &= link->aspm_support;
+ acceptable = &link->acceptable[PCI_FUNC(endpoint->devfn)];
- /*
- * Check latency for endpoint device.
- * TBD: The latency from the endpoint to root complex vary per
- * switch's upstream link state above the device. Here we just do a
- * simple check which assumes all links above the device can be in L1
- * state, that is we just consider the worst case. If switch's upstream
- * link can't be put into L0S/L1, then our check is too strictly.
- */
- tmp_dev = pdev;
- while (state & (PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1)) {
- parent_dev = tmp_dev->bus->self;
- link_state = parent_dev->link_state;
- if (state & PCIE_LINK_STATE_L0S) {
- latency = max_t(unsigned int,
- link_state->l0s_upper_latency,
- link_state->l0s_down_latency);
- if (latency > ep_state->l0s_acceptable_latency)
- state &= ~PCIE_LINK_STATE_L0S;
- }
- if (state & PCIE_LINK_STATE_L1) {
- latency = max_t(unsigned int,
- link_state->l1_upper_latency,
- link_state->l1_down_latency);
- if (latency + l1_latency >
- ep_state->l1_acceptable_latency)
- state &= ~PCIE_LINK_STATE_L1;
- }
- if (!parent_dev->bus->self) /* parent_dev is a root port */
- break;
- else {
- /*
- * parent_dev is the downstream port of a switch, make
- * tmp_dev the upstream port of the switch
- */
- tmp_dev = parent_dev->bus->self;
- /*
- * every switch on the path to root complex need 1 more
- * microsecond for L1. Spec doesn't mention L0S.
- */
- if (state & PCIE_LINK_STATE_L1)
- l1_latency += 1000;
- }
+ while (link && state) {
+ if ((state & PCIE_LINK_STATE_L0S) &&
+ (link->latency.l0s > acceptable->l0s))
+ state &= ~PCIE_LINK_STATE_L0S;
+ if ((state & PCIE_LINK_STATE_L1) &&
+ (link->latency.l1 + l1_switch_latency > acceptable->l1))
+ state &= ~PCIE_LINK_STATE_L1;
+ link = link->parent;
+ /*
+ * Every switch on the path to root complex need 1
+ * more microsecond for L1. Spec doesn't mention L0s.
+ */
+ l1_switch_latency += 1000;
}
return state;
}
-static unsigned int pcie_aspm_check_state(struct pci_dev *pdev,
- unsigned int state)
+static u32 pcie_aspm_check_state(struct pcie_link_state *link, u32 state)
{
- struct pci_dev *child_dev;
+ pci_power_t power_state;
+ struct pci_dev *child;
+ struct pci_bus *linkbus = link->pdev->subordinate;
/* If no child, ignore the link */
- if (list_empty(&pdev->subordinate->devices))
+ if (list_empty(&linkbus->devices))
return state;
- list_for_each_entry(child_dev, &pdev->subordinate->devices, bus_list) {
- if (child_dev->pcie_type == PCI_EXP_TYPE_PCI_BRIDGE) {
- /*
- * If downstream component of a link is pci bridge, we
- * disable ASPM for now for the link
- * */
- state = 0;
- break;
- }
- if ((child_dev->pcie_type != PCI_EXP_TYPE_ENDPOINT &&
- child_dev->pcie_type != PCI_EXP_TYPE_LEG_END))
+
+ list_for_each_entry(child, &linkbus->devices, bus_list) {
+ /*
+ * If the downstream component of a link is a PCI bridge, disable
+ * ASPM for the link for now
+ */
+ if (child->pcie_type == PCI_EXP_TYPE_PCI_BRIDGE)
+ return 0;
+
+ if ((child->pcie_type != PCI_EXP_TYPE_ENDPOINT &&
+ child->pcie_type != PCI_EXP_TYPE_LEG_END))
continue;
/* Device not in D0 doesn't need check latency */
- if (child_dev->current_state == PCI_D1 ||
- child_dev->current_state == PCI_D2 ||
- child_dev->current_state == PCI_D3hot ||
- child_dev->current_state == PCI_D3cold)
+ power_state = child->current_state;
+ if (power_state == PCI_D1 || power_state == PCI_D2 ||
+ power_state == PCI_D3hot || power_state == PCI_D3cold)
continue;
- state = __pcie_aspm_check_state_one(child_dev, state);
+ state = __pcie_aspm_check_state_one(child, state);
}
return state;
}
@@ -482,90 +453,71 @@
pci_write_config_word(pdev, pos + PCI_EXP_LNKCTL, reg16);
}
-static void __pcie_aspm_config_link(struct pci_dev *pdev, unsigned int state)
+static void __pcie_aspm_config_link(struct pcie_link_state *link, u32 state)
{
- struct pci_dev *child_dev;
- int valid = 1;
- struct pcie_link_state *link_state = pdev->link_state;
+ struct pci_dev *child, *parent = link->pdev;
+ struct pci_bus *linkbus = parent->subordinate;
/* If no child, disable the link */
- if (list_empty(&pdev->subordinate->devices))
+ if (list_empty(&linkbus->devices))
state = 0;
/*
- * if the downstream component has pci bridge function, don't do ASPM
- * now
+ * If the downstream component has pci bridge function, don't
+ * do ASPM now.
*/
- list_for_each_entry(child_dev, &pdev->subordinate->devices, bus_list) {
- if (child_dev->pcie_type == PCI_EXP_TYPE_PCI_BRIDGE) {
- valid = 0;
- break;
- }
+ list_for_each_entry(child, &linkbus->devices, bus_list) {
+ if (child->pcie_type == PCI_EXP_TYPE_PCI_BRIDGE)
+ return;
}
- if (!valid)
- return;
-
/*
- * spec 2.0 suggests all functions should be configured the same
- * setting for ASPM. Enabling ASPM L1 should be done in upstream
- * component first and then downstream, and vice versa for disabling
- * ASPM L1. Spec doesn't mention L0S.
+ * Spec 2.0 suggests all functions should be configured the
+ * same setting for ASPM. Enabling ASPM L1 should be done in
+ * upstream component first and then downstream, and vice
+ * versa for disabling ASPM L1. Spec doesn't mention L0S.
*/
if (state & PCIE_LINK_STATE_L1)
- __pcie_aspm_config_one_dev(pdev, state);
+ __pcie_aspm_config_one_dev(parent, state);
- list_for_each_entry(child_dev, &pdev->subordinate->devices, bus_list)
- __pcie_aspm_config_one_dev(child_dev, state);
+ list_for_each_entry(child, &linkbus->devices, bus_list)
+ __pcie_aspm_config_one_dev(child, state);
if (!(state & PCIE_LINK_STATE_L1))
- __pcie_aspm_config_one_dev(pdev, state);
+ __pcie_aspm_config_one_dev(parent, state);
- link_state->enabled_state = state;
+ link->aspm_enabled = state;
}
-static struct pcie_link_state *get_root_port_link(struct pcie_link_state *link)
+/* Check the whole hierarchy, and configure each link in the hierarchy */
+static void __pcie_aspm_configure_link_state(struct pcie_link_state *link,
+ u32 state)
{
- struct pcie_link_state *root_port_link = link;
- while (root_port_link->parent)
- root_port_link = root_port_link->parent;
- return root_port_link;
-}
+ struct pcie_link_state *leaf, *root = link->root;
-/* check the whole hierarchy, and configure each link in the hierarchy */
-static void __pcie_aspm_configure_link_state(struct pci_dev *pdev,
- unsigned int state)
-{
- struct pcie_link_state *link_state = pdev->link_state;
- struct pcie_link_state *root_port_link = get_root_port_link(link_state);
- struct pcie_link_state *leaf;
+ state &= (PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1);
- state &= PCIE_LINK_STATE_L0S|PCIE_LINK_STATE_L1;
-
- /* check all links who have specific root port link */
- list_for_each_entry(leaf, &link_list, sibiling) {
- if (!list_empty(&leaf->children) ||
- get_root_port_link(leaf) != root_port_link)
+ /* Check all leaf links that share this root port link */
+ list_for_each_entry(leaf, &link_list, sibling) {
+ if (!list_empty(&leaf->children) || (leaf->root != root))
continue;
- state = pcie_aspm_check_state(leaf->pdev, state);
+ state = pcie_aspm_check_state(leaf, state);
}
- /* check root port link too in case it hasn't children */
- state = pcie_aspm_check_state(root_port_link->pdev, state);
-
- if (link_state->enabled_state == state)
+ /* Check the root port link too in case it has no children */
+ state = pcie_aspm_check_state(root, state);
+ if (link->aspm_enabled == state)
return;
-
/*
- * we must change the hierarchy. See comments in
+ * We must change the hierarchy. See comments in
* __pcie_aspm_config_link for the order
**/
if (state & PCIE_LINK_STATE_L1) {
- list_for_each_entry(leaf, &link_list, sibiling) {
- if (get_root_port_link(leaf) == root_port_link)
- __pcie_aspm_config_link(leaf->pdev, state);
+ list_for_each_entry(leaf, &link_list, sibling) {
+ if (leaf->root == root)
+ __pcie_aspm_config_link(leaf, state);
}
} else {
- list_for_each_entry_reverse(leaf, &link_list, sibiling) {
- if (get_root_port_link(leaf) == root_port_link)
- __pcie_aspm_config_link(leaf->pdev, state);
+ list_for_each_entry_reverse(leaf, &link_list, sibling) {
+ if (leaf->root == root)
+ __pcie_aspm_config_link(leaf, state);
}
}
}
@@ -574,45 +526,42 @@
* pcie_aspm_configure_link_state: enable/disable PCI express link state
* @pdev: the root port or switch downstream port
*/
-static void pcie_aspm_configure_link_state(struct pci_dev *pdev,
- unsigned int state)
+static void pcie_aspm_configure_link_state(struct pcie_link_state *link,
+ u32 state)
{
down_read(&pci_bus_sem);
mutex_lock(&aspm_lock);
- __pcie_aspm_configure_link_state(pdev, state);
+ __pcie_aspm_configure_link_state(link, state);
mutex_unlock(&aspm_lock);
up_read(&pci_bus_sem);
}
-static void free_link_state(struct pci_dev *pdev)
+static void free_link_state(struct pcie_link_state *link)
{
- kfree(pdev->link_state);
- pdev->link_state = NULL;
+ link->pdev->link_state = NULL;
+ kfree(link);
}
static int pcie_aspm_sanity_check(struct pci_dev *pdev)
{
- struct pci_dev *child_dev;
- int child_pos;
+ struct pci_dev *child;
+ int pos;
u32 reg32;
-
/*
- * Some functions in a slot might not all be PCIE functions, very
- * strange. Disable ASPM for the whole slot
+ * Some functions in a slot might not all be PCIE functions,
+ * very strange. Disable ASPM for the whole slot
*/
- list_for_each_entry(child_dev, &pdev->subordinate->devices, bus_list) {
- child_pos = pci_find_capability(child_dev, PCI_CAP_ID_EXP);
- if (!child_pos)
+ list_for_each_entry(child, &pdev->subordinate->devices, bus_list) {
+ pos = pci_find_capability(child, PCI_CAP_ID_EXP);
+ if (!pos)
return -EINVAL;
-
/*
* Disable ASPM for pre-1.1 PCIe device, we follow MS to use
* RBER bit to determine if a function is 1.1 version device
*/
- pci_read_config_dword(child_dev, child_pos + PCI_EXP_DEVCAP,
- &reg32);
+ pci_read_config_dword(child, pos + PCI_EXP_DEVCAP, &reg32);
if (!(reg32 & PCI_EXP_DEVCAP_RBER) && !aspm_force) {
- dev_printk(KERN_INFO, &child_dev->dev, "disabling ASPM"
+ dev_printk(KERN_INFO, &child->dev, "disabling ASPM"
" on pre-1.1 PCIe device. You can enable it"
" with 'pcie_aspm=force'\n");
return -EINVAL;
@@ -621,6 +570,47 @@
return 0;
}
+static struct pcie_link_state *pcie_aspm_setup_link_state(struct pci_dev *pdev)
+{
+ struct pcie_link_state *link;
+ int blacklist = !!pcie_aspm_sanity_check(pdev);
+
+ link = kzalloc(sizeof(*link), GFP_KERNEL);
+ if (!link)
+ return NULL;
+ INIT_LIST_HEAD(&link->sibling);
+ INIT_LIST_HEAD(&link->children);
+ INIT_LIST_HEAD(&link->link);
+ link->pdev = pdev;
+ if (pdev->pcie_type == PCI_EXP_TYPE_DOWNSTREAM) {
+ struct pcie_link_state *parent;
+ parent = pdev->bus->parent->self->link_state;
+ if (!parent) {
+ kfree(link);
+ return NULL;
+ }
+ link->parent = parent;
+ list_add(&link->link, &parent->children);
+ }
+ /* Setup a pointer to the root port link */
+ if (!link->parent)
+ link->root = link;
+ else
+ link->root = link->parent->root;
+
+ list_add(&link->sibling, &link_list);
+
+ pdev->link_state = link;
+
+ /* Check ASPM capability */
+ pcie_aspm_cap_init(link, blacklist);
+
+ /* Check Clock PM capability */
+ pcie_clkpm_cap_init(link, blacklist);
+
+ return link;
+}
+
/*
* pcie_aspm_init_link_state: Initiate PCI express link state.
* It is called after the pcie and its child devices are scanned.
@@ -628,75 +618,47 @@
*/
void pcie_aspm_init_link_state(struct pci_dev *pdev)
{
- unsigned int state;
- struct pcie_link_state *link_state;
- int error = 0;
- int blacklist;
+ u32 state;
+ struct pcie_link_state *link;
if (aspm_disabled || !pdev->is_pcie || pdev->link_state)
return;
if (pdev->pcie_type != PCI_EXP_TYPE_ROOT_PORT &&
- pdev->pcie_type != PCI_EXP_TYPE_DOWNSTREAM)
+ pdev->pcie_type != PCI_EXP_TYPE_DOWNSTREAM)
return;
+
+ /* VIA has a strange chipset, root port is under a bridge */
+ if (pdev->pcie_type == PCI_EXP_TYPE_ROOT_PORT &&
+ pdev->bus->self)
+ return;
+
down_read(&pci_bus_sem);
if (list_empty(&pdev->subordinate->devices))
goto out;
- blacklist = !!pcie_aspm_sanity_check(pdev);
-
mutex_lock(&aspm_lock);
-
- link_state = kzalloc(sizeof(*link_state), GFP_KERNEL);
- if (!link_state)
- goto unlock_out;
-
- link_state->downstream_has_switch = pcie_aspm_downstream_has_switch(pdev);
- INIT_LIST_HEAD(&link_state->children);
- INIT_LIST_HEAD(&link_state->link);
- if (pdev->bus->self) {/* this is a switch */
- struct pcie_link_state *parent_link_state;
-
- parent_link_state = pdev->bus->parent->self->link_state;
- if (!parent_link_state) {
- kfree(link_state);
- goto unlock_out;
- }
- list_add(&link_state->link, &parent_link_state->children);
- link_state->parent = parent_link_state;
- }
-
- pdev->link_state = link_state;
-
- if (!blacklist) {
- pcie_aspm_configure_common_clock(pdev);
- pcie_aspm_cap_init(pdev);
+ link = pcie_aspm_setup_link_state(pdev);
+ if (!link)
+ goto unlock;
+ /*
+ * Setup initial ASPM state
+ *
+ * If the link has a switch below it, delay the link configuration.
+ * The leaf link initialization will configure the whole hierarchy,
+ * but we must make sure the BIOS doesn't set an unsupported link state.
+ */
+ if (pcie_aspm_downstream_has_switch(link)) {
+ state = pcie_aspm_check_state(link, link->aspm_default);
+ __pcie_aspm_config_link(link, state);
} else {
- link_state->enabled_state = PCIE_LINK_STATE_L0S|PCIE_LINK_STATE_L1;
- link_state->bios_aspm_state = 0;
- /* Set support state to 0, so we will disable ASPM later */
- link_state->support_state = 0;
+ state = policy_to_aspm_state(link);
+ __pcie_aspm_configure_link_state(link, state);
}
- link_state->pdev = pdev;
- list_add(&link_state->sibiling, &link_list);
-
- if (link_state->downstream_has_switch) {
- /*
- * If link has switch, delay the link config. The leaf link
- * initialization will config the whole hierarchy. but we must
- * make sure BIOS doesn't set unsupported link state
- **/
- state = pcie_aspm_check_state(pdev, link_state->bios_aspm_state);
- __pcie_aspm_config_link(pdev, state);
- } else
- __pcie_aspm_configure_link_state(pdev,
- policy_to_aspm_state(pdev));
-
- pcie_check_clock_pm(pdev, blacklist);
-
-unlock_out:
- if (error)
- free_link_state(pdev);
+ /* Setup initial Clock PM state */
+ state = (link->clkpm_capable) ? policy_to_clkpm_state(link) : 0;
+ pcie_set_clkpm(link, state);
+unlock:
mutex_unlock(&aspm_lock);
out:
up_read(&pci_bus_sem);
@@ -725,11 +687,11 @@
/* All functions are removed, so just disable ASPM for the link */
__pcie_aspm_config_one_dev(parent, 0);
- list_del(&link_state->sibiling);
+ list_del(&link_state->sibling);
list_del(&link_state->link);
/* Clock PM is for endpoint device */
- free_link_state(parent);
+ free_link_state(link_state);
out:
mutex_unlock(&aspm_lock);
up_read(&pci_bus_sem);
@@ -749,7 +711,7 @@
* devices changed PM state, we should recheck if latency meets all
* functions' requirement
*/
- pcie_aspm_configure_link_state(pdev, link_state->enabled_state);
+ pcie_aspm_configure_link_state(link_state, link_state->aspm_enabled);
}
/*
@@ -772,14 +734,12 @@
down_read(&pci_bus_sem);
mutex_lock(&aspm_lock);
link_state = parent->link_state;
- link_state->support_state &=
- ~(state & (PCIE_LINK_STATE_L0S|PCIE_LINK_STATE_L1));
- if (state & PCIE_LINK_STATE_CLKPM)
- link_state->clk_pm_capable = 0;
-
- __pcie_aspm_configure_link_state(parent, link_state->enabled_state);
- if (!link_state->clk_pm_capable && link_state->clk_pm_enabled)
- pcie_set_clock_pm(parent, 0);
+ link_state->aspm_support &= ~state;
+ __pcie_aspm_configure_link_state(link_state, link_state->aspm_enabled);
+ if (state & PCIE_LINK_STATE_CLKPM) {
+ link_state->clkpm_capable = 0;
+ pcie_set_clkpm(link_state, 0);
+ }
mutex_unlock(&aspm_lock);
up_read(&pci_bus_sem);
}
@@ -788,7 +748,6 @@
static int pcie_aspm_set_policy(const char *val, struct kernel_param *kp)
{
int i;
- struct pci_dev *pdev;
struct pcie_link_state *link_state;
for (i = 0; i < ARRAY_SIZE(policy_str); i++)
@@ -802,14 +761,10 @@
down_read(&pci_bus_sem);
mutex_lock(&aspm_lock);
aspm_policy = i;
- list_for_each_entry(link_state, &link_list, sibiling) {
- pdev = link_state->pdev;
- __pcie_aspm_configure_link_state(pdev,
- policy_to_aspm_state(pdev));
- if (link_state->clk_pm_capable &&
- link_state->clk_pm_enabled != policy_to_clkpm_state(pdev))
- pcie_set_clock_pm(pdev, policy_to_clkpm_state(pdev));
-
+ list_for_each_entry(link_state, &link_list, sibling) {
+ __pcie_aspm_configure_link_state(link_state,
+ policy_to_aspm_state(link_state));
+ pcie_set_clkpm(link_state, policy_to_clkpm_state(link_state));
}
mutex_unlock(&aspm_lock);
up_read(&pci_bus_sem);
@@ -838,7 +793,7 @@
struct pci_dev *pci_device = to_pci_dev(dev);
struct pcie_link_state *link_state = pci_device->link_state;
- return sprintf(buf, "%d\n", link_state->enabled_state);
+ return sprintf(buf, "%d\n", link_state->aspm_enabled);
}
static ssize_t link_state_store(struct device *dev,
@@ -846,7 +801,7 @@
const char *buf,
size_t n)
{
- struct pci_dev *pci_device = to_pci_dev(dev);
+ struct pci_dev *pdev = to_pci_dev(dev);
int state;
if (n < 1)
@@ -854,7 +809,7 @@
state = buf[0]-'0';
if (state >= 0 && state <= 3) {
/* setup link aspm state */
- pcie_aspm_configure_link_state(pci_device, state);
+ pcie_aspm_configure_link_state(pdev->link_state, state);
return n;
}
@@ -868,7 +823,7 @@
struct pci_dev *pci_device = to_pci_dev(dev);
struct pcie_link_state *link_state = pci_device->link_state;
- return sprintf(buf, "%d\n", link_state->clk_pm_enabled);
+ return sprintf(buf, "%d\n", link_state->clkpm_enabled);
}
static ssize_t clk_ctl_store(struct device *dev,
@@ -876,7 +831,7 @@
const char *buf,
size_t n)
{
- struct pci_dev *pci_device = to_pci_dev(dev);
+ struct pci_dev *pdev = to_pci_dev(dev);
int state;
if (n < 1)
@@ -885,7 +840,7 @@
down_read(&pci_bus_sem);
mutex_lock(&aspm_lock);
- pcie_set_clock_pm(pci_device, !!state);
+ pcie_set_clkpm_nocheck(pdev->link_state, !!state);
mutex_unlock(&aspm_lock);
up_read(&pci_bus_sem);
@@ -904,10 +859,10 @@
pdev->pcie_type != PCI_EXP_TYPE_DOWNSTREAM) || !link_state)
return;
- if (link_state->support_state)
+ if (link_state->aspm_support)
sysfs_add_file_to_group(&pdev->dev.kobj,
&dev_attr_link_state.attr, power_group);
- if (link_state->clk_pm_capable)
+ if (link_state->clkpm_capable)
sysfs_add_file_to_group(&pdev->dev.kobj,
&dev_attr_clk_ctl.attr, power_group);
}
@@ -920,10 +875,10 @@
pdev->pcie_type != PCI_EXP_TYPE_DOWNSTREAM) || !link_state)
return;
- if (link_state->support_state)
+ if (link_state->aspm_support)
sysfs_remove_file_from_group(&pdev->dev.kobj,
&dev_attr_link_state.attr, power_group);
- if (link_state->clk_pm_capable)
+ if (link_state->clkpm_capable)
sysfs_remove_file_from_group(&pdev->dev.kobj,
&dev_attr_clk_ctl.attr, power_group);
}
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index f1ae247..40e75f6 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -193,7 +193,7 @@
res->flags |= pci_calc_resource_flags(l) | IORESOURCE_SIZEALIGN;
if (type == pci_bar_io) {
l &= PCI_BASE_ADDRESS_IO_MASK;
- mask = PCI_BASE_ADDRESS_IO_MASK & 0xffff;
+ mask = PCI_BASE_ADDRESS_IO_MASK & IO_SPACE_LIMIT;
} else {
l &= PCI_BASE_ADDRESS_MEM_MASK;
mask = (u32)PCI_BASE_ADDRESS_MEM_MASK;
@@ -237,6 +237,8 @@
dev_printk(KERN_DEBUG, &dev->dev,
"reg %x 64bit mmio: %pR\n", pos, res);
}
+
+ res->flags |= IORESOURCE_MEM_64;
} else {
sz = pci_size(l, sz, mask);
@@ -287,7 +289,7 @@
struct resource *res;
int i;
- if (!child->parent) /* It's a host bus, nothing to read */
+ if (pci_is_root_bus(child)) /* It's a host bus, nothing to read */
return;
if (dev->transparent) {
@@ -362,7 +364,10 @@
}
}
if (base <= limit) {
- res->flags = (mem_base_lo & PCI_MEMORY_RANGE_TYPE_MASK) | IORESOURCE_MEM | IORESOURCE_PREFETCH;
+ res->flags = (mem_base_lo & PCI_PREF_RANGE_TYPE_MASK) |
+ IORESOURCE_MEM | IORESOURCE_PREFETCH;
+ if (res->flags & PCI_PREF_RANGE_TYPE_64)
+ res->flags |= IORESOURCE_MEM_64;
res->start = base;
res->end = limit + 0xfffff;
dev_printk(KERN_DEBUG, &dev->dev, "bridge %sbit mmio pref: %pR\n",
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index bd4253f..56552d7 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -1133,6 +1133,7 @@
switch (dev->subsystem_device) {
case 0x1751: /* M2N notebook */
case 0x1821: /* M5N notebook */
+ case 0x1897: /* A6L notebook */
asus_hides_smbus = 1;
}
else if (dev->device == PCI_DEVICE_ID_INTEL_82855PM_HB)
@@ -1163,6 +1164,7 @@
switch (dev->subsystem_device) {
case 0x12bc: /* HP D330L */
case 0x12bd: /* HP D530 */
+ case 0x006a: /* HP Compaq nx9500 */
asus_hides_smbus = 1;
}
else if (dev->device == PCI_DEVICE_ID_INTEL_82875_HB)
@@ -2016,6 +2018,28 @@
PCI_DEVICE_ID_NX2_5709S,
quirk_brcm_570x_limit_vpd);
+/* Originally in EDAC sources for i82875P:
+ * Intel tells BIOS developers to hide device 6 which
+ * configures the overflow device access containing
+ * the DRBs - this is where we expose device 6.
+ * http://www.x86-secret.com/articles/tweak/pat/patsecrets-2.htm
+ */
+static void __devinit quirk_unhide_mch_dev6(struct pci_dev *dev)
+{
+ u8 reg;
+
+ if (pci_read_config_byte(dev, 0xF4, &reg) == 0 && !(reg & 0x02)) {
+ dev_info(&dev->dev, "Enabling MCH 'Overflow' Device\n");
+ pci_write_config_byte(dev, 0xF4, reg | 0x02);
+ }
+}
+
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82865_HB,
+ quirk_unhide_mch_dev6);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82875_HB,
+ quirk_unhide_mch_dev6);
+
+
#ifdef CONFIG_PCI_MSI
/* Some chipsets do not support MSI. We cannot easily rely on setting
* PCI_BUS_FLAGS_NO_MSI in its bus flags because there are actually
diff --git a/drivers/pci/remove.c b/drivers/pci/remove.c
index 86503c1..176615e 100644
--- a/drivers/pci/remove.c
+++ b/drivers/pci/remove.c
@@ -32,8 +32,6 @@
static void pci_destroy_dev(struct pci_dev *dev)
{
- pci_stop_dev(dev);
-
/* Remove the device from the device lists, and prevent any further
* list accesses from this device */
down_write(&pci_bus_sem);
diff --git a/drivers/pci/search.c b/drivers/pci/search.c
index 710d4ea..e8cb505 100644
--- a/drivers/pci/search.c
+++ b/drivers/pci/search.c
@@ -29,7 +29,7 @@
if (pdev->is_pcie)
return NULL;
while (1) {
- if (!pdev->bus->parent)
+ if (pci_is_root_bus(pdev->bus))
break;
pdev = pdev->bus->self;
/* a p2p bridge */
@@ -115,36 +115,6 @@
#ifdef CONFIG_PCI_LEGACY
/**
- * pci_find_slot - locate PCI device from a given PCI slot
- * @bus: number of PCI bus on which desired PCI device resides
- * @devfn: encodes number of PCI slot in which the desired PCI
- * device resides and the logical device number within that slot
- * in case of multi-function devices.
- *
- * Given a PCI bus and slot/function number, the desired PCI device
- * is located in system global list of PCI devices. If the device
- * is found, a pointer to its data structure is returned. If no
- * device is found, %NULL is returned.
- *
- * NOTE: Do not use this function any more; use pci_get_slot() instead, as
- * the PCI device returned by this function can disappear at any moment in
- * time.
- */
-struct pci_dev *pci_find_slot(unsigned int bus, unsigned int devfn)
-{
- struct pci_dev *dev = NULL;
-
- while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) {
- if (dev->bus->number == bus && dev->devfn == devfn) {
- pci_dev_put(dev);
- return dev;
- }
- }
- return NULL;
-}
-EXPORT_SYMBOL(pci_find_slot);
-
-/**
* pci_find_device - begin or continue searching for a PCI device by vendor/device id
* @vendor: PCI vendor id to match, or %PCI_ANY_ID to match all vendor ids
* @device: PCI device id to match, or %PCI_ANY_ID to match all device ids
diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c
index a00f854..b636e24 100644
--- a/drivers/pci/setup-bus.c
+++ b/drivers/pci/setup-bus.c
@@ -58,7 +58,6 @@
res = list->res;
idx = res - &list->dev->resource[0];
if (pci_assign_resource(list->dev, idx)) {
- /* FIXME: get rid of this */
res->start = 0;
res->end = 0;
res->flags = 0;
@@ -143,6 +142,7 @@
struct pci_dev *bridge = bus->self;
struct pci_bus_region region;
u32 l, bu, lu, io_upper16;
+ int pref_mem64;
if (pci_is_enabled(bridge))
return;
@@ -198,16 +198,22 @@
pci_write_config_dword(bridge, PCI_PREF_LIMIT_UPPER32, 0);
/* Set up PREF base/limit. */
+ pref_mem64 = 0;
bu = lu = 0;
pcibios_resource_to_bus(bridge, &region, bus->resource[2]);
if (bus->resource[2]->flags & IORESOURCE_PREFETCH) {
+ int width = 8;
l = (region.start >> 16) & 0xfff0;
l |= region.end & 0xfff00000;
- bu = upper_32_bits(region.start);
- lu = upper_32_bits(region.end);
- dev_info(&bridge->dev, " PREFETCH window: %#016llx-%#016llx\n",
- (unsigned long long)region.start,
- (unsigned long long)region.end);
+ if (bus->resource[2]->flags & IORESOURCE_MEM_64) {
+ pref_mem64 = 1;
+ bu = upper_32_bits(region.start);
+ lu = upper_32_bits(region.end);
+ width = 16;
+ }
+ dev_info(&bridge->dev, " PREFETCH window: %#0*llx-%#0*llx\n",
+ width, (unsigned long long)region.start,
+ width, (unsigned long long)region.end);
}
else {
l = 0x0000fff0;
@@ -215,9 +221,11 @@
}
pci_write_config_dword(bridge, PCI_PREF_MEMORY_BASE, l);
- /* Set the upper 32 bits of PREF base & limit. */
- pci_write_config_dword(bridge, PCI_PREF_BASE_UPPER32, bu);
- pci_write_config_dword(bridge, PCI_PREF_LIMIT_UPPER32, lu);
+ if (pref_mem64) {
+ /* Set the upper 32 bits of PREF base & limit. */
+ pci_write_config_dword(bridge, PCI_PREF_BASE_UPPER32, bu);
+ pci_write_config_dword(bridge, PCI_PREF_LIMIT_UPPER32, lu);
+ }
pci_write_config_word(bridge, PCI_BRIDGE_CONTROL, bus->bridge_ctl);
}
@@ -255,8 +263,25 @@
pci_read_config_dword(bridge, PCI_PREF_MEMORY_BASE, &pmem);
pci_write_config_dword(bridge, PCI_PREF_MEMORY_BASE, 0x0);
}
- if (pmem)
+ if (pmem) {
b_res[2].flags |= IORESOURCE_MEM | IORESOURCE_PREFETCH;
+ if ((pmem & PCI_PREF_RANGE_TYPE_MASK) == PCI_PREF_RANGE_TYPE_64)
+ b_res[2].flags |= IORESOURCE_MEM_64;
+ }
+
+ /* double check if bridge does support 64 bit pref */
+ if (b_res[2].flags & IORESOURCE_MEM_64) {
+ u32 mem_base_hi, tmp;
+ pci_read_config_dword(bridge, PCI_PREF_BASE_UPPER32,
+ &mem_base_hi);
+ pci_write_config_dword(bridge, PCI_PREF_BASE_UPPER32,
+ 0xffffffff);
+ pci_read_config_dword(bridge, PCI_PREF_BASE_UPPER32, &tmp);
+ if (!tmp)
+ b_res[2].flags &= ~IORESOURCE_MEM_64;
+ pci_write_config_dword(bridge, PCI_PREF_BASE_UPPER32,
+ mem_base_hi);
+ }
}
/* Helper function for sizing routines: find first available
@@ -336,6 +361,7 @@
resource_size_t aligns[12]; /* Alignments from 1Mb to 2Gb */
int order, max_order;
struct resource *b_res = find_free_bus_resource(bus, type);
+ unsigned int mem64_mask = 0;
if (!b_res)
return 0;
@@ -344,9 +370,12 @@
max_order = 0;
size = 0;
+ mem64_mask = b_res->flags & IORESOURCE_MEM_64;
+ b_res->flags &= ~IORESOURCE_MEM_64;
+
list_for_each_entry(dev, &bus->devices, bus_list) {
int i;
-
+
for (i = 0; i < PCI_NUM_RESOURCES; i++) {
struct resource *r = &dev->resource[i];
resource_size_t r_size;
@@ -372,6 +401,7 @@
aligns[order] += align;
if (order > max_order)
max_order = order;
+ mem64_mask &= r->flags & IORESOURCE_MEM_64;
}
}
@@ -396,6 +426,7 @@
b_res->start = min_align;
b_res->end = size + min_align - 1;
b_res->flags |= IORESOURCE_STARTALIGN;
+ b_res->flags |= mem64_mask;
return 1;
}
diff --git a/drivers/pci/setup-res.c b/drivers/pci/setup-res.c
index 1240351..b711fb7 100644
--- a/drivers/pci/setup-res.c
+++ b/drivers/pci/setup-res.c
@@ -135,23 +135,16 @@
}
#endif /* CONFIG_PCI_QUIRKS */
-int pci_assign_resource(struct pci_dev *dev, int resno)
+static int __pci_assign_resource(struct pci_bus *bus, struct pci_dev *dev,
+ int resno)
{
- struct pci_bus *bus = dev->bus;
struct resource *res = dev->resource + resno;
resource_size_t size, min, align;
int ret;
size = resource_size(res);
min = (res->flags & IORESOURCE_IO) ? PCIBIOS_MIN_IO : PCIBIOS_MIN_MEM;
-
align = resource_alignment(res);
- if (!align) {
- dev_info(&dev->dev, "BAR %d: can't allocate resource (bogus "
- "alignment) %pR flags %#lx\n",
- resno, res, res->flags);
- return -EINVAL;
- }
/* First, try exact prefetching match.. */
ret = pci_bus_alloc_resource(bus, res, size, align, min,
@@ -169,10 +162,7 @@
pcibios_align_resource, dev);
}
- if (ret) {
- dev_info(&dev->dev, "BAR %d: can't allocate %s resource %pR\n",
- resno, res->flags & IORESOURCE_IO ? "I/O" : "mem", res);
- } else {
+ if (!ret) {
res->flags &= ~IORESOURCE_STARTALIGN;
if (resno < PCI_BRIDGE_RESOURCES)
pci_update_resource(dev, resno);
@@ -181,6 +171,39 @@
return ret;
}
+int pci_assign_resource(struct pci_dev *dev, int resno)
+{
+ struct resource *res = dev->resource + resno;
+ resource_size_t align;
+ struct pci_bus *bus;
+ int ret;
+
+ align = resource_alignment(res);
+ if (!align) {
+ dev_info(&dev->dev, "BAR %d: can't allocate resource (bogus "
+ "alignment) %pR flags %#lx\n",
+ resno, res, res->flags);
+ return -EINVAL;
+ }
+
+ bus = dev->bus;
+ while ((ret = __pci_assign_resource(bus, dev, resno))) {
+ if (bus->parent && bus->self->transparent)
+ bus = bus->parent;
+ else
+ bus = NULL;
+ if (bus)
+ continue;
+ break;
+ }
+
+ if (ret)
+ dev_info(&dev->dev, "BAR %d: can't allocate %s resource %pR\n",
+ resno, res->flags & IORESOURCE_IO ? "I/O" : "mem", res);
+
+ return ret;
+}
+
#if 0
int pci_assign_resource_fixed(struct pci_dev *dev, int resno)
{
diff --git a/drivers/pci/slot.c b/drivers/pci/slot.c
index fe95ce2..eddb074 100644
--- a/drivers/pci/slot.c
+++ b/drivers/pci/slot.c
@@ -307,6 +307,45 @@
}
EXPORT_SYMBOL_GPL(pci_destroy_slot);
+#if defined(CONFIG_HOTPLUG_PCI) || defined(CONFIG_HOTPLUG_PCI_MODULE)
+#include <linux/pci_hotplug.h>
+/**
+ * pci_hp_create_module_link - create symbolic link to the hotplug driver module.
+ * @pci_slot: struct pci_slot
+ *
+ * Helper function for pci_hotplug_core.c to create symbolic link to
+ * the hotplug driver module.
+ */
+void pci_hp_create_module_link(struct pci_slot *pci_slot)
+{
+ struct hotplug_slot *slot = pci_slot->hotplug;
+ struct kobject *kobj = NULL;
+ int no_warn;
+
+ if (!slot || !slot->ops)
+ return;
+ kobj = kset_find_obj(module_kset, slot->ops->mod_name);
+ if (!kobj)
+ return;
+ no_warn = sysfs_create_link(&pci_slot->kobj, kobj, "module");
+ kobject_put(kobj);
+}
+EXPORT_SYMBOL_GPL(pci_hp_create_module_link);
+
+/**
+ * pci_hp_remove_module_link - remove symbolic link to the hotplug driver module.
+ * @pci_slot: struct pci_slot
+ *
+ * Helper function for pci_hotplug_core.c to remove symbolic link to
+ * the hotplug driver module.
+ */
+void pci_hp_remove_module_link(struct pci_slot *pci_slot)
+{
+ sysfs_remove_link(&pci_slot->kobj, "module");
+}
+EXPORT_SYMBOL_GPL(pci_hp_remove_module_link);
+#endif
+
static int pci_slot_init(void)
{
struct kset *pci_bus_kset;
diff --git a/drivers/platform/x86/acer-wmi.c b/drivers/platform/x86/acer-wmi.c
index 09a503e..be2fd6f 100644
--- a/drivers/platform/x86/acer-wmi.c
+++ b/drivers/platform/x86/acer-wmi.c
@@ -958,12 +958,12 @@
status = get_u32(&state, ACER_CAP_WIRELESS);
if (ACPI_SUCCESS(status))
- rfkill_set_sw_state(wireless_rfkill, !!state);
+ rfkill_set_sw_state(wireless_rfkill, !state);
if (has_cap(ACER_CAP_BLUETOOTH)) {
status = get_u32(&state, ACER_CAP_BLUETOOTH);
if (ACPI_SUCCESS(status))
- rfkill_set_sw_state(bluetooth_rfkill, !!state);
+ rfkill_set_sw_state(bluetooth_rfkill, !state);
}
schedule_delayed_work(&acer_rfkill_work, round_jiffies_relative(HZ));
diff --git a/drivers/platform/x86/eeepc-laptop.c b/drivers/platform/x86/eeepc-laptop.c
index 03bf522..8153b3e 100644
--- a/drivers/platform/x86/eeepc-laptop.c
+++ b/drivers/platform/x86/eeepc-laptop.c
@@ -180,6 +180,7 @@
*/
static int eeepc_hotk_add(struct acpi_device *device);
static int eeepc_hotk_remove(struct acpi_device *device, int type);
+static int eeepc_hotk_resume(struct acpi_device *device);
static const struct acpi_device_id eeepc_device_ids[] = {
{EEEPC_HOTK_HID, 0},
@@ -194,6 +195,7 @@
.ops = {
.add = eeepc_hotk_add,
.remove = eeepc_hotk_remove,
+ .resume = eeepc_hotk_resume,
},
};
@@ -512,15 +514,12 @@
return -1;
}
-static void eeepc_rfkill_notify(acpi_handle handle, u32 event, void *data)
+static void eeepc_rfkill_hotplug(void)
{
struct pci_dev *dev;
struct pci_bus *bus = pci_find_bus(0, 1);
bool blocked;
- if (event != ACPI_NOTIFY_BUS_CHECK)
- return;
-
if (!bus) {
printk(EEEPC_WARNING "Unable to find PCI bus 1?\n");
return;
@@ -551,6 +550,14 @@
rfkill_set_sw_state(ehotk->eeepc_wlan_rfkill, blocked);
}
+static void eeepc_rfkill_notify(acpi_handle handle, u32 event, void *data)
+{
+ if (event != ACPI_NOTIFY_BUS_CHECK)
+ return;
+
+ eeepc_rfkill_hotplug();
+}
+
static void eeepc_hotk_notify(acpi_handle handle, u32 event, void *data)
{
static struct key_entry *key;
@@ -675,8 +682,8 @@
if (!ehotk->eeepc_wlan_rfkill)
goto wlan_fail;
- rfkill_set_sw_state(ehotk->eeepc_wlan_rfkill,
- get_acpi(CM_ASL_WLAN) != 1);
+ rfkill_init_sw_state(ehotk->eeepc_wlan_rfkill,
+ get_acpi(CM_ASL_WLAN) != 1);
result = rfkill_register(ehotk->eeepc_wlan_rfkill);
if (result)
goto wlan_fail;
@@ -693,8 +700,8 @@
if (!ehotk->eeepc_bluetooth_rfkill)
goto bluetooth_fail;
- rfkill_set_sw_state(ehotk->eeepc_bluetooth_rfkill,
- get_acpi(CM_ASL_BLUETOOTH) != 1);
+ rfkill_init_sw_state(ehotk->eeepc_bluetooth_rfkill,
+ get_acpi(CM_ASL_BLUETOOTH) != 1);
result = rfkill_register(ehotk->eeepc_bluetooth_rfkill);
if (result)
goto bluetooth_fail;
@@ -734,6 +741,33 @@
return 0;
}
+static int eeepc_hotk_resume(struct acpi_device *device)
+{
+ if (ehotk->eeepc_wlan_rfkill) {
+ bool wlan;
+
+ /* Workaround - it seems that _PTS disables the wireless
+ without notification or changing the value read by WLAN.
+ Normally this is fine because the correct value is restored
+ from the non-volatile storage on resume, but we need to do
+ it ourselves in case suspend is aborted, or we lose wireless.
+ */
+ wlan = get_acpi(CM_ASL_WLAN);
+ set_acpi(CM_ASL_WLAN, wlan);
+
+ rfkill_set_sw_state(ehotk->eeepc_wlan_rfkill,
+ wlan != 1);
+
+ eeepc_rfkill_hotplug();
+ }
+
+ if (ehotk->eeepc_bluetooth_rfkill)
+ rfkill_set_sw_state(ehotk->eeepc_bluetooth_rfkill,
+ get_acpi(CM_ASL_BLUETOOTH) != 1);
+
+ return 0;
+}
+
/*
* Hwmon
*/
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
index 86e9585..40d64c0 100644
--- a/drivers/platform/x86/thinkpad_acpi.c
+++ b/drivers/platform/x86/thinkpad_acpi.c
@@ -1163,8 +1163,8 @@
{
struct tpacpi_rfk *atp_rfk;
int res;
- bool initial_sw_state = false;
- int initial_sw_status;
+ bool sw_state = false;
+ int sw_status;
BUG_ON(id >= TPACPI_RFK_SW_MAX || tpacpi_rfkill_switches[id]);
@@ -1185,17 +1185,17 @@
atp_rfk->id = id;
atp_rfk->ops = tp_rfkops;
- initial_sw_status = (tp_rfkops->get_status)();
- if (initial_sw_status < 0) {
+ sw_status = (tp_rfkops->get_status)();
+ if (sw_status < 0) {
printk(TPACPI_ERR
"failed to read initial state for %s, error %d\n",
- name, initial_sw_status);
+ name, sw_status);
} else {
- initial_sw_state = (initial_sw_status == TPACPI_RFK_RADIO_OFF);
+ sw_state = (sw_status == TPACPI_RFK_RADIO_OFF);
if (set_default) {
/* try to keep the initial state, since we ask the
* firmware to preserve it across S5 in NVRAM */
- rfkill_set_sw_state(atp_rfk->rfkill, initial_sw_state);
+ rfkill_init_sw_state(atp_rfk->rfkill, sw_state);
}
}
rfkill_set_hw_state(atp_rfk->rfkill, tpacpi_rfk_check_hwblock_state());
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index e5b84db..7498366 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -470,7 +470,7 @@
*/
static void dasd_change_state(struct dasd_device *device)
{
- int rc;
+ int rc;
if (device->state == device->target)
/* Already where we want to go today... */
@@ -479,8 +479,10 @@
rc = dasd_increase_state(device);
else
rc = dasd_decrease_state(device);
- if (rc && rc != -EAGAIN)
- device->target = device->state;
+ if (rc == -EAGAIN)
+ return;
+ if (rc)
+ device->target = device->state;
if (device->state == device->target) {
wake_up(&dasd_init_waitq);
@@ -2503,15 +2505,25 @@
if (IS_ERR(device))
return PTR_ERR(device);
+ /* allow new IO again */
+ device->stopped &= ~DASD_STOPPED_PM;
+ device->stopped &= ~DASD_UNRESUMED_PM;
+
dasd_schedule_device_bh(device);
if (device->block)
dasd_schedule_block_bh(device->block);
if (device->discipline->restore)
rc = device->discipline->restore(device);
+ if (rc)
+ /*
+ * if the resume failed for the DASD we put it in
+ * an UNRESUMED stop state
+ */
+ device->stopped |= DASD_UNRESUMED_PM;
dasd_put_device(device);
- return rc;
+ return 0;
}
EXPORT_SYMBOL_GPL(dasd_generic_restore_device);
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index 1c28ec3..f8b1f04 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -3243,9 +3243,6 @@
int is_known, rc;
struct dasd_uid temp_uid;
- /* allow new IO again */
- device->stopped &= ~DASD_STOPPED_PM;
-
private = (struct dasd_eckd_private *) device->private;
/* Read Configuration Data */
@@ -3295,12 +3292,7 @@
return 0;
out_err:
- /*
- * if the resume failed for the DASD we put it in
- * an UNRESUMED stop state
- */
- device->stopped |= DASD_UNRESUMED_PM;
- return 0;
+ return -1;
}
static struct ccw_driver dasd_eckd_driver = {
diff --git a/drivers/s390/char/con3215.c b/drivers/s390/char/con3215.c
index 04dc734..21639d6 100644
--- a/drivers/s390/char/con3215.c
+++ b/drivers/s390/char/con3215.c
@@ -20,10 +20,7 @@
#include <linux/interrupt.h>
#include <linux/err.h>
#include <linux/reboot.h>
-
#include <linux/slab.h>
-#include <linux/bootmem.h>
-
#include <asm/ccwdev.h>
#include <asm/cio.h>
#include <asm/io.h>
@@ -735,7 +732,7 @@
unsigned long flags;
/* Empty the output buffer, then prevent new I/O. */
- raw = cdev->dev.driver_data;
+ raw = dev_get_drvdata(&cdev->dev);
spin_lock_irqsave(get_ccwdev_lock(raw->cdev), flags);
raw3215_make_room(raw, RAW3215_BUFFER_SIZE);
raw->flags |= RAW3215_FROZEN;
@@ -749,7 +746,7 @@
unsigned long flags;
/* Allow I/O again and flush output buffer. */
- raw = cdev->dev.driver_data;
+ raw = dev_get_drvdata(&cdev->dev);
spin_lock_irqsave(get_ccwdev_lock(raw->cdev), flags);
raw->flags &= ~RAW3215_FROZEN;
raw->flags |= RAW3215_FLUSHING;
@@ -883,7 +880,7 @@
raw3215_freelist = NULL;
spin_lock_init(&raw3215_freelist_lock);
for (i = 0; i < NR_3215_REQ; i++) {
- req = (struct raw3215_req *) alloc_bootmem_low(sizeof(struct raw3215_req));
+ req = kzalloc(sizeof(struct raw3215_req), GFP_KERNEL | GFP_DMA);
req->next = raw3215_freelist;
raw3215_freelist = req;
}
@@ -893,10 +890,9 @@
return -ENODEV;
raw3215[0] = raw = (struct raw3215_info *)
- alloc_bootmem_low(sizeof(struct raw3215_info));
- memset(raw, 0, sizeof(struct raw3215_info));
- raw->buffer = (char *) alloc_bootmem_low(RAW3215_BUFFER_SIZE);
- raw->inbuf = (char *) alloc_bootmem_low(RAW3215_INBUF_SIZE);
+ kzalloc(sizeof(struct raw3215_info), GFP_KERNEL | GFP_DMA);
+ raw->buffer = kzalloc(RAW3215_BUFFER_SIZE, GFP_KERNEL | GFP_DMA);
+ raw->inbuf = kzalloc(RAW3215_INBUF_SIZE, GFP_KERNEL | GFP_DMA);
raw->cdev = cdev;
dev_set_drvdata(&cdev->dev, raw);
cdev->handler = raw3215_irq;
@@ -906,9 +902,9 @@
/* Request the console irq */
if (raw3215_startup(raw) != 0) {
- free_bootmem((unsigned long) raw->inbuf, RAW3215_INBUF_SIZE);
- free_bootmem((unsigned long) raw->buffer, RAW3215_BUFFER_SIZE);
- free_bootmem((unsigned long) raw, sizeof(struct raw3215_info));
+ kfree(raw->inbuf);
+ kfree(raw->buffer);
+ kfree(raw);
raw3215[0] = NULL;
return -ENODEV;
}
diff --git a/drivers/s390/char/con3270.c b/drivers/s390/char/con3270.c
index 44d02e3..bb838bd 100644
--- a/drivers/s390/char/con3270.c
+++ b/drivers/s390/char/con3270.c
@@ -7,7 +7,6 @@
* Copyright IBM Corp. 2003, 2009
*/
-#include <linux/bootmem.h>
#include <linux/console.h>
#include <linux/init.h>
#include <linux/interrupt.h>
@@ -600,16 +599,14 @@
if (IS_ERR(rp))
return PTR_ERR(rp);
- condev = (struct con3270 *) alloc_bootmem_low(sizeof(struct con3270));
- memset(condev, 0, sizeof(struct con3270));
+ condev = kzalloc(sizeof(struct con3270), GFP_KERNEL | GFP_DMA);
condev->view.dev = rp;
- condev->read = raw3270_request_alloc_bootmem(0);
+ condev->read = raw3270_request_alloc(0);
condev->read->callback = con3270_read_callback;
condev->read->callback_data = condev;
- condev->write =
- raw3270_request_alloc_bootmem(CON3270_OUTPUT_BUFFER_SIZE);
- condev->kreset = raw3270_request_alloc_bootmem(1);
+ condev->write = raw3270_request_alloc(CON3270_OUTPUT_BUFFER_SIZE);
+ condev->kreset = raw3270_request_alloc(1);
INIT_LIST_HEAD(&condev->lines);
INIT_LIST_HEAD(&condev->update);
@@ -623,7 +620,7 @@
INIT_LIST_HEAD(&condev->freemem);
for (i = 0; i < CON3270_STRING_PAGES; i++) {
- cbuf = (void *) alloc_bootmem_low_pages(PAGE_SIZE);
+ cbuf = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
add_string_memory(&condev->freemem, cbuf, PAGE_SIZE);
}
condev->cline = alloc_string(&condev->freemem, condev->view.cols);
diff --git a/drivers/s390/char/monreader.c b/drivers/s390/char/monreader.c
index 75a8831..7892550 100644
--- a/drivers/s390/char/monreader.c
+++ b/drivers/s390/char/monreader.c
@@ -320,7 +320,7 @@
goto out_path;
}
filp->private_data = monpriv;
- monreader_device->driver_data = monpriv;
+ dev_set_drvdata(monreader_device, monpriv);
unlock_kernel();
return nonseekable_open(inode, filp);
@@ -463,7 +463,7 @@
*****************************************************************************/
static int monreader_freeze(struct device *dev)
{
- struct mon_private *monpriv = dev->driver_data;
+ struct mon_private *monpriv = dev_get_drvdata(dev);
int rc;
if (!monpriv)
@@ -487,7 +487,7 @@
static int monreader_thaw(struct device *dev)
{
- struct mon_private *monpriv = dev->driver_data;
+ struct mon_private *monpriv = dev_get_drvdata(dev);
int rc;
if (!monpriv)
diff --git a/drivers/s390/char/raw3270.c b/drivers/s390/char/raw3270.c
index acab7b2..d6a022f 100644
--- a/drivers/s390/char/raw3270.c
+++ b/drivers/s390/char/raw3270.c
@@ -7,7 +7,6 @@
* Copyright IBM Corp. 2003, 2009
*/
-#include <linux/bootmem.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/init.h>
@@ -143,33 +142,6 @@
return rq;
}
-#ifdef CONFIG_TN3270_CONSOLE
-/*
- * Allocate a new 3270 ccw request from bootmem. Only works very
- * early in the boot process. Only con3270.c should be using this.
- */
-struct raw3270_request __init *raw3270_request_alloc_bootmem(size_t size)
-{
- struct raw3270_request *rq;
-
- rq = alloc_bootmem_low(sizeof(struct raw3270));
-
- /* alloc output buffer. */
- if (size > 0)
- rq->buffer = alloc_bootmem_low(size);
- rq->size = size;
- INIT_LIST_HEAD(&rq->list);
-
- /*
- * Setup ccw.
- */
- rq->ccw.cda = __pa(rq->buffer);
- rq->ccw.flags = CCW_FLAG_SLI;
-
- return rq;
-}
-#endif
-
/*
* Free 3270 ccw request
*/
@@ -846,8 +818,8 @@
char *ascebc;
int rc;
- rp = (struct raw3270 *) alloc_bootmem_low(sizeof(struct raw3270));
- ascebc = (char *) alloc_bootmem(256);
+ rp = kzalloc(sizeof(struct raw3270), GFP_KERNEL | GFP_DMA);
+ ascebc = kzalloc(256, GFP_KERNEL);
rc = raw3270_setup_device(cdev, rp, ascebc);
if (rc)
return ERR_PTR(rc);
@@ -1350,7 +1322,7 @@
struct raw3270_view *view;
unsigned long flags;
- rp = cdev->dev.driver_data;
+ rp = dev_get_drvdata(&cdev->dev);
if (!rp)
return 0;
spin_lock_irqsave(get_ccwdev_lock(rp->cdev), flags);
@@ -1376,7 +1348,7 @@
struct raw3270 *rp;
unsigned long flags;
- rp = cdev->dev.driver_data;
+ rp = dev_get_drvdata(&cdev->dev);
if (!rp)
return 0;
spin_lock_irqsave(get_ccwdev_lock(rp->cdev), flags);
diff --git a/drivers/s390/char/sclp_con.c b/drivers/s390/char/sclp_con.c
index 336811a..ad698d3 100644
--- a/drivers/s390/char/sclp_con.c
+++ b/drivers/s390/char/sclp_con.c
@@ -11,7 +11,6 @@
#include <linux/init.h>
#include <linux/timer.h>
#include <linux/jiffies.h>
-#include <linux/bootmem.h>
#include <linux/termios.h>
#include <linux/err.h>
#include <linux/reboot.h>
@@ -110,7 +109,7 @@
spin_lock_irqsave(&sclp_con_lock, flags);
if (timer_pending(&sclp_con_timer))
- del_timer_sync(&sclp_con_timer);
+ del_timer(&sclp_con_timer);
while (sclp_con_queue_running) {
spin_unlock_irqrestore(&sclp_con_lock, flags);
sclp_sync_wait();
@@ -298,8 +297,8 @@
/* Allocate pages for output buffering */
INIT_LIST_HEAD(&sclp_con_pages);
for (i = 0; i < MAX_CONSOLE_PAGES; i++) {
- page = alloc_bootmem_low_pages(PAGE_SIZE);
- list_add_tail((struct list_head *) page, &sclp_con_pages);
+ page = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
+ list_add_tail(page, &sclp_con_pages);
}
INIT_LIST_HEAD(&sclp_con_outqueue);
spin_lock_init(&sclp_con_lock);
diff --git a/drivers/s390/char/sclp_vt220.c b/drivers/s390/char/sclp_vt220.c
index 5518e24..178724f 100644
--- a/drivers/s390/char/sclp_vt220.c
+++ b/drivers/s390/char/sclp_vt220.c
@@ -20,7 +20,6 @@
#include <linux/major.h>
#include <linux/console.h>
#include <linux/kdev_t.h>
-#include <linux/bootmem.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/reboot.h>
@@ -601,10 +600,7 @@
list_for_each_safe(page, p, &sclp_vt220_empty) {
list_del(page);
- if (slab_is_available())
- free_page((unsigned long) page);
- else
- free_bootmem((unsigned long) page, PAGE_SIZE);
+ free_page((unsigned long) page);
}
}
@@ -640,16 +636,12 @@
sclp_vt220_flush_later = 0;
/* Allocate pages for output buffering */
+ rc = -ENOMEM;
for (i = 0; i < num_pages; i++) {
- if (slab_is_available())
- page = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
- else
- page = alloc_bootmem_low_pages(PAGE_SIZE);
- if (!page) {
- rc = -ENOMEM;
+ page = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
+ if (!page)
goto out;
- }
- list_add_tail((struct list_head *) page, &sclp_vt220_empty);
+ list_add_tail(page, &sclp_vt220_empty);
}
rc = sclp_register(&sclp_vt220_register);
out:
diff --git a/drivers/s390/char/tape_core.c b/drivers/s390/char/tape_core.c
index 595aa04..1d420d9 100644
--- a/drivers/s390/char/tape_core.c
+++ b/drivers/s390/char/tape_core.c
@@ -396,7 +396,7 @@
{
struct tape_device *device;
- device = cdev->dev.driver_data;
+ device = dev_get_drvdata(&cdev->dev);
if (!device) {
return -ENODEV;
}
diff --git a/drivers/s390/char/vmlogrdr.c b/drivers/s390/char/vmlogrdr.c
index 411cfa3..c20a4fe 100644
--- a/drivers/s390/char/vmlogrdr.c
+++ b/drivers/s390/char/vmlogrdr.c
@@ -663,7 +663,7 @@
static int vmlogrdr_pm_prepare(struct device *dev)
{
int rc;
- struct vmlogrdr_priv_t *priv = dev->driver_data;
+ struct vmlogrdr_priv_t *priv = dev_get_drvdata(dev);
rc = 0;
if (priv) {
@@ -753,7 +753,7 @@
dev->bus = &iucv_bus;
dev->parent = iucv_root;
dev->driver = &vmlogrdr_driver;
- dev->driver_data = priv;
+ dev_set_drvdata(dev, priv);
/*
* The release function could be called after the
* module has been unloaded. It's _only_ task is to
diff --git a/drivers/s390/char/vmur.c b/drivers/s390/char/vmur.c
index 7d9e67c..31b902e 100644
--- a/drivers/s390/char/vmur.c
+++ b/drivers/s390/char/vmur.c
@@ -170,7 +170,7 @@
*/
static int ur_pm_suspend(struct ccw_device *cdev)
{
- struct urdev *urd = cdev->dev.driver_data;
+ struct urdev *urd = dev_get_drvdata(&cdev->dev);
TRACE("ur_pm_suspend: cdev=%p\n", cdev);
if (urd->open_flag) {
diff --git a/drivers/s390/cio/qdio.h b/drivers/s390/cio/qdio.h
index 13bcb81..b1241f8 100644
--- a/drivers/s390/cio/qdio.h
+++ b/drivers/s390/cio/qdio.h
@@ -351,15 +351,6 @@
((bufnr - dec) & QDIO_MAX_BUFFERS_MASK)
/* prototypes for thin interrupt */
-void qdio_sync_after_thinint(struct qdio_q *q);
-int get_buf_state(struct qdio_q *q, unsigned int bufnr, unsigned char *state,
- int auto_ack);
-void qdio_check_outbound_after_thinint(struct qdio_q *q);
-int qdio_inbound_q_moved(struct qdio_q *q);
-void qdio_kick_handler(struct qdio_q *q);
-void qdio_stop_polling(struct qdio_q *q);
-int qdio_siga_sync_q(struct qdio_q *q);
-
void qdio_setup_thinint(struct qdio_irq *irq_ptr);
int qdio_establish_thinint(struct qdio_irq *irq_ptr);
void qdio_shutdown_thinint(struct qdio_irq *irq_ptr);
@@ -392,4 +383,6 @@
int qdio_setup_init(void);
void qdio_setup_exit(void);
+int debug_get_buf_state(struct qdio_q *q, unsigned int bufnr,
+ unsigned char *state);
#endif /* _CIO_QDIO_H */
diff --git a/drivers/s390/cio/qdio_debug.c b/drivers/s390/cio/qdio_debug.c
index e3434b3..b8626d4 100644
--- a/drivers/s390/cio/qdio_debug.c
+++ b/drivers/s390/cio/qdio_debug.c
@@ -70,9 +70,8 @@
seq_printf(m, "slsb buffer states:\n");
seq_printf(m, "|0 |8 |16 |24 |32 |40 |48 |56 63|\n");
- qdio_siga_sync_q(q);
for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; i++) {
- get_buf_state(q, i, &state, 0);
+ debug_get_buf_state(q, i, &state);
switch (state) {
case SLSB_P_INPUT_NOT_INIT:
case SLSB_P_OUTPUT_NOT_INIT:
diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c
index d79cf5b..0038750 100644
--- a/drivers/s390/cio/qdio_main.c
+++ b/drivers/s390/cio/qdio_main.c
@@ -231,8 +231,8 @@
return i;
}
-inline int get_buf_state(struct qdio_q *q, unsigned int bufnr,
- unsigned char *state, int auto_ack)
+static inline int get_buf_state(struct qdio_q *q, unsigned int bufnr,
+ unsigned char *state, int auto_ack)
{
return get_buf_states(q, bufnr, state, 1, auto_ack);
}
@@ -276,7 +276,7 @@
QDIO_MAX_BUFFERS_PER_Q);
}
-static int qdio_siga_sync(struct qdio_q *q, unsigned int output,
+static inline int qdio_siga_sync(struct qdio_q *q, unsigned int output,
unsigned int input)
{
int cc;
@@ -293,7 +293,7 @@
return cc;
}
-inline int qdio_siga_sync_q(struct qdio_q *q)
+static inline int qdio_siga_sync_q(struct qdio_q *q)
{
if (q->is_input_q)
return qdio_siga_sync(q, 0, q->mask);
@@ -358,8 +358,7 @@
return cc;
}
-/* called from thinint inbound handler */
-void qdio_sync_after_thinint(struct qdio_q *q)
+static inline void qdio_sync_after_thinint(struct qdio_q *q)
{
if (pci_out_supported(q)) {
if (need_siga_sync_thinint(q))
@@ -370,7 +369,14 @@
qdio_siga_sync_q(q);
}
-inline void qdio_stop_polling(struct qdio_q *q)
+int debug_get_buf_state(struct qdio_q *q, unsigned int bufnr,
+ unsigned char *state)
+{
+ qdio_siga_sync_q(q);
+ return get_buf_states(q, bufnr, state, 1, 0);
+}
+
+static inline void qdio_stop_polling(struct qdio_q *q)
{
if (!q->u.in.polling)
return;
@@ -449,13 +455,6 @@
count--;
if (!count)
return;
-
- /*
- * Need to change all PRIMED buffers to NOT_INIT, otherwise
- * we're loosing initiative in the thinint code.
- */
- set_buf_states(q, q->first_to_check, SLSB_P_INPUT_NOT_INIT,
- count);
}
static int get_inbound_buffer_frontier(struct qdio_q *q)
@@ -470,19 +469,13 @@
count = min(atomic_read(&q->nr_buf_used), QDIO_MAX_BUFFERS_MASK);
stop = add_buf(q->first_to_check, count);
- /*
- * No siga sync here, as a PCI or we after a thin interrupt
- * will sync the queues.
- */
-
- /* need to set count to 1 for non-qebsm */
- if (!is_qebsm(q))
- count = 1;
-
-check_next:
if (q->first_to_check == stop)
goto out;
+ /*
+	 * No siga sync here: either a PCI interrupt or the thin interrupt
+	 * handler has already synced the queues.
+ */
count = get_buf_states(q, q->first_to_check, &state, count, 1);
if (!count)
goto out;
@@ -490,14 +483,9 @@
switch (state) {
case SLSB_P_INPUT_PRIMED:
inbound_primed(q, count);
- /*
- * No siga-sync needed for non-qebsm here, as the inbound queue
- * will be synced on the next siga-r, resp.
- * tiqdio_is_inbound_q_done will do the siga-sync.
- */
q->first_to_check = add_buf(q->first_to_check, count);
atomic_sub(count, &q->nr_buf_used);
- goto check_next;
+ break;
case SLSB_P_INPUT_ERROR:
announce_buffer_error(q, count);
/* process the buffer, the upper layer will take care of it */
@@ -516,7 +504,7 @@
return q->first_to_check;
}
-int qdio_inbound_q_moved(struct qdio_q *q)
+static int qdio_inbound_q_moved(struct qdio_q *q)
{
int bufnr;
@@ -524,35 +512,32 @@
if ((bufnr != q->last_move) || q->qdio_error) {
q->last_move = bufnr;
- if (!need_siga_sync(q) && !pci_out_supported(q))
+ if (!is_thinint_irq(q->irq_ptr) && !MACHINE_IS_VM)
q->u.in.timestamp = get_usecs();
-
- DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in moved");
return 1;
} else
return 0;
}
-static int qdio_inbound_q_done(struct qdio_q *q)
+static inline int qdio_inbound_q_done(struct qdio_q *q)
{
unsigned char state = 0;
if (!atomic_read(&q->nr_buf_used))
return 1;
- /*
- * We need that one for synchronization with the adapter, as it
- * does a kind of PCI avoidance.
- */
qdio_siga_sync_q(q);
-
get_buf_state(q, q->first_to_check, &state, 0);
+
if (state == SLSB_P_INPUT_PRIMED)
- /* we got something to do */
+ /* more work coming */
return 0;
- /* on VM, we don't poll, so the q is always done here */
- if (need_siga_sync(q) || pci_out_supported(q))
+ if (is_thinint_irq(q->irq_ptr))
+ return 1;
+
+ /* don't poll under z/VM */
+ if (MACHINE_IS_VM)
return 1;
/*
@@ -563,14 +548,11 @@
DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in done:%3d",
q->first_to_check);
return 1;
- } else {
- DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in notd:%3d",
- q->first_to_check);
+ } else
return 0;
- }
}
-void qdio_kick_handler(struct qdio_q *q)
+static void qdio_kick_handler(struct qdio_q *q)
{
int start = q->first_to_kick;
int end = q->first_to_check;
@@ -619,7 +601,6 @@
goto again;
}
-/* inbound tasklet */
void qdio_inbound_processing(unsigned long data)
{
struct qdio_q *q = (struct qdio_q *)data;
@@ -642,11 +623,6 @@
count = min(atomic_read(&q->nr_buf_used), QDIO_MAX_BUFFERS_MASK);
stop = add_buf(q->first_to_check, count);
- /* need to set count to 1 for non-qebsm */
- if (!is_qebsm(q))
- count = 1;
-
-check_next:
if (q->first_to_check == stop)
return q->first_to_check;
@@ -661,13 +637,7 @@
atomic_sub(count, &q->nr_buf_used);
q->first_to_check = add_buf(q->first_to_check, count);
- /*
- * We fetch all buffer states at once. get_buf_states may
- * return count < stop. For QEBSM we do not loop.
- */
- if (is_qebsm(q))
- break;
- goto check_next;
+ break;
case SLSB_P_OUTPUT_ERROR:
announce_buffer_error(q, count);
/* process the buffer, the upper layer will take care of it */
@@ -797,8 +767,7 @@
tasklet_schedule(&q->tasklet);
}
-/* called from thinint inbound tasklet */
-void qdio_check_outbound_after_thinint(struct qdio_q *q)
+static inline void qdio_check_outbound_after_thinint(struct qdio_q *q)
{
struct qdio_q *out;
int i;
@@ -811,6 +780,46 @@
tasklet_schedule(&out->tasklet);
}
+static void __tiqdio_inbound_processing(struct qdio_q *q)
+{
+ qdio_perf_stat_inc(&perf_stats.thinint_inbound);
+ qdio_sync_after_thinint(q);
+
+ /*
+ * The interrupt could be caused by a PCI request. Check the
+ * PCI capable outbound queues.
+ */
+ qdio_check_outbound_after_thinint(q);
+
+ if (!qdio_inbound_q_moved(q))
+ return;
+
+ qdio_kick_handler(q);
+
+ if (!qdio_inbound_q_done(q)) {
+ qdio_perf_stat_inc(&perf_stats.thinint_inbound_loop);
+ if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED))
+ tasklet_schedule(&q->tasklet);
+ }
+
+ qdio_stop_polling(q);
+ /*
+ * We need to check again to not lose initiative after
+ * resetting the ACK state.
+ */
+ if (!qdio_inbound_q_done(q)) {
+ qdio_perf_stat_inc(&perf_stats.thinint_inbound_loop2);
+ if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED))
+ tasklet_schedule(&q->tasklet);
+ }
+}
+
+void tiqdio_inbound_processing(unsigned long data)
+{
+ struct qdio_q *q = (struct qdio_q *)data;
+ __tiqdio_inbound_processing(q);
+}
+
static inline void qdio_set_state(struct qdio_irq *irq_ptr,
enum qdio_irq_states state)
{
@@ -1488,18 +1497,13 @@
* @count: how many buffers to process
*/
int do_QDIO(struct ccw_device *cdev, unsigned int callflags,
- int q_nr, int bufnr, int count)
+ int q_nr, unsigned int bufnr, unsigned int count)
{
struct qdio_irq *irq_ptr;
- if ((bufnr > QDIO_MAX_BUFFERS_PER_Q) ||
- (count > QDIO_MAX_BUFFERS_PER_Q) ||
- (q_nr >= QDIO_MAX_QUEUES_PER_IRQ))
+ if (bufnr >= QDIO_MAX_BUFFERS_PER_Q || count > QDIO_MAX_BUFFERS_PER_Q)
return -EINVAL;
- if (!count)
- return 0;
-
irq_ptr = cdev->private->qdio_data;
if (!irq_ptr)
return -ENODEV;
diff --git a/drivers/s390/cio/qdio_thinint.c b/drivers/s390/cio/qdio_thinint.c
index c655d01..981a77e 100644
--- a/drivers/s390/cio/qdio_thinint.c
+++ b/drivers/s390/cio/qdio_thinint.c
@@ -43,9 +43,6 @@
};
static struct indicator_t *q_indicators;
-static void tiqdio_tasklet_fn(unsigned long data);
-static DECLARE_TASKLET(tiqdio_tasklet, tiqdio_tasklet_fn, 0);
-
static int css_qdio_omit_svs;
static inline unsigned long do_clear_global_summary(void)
@@ -103,11 +100,6 @@
xchg(irq_ptr->dsci, 1);
}
-/*
- * we cannot stop the tiqdio tasklet here since it is for all
- * thinint qdio devices and it must run as long as there is a
- * thinint device left
- */
void tiqdio_remove_input_queues(struct qdio_irq *irq_ptr)
{
struct qdio_q *q;
@@ -126,79 +118,39 @@
}
}
-static inline int tiqdio_inbound_q_done(struct qdio_q *q)
-{
- unsigned char state = 0;
-
- if (!atomic_read(&q->nr_buf_used))
- return 1;
-
- qdio_siga_sync_q(q);
- get_buf_state(q, q->first_to_check, &state, 0);
-
- if (state == SLSB_P_INPUT_PRIMED)
- /* more work coming */
- return 0;
- return 1;
-}
-
static inline int shared_ind(struct qdio_irq *irq_ptr)
{
return irq_ptr->dsci == &q_indicators[TIQDIO_SHARED_IND].ind;
}
-static void __tiqdio_inbound_processing(struct qdio_q *q)
-{
- qdio_perf_stat_inc(&perf_stats.thinint_inbound);
- qdio_sync_after_thinint(q);
-
- /*
- * Maybe we have work on our outbound queues... at least
- * we have to check the PCI capable queues.
- */
- qdio_check_outbound_after_thinint(q);
-
- if (!qdio_inbound_q_moved(q))
- return;
-
- qdio_kick_handler(q);
-
- if (!tiqdio_inbound_q_done(q)) {
- qdio_perf_stat_inc(&perf_stats.thinint_inbound_loop);
- if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED))
- tasklet_schedule(&q->tasklet);
- }
-
- qdio_stop_polling(q);
- /*
- * We need to check again to not lose initiative after
- * resetting the ACK state.
- */
- if (!tiqdio_inbound_q_done(q)) {
- qdio_perf_stat_inc(&perf_stats.thinint_inbound_loop2);
- if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED))
- tasklet_schedule(&q->tasklet);
- }
-}
-
-void tiqdio_inbound_processing(unsigned long data)
-{
- struct qdio_q *q = (struct qdio_q *)data;
-
- __tiqdio_inbound_processing(q);
-}
-
-/* check for work on all inbound thinint queues */
-static void tiqdio_tasklet_fn(unsigned long data)
+/**
+ * tiqdio_thinint_handler - thin interrupt handler for qdio
+ * @ind: pointer to adapter local summary indicator
+ * @drv_data: NULL
+ */
+static void tiqdio_thinint_handler(void *ind, void *drv_data)
{
struct qdio_q *q;
- qdio_perf_stat_inc(&perf_stats.tasklet_thinint);
-again:
+ qdio_perf_stat_inc(&perf_stats.thin_int);
+
+ /*
+ * SVS only when needed: issue SVS to benefit from iqdio interrupt
+ * avoidance (SVS clears adapter interrupt suppression overwrite)
+ */
+ if (!css_qdio_omit_svs)
+ do_clear_global_summary();
+
+ /*
+ * reset local summary indicator (tiqdio_alsi) to stop adapter
+ * interrupts for now
+ */
+ xchg((u8 *)ind, 0);
/* protect tiq_list entries, only changed in activate or shutdown */
rcu_read_lock();
+ /* check for work on all inbound thinint queues */
list_for_each_entry_rcu(q, &tiq_list, entry)
/* only process queues from changed sets */
if (*q->irq_ptr->dsci) {
@@ -226,37 +178,6 @@
if (*tiqdio_alsi)
xchg(&q_indicators[TIQDIO_SHARED_IND].ind, 1);
}
-
- /* check for more work */
- if (*tiqdio_alsi) {
- xchg(tiqdio_alsi, 0);
- qdio_perf_stat_inc(&perf_stats.tasklet_thinint_loop);
- goto again;
- }
-}
-
-/**
- * tiqdio_thinint_handler - thin interrupt handler for qdio
- * @ind: pointer to adapter local summary indicator
- * @drv_data: NULL
- */
-static void tiqdio_thinint_handler(void *ind, void *drv_data)
-{
- qdio_perf_stat_inc(&perf_stats.thin_int);
-
- /*
- * SVS only when needed: issue SVS to benefit from iqdio interrupt
- * avoidance (SVS clears adapter interrupt suppression overwrite)
- */
- if (!css_qdio_omit_svs)
- do_clear_global_summary();
-
- /*
- * reset local summary indicator (tiqdio_alsi) to stop adapter
- * interrupts for now, the tasklet will clean all dsci's
- */
- xchg((u8 *)ind, 0);
- tasklet_hi_schedule(&tiqdio_tasklet);
}
static int set_subchannel_ind(struct qdio_irq *irq_ptr, int reset)
@@ -376,5 +297,4 @@
s390_unregister_adapter_interrupt(tiqdio_alsi, QDIO_AIRQ_ISC);
isc_unregister(QDIO_AIRQ_ISC);
}
- tasklet_kill(&tiqdio_tasklet);
}
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
index 9c14840..727a809 100644
--- a/drivers/s390/crypto/ap_bus.c
+++ b/drivers/s390/crypto/ap_bus.c
@@ -54,6 +54,12 @@
static void ap_poll_thread_stop(void);
static void ap_request_timeout(unsigned long);
static inline void ap_schedule_poll_timer(void);
+static int __ap_poll_device(struct ap_device *ap_dev, unsigned long *flags);
+static int ap_device_remove(struct device *dev);
+static int ap_device_probe(struct device *dev);
+static void ap_interrupt_handler(void *unused1, void *unused2);
+static void ap_reset(struct ap_device *ap_dev);
+static void ap_config_timeout(unsigned long ptr);
/*
* Module description.
@@ -101,6 +107,10 @@
* If z/VM change to 1500000 nanoseconds to adjust to z/VM polling.*/
static unsigned long long poll_timeout = 250000;
+/* Suspend flag */
+static int ap_suspend_flag;
+static struct bus_type ap_bus_type;
+
/**
* ap_using_interrupts() - Returns non-zero if interrupt support is
* available.
@@ -617,10 +627,79 @@
return retval;
}
+static int ap_bus_suspend(struct device *dev, pm_message_t state)
+{
+ struct ap_device *ap_dev = to_ap_dev(dev);
+ unsigned long flags;
+
+ if (!ap_suspend_flag) {
+ ap_suspend_flag = 1;
+
+		/* Disable scanning for devices, so that we do not scan
+		 * for them again after removing.
+ */
+ del_timer_sync(&ap_config_timer);
+ if (ap_work_queue != NULL) {
+ destroy_workqueue(ap_work_queue);
+ ap_work_queue = NULL;
+ }
+ tasklet_disable(&ap_tasklet);
+ }
+ /* Poll on the device until all requests are finished. */
+ do {
+ flags = 0;
+ __ap_poll_device(ap_dev, &flags);
+ } while ((flags & 1) || (flags & 2));
+
+ ap_device_remove(dev);
+ return 0;
+}
+
+static int ap_bus_resume(struct device *dev)
+{
+ int rc = 0;
+ struct ap_device *ap_dev = to_ap_dev(dev);
+
+ if (ap_suspend_flag) {
+ ap_suspend_flag = 0;
+ if (!ap_interrupts_available())
+ ap_interrupt_indicator = NULL;
+ ap_device_probe(dev);
+ ap_reset(ap_dev);
+ setup_timer(&ap_dev->timeout, ap_request_timeout,
+ (unsigned long) ap_dev);
+ ap_scan_bus(NULL);
+ init_timer(&ap_config_timer);
+ ap_config_timer.function = ap_config_timeout;
+ ap_config_timer.data = 0;
+ ap_config_timer.expires = jiffies + ap_config_time * HZ;
+ add_timer(&ap_config_timer);
+ ap_work_queue = create_singlethread_workqueue("kapwork");
+ if (!ap_work_queue)
+ return -ENOMEM;
+ tasklet_enable(&ap_tasklet);
+ if (!ap_using_interrupts())
+ ap_schedule_poll_timer();
+ else
+ tasklet_schedule(&ap_tasklet);
+ if (ap_thread_flag)
+ rc = ap_poll_thread_start();
+ } else {
+ ap_device_probe(dev);
+ ap_reset(ap_dev);
+ setup_timer(&ap_dev->timeout, ap_request_timeout,
+ (unsigned long) ap_dev);
+ }
+
+ return rc;
+}
+
static struct bus_type ap_bus_type = {
.name = "ap",
.match = &ap_bus_match,
.uevent = &ap_uevent,
+ .suspend = ap_bus_suspend,
+ .resume = ap_bus_resume
};
static int ap_device_probe(struct device *dev)
@@ -1066,7 +1145,7 @@
*/
static inline void ap_schedule_poll_timer(void)
{
- if (ap_using_interrupts())
+ if (ap_using_interrupts() || ap_suspend_flag)
return;
if (hrtimer_is_queued(&ap_poll_timer))
return;
@@ -1384,6 +1463,8 @@
set_user_nice(current, 19);
while (1) {
+ if (ap_suspend_flag)
+ return 0;
if (need_resched()) {
schedule();
continue;
@@ -1414,7 +1495,7 @@
{
int rc;
- if (ap_using_interrupts())
+ if (ap_using_interrupts() || ap_suspend_flag)
return 0;
mutex_lock(&ap_poll_thread_mutex);
if (!ap_poll_kthread) {
diff --git a/drivers/s390/net/netiucv.c b/drivers/s390/net/netiucv.c
index 52574ce..8c36eaf 100644
--- a/drivers/s390/net/netiucv.c
+++ b/drivers/s390/net/netiucv.c
@@ -1307,7 +1307,7 @@
*/
static int netiucv_pm_freeze(struct device *dev)
{
- struct netiucv_priv *priv = dev->driver_data;
+ struct netiucv_priv *priv = dev_get_drvdata(dev);
struct net_device *ndev = NULL;
int rc = 0;
@@ -1331,7 +1331,7 @@
*/
static int netiucv_pm_restore_thaw(struct device *dev)
{
- struct netiucv_priv *priv = dev->driver_data;
+ struct netiucv_priv *priv = dev_get_drvdata(dev);
struct net_device *ndev = NULL;
int rc = 0;
diff --git a/drivers/serial/Kconfig b/drivers/serial/Kconfig
index 1132c5c..037c1e0 100644
--- a/drivers/serial/Kconfig
+++ b/drivers/serial/Kconfig
@@ -1320,6 +1320,16 @@
If you have an SGI Altix with an IOC3 serial card,
say Y or M. Otherwise, say N.
+config SERIAL_MSM
+ bool "MSM on-chip serial port support"
+ depends on ARM && ARCH_MSM
+ select SERIAL_CORE
+
+config SERIAL_MSM_CONSOLE
+ bool "MSM serial console support"
+ depends on SERIAL_MSM=y
+ select SERIAL_CORE_CONSOLE
+
config SERIAL_NETX
tristate "NetX serial port support"
depends on ARM && ARCH_NETX
diff --git a/drivers/serial/Makefile b/drivers/serial/Makefile
index 45a8658..d5a2998 100644
--- a/drivers/serial/Makefile
+++ b/drivers/serial/Makefile
@@ -71,6 +71,7 @@
obj-$(CONFIG_SERIAL_SGI_IOC3) += ioc3_serial.o
obj-$(CONFIG_SERIAL_ATMEL) += atmel_serial.o
obj-$(CONFIG_SERIAL_UARTLITE) += uartlite.o
+obj-$(CONFIG_SERIAL_MSM) += msm_serial.o
obj-$(CONFIG_SERIAL_NETX) += netx-serial.o
obj-$(CONFIG_SERIAL_OF_PLATFORM) += of_serial.o
obj-$(CONFIG_SERIAL_OF_PLATFORM_NWPSERIAL) += nwpserial.o
diff --git a/drivers/serial/bfin_5xx.c b/drivers/serial/bfin_5xx.c
index e2f6b1b..b4a7650 100644
--- a/drivers/serial/bfin_5xx.c
+++ b/drivers/serial/bfin_5xx.c
@@ -38,6 +38,10 @@
#include <asm/cacheflush.h>
#endif
+#ifdef CONFIG_SERIAL_BFIN_MODULE
+# undef CONFIG_EARLY_PRINTK
+#endif
+
/* UART name and device definitions */
#define BFIN_SERIAL_NAME "ttyBF"
#define BFIN_SERIAL_MAJOR 204
@@ -1110,6 +1114,7 @@
bfin_serial_hw_init();
for (i = 0; i < nr_active_ports; i++) {
+ spin_lock_init(&bfin_serial_ports[i].port.lock);
bfin_serial_ports[i].port.uartclk = get_sclk();
bfin_serial_ports[i].port.fifosize = BFIN_UART_TX_FIFO_SIZE;
bfin_serial_ports[i].port.ops = &bfin_serial_pops;
diff --git a/drivers/serial/msm_serial.c b/drivers/serial/msm_serial.c
new file mode 100644
index 0000000..698048f
--- /dev/null
+++ b/drivers/serial/msm_serial.c
@@ -0,0 +1,772 @@
+/*
+ * drivers/serial/msm_serial.c - driver for msm7k serial device and console
+ *
+ * Copyright (C) 2007 Google, Inc.
+ * Author: Robert Love <rlove@google.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#if defined(CONFIG_SERIAL_MSM_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
+# define SUPPORT_SYSRQ
+#endif
+
+#include <linux/hrtimer.h>
+#include <linux/module.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/irq.h>
+#include <linux/init.h>
+#include <linux/console.h>
+#include <linux/tty.h>
+#include <linux/tty_flip.h>
+#include <linux/serial_core.h>
+#include <linux/serial.h>
+#include <linux/clk.h>
+#include <linux/platform_device.h>
+
+#include "msm_serial.h"
+
+struct msm_port {
+ struct uart_port uart;
+ char name[16];
+ struct clk *clk;
+ unsigned int imr;
+};
+
+#define UART_TO_MSM(uart_port) ((struct msm_port *) uart_port)
+
+static inline void msm_write(struct uart_port *port, unsigned int val,
+ unsigned int off)
+{
+ __raw_writel(val, port->membase + off);
+}
+
+static inline unsigned int msm_read(struct uart_port *port, unsigned int off)
+{
+ return __raw_readl(port->membase + off);
+}
+
+static void msm_stop_tx(struct uart_port *port)
+{
+ struct msm_port *msm_port = UART_TO_MSM(port);
+
+ msm_port->imr &= ~UART_IMR_TXLEV;
+ msm_write(port, msm_port->imr, UART_IMR);
+}
+
+static void msm_start_tx(struct uart_port *port)
+{
+ struct msm_port *msm_port = UART_TO_MSM(port);
+
+ msm_port->imr |= UART_IMR_TXLEV;
+ msm_write(port, msm_port->imr, UART_IMR);
+}
+
+static void msm_stop_rx(struct uart_port *port)
+{
+ struct msm_port *msm_port = UART_TO_MSM(port);
+
+ msm_port->imr &= ~(UART_IMR_RXLEV | UART_IMR_RXSTALE);
+ msm_write(port, msm_port->imr, UART_IMR);
+}
+
+static void msm_enable_ms(struct uart_port *port)
+{
+ struct msm_port *msm_port = UART_TO_MSM(port);
+
+ msm_port->imr |= UART_IMR_DELTA_CTS;
+ msm_write(port, msm_port->imr, UART_IMR);
+}
+
+static void handle_rx(struct uart_port *port)
+{
+ struct tty_struct *tty = port->info->port.tty;
+ unsigned int sr;
+
+ /*
+ * Handle overrun. My understanding of the hardware is that overrun
+ * is not tied to the RX buffer, so we handle the case out of band.
+ */
+ if ((msm_read(port, UART_SR) & UART_SR_OVERRUN)) {
+ port->icount.overrun++;
+ tty_insert_flip_char(tty, 0, TTY_OVERRUN);
+ msm_write(port, UART_CR_CMD_RESET_ERR, UART_CR);
+ }
+
+ /* and now the main RX loop */
+ while ((sr = msm_read(port, UART_SR)) & UART_SR_RX_READY) {
+ unsigned int c;
+ char flag = TTY_NORMAL;
+
+ c = msm_read(port, UART_RF);
+
+ if (sr & UART_SR_RX_BREAK) {
+ port->icount.brk++;
+ if (uart_handle_break(port))
+ continue;
+ } else if (sr & UART_SR_PAR_FRAME_ERR) {
+ port->icount.frame++;
+ } else {
+ port->icount.rx++;
+ }
+
+		/* Mask conditions we're ignoring. */
+ sr &= port->read_status_mask;
+
+ if (sr & UART_SR_RX_BREAK) {
+ flag = TTY_BREAK;
+ } else if (sr & UART_SR_PAR_FRAME_ERR) {
+ flag = TTY_FRAME;
+ }
+
+ if (!uart_handle_sysrq_char(port, c))
+ tty_insert_flip_char(tty, c, flag);
+ }
+
+ tty_flip_buffer_push(tty);
+}
+
+static void handle_tx(struct uart_port *port)
+{
+ struct circ_buf *xmit = &port->info->xmit;
+ struct msm_port *msm_port = UART_TO_MSM(port);
+ int sent_tx;
+
+ if (port->x_char) {
+ msm_write(port, port->x_char, UART_TF);
+ port->icount.tx++;
+ port->x_char = 0;
+ }
+
+ while (msm_read(port, UART_SR) & UART_SR_TX_READY) {
+ if (uart_circ_empty(xmit)) {
+ /* disable tx interrupts */
+ msm_port->imr &= ~UART_IMR_TXLEV;
+ msm_write(port, msm_port->imr, UART_IMR);
+ break;
+ }
+
+ msm_write(port, xmit->buf[xmit->tail], UART_TF);
+
+ xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
+ port->icount.tx++;
+ sent_tx = 1;
+ }
+
+ if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
+ uart_write_wakeup(port);
+}
+
+static void handle_delta_cts(struct uart_port *port)
+{
+ msm_write(port, UART_CR_CMD_RESET_CTS, UART_CR);
+ port->icount.cts++;
+ wake_up_interruptible(&port->info->delta_msr_wait);
+}
+
+static irqreturn_t msm_irq(int irq, void *dev_id)
+{
+ struct uart_port *port = dev_id;
+ struct msm_port *msm_port = UART_TO_MSM(port);
+ unsigned int misr;
+
+ spin_lock(&port->lock);
+ misr = msm_read(port, UART_MISR);
+ msm_write(port, 0, UART_IMR); /* disable interrupt */
+
+ if (misr & (UART_IMR_RXLEV | UART_IMR_RXSTALE))
+ handle_rx(port);
+ if (misr & UART_IMR_TXLEV)
+ handle_tx(port);
+ if (misr & UART_IMR_DELTA_CTS)
+ handle_delta_cts(port);
+
+ msm_write(port, msm_port->imr, UART_IMR); /* restore interrupt */
+ spin_unlock(&port->lock);
+
+ return IRQ_HANDLED;
+}
+
+static unsigned int msm_tx_empty(struct uart_port *port)
+{
+ return (msm_read(port, UART_SR) & UART_SR_TX_EMPTY) ? TIOCSER_TEMT : 0;
+}
+
+static unsigned int msm_get_mctrl(struct uart_port *port)
+{
+ return TIOCM_CAR | TIOCM_CTS | TIOCM_DSR | TIOCM_RTS;
+}
+
+static void msm_set_mctrl(struct uart_port *port, unsigned int mctrl)
+{
+ unsigned int mr;
+
+ mr = msm_read(port, UART_MR1);
+
+ if (!(mctrl & TIOCM_RTS)) {
+ mr &= ~UART_MR1_RX_RDY_CTL;
+ msm_write(port, mr, UART_MR1);
+ msm_write(port, UART_CR_CMD_RESET_RFR, UART_CR);
+ } else {
+ mr |= UART_MR1_RX_RDY_CTL;
+ msm_write(port, mr, UART_MR1);
+ }
+}
+
+static void msm_break_ctl(struct uart_port *port, int break_ctl)
+{
+ if (break_ctl)
+ msm_write(port, UART_CR_CMD_START_BREAK, UART_CR);
+ else
+ msm_write(port, UART_CR_CMD_STOP_BREAK, UART_CR);
+}
+
+static int msm_set_baud_rate(struct uart_port *port, unsigned int baud)
+{
+ unsigned int baud_code, rxstale, watermark;
+
+ switch (baud) {
+ case 300:
+ baud_code = UART_CSR_300;
+ rxstale = 1;
+ break;
+ case 600:
+ baud_code = UART_CSR_600;
+ rxstale = 1;
+ break;
+ case 1200:
+ baud_code = UART_CSR_1200;
+ rxstale = 1;
+ break;
+ case 2400:
+ baud_code = UART_CSR_2400;
+ rxstale = 1;
+ break;
+ case 4800:
+ baud_code = UART_CSR_4800;
+ rxstale = 1;
+ break;
+ case 9600:
+ baud_code = UART_CSR_9600;
+ rxstale = 2;
+ break;
+ case 14400:
+ baud_code = UART_CSR_14400;
+ rxstale = 3;
+ break;
+ case 19200:
+ baud_code = UART_CSR_19200;
+ rxstale = 4;
+ break;
+ case 28800:
+ baud_code = UART_CSR_28800;
+ rxstale = 6;
+ break;
+ case 38400:
+ baud_code = UART_CSR_38400;
+ rxstale = 8;
+ break;
+ case 57600:
+ baud_code = UART_CSR_57600;
+ rxstale = 16;
+ break;
+ case 115200:
+ default:
+ baud_code = UART_CSR_115200;
+ baud = 115200;
+ rxstale = 31;
+ break;
+ }
+
+ msm_write(port, baud_code, UART_CSR);
+
+ /* RX stale watermark */
+ watermark = UART_IPR_STALE_LSB & rxstale;
+ watermark |= UART_IPR_RXSTALE_LAST;
+ watermark |= UART_IPR_STALE_TIMEOUT_MSB & (rxstale << 2);
+ msm_write(port, watermark, UART_IPR);
+
+ /* set RX watermark */
+ watermark = (port->fifosize * 3) / 4;
+ msm_write(port, watermark, UART_RFWR);
+
+ /* set TX watermark */
+ msm_write(port, 10, UART_TFWR);
+
+ return baud;
+}
+
+static void msm_reset(struct uart_port *port)
+{
+ /* reset everything */
+ msm_write(port, UART_CR_CMD_RESET_RX, UART_CR);
+ msm_write(port, UART_CR_CMD_RESET_TX, UART_CR);
+ msm_write(port, UART_CR_CMD_RESET_ERR, UART_CR);
+ msm_write(port, UART_CR_CMD_RESET_BREAK_INT, UART_CR);
+ msm_write(port, UART_CR_CMD_RESET_CTS, UART_CR);
+ msm_write(port, UART_CR_CMD_SET_RFR, UART_CR);
+}
+
+static void msm_init_clock(struct uart_port *port)
+{
+ struct msm_port *msm_port = UART_TO_MSM(port);
+
+ clk_enable(msm_port->clk);
+
+ msm_write(port, 0xC0, UART_MREG);
+ msm_write(port, 0xB2, UART_NREG);
+ msm_write(port, 0x7D, UART_DREG);
+ msm_write(port, 0x1C, UART_MNDREG);
+}
+
+static int msm_startup(struct uart_port *port)
+{
+ struct msm_port *msm_port = UART_TO_MSM(port);
+ unsigned int data, rfr_level;
+ int ret;
+
+ snprintf(msm_port->name, sizeof(msm_port->name),
+ "msm_serial%d", port->line);
+
+ ret = request_irq(port->irq, msm_irq, IRQF_TRIGGER_HIGH,
+ msm_port->name, port);
+ if (unlikely(ret))
+ return ret;
+
+ msm_init_clock(port);
+
+ if (likely(port->fifosize > 12))
+ rfr_level = port->fifosize - 12;
+ else
+ rfr_level = port->fifosize;
+
+ /* set automatic RFR level */
+ data = msm_read(port, UART_MR1);
+ data &= ~UART_MR1_AUTO_RFR_LEVEL1;
+ data &= ~UART_MR1_AUTO_RFR_LEVEL0;
+ data |= UART_MR1_AUTO_RFR_LEVEL1 & (rfr_level << 2);
+ data |= UART_MR1_AUTO_RFR_LEVEL0 & rfr_level;
+ msm_write(port, data, UART_MR1);
+
+ /* make sure that RXSTALE count is non-zero */
+ data = msm_read(port, UART_IPR);
+ if (unlikely(!data)) {
+ data |= UART_IPR_RXSTALE_LAST;
+ data |= UART_IPR_STALE_LSB;
+ msm_write(port, data, UART_IPR);
+ }
+
+ msm_reset(port);
+
+ msm_write(port, 0x05, UART_CR); /* enable TX & RX */
+
+ /* turn on RX and CTS interrupts */
+ msm_port->imr = UART_IMR_RXLEV | UART_IMR_RXSTALE |
+ UART_IMR_CURRENT_CTS;
+ msm_write(port, msm_port->imr, UART_IMR);
+
+ return 0;
+}
+
+static void msm_shutdown(struct uart_port *port)
+{
+ struct msm_port *msm_port = UART_TO_MSM(port);
+
+ msm_port->imr = 0;
+ msm_write(port, 0, UART_IMR); /* disable interrupts */
+
+ clk_disable(msm_port->clk);
+
+ free_irq(port->irq, port);
+}
+
+static void msm_set_termios(struct uart_port *port, struct ktermios *termios,
+ struct ktermios *old)
+{
+ unsigned long flags;
+ unsigned int baud, mr;
+
+ spin_lock_irqsave(&port->lock, flags);
+
+ /* calculate and set baud rate */
+ baud = uart_get_baud_rate(port, termios, old, 300, 115200);
+ baud = msm_set_baud_rate(port, baud);
+ if (tty_termios_baud_rate(termios))
+ tty_termios_encode_baud_rate(termios, baud, baud);
+
+ /* calculate parity */
+ mr = msm_read(port, UART_MR2);
+ mr &= ~UART_MR2_PARITY_MODE;
+ if (termios->c_cflag & PARENB) {
+ if (termios->c_cflag & PARODD)
+ mr |= UART_MR2_PARITY_MODE_ODD;
+ else if (termios->c_cflag & CMSPAR)
+ mr |= UART_MR2_PARITY_MODE_SPACE;
+ else
+ mr |= UART_MR2_PARITY_MODE_EVEN;
+ }
+
+ /* calculate bits per char */
+ mr &= ~UART_MR2_BITS_PER_CHAR;
+ switch (termios->c_cflag & CSIZE) {
+ case CS5:
+ mr |= UART_MR2_BITS_PER_CHAR_5;
+ break;
+ case CS6:
+ mr |= UART_MR2_BITS_PER_CHAR_6;
+ break;
+ case CS7:
+ mr |= UART_MR2_BITS_PER_CHAR_7;
+ break;
+ case CS8:
+ default:
+ mr |= UART_MR2_BITS_PER_CHAR_8;
+ break;
+ }
+
+ /* calculate stop bits */
+ mr &= ~(UART_MR2_STOP_BIT_LEN_ONE | UART_MR2_STOP_BIT_LEN_TWO);
+ if (termios->c_cflag & CSTOPB)
+ mr |= UART_MR2_STOP_BIT_LEN_TWO;
+ else
+ mr |= UART_MR2_STOP_BIT_LEN_ONE;
+
+ /* set parity, bits per char, and stop bit */
+ msm_write(port, mr, UART_MR2);
+
+ /* calculate and set hardware flow control */
+ mr = msm_read(port, UART_MR1);
+ mr &= ~(UART_MR1_CTS_CTL | UART_MR1_RX_RDY_CTL);
+ if (termios->c_cflag & CRTSCTS) {
+ mr |= UART_MR1_CTS_CTL;
+ mr |= UART_MR1_RX_RDY_CTL;
+ }
+ msm_write(port, mr, UART_MR1);
+
+ /* Configure status bits to ignore based on termio flags. */
+ port->read_status_mask = 0;
+ if (termios->c_iflag & INPCK)
+ port->read_status_mask |= UART_SR_PAR_FRAME_ERR;
+ if (termios->c_iflag & (BRKINT | PARMRK))
+ port->read_status_mask |= UART_SR_RX_BREAK;
+
+ uart_update_timeout(port, termios->c_cflag, baud);
+
+ spin_unlock_irqrestore(&port->lock, flags);
+}
+
+static const char *msm_type(struct uart_port *port)
+{
+ return "MSM";
+}
+
+static void msm_release_port(struct uart_port *port)
+{
+ struct platform_device *pdev = to_platform_device(port->dev);
+ struct resource *resource;
+ resource_size_t size;
+
+ resource = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (unlikely(!resource))
+ return;
+ size = resource->end - resource->start + 1;
+
+ release_mem_region(port->mapbase, size);
+ iounmap(port->membase);
+ port->membase = NULL;
+}
+
+static int msm_request_port(struct uart_port *port)
+{
+ struct platform_device *pdev = to_platform_device(port->dev);
+ struct resource *resource;
+ resource_size_t size;
+
+ resource = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (unlikely(!resource))
+ return -ENXIO;
+ size = resource->end - resource->start + 1;
+
+ if (unlikely(!request_mem_region(port->mapbase, size, "msm_serial")))
+ return -EBUSY;
+
+ port->membase = ioremap(port->mapbase, size);
+ if (!port->membase) {
+ release_mem_region(port->mapbase, size);
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+static void msm_config_port(struct uart_port *port, int flags)
+{
+ if (flags & UART_CONFIG_TYPE) {
+ port->type = PORT_MSM;
+ msm_request_port(port);
+ }
+}
+
+static int msm_verify_port(struct uart_port *port, struct serial_struct *ser)
+{
+ if (unlikely(ser->type != PORT_UNKNOWN && ser->type != PORT_MSM))
+ return -EINVAL;
+ if (unlikely(port->irq != ser->irq))
+ return -EINVAL;
+ return 0;
+}
+
+static void msm_power(struct uart_port *port, unsigned int state,
+ unsigned int oldstate)
+{
+ struct msm_port *msm_port = UART_TO_MSM(port);
+
+ switch (state) {
+ case 0:
+ clk_enable(msm_port->clk);
+ break;
+ case 3:
+ clk_disable(msm_port->clk);
+ break;
+ default:
+ printk(KERN_ERR "msm_serial: Unknown PM state %d\n", state);
+ }
+}
+
+static struct uart_ops msm_uart_pops = {
+ .tx_empty = msm_tx_empty,
+ .set_mctrl = msm_set_mctrl,
+ .get_mctrl = msm_get_mctrl,
+ .stop_tx = msm_stop_tx,
+ .start_tx = msm_start_tx,
+ .stop_rx = msm_stop_rx,
+ .enable_ms = msm_enable_ms,
+ .break_ctl = msm_break_ctl,
+ .startup = msm_startup,
+ .shutdown = msm_shutdown,
+ .set_termios = msm_set_termios,
+ .type = msm_type,
+ .release_port = msm_release_port,
+ .request_port = msm_request_port,
+ .config_port = msm_config_port,
+ .verify_port = msm_verify_port,
+ .pm = msm_power,
+};
+
+static struct msm_port msm_uart_ports[] = {
+ {
+ .uart = {
+ .iotype = UPIO_MEM,
+ .ops = &msm_uart_pops,
+ .flags = UPF_BOOT_AUTOCONF,
+ .fifosize = 512,
+ .line = 0,
+ },
+ },
+ {
+ .uart = {
+ .iotype = UPIO_MEM,
+ .ops = &msm_uart_pops,
+ .flags = UPF_BOOT_AUTOCONF,
+ .fifosize = 512,
+ .line = 1,
+ },
+ },
+ {
+ .uart = {
+ .iotype = UPIO_MEM,
+ .ops = &msm_uart_pops,
+ .flags = UPF_BOOT_AUTOCONF,
+ .fifosize = 64,
+ .line = 2,
+ },
+ },
+};
+
+#define UART_NR ARRAY_SIZE(msm_uart_ports)
+
+static inline struct uart_port *get_port_from_line(unsigned int line)
+{
+ return &msm_uart_ports[line].uart;
+}
+
+#ifdef CONFIG_SERIAL_MSM_CONSOLE
+
+static void msm_console_putchar(struct uart_port *port, int c)
+{
+ while (!(msm_read(port, UART_SR) & UART_SR_TX_READY))
+ ;
+ msm_write(port, c, UART_TF);
+}
+
+static void msm_console_write(struct console *co, const char *s,
+ unsigned int count)
+{
+ struct uart_port *port;
+ struct msm_port *msm_port;
+
+ BUG_ON(co->index < 0 || co->index >= UART_NR);
+
+ port = get_port_from_line(co->index);
+ msm_port = UART_TO_MSM(port);
+
+ spin_lock(&port->lock);
+ uart_console_write(port, s, count, msm_console_putchar);
+ spin_unlock(&port->lock);
+}
+
+static int __init msm_console_setup(struct console *co, char *options)
+{
+ struct uart_port *port;
+ int baud, flow, bits, parity;
+
+ if (unlikely(co->index >= UART_NR || co->index < 0))
+ return -ENXIO;
+
+ port = get_port_from_line(co->index);
+
+ if (unlikely(!port->membase))
+ return -ENXIO;
+
+ port->cons = co;
+
+ msm_init_clock(port);
+
+ if (options)
+ uart_parse_options(options, &baud, &parity, &bits, &flow);
+
+ bits = 8;
+ parity = 'n';
+ flow = 'n';
+ msm_write(port, UART_MR2_BITS_PER_CHAR_8 | UART_MR2_STOP_BIT_LEN_ONE,
+ UART_MR2); /* 8N1 */
+
+ if (baud < 300 || baud > 115200)
+ baud = 115200;
+ msm_set_baud_rate(port, baud);
+
+ msm_reset(port);
+
+ printk(KERN_INFO "msm_serial: console setup on port #%d\n", port->line);
+
+ return uart_set_options(port, co, baud, parity, bits, flow);
+}
+
+static struct uart_driver msm_uart_driver;
+
+static struct console msm_console = {
+ .name = "ttyMSM",
+ .write = msm_console_write,
+ .device = uart_console_device,
+ .setup = msm_console_setup,
+ .flags = CON_PRINTBUFFER,
+ .index = -1,
+ .data = &msm_uart_driver,
+};
+
+#define MSM_CONSOLE (&msm_console)
+
+#else
+#define MSM_CONSOLE NULL
+#endif
+
+static struct uart_driver msm_uart_driver = {
+ .owner = THIS_MODULE,
+ .driver_name = "msm_serial",
+ .dev_name = "ttyMSM",
+ .nr = UART_NR,
+ .cons = MSM_CONSOLE,
+};
+
+static int __init msm_serial_probe(struct platform_device *pdev)
+{
+ struct msm_port *msm_port;
+ struct resource *resource;
+ struct uart_port *port;
+
+ if (unlikely(pdev->id < 0 || pdev->id >= UART_NR))
+ return -ENXIO;
+
+ printk(KERN_INFO "msm_serial: detected port #%d\n", pdev->id);
+
+ port = get_port_from_line(pdev->id);
+ port->dev = &pdev->dev;
+ msm_port = UART_TO_MSM(port);
+
+ msm_port->clk = clk_get(&pdev->dev, "uart_clk");
+ if (unlikely(IS_ERR(msm_port->clk)))
+ return PTR_ERR(msm_port->clk);
+ port->uartclk = clk_get_rate(msm_port->clk);
+
+ resource = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (unlikely(!resource))
+ return -ENXIO;
+ port->mapbase = resource->start;
+
+ port->irq = platform_get_irq(pdev, 0);
+ if (unlikely(port->irq < 0))
+ return -ENXIO;
+
+ platform_set_drvdata(pdev, port);
+
+ return uart_add_one_port(&msm_uart_driver, port);
+}
+
+static int __devexit msm_serial_remove(struct platform_device *pdev)
+{
+ struct msm_port *msm_port = platform_get_drvdata(pdev);
+
+ clk_put(msm_port->clk);
+
+ return 0;
+}
+
+static struct platform_driver msm_platform_driver = {
+ .probe = msm_serial_probe,
+ .remove = msm_serial_remove,
+ .driver = {
+ .name = "msm_serial",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init msm_serial_init(void)
+{
+ int ret;
+
+ ret = uart_register_driver(&msm_uart_driver);
+ if (unlikely(ret))
+ return ret;
+
+ ret = platform_driver_probe(&msm_platform_driver, msm_serial_probe);
+ if (unlikely(ret))
+ uart_unregister_driver(&msm_uart_driver);
+
+ printk(KERN_INFO "msm_serial: driver initialized\n");
+
+ return ret;
+}
+
+static void __exit msm_serial_exit(void)
+{
+#ifdef CONFIG_SERIAL_MSM_CONSOLE
+ unregister_console(&msm_console);
+#endif
+ platform_driver_unregister(&msm_platform_driver);
+ uart_unregister_driver(&msm_uart_driver);
+}
+
+module_init(msm_serial_init);
+module_exit(msm_serial_exit);
+
+MODULE_AUTHOR("Robert Love <rlove@google.com>");
+MODULE_DESCRIPTION("Driver for msm7x serial device");
+MODULE_LICENSE("GPL");
diff --git a/drivers/serial/msm_serial.h b/drivers/serial/msm_serial.h
new file mode 100644
index 0000000..689f1fa
--- /dev/null
+++ b/drivers/serial/msm_serial.h
@@ -0,0 +1,117 @@
+/*
+ * drivers/serial/msm_serial.h
+ *
+ * Copyright (C) 2007 Google, Inc.
+ * Author: Robert Love <rlove@google.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __DRIVERS_SERIAL_MSM_SERIAL_H
+#define __DRIVERS_SERIAL_MSM_SERIAL_H
+
+#define UART_MR1 0x0000
+
+#define UART_MR1_AUTO_RFR_LEVEL0 0x3F
+#define UART_MR1_AUTO_RFR_LEVEL1 0x3FF00
+#define UART_MR1_RX_RDY_CTL (1 << 7)
+#define UART_MR1_CTS_CTL (1 << 6)
+
+#define UART_MR2 0x0004
+#define UART_MR2_ERROR_MODE (1 << 6)
+#define UART_MR2_BITS_PER_CHAR 0x30
+#define UART_MR2_BITS_PER_CHAR_5 (0x0 << 4)
+#define UART_MR2_BITS_PER_CHAR_6 (0x1 << 4)
+#define UART_MR2_BITS_PER_CHAR_7 (0x2 << 4)
+#define UART_MR2_BITS_PER_CHAR_8 (0x3 << 4)
+#define UART_MR2_STOP_BIT_LEN_ONE (0x1 << 2)
+#define UART_MR2_STOP_BIT_LEN_TWO (0x3 << 2)
+#define UART_MR2_PARITY_MODE_NONE 0x0
+#define UART_MR2_PARITY_MODE_ODD 0x1
+#define UART_MR2_PARITY_MODE_EVEN 0x2
+#define UART_MR2_PARITY_MODE_SPACE 0x3
+#define UART_MR2_PARITY_MODE 0x3
+
+#define UART_CSR 0x0008
+#define UART_CSR_115200 0xFF
+#define UART_CSR_57600 0xEE
+#define UART_CSR_38400 0xDD
+#define UART_CSR_28800 0xCC
+#define UART_CSR_19200 0xBB
+#define UART_CSR_14400 0xAA
+#define UART_CSR_9600 0x99
+#define UART_CSR_4800 0x77
+#define UART_CSR_2400 0x55
+#define UART_CSR_1200 0x44
+#define UART_CSR_600 0x33
+#define UART_CSR_300 0x22
+
+#define UART_TF 0x000C
+
+#define UART_CR 0x0010
+#define UART_CR_CMD_NULL (0 << 4)
+#define UART_CR_CMD_RESET_RX (1 << 4)
+#define UART_CR_CMD_RESET_TX (2 << 4)
+#define UART_CR_CMD_RESET_ERR (3 << 4)
+#define UART_CR_CMD_RESET_BREAK_INT (4 << 4)
+#define UART_CR_CMD_START_BREAK (5 << 4)
+#define UART_CR_CMD_STOP_BREAK (6 << 4)
+#define UART_CR_CMD_RESET_CTS (7 << 4)
+#define UART_CR_CMD_PACKET_MODE (9 << 4)
+#define UART_CR_CMD_MODE_RESET (12 << 4)
+#define UART_CR_CMD_SET_RFR (13 << 4)
+#define UART_CR_CMD_RESET_RFR (14 << 4)
+#define UART_CR_TX_DISABLE (1 << 3)
+#define UART_CR_TX_ENABLE (1 << 3)
+#define UART_CR_RX_DISABLE (1 << 3)
+#define UART_CR_RX_ENABLE (1 << 3)
+
+#define UART_IMR 0x0014
+#define UART_IMR_TXLEV (1 << 0)
+#define UART_IMR_RXSTALE (1 << 3)
+#define UART_IMR_RXLEV (1 << 4)
+#define UART_IMR_DELTA_CTS (1 << 5)
+#define UART_IMR_CURRENT_CTS (1 << 6)
+
+#define UART_IPR_RXSTALE_LAST 0x20
+#define UART_IPR_STALE_LSB 0x1F
+#define UART_IPR_STALE_TIMEOUT_MSB 0x3FF80
+
+#define UART_IPR 0x0018
+#define UART_TFWR 0x001C
+#define UART_RFWR 0x0020
+#define UART_HCR 0x0024
+
+#define UART_MREG 0x0028
+#define UART_NREG 0x002C
+#define UART_DREG 0x0030
+#define UART_MNDREG 0x0034
+#define UART_IRDA 0x0038
+#define UART_MISR_MODE 0x0040
+#define UART_MISR_RESET 0x0044
+#define UART_MISR_EXPORT 0x0048
+#define UART_MISR_VAL 0x004C
+#define UART_TEST_CTRL 0x0050
+
+#define UART_SR 0x0008
+#define UART_SR_HUNT_CHAR (1 << 7)
+#define UART_SR_RX_BREAK (1 << 6)
+#define UART_SR_PAR_FRAME_ERR (1 << 5)
+#define UART_SR_OVERRUN (1 << 4)
+#define UART_SR_TX_EMPTY (1 << 3)
+#define UART_SR_TX_READY (1 << 2)
+#define UART_SR_RX_FULL (1 << 1)
+#define UART_SR_RX_READY (1 << 0)
+
+#define UART_RF 0x000C
+#define UART_MISR 0x0010
+#define UART_ISR 0x0014
+
+#endif /* __DRIVERS_SERIAL_MSM_SERIAL_H */
diff --git a/drivers/serial/s3c2400.c b/drivers/serial/s3c2400.c
index 4873f29..fb00ed5 100644
--- a/drivers/serial/s3c2400.c
+++ b/drivers/serial/s3c2400.c
@@ -78,7 +78,7 @@
static struct platform_driver s3c2400_serial_drv = {
.probe = s3c2400_serial_probe,
- .remove = s3c24xx_serial_remove,
+ .remove = __devexit_p(s3c24xx_serial_remove),
.driver = {
.name = "s3c2400-uart",
.owner = THIS_MODULE,
diff --git a/drivers/serial/s3c2410.c b/drivers/serial/s3c2410.c
index 87c182e..b5d7cbc 100644
--- a/drivers/serial/s3c2410.c
+++ b/drivers/serial/s3c2410.c
@@ -90,7 +90,7 @@
static struct platform_driver s3c2410_serial_drv = {
.probe = s3c2410_serial_probe,
- .remove = s3c24xx_serial_remove,
+ .remove = __devexit_p(s3c24xx_serial_remove),
.driver = {
.name = "s3c2410-uart",
.owner = THIS_MODULE,
diff --git a/drivers/serial/s3c2412.c b/drivers/serial/s3c2412.c
index fd017b3..11dcb90 100644
--- a/drivers/serial/s3c2412.c
+++ b/drivers/serial/s3c2412.c
@@ -123,7 +123,7 @@
static struct platform_driver s3c2412_serial_drv = {
.probe = s3c2412_serial_probe,
- .remove = s3c24xx_serial_remove,
+ .remove = __devexit_p(s3c24xx_serial_remove),
.driver = {
.name = "s3c2412-uart",
.owner = THIS_MODULE,
diff --git a/drivers/serial/s3c2440.c b/drivers/serial/s3c2440.c
index 29cbb0a..06c5b0c 100644
--- a/drivers/serial/s3c2440.c
+++ b/drivers/serial/s3c2440.c
@@ -153,7 +153,7 @@
static struct platform_driver s3c2440_serial_drv = {
.probe = s3c2440_serial_probe,
- .remove = s3c24xx_serial_remove,
+ .remove = __devexit_p(s3c24xx_serial_remove),
.driver = {
.name = "s3c2440-uart",
.owner = THIS_MODULE,
diff --git a/drivers/serial/s3c24a0.c b/drivers/serial/s3c24a0.c
index ebf2fd3..786a067 100644
--- a/drivers/serial/s3c24a0.c
+++ b/drivers/serial/s3c24a0.c
@@ -94,7 +94,7 @@
static struct platform_driver s3c24a0_serial_drv = {
.probe = s3c24a0_serial_probe,
- .remove = s3c24xx_serial_remove,
+ .remove = __devexit_p(s3c24xx_serial_remove),
.driver = {
.name = "s3c24a0-uart",
.owner = THIS_MODULE,
diff --git a/drivers/serial/s3c6400.c b/drivers/serial/s3c6400.c
index 3e37852..48f1a37 100644
--- a/drivers/serial/s3c6400.c
+++ b/drivers/serial/s3c6400.c
@@ -124,7 +124,7 @@
static struct platform_driver s3c6400_serial_drv = {
.probe = s3c6400_serial_probe,
- .remove = s3c24xx_serial_remove,
+ .remove = __devexit_p(s3c24xx_serial_remove),
.driver = {
.name = "s3c6400-uart",
.owner = THIS_MODULE,
diff --git a/drivers/serial/samsung.c b/drivers/serial/samsung.c
index 93b5d75..c8851a0 100644
--- a/drivers/serial/samsung.c
+++ b/drivers/serial/samsung.c
@@ -1174,7 +1174,7 @@
EXPORT_SYMBOL_GPL(s3c24xx_serial_probe);
-int s3c24xx_serial_remove(struct platform_device *dev)
+int __devexit s3c24xx_serial_remove(struct platform_device *dev)
{
struct uart_port *port = s3c24xx_dev_to_port(&dev->dev);
diff --git a/drivers/serial/samsung.h b/drivers/serial/samsung.h
index 7afb948..d3fe315 100644
--- a/drivers/serial/samsung.h
+++ b/drivers/serial/samsung.h
@@ -72,7 +72,7 @@
extern int s3c24xx_serial_probe(struct platform_device *dev,
struct s3c24xx_uart_info *uart);
-extern int s3c24xx_serial_remove(struct platform_device *dev);
+extern int __devexit s3c24xx_serial_remove(struct platform_device *dev);
extern int s3c24xx_serial_initconsole(struct platform_driver *drv,
struct s3c24xx_uart_info *uart);
diff --git a/drivers/serial/sb1250-duart.c b/drivers/serial/sb1250-duart.c
index a4fb343a..319e8b8 100644
--- a/drivers/serial/sb1250-duart.c
+++ b/drivers/serial/sb1250-duart.c
@@ -204,7 +204,7 @@
{
int loops = 10000;
- while (sbd_receive_ready(sport) && loops--)
+ while (sbd_receive_ready(sport) && --loops)
read_sbdchn(sport, R_DUART_RX_HOLD);
return loops;
}
@@ -218,7 +218,7 @@
{
int loops = 10000;
- while (!sbd_transmit_ready(sport) && loops--)
+ while (!sbd_transmit_ready(sport) && --loops)
udelay(2);
return loops;
}
@@ -232,7 +232,7 @@
{
int loops = 10000;
- while (!sbd_transmit_empty(sport) && loops--)
+ while (!sbd_transmit_empty(sport) && --loops)
udelay(2);
return loops;
}
diff --git a/drivers/serial/sunhv.c b/drivers/serial/sunhv.c
index a94a2ab..1df5325 100644
--- a/drivers/serial/sunhv.c
+++ b/drivers/serial/sunhv.c
@@ -461,7 +461,7 @@
break;
udelay(1);
}
- if (limit <= 0)
+ if (limit < 0)
break;
page_bytes -= written;
ra += written;
diff --git a/drivers/serial/timbuart.c b/drivers/serial/timbuart.c
index ac9e5d5..063a313 100644
--- a/drivers/serial/timbuart.c
+++ b/drivers/serial/timbuart.c
@@ -33,29 +33,29 @@
struct uart_port port;
struct tasklet_struct tasklet;
int usedma;
- u8 last_ier;
+ u32 last_ier;
struct platform_device *dev;
};
static int baudrates[] = {9600, 19200, 38400, 57600, 115200, 230400, 460800,
921600, 1843200, 3250000};
-static void timbuart_mctrl_check(struct uart_port *port, u8 isr, u8 *ier);
+static void timbuart_mctrl_check(struct uart_port *port, u32 isr, u32 *ier);
static irqreturn_t timbuart_handleinterrupt(int irq, void *devid);
static void timbuart_stop_rx(struct uart_port *port)
{
/* spin lock held by upper layer, disable all RX interrupts */
- u8 ier = ioread8(port->membase + TIMBUART_IER) & ~RXFLAGS;
- iowrite8(ier, port->membase + TIMBUART_IER);
+ u32 ier = ioread32(port->membase + TIMBUART_IER) & ~RXFLAGS;
+ iowrite32(ier, port->membase + TIMBUART_IER);
}
static void timbuart_stop_tx(struct uart_port *port)
{
/* spinlock held by upper layer, disable TX interrupt */
- u8 ier = ioread8(port->membase + TIMBUART_IER) & ~TXBAE;
- iowrite8(ier, port->membase + TIMBUART_IER);
+ u32 ier = ioread32(port->membase + TIMBUART_IER) & ~TXBAE;
+ iowrite32(ier, port->membase + TIMBUART_IER);
}
static void timbuart_start_tx(struct uart_port *port)
@@ -72,14 +72,14 @@
u8 ctl = ioread8(port->membase + TIMBUART_CTRL) | TIMBUART_CTRL_FLSHTX;
iowrite8(ctl, port->membase + TIMBUART_CTRL);
- iowrite8(TXBF, port->membase + TIMBUART_ISR);
+ iowrite32(TXBF, port->membase + TIMBUART_ISR);
}
static void timbuart_rx_chars(struct uart_port *port)
{
struct tty_struct *tty = port->info->port.tty;
- while (ioread8(port->membase + TIMBUART_ISR) & RXDP) {
+ while (ioread32(port->membase + TIMBUART_ISR) & RXDP) {
u8 ch = ioread8(port->membase + TIMBUART_RXFIFO);
port->icount.rx++;
tty_insert_flip_char(tty, ch, TTY_NORMAL);
@@ -97,7 +97,7 @@
{
struct circ_buf *xmit = &port->info->xmit;
- while (!(ioread8(port->membase + TIMBUART_ISR) & TXBF) &&
+ while (!(ioread32(port->membase + TIMBUART_ISR) & TXBF) &&
!uart_circ_empty(xmit)) {
iowrite8(xmit->buf[xmit->tail],
port->membase + TIMBUART_TXFIFO);
@@ -114,7 +114,7 @@
ioread8(port->membase + TIMBUART_BAUDRATE));
}
-static void timbuart_handle_tx_port(struct uart_port *port, u8 isr, u8 *ier)
+static void timbuart_handle_tx_port(struct uart_port *port, u32 isr, u32 *ier)
{
struct timbuart_port *uart =
container_of(port, struct timbuart_port, port);
@@ -129,7 +129,7 @@
if (isr & TXFLAGS) {
timbuart_tx_chars(port);
/* clear all TX interrupts */
- iowrite8(TXFLAGS, port->membase + TIMBUART_ISR);
+ iowrite32(TXFLAGS, port->membase + TIMBUART_ISR);
if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
uart_write_wakeup(port);
@@ -148,7 +148,7 @@
dev_dbg(port->dev, "%s - leaving\n", __func__);
}
-void timbuart_handle_rx_port(struct uart_port *port, u8 isr, u8 *ier)
+void timbuart_handle_rx_port(struct uart_port *port, u32 isr, u32 *ier)
{
if (isr & RXFLAGS) {
/* Some RX status is set */
@@ -161,7 +161,7 @@
timbuart_rx_chars(port);
/* ack all RX interrupts */
- iowrite8(RXFLAGS, port->membase + TIMBUART_ISR);
+ iowrite32(RXFLAGS, port->membase + TIMBUART_ISR);
}
/* always have the RX interrupts enabled */
@@ -173,11 +173,11 @@
void timbuart_tasklet(unsigned long arg)
{
struct timbuart_port *uart = (struct timbuart_port *)arg;
- u8 isr, ier = 0;
+ u32 isr, ier = 0;
spin_lock(&uart->port.lock);
- isr = ioread8(uart->port.membase + TIMBUART_ISR);
+ isr = ioread32(uart->port.membase + TIMBUART_ISR);
dev_dbg(uart->port.dev, "%s ISR: %x\n", __func__, isr);
if (!uart->usedma)
@@ -188,7 +188,7 @@
if (!uart->usedma)
timbuart_handle_rx_port(&uart->port, isr, &ier);
- iowrite8(ier, uart->port.membase + TIMBUART_IER);
+ iowrite32(ier, uart->port.membase + TIMBUART_IER);
spin_unlock(&uart->port.lock);
dev_dbg(uart->port.dev, "%s leaving\n", __func__);
@@ -196,9 +196,9 @@
static unsigned int timbuart_tx_empty(struct uart_port *port)
{
- u8 isr = ioread8(port->membase + TIMBUART_ISR);
+ u32 isr = ioread32(port->membase + TIMBUART_ISR);
- return (isr & TXBAE) ? TIOCSER_TEMT : 0;
+ return (isr & TXBE) ? TIOCSER_TEMT : 0;
}
static unsigned int timbuart_get_mctrl(struct uart_port *port)
@@ -222,13 +222,13 @@
iowrite8(TIMBUART_CTRL_RTS, port->membase + TIMBUART_CTRL);
}
-static void timbuart_mctrl_check(struct uart_port *port, u8 isr, u8 *ier)
+static void timbuart_mctrl_check(struct uart_port *port, u32 isr, u32 *ier)
{
unsigned int cts;
if (isr & CTS_DELTA) {
/* ack */
- iowrite8(CTS_DELTA, port->membase + TIMBUART_ISR);
+ iowrite32(CTS_DELTA, port->membase + TIMBUART_ISR);
cts = timbuart_get_mctrl(port);
uart_handle_cts_change(port, cts & TIOCM_CTS);
wake_up_interruptible(&port->info->delta_msr_wait);
@@ -255,9 +255,9 @@
dev_dbg(port->dev, "%s\n", __func__);
iowrite8(TIMBUART_CTRL_FLSHRX, port->membase + TIMBUART_CTRL);
- iowrite8(0xff, port->membase + TIMBUART_ISR);
+ iowrite32(0x1ff, port->membase + TIMBUART_ISR);
/* Enable all but TX interrupts */
- iowrite8(RXBAF | RXBF | RXTT | CTS_DELTA,
+ iowrite32(RXBAF | RXBF | RXTT | CTS_DELTA,
port->membase + TIMBUART_IER);
return request_irq(port->irq, timbuart_handleinterrupt, IRQF_SHARED,
@@ -270,7 +270,7 @@
container_of(port, struct timbuart_port, port);
dev_dbg(port->dev, "%s\n", __func__);
free_irq(port->irq, uart);
- iowrite8(0, port->membase + TIMBUART_IER);
+ iowrite32(0, port->membase + TIMBUART_IER);
}
static int get_bindex(int baud)
@@ -359,10 +359,10 @@
struct timbuart_port *uart = (struct timbuart_port *)devid;
if (ioread8(uart->port.membase + TIMBUART_IPR)) {
- uart->last_ier = ioread8(uart->port.membase + TIMBUART_IER);
+ uart->last_ier = ioread32(uart->port.membase + TIMBUART_IER);
/* disable interrupts, the tasklet enables them again */
- iowrite8(0, uart->port.membase + TIMBUART_IER);
+ iowrite32(0, uart->port.membase + TIMBUART_IER);
/* fire off bottom half */
tasklet_schedule(&uart->tasklet);
diff --git a/drivers/serial/zs.c b/drivers/serial/zs.c
index 9e6a873..d8c2809 100644
--- a/drivers/serial/zs.c
+++ b/drivers/serial/zs.c
@@ -231,7 +231,7 @@
{
int loops = 10000;
- while ((read_zsreg(zport, R0) & Rx_CH_AV) && loops--)
+ while ((read_zsreg(zport, R0) & Rx_CH_AV) && --loops)
read_zsdata(zport);
return loops;
}
@@ -241,7 +241,7 @@
struct zs_scc *scc = zport->scc;
int loops = 10000;
- while (!(read_zsreg(zport, R0) & Tx_BUF_EMP) && loops--) {
+ while (!(read_zsreg(zport, R0) & Tx_BUF_EMP) && --loops) {
zs_spin_unlock_cond_irq(&scc->zlock, irq);
udelay(2);
zs_spin_lock_cond_irq(&scc->zlock, irq);
@@ -254,7 +254,7 @@
struct zs_scc *scc = zport->scc;
int loops = 10000;
- while (!(read_zsreg(zport, R1) & ALL_SNT) && loops--) {
+ while (!(read_zsreg(zport, R1) & ALL_SNT) && --loops) {
zs_spin_unlock_cond_irq(&scc->zlock, irq);
udelay(2);
zs_spin_lock_cond_irq(&scc->zlock, irq);
diff --git a/drivers/staging/serqt_usb2/serqt_usb2.c b/drivers/staging/serqt_usb2/serqt_usb2.c
index 581232b..90b29b5 100644
--- a/drivers/staging/serqt_usb2/serqt_usb2.c
+++ b/drivers/staging/serqt_usb2/serqt_usb2.c
@@ -284,21 +284,12 @@
return;
}
-static void ProcessRxChar(struct usb_serial_port *port, unsigned char Data)
+static void ProcessRxChar(struct tty_struct *tty, struct usb_serial_port *port,
+ unsigned char data)
{
- struct tty_struct *tty;
struct urb *urb = port->read_urb;
- tty = tty_port_tty_get(&port->port);
-
- /* if we insert more than TTY_FLIPBUF_SIZE characters, we drop them. */
-
- if (tty && urb->actual_length) {
- tty_buffer_request_room(tty, 1);
- tty_insert_flip_string(tty, &Data, 1);
- /* tty_flip_buffer_push(tty); */
- }
-
- return;
+ if (urb->actual_length)
+ tty_insert_flip_char(tty, data, TTY_NORMAL);
}
static void qt_write_bulk_callback(struct urb *urb)
@@ -435,8 +426,10 @@
case 0xff:
dbg("No status sequence. \n");
- ProcessRxChar(port, data[i]);
- ProcessRxChar(port, data[i + 1]);
+ if (tty) {
+ ProcessRxChar(tty, port, data[i]);
+ ProcessRxChar(tty, port, data[i + 1]);
+ }
i += 2;
break;
}
@@ -444,10 +437,8 @@
continue;
}
- if (tty && urb->actual_length) {
- tty_buffer_request_room(tty, 1);
- tty_insert_flip_string(tty, (data + i), 1);
- }
+ if (tty && urb->actual_length)
+ tty_insert_flip_char(tty, data[i], TTY_NORMAL);
}
tty_flip_buffer_push(tty);
diff --git a/fs/Kconfig b/fs/Kconfig
index d78e950..a97263b 100644
--- a/fs/Kconfig
+++ b/fs/Kconfig
@@ -236,10 +236,12 @@
config LOCKD
tristate
+ depends on FILE_LOCKING
config LOCKD_V4
bool
depends on NFSD_V3 || NFS_V3
+ depends on FILE_LOCKING
default y
config EXPORTFS
diff --git a/fs/anon_inodes.c b/fs/anon_inodes.c
index 1dd96d4..47d4a01 100644
--- a/fs/anon_inodes.c
+++ b/fs/anon_inodes.c
@@ -52,6 +52,19 @@
.d_delete = anon_inodefs_delete_dentry,
};
+/*
+ * nop .set_page_dirty method so that people can use .page_mkwrite on
+ * anon inodes.
+ */
+static int anon_set_page_dirty(struct page *page)
+{
+ return 0;
+};
+
+static const struct address_space_operations anon_aops = {
+ .set_page_dirty = anon_set_page_dirty,
+};
+
/**
* anon_inode_getfd - creates a new file instance by hooking it up to an
* anonymous inode, and a dentry that describe the "class"
@@ -151,6 +164,8 @@
inode->i_fop = &anon_inode_fops;
+ inode->i_mapping->a_ops = &anon_aops;
+
/*
* Mark the inode dirty from the very beginning,
* that way it will never be moved to the dirty
diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c
index c5ded5f..c135202 100644
--- a/fs/compat_ioctl.c
+++ b/fs/compat_ioctl.c
@@ -94,7 +94,6 @@
#include <linux/atm_tcp.h>
#include <linux/sonet.h>
#include <linux/atm_suni.h>
-#include <linux/mtd/mtd.h>
#include <linux/usb.h>
#include <linux/usbdevice_fs.h>
@@ -1405,46 +1404,6 @@
#define HIDPGETCONNLIST _IOR('H', 210, int)
#define HIDPGETCONNINFO _IOR('H', 211, int)
-struct mtd_oob_buf32 {
- u_int32_t start;
- u_int32_t length;
- compat_caddr_t ptr; /* unsigned char* */
-};
-
-#define MEMWRITEOOB32 _IOWR('M',3,struct mtd_oob_buf32)
-#define MEMREADOOB32 _IOWR('M',4,struct mtd_oob_buf32)
-
-static int mtd_rw_oob(unsigned int fd, unsigned int cmd, unsigned long arg)
-{
- struct mtd_oob_buf __user *buf = compat_alloc_user_space(sizeof(*buf));
- struct mtd_oob_buf32 __user *buf32 = compat_ptr(arg);
- u32 data;
- char __user *datap;
- unsigned int real_cmd;
- int err;
-
- real_cmd = (cmd == MEMREADOOB32) ?
- MEMREADOOB : MEMWRITEOOB;
-
- if (copy_in_user(&buf->start, &buf32->start,
- 2 * sizeof(u32)) ||
- get_user(data, &buf32->ptr))
- return -EFAULT;
- datap = compat_ptr(data);
- if (put_user(datap, &buf->ptr))
- return -EFAULT;
-
- err = sys_ioctl(fd, real_cmd, (unsigned long) buf);
-
- if (!err) {
- if (copy_in_user(&buf32->start, &buf->start,
- 2 * sizeof(u32)))
- err = -EFAULT;
- }
-
- return err;
-}
-
#ifdef CONFIG_BLOCK
struct raw32_config_request
{
@@ -2426,15 +2385,6 @@
COMPATIBLE_IOCTL(USBDEVFS_REAPURB32)
COMPATIBLE_IOCTL(USBDEVFS_REAPURBNDELAY32)
COMPATIBLE_IOCTL(USBDEVFS_CLEAR_HALT)
-/* MTD */
-COMPATIBLE_IOCTL(MEMGETINFO)
-COMPATIBLE_IOCTL(MEMERASE)
-COMPATIBLE_IOCTL(MEMLOCK)
-COMPATIBLE_IOCTL(MEMUNLOCK)
-COMPATIBLE_IOCTL(MEMGETREGIONCOUNT)
-COMPATIBLE_IOCTL(MEMGETREGIONINFO)
-COMPATIBLE_IOCTL(MEMGETBADBLOCK)
-COMPATIBLE_IOCTL(MEMSETBADBLOCK)
/* NBD */
ULONG_IOCTL(NBD_SET_SOCK)
ULONG_IOCTL(NBD_SET_BLKSIZE)
@@ -2544,8 +2494,6 @@
COMPATIBLE_IOCTL(JSIOCGNAME(0))
/* now things that need handlers */
-HANDLE_IOCTL(MEMREADOOB32, mtd_rw_oob)
-HANDLE_IOCTL(MEMWRITEOOB32, mtd_rw_oob)
#ifdef CONFIG_NET
HANDLE_IOCTL(SIOCGIFNAME, dev_ifname32)
HANDLE_IOCTL(SIOCGIFCONF, dev_ifconf)
diff --git a/fs/jffs2/scan.c b/fs/jffs2/scan.c
index 1d437de..7515e73 100644
--- a/fs/jffs2/scan.c
+++ b/fs/jffs2/scan.c
@@ -196,7 +196,7 @@
if (c->nextblock) {
ret = file_dirty(c, c->nextblock);
if (ret)
- return ret;
+ goto out;
/* deleting summary information of the old nextblock */
jffs2_sum_reset_collected(c->summary);
}
@@ -207,7 +207,7 @@
} else {
ret = file_dirty(c, jeb);
if (ret)
- return ret;
+ goto out;
}
break;
diff --git a/fs/lockd/clntproc.c b/fs/lockd/clntproc.c
index dd79570..f2fdcbc 100644
--- a/fs/lockd/clntproc.c
+++ b/fs/lockd/clntproc.c
@@ -126,7 +126,6 @@
struct nlm_lock *lock = &argp->lock;
nlmclnt_next_cookie(&argp->cookie);
- argp->state = nsm_local_state;
memcpy(&lock->fh, NFS_FH(fl->fl_file->f_path.dentry->d_inode), sizeof(struct nfs_fh));
lock->caller = utsname()->nodename;
lock->oh.data = req->a_owner;
@@ -165,6 +164,7 @@
/* Set up the argument struct */
nlmclnt_setlockargs(call, fl);
+ lock_kernel();
if (IS_SETLK(cmd) || IS_SETLKW(cmd)) {
if (fl->fl_type != F_UNLCK) {
call->a_args.block = IS_SETLKW(cmd) ? 1 : 0;
@@ -178,6 +178,7 @@
fl->fl_ops->fl_release_private(fl);
fl->fl_ops = NULL;
+ unlock_kernel();
dprintk("lockd: clnt proc returns %d\n", status);
return status;
@@ -519,6 +520,7 @@
if (nsm_monitor(host) < 0)
goto out;
+ req->a_args.state = nsm_local_state;
fl->fl_flags |= FL_ACCESS;
status = do_vfs_lock(fl);
diff --git a/fs/lockd/mon.c b/fs/lockd/mon.c
index 6d5d4a4..7fce1b5 100644
--- a/fs/lockd/mon.c
+++ b/fs/lockd/mon.c
@@ -53,7 +53,7 @@
/*
* Local NSM state
*/
-int __read_mostly nsm_local_state;
+u32 __read_mostly nsm_local_state;
int __read_mostly nsm_use_hostnames;
static inline struct sockaddr *nsm_addr(const struct nsm_handle *nsm)
@@ -112,6 +112,7 @@
.program = &nsm_program,
.version = NSM_VERSION,
.authflavor = RPC_AUTH_NULL,
+ .flags = RPC_CLNT_CREATE_NOPING,
};
return rpc_create(&args);
@@ -184,13 +185,19 @@
nsm->sm_mon_name = nsm_use_hostnames ? nsm->sm_name : nsm->sm_addrbuf;
status = nsm_mon_unmon(nsm, NSMPROC_MON, &res);
- if (res.status != 0)
+ if (unlikely(res.status != 0))
status = -EIO;
- if (status < 0)
+ if (unlikely(status < 0)) {
printk(KERN_NOTICE "lockd: cannot monitor %s\n", nsm->sm_name);
- else
- nsm->sm_monitored = 1;
- return status;
+ return status;
+ }
+
+ nsm->sm_monitored = 1;
+ if (unlikely(nsm_local_state != res.state)) {
+ nsm_local_state = res.state;
+ dprintk("lockd: NSM state changed to %d\n", nsm_local_state);
+ }
+ return 0;
}
/**
diff --git a/fs/lockd/svclock.c b/fs/lockd/svclock.c
index 83ee342..e577a78 100644
--- a/fs/lockd/svclock.c
+++ b/fs/lockd/svclock.c
@@ -326,6 +326,8 @@
{
if (call->a_args.lock.oh.data != call->a_owner)
kfree(call->a_args.lock.oh.data);
+
+ locks_release_private(&call->a_args.lock.fl);
}
/*
diff --git a/fs/locks.c b/fs/locks.c
index ec3deea..b6440f5 100644
--- a/fs/locks.c
+++ b/fs/locks.c
@@ -151,7 +151,7 @@
return kmem_cache_alloc(filelock_cache, GFP_KERNEL);
}
-static void locks_release_private(struct file_lock *fl)
+void locks_release_private(struct file_lock *fl)
{
if (fl->fl_ops) {
if (fl->fl_ops->fl_release_private)
@@ -165,6 +165,7 @@
}
}
+EXPORT_SYMBOL_GPL(locks_release_private);
/* Free a lock which is not in use. */
static void locks_free_lock(struct file_lock *fl)
diff --git a/fs/minix/minix.h b/fs/minix/minix.h
index cb7fdd1..9dcf95b4 100644
--- a/fs/minix/minix.h
+++ b/fs/minix/minix.h
@@ -1,3 +1,6 @@
+#ifndef FS_MINIX_H
+#define FS_MINIX_H
+
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/minix_fs.h>
@@ -86,3 +89,5 @@
{
return list_entry(inode, struct minix_inode_info, vfs_inode);
}
+
+#endif /* FS_MINIX_H */
diff --git a/fs/namespace.c b/fs/namespace.c
index 2dd333b..a7bea8c 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -1937,6 +1937,21 @@
return retval;
}
+static struct mnt_namespace *alloc_mnt_ns(void)
+{
+ struct mnt_namespace *new_ns;
+
+ new_ns = kmalloc(sizeof(struct mnt_namespace), GFP_KERNEL);
+ if (!new_ns)
+ return ERR_PTR(-ENOMEM);
+ atomic_set(&new_ns->count, 1);
+ new_ns->root = NULL;
+ INIT_LIST_HEAD(&new_ns->list);
+ init_waitqueue_head(&new_ns->poll);
+ new_ns->event = 0;
+ return new_ns;
+}
+
/*
* Allocate a new namespace structure and populate it with contents
* copied from the namespace of the passed in task structure.
@@ -1948,14 +1963,9 @@
struct vfsmount *rootmnt = NULL, *pwdmnt = NULL;
struct vfsmount *p, *q;
- new_ns = kmalloc(sizeof(struct mnt_namespace), GFP_KERNEL);
- if (!new_ns)
- return ERR_PTR(-ENOMEM);
-
- atomic_set(&new_ns->count, 1);
- INIT_LIST_HEAD(&new_ns->list);
- init_waitqueue_head(&new_ns->poll);
- new_ns->event = 0;
+ new_ns = alloc_mnt_ns();
+ if (IS_ERR(new_ns))
+ return new_ns;
down_write(&namespace_sem);
/* First pass: copy the tree topology */
@@ -2019,6 +2029,24 @@
return new_ns;
}
+/**
+ * create_mnt_ns - creates a private namespace and adds a root filesystem
+ * @mnt: pointer to the new root filesystem mountpoint
+ */
+struct mnt_namespace *create_mnt_ns(struct vfsmount *mnt)
+{
+ struct mnt_namespace *new_ns;
+
+ new_ns = alloc_mnt_ns();
+ if (!IS_ERR(new_ns)) {
+ mnt->mnt_ns = new_ns;
+ new_ns->root = mnt;
+ list_add(&new_ns->list, &new_ns->root->mnt_list);
+ }
+ return new_ns;
+}
+EXPORT_SYMBOL(create_mnt_ns);
+
SYSCALL_DEFINE5(mount, char __user *, dev_name, char __user *, dir_name,
char __user *, type, unsigned long, flags, void __user *, data)
{
@@ -2246,10 +2274,14 @@
init_mount_tree();
}
-void __put_mnt_ns(struct mnt_namespace *ns)
+void put_mnt_ns(struct mnt_namespace *ns)
{
- struct vfsmount *root = ns->root;
+ struct vfsmount *root;
LIST_HEAD(umount_list);
+
+ if (!atomic_dec_and_lock(&ns->count, &vfsmount_lock))
+ return;
+ root = ns->root;
ns->root = NULL;
spin_unlock(&vfsmount_lock);
down_write(&namespace_sem);
@@ -2260,3 +2292,4 @@
release_mounts(&umount_list);
kfree(ns);
}
+EXPORT_SYMBOL(put_mnt_ns);
diff --git a/fs/nfs/Kconfig b/fs/nfs/Kconfig
index e67f3ec..2a77bc2 100644
--- a/fs/nfs/Kconfig
+++ b/fs/nfs/Kconfig
@@ -1,6 +1,6 @@
config NFS_FS
tristate "NFS client support"
- depends on INET
+ depends on INET && FILE_LOCKING
select LOCKD
select SUNRPC
select NFS_ACL_SUPPORT if NFS_V3_ACL
@@ -74,6 +74,15 @@
If unsure, say N.
+config NFS_V4_1
+ bool "NFS client support for NFSv4.1 (DEVELOPER ONLY)"
+ depends on NFS_V4 && EXPERIMENTAL
+ help
+ This option enables support for minor version 1 of the NFSv4 protocol
+ (draft-ietf-nfsv4-minorversion1) in the kernel's NFS client.
+
+ Unless you're an NFS developer, say N.
+
config ROOT_NFS
bool "Root file system on NFS"
depends on NFS_FS=y && IP_PNP
diff --git a/fs/nfs/callback.c b/fs/nfs/callback.c
index a886e69..7f604c7 100644
--- a/fs/nfs/callback.c
+++ b/fs/nfs/callback.c
@@ -17,6 +17,9 @@
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/sunrpc/svcauth_gss.h>
+#if defined(CONFIG_NFS_V4_1)
+#include <linux/sunrpc/bc_xprt.h>
+#endif
#include <net/inet_sock.h>
@@ -28,11 +31,12 @@
struct nfs_callback_data {
unsigned int users;
+ struct svc_serv *serv;
struct svc_rqst *rqst;
struct task_struct *task;
};
-static struct nfs_callback_data nfs_callback_info;
+static struct nfs_callback_data nfs_callback_info[NFS4_MAX_MINOR_VERSION + 1];
static DEFINE_MUTEX(nfs_callback_mutex);
static struct svc_program nfs4_callback_program;
@@ -56,10 +60,10 @@
&nfs_callback_set_tcpport, 0644);
/*
- * This is the callback kernel thread.
+ * This is the NFSv4 callback kernel thread.
*/
static int
-nfs_callback_svc(void *vrqstp)
+nfs4_callback_svc(void *vrqstp)
{
int err, preverr = 0;
struct svc_rqst *rqstp = vrqstp;
@@ -97,20 +101,12 @@
}
/*
- * Bring up the callback thread if it is not already up.
+ * Prepare to bring up the NFSv4 callback service
*/
-int nfs_callback_up(void)
+struct svc_rqst *
+nfs4_callback_up(struct svc_serv *serv)
{
- struct svc_serv *serv = NULL;
- int ret = 0;
-
- mutex_lock(&nfs_callback_mutex);
- if (nfs_callback_info.users++ || nfs_callback_info.task != NULL)
- goto out;
- serv = svc_create(&nfs4_callback_program, NFS4_CALLBACK_BUFSIZE, NULL);
- ret = -ENOMEM;
- if (!serv)
- goto out_err;
+ int ret;
ret = svc_create_xprt(serv, "tcp", PF_INET,
nfs_callback_set_tcpport, SVC_SOCK_ANONYMOUS);
@@ -127,27 +123,174 @@
nfs_callback_tcpport6 = ret;
dprintk("NFS: Callback listener port = %u (af %u)\n",
nfs_callback_tcpport6, PF_INET6);
- } else if (ret != -EAFNOSUPPORT)
+ } else if (ret == -EAFNOSUPPORT)
+ ret = 0;
+ else
goto out_err;
#endif /* defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) */
- nfs_callback_info.rqst = svc_prepare_thread(serv, &serv->sv_pools[0]);
- if (IS_ERR(nfs_callback_info.rqst)) {
- ret = PTR_ERR(nfs_callback_info.rqst);
- nfs_callback_info.rqst = NULL;
+ return svc_prepare_thread(serv, &serv->sv_pools[0]);
+
+out_err:
+ if (ret == 0)
+ ret = -ENOMEM;
+ return ERR_PTR(ret);
+}
+
+#if defined(CONFIG_NFS_V4_1)
+/*
+ * The callback service for NFSv4.1 callbacks
+ */
+static int
+nfs41_callback_svc(void *vrqstp)
+{
+ struct svc_rqst *rqstp = vrqstp;
+ struct svc_serv *serv = rqstp->rq_server;
+ struct rpc_rqst *req;
+ int error;
+ DEFINE_WAIT(wq);
+
+ set_freezable();
+
+ /*
+ * FIXME: do we really need to run this under the BKL? If so, please
+ * add a comment about what it's intended to protect.
+ */
+ lock_kernel();
+ while (!kthread_should_stop()) {
+ prepare_to_wait(&serv->sv_cb_waitq, &wq, TASK_INTERRUPTIBLE);
+ spin_lock_bh(&serv->sv_cb_lock);
+ if (!list_empty(&serv->sv_cb_list)) {
+ req = list_first_entry(&serv->sv_cb_list,
+ struct rpc_rqst, rq_bc_list);
+ list_del(&req->rq_bc_list);
+ spin_unlock_bh(&serv->sv_cb_lock);
+ dprintk("Invoking bc_svc_process()\n");
+ error = bc_svc_process(serv, req, rqstp);
+ dprintk("bc_svc_process() returned w/ error code= %d\n",
+ error);
+ } else {
+ spin_unlock_bh(&serv->sv_cb_lock);
+ schedule();
+ }
+ finish_wait(&serv->sv_cb_waitq, &wq);
+ }
+ unlock_kernel();
+ return 0;
+}
+
+/*
+ * Bring up the NFSv4.1 callback service
+ */
+struct svc_rqst *
+nfs41_callback_up(struct svc_serv *serv, struct rpc_xprt *xprt)
+{
+ struct svc_xprt *bc_xprt;
+ struct svc_rqst *rqstp = ERR_PTR(-ENOMEM);
+
+ dprintk("--> %s\n", __func__);
+ /* Create a svc_sock for the service */
+ bc_xprt = svc_sock_create(serv, xprt->prot);
+ if (!bc_xprt)
+ goto out;
+
+ /*
+ * Save the svc_serv in the transport so that it can
+ * be referenced when the session backchannel is initialized
+ */
+ serv->bc_xprt = bc_xprt;
+ xprt->bc_serv = serv;
+
+ INIT_LIST_HEAD(&serv->sv_cb_list);
+ spin_lock_init(&serv->sv_cb_lock);
+ init_waitqueue_head(&serv->sv_cb_waitq);
+ rqstp = svc_prepare_thread(serv, &serv->sv_pools[0]);
+ if (IS_ERR(rqstp))
+ svc_sock_destroy(bc_xprt);
+out:
+ dprintk("--> %s return %p\n", __func__, rqstp);
+ return rqstp;
+}
+
+static inline int nfs_minorversion_callback_svc_setup(u32 minorversion,
+ struct svc_serv *serv, struct rpc_xprt *xprt,
+ struct svc_rqst **rqstpp, int (**callback_svc)(void *vrqstp))
+{
+ if (minorversion) {
+ *rqstpp = nfs41_callback_up(serv, xprt);
+ *callback_svc = nfs41_callback_svc;
+ }
+ return minorversion;
+}
+
+static inline void nfs_callback_bc_serv(u32 minorversion, struct rpc_xprt *xprt,
+ struct nfs_callback_data *cb_info)
+{
+ if (minorversion)
+ xprt->bc_serv = cb_info->serv;
+}
+#else
+static inline int nfs_minorversion_callback_svc_setup(u32 minorversion,
+ struct svc_serv *serv, struct rpc_xprt *xprt,
+ struct svc_rqst **rqstpp, int (**callback_svc)(void *vrqstp))
+{
+ return 0;
+}
+
+static inline void nfs_callback_bc_serv(u32 minorversion, struct rpc_xprt *xprt,
+ struct nfs_callback_data *cb_info)
+{
+}
+#endif /* CONFIG_NFS_V4_1 */
+
+/*
+ * Bring up the callback thread if it is not already up.
+ */
+int nfs_callback_up(u32 minorversion, struct rpc_xprt *xprt)
+{
+ struct svc_serv *serv = NULL;
+ struct svc_rqst *rqstp;
+ int (*callback_svc)(void *vrqstp);
+ struct nfs_callback_data *cb_info = &nfs_callback_info[minorversion];
+ char svc_name[12];
+ int ret = 0;
+ int minorversion_setup;
+
+ mutex_lock(&nfs_callback_mutex);
+ if (cb_info->users++ || cb_info->task != NULL) {
+ nfs_callback_bc_serv(minorversion, xprt, cb_info);
+ goto out;
+ }
+ serv = svc_create(&nfs4_callback_program, NFS4_CALLBACK_BUFSIZE, NULL);
+ if (!serv) {
+ ret = -ENOMEM;
+ goto out_err;
+ }
+
+ minorversion_setup = nfs_minorversion_callback_svc_setup(minorversion,
+ serv, xprt, &rqstp, &callback_svc);
+ if (!minorversion_setup) {
+ /* v4.0 callback setup */
+ rqstp = nfs4_callback_up(serv);
+ callback_svc = nfs4_callback_svc;
+ }
+
+ if (IS_ERR(rqstp)) {
+ ret = PTR_ERR(rqstp);
goto out_err;
}
svc_sock_update_bufs(serv);
- nfs_callback_info.task = kthread_run(nfs_callback_svc,
- nfs_callback_info.rqst,
- "nfsv4-svc");
- if (IS_ERR(nfs_callback_info.task)) {
- ret = PTR_ERR(nfs_callback_info.task);
- svc_exit_thread(nfs_callback_info.rqst);
- nfs_callback_info.rqst = NULL;
- nfs_callback_info.task = NULL;
+ sprintf(svc_name, "nfsv4.%u-svc", minorversion);
+ cb_info->serv = serv;
+ cb_info->rqst = rqstp;
+ cb_info->task = kthread_run(callback_svc, cb_info->rqst, svc_name);
+ if (IS_ERR(cb_info->task)) {
+ ret = PTR_ERR(cb_info->task);
+ svc_exit_thread(cb_info->rqst);
+ cb_info->rqst = NULL;
+ cb_info->task = NULL;
goto out_err;
}
out:
@@ -164,22 +307,25 @@
out_err:
dprintk("NFS: Couldn't create callback socket or server thread; "
"err = %d\n", ret);
- nfs_callback_info.users--;
+ cb_info->users--;
goto out;
}
/*
* Kill the callback thread if it's no longer being used.
*/
-void nfs_callback_down(void)
+void nfs_callback_down(int minorversion)
{
+ struct nfs_callback_data *cb_info = &nfs_callback_info[minorversion];
+
mutex_lock(&nfs_callback_mutex);
- nfs_callback_info.users--;
- if (nfs_callback_info.users == 0 && nfs_callback_info.task != NULL) {
- kthread_stop(nfs_callback_info.task);
- svc_exit_thread(nfs_callback_info.rqst);
- nfs_callback_info.rqst = NULL;
- nfs_callback_info.task = NULL;
+ cb_info->users--;
+ if (cb_info->users == 0 && cb_info->task != NULL) {
+ kthread_stop(cb_info->task);
+ svc_exit_thread(cb_info->rqst);
+ cb_info->serv = NULL;
+ cb_info->rqst = NULL;
+ cb_info->task = NULL;
}
mutex_unlock(&nfs_callback_mutex);
}
diff --git a/fs/nfs/callback.h b/fs/nfs/callback.h
index e110e28..07baa82 100644
--- a/fs/nfs/callback.h
+++ b/fs/nfs/callback.h
@@ -20,13 +20,24 @@
enum nfs4_callback_opnum {
OP_CB_GETATTR = 3,
OP_CB_RECALL = 4,
+/* Callback operations new to NFSv4.1 */
+ OP_CB_LAYOUTRECALL = 5,
+ OP_CB_NOTIFY = 6,
+ OP_CB_PUSH_DELEG = 7,
+ OP_CB_RECALL_ANY = 8,
+ OP_CB_RECALLABLE_OBJ_AVAIL = 9,
+ OP_CB_RECALL_SLOT = 10,
+ OP_CB_SEQUENCE = 11,
+ OP_CB_WANTS_CANCELLED = 12,
+ OP_CB_NOTIFY_LOCK = 13,
+ OP_CB_NOTIFY_DEVICEID = 14,
OP_CB_ILLEGAL = 10044,
};
struct cb_compound_hdr_arg {
unsigned int taglen;
const char *tag;
- unsigned int callback_ident;
+ unsigned int minorversion;
unsigned nops;
};
@@ -59,16 +70,59 @@
uint32_t truncate;
};
+#if defined(CONFIG_NFS_V4_1)
+
+struct referring_call {
+ uint32_t rc_sequenceid;
+ uint32_t rc_slotid;
+};
+
+struct referring_call_list {
+ struct nfs4_sessionid rcl_sessionid;
+ uint32_t rcl_nrefcalls;
+ struct referring_call *rcl_refcalls;
+};
+
+struct cb_sequenceargs {
+ struct sockaddr *csa_addr;
+ struct nfs4_sessionid csa_sessionid;
+ uint32_t csa_sequenceid;
+ uint32_t csa_slotid;
+ uint32_t csa_highestslotid;
+ uint32_t csa_cachethis;
+ uint32_t csa_nrclists;
+ struct referring_call_list *csa_rclists;
+};
+
+struct cb_sequenceres {
+ __be32 csr_status;
+ struct nfs4_sessionid csr_sessionid;
+ uint32_t csr_sequenceid;
+ uint32_t csr_slotid;
+ uint32_t csr_highestslotid;
+ uint32_t csr_target_highestslotid;
+};
+
+extern unsigned nfs4_callback_sequence(struct cb_sequenceargs *args,
+ struct cb_sequenceres *res);
+
+#endif /* CONFIG_NFS_V4_1 */
+
extern __be32 nfs4_callback_getattr(struct cb_getattrargs *args, struct cb_getattrres *res);
extern __be32 nfs4_callback_recall(struct cb_recallargs *args, void *dummy);
#ifdef CONFIG_NFS_V4
-extern int nfs_callback_up(void);
-extern void nfs_callback_down(void);
-#else
-#define nfs_callback_up() (0)
-#define nfs_callback_down() do {} while(0)
-#endif
+extern int nfs_callback_up(u32 minorversion, struct rpc_xprt *xprt);
+extern void nfs_callback_down(int minorversion);
+#endif /* CONFIG_NFS_V4 */
+
+/*
+ * nfs41: Callbacks are not expected to cause substantial latency,
+ * so we limit their concurrency to 1 by setting the maximum number
+ * of backchannel slots to 1.
+ */
+#define NFS41_BC_MIN_CALLBACKS 1
+#define NFS41_BC_MAX_CALLBACKS 1
extern unsigned int nfs_callback_set_tcpport;
extern unsigned short nfs_callback_tcpport;
diff --git a/fs/nfs/callback_proc.c b/fs/nfs/callback_proc.c
index f7e83e2..b7da1f5 100644
--- a/fs/nfs/callback_proc.c
+++ b/fs/nfs/callback_proc.c
@@ -101,3 +101,130 @@
dprintk("%s: exit with status = %d\n", __func__, ntohl(res));
return res;
}
+
+#if defined(CONFIG_NFS_V4_1)
+
+/*
+ * Validate the sequenceID sent by the server.
+ * Return success if the sequenceID is one more than what we last saw on
+ * this slot, accounting for wraparound. Increments the slot's sequence.
+ *
+ * We don't yet implement a duplicate request cache, so at this time
+ * we will log replays, and process them as if we had not seen them before,
+ * but we don't bump the sequence in the slot. Not too worried about it,
+ * since we only currently implement idempotent callbacks anyway.
+ *
+ * We have a single slot backchannel at this time, so we don't bother
+ * checking the used_slots bit array on the table. The lower layer guarantees
+ * a single outstanding callback request at a time.
+ */
+static int
+validate_seqid(struct nfs4_slot_table *tbl, u32 slotid, u32 seqid)
+{
+ struct nfs4_slot *slot;
+
+ dprintk("%s enter. slotid %d seqid %d\n",
+ __func__, slotid, seqid);
+
+ if (slotid >= NFS41_BC_MAX_CALLBACKS)
+ return htonl(NFS4ERR_BADSLOT);
+
+ slot = tbl->slots + slotid;
+ dprintk("%s slot table seqid: %d\n", __func__, slot->seq_nr);
+
+ /* Normal */
+ if (likely(seqid == slot->seq_nr + 1)) {
+ slot->seq_nr++;
+ return htonl(NFS4_OK);
+ }
+
+ /* Replay */
+ if (seqid == slot->seq_nr) {
+ dprintk("%s seqid %d is a replay - no DRC available\n",
+ __func__, seqid);
+ return htonl(NFS4_OK);
+ }
+
+ /* Wraparound */
+ if (seqid == 1 && (slot->seq_nr + 1) == 0) {
+ slot->seq_nr = 1;
+ return htonl(NFS4_OK);
+ }
+
+ /* Misordered request */
+ return htonl(NFS4ERR_SEQ_MISORDERED);
+}
+
+/*
+ * Returns a pointer to a held 'struct nfs_client' that matches the server's
+ * address, major version number, and session ID. It is the caller's
+ * responsibility to release the returned reference.
+ *
+ * Returns NULL if there are no connections with sessions, or if no session
+ * matches the one of interest.
+ */
+static struct nfs_client *find_client_with_session(
+ const struct sockaddr *addr, u32 nfsversion,
+ struct nfs4_sessionid *sessionid)
+{
+ struct nfs_client *clp;
+
+ clp = nfs_find_client(addr, 4);
+ if (clp == NULL)
+ return NULL;
+
+ do {
+ struct nfs_client *prev = clp;
+
+ if (clp->cl_session != NULL) {
+ if (memcmp(clp->cl_session->sess_id.data,
+ sessionid->data,
+ NFS4_MAX_SESSIONID_LEN) == 0) {
+ /* Returns a held reference to clp */
+ return clp;
+ }
+ }
+ clp = nfs_find_client_next(prev);
+ nfs_put_client(prev);
+ } while (clp != NULL);
+
+ return NULL;
+}
+
+/* FIXME: referring calls should be processed */
+unsigned nfs4_callback_sequence(struct cb_sequenceargs *args,
+ struct cb_sequenceres *res)
+{
+ struct nfs_client *clp;
+ int i, status;
+
+ for (i = 0; i < args->csa_nrclists; i++)
+ kfree(args->csa_rclists[i].rcl_refcalls);
+ kfree(args->csa_rclists);
+
+ status = htonl(NFS4ERR_BADSESSION);
+ clp = find_client_with_session(args->csa_addr, 4, &args->csa_sessionid);
+ if (clp == NULL)
+ goto out;
+
+ status = validate_seqid(&clp->cl_session->bc_slot_table,
+ args->csa_slotid, args->csa_sequenceid);
+ if (status)
+ goto out_putclient;
+
+ memcpy(&res->csr_sessionid, &args->csa_sessionid,
+ sizeof(res->csr_sessionid));
+ res->csr_sequenceid = args->csa_sequenceid;
+ res->csr_slotid = args->csa_slotid;
+ res->csr_highestslotid = NFS41_BC_MAX_CALLBACKS - 1;
+ res->csr_target_highestslotid = NFS41_BC_MAX_CALLBACKS - 1;
+
+out_putclient:
+ nfs_put_client(clp);
+out:
+ dprintk("%s: exit with status = %d\n", __func__, ntohl(status));
+ res->csr_status = status;
+ return res->csr_status;
+}
+
+#endif /* CONFIG_NFS_V4_1 */
diff --git a/fs/nfs/callback_xdr.c b/fs/nfs/callback_xdr.c
index dd0ef34..e5a2dac 100644
--- a/fs/nfs/callback_xdr.c
+++ b/fs/nfs/callback_xdr.c
@@ -20,6 +20,11 @@
2 + 2 + 3 + 3)
#define CB_OP_RECALL_RES_MAXSZ (CB_OP_HDR_RES_MAXSZ)
+#if defined(CONFIG_NFS_V4_1)
+#define CB_OP_SEQUENCE_RES_MAXSZ (CB_OP_HDR_RES_MAXSZ + \
+ 4 + 1 + 3)
+#endif /* CONFIG_NFS_V4_1 */
+
#define NFSDBG_FACILITY NFSDBG_CALLBACK
typedef __be32 (*callback_process_op_t)(void *, void *);
@@ -132,7 +137,6 @@
static __be32 decode_compound_hdr_arg(struct xdr_stream *xdr, struct cb_compound_hdr_arg *hdr)
{
__be32 *p;
- unsigned int minor_version;
__be32 status;
status = decode_string(xdr, &hdr->taglen, &hdr->tag);
@@ -147,15 +151,19 @@
p = read_buf(xdr, 12);
if (unlikely(p == NULL))
return htonl(NFS4ERR_RESOURCE);
- minor_version = ntohl(*p++);
- /* Check minor version is zero. */
- if (minor_version != 0) {
- printk(KERN_WARNING "%s: NFSv4 server callback with illegal minor version %u!\n",
- __func__, minor_version);
+ hdr->minorversion = ntohl(*p++);
+ /* Check minor version is zero or one. */
+ if (hdr->minorversion <= 1) {
+ p++; /* skip callback_ident */
+ } else {
+ printk(KERN_WARNING "%s: NFSv4 server callback with "
+ "illegal minor version %u!\n",
+ __func__, hdr->minorversion);
return htonl(NFS4ERR_MINOR_VERS_MISMATCH);
}
- hdr->callback_ident = ntohl(*p++);
hdr->nops = ntohl(*p);
+ dprintk("%s: minorversion %d nops %d\n", __func__,
+ hdr->minorversion, hdr->nops);
return 0;
}
@@ -204,6 +212,122 @@
return status;
}
+#if defined(CONFIG_NFS_V4_1)
+
+static unsigned decode_sessionid(struct xdr_stream *xdr,
+ struct nfs4_sessionid *sid)
+{
+ uint32_t *p;
+ int len = NFS4_MAX_SESSIONID_LEN;
+
+ p = read_buf(xdr, len);
+ if (unlikely(p == NULL))
+ return htonl(NFS4ERR_RESOURCE);
+
+ memcpy(sid->data, p, len);
+ return 0;
+}
+
+static unsigned decode_rc_list(struct xdr_stream *xdr,
+ struct referring_call_list *rc_list)
+{
+ uint32_t *p;
+ int i;
+ unsigned status;
+
+ status = decode_sessionid(xdr, &rc_list->rcl_sessionid);
+ if (status)
+ goto out;
+
+ status = htonl(NFS4ERR_RESOURCE);
+ p = read_buf(xdr, sizeof(uint32_t));
+ if (unlikely(p == NULL))
+ goto out;
+
+ rc_list->rcl_nrefcalls = ntohl(*p++);
+ if (rc_list->rcl_nrefcalls) {
+ p = read_buf(xdr,
+ rc_list->rcl_nrefcalls * 2 * sizeof(uint32_t));
+ if (unlikely(p == NULL))
+ goto out;
+ rc_list->rcl_refcalls = kmalloc(rc_list->rcl_nrefcalls *
+ sizeof(*rc_list->rcl_refcalls),
+ GFP_KERNEL);
+ if (unlikely(rc_list->rcl_refcalls == NULL))
+ goto out;
+ for (i = 0; i < rc_list->rcl_nrefcalls; i++) {
+ rc_list->rcl_refcalls[i].rc_sequenceid = ntohl(*p++);
+ rc_list->rcl_refcalls[i].rc_slotid = ntohl(*p++);
+ }
+ }
+ status = 0;
+
+out:
+ return status;
+}
+
+static unsigned decode_cb_sequence_args(struct svc_rqst *rqstp,
+ struct xdr_stream *xdr,
+ struct cb_sequenceargs *args)
+{
+ uint32_t *p;
+ int i;
+ unsigned status;
+
+ status = decode_sessionid(xdr, &args->csa_sessionid);
+ if (status)
+ goto out;
+
+ status = htonl(NFS4ERR_RESOURCE);
+ p = read_buf(xdr, 5 * sizeof(uint32_t));
+ if (unlikely(p == NULL))
+ goto out;
+
+ args->csa_addr = svc_addr(rqstp);
+ args->csa_sequenceid = ntohl(*p++);
+ args->csa_slotid = ntohl(*p++);
+ args->csa_highestslotid = ntohl(*p++);
+ args->csa_cachethis = ntohl(*p++);
+ args->csa_nrclists = ntohl(*p++);
+ args->csa_rclists = NULL;
+ if (args->csa_nrclists) {
+ args->csa_rclists = kmalloc(args->csa_nrclists *
+ sizeof(*args->csa_rclists),
+ GFP_KERNEL);
+ if (unlikely(args->csa_rclists == NULL))
+ goto out;
+
+ for (i = 0; i < args->csa_nrclists; i++) {
+ status = decode_rc_list(xdr, &args->csa_rclists[i]);
+ if (status)
+ goto out_free;
+ }
+ }
+ status = 0;
+
+ dprintk("%s: sessionid %x:%x:%x:%x sequenceid %u slotid %u "
+ "highestslotid %u cachethis %d nrclists %u\n",
+ __func__,
+ ((u32 *)&args->csa_sessionid)[0],
+ ((u32 *)&args->csa_sessionid)[1],
+ ((u32 *)&args->csa_sessionid)[2],
+ ((u32 *)&args->csa_sessionid)[3],
+ args->csa_sequenceid, args->csa_slotid,
+ args->csa_highestslotid, args->csa_cachethis,
+ args->csa_nrclists);
+out:
+ dprintk("%s: exit with status = %d\n", __func__, ntohl(status));
+ return status;
+
+out_free:
+ for (i = 0; i < args->csa_nrclists; i++)
+ kfree(args->csa_rclists[i].rcl_refcalls);
+ kfree(args->csa_rclists);
+ goto out;
+}
+
+#endif /* CONFIG_NFS_V4_1 */
+
static __be32 encode_string(struct xdr_stream *xdr, unsigned int len, const char *str)
{
__be32 *p;
@@ -353,31 +477,134 @@
return status;
}
-static __be32 process_op(struct svc_rqst *rqstp,
+#if defined(CONFIG_NFS_V4_1)
+
+static unsigned encode_sessionid(struct xdr_stream *xdr,
+ const struct nfs4_sessionid *sid)
+{
+ uint32_t *p;
+ int len = NFS4_MAX_SESSIONID_LEN;
+
+ p = xdr_reserve_space(xdr, len);
+ if (unlikely(p == NULL))
+ return htonl(NFS4ERR_RESOURCE);
+
+ memcpy(p, sid, len);
+ return 0;
+}
+
+static unsigned encode_cb_sequence_res(struct svc_rqst *rqstp,
+ struct xdr_stream *xdr,
+ const struct cb_sequenceres *res)
+{
+ uint32_t *p;
+ unsigned status = res->csr_status;
+
+ if (unlikely(status != 0))
+ goto out;
+
+ encode_sessionid(xdr, &res->csr_sessionid);
+
+ p = xdr_reserve_space(xdr, 4 * sizeof(uint32_t));
+ if (unlikely(p == NULL))
+ return htonl(NFS4ERR_RESOURCE);
+
+ *p++ = htonl(res->csr_sequenceid);
+ *p++ = htonl(res->csr_slotid);
+ *p++ = htonl(res->csr_highestslotid);
+ *p++ = htonl(res->csr_target_highestslotid);
+out:
+ dprintk("%s: exit with status = %d\n", __func__, ntohl(status));
+ return status;
+}
+
+static __be32
+preprocess_nfs41_op(int nop, unsigned int op_nr, struct callback_op **op)
+{
+ if (op_nr == OP_CB_SEQUENCE) {
+ if (nop != 0)
+ return htonl(NFS4ERR_SEQUENCE_POS);
+ } else {
+ if (nop == 0)
+ return htonl(NFS4ERR_OP_NOT_IN_SESSION);
+ }
+
+ switch (op_nr) {
+ case OP_CB_GETATTR:
+ case OP_CB_RECALL:
+ case OP_CB_SEQUENCE:
+ *op = &callback_ops[op_nr];
+ break;
+
+ case OP_CB_LAYOUTRECALL:
+ case OP_CB_NOTIFY_DEVICEID:
+ case OP_CB_NOTIFY:
+ case OP_CB_PUSH_DELEG:
+ case OP_CB_RECALL_ANY:
+ case OP_CB_RECALLABLE_OBJ_AVAIL:
+ case OP_CB_RECALL_SLOT:
+ case OP_CB_WANTS_CANCELLED:
+ case OP_CB_NOTIFY_LOCK:
+ return htonl(NFS4ERR_NOTSUPP);
+
+ default:
+ return htonl(NFS4ERR_OP_ILLEGAL);
+ }
+
+ return htonl(NFS_OK);
+}
+
+#else /* CONFIG_NFS_V4_1 */
+
+static __be32
+preprocess_nfs41_op(int nop, unsigned int op_nr, struct callback_op **op)
+{
+ return htonl(NFS4ERR_MINOR_VERS_MISMATCH);
+}
+
+#endif /* CONFIG_NFS_V4_1 */
+
+static __be32
+preprocess_nfs4_op(unsigned int op_nr, struct callback_op **op)
+{
+ switch (op_nr) {
+ case OP_CB_GETATTR:
+ case OP_CB_RECALL:
+ *op = &callback_ops[op_nr];
+ break;
+ default:
+ return htonl(NFS4ERR_OP_ILLEGAL);
+ }
+
+ return htonl(NFS_OK);
+}
+
+static __be32 process_op(uint32_t minorversion, int nop,
+ struct svc_rqst *rqstp,
struct xdr_stream *xdr_in, void *argp,
struct xdr_stream *xdr_out, void *resp)
{
struct callback_op *op = &callback_ops[0];
unsigned int op_nr = OP_CB_ILLEGAL;
- __be32 status = 0;
+ __be32 status;
long maxlen;
__be32 res;
dprintk("%s: start\n", __func__);
status = decode_op_hdr(xdr_in, &op_nr);
- if (likely(status == 0)) {
- switch (op_nr) {
- case OP_CB_GETATTR:
- case OP_CB_RECALL:
- op = &callback_ops[op_nr];
- break;
- default:
- op_nr = OP_CB_ILLEGAL;
- op = &callback_ops[0];
- status = htonl(NFS4ERR_OP_ILLEGAL);
- }
+ if (unlikely(status)) {
+ status = htonl(NFS4ERR_OP_ILLEGAL);
+ goto out;
}
+ dprintk("%s: minorversion=%d nop=%d op_nr=%u\n",
+ __func__, minorversion, nop, op_nr);
+
+ status = minorversion ? preprocess_nfs41_op(nop, op_nr, &op) :
+ preprocess_nfs4_op(op_nr, &op);
+ if (status == htonl(NFS4ERR_OP_ILLEGAL))
+ op_nr = OP_CB_ILLEGAL;
+out:
maxlen = xdr_out->end - xdr_out->p;
if (maxlen > 0 && maxlen < PAGE_SIZE) {
if (likely(status == 0 && op->decode_args != NULL))
@@ -425,7 +652,8 @@
return rpc_system_err;
while (status == 0 && nops != hdr_arg.nops) {
- status = process_op(rqstp, &xdr_in, argp, &xdr_out, resp);
+ status = process_op(hdr_arg.minorversion, nops,
+ rqstp, &xdr_in, argp, &xdr_out, resp);
nops++;
}
@@ -452,7 +680,15 @@
.process_op = (callback_process_op_t)nfs4_callback_recall,
.decode_args = (callback_decode_arg_t)decode_recall_args,
.res_maxsize = CB_OP_RECALL_RES_MAXSZ,
- }
+ },
+#if defined(CONFIG_NFS_V4_1)
+ [OP_CB_SEQUENCE] = {
+ .process_op = (callback_process_op_t)nfs4_callback_sequence,
+ .decode_args = (callback_decode_arg_t)decode_cb_sequence_args,
+ .encode_res = (callback_encode_res_t)encode_cb_sequence_res,
+ .res_maxsize = CB_OP_SEQUENCE_RES_MAXSZ,
+ },
+#endif /* CONFIG_NFS_V4_1 */
};
/*
diff --git a/fs/nfs/client.c b/fs/nfs/client.c
index 75c9cd2..c2d0616 100644
--- a/fs/nfs/client.c
+++ b/fs/nfs/client.c
@@ -37,6 +37,7 @@
#include <linux/in6.h>
#include <net/ipv6.h>
#include <linux/nfs_xdr.h>
+#include <linux/sunrpc/bc_xprt.h>
#include <asm/system.h>
@@ -102,6 +103,7 @@
size_t addrlen;
const struct nfs_rpc_ops *rpc_ops;
int proto;
+ u32 minorversion;
};
/*
@@ -114,18 +116,13 @@
{
struct nfs_client *clp;
struct rpc_cred *cred;
+ int err = -ENOMEM;
if ((clp = kzalloc(sizeof(*clp), GFP_KERNEL)) == NULL)
goto error_0;
clp->rpc_ops = cl_init->rpc_ops;
- if (cl_init->rpc_ops->version == 4) {
- if (nfs_callback_up() < 0)
- goto error_2;
- __set_bit(NFS_CS_CALLBACK, &clp->cl_res_state);
- }
-
atomic_set(&clp->cl_count, 1);
clp->cl_cons_state = NFS_CS_INITING;
@@ -133,9 +130,10 @@
clp->cl_addrlen = cl_init->addrlen;
if (cl_init->hostname) {
+ err = -ENOMEM;
clp->cl_hostname = kstrdup(cl_init->hostname, GFP_KERNEL);
if (!clp->cl_hostname)
- goto error_3;
+ goto error_cleanup;
}
INIT_LIST_HEAD(&clp->cl_superblocks);
@@ -150,6 +148,7 @@
rpc_init_wait_queue(&clp->cl_rpcwaitq, "NFS client");
clp->cl_boot_time = CURRENT_TIME;
clp->cl_state = 1 << NFS4CLNT_LEASE_EXPIRED;
+ clp->cl_minorversion = cl_init->minorversion;
#endif
cred = rpc_lookup_machine_cred();
if (!IS_ERR(cred))
@@ -159,13 +158,10 @@
return clp;
-error_3:
- if (__test_and_clear_bit(NFS_CS_CALLBACK, &clp->cl_res_state))
- nfs_callback_down();
-error_2:
+error_cleanup:
kfree(clp);
error_0:
- return NULL;
+ return ERR_PTR(err);
}
static void nfs4_shutdown_client(struct nfs_client *clp)
@@ -182,12 +178,42 @@
}
/*
+ * Destroy the NFS4 callback service
+ */
+static void nfs4_destroy_callback(struct nfs_client *clp)
+{
+#ifdef CONFIG_NFS_V4
+ if (__test_and_clear_bit(NFS_CS_CALLBACK, &clp->cl_res_state))
+ nfs_callback_down(clp->cl_minorversion);
+#endif /* CONFIG_NFS_V4 */
+}
+
+/*
+ * Clears/puts all minor-version-specific parts from an nfs_client struct,
+ * reverting it to minorversion 0.
+ */
+static void nfs4_clear_client_minor_version(struct nfs_client *clp)
+{
+#ifdef CONFIG_NFS_V4_1
+ if (nfs4_has_session(clp)) {
+ nfs4_destroy_session(clp->cl_session);
+ clp->cl_session = NULL;
+ }
+
+ clp->cl_call_sync = _nfs4_call_sync;
+#endif /* CONFIG_NFS_V4_1 */
+
+ nfs4_destroy_callback(clp);
+}
+
+/*
* Destroy a shared client record
*/
static void nfs_free_client(struct nfs_client *clp)
{
dprintk("--> nfs_free_client(%u)\n", clp->rpc_ops->version);
+ nfs4_clear_client_minor_version(clp);
nfs4_shutdown_client(clp);
nfs_fscache_release_client_cookie(clp);
@@ -196,9 +222,6 @@
if (!IS_ERR(clp->cl_rpcclient))
rpc_shutdown_client(clp->cl_rpcclient);
- if (__test_and_clear_bit(NFS_CS_CALLBACK, &clp->cl_res_state))
- nfs_callback_down();
-
if (clp->cl_machine_cred != NULL)
put_rpccred(clp->cl_machine_cred);
@@ -347,7 +370,8 @@
struct sockaddr *clap = (struct sockaddr *)&clp->cl_addr;
/* Don't match clients that failed to initialise properly */
- if (clp->cl_cons_state != NFS_CS_READY)
+ if (!(clp->cl_cons_state == NFS_CS_READY ||
+ clp->cl_cons_state == NFS_CS_SESSION_INITING))
continue;
/* Different NFS versions cannot share the same nfs_client */
@@ -420,7 +444,9 @@
if (clp->cl_proto != data->proto)
continue;
-
+ /* Match nfsv4 minorversion */
+ if (clp->cl_minorversion != data->minorversion)
+ continue;
/* Match the full socket address */
if (!nfs_sockaddr_cmp(sap, clap))
continue;
@@ -456,9 +482,10 @@
spin_unlock(&nfs_client_lock);
new = nfs_alloc_client(cl_init);
- } while (new);
+ } while (!IS_ERR(new));
- return ERR_PTR(-ENOMEM);
+ dprintk("--> nfs_get_client() = %ld [failed]\n", PTR_ERR(new));
+ return new;
/* install a new client and return with it unready */
install_client:
@@ -478,7 +505,7 @@
nfs_free_client(new);
error = wait_event_killable(nfs_client_active_wq,
- clp->cl_cons_state != NFS_CS_INITING);
+ clp->cl_cons_state < NFS_CS_INITING);
if (error < 0) {
nfs_put_client(clp);
return ERR_PTR(-ERESTARTSYS);
@@ -499,13 +526,29 @@
/*
* Mark a server as ready or failed
*/
-static void nfs_mark_client_ready(struct nfs_client *clp, int state)
+void nfs_mark_client_ready(struct nfs_client *clp, int state)
{
clp->cl_cons_state = state;
wake_up_all(&nfs_client_active_wq);
}
/*
+ * With sessions, the client is not marked ready until after a
+ * successful EXCHANGE_ID and CREATE_SESSION.
+ *
+ * Map cl_cons_state errors to EPROTONOSUPPORT to indicate that
+ * other versions of NFS can be tried.
+ */
+int nfs4_check_client_ready(struct nfs_client *clp)
+{
+ if (!nfs4_has_session(clp))
+ return 0;
+ if (clp->cl_cons_state < NFS_CS_READY)
+ return -EPROTONOSUPPORT;
+ return 0;
+}
+
+/*
* Initialise the timeout values for a connection
*/
static void nfs_init_timeout_values(struct rpc_timeout *to, int proto,
@@ -1050,6 +1093,61 @@
#ifdef CONFIG_NFS_V4
/*
+ * Initialize the NFS4 callback service
+ */
+static int nfs4_init_callback(struct nfs_client *clp)
+{
+ int error;
+
+ if (clp->rpc_ops->version == 4) {
+ if (nfs4_has_session(clp)) {
+ error = xprt_setup_backchannel(
+ clp->cl_rpcclient->cl_xprt,
+ NFS41_BC_MIN_CALLBACKS);
+ if (error < 0)
+ return error;
+ }
+
+ error = nfs_callback_up(clp->cl_minorversion,
+ clp->cl_rpcclient->cl_xprt);
+ if (error < 0) {
+ dprintk("%s: failed to start callback. Error = %d\n",
+ __func__, error);
+ return error;
+ }
+ __set_bit(NFS_CS_CALLBACK, &clp->cl_res_state);
+ }
+ return 0;
+}
+
+/*
+ * Initialize the minor version specific parts of an NFS4 client record
+ */
+static int nfs4_init_client_minor_version(struct nfs_client *clp)
+{
+ clp->cl_call_sync = _nfs4_call_sync;
+
+#if defined(CONFIG_NFS_V4_1)
+ if (clp->cl_minorversion) {
+ struct nfs4_session *session = NULL;
+ /*
+ * Create the session and mark it expired.
+ * When a SEQUENCE operation encounters the expired session
+ * it will do session recovery to initialize it.
+ */
+ session = nfs4_alloc_session(clp);
+ if (!session)
+ return -ENOMEM;
+
+ clp->cl_session = session;
+ clp->cl_call_sync = _nfs4_call_sync_session;
+ }
+#endif /* CONFIG_NFS_V4_1 */
+
+ return nfs4_init_callback(clp);
+}
+
+/*
* Initialise an NFS4 client record
*/
static int nfs4_init_client(struct nfs_client *clp,
@@ -1083,7 +1181,12 @@
}
__set_bit(NFS_CS_IDMAP, &clp->cl_res_state);
- nfs_mark_client_ready(clp, NFS_CS_READY);
+ error = nfs4_init_client_minor_version(clp);
+ if (error < 0)
+ goto error;
+
+ if (!nfs4_has_session(clp))
+ nfs_mark_client_ready(clp, NFS_CS_READY);
return 0;
error:
@@ -1101,7 +1204,8 @@
const size_t addrlen,
const char *ip_addr,
rpc_authflavor_t authflavour,
- int proto, const struct rpc_timeout *timeparms)
+ int proto, const struct rpc_timeout *timeparms,
+ u32 minorversion)
{
struct nfs_client_initdata cl_init = {
.hostname = hostname,
@@ -1109,6 +1213,7 @@
.addrlen = addrlen,
.rpc_ops = &nfs_v4_clientops,
.proto = proto,
+ .minorversion = minorversion,
};
struct nfs_client *clp;
int error;
@@ -1138,6 +1243,36 @@
}
/*
+ * Initialize a session.
+ * Note: save the mount rsize and wsize for create_server negotiation.
+ */
+static void nfs4_init_session(struct nfs_client *clp,
+ unsigned int wsize, unsigned int rsize)
+{
+#if defined(CONFIG_NFS_V4_1)
+ if (nfs4_has_session(clp)) {
+ clp->cl_session->fc_attrs.max_rqst_sz = wsize;
+ clp->cl_session->fc_attrs.max_resp_sz = rsize;
+ }
+#endif /* CONFIG_NFS_V4_1 */
+}
+
+/*
+ * Session has been established, and the client marked ready.
+ * Set the mount rsize and wsize with the negotiated fore channel
+ * attributes, which will be bounds-checked in nfs_server_set_fsinfo.
+ */
+static void nfs4_session_set_rwsize(struct nfs_server *server)
+{
+#ifdef CONFIG_NFS_V4_1
+ if (!nfs4_has_session(server->nfs_client))
+ return;
+ server->rsize = server->nfs_client->cl_session->fc_attrs.max_resp_sz;
+ server->wsize = server->nfs_client->cl_session->fc_attrs.max_rqst_sz;
+#endif /* CONFIG_NFS_V4_1 */
+}
+
+/*
* Create a version 4 volume record
*/
static int nfs4_init_server(struct nfs_server *server,
@@ -1164,7 +1299,8 @@
data->client_address,
data->auth_flavors[0],
data->nfs_server.protocol,
- &timeparms);
+ &timeparms,
+ data->minorversion);
if (error < 0)
goto error;
@@ -1214,6 +1350,8 @@
BUG_ON(!server->nfs_client->rpc_ops);
BUG_ON(!server->nfs_client->rpc_ops->file_inode_ops);
+ nfs4_init_session(server->nfs_client, server->wsize, server->rsize);
+
/* Probe the root fh to retrieve its FSID */
error = nfs4_path_walk(server, mntfh, data->nfs_server.export_path);
if (error < 0)
@@ -1224,6 +1362,8 @@
(unsigned long long) server->fsid.minor);
dprintk("Mount FH: %d\n", mntfh->size);
+ nfs4_session_set_rwsize(server);
+
error = nfs_probe_fsinfo(server, mntfh, &fattr);
if (error < 0)
goto error;
@@ -1282,7 +1422,8 @@
parent_client->cl_ipaddr,
data->authflavor,
parent_server->client->cl_xprt->prot,
- parent_server->client->cl_timeout);
+ parent_server->client->cl_timeout,
+ parent_client->cl_minorversion);
if (error < 0)
goto error;
diff --git a/fs/nfs/delegation.c b/fs/nfs/delegation.c
index 968225a..af05b91 100644
--- a/fs/nfs/delegation.c
+++ b/fs/nfs/delegation.c
@@ -68,29 +68,26 @@
{
struct inode *inode = state->inode;
struct file_lock *fl;
- int status;
+ int status = 0;
+ if (inode->i_flock == NULL)
+ goto out;
+
+ /* Protect inode->i_flock using the BKL */
+ lock_kernel();
for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) {
if (!(fl->fl_flags & (FL_POSIX|FL_FLOCK)))
continue;
if (nfs_file_open_context(fl->fl_file) != ctx)
continue;
+ unlock_kernel();
status = nfs4_lock_delegation_recall(state, fl);
- if (status >= 0)
- continue;
- switch (status) {
- default:
- printk(KERN_ERR "%s: unhandled error %d.\n",
- __func__, status);
- case -NFS4ERR_EXPIRED:
- /* kill_proc(fl->fl_pid, SIGLOST, 1); */
- case -NFS4ERR_STALE_CLIENTID:
- nfs4_schedule_state_recovery(NFS_SERVER(inode)->nfs_client);
- goto out_err;
- }
+ if (status < 0)
+ goto out;
+ lock_kernel();
}
- return 0;
-out_err:
+ unlock_kernel();
+out:
return status;
}
@@ -268,7 +265,10 @@
struct nfs_inode *nfsi = NFS_I(inode);
nfs_msync_inode(inode);
- /* Guard against new delegated open calls */
+ /*
+ * Guard against new delegated open/lock/unlock calls and against
+ * state recovery
+ */
down_write(&nfsi->rwsem);
nfs_delegation_claim_opens(inode, &delegation->stateid);
up_write(&nfsi->rwsem);
diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c
index 08f6b04..489fc01 100644
--- a/fs/nfs/direct.c
+++ b/fs/nfs/direct.c
@@ -259,6 +259,9 @@
}
static const struct rpc_call_ops nfs_read_direct_ops = {
+#if defined(CONFIG_NFS_V4_1)
+ .rpc_call_prepare = nfs_read_prepare,
+#endif /* CONFIG_NFS_V4_1 */
.rpc_call_done = nfs_direct_read_result,
.rpc_release = nfs_direct_read_release,
};
@@ -535,6 +538,9 @@
}
static const struct rpc_call_ops nfs_commit_direct_ops = {
+#if defined(CONFIG_NFS_V4_1)
+ .rpc_call_prepare = nfs_write_prepare,
+#endif /* CONFIG_NFS_V4_1 */
.rpc_call_done = nfs_direct_commit_result,
.rpc_release = nfs_direct_commit_release,
};
@@ -673,6 +679,9 @@
}
static const struct rpc_call_ops nfs_write_direct_ops = {
+#if defined(CONFIG_NFS_V4_1)
+ .rpc_call_prepare = nfs_write_prepare,
+#endif /* CONFIG_NFS_V4_1 */
.rpc_call_done = nfs_direct_write_result,
.rpc_release = nfs_direct_write_release,
};
diff --git a/fs/nfs/file.c b/fs/nfs/file.c
index ec7e27d..0055b81 100644
--- a/fs/nfs/file.c
+++ b/fs/nfs/file.c
@@ -48,6 +48,9 @@
size_t count, unsigned int flags);
static ssize_t nfs_file_read(struct kiocb *, const struct iovec *iov,
unsigned long nr_segs, loff_t pos);
+static ssize_t nfs_file_splice_write(struct pipe_inode_info *pipe,
+ struct file *filp, loff_t *ppos,
+ size_t count, unsigned int flags);
static ssize_t nfs_file_write(struct kiocb *, const struct iovec *iov,
unsigned long nr_segs, loff_t pos);
static int nfs_file_flush(struct file *, fl_owner_t id);
@@ -73,6 +76,7 @@
.lock = nfs_lock,
.flock = nfs_flock,
.splice_read = nfs_file_splice_read,
+ .splice_write = nfs_file_splice_write,
.check_flags = nfs_check_flags,
.setlease = nfs_setlease,
};
@@ -587,12 +591,38 @@
goto out;
}
+static ssize_t nfs_file_splice_write(struct pipe_inode_info *pipe,
+ struct file *filp, loff_t *ppos,
+ size_t count, unsigned int flags)
+{
+ struct dentry *dentry = filp->f_path.dentry;
+ struct inode *inode = dentry->d_inode;
+ ssize_t ret;
+
+ dprintk("NFS splice_write(%s/%s, %lu@%llu)\n",
+ dentry->d_parent->d_name.name, dentry->d_name.name,
+ (unsigned long) count, (unsigned long long) *ppos);
+
+ /*
+ * The combination of splice and an O_APPEND destination is disallowed.
+ */
+
+ nfs_add_stats(inode, NFSIOS_NORMALWRITTENBYTES, count);
+
+ ret = generic_file_splice_write(pipe, filp, ppos, count, flags);
+ if (ret >= 0 && nfs_need_sync_write(filp, inode)) {
+ int err = nfs_do_fsync(nfs_file_open_context(filp), inode);
+ if (err < 0)
+ ret = err;
+ }
+ return ret;
+}
+
static int do_getlk(struct file *filp, int cmd, struct file_lock *fl)
{
struct inode *inode = filp->f_mapping->host;
int status = 0;
- lock_kernel();
/* Try local locking first */
posix_test_lock(filp, fl);
if (fl->fl_type != F_UNLCK) {
@@ -608,7 +638,6 @@
status = NFS_PROTO(inode)->lock(filp, cmd, fl);
out:
- unlock_kernel();
return status;
out_noconflict:
fl->fl_type = F_UNLCK;
@@ -650,13 +679,11 @@
* If we're signalled while cleaning up locks on process exit, we
* still need to complete the unlock.
*/
- lock_kernel();
/* Use local locking if mounted with "-onolock" */
if (!(NFS_SERVER(inode)->flags & NFS_MOUNT_NONLM))
status = NFS_PROTO(inode)->lock(filp, cmd, fl);
else
status = do_vfs_lock(filp, fl);
- unlock_kernel();
return status;
}
@@ -673,13 +700,11 @@
if (status != 0)
goto out;
- lock_kernel();
/* Use local locking if mounted with "-onolock" */
if (!(NFS_SERVER(inode)->flags & NFS_MOUNT_NONLM))
status = NFS_PROTO(inode)->lock(filp, cmd, fl);
else
status = do_vfs_lock(filp, fl);
- unlock_kernel();
if (status < 0)
goto out;
/*
diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
index e4d6a83..7dd90a6 100644
--- a/fs/nfs/internal.h
+++ b/fs/nfs/internal.h
@@ -2,6 +2,7 @@
* NFS internal definitions
*/
+#include "nfs4_fs.h"
#include <linux/mount.h>
#include <linux/security.h>
@@ -17,6 +18,18 @@
*/
#define NFS_MAX_READAHEAD (RPC_DEF_SLOT_TABLE - 1)
+/*
+ * Determine if sessions are in use.
+ */
+static inline int nfs4_has_session(const struct nfs_client *clp)
+{
+#ifdef CONFIG_NFS_V4_1
+ if (clp->cl_session)
+ return 1;
+#endif /* CONFIG_NFS_V4_1 */
+ return 0;
+}
+
struct nfs_clone_mount {
const struct super_block *sb;
const struct dentry *dentry;
@@ -30,6 +43,12 @@
};
/*
+ * Note: RFC 1813 doesn't limit the number of auth flavors that
+ * a server can return, so make something up.
+ */
+#define NFS_MAX_SECFLAVORS (12)
+
+/*
* In-kernel mount arguments
*/
struct nfs_parsed_mount_data {
@@ -44,6 +63,7 @@
unsigned int auth_flavor_len;
rpc_authflavor_t auth_flavors[1];
char *client_address;
+ unsigned int minorversion;
char *fscache_uniq;
struct {
@@ -77,6 +97,8 @@
unsigned short protocol;
struct nfs_fh *fh;
int noresvport;
+ unsigned int *auth_flav_len;
+ rpc_authflavor_t *auth_flavs;
};
extern int nfs_mount(struct nfs_mount_request *info);
@@ -99,6 +121,8 @@
extern struct nfs_server *nfs_clone_server(struct nfs_server *,
struct nfs_fh *,
struct nfs_fattr *);
+extern void nfs_mark_client_ready(struct nfs_client *clp, int state);
+extern int nfs4_check_client_ready(struct nfs_client *clp);
#ifdef CONFIG_PROC_FS
extern int __init nfs_fs_proc_init(void);
extern void nfs_fs_proc_exit(void);
@@ -146,6 +170,20 @@
extern struct rpc_procinfo nfs3_procedures[];
extern __be32 *nfs3_decode_dirent(__be32 *, struct nfs_entry *, int);
+/* nfs4proc.c */
+static inline void nfs4_restart_rpc(struct rpc_task *task,
+ const struct nfs_client *clp)
+{
+#ifdef CONFIG_NFS_V4_1
+ if (nfs4_has_session(clp) &&
+ test_bit(NFS4CLNT_SESSION_SETUP, &clp->cl_state)) {
+ rpc_restart_call_prepare(task);
+ return;
+ }
+#endif /* CONFIG_NFS_V4_1 */
+ rpc_restart_call(task);
+}
+
/* nfs4xdr.c */
#ifdef CONFIG_NFS_V4
extern __be32 *nfs4_decode_dirent(__be32 *p, struct nfs_entry *entry, int plus);
@@ -205,6 +243,38 @@
const char *path);
#endif
+/* read.c */
+extern void nfs_read_prepare(struct rpc_task *task, void *calldata);
+
+/* write.c */
+extern void nfs_write_prepare(struct rpc_task *task, void *calldata);
+
+/* nfs4proc.c */
+extern int _nfs4_call_sync(struct nfs_server *server,
+ struct rpc_message *msg,
+ struct nfs4_sequence_args *args,
+ struct nfs4_sequence_res *res,
+ int cache_reply);
+extern int _nfs4_call_sync_session(struct nfs_server *server,
+ struct rpc_message *msg,
+ struct nfs4_sequence_args *args,
+ struct nfs4_sequence_res *res,
+ int cache_reply);
+
+#ifdef CONFIG_NFS_V4_1
+extern void nfs41_sequence_free_slot(const struct nfs_client *,
+ struct nfs4_sequence_res *res);
+#endif /* CONFIG_NFS_V4_1 */
+
+static inline void nfs4_sequence_free_slot(const struct nfs_client *clp,
+ struct nfs4_sequence_res *res)
+{
+#ifdef CONFIG_NFS_V4_1
+ if (nfs4_has_session(clp))
+ nfs41_sequence_free_slot(clp, res);
+#endif /* CONFIG_NFS_V4_1 */
+}
+
/*
* Determine the device name as a string
*/
diff --git a/fs/nfs/mount_clnt.c b/fs/nfs/mount_clnt.c
index ca905a5..38ef9ea 100644
--- a/fs/nfs/mount_clnt.c
+++ b/fs/nfs/mount_clnt.c
@@ -20,8 +20,116 @@
# define NFSDBG_FACILITY NFSDBG_MOUNT
#endif
+/*
+ * Defined by RFC 1094, section A.3; and RFC 1813, section 5.1.4
+ */
+#define MNTPATHLEN (1024)
+
+/*
+ * XDR data type sizes
+ */
+#define encode_dirpath_sz (1 + XDR_QUADLEN(MNTPATHLEN))
+#define MNT_status_sz (1)
+#define MNT_fhs_status_sz (1)
+#define MNT_fhandle_sz XDR_QUADLEN(NFS2_FHSIZE)
+#define MNT_fhandle3_sz (1 + XDR_QUADLEN(NFS3_FHSIZE))
+#define MNT_authflav3_sz (1 + NFS_MAX_SECFLAVORS)
+
+/*
+ * XDR argument and result sizes
+ */
+#define MNT_enc_dirpath_sz encode_dirpath_sz
+#define MNT_dec_mountres_sz (MNT_status_sz + MNT_fhandle_sz)
+#define MNT_dec_mountres3_sz (MNT_status_sz + MNT_fhandle_sz + \
+ MNT_authflav3_sz)
+
+/*
+ * Defined by RFC 1094, section A.5
+ */
+enum {
+ MOUNTPROC_NULL = 0,
+ MOUNTPROC_MNT = 1,
+ MOUNTPROC_DUMP = 2,
+ MOUNTPROC_UMNT = 3,
+ MOUNTPROC_UMNTALL = 4,
+ MOUNTPROC_EXPORT = 5,
+};
+
+/*
+ * Defined by RFC 1813, section 5.2
+ */
+enum {
+ MOUNTPROC3_NULL = 0,
+ MOUNTPROC3_MNT = 1,
+ MOUNTPROC3_DUMP = 2,
+ MOUNTPROC3_UMNT = 3,
+ MOUNTPROC3_UMNTALL = 4,
+ MOUNTPROC3_EXPORT = 5,
+};
+
static struct rpc_program mnt_program;
+/*
+ * Defined by OpenGroup XNFS Version 3W, chapter 8
+ */
+enum mountstat {
+ MNT_OK = 0,
+ MNT_EPERM = 1,
+ MNT_ENOENT = 2,
+ MNT_EACCES = 13,
+ MNT_EINVAL = 22,
+};
+
+static struct {
+ u32 status;
+ int errno;
+} mnt_errtbl[] = {
+ { .status = MNT_OK, .errno = 0, },
+ { .status = MNT_EPERM, .errno = -EPERM, },
+ { .status = MNT_ENOENT, .errno = -ENOENT, },
+ { .status = MNT_EACCES, .errno = -EACCES, },
+ { .status = MNT_EINVAL, .errno = -EINVAL, },
+};
+
+/*
+ * Defined by RFC 1813, section 5.1.5
+ */
+enum mountstat3 {
+ MNT3_OK = 0, /* no error */
+ MNT3ERR_PERM = 1, /* Not owner */
+ MNT3ERR_NOENT = 2, /* No such file or directory */
+ MNT3ERR_IO = 5, /* I/O error */
+ MNT3ERR_ACCES = 13, /* Permission denied */
+ MNT3ERR_NOTDIR = 20, /* Not a directory */
+ MNT3ERR_INVAL = 22, /* Invalid argument */
+ MNT3ERR_NAMETOOLONG = 63, /* Filename too long */
+ MNT3ERR_NOTSUPP = 10004, /* Operation not supported */
+ MNT3ERR_SERVERFAULT = 10006, /* A failure on the server */
+};
+
+static struct {
+ u32 status;
+ int errno;
+} mnt3_errtbl[] = {
+ { .status = MNT3_OK, .errno = 0, },
+ { .status = MNT3ERR_PERM, .errno = -EPERM, },
+ { .status = MNT3ERR_NOENT, .errno = -ENOENT, },
+ { .status = MNT3ERR_IO, .errno = -EIO, },
+ { .status = MNT3ERR_ACCES, .errno = -EACCES, },
+ { .status = MNT3ERR_NOTDIR, .errno = -ENOTDIR, },
+ { .status = MNT3ERR_INVAL, .errno = -EINVAL, },
+ { .status = MNT3ERR_NAMETOOLONG, .errno = -ENAMETOOLONG, },
+ { .status = MNT3ERR_NOTSUPP, .errno = -ENOTSUPP, },
+ { .status = MNT3ERR_SERVERFAULT, .errno = -ESERVERFAULT, },
+};
+
+struct mountres {
+ int errno;
+ struct nfs_fh *fh;
+ unsigned int *auth_count;
+ rpc_authflavor_t *auth_flavors;
+};
+
struct mnt_fhstatus {
u32 status;
struct nfs_fh *fh;
@@ -35,8 +143,10 @@
*/
int nfs_mount(struct nfs_mount_request *info)
{
- struct mnt_fhstatus result = {
- .fh = info->fh
+ struct mountres result = {
+ .fh = info->fh,
+ .auth_count = info->auth_flav_len,
+ .auth_flavors = info->auth_flavs,
};
struct rpc_message msg = {
.rpc_argp = info->dirpath,
@@ -68,14 +178,14 @@
if (info->version == NFS_MNT3_VERSION)
msg.rpc_proc = &mnt_clnt->cl_procinfo[MOUNTPROC3_MNT];
else
- msg.rpc_proc = &mnt_clnt->cl_procinfo[MNTPROC_MNT];
+ msg.rpc_proc = &mnt_clnt->cl_procinfo[MOUNTPROC_MNT];
status = rpc_call_sync(mnt_clnt, &msg, 0);
rpc_shutdown_client(mnt_clnt);
if (status < 0)
goto out_call_err;
- if (result.status != 0)
+ if (result.errno != 0)
goto out_mnt_err;
dprintk("NFS: MNT request succeeded\n");
@@ -86,72 +196,215 @@
out_clnt_err:
status = PTR_ERR(mnt_clnt);
- dprintk("NFS: failed to create RPC client, status=%d\n", status);
+ dprintk("NFS: failed to create MNT RPC client, status=%d\n", status);
goto out;
out_call_err:
- dprintk("NFS: failed to start MNT request, status=%d\n", status);
+ dprintk("NFS: MNT request failed, status=%d\n", status);
goto out;
out_mnt_err:
- dprintk("NFS: MNT server returned result %d\n", result.status);
- status = nfs_stat_to_errno(result.status);
+ dprintk("NFS: MNT server returned result %d\n", result.errno);
+ status = result.errno;
goto out;
}
/*
* XDR encode/decode functions for MOUNT
*/
-static int xdr_encode_dirpath(struct rpc_rqst *req, __be32 *p,
- const char *path)
-{
- p = xdr_encode_string(p, path);
- req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
+static int encode_mntdirpath(struct xdr_stream *xdr, const char *pathname)
+{
+ const u32 pathname_len = strlen(pathname);
+ __be32 *p;
+
+ if (unlikely(pathname_len > MNTPATHLEN))
+ return -EIO;
+
+ p = xdr_reserve_space(xdr, sizeof(u32) + pathname_len);
+ if (unlikely(p == NULL))
+ return -EIO;
+ xdr_encode_opaque(p, pathname, pathname_len);
+
return 0;
}
-static int xdr_decode_fhstatus(struct rpc_rqst *req, __be32 *p,
- struct mnt_fhstatus *res)
+static int mnt_enc_dirpath(struct rpc_rqst *req, __be32 *p,
+ const char *dirpath)
+{
+ struct xdr_stream xdr;
+
+ xdr_init_encode(&xdr, &req->rq_snd_buf, p);
+ return encode_mntdirpath(&xdr, dirpath);
+}
+
+/*
+ * RFC 1094: "A non-zero status indicates some sort of error. In this
+ * case, the status is a UNIX error number." This can be problematic
+ * if the server and client use different errno values for the same
+ * error.
+ *
+ * However, the OpenGroup XNFS spec provides a simple mapping that is
+ * independent of local errno values on the server and the client.
+ */
+static int decode_status(struct xdr_stream *xdr, struct mountres *res)
+{
+ unsigned int i;
+ u32 status;
+ __be32 *p;
+
+ p = xdr_inline_decode(xdr, sizeof(status));
+ if (unlikely(p == NULL))
+ return -EIO;
+ status = ntohl(*p);
+
+ for (i = 0; i < ARRAY_SIZE(mnt_errtbl); i++) {
+ if (mnt_errtbl[i].status == status) {
+ res->errno = mnt_errtbl[i].errno;
+ return 0;
+ }
+ }
+
+ dprintk("NFS: unrecognized MNT status code: %u\n", status);
+ res->errno = -EACCES;
+ return 0;
+}
+
+static int decode_fhandle(struct xdr_stream *xdr, struct mountres *res)
{
struct nfs_fh *fh = res->fh;
+ __be32 *p;
- if ((res->status = ntohl(*p++)) == 0) {
- fh->size = NFS2_FHSIZE;
- memcpy(fh->data, p, NFS2_FHSIZE);
- }
+ p = xdr_inline_decode(xdr, NFS2_FHSIZE);
+ if (unlikely(p == NULL))
+ return -EIO;
+
+ fh->size = NFS2_FHSIZE;
+ memcpy(fh->data, p, NFS2_FHSIZE);
return 0;
}
-static int xdr_decode_fhstatus3(struct rpc_rqst *req, __be32 *p,
- struct mnt_fhstatus *res)
+static int mnt_dec_mountres(struct rpc_rqst *req, __be32 *p,
+ struct mountres *res)
+{
+ struct xdr_stream xdr;
+ int status;
+
+ xdr_init_decode(&xdr, &req->rq_rcv_buf, p);
+
+ status = decode_status(&xdr, res);
+ if (unlikely(status != 0 || res->errno != 0))
+ return status;
+ return decode_fhandle(&xdr, res);
+}
+
+static int decode_fhs_status(struct xdr_stream *xdr, struct mountres *res)
+{
+ unsigned int i;
+ u32 status;
+ __be32 *p;
+
+ p = xdr_inline_decode(xdr, sizeof(status));
+ if (unlikely(p == NULL))
+ return -EIO;
+ status = ntohl(*p);
+
+ for (i = 0; i < ARRAY_SIZE(mnt3_errtbl); i++) {
+ if (mnt3_errtbl[i].status == status) {
+ res->errno = mnt3_errtbl[i].errno;
+ return 0;
+ }
+ }
+
+ dprintk("NFS: unrecognized MNT3 status code: %u\n", status);
+ res->errno = -EACCES;
+ return 0;
+}
+
+static int decode_fhandle3(struct xdr_stream *xdr, struct mountres *res)
{
struct nfs_fh *fh = res->fh;
- unsigned size;
+ u32 size;
+ __be32 *p;
- if ((res->status = ntohl(*p++)) == 0) {
- size = ntohl(*p++);
- if (size <= NFS3_FHSIZE && size != 0) {
- fh->size = size;
- memcpy(fh->data, p, size);
- } else
- res->status = -EBADHANDLE;
- }
+ p = xdr_inline_decode(xdr, sizeof(size));
+ if (unlikely(p == NULL))
+ return -EIO;
+
+ size = ntohl(*p++);
+ if (size > NFS3_FHSIZE || size == 0)
+ return -EIO;
+
+ p = xdr_inline_decode(xdr, size);
+ if (unlikely(p == NULL))
+ return -EIO;
+
+ fh->size = size;
+ memcpy(fh->data, p, size);
return 0;
}
-#define MNT_dirpath_sz (1 + 256)
-#define MNT_fhstatus_sz (1 + 8)
-#define MNT_fhstatus3_sz (1 + 16)
+static int decode_auth_flavors(struct xdr_stream *xdr, struct mountres *res)
+{
+ rpc_authflavor_t *flavors = res->auth_flavors;
+ unsigned int *count = res->auth_count;
+ u32 entries, i;
+ __be32 *p;
+
+ if (*count == 0)
+ return 0;
+
+ p = xdr_inline_decode(xdr, sizeof(entries));
+ if (unlikely(p == NULL))
+ return -EIO;
+ entries = ntohl(*p);
+ dprintk("NFS: received %u auth flavors\n", entries);
+ if (entries > NFS_MAX_SECFLAVORS)
+ entries = NFS_MAX_SECFLAVORS;
+
+ p = xdr_inline_decode(xdr, sizeof(u32) * entries);
+ if (unlikely(p == NULL))
+ return -EIO;
+
+ if (entries > *count)
+ entries = *count;
+
+ for (i = 0; i < entries; i++) {
+ flavors[i] = ntohl(*p++);
+ dprintk("NFS:\tflavor %u: %d\n", i, flavors[i]);
+ }
+ *count = i;
+
+ return 0;
+}
+
+static int mnt_dec_mountres3(struct rpc_rqst *req, __be32 *p,
+ struct mountres *res)
+{
+ struct xdr_stream xdr;
+ int status;
+
+ xdr_init_decode(&xdr, &req->rq_rcv_buf, p);
+
+ status = decode_fhs_status(&xdr, res);
+ if (unlikely(status != 0 || res->errno != 0))
+ return status;
+ status = decode_fhandle3(&xdr, res);
+ if (unlikely(status != 0)) {
+ res->errno = -EBADHANDLE;
+ return 0;
+ }
+ return decode_auth_flavors(&xdr, res);
+}
static struct rpc_procinfo mnt_procedures[] = {
- [MNTPROC_MNT] = {
- .p_proc = MNTPROC_MNT,
- .p_encode = (kxdrproc_t) xdr_encode_dirpath,
- .p_decode = (kxdrproc_t) xdr_decode_fhstatus,
- .p_arglen = MNT_dirpath_sz,
- .p_replen = MNT_fhstatus_sz,
- .p_statidx = MNTPROC_MNT,
+ [MOUNTPROC_MNT] = {
+ .p_proc = MOUNTPROC_MNT,
+ .p_encode = (kxdrproc_t)mnt_enc_dirpath,
+ .p_decode = (kxdrproc_t)mnt_dec_mountres,
+ .p_arglen = MNT_enc_dirpath_sz,
+ .p_replen = MNT_dec_mountres_sz,
+ .p_statidx = MOUNTPROC_MNT,
.p_name = "MOUNT",
},
};
@@ -159,10 +412,10 @@
static struct rpc_procinfo mnt3_procedures[] = {
[MOUNTPROC3_MNT] = {
.p_proc = MOUNTPROC3_MNT,
- .p_encode = (kxdrproc_t) xdr_encode_dirpath,
- .p_decode = (kxdrproc_t) xdr_decode_fhstatus3,
- .p_arglen = MNT_dirpath_sz,
- .p_replen = MNT_fhstatus3_sz,
+ .p_encode = (kxdrproc_t)mnt_enc_dirpath,
+ .p_decode = (kxdrproc_t)mnt_dec_mountres3,
+ .p_arglen = MNT_enc_dirpath_sz,
+ .p_replen = MNT_dec_mountres3_sz,
.p_statidx = MOUNTPROC3_MNT,
.p_name = "MOUNT",
},
diff --git a/fs/nfs/namespace.c b/fs/nfs/namespace.c
index f01caec..40c7667 100644
--- a/fs/nfs/namespace.c
+++ b/fs/nfs/namespace.c
@@ -65,6 +65,11 @@
dentry = dentry->d_parent;
}
spin_unlock(&dcache_lock);
+ if (*end != '/') {
+ if (--buflen < 0)
+ goto Elong;
+ *--end = '/';
+ }
namelen = strlen(base);
/* Strip off excess slashes in base string */
while (namelen > 0 && base[namelen - 1] == '/')
diff --git a/fs/nfs/nfs3acl.c b/fs/nfs/nfs3acl.c
index 6bbf0e6..bac6051 100644
--- a/fs/nfs/nfs3acl.c
+++ b/fs/nfs/nfs3acl.c
@@ -207,8 +207,6 @@
status = nfs_revalidate_inode(server, inode);
if (status < 0)
return ERR_PTR(status);
- if (NFS_I(inode)->cache_validity & NFS_INO_INVALID_ACL)
- nfs_zap_acl_cache(inode);
acl = nfs3_get_cached_acl(inode, type);
if (acl != ERR_PTR(-EAGAIN))
return acl;
diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h
index 84345de..61bc3a3 100644
--- a/fs/nfs/nfs4_fs.h
+++ b/fs/nfs/nfs4_fs.h
@@ -44,6 +44,7 @@
NFS4CLNT_RECLAIM_REBOOT,
NFS4CLNT_RECLAIM_NOGRACE,
NFS4CLNT_DELEGRETURN,
+ NFS4CLNT_SESSION_SETUP,
};
/*
@@ -177,6 +178,14 @@
int state_flag_bit;
int (*recover_open)(struct nfs4_state_owner *, struct nfs4_state *);
int (*recover_lock)(struct nfs4_state *, struct file_lock *);
+ int (*establish_clid)(struct nfs_client *, struct rpc_cred *);
+ struct rpc_cred * (*get_clid_cred)(struct nfs_client *);
+};
+
+struct nfs4_state_maintenance_ops {
+ int (*sched_state_renewal)(struct nfs_client *, struct rpc_cred *);
+ struct rpc_cred * (*get_state_renewal_cred_locked)(struct nfs_client *);
+ int (*renew_lease)(struct nfs_client *, struct rpc_cred *);
};
extern const struct dentry_operations nfs4_dentry_operations;
@@ -193,6 +202,7 @@
extern int nfs4_proc_setclientid_confirm(struct nfs_client *, struct rpc_cred *);
extern int nfs4_proc_async_renew(struct nfs_client *, struct rpc_cred *);
extern int nfs4_proc_renew(struct nfs_client *, struct rpc_cred *);
+extern int nfs4_init_clientid(struct nfs_client *, struct rpc_cred *);
extern int nfs4_do_close(struct path *path, struct nfs4_state *state, int wait);
extern struct dentry *nfs4_atomic_open(struct inode *, struct dentry *, struct nameidata *);
extern int nfs4_open_revalidate(struct inode *, struct dentry *, int, struct nameidata *);
@@ -200,8 +210,26 @@
extern int nfs4_proc_fs_locations(struct inode *dir, const struct qstr *name,
struct nfs4_fs_locations *fs_locations, struct page *page);
-extern struct nfs4_state_recovery_ops nfs4_reboot_recovery_ops;
-extern struct nfs4_state_recovery_ops nfs4_nograce_recovery_ops;
+extern struct nfs4_state_recovery_ops *nfs4_reboot_recovery_ops[];
+extern struct nfs4_state_recovery_ops *nfs4_nograce_recovery_ops[];
+#if defined(CONFIG_NFS_V4_1)
+extern int nfs4_setup_sequence(struct nfs_client *clp,
+ struct nfs4_sequence_args *args, struct nfs4_sequence_res *res,
+ int cache_reply, struct rpc_task *task);
+extern void nfs4_destroy_session(struct nfs4_session *session);
+extern struct nfs4_session *nfs4_alloc_session(struct nfs_client *clp);
+extern int nfs4_proc_create_session(struct nfs_client *, int reset);
+extern int nfs4_proc_destroy_session(struct nfs4_session *);
+#else /* CONFIG_NFS_V4_1 */
+static inline int nfs4_setup_sequence(struct nfs_client *clp,
+ struct nfs4_sequence_args *args, struct nfs4_sequence_res *res,
+ int cache_reply, struct rpc_task *task)
+{
+ return 0;
+}
+#endif /* CONFIG_NFS_V4_1 */
+
+extern struct nfs4_state_maintenance_ops *nfs4_state_renewal_ops[];
extern const u32 nfs4_fattr_bitmap[2];
extern const u32 nfs4_statfs_bitmap[2];
@@ -216,7 +244,12 @@
extern void nfs4_renew_state(struct work_struct *);
/* nfs4state.c */
+struct rpc_cred *nfs4_get_setclientid_cred(struct nfs_client *clp);
struct rpc_cred *nfs4_get_renew_cred_locked(struct nfs_client *clp);
+#if defined(CONFIG_NFS_V4_1)
+struct rpc_cred *nfs4_get_machine_cred_locked(struct nfs_client *clp);
+struct rpc_cred *nfs4_get_exchange_id_cred(struct nfs_client *clp);
+#endif /* CONFIG_NFS_V4_1 */
extern struct nfs4_state_owner * nfs4_get_state_owner(struct nfs_server *, struct rpc_cred *);
extern void nfs4_put_state_owner(struct nfs4_state_owner *);
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index 4674f80..92ce435 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -48,11 +48,14 @@
#include <linux/smp_lock.h>
#include <linux/namei.h>
#include <linux/mount.h>
+#include <linux/module.h>
+#include <linux/sunrpc/bc_xprt.h>
#include "nfs4_fs.h"
#include "delegation.h"
#include "internal.h"
#include "iostat.h"
+#include "callback.h"
#define NFSDBG_FACILITY NFSDBG_PROC
@@ -247,7 +250,25 @@
ret = nfs4_wait_clnt_recover(clp);
if (ret == 0)
exception->retry = 1;
+#if !defined(CONFIG_NFS_V4_1)
break;
+#else /* !defined(CONFIG_NFS_V4_1) */
+ if (!nfs4_has_session(server->nfs_client))
+ break;
+ /* FALLTHROUGH */
+ case -NFS4ERR_BADSESSION:
+ case -NFS4ERR_BADSLOT:
+ case -NFS4ERR_BAD_HIGH_SLOT:
+ case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
+ case -NFS4ERR_DEADSESSION:
+ case -NFS4ERR_SEQ_FALSE_RETRY:
+ case -NFS4ERR_SEQ_MISORDERED:
+ dprintk("%s ERROR: %d Reset session\n", __func__,
+ errorcode);
+ set_bit(NFS4CLNT_SESSION_SETUP, &clp->cl_state);
+ exception->retry = 1;
+ /* FALLTHROUGH */
+#endif /* !defined(CONFIG_NFS_V4_1) */
case -NFS4ERR_FILE_OPEN:
case -NFS4ERR_GRACE:
case -NFS4ERR_DELAY:
@@ -271,6 +292,353 @@
spin_unlock(&clp->cl_lock);
}
+#if defined(CONFIG_NFS_V4_1)
+
+/*
+ * nfs4_free_slot - free a slot and efficiently update slot table.
+ *
+ * Freeing a slot is trivially done by clearing its respective bit
+ * in the bitmap.
+ * If the freed slotid equals highest_used_slotid we want to update it
+ * so that the server can size down the slot table if needed;
+ * otherwise we know that the highest_used_slotid is still in use.
+ * When updating highest_used_slotid there may be "holes" in the bitmap
+ * so we need to scan down from highest_used_slotid to 0 looking for the now
+ * highest slotid in use.
+ * If none found, highest_used_slotid is set to -1.
+ */
+static void
+nfs4_free_slot(struct nfs4_slot_table *tbl, u8 free_slotid)
+{
+ int slotid = free_slotid;
+
+ spin_lock(&tbl->slot_tbl_lock);
+ /* clear used bit in bitmap */
+ __clear_bit(slotid, tbl->used_slots);
+
+ /* update highest_used_slotid when it is freed */
+ if (slotid == tbl->highest_used_slotid) {
+ slotid = find_last_bit(tbl->used_slots, tbl->max_slots);
+ if (slotid >= 0 && slotid < tbl->max_slots)
+ tbl->highest_used_slotid = slotid;
+ else
+ tbl->highest_used_slotid = -1;
+ }
+ rpc_wake_up_next(&tbl->slot_tbl_waitq);
+ spin_unlock(&tbl->slot_tbl_lock);
+ dprintk("%s: free_slotid %u highest_used_slotid %d\n", __func__,
+ free_slotid, tbl->highest_used_slotid);
+}
+
+void nfs41_sequence_free_slot(const struct nfs_client *clp,
+ struct nfs4_sequence_res *res)
+{
+ struct nfs4_slot_table *tbl;
+
+ if (!nfs4_has_session(clp)) {
+ dprintk("%s: No session\n", __func__);
+ return;
+ }
+ tbl = &clp->cl_session->fc_slot_table;
+ if (res->sr_slotid == NFS4_MAX_SLOT_TABLE) {
+ dprintk("%s: No slot\n", __func__);
+ /* just wake up the next waiter since
+ * we may not have consumed a slot after all */
+ rpc_wake_up_next(&tbl->slot_tbl_waitq);
+ return;
+ }
+ nfs4_free_slot(tbl, res->sr_slotid);
+ res->sr_slotid = NFS4_MAX_SLOT_TABLE;
+}
+
+static void nfs41_sequence_done(struct nfs_client *clp,
+ struct nfs4_sequence_res *res,
+ int rpc_status)
+{
+ unsigned long timestamp;
+ struct nfs4_slot_table *tbl;
+ struct nfs4_slot *slot;
+
+ /*
+ * sr_status remains 1 if an RPC level error occurred. The server
+ * may or may not have processed the sequence operation.
+ * Proceed as if the server received and processed the sequence
+ * operation.
+ */
+ if (res->sr_status == 1)
+ res->sr_status = NFS_OK;
+
+ /* -ERESTARTSYS can result in skipping nfs41_setup_sequence */
+ if (res->sr_slotid == NFS4_MAX_SLOT_TABLE)
+ goto out;
+
+ tbl = &clp->cl_session->fc_slot_table;
+ slot = tbl->slots + res->sr_slotid;
+
+ if (res->sr_status == 0) {
+ /* Update the slot's sequence and clientid lease timer */
+ ++slot->seq_nr;
+ timestamp = res->sr_renewal_time;
+ spin_lock(&clp->cl_lock);
+ if (time_before(clp->cl_last_renewal, timestamp))
+ clp->cl_last_renewal = timestamp;
+ spin_unlock(&clp->cl_lock);
+ return;
+ }
+out:
+ /* The session may be reset by one of the error handlers. */
+ dprintk("%s: Error %d free the slot \n", __func__, res->sr_status);
+ nfs41_sequence_free_slot(clp, res);
+}
+
+/*
+ * nfs4_find_slot - efficiently look for a free slot
+ *
+ * nfs4_find_slot looks for an unset bit in the used_slots bitmap.
+ * If found, we mark the slot as used, update the highest_used_slotid,
+ * and set up the sequence operation args accordingly.
+ * The slot number is returned if found, or NFS4_MAX_SLOT_TABLE otherwise.
+ *
+ * Note: must be called with the slot_tbl_lock held.
+ */
+static u8
+nfs4_find_slot(struct nfs4_slot_table *tbl, struct rpc_task *task)
+{
+ int slotid;
+ u8 ret_id = NFS4_MAX_SLOT_TABLE;
+ BUILD_BUG_ON((u8)NFS4_MAX_SLOT_TABLE != (int)NFS4_MAX_SLOT_TABLE);
+
+ dprintk("--> %s used_slots=%04lx highest_used=%d max_slots=%d\n",
+ __func__, tbl->used_slots[0], tbl->highest_used_slotid,
+ tbl->max_slots);
+ slotid = find_first_zero_bit(tbl->used_slots, tbl->max_slots);
+ if (slotid >= tbl->max_slots)
+ goto out;
+ __set_bit(slotid, tbl->used_slots);
+ if (slotid > tbl->highest_used_slotid)
+ tbl->highest_used_slotid = slotid;
+ ret_id = slotid;
+out:
+ dprintk("<-- %s used_slots=%04lx highest_used=%d slotid=%d \n",
+ __func__, tbl->used_slots[0], tbl->highest_used_slotid, ret_id);
+ return ret_id;
+}
+
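+/*
+ * Wait for any client state recovery to finish, then keep kicking the
+ * state manager until NFS4CLNT_SESSION_SETUP has been cleared.
+ */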
+static int nfs4_recover_session(struct nfs4_session *session)
+{
+ struct nfs_client *clp = session->clp;
+ int ret;
+
+ for (;;) {
+ ret = nfs4_wait_clnt_recover(clp);
+ if (ret != 0)
+ return ret;
+ if (!test_bit(NFS4CLNT_SESSION_SETUP, &clp->cl_state))
+ break;
+ nfs4_schedule_state_manager(clp);
+ }
+ return 0;
+}
+
+static int nfs41_setup_sequence(struct nfs4_session *session,
+ struct nfs4_sequence_args *args,
+ struct nfs4_sequence_res *res,
+ int cache_reply,
+ struct rpc_task *task)
+{
+ struct nfs4_slot *slot;
+ struct nfs4_slot_table *tbl;
+ int status = 0;
+ u8 slotid;
+
+ dprintk("--> %s\n", __func__);
+ /* slot already allocated? */
+ if (res->sr_slotid != NFS4_MAX_SLOT_TABLE)
+ return 0;
+
+ memset(res, 0, sizeof(*res));
+ res->sr_slotid = NFS4_MAX_SLOT_TABLE;
+ tbl = &session->fc_slot_table;
+
+ spin_lock(&tbl->slot_tbl_lock);
+ if (test_bit(NFS4CLNT_SESSION_SETUP, &session->clp->cl_state)) {
+ if (tbl->highest_used_slotid != -1) {
+ rpc_sleep_on(&tbl->slot_tbl_waitq, task, NULL);
+ spin_unlock(&tbl->slot_tbl_lock);
+ dprintk("<-- %s: Session reset: draining\n", __func__);
+ return -EAGAIN;
+ }
+
+ /* The slot table is empty; start the reset thread */
+ dprintk("%s Session Reset\n", __func__);
+ spin_unlock(&tbl->slot_tbl_lock);
+ status = nfs4_recover_session(session);
+ if (status)
+ return status;
+ spin_lock(&tbl->slot_tbl_lock);
+ }
+
+ slotid = nfs4_find_slot(tbl, task);
+ if (slotid == NFS4_MAX_SLOT_TABLE) {
+ rpc_sleep_on(&tbl->slot_tbl_waitq, task, NULL);
+ spin_unlock(&tbl->slot_tbl_lock);
+ dprintk("<-- %s: no free slots\n", __func__);
+ return -EAGAIN;
+ }
+ spin_unlock(&tbl->slot_tbl_lock);
+
+ slot = tbl->slots + slotid;
+ args->sa_session = session;
+ args->sa_slotid = slotid;
+ args->sa_cache_this = cache_reply;
+
+ dprintk("<-- %s slotid=%d seqid=%d\n", __func__, slotid, slot->seq_nr);
+
+ res->sr_session = session;
+ res->sr_slotid = slotid;
+ res->sr_renewal_time = jiffies;
+ /*
+ * sr_status is only set in decode_sequence, and so will remain
+ * set to 1 if an rpc level failure occurs.
+ */
+ res->sr_status = 1;
+ return 0;
+}
+
+int nfs4_setup_sequence(struct nfs_client *clp,
+ struct nfs4_sequence_args *args,
+ struct nfs4_sequence_res *res,
+ int cache_reply,
+ struct rpc_task *task)
+{
+ int ret = 0;
+
+ dprintk("--> %s clp %p session %p sr_slotid %d\n",
+ __func__, clp, clp->cl_session, res->sr_slotid);
+
+ if (!nfs4_has_session(clp))
+ goto out;
+ ret = nfs41_setup_sequence(clp->cl_session, args, res, cache_reply,
+ task);
+ if (ret != -EAGAIN) {
+ /* terminate rpc task */
+ task->tk_status = ret;
+ task->tk_action = NULL;
+ }
+out:
+ dprintk("<-- %s status=%d\n", __func__, ret);
+ return ret;
+}
+
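+/* Per-call bookkeeping handed to the rpc_call_prepare/done callbacks. */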
+struct nfs41_call_sync_data {
+ struct nfs_client *clp;
+ struct nfs4_sequence_args *seq_args;
+ struct nfs4_sequence_res *seq_res;
+ int cache_reply;
+};
+
+static void nfs41_call_sync_prepare(struct rpc_task *task, void *calldata)
+{
+ struct nfs41_call_sync_data *data = calldata;
+
+ dprintk("--> %s data->clp->cl_session %p\n", __func__,
+ data->clp->cl_session);
+ if (nfs4_setup_sequence(data->clp, data->seq_args,
+ data->seq_res, data->cache_reply, task))
+ return;
+ rpc_call_start(task);
+}
+
+static void nfs41_call_sync_done(struct rpc_task *task, void *calldata)
+{
+ struct nfs41_call_sync_data *data = calldata;
+
+ nfs41_sequence_done(data->clp, data->seq_res, task->tk_status);
+ nfs41_sequence_free_slot(data->clp, data->seq_res);
+}
+
+struct rpc_call_ops nfs41_call_sync_ops = {
+ .rpc_call_prepare = nfs41_call_sync_prepare,
+ .rpc_call_done = nfs41_call_sync_done,
+};
+
+static int nfs4_call_sync_sequence(struct nfs_client *clp,
+ struct rpc_clnt *clnt,
+ struct rpc_message *msg,
+ struct nfs4_sequence_args *args,
+ struct nfs4_sequence_res *res,
+ int cache_reply)
+{
+ int ret;
+ struct rpc_task *task;
+ struct nfs41_call_sync_data data = {
+ .clp = clp,
+ .seq_args = args,
+ .seq_res = res,
+ .cache_reply = cache_reply,
+ };
+ struct rpc_task_setup task_setup = {
+ .rpc_client = clnt,
+ .rpc_message = msg,
+ .callback_ops = &nfs41_call_sync_ops,
+ .callback_data = &data
+ };
+
+ res->sr_slotid = NFS4_MAX_SLOT_TABLE;
+ task = rpc_run_task(&task_setup);
+ if (IS_ERR(task))
+ ret = PTR_ERR(task);
+ else {
+ ret = task->tk_status;
+ rpc_put_task(task);
+ }
+ return ret;
+}
+
+int _nfs4_call_sync_session(struct nfs_server *server,
+ struct rpc_message *msg,
+ struct nfs4_sequence_args *args,
+ struct nfs4_sequence_res *res,
+ int cache_reply)
+{
+ return nfs4_call_sync_sequence(server->nfs_client, server->client,
+ msg, args, res, cache_reply);
+}
+
+#endif /* CONFIG_NFS_V4_1 */
+
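+/*
+ * NFSv4.0 synchronous call path: there is no session, so clear the
+ * sequence bookkeeping and issue a plain rpc_call_sync().
+ */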
+int _nfs4_call_sync(struct nfs_server *server,
+ struct rpc_message *msg,
+ struct nfs4_sequence_args *args,
+ struct nfs4_sequence_res *res,
+ int cache_reply)
+{
+ args->sa_session = res->sr_session = NULL;
+ return rpc_call_sync(server->client, msg, 0);
+}
+
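+/*
+ * Dispatch through the nfs_client's cl_call_sync method; this is expected
+ * to point at _nfs4_call_sync or _nfs4_call_sync_session depending on the
+ * minor version.
+ */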
+#define nfs4_call_sync(server, msg, args, res, cache_reply) \
+ (server)->nfs_client->cl_call_sync((server), (msg), &(args)->seq_args, \
+ &(res)->seq_res, (cache_reply))
+
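+/* No-op for v4.0; for v4.1 update the slot and lease state for this reply. */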
+static void nfs4_sequence_done(const struct nfs_server *server,
+ struct nfs4_sequence_res *res, int rpc_status)
+{
+#ifdef CONFIG_NFS_V4_1
+ if (nfs4_has_session(server->nfs_client))
+ nfs41_sequence_done(server->nfs_client, res, rpc_status);
+#endif /* CONFIG_NFS_V4_1 */
+}
+
+/* no restart, therefore free slot here */
+static void nfs4_sequence_done_free_slot(const struct nfs_server *server,
+ struct nfs4_sequence_res *res,
+ int rpc_status)
+{
+ nfs4_sequence_done(server, res, rpc_status);
+ nfs4_sequence_free_slot(server->nfs_client, res);
+}
+
static void update_changeattr(struct inode *dir, struct nfs4_change_info *cinfo)
{
struct nfs_inode *nfsi = NFS_I(dir);
@@ -312,6 +680,7 @@
p->o_res.server = p->o_arg.server;
nfs_fattr_init(&p->f_attr);
nfs_fattr_init(&p->dir_attr);
+ p->o_res.seq_res.sr_slotid = NFS4_MAX_SLOT_TABLE;
}
static struct nfs4_opendata *nfs4_opendata_alloc(struct path *path,
@@ -804,16 +1173,30 @@
err = _nfs4_open_delegation_recall(ctx, state, stateid);
switch (err) {
case 0:
- return err;
+ case -ENOENT:
+ case -ESTALE:
+ goto out;
case -NFS4ERR_STALE_CLIENTID:
case -NFS4ERR_STALE_STATEID:
case -NFS4ERR_EXPIRED:
/* Don't recall a delegation if it was lost */
nfs4_schedule_state_recovery(server->nfs_client);
- return err;
+ goto out;
+ case -ERESTARTSYS:
+ /*
+ * The show must go on: exit, but mark the
+ * stateid as needing recovery.
+ */
+ case -NFS4ERR_ADMIN_REVOKED:
+ case -NFS4ERR_BAD_STATEID:
+ nfs4_state_mark_reclaim_nograce(server->nfs_client, state);
+ case -ENOMEM:
+ err = 0;
+ goto out;
}
err = nfs4_handle_exception(server, err, &exception);
} while (exception.retry);
+out:
return err;
}
@@ -929,6 +1312,10 @@
nfs_copy_fh(&data->o_res.fh, data->o_arg.fh);
}
data->timestamp = jiffies;
+ if (nfs4_setup_sequence(data->o_arg.server->nfs_client,
+ &data->o_arg.seq_args,
+ &data->o_res.seq_res, 1, task))
+ return;
rpc_call_start(task);
return;
out_no_action:
@@ -941,6 +1328,10 @@
struct nfs4_opendata *data = calldata;
data->rpc_status = task->tk_status;
+
+ nfs4_sequence_done_free_slot(data->o_arg.server, &data->o_res.seq_res,
+ task->tk_status);
+
if (RPC_ASSASSINATED(task))
return;
if (task->tk_status == 0) {
@@ -1269,7 +1660,7 @@
} else
memcpy(&arg.stateid, &zero_stateid, sizeof(arg.stateid));
- status = rpc_call_sync(server->client, &msg, 0);
+ status = nfs4_call_sync(server, &msg, &arg, &res, 1);
if (status == 0 && state != NULL)
renew_lease(server, timestamp);
return status;
@@ -1318,6 +1709,7 @@
struct nfs4_state *state = calldata->state;
struct nfs_server *server = NFS_SERVER(calldata->inode);
+ nfs4_sequence_done(server, &calldata->res.seq_res, task->tk_status);
if (RPC_ASSASSINATED(task))
return;
/* hmm. we are done with the inode, and in the process of freeing
@@ -1336,10 +1728,11 @@
break;
default:
if (nfs4_async_handle_error(task, server, state) == -EAGAIN) {
- rpc_restart_call(task);
+ nfs4_restart_rpc(task, server->nfs_client);
return;
}
}
+ nfs4_sequence_free_slot(server->nfs_client, &calldata->res.seq_res);
nfs_refresh_inode(calldata->inode, calldata->res.fattr);
}
@@ -1380,6 +1773,10 @@
calldata->arg.fmode = FMODE_WRITE;
}
calldata->timestamp = jiffies;
+ if (nfs4_setup_sequence((NFS_SERVER(calldata->inode))->nfs_client,
+ &calldata->arg.seq_args, &calldata->res.seq_res,
+ 1, task))
+ return;
rpc_call_start(task);
}
@@ -1419,13 +1816,15 @@
};
int status = -ENOMEM;
- calldata = kmalloc(sizeof(*calldata), GFP_KERNEL);
+ calldata = kzalloc(sizeof(*calldata), GFP_KERNEL);
if (calldata == NULL)
goto out;
calldata->inode = state->inode;
calldata->state = state;
calldata->arg.fh = NFS_FH(state->inode);
calldata->arg.stateid = &state->open_stateid;
+ if (nfs4_has_session(server->nfs_client))
+ memset(calldata->arg.stateid->data, 0, 4); /* clear seqid */
/* Serialization for the sequence id */
calldata->arg.seqid = nfs_alloc_seqid(&state->owner->so_seqid);
if (calldata->arg.seqid == NULL)
@@ -1435,6 +1834,7 @@
calldata->res.fattr = &calldata->fattr;
calldata->res.seqid = calldata->arg.seqid;
calldata->res.server = server;
+ calldata->res.seq_res.sr_slotid = NFS4_MAX_SLOT_TABLE;
calldata->path.mnt = mntget(path->mnt);
calldata->path.dentry = dget(path->dentry);
@@ -1584,15 +1984,18 @@
static int _nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *fhandle)
{
+ struct nfs4_server_caps_arg args = {
+ .fhandle = fhandle,
+ };
struct nfs4_server_caps_res res = {};
struct rpc_message msg = {
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SERVER_CAPS],
- .rpc_argp = fhandle,
+ .rpc_argp = &args,
.rpc_resp = &res,
};
int status;
- status = rpc_call_sync(server->client, &msg, 0);
+ status = nfs4_call_sync(server, &msg, &args, &res, 0);
if (status == 0) {
memcpy(server->attr_bitmask, res.attr_bitmask, sizeof(server->attr_bitmask));
if (res.attr_bitmask[0] & FATTR4_WORD0_ACL)
@@ -1606,6 +2009,7 @@
server->cache_consistency_bitmask[1] &= FATTR4_WORD1_TIME_METADATA|FATTR4_WORD1_TIME_MODIFY;
server->acl_bitmask = res.acl_bitmask;
}
+
return status;
}
@@ -1637,8 +2041,15 @@
.rpc_argp = &args,
.rpc_resp = &res,
};
+ int status;
+
nfs_fattr_init(info->fattr);
- return rpc_call_sync(server->client, &msg, 0);
+ status = nfs4_recover_expired_lease(server);
+ if (!status)
+ status = nfs4_check_client_ready(server->nfs_client);
+ if (!status)
+ status = nfs4_call_sync(server, &msg, &args, &res, 0);
+ return status;
}
static int nfs4_lookup_root(struct nfs_server *server, struct nfs_fh *fhandle,
@@ -1728,7 +2139,7 @@
};
nfs_fattr_init(fattr);
- return rpc_call_sync(server->client, &msg, 0);
+ return nfs4_call_sync(server, &msg, &args, &res, 0);
}
static int nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fattr *fattr)
@@ -1812,7 +2223,7 @@
nfs_fattr_init(fattr);
dprintk("NFS call lookupfh %s\n", name->name);
- status = rpc_call_sync(server->client, &msg, 0);
+ status = nfs4_call_sync(server, &msg, &args, &res, 0);
dprintk("NFS reply lookupfh: %d\n", status);
return status;
}
@@ -1898,7 +2309,7 @@
args.access |= NFS4_ACCESS_EXECUTE;
}
nfs_fattr_init(&fattr);
- status = rpc_call_sync(NFS_CLIENT(inode), &msg, 0);
+ status = nfs4_call_sync(server, &msg, &args, &res, 0);
if (!status) {
entry->mask = 0;
if (res.access & NFS4_ACCESS_READ)
@@ -1957,13 +2368,14 @@
.pglen = pglen,
.pages = &page,
};
+ struct nfs4_readlink_res res;
struct rpc_message msg = {
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READLINK],
.rpc_argp = &args,
- .rpc_resp = NULL,
+ .rpc_resp = &res,
};
- return rpc_call_sync(NFS_CLIENT(inode), &msg, 0);
+ return nfs4_call_sync(NFS_SERVER(inode), &msg, &args, &res, 0);
}
static int nfs4_proc_readlink(struct inode *inode, struct page *page,
@@ -2057,7 +2469,7 @@
int status;
nfs_fattr_init(&res.dir_attr);
- status = rpc_call_sync(server->client, &msg, 0);
+ status = nfs4_call_sync(server, &msg, &args, &res, 1);
if (status == 0) {
update_changeattr(dir, &res.cinfo);
nfs_post_op_update_inode(dir, &res.dir_attr);
@@ -2092,8 +2504,10 @@
{
struct nfs_removeres *res = task->tk_msg.rpc_resp;
+ nfs4_sequence_done(res->server, &res->seq_res, task->tk_status);
if (nfs4_async_handle_error(task, res->server, NULL) == -EAGAIN)
return 0;
+ nfs4_sequence_free_slot(res->server->nfs_client, &res->seq_res);
update_changeattr(dir, &res->cinfo);
nfs_post_op_update_inode(dir, &res->dir_attr);
return 1;
@@ -2125,7 +2539,7 @@
nfs_fattr_init(res.old_fattr);
nfs_fattr_init(res.new_fattr);
- status = rpc_call_sync(server->client, &msg, 0);
+ status = nfs4_call_sync(server, &msg, &arg, &res, 1);
if (!status) {
update_changeattr(old_dir, &res.old_cinfo);
@@ -2174,7 +2588,7 @@
nfs_fattr_init(res.fattr);
nfs_fattr_init(res.dir_attr);
- status = rpc_call_sync(server->client, &msg, 0);
+ status = nfs4_call_sync(server, &msg, &arg, &res, 1);
if (!status) {
update_changeattr(dir, &res.cinfo);
nfs_post_op_update_inode(dir, res.dir_attr);
@@ -2235,7 +2649,8 @@
static int nfs4_do_create(struct inode *dir, struct dentry *dentry, struct nfs4_createdata *data)
{
- int status = rpc_call_sync(NFS_CLIENT(dir), &data->msg, 0);
+ int status = nfs4_call_sync(NFS_SERVER(dir), &data->msg,
+ &data->arg, &data->res, 1);
if (status == 0) {
update_changeattr(dir, &data->res.dir_cinfo);
nfs_post_op_update_inode(dir, data->res.dir_fattr);
@@ -2344,7 +2759,7 @@
(unsigned long long)cookie);
nfs4_setup_readdir(cookie, NFS_COOKIEVERF(dir), dentry, &args);
res.pgbase = args.pgbase;
- status = rpc_call_sync(NFS_CLIENT(dir), &msg, 0);
+ status = nfs4_call_sync(NFS_SERVER(dir), &msg, &args, &res, 0);
if (status == 0)
memcpy(NFS_COOKIEVERF(dir), res.verifier.data, NFS4_VERIFIER_SIZE);
@@ -2422,14 +2837,17 @@
.fh = fhandle,
.bitmask = server->attr_bitmask,
};
+ struct nfs4_statfs_res res = {
+ .fsstat = fsstat,
+ };
struct rpc_message msg = {
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_STATFS],
.rpc_argp = &args,
- .rpc_resp = fsstat,
+ .rpc_resp = &res,
};
nfs_fattr_init(fsstat->fattr);
- return rpc_call_sync(server->client, &msg, 0);
+ return nfs4_call_sync(server, &msg, &args, &res, 0);
}
static int nfs4_proc_statfs(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsstat *fsstat)
@@ -2451,13 +2869,16 @@
.fh = fhandle,
.bitmask = server->attr_bitmask,
};
+ struct nfs4_fsinfo_res res = {
+ .fsinfo = fsinfo,
+ };
struct rpc_message msg = {
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FSINFO],
.rpc_argp = &args,
- .rpc_resp = fsinfo,
+ .rpc_resp = &res,
};
- return rpc_call_sync(server->client, &msg, 0);
+ return nfs4_call_sync(server, &msg, &args, &res, 0);
}
static int nfs4_do_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsinfo *fsinfo)
@@ -2486,10 +2907,13 @@
.fh = fhandle,
.bitmask = server->attr_bitmask,
};
+ struct nfs4_pathconf_res res = {
+ .pathconf = pathconf,
+ };
struct rpc_message msg = {
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_PATHCONF],
.rpc_argp = &args,
- .rpc_resp = pathconf,
+ .rpc_resp = &res,
};
/* None of the pathconf attributes are mandatory to implement */
@@ -2499,7 +2923,7 @@
}
nfs_fattr_init(pathconf->fattr);
- return rpc_call_sync(server->client, &msg, 0);
+ return nfs4_call_sync(server, &msg, &args, &res, 0);
}
static int nfs4_proc_pathconf(struct nfs_server *server, struct nfs_fh *fhandle,
@@ -2520,8 +2944,13 @@
{
struct nfs_server *server = NFS_SERVER(data->inode);
+ dprintk("--> %s\n", __func__);
+
+ /* nfs4_sequence_free_slot called in the read rpc_call_done */
+ nfs4_sequence_done(server, &data->res.seq_res, task->tk_status);
+
if (nfs4_async_handle_error(task, server, data->args.context->state) == -EAGAIN) {
- rpc_restart_call(task);
+ nfs4_restart_rpc(task, server->nfs_client);
return -EAGAIN;
}
@@ -2541,8 +2970,12 @@
{
struct inode *inode = data->inode;
+ /* slot is freed in nfs_writeback_done */
+ nfs4_sequence_done(NFS_SERVER(inode), &data->res.seq_res,
+ task->tk_status);
+
if (nfs4_async_handle_error(task, NFS_SERVER(inode), data->args.context->state) == -EAGAIN) {
- rpc_restart_call(task);
+ nfs4_restart_rpc(task, NFS_SERVER(inode)->nfs_client);
return -EAGAIN;
}
if (task->tk_status >= 0) {
@@ -2567,10 +3000,14 @@
{
struct inode *inode = data->inode;
+ nfs4_sequence_done(NFS_SERVER(inode), &data->res.seq_res,
+ task->tk_status);
if (nfs4_async_handle_error(task, NFS_SERVER(inode), NULL) == -EAGAIN) {
- rpc_restart_call(task);
+ nfs4_restart_rpc(task, NFS_SERVER(inode)->nfs_client);
return -EAGAIN;
}
+ nfs4_sequence_free_slot(NFS_SERVER(inode)->nfs_client,
+ &data->res.seq_res);
nfs_refresh_inode(inode, data->res.fattr);
return 0;
}
@@ -2603,6 +3040,9 @@
if (time_before(clp->cl_last_renewal,timestamp))
clp->cl_last_renewal = timestamp;
spin_unlock(&clp->cl_lock);
+ dprintk("%s calling put_rpccred on rpc_cred %p\n", __func__,
+ task->tk_msg.rpc_cred);
+ put_rpccred(task->tk_msg.rpc_cred);
}
static const struct rpc_call_ops nfs4_renew_ops = {
@@ -2742,12 +3182,14 @@
.acl_pages = pages,
.acl_len = buflen,
};
- size_t resp_len = buflen;
+ struct nfs_getaclres res = {
+ .acl_len = buflen,
+ };
void *resp_buf;
struct rpc_message msg = {
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETACL],
.rpc_argp = &args,
- .rpc_resp = &resp_len,
+ .rpc_resp = &res,
};
struct page *localpage = NULL;
int ret;
@@ -2761,26 +3203,26 @@
return -ENOMEM;
args.acl_pages[0] = localpage;
args.acl_pgbase = 0;
- resp_len = args.acl_len = PAGE_SIZE;
+ args.acl_len = PAGE_SIZE;
} else {
resp_buf = buf;
buf_to_pages(buf, buflen, args.acl_pages, &args.acl_pgbase);
}
- ret = rpc_call_sync(NFS_CLIENT(inode), &msg, 0);
+ ret = nfs4_call_sync(NFS_SERVER(inode), &msg, &args, &res, 0);
if (ret)
goto out_free;
- if (resp_len > args.acl_len)
- nfs4_write_cached_acl(inode, NULL, resp_len);
+ if (res.acl_len > args.acl_len)
+ nfs4_write_cached_acl(inode, NULL, res.acl_len);
else
- nfs4_write_cached_acl(inode, resp_buf, resp_len);
+ nfs4_write_cached_acl(inode, resp_buf, res.acl_len);
if (buf) {
ret = -ERANGE;
- if (resp_len > buflen)
+ if (res.acl_len > buflen)
goto out_free;
if (localpage)
- memcpy(buf, resp_buf, resp_len);
+ memcpy(buf, resp_buf, res.acl_len);
}
- ret = resp_len;
+ ret = res.acl_len;
out_free:
if (localpage)
__free_page(localpage);
@@ -2810,8 +3252,6 @@
ret = nfs_revalidate_inode(server, inode);
if (ret < 0)
return ret;
- if (NFS_I(inode)->cache_validity & NFS_INO_INVALID_ACL)
- nfs_zap_acl_cache(inode);
ret = nfs4_read_cached_acl(inode, buf, buflen);
if (ret != -ENOENT)
return ret;
@@ -2827,10 +3267,11 @@
.acl_pages = pages,
.acl_len = buflen,
};
+ struct nfs_setaclres res;
struct rpc_message msg = {
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETACL],
.rpc_argp = &arg,
- .rpc_resp = NULL,
+ .rpc_resp = &res,
};
int ret;
@@ -2838,7 +3279,7 @@
return -EOPNOTSUPP;
nfs_inode_return_delegation(inode);
buf_to_pages(buf, buflen, arg.acl_pages, &arg.acl_pgbase);
- ret = rpc_call_sync(NFS_CLIENT(inode), &msg, 0);
+ ret = nfs4_call_sync(server, &msg, &arg, &res, 1);
nfs_access_zap_cache(inode);
nfs_zap_acl_cache(inode);
return ret;
@@ -2857,10 +3298,8 @@
}
static int
-nfs4_async_handle_error(struct rpc_task *task, const struct nfs_server *server, struct nfs4_state *state)
+_nfs4_async_handle_error(struct rpc_task *task, const struct nfs_server *server, struct nfs_client *clp, struct nfs4_state *state)
{
- struct nfs_client *clp = server->nfs_client;
-
if (!clp || task->tk_status >= 0)
return 0;
switch(task->tk_status) {
@@ -2879,8 +3318,23 @@
rpc_wake_up_queued_task(&clp->cl_rpcwaitq, task);
task->tk_status = 0;
return -EAGAIN;
+#if defined(CONFIG_NFS_V4_1)
+ case -NFS4ERR_BADSESSION:
+ case -NFS4ERR_BADSLOT:
+ case -NFS4ERR_BAD_HIGH_SLOT:
+ case -NFS4ERR_DEADSESSION:
+ case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
+ case -NFS4ERR_SEQ_FALSE_RETRY:
+ case -NFS4ERR_SEQ_MISORDERED:
+ dprintk("%s ERROR %d, Reset session\n", __func__,
+ task->tk_status);
+ set_bit(NFS4CLNT_SESSION_SETUP, &clp->cl_state);
+ task->tk_status = 0;
+ return -EAGAIN;
+#endif /* CONFIG_NFS_V4_1 */
case -NFS4ERR_DELAY:
- nfs_inc_server_stats(server, NFSIOS_DELAY);
+ if (server)
+ nfs_inc_server_stats(server, NFSIOS_DELAY);
case -NFS4ERR_GRACE:
rpc_delay(task, NFS4_POLL_RETRY_MAX);
task->tk_status = 0;
@@ -2893,6 +3347,12 @@
return 0;
}
+static int
+nfs4_async_handle_error(struct rpc_task *task, const struct nfs_server *server, struct nfs4_state *state)
+{
+ return _nfs4_async_handle_error(task, server, server->nfs_client, state);
+}
+
int nfs4_proc_setclientid(struct nfs_client *clp, u32 program, unsigned short port, struct rpc_cred *cred)
{
nfs4_verifier sc_verifier;
@@ -3000,6 +3460,10 @@
static void nfs4_delegreturn_done(struct rpc_task *task, void *calldata)
{
struct nfs4_delegreturndata *data = calldata;
+
+ nfs4_sequence_done_free_slot(data->res.server, &data->res.seq_res,
+ task->tk_status);
+
data->rpc_status = task->tk_status;
if (data->rpc_status == 0)
renew_lease(data->res.server, data->timestamp);
@@ -3010,7 +3474,25 @@
kfree(calldata);
}
+#if defined(CONFIG_NFS_V4_1)
+static void nfs4_delegreturn_prepare(struct rpc_task *task, void *data)
+{
+ struct nfs4_delegreturndata *d_data;
+
+ d_data = (struct nfs4_delegreturndata *)data;
+
+ if (nfs4_setup_sequence(d_data->res.server->nfs_client,
+ &d_data->args.seq_args,
+ &d_data->res.seq_res, 1, task))
+ return;
+ rpc_call_start(task);
+}
+#endif /* CONFIG_NFS_V4_1 */
+
static const struct rpc_call_ops nfs4_delegreturn_ops = {
+#if defined(CONFIG_NFS_V4_1)
+ .rpc_call_prepare = nfs4_delegreturn_prepare,
+#endif /* CONFIG_NFS_V4_1 */
.rpc_call_done = nfs4_delegreturn_done,
.rpc_release = nfs4_delegreturn_release,
};
@@ -3032,7 +3514,7 @@
};
int status = 0;
- data = kmalloc(sizeof(*data), GFP_KERNEL);
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
if (data == NULL)
return -ENOMEM;
data->args.fhandle = &data->fh;
@@ -3042,6 +3524,7 @@
memcpy(&data->stateid, stateid, sizeof(data->stateid));
data->res.fattr = &data->fattr;
data->res.server = server;
+ data->res.seq_res.sr_slotid = NFS4_MAX_SLOT_TABLE;
nfs_fattr_init(data->res.fattr);
data->timestamp = jiffies;
data->rpc_status = 0;
@@ -3127,7 +3610,7 @@
goto out;
lsp = request->fl_u.nfs4_fl.owner;
arg.lock_owner.id = lsp->ls_id.id;
- status = rpc_call_sync(server->client, &msg, 0);
+ status = nfs4_call_sync(server, &msg, &arg, &res, 1);
switch (status) {
case 0:
request->fl_type = F_UNLCK;
@@ -3187,13 +3670,14 @@
struct nfs4_unlockdata *p;
struct inode *inode = lsp->ls_state->inode;
- p = kmalloc(sizeof(*p), GFP_KERNEL);
+ p = kzalloc(sizeof(*p), GFP_KERNEL);
if (p == NULL)
return NULL;
p->arg.fh = NFS_FH(inode);
p->arg.fl = &p->fl;
p->arg.seqid = seqid;
p->res.seqid = seqid;
+ p->res.seq_res.sr_slotid = NFS4_MAX_SLOT_TABLE;
p->arg.stateid = &lsp->ls_stateid;
p->lsp = lsp;
atomic_inc(&lsp->ls_count);
@@ -3217,6 +3701,8 @@
{
struct nfs4_unlockdata *calldata = data;
+ nfs4_sequence_done(calldata->server, &calldata->res.seq_res,
+ task->tk_status);
if (RPC_ASSASSINATED(task))
return;
switch (task->tk_status) {
@@ -3233,8 +3719,11 @@
break;
default:
if (nfs4_async_handle_error(task, calldata->server, NULL) == -EAGAIN)
- rpc_restart_call(task);
+ nfs4_restart_rpc(task,
+ calldata->server->nfs_client);
}
+ nfs4_sequence_free_slot(calldata->server->nfs_client,
+ &calldata->res.seq_res);
}
static void nfs4_locku_prepare(struct rpc_task *task, void *data)
@@ -3249,6 +3738,10 @@
return;
}
calldata->timestamp = jiffies;
+ if (nfs4_setup_sequence(calldata->server->nfs_client,
+ &calldata->arg.seq_args,
+ &calldata->res.seq_res, 1, task))
+ return;
rpc_call_start(task);
}
@@ -3341,6 +3834,7 @@
unsigned long timestamp;
int rpc_status;
int cancelled;
+ struct nfs_server *server;
};
static struct nfs4_lockdata *nfs4_alloc_lockdata(struct file_lock *fl,
@@ -3366,7 +3860,9 @@
p->arg.lock_owner.clientid = server->nfs_client->cl_clientid;
p->arg.lock_owner.id = lsp->ls_id.id;
p->res.lock_seqid = p->arg.lock_seqid;
+ p->res.seq_res.sr_slotid = NFS4_MAX_SLOT_TABLE;
p->lsp = lsp;
+ p->server = server;
atomic_inc(&lsp->ls_count);
p->ctx = get_nfs_open_context(ctx);
memcpy(&p->fl, fl, sizeof(p->fl));
@@ -3396,6 +3892,9 @@
} else
data->arg.new_lock_owner = 0;
data->timestamp = jiffies;
+ if (nfs4_setup_sequence(data->server->nfs_client, &data->arg.seq_args,
+ &data->res.seq_res, 1, task))
+ return;
rpc_call_start(task);
dprintk("%s: done!, ret = %d\n", __func__, data->rpc_status);
}
@@ -3406,6 +3905,9 @@
dprintk("%s: begin!\n", __func__);
+ nfs4_sequence_done_free_slot(data->server, &data->res.seq_res,
+ task->tk_status);
+
data->rpc_status = task->tk_status;
if (RPC_ASSASSINATED(task))
goto out;
@@ -3487,8 +3989,6 @@
ret = nfs4_wait_for_completion_rpc_task(task);
if (ret == 0) {
ret = data->rpc_status;
- if (ret == -NFS4ERR_DENIED)
- ret = -EAGAIN;
} else
data->cancelled = 1;
rpc_put_task(task);
@@ -3576,9 +4076,11 @@
int err;
do {
+ err = _nfs4_proc_setlk(state, cmd, request);
+ if (err == -NFS4ERR_DENIED)
+ err = -EAGAIN;
err = nfs4_handle_exception(NFS_SERVER(state->inode),
- _nfs4_proc_setlk(state, cmd, request),
- &exception);
+ err, &exception);
} while (exception.retry);
return err;
}
@@ -3630,8 +4132,37 @@
goto out;
do {
err = _nfs4_do_setlk(state, F_SETLK, fl, 0);
- if (err != -NFS4ERR_DELAY)
- break;
+ switch (err) {
+ default:
+ printk(KERN_ERR "%s: unhandled error %d.\n",
+ __func__, err);
+ case 0:
+ case -ESTALE:
+ goto out;
+ case -NFS4ERR_EXPIRED:
+ case -NFS4ERR_STALE_CLIENTID:
+ case -NFS4ERR_STALE_STATEID:
+ nfs4_schedule_state_recovery(server->nfs_client);
+ goto out;
+ case -ERESTARTSYS:
+ /*
+ * The show must go on: exit, but mark the
+ * stateid as needing recovery.
+ */
+ case -NFS4ERR_ADMIN_REVOKED:
+ case -NFS4ERR_BAD_STATEID:
+ case -NFS4ERR_OPENMODE:
+ nfs4_state_mark_reclaim_nograce(server->nfs_client, state);
+ err = 0;
+ goto out;
+ case -ENOMEM:
+ case -NFS4ERR_DENIED:
+ /* kill_proc(fl->fl_pid, SIGLOST, 1); */
+ err = 0;
+ goto out;
+ case -NFS4ERR_DELAY:
+ break;
+ }
err = nfs4_handle_exception(server, err, &exception);
} while (exception.retry);
out:
@@ -3706,10 +4237,13 @@
.page = page,
.bitmask = bitmask,
};
+ struct nfs4_fs_locations_res res = {
+ .fs_locations = fs_locations,
+ };
struct rpc_message msg = {
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FS_LOCATIONS],
.rpc_argp = &args,
- .rpc_resp = fs_locations,
+ .rpc_resp = &res,
};
int status;
@@ -3717,24 +4251,720 @@
nfs_fattr_init(&fs_locations->fattr);
fs_locations->server = server;
fs_locations->nlocations = 0;
- status = rpc_call_sync(server->client, &msg, 0);
+ status = nfs4_call_sync(server, &msg, &args, &res, 0);
nfs_fixup_referral_attributes(&fs_locations->fattr);
dprintk("%s: returned status = %d\n", __func__, status);
return status;
}
-struct nfs4_state_recovery_ops nfs4_reboot_recovery_ops = {
+#ifdef CONFIG_NFS_V4_1
+/*
+ * nfs4_proc_exchange_id()
+ *
+ * Since the clientid has expired, all compounds using sessions
+ * associated with the stale clientid will be returning
+ * NFS4ERR_BADSESSION in the sequence operation, and will therefore
+ * be in some phase of session reset.
+ */
+static int nfs4_proc_exchange_id(struct nfs_client *clp, struct rpc_cred *cred)
+{
+ nfs4_verifier verifier;
+ struct nfs41_exchange_id_args args = {
+ .client = clp,
+ .flags = clp->cl_exchange_flags,
+ };
+ struct nfs41_exchange_id_res res = {
+ .client = clp,
+ };
+ int status;
+ struct rpc_message msg = {
+ .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_EXCHANGE_ID],
+ .rpc_argp = &args,
+ .rpc_resp = &res,
+ .rpc_cred = cred,
+ };
+ __be32 *p;
+
+ dprintk("--> %s\n", __func__);
+ BUG_ON(clp == NULL);
+
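+ /* The EXCHANGE_ID verifier is derived from the client's boot time. */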
+ p = (u32 *)verifier.data;
+ *p++ = htonl((u32)clp->cl_boot_time.tv_sec);
+ *p = htonl((u32)clp->cl_boot_time.tv_nsec);
+ args.verifier = &verifier;
+
+ while (1) {
+ args.id_len = scnprintf(args.id, sizeof(args.id),
+ "%s/%s %u",
+ clp->cl_ipaddr,
+ rpc_peeraddr2str(clp->cl_rpcclient,
+ RPC_DISPLAY_ADDR),
+ clp->cl_id_uniquifier);
+
+ status = rpc_call_sync(clp->cl_rpcclient, &msg, 0);
+
+ if (status != NFS4ERR_CLID_INUSE)
+ break;
+
+ if (signalled())
+ break;
+
+ if (++clp->cl_id_uniquifier == 0)
+ break;
+ }
+
+ dprintk("<-- %s status= %d\n", __func__, status);
+ return status;
+}
+
+struct nfs4_get_lease_time_data {
+ struct nfs4_get_lease_time_args *args;
+ struct nfs4_get_lease_time_res *res;
+ struct nfs_client *clp;
+};
+
+static void nfs4_get_lease_time_prepare(struct rpc_task *task,
+ void *calldata)
+{
+ int ret;
+ struct nfs4_get_lease_time_data *data =
+ (struct nfs4_get_lease_time_data *)calldata;
+
+ dprintk("--> %s\n", __func__);
+ /* just set up the sequence, do not trigger session recovery
+ since we're invoked within one */
+ ret = nfs41_setup_sequence(data->clp->cl_session,
+ &data->args->la_seq_args,
+ &data->res->lr_seq_res, 0, task);
+
+ BUG_ON(ret == -EAGAIN);
+ rpc_call_start(task);
+ dprintk("<-- %s\n", __func__);
+}
+
+/*
+ * Called from nfs4_state_manager thread for session setup, so don't recover
+ * from sequence operation or clientid errors.
+ */
+static void nfs4_get_lease_time_done(struct rpc_task *task, void *calldata)
+{
+ struct nfs4_get_lease_time_data *data =
+ (struct nfs4_get_lease_time_data *)calldata;
+
+ dprintk("--> %s\n", __func__);
+ nfs41_sequence_done(data->clp, &data->res->lr_seq_res, task->tk_status);
+ switch (task->tk_status) {
+ case -NFS4ERR_DELAY:
+ case -NFS4ERR_GRACE:
+ dprintk("%s Retry: tk_status %d\n", __func__, task->tk_status);
+ rpc_delay(task, NFS4_POLL_RETRY_MIN);
+ task->tk_status = 0;
+ nfs4_restart_rpc(task, data->clp);
+ return;
+ }
+ nfs41_sequence_free_slot(data->clp, &data->res->lr_seq_res);
+ dprintk("<-- %s\n", __func__);
+}
+
+struct rpc_call_ops nfs4_get_lease_time_ops = {
+ .rpc_call_prepare = nfs4_get_lease_time_prepare,
+ .rpc_call_done = nfs4_get_lease_time_done,
+};
+
+int nfs4_proc_get_lease_time(struct nfs_client *clp, struct nfs_fsinfo *fsinfo)
+{
+ struct rpc_task *task;
+ struct nfs4_get_lease_time_args args;
+ struct nfs4_get_lease_time_res res = {
+ .lr_fsinfo = fsinfo,
+ };
+ struct nfs4_get_lease_time_data data = {
+ .args = &args,
+ .res = &res,
+ .clp = clp,
+ };
+ struct rpc_message msg = {
+ .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GET_LEASE_TIME],
+ .rpc_argp = &args,
+ .rpc_resp = &res,
+ };
+ struct rpc_task_setup task_setup = {
+ .rpc_client = clp->cl_rpcclient,
+ .rpc_message = &msg,
+ .callback_ops = &nfs4_get_lease_time_ops,
+ .callback_data = &data
+ };
+ int status;
+
+ res.lr_seq_res.sr_slotid = NFS4_MAX_SLOT_TABLE;
+ dprintk("--> %s\n", __func__);
+ task = rpc_run_task(&task_setup);
+
+ if (IS_ERR(task))
+ status = PTR_ERR(task);
+ else {
+ status = task->tk_status;
+ rpc_put_task(task);
+ }
+ dprintk("<-- %s return %d\n", __func__, status);
+
+ return status;
+}
+
+/*
+ * Reset a slot table
+ */
+static int nfs4_reset_slot_table(struct nfs4_slot_table *tbl, int max_slots,
+ int old_max_slots, int ivalue)
+{
+ int i;
+ int ret = 0;
+
+ dprintk("--> %s: max_reqs=%u, tbl %p\n", __func__, max_slots, tbl);
+
+ /*
+ * Until we have dynamic slot table adjustment, insist
+ * upon the same slot table size
+ */
+ if (max_slots != old_max_slots) {
+ dprintk("%s reset slot table does't match old\n",
+ __func__);
+ ret = -EINVAL; /*XXX NFS4ERR_REQ_TOO_BIG ? */
+ goto out;
+ }
+ spin_lock(&tbl->slot_tbl_lock);
+ for (i = 0; i < max_slots; ++i)
+ tbl->slots[i].seq_nr = ivalue;
+ tbl->highest_used_slotid = -1;
+ spin_unlock(&tbl->slot_tbl_lock);
+ dprintk("%s: tbl=%p slots=%p max_slots=%d\n", __func__,
+ tbl, tbl->slots, tbl->max_slots);
+out:
+ dprintk("<-- %s: return %d\n", __func__, ret);
+ return ret;
+}
+
+/*
+ * Reset the forechannel and backchannel slot tables
+ */
+static int nfs4_reset_slot_tables(struct nfs4_session *session)
+{
+ int status;
+
+ status = nfs4_reset_slot_table(&session->fc_slot_table,
+ session->fc_attrs.max_reqs,
+ session->fc_slot_table.max_slots,
+ 1);
+ if (status)
+ return status;
+
+ status = nfs4_reset_slot_table(&session->bc_slot_table,
+ session->bc_attrs.max_reqs,
+ session->bc_slot_table.max_slots,
+ 0);
+ return status;
+}
+
+/* Destroy the slot table */
+static void nfs4_destroy_slot_tables(struct nfs4_session *session)
+{
+ if (session->fc_slot_table.slots != NULL) {
+ kfree(session->fc_slot_table.slots);
+ session->fc_slot_table.slots = NULL;
+ }
+ if (session->bc_slot_table.slots != NULL) {
+ kfree(session->bc_slot_table.slots);
+ session->bc_slot_table.slots = NULL;
+ }
+ return;
+}
+
+/*
+ * Initialize slot table
+ */
+static int nfs4_init_slot_table(struct nfs4_slot_table *tbl,
+ int max_slots, int ivalue)
+{
+ int i;
+ struct nfs4_slot *slot;
+ int ret = -ENOMEM;
+
+ BUG_ON(max_slots > NFS4_MAX_SLOT_TABLE);
+
+ dprintk("--> %s: max_reqs=%u\n", __func__, max_slots);
+
+ slot = kcalloc(max_slots, sizeof(struct nfs4_slot), GFP_KERNEL);
+ if (!slot)
+ goto out;
+ for (i = 0; i < max_slots; ++i)
+ slot[i].seq_nr = ivalue;
+ ret = 0;
+
+ spin_lock(&tbl->slot_tbl_lock);
+ if (tbl->slots != NULL) {
+ spin_unlock(&tbl->slot_tbl_lock);
+ dprintk("%s: slot table already initialized. tbl=%p slots=%p\n",
+ __func__, tbl, tbl->slots);
+ WARN_ON(1);
+ goto out_free;
+ }
+ tbl->max_slots = max_slots;
+ tbl->slots = slot;
+ tbl->highest_used_slotid = -1; /* no slot is currently used */
+ spin_unlock(&tbl->slot_tbl_lock);
+ dprintk("%s: tbl=%p slots=%p max_slots=%d\n", __func__,
+ tbl, tbl->slots, tbl->max_slots);
+out:
+ dprintk("<-- %s: return %d\n", __func__, ret);
+ return ret;
+
+out_free:
+ kfree(slot);
+ goto out;
+}
+
+/*
+ * Initialize the forechannel and backchannel tables
+ */
+static int nfs4_init_slot_tables(struct nfs4_session *session)
+{
+ int status;
+
+ status = nfs4_init_slot_table(&session->fc_slot_table,
+ session->fc_attrs.max_reqs, 1);
+ if (status)
+ return status;
+
+ status = nfs4_init_slot_table(&session->bc_slot_table,
+ session->bc_attrs.max_reqs, 0);
+ if (status)
+ nfs4_destroy_slot_tables(session);
+
+ return status;
+}
+
+struct nfs4_session *nfs4_alloc_session(struct nfs_client *clp)
+{
+ struct nfs4_session *session;
+ struct nfs4_slot_table *tbl;
+
+ session = kzalloc(sizeof(struct nfs4_session), GFP_KERNEL);
+ if (!session)
+ return NULL;
+
+ set_bit(NFS4CLNT_SESSION_SETUP, &clp->cl_state);
+ /*
+ * The create session reply races with the server back
+ * channel probe. Mark the client NFS_CS_SESSION_INITING
+ * so that the client back channel can find the
+ * nfs_client struct.
+ */
+ clp->cl_cons_state = NFS_CS_SESSION_INITING;
+
+ tbl = &session->fc_slot_table;
+ spin_lock_init(&tbl->slot_tbl_lock);
+ rpc_init_wait_queue(&tbl->slot_tbl_waitq, "ForeChannel Slot table");
+
+ tbl = &session->bc_slot_table;
+ spin_lock_init(&tbl->slot_tbl_lock);
+ rpc_init_wait_queue(&tbl->slot_tbl_waitq, "BackChannel Slot table");
+
+ session->clp = clp;
+ return session;
+}
+
+void nfs4_destroy_session(struct nfs4_session *session)
+{
+ nfs4_proc_destroy_session(session);
+ dprintk("%s Destroy backchannel for xprt %p\n",
+ __func__, session->clp->cl_rpcclient->cl_xprt);
+ xprt_destroy_backchannel(session->clp->cl_rpcclient->cl_xprt,
+ NFS41_BC_MIN_CALLBACKS);
+ nfs4_destroy_slot_tables(session);
+ kfree(session);
+}
+
+/*
+ * Initialize the values to be used by the client in CREATE_SESSION
+ * If nfs4_init_session set the fore channel request and response sizes,
+ * use them.
+ *
+ * Set the back channel max_resp_sz_cached to zero to force the client to
+ * always set csa_cachethis to FALSE because the current implementation
+ * of the back channel DRC only supports caching the CB_SEQUENCE operation.
+ */
+static void nfs4_init_channel_attrs(struct nfs41_create_session_args *args)
+{
+ struct nfs4_session *session = args->client->cl_session;
+ unsigned int mxrqst_sz = session->fc_attrs.max_rqst_sz,
+ mxresp_sz = session->fc_attrs.max_resp_sz;
+
+ if (mxrqst_sz == 0)
+ mxrqst_sz = NFS_MAX_FILE_IO_SIZE;
+ if (mxresp_sz == 0)
+ mxresp_sz = NFS_MAX_FILE_IO_SIZE;
+ /* Fore channel attributes */
+ args->fc_attrs.headerpadsz = 0;
+ args->fc_attrs.max_rqst_sz = mxrqst_sz;
+ args->fc_attrs.max_resp_sz = mxresp_sz;
+ args->fc_attrs.max_resp_sz_cached = mxresp_sz;
+ args->fc_attrs.max_ops = NFS4_MAX_OPS;
+ args->fc_attrs.max_reqs = session->clp->cl_rpcclient->cl_xprt->max_reqs;
+
+ dprintk("%s: Fore Channel : max_rqst_sz=%u max_resp_sz=%u "
+ "max_resp_sz_cached=%u max_ops=%u max_reqs=%u\n",
+ __func__,
+ args->fc_attrs.max_rqst_sz, args->fc_attrs.max_resp_sz,
+ args->fc_attrs.max_resp_sz_cached, args->fc_attrs.max_ops,
+ args->fc_attrs.max_reqs);
+
+ /* Back channel attributes */
+ args->bc_attrs.headerpadsz = 0;
+ args->bc_attrs.max_rqst_sz = PAGE_SIZE;
+ args->bc_attrs.max_resp_sz = PAGE_SIZE;
+ args->bc_attrs.max_resp_sz_cached = 0;
+ args->bc_attrs.max_ops = NFS4_MAX_BACK_CHANNEL_OPS;
+ args->bc_attrs.max_reqs = 1;
+
+ dprintk("%s: Back Channel : max_rqst_sz=%u max_resp_sz=%u "
+ "max_resp_sz_cached=%u max_ops=%u max_reqs=%u\n",
+ __func__,
+ args->bc_attrs.max_rqst_sz, args->bc_attrs.max_resp_sz,
+ args->bc_attrs.max_resp_sz_cached, args->bc_attrs.max_ops,
+ args->bc_attrs.max_reqs);
+}
+
+static int _verify_channel_attr(char *chan, char *attr_name, u32 sent, u32 rcvd)
+{
+ if (rcvd <= sent)
+ return 0;
+ printk(KERN_WARNING "%s: Session INVALID: %s channel %s increased. "
+ "sent=%u rcvd=%u\n", __func__, chan, attr_name, sent, rcvd);
+ return -EINVAL;
+}
+
+#define _verify_fore_channel_attr(_name_) \
+ _verify_channel_attr("fore", #_name_, \
+ args->fc_attrs._name_, \
+ session->fc_attrs._name_)
+
+#define _verify_back_channel_attr(_name_) \
+ _verify_channel_attr("back", #_name_, \
+ args->bc_attrs._name_, \
+ session->bc_attrs._name_)
+
+/*
+ * The server is not allowed to increase the fore channel header pad size,
+ * maximum response size, or maximum number of operations.
+ *
+ * The back channel attributes are only negotiated down: we send what the
+ * (back channel) server insists upon.
+ */
+static int nfs4_verify_channel_attrs(struct nfs41_create_session_args *args,
+ struct nfs4_session *session)
+{
+ int ret = 0;
+
+ ret |= _verify_fore_channel_attr(headerpadsz);
+ ret |= _verify_fore_channel_attr(max_resp_sz);
+ ret |= _verify_fore_channel_attr(max_ops);
+
+ ret |= _verify_back_channel_attr(headerpadsz);
+ ret |= _verify_back_channel_attr(max_rqst_sz);
+ ret |= _verify_back_channel_attr(max_resp_sz);
+ ret |= _verify_back_channel_attr(max_resp_sz_cached);
+ ret |= _verify_back_channel_attr(max_ops);
+ ret |= _verify_back_channel_attr(max_reqs);
+
+ return ret;
+}
+
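+/*
+ * Send the CREATE_SESSION compound and verify the channel attributes the
+ * server returned against what we asked for.
+ */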
+static int _nfs4_proc_create_session(struct nfs_client *clp)
+{
+ struct nfs4_session *session = clp->cl_session;
+ struct nfs41_create_session_args args = {
+ .client = clp,
+ .cb_program = NFS4_CALLBACK,
+ };
+ struct nfs41_create_session_res res = {
+ .client = clp,
+ };
+ struct rpc_message msg = {
+ .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CREATE_SESSION],
+ .rpc_argp = &args,
+ .rpc_resp = &res,
+ };
+ int status;
+
+ nfs4_init_channel_attrs(&args);
+ args.flags = (SESSION4_PERSIST | SESSION4_BACK_CHAN);
+
+ status = rpc_call_sync(session->clp->cl_rpcclient, &msg, 0);
+
+ if (!status)
+ /* Verify the session's negotiated channel_attrs values */
+ status = nfs4_verify_channel_attrs(&args, session);
+ if (!status) {
+ /* Increment the clientid slot sequence id */
+ clp->cl_seqid++;
+ }
+
+ return status;
+}
+
+/*
+ * Issues a CREATE_SESSION operation to the server.
+ * It is the responsibility of the caller to verify the session is
+ * expired before calling this routine.
+ */
+int nfs4_proc_create_session(struct nfs_client *clp, int reset)
+{
+ int status;
+ unsigned *ptr;
+ struct nfs_fsinfo fsinfo;
+ struct nfs4_session *session = clp->cl_session;
+
+ dprintk("--> %s clp=%p session=%p\n", __func__, clp, session);
+
+ status = _nfs4_proc_create_session(clp);
+ if (status)
+ goto out;
+
+ /* Init or reset the fore channel */
+ if (reset)
+ status = nfs4_reset_slot_tables(session);
+ else
+ status = nfs4_init_slot_tables(session);
+ dprintk("fore channel slot table initialization returned %d\n", status);
+ if (status)
+ goto out;
+
+ ptr = (unsigned *)&session->sess_id.data[0];
+ dprintk("%s client>seqid %d sessionid %u:%u:%u:%u\n", __func__,
+ clp->cl_seqid, ptr[0], ptr[1], ptr[2], ptr[3]);
+
+ if (reset)
+ /* Lease time is already set */
+ goto out;
+
+ /* Get the lease time */
+ status = nfs4_proc_get_lease_time(clp, &fsinfo);
+ if (status == 0) {
+ /* Update lease time and schedule renewal */
+ spin_lock(&clp->cl_lock);
+ clp->cl_lease_time = fsinfo.lease_time * HZ;
+ clp->cl_last_renewal = jiffies;
+ clear_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state);
+ spin_unlock(&clp->cl_lock);
+
+ nfs4_schedule_state_renewal(clp);
+ }
+out:
+ dprintk("<-- %s\n", __func__);
+ return status;
+}
+
+/*
+ * Issue the over-the-wire RPC DESTROY_SESSION.
+ * The caller must serialize access to this routine.
+ */
+int nfs4_proc_destroy_session(struct nfs4_session *session)
+{
+ int status = 0;
+ struct rpc_message msg;
+
+ dprintk("--> nfs4_proc_destroy_session\n");
+
+ /* session is still being set up */
+ if (session->clp->cl_cons_state != NFS_CS_READY)
+ return status;
+
+ msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_DESTROY_SESSION];
+ msg.rpc_argp = session;
+ msg.rpc_resp = NULL;
+ msg.rpc_cred = NULL;
+ status = rpc_call_sync(session->clp->cl_rpcclient, &msg, 0);
+
+ if (status)
+ printk(KERN_WARNING
+ "Got error %d from the server on DESTROY_SESSION. "
+ "Session has been destroyed regardless...\n", status);
+
+ dprintk("<-- nfs4_proc_destroy_session\n");
+ return status;
+}
+
+/*
+ * Renew the cl_session lease.
+ */
+static int nfs4_proc_sequence(struct nfs_client *clp, struct rpc_cred *cred)
+{
+ struct nfs4_sequence_args args;
+ struct nfs4_sequence_res res;
+
+ struct rpc_message msg = {
+ .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SEQUENCE],
+ .rpc_argp = &args,
+ .rpc_resp = &res,
+ .rpc_cred = cred,
+ };
+
+ args.sa_cache_this = 0;
+
+ return nfs4_call_sync_sequence(clp, clp->cl_rpcclient, &msg, &args,
+ &res, 0);
+}
+
+void nfs41_sequence_call_done(struct rpc_task *task, void *data)
+{
+ struct nfs_client *clp = (struct nfs_client *)data;
+
+ nfs41_sequence_done(clp, task->tk_msg.rpc_resp, task->tk_status);
+
+ if (task->tk_status < 0) {
+ dprintk("%s ERROR %d\n", __func__, task->tk_status);
+
+ if (_nfs4_async_handle_error(task, NULL, clp, NULL)
+ == -EAGAIN) {
+ nfs4_restart_rpc(task, clp);
+ return;
+ }
+ }
+ nfs41_sequence_free_slot(clp, task->tk_msg.rpc_resp);
+ dprintk("%s rpc_cred %p\n", __func__, task->tk_msg.rpc_cred);
+
+ put_rpccred(task->tk_msg.rpc_cred);
+ kfree(task->tk_msg.rpc_argp);
+ kfree(task->tk_msg.rpc_resp);
+
+ dprintk("<-- %s\n", __func__);
+}
+
+static void nfs41_sequence_prepare(struct rpc_task *task, void *data)
+{
+ struct nfs_client *clp;
+ struct nfs4_sequence_args *args;
+ struct nfs4_sequence_res *res;
+
+ clp = (struct nfs_client *)data;
+ args = task->tk_msg.rpc_argp;
+ res = task->tk_msg.rpc_resp;
+
+ if (nfs4_setup_sequence(clp, args, res, 0, task))
+ return;
+ rpc_call_start(task);
+}
+
+static const struct rpc_call_ops nfs41_sequence_ops = {
+ .rpc_call_done = nfs41_sequence_call_done,
+ .rpc_call_prepare = nfs41_sequence_prepare,
+};
+
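+/*
+ * Fire off an asynchronous SEQUENCE to renew the lease; the args and res
+ * are freed in nfs41_sequence_call_done.
+ */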
+static int nfs41_proc_async_sequence(struct nfs_client *clp,
+ struct rpc_cred *cred)
+{
+ struct nfs4_sequence_args *args;
+ struct nfs4_sequence_res *res;
+ struct rpc_message msg = {
+ .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SEQUENCE],
+ .rpc_cred = cred,
+ };
+
+ args = kzalloc(sizeof(*args), GFP_KERNEL);
+ if (!args)
+ return -ENOMEM;
+ res = kzalloc(sizeof(*res), GFP_KERNEL);
+ if (!res) {
+ kfree(args);
+ return -ENOMEM;
+ }
+ res->sr_slotid = NFS4_MAX_SLOT_TABLE;
+ msg.rpc_argp = args;
+ msg.rpc_resp = res;
+
+ return rpc_call_async(clp->cl_rpcclient, &msg, RPC_TASK_SOFT,
+ &nfs41_sequence_ops, (void *)clp);
+}
+
+#endif /* CONFIG_NFS_V4_1 */
+
+struct nfs4_state_recovery_ops nfs40_reboot_recovery_ops = {
.owner_flag_bit = NFS_OWNER_RECLAIM_REBOOT,
.state_flag_bit = NFS_STATE_RECLAIM_REBOOT,
.recover_open = nfs4_open_reclaim,
.recover_lock = nfs4_lock_reclaim,
+ .establish_clid = nfs4_init_clientid,
+ .get_clid_cred = nfs4_get_setclientid_cred,
};
-struct nfs4_state_recovery_ops nfs4_nograce_recovery_ops = {
+#if defined(CONFIG_NFS_V4_1)
+struct nfs4_state_recovery_ops nfs41_reboot_recovery_ops = {
+ .owner_flag_bit = NFS_OWNER_RECLAIM_REBOOT,
+ .state_flag_bit = NFS_STATE_RECLAIM_REBOOT,
+ .recover_open = nfs4_open_reclaim,
+ .recover_lock = nfs4_lock_reclaim,
+ .establish_clid = nfs4_proc_exchange_id,
+ .get_clid_cred = nfs4_get_exchange_id_cred,
+};
+#endif /* CONFIG_NFS_V4_1 */
+
+struct nfs4_state_recovery_ops nfs40_nograce_recovery_ops = {
.owner_flag_bit = NFS_OWNER_RECLAIM_NOGRACE,
.state_flag_bit = NFS_STATE_RECLAIM_NOGRACE,
.recover_open = nfs4_open_expired,
.recover_lock = nfs4_lock_expired,
+ .establish_clid = nfs4_init_clientid,
+ .get_clid_cred = nfs4_get_setclientid_cred,
+};
+
+#if defined(CONFIG_NFS_V4_1)
+struct nfs4_state_recovery_ops nfs41_nograce_recovery_ops = {
+ .owner_flag_bit = NFS_OWNER_RECLAIM_NOGRACE,
+ .state_flag_bit = NFS_STATE_RECLAIM_NOGRACE,
+ .recover_open = nfs4_open_expired,
+ .recover_lock = nfs4_lock_expired,
+ .establish_clid = nfs4_proc_exchange_id,
+ .get_clid_cred = nfs4_get_exchange_id_cred,
+};
+#endif /* CONFIG_NFS_V4_1 */
+
+struct nfs4_state_maintenance_ops nfs40_state_renewal_ops = {
+ .sched_state_renewal = nfs4_proc_async_renew,
+ .get_state_renewal_cred_locked = nfs4_get_renew_cred_locked,
+ .renew_lease = nfs4_proc_renew,
+};
+
+#if defined(CONFIG_NFS_V4_1)
+struct nfs4_state_maintenance_ops nfs41_state_renewal_ops = {
+ .sched_state_renewal = nfs41_proc_async_sequence,
+ .get_state_renewal_cred_locked = nfs4_get_machine_cred_locked,
+ .renew_lease = nfs4_proc_sequence,
+};
+#endif
+
+/*
+ * Per minor version reboot and network partition recovery ops
+ */
+
+struct nfs4_state_recovery_ops *nfs4_reboot_recovery_ops[] = {
+ &nfs40_reboot_recovery_ops,
+#if defined(CONFIG_NFS_V4_1)
+ &nfs41_reboot_recovery_ops,
+#endif
+};
+
+struct nfs4_state_recovery_ops *nfs4_nograce_recovery_ops[] = {
+ &nfs40_nograce_recovery_ops,
+#if defined(CONFIG_NFS_V4_1)
+ &nfs41_nograce_recovery_ops,
+#endif
+};
+
+struct nfs4_state_maintenance_ops *nfs4_state_renewal_ops[] = {
+ &nfs40_state_renewal_ops,
+#if defined(CONFIG_NFS_V4_1)
+ &nfs41_state_renewal_ops,
+#endif
};
static const struct inode_operations nfs4_file_inode_operations = {
diff --git a/fs/nfs/nfs4renewd.c b/fs/nfs/nfs4renewd.c
index f524e93..e27c6ce 100644
--- a/fs/nfs/nfs4renewd.c
+++ b/fs/nfs/nfs4renewd.c
@@ -59,12 +59,14 @@
void
nfs4_renew_state(struct work_struct *work)
{
+ struct nfs4_state_maintenance_ops *ops;
struct nfs_client *clp =
container_of(work, struct nfs_client, cl_renewd.work);
struct rpc_cred *cred;
long lease, timeout;
unsigned long last, now;
+ ops = nfs4_state_renewal_ops[clp->cl_minorversion];
dprintk("%s: start\n", __func__);
/* Are there any active superblocks? */
if (list_empty(&clp->cl_superblocks))
@@ -76,7 +78,7 @@
timeout = (2 * lease) / 3 + (long)last - (long)now;
/* Are we close to a lease timeout? */
if (time_after(now, last + lease/3)) {
- cred = nfs4_get_renew_cred_locked(clp);
+ cred = ops->get_state_renewal_cred_locked(clp);
spin_unlock(&clp->cl_lock);
if (cred == NULL) {
if (list_empty(&clp->cl_delegations)) {
@@ -86,7 +88,7 @@
nfs_expire_all_delegations(clp);
} else {
/* Queue an asynchronous RENEW. */
- nfs4_proc_async_renew(clp, cred);
+ ops->sched_state_renewal(clp, cred);
put_rpccred(cred);
}
timeout = (2 * lease) / 3;
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
index 0298e90..b73c5a7 100644
--- a/fs/nfs/nfs4state.c
+++ b/fs/nfs/nfs4state.c
@@ -60,7 +60,7 @@
static LIST_HEAD(nfs4_clientid_list);
-static int nfs4_init_client(struct nfs_client *clp, struct rpc_cred *cred)
+int nfs4_init_clientid(struct nfs_client *clp, struct rpc_cred *cred)
{
unsigned short port;
int status;
@@ -77,7 +77,7 @@
return status;
}
-static struct rpc_cred *nfs4_get_machine_cred_locked(struct nfs_client *clp)
+struct rpc_cred *nfs4_get_machine_cred_locked(struct nfs_client *clp)
{
struct rpc_cred *cred = NULL;
@@ -114,17 +114,21 @@
return cred;
}
-static struct rpc_cred *nfs4_get_renew_cred(struct nfs_client *clp)
+#if defined(CONFIG_NFS_V4_1)
+
+struct rpc_cred *nfs4_get_exchange_id_cred(struct nfs_client *clp)
{
struct rpc_cred *cred;
spin_lock(&clp->cl_lock);
- cred = nfs4_get_renew_cred_locked(clp);
+ cred = nfs4_get_machine_cred_locked(clp);
spin_unlock(&clp->cl_lock);
return cred;
}
-static struct rpc_cred *nfs4_get_setclientid_cred(struct nfs_client *clp)
+#endif /* CONFIG_NFS_V4_1 */
+
+struct rpc_cred *nfs4_get_setclientid_cred(struct nfs_client *clp)
{
struct nfs4_state_owner *sp;
struct rb_node *pos;
@@ -738,12 +742,14 @@
void nfs_increment_open_seqid(int status, struct nfs_seqid *seqid)
{
- if (status == -NFS4ERR_BAD_SEQID) {
- struct nfs4_state_owner *sp = container_of(seqid->sequence,
- struct nfs4_state_owner, so_seqid);
+ struct nfs4_state_owner *sp = container_of(seqid->sequence,
+ struct nfs4_state_owner, so_seqid);
+ struct nfs_server *server = sp->so_server;
+
+ if (status == -NFS4ERR_BAD_SEQID)
nfs4_drop_state_owner(sp);
- }
- nfs_increment_seqid(status, seqid);
+ if (!nfs4_has_session(server->nfs_client))
+ nfs_increment_seqid(status, seqid);
}
/*
@@ -847,32 +853,45 @@
struct file_lock *fl;
int status = 0;
+ if (inode->i_flock == NULL)
+ return 0;
+
+ /* Guard against delegation returns and new lock/unlock calls */
down_write(&nfsi->rwsem);
+ /* Protect inode->i_flock using the BKL */
+ lock_kernel();
for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) {
if (!(fl->fl_flags & (FL_POSIX|FL_FLOCK)))
continue;
if (nfs_file_open_context(fl->fl_file)->state != state)
continue;
+ unlock_kernel();
status = ops->recover_lock(state, fl);
- if (status >= 0)
- continue;
switch (status) {
+ case 0:
+ break;
+ case -ESTALE:
+ case -NFS4ERR_ADMIN_REVOKED:
+ case -NFS4ERR_STALE_STATEID:
+ case -NFS4ERR_BAD_STATEID:
+ case -NFS4ERR_EXPIRED:
+ case -NFS4ERR_NO_GRACE:
+ case -NFS4ERR_STALE_CLIENTID:
+ goto out;
default:
printk(KERN_ERR "%s: unhandled error %d. Zeroing state\n",
__func__, status);
- case -NFS4ERR_EXPIRED:
- case -NFS4ERR_NO_GRACE:
+ case -ENOMEM:
+ case -NFS4ERR_DENIED:
case -NFS4ERR_RECLAIM_BAD:
case -NFS4ERR_RECLAIM_CONFLICT:
/* kill_proc(fl->fl_pid, SIGLOST, 1); */
- break;
- case -NFS4ERR_STALE_CLIENTID:
- goto out_err;
+ status = 0;
}
+ lock_kernel();
}
- up_write(&nfsi->rwsem);
- return 0;
-out_err:
+ unlock_kernel();
+out:
up_write(&nfsi->rwsem);
return status;
}
@@ -918,6 +937,7 @@
printk(KERN_ERR "%s: unhandled error %d. Zeroing state\n",
__func__, status);
case -ENOENT:
+ case -ENOMEM:
case -ESTALE:
/*
* Open state on this file cannot be recovered
@@ -928,6 +948,9 @@
/* Mark the file as being 'closed' */
state->state = 0;
break;
+ case -NFS4ERR_ADMIN_REVOKED:
+ case -NFS4ERR_STALE_STATEID:
+ case -NFS4ERR_BAD_STATEID:
case -NFS4ERR_RECLAIM_BAD:
case -NFS4ERR_RECLAIM_CONFLICT:
nfs4_state_mark_reclaim_nograce(sp->so_client, state);
@@ -1042,6 +1065,14 @@
case -NFS4ERR_EXPIRED:
set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state);
nfs4_state_start_reclaim_nograce(clp);
+ case -NFS4ERR_BADSESSION:
+ case -NFS4ERR_BADSLOT:
+ case -NFS4ERR_BAD_HIGH_SLOT:
+ case -NFS4ERR_DEADSESSION:
+ case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
+ case -NFS4ERR_SEQ_FALSE_RETRY:
+ case -NFS4ERR_SEQ_MISORDERED:
+ set_bit(NFS4CLNT_SESSION_SETUP, &clp->cl_state);
}
}
@@ -1075,18 +1106,22 @@
static int nfs4_check_lease(struct nfs_client *clp)
{
struct rpc_cred *cred;
+ struct nfs4_state_maintenance_ops *ops =
+ nfs4_state_renewal_ops[clp->cl_minorversion];
int status = -NFS4ERR_EXPIRED;
/* Is the client already known to have an expired lease? */
if (test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state))
return 0;
- cred = nfs4_get_renew_cred(clp);
+ spin_lock(&clp->cl_lock);
+ cred = ops->get_state_renewal_cred_locked(clp);
+ spin_unlock(&clp->cl_lock);
if (cred == NULL) {
cred = nfs4_get_setclientid_cred(clp);
if (cred == NULL)
goto out;
}
- status = nfs4_proc_renew(clp, cred);
+ status = ops->renew_lease(clp, cred);
put_rpccred(cred);
out:
nfs4_recovery_handle_error(clp, status);
@@ -1096,21 +1131,98 @@
static int nfs4_reclaim_lease(struct nfs_client *clp)
{
struct rpc_cred *cred;
+ struct nfs4_state_recovery_ops *ops =
+ nfs4_reboot_recovery_ops[clp->cl_minorversion];
int status = -ENOENT;
- cred = nfs4_get_setclientid_cred(clp);
+ cred = ops->get_clid_cred(clp);
if (cred != NULL) {
- status = nfs4_init_client(clp, cred);
+ status = ops->establish_clid(clp, cred);
put_rpccred(cred);
/* Handle case where the user hasn't set up machine creds */
if (status == -EACCES && cred == clp->cl_machine_cred) {
nfs4_clear_machine_cred(clp);
status = -EAGAIN;
}
+ if (status == -NFS4ERR_MINOR_VERS_MISMATCH)
+ status = -EPROTONOSUPPORT;
}
return status;
}
+#ifdef CONFIG_NFS_V4_1
+static void nfs4_session_recovery_handle_error(struct nfs_client *clp, int err)
+{
+ switch (err) {
+ case -NFS4ERR_STALE_CLIENTID:
+ set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state);
+ set_bit(NFS4CLNT_SESSION_SETUP, &clp->cl_state);
+ }
+}
+
+static int nfs4_reset_session(struct nfs_client *clp)
+{
+ int status;
+
+ status = nfs4_proc_destroy_session(clp->cl_session);
+ if (status && status != -NFS4ERR_BADSESSION &&
+ status != -NFS4ERR_DEADSESSION) {
+ nfs4_session_recovery_handle_error(clp, status);
+ goto out;
+ }
+
+ memset(clp->cl_session->sess_id.data, 0, NFS4_MAX_SESSIONID_LEN);
+ status = nfs4_proc_create_session(clp, 1);
+ if (status)
+ nfs4_session_recovery_handle_error(clp, status);
+	/* fall through */
+out:
+ /* Wake up the next rpc task even on error */
+ rpc_wake_up_next(&clp->cl_session->fc_slot_table.slot_tbl_waitq);
+ return status;
+}
+
+static int nfs4_initialize_session(struct nfs_client *clp)
+{
+ int status;
+
+ status = nfs4_proc_create_session(clp, 0);
+ if (!status) {
+ nfs_mark_client_ready(clp, NFS_CS_READY);
+ } else if (status == -NFS4ERR_STALE_CLIENTID) {
+ set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state);
+ set_bit(NFS4CLNT_SESSION_SETUP, &clp->cl_state);
+ } else {
+ nfs_mark_client_ready(clp, status);
+ }
+ return status;
+}
+#else /* CONFIG_NFS_V4_1 */
+static int nfs4_reset_session(struct nfs_client *clp) { return 0; }
+static int nfs4_initialize_session(struct nfs_client *clp) { return 0; }
+#endif /* CONFIG_NFS_V4_1 */
+
+/* Set NFS4CLNT_LEASE_EXPIRED for all v4.0 errors and for recoverable errors
+ * on EXCHANGE_ID for v4.1
+ */
+static void nfs4_set_lease_expired(struct nfs_client *clp, int status)
+{
+ if (nfs4_has_session(clp)) {
+ switch (status) {
+ case -NFS4ERR_DELAY:
+ case -NFS4ERR_CLID_INUSE:
+ case -EAGAIN:
+ break;
+
+ case -NFS4ERR_NOT_SAME: /* FixMe: implement recovery
+ * in nfs4_exchange_id */
+ default:
+ return;
+ }
+ }
+ set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state);
+}
+
static void nfs4_state_manager(struct nfs_client *clp)
{
int status = 0;
@@ -1121,9 +1233,12 @@
/* We're going to have to re-establish a clientid */
status = nfs4_reclaim_lease(clp);
if (status) {
- set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state);
+ nfs4_set_lease_expired(clp, status);
if (status == -EAGAIN)
continue;
+ if (clp->cl_cons_state ==
+ NFS_CS_SESSION_INITING)
+ nfs_mark_client_ready(clp, status);
goto out_error;
}
clear_bit(NFS4CLNT_CHECK_LEASE, &clp->cl_state);
@@ -1134,25 +1249,44 @@
if (status != 0)
continue;
}
-
+ /* Initialize or reset the session */
+ if (nfs4_has_session(clp) &&
+ test_and_clear_bit(NFS4CLNT_SESSION_SETUP, &clp->cl_state)) {
+ if (clp->cl_cons_state == NFS_CS_SESSION_INITING)
+ status = nfs4_initialize_session(clp);
+ else
+ status = nfs4_reset_session(clp);
+ if (status) {
+ if (status == -NFS4ERR_STALE_CLIENTID)
+ continue;
+ goto out_error;
+ }
+ }
/* First recover reboot state... */
if (test_and_clear_bit(NFS4CLNT_RECLAIM_REBOOT, &clp->cl_state)) {
- status = nfs4_do_reclaim(clp, &nfs4_reboot_recovery_ops);
+ status = nfs4_do_reclaim(clp,
+ nfs4_reboot_recovery_ops[clp->cl_minorversion]);
if (status == -NFS4ERR_STALE_CLIENTID)
continue;
+ if (test_bit(NFS4CLNT_SESSION_SETUP, &clp->cl_state))
+ continue;
nfs4_state_end_reclaim_reboot(clp);
continue;
}
/* Now recover expired state... */
if (test_and_clear_bit(NFS4CLNT_RECLAIM_NOGRACE, &clp->cl_state)) {
- status = nfs4_do_reclaim(clp, &nfs4_nograce_recovery_ops);
+ status = nfs4_do_reclaim(clp,
+ nfs4_nograce_recovery_ops[clp->cl_minorversion]);
if (status < 0) {
set_bit(NFS4CLNT_RECLAIM_NOGRACE, &clp->cl_state);
if (status == -NFS4ERR_STALE_CLIENTID)
continue;
if (status == -NFS4ERR_EXPIRED)
continue;
+ if (test_bit(NFS4CLNT_SESSION_SETUP,
+ &clp->cl_state))
+ continue;
goto out_error;
} else
nfs4_state_end_reclaim_nograce(clp);
diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c
index 1690f0e..617273e 100644
--- a/fs/nfs/nfs4xdr.c
+++ b/fs/nfs/nfs4xdr.c
@@ -192,12 +192,16 @@
decode_verifier_maxsz)
#define encode_remove_maxsz (op_encode_hdr_maxsz + \
nfs4_name_maxsz)
+#define decode_remove_maxsz (op_decode_hdr_maxsz + \
+ decode_change_info_maxsz)
#define encode_rename_maxsz (op_encode_hdr_maxsz + \
2 * nfs4_name_maxsz)
-#define decode_rename_maxsz (op_decode_hdr_maxsz + 5 + 5)
+#define decode_rename_maxsz (op_decode_hdr_maxsz + \
+ decode_change_info_maxsz + \
+ decode_change_info_maxsz)
#define encode_link_maxsz (op_encode_hdr_maxsz + \
nfs4_name_maxsz)
-#define decode_link_maxsz (op_decode_hdr_maxsz + 5)
+#define decode_link_maxsz (op_decode_hdr_maxsz + decode_change_info_maxsz)
#define encode_lock_maxsz (op_encode_hdr_maxsz + \
7 + \
1 + encode_stateid_maxsz + 8)
@@ -240,43 +244,115 @@
(encode_getattr_maxsz)
#define decode_fs_locations_maxsz \
(0)
+
+#if defined(CONFIG_NFS_V4_1)
+#define NFS4_MAX_MACHINE_NAME_LEN (64)
+
+#define encode_exchange_id_maxsz (op_encode_hdr_maxsz + \
+ encode_verifier_maxsz + \
+ 1 /* co_ownerid.len */ + \
+ XDR_QUADLEN(NFS4_EXCHANGE_ID_LEN) + \
+ 1 /* flags */ + \
+ 1 /* spa_how */ + \
+ 0 /* SP4_NONE (for now) */ + \
+				1 /* zero implementation id array */)
+#define decode_exchange_id_maxsz (op_decode_hdr_maxsz + \
+ 2 /* eir_clientid */ + \
+ 1 /* eir_sequenceid */ + \
+ 1 /* eir_flags */ + \
+ 1 /* spr_how */ + \
+ 0 /* SP4_NONE (for now) */ + \
+ 2 /* eir_server_owner.so_minor_id */ + \
+ /* eir_server_owner.so_major_id<> */ \
+ XDR_QUADLEN(NFS4_OPAQUE_LIMIT) + 1 + \
+ /* eir_server_scope<> */ \
+ XDR_QUADLEN(NFS4_OPAQUE_LIMIT) + 1 + \
+ 1 /* eir_server_impl_id array length */ + \
+ 0 /* ignored eir_server_impl_id contents */)
+#define encode_channel_attrs_maxsz (6 + 1 /* ca_rdma_ird.len (0) */)
+#define decode_channel_attrs_maxsz (6 + \
+ 1 /* ca_rdma_ird.len */ + \
+ 1 /* ca_rdma_ird */)
+#define encode_create_session_maxsz (op_encode_hdr_maxsz + \
+ 2 /* csa_clientid */ + \
+ 1 /* csa_sequence */ + \
+ 1 /* csa_flags */ + \
+ encode_channel_attrs_maxsz + \
+ encode_channel_attrs_maxsz + \
+ 1 /* csa_cb_program */ + \
+ 1 /* csa_sec_parms.len (1) */ + \
+ 1 /* cb_secflavor (AUTH_SYS) */ + \
+ 1 /* stamp */ + \
+ 1 /* machinename.len */ + \
+ XDR_QUADLEN(NFS4_MAX_MACHINE_NAME_LEN) + \
+ 1 /* uid */ + \
+ 1 /* gid */ + \
+ 1 /* gids.len (0) */)
+#define decode_create_session_maxsz (op_decode_hdr_maxsz + \
+ XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN) + \
+ 1 /* csr_sequence */ + \
+ 1 /* csr_flags */ + \
+ decode_channel_attrs_maxsz + \
+ decode_channel_attrs_maxsz)
+#define encode_destroy_session_maxsz (op_encode_hdr_maxsz + 4)
+#define decode_destroy_session_maxsz (op_decode_hdr_maxsz)
+#define encode_sequence_maxsz (op_encode_hdr_maxsz + \
+ XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN) + 4)
+#define decode_sequence_maxsz (op_decode_hdr_maxsz + \
+ XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN) + 5)
+#else /* CONFIG_NFS_V4_1 */
+#define encode_sequence_maxsz 0
+#define decode_sequence_maxsz 0
+#endif /* CONFIG_NFS_V4_1 */
+
#define NFS4_enc_compound_sz (1024) /* XXX: large enough? */
#define NFS4_dec_compound_sz (1024) /* XXX: large enough? */
#define NFS4_enc_read_sz (compound_encode_hdr_maxsz + \
+ encode_sequence_maxsz + \
encode_putfh_maxsz + \
encode_read_maxsz)
#define NFS4_dec_read_sz (compound_decode_hdr_maxsz + \
+ decode_sequence_maxsz + \
decode_putfh_maxsz + \
decode_read_maxsz)
#define NFS4_enc_readlink_sz (compound_encode_hdr_maxsz + \
+ encode_sequence_maxsz + \
encode_putfh_maxsz + \
encode_readlink_maxsz)
#define NFS4_dec_readlink_sz (compound_decode_hdr_maxsz + \
+ decode_sequence_maxsz + \
decode_putfh_maxsz + \
decode_readlink_maxsz)
#define NFS4_enc_readdir_sz (compound_encode_hdr_maxsz + \
+ encode_sequence_maxsz + \
encode_putfh_maxsz + \
encode_readdir_maxsz)
#define NFS4_dec_readdir_sz (compound_decode_hdr_maxsz + \
+ decode_sequence_maxsz + \
decode_putfh_maxsz + \
decode_readdir_maxsz)
#define NFS4_enc_write_sz (compound_encode_hdr_maxsz + \
+ encode_sequence_maxsz + \
encode_putfh_maxsz + \
encode_write_maxsz + \
encode_getattr_maxsz)
#define NFS4_dec_write_sz (compound_decode_hdr_maxsz + \
+ decode_sequence_maxsz + \
decode_putfh_maxsz + \
decode_write_maxsz + \
decode_getattr_maxsz)
#define NFS4_enc_commit_sz (compound_encode_hdr_maxsz + \
+ encode_sequence_maxsz + \
encode_putfh_maxsz + \
encode_commit_maxsz + \
encode_getattr_maxsz)
#define NFS4_dec_commit_sz (compound_decode_hdr_maxsz + \
+ decode_sequence_maxsz + \
decode_putfh_maxsz + \
decode_commit_maxsz + \
decode_getattr_maxsz)
#define NFS4_enc_open_sz (compound_encode_hdr_maxsz + \
+ encode_sequence_maxsz + \
encode_putfh_maxsz + \
encode_savefh_maxsz + \
encode_open_maxsz + \
@@ -285,6 +361,7 @@
encode_restorefh_maxsz + \
encode_getattr_maxsz)
#define NFS4_dec_open_sz (compound_decode_hdr_maxsz + \
+ decode_sequence_maxsz + \
decode_putfh_maxsz + \
decode_savefh_maxsz + \
decode_open_maxsz + \
@@ -301,43 +378,53 @@
decode_putfh_maxsz + \
decode_open_confirm_maxsz)
#define NFS4_enc_open_noattr_sz (compound_encode_hdr_maxsz + \
+ encode_sequence_maxsz + \
encode_putfh_maxsz + \
encode_open_maxsz + \
encode_getattr_maxsz)
#define NFS4_dec_open_noattr_sz (compound_decode_hdr_maxsz + \
+ decode_sequence_maxsz + \
decode_putfh_maxsz + \
decode_open_maxsz + \
decode_getattr_maxsz)
#define NFS4_enc_open_downgrade_sz \
(compound_encode_hdr_maxsz + \
+ encode_sequence_maxsz + \
encode_putfh_maxsz + \
encode_open_downgrade_maxsz + \
encode_getattr_maxsz)
#define NFS4_dec_open_downgrade_sz \
(compound_decode_hdr_maxsz + \
+ decode_sequence_maxsz + \
decode_putfh_maxsz + \
decode_open_downgrade_maxsz + \
decode_getattr_maxsz)
#define NFS4_enc_close_sz (compound_encode_hdr_maxsz + \
+ encode_sequence_maxsz + \
encode_putfh_maxsz + \
encode_close_maxsz + \
encode_getattr_maxsz)
#define NFS4_dec_close_sz (compound_decode_hdr_maxsz + \
+ decode_sequence_maxsz + \
decode_putfh_maxsz + \
decode_close_maxsz + \
decode_getattr_maxsz)
#define NFS4_enc_setattr_sz (compound_encode_hdr_maxsz + \
+ encode_sequence_maxsz + \
encode_putfh_maxsz + \
encode_setattr_maxsz + \
encode_getattr_maxsz)
#define NFS4_dec_setattr_sz (compound_decode_hdr_maxsz + \
+ decode_sequence_maxsz + \
decode_putfh_maxsz + \
decode_setattr_maxsz + \
decode_getattr_maxsz)
#define NFS4_enc_fsinfo_sz (compound_encode_hdr_maxsz + \
+ encode_sequence_maxsz + \
encode_putfh_maxsz + \
encode_fsinfo_maxsz)
#define NFS4_dec_fsinfo_sz (compound_decode_hdr_maxsz + \
+ decode_sequence_maxsz + \
decode_putfh_maxsz + \
decode_fsinfo_maxsz)
#define NFS4_enc_renew_sz (compound_encode_hdr_maxsz + \
@@ -359,64 +446,81 @@
decode_putrootfh_maxsz + \
decode_fsinfo_maxsz)
#define NFS4_enc_lock_sz (compound_encode_hdr_maxsz + \
+ encode_sequence_maxsz + \
encode_putfh_maxsz + \
encode_lock_maxsz)
#define NFS4_dec_lock_sz (compound_decode_hdr_maxsz + \
+ decode_sequence_maxsz + \
decode_putfh_maxsz + \
decode_lock_maxsz)
#define NFS4_enc_lockt_sz (compound_encode_hdr_maxsz + \
+ encode_sequence_maxsz + \
encode_putfh_maxsz + \
encode_lockt_maxsz)
#define NFS4_dec_lockt_sz (compound_decode_hdr_maxsz + \
+ decode_sequence_maxsz + \
decode_putfh_maxsz + \
decode_lockt_maxsz)
#define NFS4_enc_locku_sz (compound_encode_hdr_maxsz + \
+ encode_sequence_maxsz + \
encode_putfh_maxsz + \
encode_locku_maxsz)
#define NFS4_dec_locku_sz (compound_decode_hdr_maxsz + \
+ decode_sequence_maxsz + \
decode_putfh_maxsz + \
decode_locku_maxsz)
#define NFS4_enc_access_sz (compound_encode_hdr_maxsz + \
+ encode_sequence_maxsz + \
encode_putfh_maxsz + \
encode_access_maxsz + \
encode_getattr_maxsz)
#define NFS4_dec_access_sz (compound_decode_hdr_maxsz + \
+ decode_sequence_maxsz + \
decode_putfh_maxsz + \
decode_access_maxsz + \
decode_getattr_maxsz)
#define NFS4_enc_getattr_sz (compound_encode_hdr_maxsz + \
+ encode_sequence_maxsz + \
encode_putfh_maxsz + \
encode_getattr_maxsz)
#define NFS4_dec_getattr_sz (compound_decode_hdr_maxsz + \
+ decode_sequence_maxsz + \
decode_putfh_maxsz + \
decode_getattr_maxsz)
#define NFS4_enc_lookup_sz (compound_encode_hdr_maxsz + \
+ encode_sequence_maxsz + \
encode_putfh_maxsz + \
encode_lookup_maxsz + \
encode_getattr_maxsz + \
encode_getfh_maxsz)
#define NFS4_dec_lookup_sz (compound_decode_hdr_maxsz + \
+ decode_sequence_maxsz + \
decode_putfh_maxsz + \
decode_lookup_maxsz + \
decode_getattr_maxsz + \
decode_getfh_maxsz)
#define NFS4_enc_lookup_root_sz (compound_encode_hdr_maxsz + \
+ encode_sequence_maxsz + \
encode_putrootfh_maxsz + \
encode_getattr_maxsz + \
encode_getfh_maxsz)
#define NFS4_dec_lookup_root_sz (compound_decode_hdr_maxsz + \
+ decode_sequence_maxsz + \
decode_putrootfh_maxsz + \
decode_getattr_maxsz + \
decode_getfh_maxsz)
#define NFS4_enc_remove_sz (compound_encode_hdr_maxsz + \
+ encode_sequence_maxsz + \
encode_putfh_maxsz + \
encode_remove_maxsz + \
encode_getattr_maxsz)
#define NFS4_dec_remove_sz (compound_decode_hdr_maxsz + \
+ decode_sequence_maxsz + \
decode_putfh_maxsz + \
- op_decode_hdr_maxsz + 5 + \
+ decode_remove_maxsz + \
decode_getattr_maxsz)
#define NFS4_enc_rename_sz (compound_encode_hdr_maxsz + \
+ encode_sequence_maxsz + \
encode_putfh_maxsz + \
encode_savefh_maxsz + \
encode_putfh_maxsz + \
@@ -425,6 +529,7 @@
encode_restorefh_maxsz + \
encode_getattr_maxsz)
#define NFS4_dec_rename_sz (compound_decode_hdr_maxsz + \
+ decode_sequence_maxsz + \
decode_putfh_maxsz + \
decode_savefh_maxsz + \
decode_putfh_maxsz + \
@@ -433,6 +538,7 @@
decode_restorefh_maxsz + \
decode_getattr_maxsz)
#define NFS4_enc_link_sz (compound_encode_hdr_maxsz + \
+ encode_sequence_maxsz + \
encode_putfh_maxsz + \
encode_savefh_maxsz + \
encode_putfh_maxsz + \
@@ -441,6 +547,7 @@
encode_restorefh_maxsz + \
decode_getattr_maxsz)
#define NFS4_dec_link_sz (compound_decode_hdr_maxsz + \
+ decode_sequence_maxsz + \
decode_putfh_maxsz + \
decode_savefh_maxsz + \
decode_putfh_maxsz + \
@@ -449,16 +556,19 @@
decode_restorefh_maxsz + \
decode_getattr_maxsz)
#define NFS4_enc_symlink_sz (compound_encode_hdr_maxsz + \
+ encode_sequence_maxsz + \
encode_putfh_maxsz + \
encode_symlink_maxsz + \
encode_getattr_maxsz + \
encode_getfh_maxsz)
#define NFS4_dec_symlink_sz (compound_decode_hdr_maxsz + \
+ decode_sequence_maxsz + \
decode_putfh_maxsz + \
decode_symlink_maxsz + \
decode_getattr_maxsz + \
decode_getfh_maxsz)
#define NFS4_enc_create_sz (compound_encode_hdr_maxsz + \
+ encode_sequence_maxsz + \
encode_putfh_maxsz + \
encode_savefh_maxsz + \
encode_create_maxsz + \
@@ -467,6 +577,7 @@
encode_restorefh_maxsz + \
encode_getattr_maxsz)
#define NFS4_dec_create_sz (compound_decode_hdr_maxsz + \
+ decode_sequence_maxsz + \
decode_putfh_maxsz + \
decode_savefh_maxsz + \
decode_create_maxsz + \
@@ -475,52 +586,98 @@
decode_restorefh_maxsz + \
decode_getattr_maxsz)
#define NFS4_enc_pathconf_sz (compound_encode_hdr_maxsz + \
+ encode_sequence_maxsz + \
encode_putfh_maxsz + \
encode_getattr_maxsz)
#define NFS4_dec_pathconf_sz (compound_decode_hdr_maxsz + \
+ decode_sequence_maxsz + \
decode_putfh_maxsz + \
decode_getattr_maxsz)
#define NFS4_enc_statfs_sz (compound_encode_hdr_maxsz + \
+ encode_sequence_maxsz + \
encode_putfh_maxsz + \
encode_statfs_maxsz)
#define NFS4_dec_statfs_sz (compound_decode_hdr_maxsz + \
+ decode_sequence_maxsz + \
decode_putfh_maxsz + \
decode_statfs_maxsz)
#define NFS4_enc_server_caps_sz (compound_encode_hdr_maxsz + \
+ encode_sequence_maxsz + \
encode_putfh_maxsz + \
encode_getattr_maxsz)
#define NFS4_dec_server_caps_sz (compound_decode_hdr_maxsz + \
+ decode_sequence_maxsz + \
decode_putfh_maxsz + \
decode_getattr_maxsz)
#define NFS4_enc_delegreturn_sz (compound_encode_hdr_maxsz + \
+ encode_sequence_maxsz + \
encode_putfh_maxsz + \
encode_delegreturn_maxsz + \
encode_getattr_maxsz)
#define NFS4_dec_delegreturn_sz (compound_decode_hdr_maxsz + \
+ decode_sequence_maxsz + \
decode_delegreturn_maxsz + \
decode_getattr_maxsz)
#define NFS4_enc_getacl_sz (compound_encode_hdr_maxsz + \
+ encode_sequence_maxsz + \
encode_putfh_maxsz + \
encode_getacl_maxsz)
#define NFS4_dec_getacl_sz (compound_decode_hdr_maxsz + \
+ decode_sequence_maxsz + \
decode_putfh_maxsz + \
decode_getacl_maxsz)
#define NFS4_enc_setacl_sz (compound_encode_hdr_maxsz + \
+ encode_sequence_maxsz + \
encode_putfh_maxsz + \
encode_setacl_maxsz)
#define NFS4_dec_setacl_sz (compound_decode_hdr_maxsz + \
+ decode_sequence_maxsz + \
decode_putfh_maxsz + \
decode_setacl_maxsz)
#define NFS4_enc_fs_locations_sz \
(compound_encode_hdr_maxsz + \
+ encode_sequence_maxsz + \
encode_putfh_maxsz + \
encode_lookup_maxsz + \
encode_fs_locations_maxsz)
#define NFS4_dec_fs_locations_sz \
(compound_decode_hdr_maxsz + \
+ decode_sequence_maxsz + \
decode_putfh_maxsz + \
decode_lookup_maxsz + \
decode_fs_locations_maxsz)
+#if defined(CONFIG_NFS_V4_1)
+#define NFS4_enc_exchange_id_sz \
+ (compound_encode_hdr_maxsz + \
+ encode_exchange_id_maxsz)
+#define NFS4_dec_exchange_id_sz \
+ (compound_decode_hdr_maxsz + \
+ decode_exchange_id_maxsz)
+#define NFS4_enc_create_session_sz \
+ (compound_encode_hdr_maxsz + \
+ encode_create_session_maxsz)
+#define NFS4_dec_create_session_sz \
+ (compound_decode_hdr_maxsz + \
+ decode_create_session_maxsz)
+#define NFS4_enc_destroy_session_sz (compound_encode_hdr_maxsz + \
+ encode_destroy_session_maxsz)
+#define NFS4_dec_destroy_session_sz (compound_decode_hdr_maxsz + \
+ decode_destroy_session_maxsz)
+#define NFS4_enc_sequence_sz \
+				(compound_encode_hdr_maxsz + \
+ encode_sequence_maxsz)
+#define NFS4_dec_sequence_sz \
+ (compound_decode_hdr_maxsz + \
+ decode_sequence_maxsz)
+#define NFS4_enc_get_lease_time_sz (compound_encode_hdr_maxsz + \
+ encode_sequence_maxsz + \
+ encode_putrootfh_maxsz + \
+ encode_fsinfo_maxsz)
+#define NFS4_dec_get_lease_time_sz (compound_decode_hdr_maxsz + \
+ decode_sequence_maxsz + \
+ decode_putrootfh_maxsz + \
+ decode_fsinfo_maxsz)
+#endif /* CONFIG_NFS_V4_1 */
static const umode_t nfs_type2fmt[] = {
[NF4BAD] = 0,
@@ -541,6 +698,8 @@
__be32 * nops_p;
uint32_t taglen;
char * tag;
+ uint32_t replen; /* expected reply words */
+ u32 minorversion;
};
/*
@@ -576,22 +735,31 @@
xdr_encode_opaque(p, str, len);
}
-static void encode_compound_hdr(struct xdr_stream *xdr, struct compound_hdr *hdr)
+static void encode_compound_hdr(struct xdr_stream *xdr,
+ struct rpc_rqst *req,
+ struct compound_hdr *hdr)
{
__be32 *p;
+ struct rpc_auth *auth = req->rq_task->tk_msg.rpc_cred->cr_auth;
+
+	/* initialize running count of expected reply words.
+	 * NOTE: the replied tag SHOULD be the same as the one sent,
+	 * but the server is not required to return it unchanged. */
+ hdr->replen = RPC_REPHDRSIZE + auth->au_rslack + 3 + hdr->taglen;
dprintk("encode_compound: tag=%.*s\n", (int)hdr->taglen, hdr->tag);
BUG_ON(hdr->taglen > NFS4_MAXTAGLEN);
RESERVE_SPACE(12+(XDR_QUADLEN(hdr->taglen)<<2));
WRITE32(hdr->taglen);
WRITEMEM(hdr->tag, hdr->taglen);
- WRITE32(NFS4_MINOR_VERSION);
+ WRITE32(hdr->minorversion);
hdr->nops_p = p;
WRITE32(hdr->nops);
}
static void encode_nops(struct compound_hdr *hdr)
{
+ BUG_ON(hdr->nops > NFS4_MAX_OPS);
*hdr->nops_p = htonl(hdr->nops);
}
@@ -736,6 +904,7 @@
WRITE32(OP_ACCESS);
WRITE32(access);
hdr->nops++;
+ hdr->replen += decode_access_maxsz;
}
static void encode_close(struct xdr_stream *xdr, const struct nfs_closeargs *arg, struct compound_hdr *hdr)
@@ -747,6 +916,7 @@
WRITE32(arg->seqid->sequence->counter);
WRITEMEM(arg->stateid->data, NFS4_STATEID_SIZE);
hdr->nops++;
+ hdr->replen += decode_close_maxsz;
}
static void encode_commit(struct xdr_stream *xdr, const struct nfs_writeargs *args, struct compound_hdr *hdr)
@@ -758,6 +928,7 @@
WRITE64(args->offset);
WRITE32(args->count);
hdr->nops++;
+ hdr->replen += decode_commit_maxsz;
}
static void encode_create(struct xdr_stream *xdr, const struct nfs4_create_arg *create, struct compound_hdr *hdr)
@@ -789,6 +960,7 @@
WRITE32(create->name->len);
WRITEMEM(create->name->name, create->name->len);
hdr->nops++;
+ hdr->replen += decode_create_maxsz;
encode_attrs(xdr, create->attrs, create->server);
}
@@ -802,6 +974,7 @@
WRITE32(1);
WRITE32(bitmap);
hdr->nops++;
+ hdr->replen += decode_getattr_maxsz;
}
static void encode_getattr_two(struct xdr_stream *xdr, uint32_t bm0, uint32_t bm1, struct compound_hdr *hdr)
@@ -814,6 +987,7 @@
WRITE32(bm0);
WRITE32(bm1);
hdr->nops++;
+ hdr->replen += decode_getattr_maxsz;
}
static void encode_getfattr(struct xdr_stream *xdr, const u32* bitmask, struct compound_hdr *hdr)
@@ -841,6 +1015,7 @@
RESERVE_SPACE(4);
WRITE32(OP_GETFH);
hdr->nops++;
+ hdr->replen += decode_getfh_maxsz;
}
static void encode_link(struct xdr_stream *xdr, const struct qstr *name, struct compound_hdr *hdr)
@@ -852,6 +1027,7 @@
WRITE32(name->len);
WRITEMEM(name->name, name->len);
hdr->nops++;
+ hdr->replen += decode_link_maxsz;
}
static inline int nfs4_lock_type(struct file_lock *fl, int block)
@@ -899,6 +1075,7 @@
WRITE32(args->lock_seqid->sequence->counter);
}
hdr->nops++;
+ hdr->replen += decode_lock_maxsz;
}
static void encode_lockt(struct xdr_stream *xdr, const struct nfs_lockt_args *args, struct compound_hdr *hdr)
@@ -915,6 +1092,7 @@
WRITEMEM("lock id:", 8);
WRITE64(args->lock_owner.id);
hdr->nops++;
+ hdr->replen += decode_lockt_maxsz;
}
static void encode_locku(struct xdr_stream *xdr, const struct nfs_locku_args *args, struct compound_hdr *hdr)
@@ -929,6 +1107,7 @@
WRITE64(args->fl->fl_start);
WRITE64(nfs4_lock_length(args->fl));
hdr->nops++;
+ hdr->replen += decode_locku_maxsz;
}
static void encode_lookup(struct xdr_stream *xdr, const struct qstr *name, struct compound_hdr *hdr)
@@ -941,6 +1120,7 @@
WRITE32(len);
WRITEMEM(name->name, len);
hdr->nops++;
+ hdr->replen += decode_lookup_maxsz;
}
static void encode_share_access(struct xdr_stream *xdr, fmode_t fmode)
@@ -1080,6 +1260,7 @@
BUG();
}
hdr->nops++;
+ hdr->replen += decode_open_maxsz;
}
static void encode_open_confirm(struct xdr_stream *xdr, const struct nfs_open_confirmargs *arg, struct compound_hdr *hdr)
@@ -1091,6 +1272,7 @@
WRITEMEM(arg->stateid->data, NFS4_STATEID_SIZE);
WRITE32(arg->seqid->sequence->counter);
hdr->nops++;
+ hdr->replen += decode_open_confirm_maxsz;
}
static void encode_open_downgrade(struct xdr_stream *xdr, const struct nfs_closeargs *arg, struct compound_hdr *hdr)
@@ -1103,6 +1285,7 @@
WRITE32(arg->seqid->sequence->counter);
encode_share_access(xdr, arg->fmode);
hdr->nops++;
+ hdr->replen += decode_open_downgrade_maxsz;
}
static void
@@ -1116,6 +1299,7 @@
WRITE32(len);
WRITEMEM(fh->data, len);
hdr->nops++;
+ hdr->replen += decode_putfh_maxsz;
}
static void encode_putrootfh(struct xdr_stream *xdr, struct compound_hdr *hdr)
@@ -1125,6 +1309,7 @@
RESERVE_SPACE(4);
WRITE32(OP_PUTROOTFH);
hdr->nops++;
+ hdr->replen += decode_putrootfh_maxsz;
}
static void encode_stateid(struct xdr_stream *xdr, const struct nfs_open_context *ctx)
@@ -1153,6 +1338,7 @@
WRITE64(args->offset);
WRITE32(args->count);
hdr->nops++;
+ hdr->replen += decode_read_maxsz;
}
static void encode_readdir(struct xdr_stream *xdr, const struct nfs4_readdir_arg *readdir, struct rpc_rqst *req, struct compound_hdr *hdr)
@@ -1178,6 +1364,7 @@
WRITE32(attrs[0] & readdir->bitmask[0]);
WRITE32(attrs[1] & readdir->bitmask[1]);
hdr->nops++;
+ hdr->replen += decode_readdir_maxsz;
dprintk("%s: cookie = %Lu, verifier = %08x:%08x, bitmap = %08x:%08x\n",
__func__,
(unsigned long long)readdir->cookie,
@@ -1194,6 +1381,7 @@
RESERVE_SPACE(4);
WRITE32(OP_READLINK);
hdr->nops++;
+ hdr->replen += decode_readlink_maxsz;
}
static void encode_remove(struct xdr_stream *xdr, const struct qstr *name, struct compound_hdr *hdr)
@@ -1205,6 +1393,7 @@
WRITE32(name->len);
WRITEMEM(name->name, name->len);
hdr->nops++;
+ hdr->replen += decode_remove_maxsz;
}
static void encode_rename(struct xdr_stream *xdr, const struct qstr *oldname, const struct qstr *newname, struct compound_hdr *hdr)
@@ -1220,6 +1409,7 @@
WRITE32(newname->len);
WRITEMEM(newname->name, newname->len);
hdr->nops++;
+ hdr->replen += decode_rename_maxsz;
}
static void encode_renew(struct xdr_stream *xdr, const struct nfs_client *client_stateid, struct compound_hdr *hdr)
@@ -1230,6 +1420,7 @@
WRITE32(OP_RENEW);
WRITE64(client_stateid->cl_clientid);
hdr->nops++;
+ hdr->replen += decode_renew_maxsz;
}
static void
@@ -1240,6 +1431,7 @@
RESERVE_SPACE(4);
WRITE32(OP_RESTOREFH);
hdr->nops++;
+ hdr->replen += decode_restorefh_maxsz;
}
static int
@@ -1259,6 +1451,7 @@
WRITE32(arg->acl_len);
xdr_write_pages(xdr, arg->acl_pages, arg->acl_pgbase, arg->acl_len);
hdr->nops++;
+ hdr->replen += decode_setacl_maxsz;
return 0;
}
@@ -1270,6 +1463,7 @@
RESERVE_SPACE(4);
WRITE32(OP_SAVEFH);
hdr->nops++;
+ hdr->replen += decode_savefh_maxsz;
}
static void encode_setattr(struct xdr_stream *xdr, const struct nfs_setattrargs *arg, const struct nfs_server *server, struct compound_hdr *hdr)
@@ -1280,6 +1474,7 @@
WRITE32(OP_SETATTR);
WRITEMEM(arg->stateid.data, NFS4_STATEID_SIZE);
hdr->nops++;
+ hdr->replen += decode_setattr_maxsz;
encode_attrs(xdr, arg->iap, server);
}
@@ -1299,6 +1494,7 @@
RESERVE_SPACE(4);
WRITE32(setclientid->sc_cb_ident);
hdr->nops++;
+ hdr->replen += decode_setclientid_maxsz;
}
static void encode_setclientid_confirm(struct xdr_stream *xdr, const struct nfs_client *client_state, struct compound_hdr *hdr)
@@ -1310,6 +1506,7 @@
WRITE64(client_state->cl_clientid);
WRITEMEM(client_state->cl_confirm.data, NFS4_VERIFIER_SIZE);
hdr->nops++;
+ hdr->replen += decode_setclientid_confirm_maxsz;
}
static void encode_write(struct xdr_stream *xdr, const struct nfs_writeargs *args, struct compound_hdr *hdr)
@@ -1328,6 +1525,7 @@
xdr_write_pages(xdr, args->pages, args->pgbase, args->count);
hdr->nops++;
+ hdr->replen += decode_write_maxsz;
}
static void encode_delegreturn(struct xdr_stream *xdr, const nfs4_stateid *stateid, struct compound_hdr *hdr)
@@ -1339,11 +1537,163 @@
WRITE32(OP_DELEGRETURN);
WRITEMEM(stateid->data, NFS4_STATEID_SIZE);
hdr->nops++;
+ hdr->replen += decode_delegreturn_maxsz;
}
+
+#if defined(CONFIG_NFS_V4_1)
+/* NFSv4.1 operations */
+static void encode_exchange_id(struct xdr_stream *xdr,
+ struct nfs41_exchange_id_args *args,
+ struct compound_hdr *hdr)
+{
+ __be32 *p;
+
+ RESERVE_SPACE(4 + sizeof(args->verifier->data));
+ WRITE32(OP_EXCHANGE_ID);
+ WRITEMEM(args->verifier->data, sizeof(args->verifier->data));
+
+ encode_string(xdr, args->id_len, args->id);
+
+ RESERVE_SPACE(12);
+ WRITE32(args->flags);
+ WRITE32(0); /* zero length state_protect4_a */
+ WRITE32(0); /* zero length implementation id array */
+ hdr->nops++;
+ hdr->replen += decode_exchange_id_maxsz;
+}
+
+static void encode_create_session(struct xdr_stream *xdr,
+ struct nfs41_create_session_args *args,
+ struct compound_hdr *hdr)
+{
+ __be32 *p;
+ char machine_name[NFS4_MAX_MACHINE_NAME_LEN];
+ uint32_t len;
+ struct nfs_client *clp = args->client;
+
+ RESERVE_SPACE(4);
+ WRITE32(OP_CREATE_SESSION);
+
+ RESERVE_SPACE(8);
+ WRITE64(clp->cl_ex_clid);
+
+ RESERVE_SPACE(8);
+	WRITE32(clp->cl_seqid);			/* sequence id */
+	WRITE32(args->flags);			/* flags */
+
+ RESERVE_SPACE(2*28); /* 2 channel_attrs */
+ /* Fore Channel */
+ WRITE32(args->fc_attrs.headerpadsz); /* header padding size */
+ WRITE32(args->fc_attrs.max_rqst_sz); /* max req size */
+ WRITE32(args->fc_attrs.max_resp_sz); /* max resp size */
+ WRITE32(args->fc_attrs.max_resp_sz_cached); /* Max resp sz cached */
+ WRITE32(args->fc_attrs.max_ops); /* max operations */
+ WRITE32(args->fc_attrs.max_reqs); /* max requests */
+ WRITE32(0); /* rdmachannel_attrs */
+
+ /* Back Channel */
+	WRITE32(args->bc_attrs.headerpadsz);	/* header padding size */
+ WRITE32(args->bc_attrs.max_rqst_sz); /* max req size */
+ WRITE32(args->bc_attrs.max_resp_sz); /* max resp size */
+ WRITE32(args->bc_attrs.max_resp_sz_cached); /* Max resp sz cached */
+ WRITE32(args->bc_attrs.max_ops); /* max operations */
+ WRITE32(args->bc_attrs.max_reqs); /* max requests */
+ WRITE32(0); /* rdmachannel_attrs */
+
+ RESERVE_SPACE(4);
+ WRITE32(args->cb_program); /* cb_program */
+
+ RESERVE_SPACE(4); /* # of security flavors */
+ WRITE32(1);
+
+ RESERVE_SPACE(4);
+ WRITE32(RPC_AUTH_UNIX); /* auth_sys */
+
+ /* authsys_parms rfc1831 */
+ RESERVE_SPACE(4);
+ WRITE32((u32)clp->cl_boot_time.tv_nsec); /* stamp */
+ len = scnprintf(machine_name, sizeof(machine_name), "%s",
+ clp->cl_ipaddr);
+ RESERVE_SPACE(16 + len);
+ WRITE32(len);
+ WRITEMEM(machine_name, len);
+ WRITE32(0); /* UID */
+ WRITE32(0); /* GID */
+ WRITE32(0); /* No more gids */
+ hdr->nops++;
+ hdr->replen += decode_create_session_maxsz;
+}
+
+static void encode_destroy_session(struct xdr_stream *xdr,
+ struct nfs4_session *session,
+ struct compound_hdr *hdr)
+{
+ __be32 *p;
+ RESERVE_SPACE(4 + NFS4_MAX_SESSIONID_LEN);
+ WRITE32(OP_DESTROY_SESSION);
+ WRITEMEM(session->sess_id.data, NFS4_MAX_SESSIONID_LEN);
+ hdr->nops++;
+ hdr->replen += decode_destroy_session_maxsz;
+}
+#endif /* CONFIG_NFS_V4_1 */
+
+static void encode_sequence(struct xdr_stream *xdr,
+ const struct nfs4_sequence_args *args,
+ struct compound_hdr *hdr)
+{
+#if defined(CONFIG_NFS_V4_1)
+ struct nfs4_session *session = args->sa_session;
+ struct nfs4_slot_table *tp;
+ struct nfs4_slot *slot;
+ __be32 *p;
+
+ if (!session)
+ return;
+
+ tp = &session->fc_slot_table;
+
+ WARN_ON(args->sa_slotid == NFS4_MAX_SLOT_TABLE);
+ slot = tp->slots + args->sa_slotid;
+
+ RESERVE_SPACE(4);
+ WRITE32(OP_SEQUENCE);
+
+ /*
+ * Sessionid + seqid + slotid + max slotid + cache_this
+ */
+ dprintk("%s: sessionid=%u:%u:%u:%u seqid=%d slotid=%d "
+ "max_slotid=%d cache_this=%d\n",
+ __func__,
+ ((u32 *)session->sess_id.data)[0],
+ ((u32 *)session->sess_id.data)[1],
+ ((u32 *)session->sess_id.data)[2],
+ ((u32 *)session->sess_id.data)[3],
+ slot->seq_nr, args->sa_slotid,
+ tp->highest_used_slotid, args->sa_cache_this);
+ RESERVE_SPACE(NFS4_MAX_SESSIONID_LEN + 16);
+ WRITEMEM(session->sess_id.data, NFS4_MAX_SESSIONID_LEN);
+ WRITE32(slot->seq_nr);
+ WRITE32(args->sa_slotid);
+ WRITE32(tp->highest_used_slotid);
+ WRITE32(args->sa_cache_this);
+ hdr->nops++;
+ hdr->replen += decode_sequence_maxsz;
+#endif /* CONFIG_NFS_V4_1 */
+}
+
/*
* END OF "GENERIC" ENCODE ROUTINES.
*/
+static u32 nfs4_xdr_minorversion(const struct nfs4_sequence_args *args)
+{
+#if defined(CONFIG_NFS_V4_1)
+ if (args->sa_session)
+ return args->sa_session->clp->cl_minorversion;
+#endif /* CONFIG_NFS_V4_1 */
+ return 0;
+}
+
/*
* Encode an ACCESS request
*/
@@ -1351,11 +1701,12 @@
{
struct xdr_stream xdr;
struct compound_hdr hdr = {
- .nops = 0,
+ .minorversion = nfs4_xdr_minorversion(&args->seq_args),
};
xdr_init_encode(&xdr, &req->rq_snd_buf, p);
- encode_compound_hdr(&xdr, &hdr);
+ encode_compound_hdr(&xdr, req, &hdr);
+ encode_sequence(&xdr, &args->seq_args, &hdr);
encode_putfh(&xdr, args->fh, &hdr);
encode_access(&xdr, args->access, &hdr);
encode_getfattr(&xdr, args->bitmask, &hdr);
@@ -1370,11 +1721,12 @@
{
struct xdr_stream xdr;
struct compound_hdr hdr = {
- .nops = 0,
+ .minorversion = nfs4_xdr_minorversion(&args->seq_args),
};
xdr_init_encode(&xdr, &req->rq_snd_buf, p);
- encode_compound_hdr(&xdr, &hdr);
+ encode_compound_hdr(&xdr, req, &hdr);
+ encode_sequence(&xdr, &args->seq_args, &hdr);
encode_putfh(&xdr, args->dir_fh, &hdr);
encode_lookup(&xdr, args->name, &hdr);
encode_getfh(&xdr, &hdr);
@@ -1390,11 +1742,12 @@
{
struct xdr_stream xdr;
struct compound_hdr hdr = {
- .nops = 0,
+ .minorversion = nfs4_xdr_minorversion(&args->seq_args),
};
xdr_init_encode(&xdr, &req->rq_snd_buf, p);
- encode_compound_hdr(&xdr, &hdr);
+ encode_compound_hdr(&xdr, req, &hdr);
+ encode_sequence(&xdr, &args->seq_args, &hdr);
encode_putrootfh(&xdr, &hdr);
encode_getfh(&xdr, &hdr);
encode_getfattr(&xdr, args->bitmask, &hdr);
@@ -1409,11 +1762,12 @@
{
struct xdr_stream xdr;
struct compound_hdr hdr = {
- .nops = 0,
+ .minorversion = nfs4_xdr_minorversion(&args->seq_args),
};
xdr_init_encode(&xdr, &req->rq_snd_buf, p);
- encode_compound_hdr(&xdr, &hdr);
+ encode_compound_hdr(&xdr, req, &hdr);
+ encode_sequence(&xdr, &args->seq_args, &hdr);
encode_putfh(&xdr, args->fh, &hdr);
encode_remove(&xdr, &args->name, &hdr);
encode_getfattr(&xdr, args->bitmask, &hdr);
@@ -1428,11 +1782,12 @@
{
struct xdr_stream xdr;
struct compound_hdr hdr = {
- .nops = 0,
+ .minorversion = nfs4_xdr_minorversion(&args->seq_args),
};
xdr_init_encode(&xdr, &req->rq_snd_buf, p);
- encode_compound_hdr(&xdr, &hdr);
+ encode_compound_hdr(&xdr, req, &hdr);
+ encode_sequence(&xdr, &args->seq_args, &hdr);
encode_putfh(&xdr, args->old_dir, &hdr);
encode_savefh(&xdr, &hdr);
encode_putfh(&xdr, args->new_dir, &hdr);
@@ -1451,11 +1806,12 @@
{
struct xdr_stream xdr;
struct compound_hdr hdr = {
- .nops = 0,
+ .minorversion = nfs4_xdr_minorversion(&args->seq_args),
};
xdr_init_encode(&xdr, &req->rq_snd_buf, p);
- encode_compound_hdr(&xdr, &hdr);
+ encode_compound_hdr(&xdr, req, &hdr);
+ encode_sequence(&xdr, &args->seq_args, &hdr);
encode_putfh(&xdr, args->fh, &hdr);
encode_savefh(&xdr, &hdr);
encode_putfh(&xdr, args->dir_fh, &hdr);
@@ -1474,11 +1830,12 @@
{
struct xdr_stream xdr;
struct compound_hdr hdr = {
- .nops = 0,
+ .minorversion = nfs4_xdr_minorversion(&args->seq_args),
};
xdr_init_encode(&xdr, &req->rq_snd_buf, p);
- encode_compound_hdr(&xdr, &hdr);
+ encode_compound_hdr(&xdr, req, &hdr);
+ encode_sequence(&xdr, &args->seq_args, &hdr);
encode_putfh(&xdr, args->dir_fh, &hdr);
encode_savefh(&xdr, &hdr);
encode_create(&xdr, args, &hdr);
@@ -1505,11 +1862,12 @@
{
struct xdr_stream xdr;
struct compound_hdr hdr = {
- .nops = 0,
+ .minorversion = nfs4_xdr_minorversion(&args->seq_args),
};
xdr_init_encode(&xdr, &req->rq_snd_buf, p);
- encode_compound_hdr(&xdr, &hdr);
+ encode_compound_hdr(&xdr, req, &hdr);
+ encode_sequence(&xdr, &args->seq_args, &hdr);
encode_putfh(&xdr, args->fh, &hdr);
encode_getfattr(&xdr, args->bitmask, &hdr);
encode_nops(&hdr);
@@ -1523,11 +1881,12 @@
{
struct xdr_stream xdr;
struct compound_hdr hdr = {
- .nops = 0,
+ .minorversion = nfs4_xdr_minorversion(&args->seq_args),
};
xdr_init_encode(&xdr, &req->rq_snd_buf, p);
- encode_compound_hdr(&xdr, &hdr);
+ encode_compound_hdr(&xdr, req, &hdr);
+ encode_sequence(&xdr, &args->seq_args, &hdr);
encode_putfh(&xdr, args->fh, &hdr);
encode_close(&xdr, args, &hdr);
encode_getfattr(&xdr, args->bitmask, &hdr);
@@ -1542,11 +1901,12 @@
{
struct xdr_stream xdr;
struct compound_hdr hdr = {
- .nops = 0,
+ .minorversion = nfs4_xdr_minorversion(&args->seq_args),
};
xdr_init_encode(&xdr, &req->rq_snd_buf, p);
- encode_compound_hdr(&xdr, &hdr);
+ encode_compound_hdr(&xdr, req, &hdr);
+ encode_sequence(&xdr, &args->seq_args, &hdr);
encode_putfh(&xdr, args->fh, &hdr);
encode_savefh(&xdr, &hdr);
encode_open(&xdr, args, &hdr);
@@ -1569,7 +1929,7 @@
};
xdr_init_encode(&xdr, &req->rq_snd_buf, p);
- encode_compound_hdr(&xdr, &hdr);
+ encode_compound_hdr(&xdr, req, &hdr);
encode_putfh(&xdr, args->fh, &hdr);
encode_open_confirm(&xdr, args, &hdr);
encode_nops(&hdr);
@@ -1583,11 +1943,12 @@
{
struct xdr_stream xdr;
struct compound_hdr hdr = {
- .nops = 0,
+ .minorversion = nfs4_xdr_minorversion(&args->seq_args),
};
xdr_init_encode(&xdr, &req->rq_snd_buf, p);
- encode_compound_hdr(&xdr, &hdr);
+ encode_compound_hdr(&xdr, req, &hdr);
+ encode_sequence(&xdr, &args->seq_args, &hdr);
encode_putfh(&xdr, args->fh, &hdr);
encode_open(&xdr, args, &hdr);
encode_getfattr(&xdr, args->bitmask, &hdr);
@@ -1602,11 +1963,12 @@
{
struct xdr_stream xdr;
struct compound_hdr hdr = {
- .nops = 0,
+ .minorversion = nfs4_xdr_minorversion(&args->seq_args),
};
xdr_init_encode(&xdr, &req->rq_snd_buf, p);
- encode_compound_hdr(&xdr, &hdr);
+ encode_compound_hdr(&xdr, req, &hdr);
+ encode_sequence(&xdr, &args->seq_args, &hdr);
encode_putfh(&xdr, args->fh, &hdr);
encode_open_downgrade(&xdr, args, &hdr);
encode_getfattr(&xdr, args->bitmask, &hdr);
@@ -1621,11 +1983,12 @@
{
struct xdr_stream xdr;
struct compound_hdr hdr = {
- .nops = 0,
+ .minorversion = nfs4_xdr_minorversion(&args->seq_args),
};
xdr_init_encode(&xdr, &req->rq_snd_buf, p);
- encode_compound_hdr(&xdr, &hdr);
+ encode_compound_hdr(&xdr, req, &hdr);
+ encode_sequence(&xdr, &args->seq_args, &hdr);
encode_putfh(&xdr, args->fh, &hdr);
encode_lock(&xdr, args, &hdr);
encode_nops(&hdr);
@@ -1639,11 +2002,12 @@
{
struct xdr_stream xdr;
struct compound_hdr hdr = {
- .nops = 0,
+ .minorversion = nfs4_xdr_minorversion(&args->seq_args),
};
xdr_init_encode(&xdr, &req->rq_snd_buf, p);
- encode_compound_hdr(&xdr, &hdr);
+ encode_compound_hdr(&xdr, req, &hdr);
+ encode_sequence(&xdr, &args->seq_args, &hdr);
encode_putfh(&xdr, args->fh, &hdr);
encode_lockt(&xdr, args, &hdr);
encode_nops(&hdr);
@@ -1657,11 +2021,12 @@
{
struct xdr_stream xdr;
struct compound_hdr hdr = {
- .nops = 0,
+ .minorversion = nfs4_xdr_minorversion(&args->seq_args),
};
xdr_init_encode(&xdr, &req->rq_snd_buf, p);
- encode_compound_hdr(&xdr, &hdr);
+ encode_compound_hdr(&xdr, req, &hdr);
+ encode_sequence(&xdr, &args->seq_args, &hdr);
encode_putfh(&xdr, args->fh, &hdr);
encode_locku(&xdr, args, &hdr);
encode_nops(&hdr);
@@ -1675,22 +2040,16 @@
{
struct xdr_stream xdr;
struct compound_hdr hdr = {
- .nops = 0,
+ .minorversion = nfs4_xdr_minorversion(&args->seq_args),
};
- struct rpc_auth *auth = req->rq_task->tk_msg.rpc_cred->cr_auth;
- unsigned int replen;
xdr_init_encode(&xdr, &req->rq_snd_buf, p);
- encode_compound_hdr(&xdr, &hdr);
+ encode_compound_hdr(&xdr, req, &hdr);
+ encode_sequence(&xdr, &args->seq_args, &hdr);
encode_putfh(&xdr, args->fh, &hdr);
encode_readlink(&xdr, args, req, &hdr);
- /* set up reply kvec
- * toplevel_status + taglen + rescount + OP_PUTFH + status
- * + OP_READLINK + status + string length = 8
- */
- replen = (RPC_REPHDRSIZE + auth->au_rslack + NFS4_dec_readlink_sz) << 2;
- xdr_inline_pages(&req->rq_rcv_buf, replen, args->pages,
+ xdr_inline_pages(&req->rq_rcv_buf, hdr.replen << 2, args->pages,
args->pgbase, args->pglen);
encode_nops(&hdr);
return 0;
@@ -1703,25 +2062,19 @@
{
struct xdr_stream xdr;
struct compound_hdr hdr = {
- .nops = 0,
+ .minorversion = nfs4_xdr_minorversion(&args->seq_args),
};
- struct rpc_auth *auth = req->rq_task->tk_msg.rpc_cred->cr_auth;
- int replen;
xdr_init_encode(&xdr, &req->rq_snd_buf, p);
- encode_compound_hdr(&xdr, &hdr);
+ encode_compound_hdr(&xdr, req, &hdr);
+ encode_sequence(&xdr, &args->seq_args, &hdr);
encode_putfh(&xdr, args->fh, &hdr);
encode_readdir(&xdr, args, req, &hdr);
- /* set up reply kvec
- * toplevel_status + taglen + rescount + OP_PUTFH + status
- * + OP_READDIR + status + verifer(2) = 9
- */
- replen = (RPC_REPHDRSIZE + auth->au_rslack + NFS4_dec_readdir_sz) << 2;
- xdr_inline_pages(&req->rq_rcv_buf, replen, args->pages,
+ xdr_inline_pages(&req->rq_rcv_buf, hdr.replen << 2, args->pages,
args->pgbase, args->count);
dprintk("%s: inlined page args = (%u, %p, %u, %u)\n",
- __func__, replen, args->pages,
+ __func__, hdr.replen << 2, args->pages,
args->pgbase, args->count);
encode_nops(&hdr);
return 0;
@@ -1732,24 +2085,18 @@
*/
static int nfs4_xdr_enc_read(struct rpc_rqst *req, __be32 *p, struct nfs_readargs *args)
{
- struct rpc_auth *auth = req->rq_task->tk_msg.rpc_cred->cr_auth;
struct xdr_stream xdr;
struct compound_hdr hdr = {
- .nops = 0,
+ .minorversion = nfs4_xdr_minorversion(&args->seq_args),
};
- int replen;
xdr_init_encode(&xdr, &req->rq_snd_buf, p);
- encode_compound_hdr(&xdr, &hdr);
+ encode_compound_hdr(&xdr, req, &hdr);
+ encode_sequence(&xdr, &args->seq_args, &hdr);
encode_putfh(&xdr, args->fh, &hdr);
encode_read(&xdr, args, &hdr);
- /* set up reply kvec
- * toplevel status + taglen=0 + rescount + OP_PUTFH + status
- * + OP_READ + status + eof + datalen = 9
- */
- replen = (RPC_REPHDRSIZE + auth->au_rslack + NFS4_dec_read_sz) << 2;
- xdr_inline_pages(&req->rq_rcv_buf, replen,
+ xdr_inline_pages(&req->rq_rcv_buf, hdr.replen << 2,
args->pages, args->pgbase, args->count);
req->rq_rcv_buf.flags |= XDRBUF_READ;
encode_nops(&hdr);
@@ -1763,11 +2110,12 @@
{
struct xdr_stream xdr;
struct compound_hdr hdr = {
- .nops = 0,
+ .minorversion = nfs4_xdr_minorversion(&args->seq_args),
};
xdr_init_encode(&xdr, &req->rq_snd_buf, p);
- encode_compound_hdr(&xdr, &hdr);
+ encode_compound_hdr(&xdr, req, &hdr);
+ encode_sequence(&xdr, &args->seq_args, &hdr);
encode_putfh(&xdr, args->fh, &hdr);
encode_setattr(&xdr, args, args->server, &hdr);
encode_getfattr(&xdr, args->bitmask, &hdr);
@@ -1783,20 +2131,19 @@
struct nfs_getaclargs *args)
{
struct xdr_stream xdr;
- struct rpc_auth *auth = req->rq_task->tk_msg.rpc_cred->cr_auth;
struct compound_hdr hdr = {
- .nops = 0,
+ .minorversion = nfs4_xdr_minorversion(&args->seq_args),
};
- int replen;
+ uint32_t replen;
xdr_init_encode(&xdr, &req->rq_snd_buf, p);
- encode_compound_hdr(&xdr, &hdr);
+ encode_compound_hdr(&xdr, req, &hdr);
+ encode_sequence(&xdr, &args->seq_args, &hdr);
encode_putfh(&xdr, args->fh, &hdr);
+ replen = hdr.replen + nfs4_fattr_bitmap_maxsz + 1;
encode_getattr_two(&xdr, FATTR4_WORD0_ACL, 0, &hdr);
- /* set up reply buffer: */
- replen = (RPC_REPHDRSIZE + auth->au_rslack + NFS4_dec_getacl_sz) << 2;
- xdr_inline_pages(&req->rq_rcv_buf, replen,
+ xdr_inline_pages(&req->rq_rcv_buf, replen << 2,
args->acl_pages, args->acl_pgbase, args->acl_len);
encode_nops(&hdr);
return 0;
@@ -1809,11 +2156,12 @@
{
struct xdr_stream xdr;
struct compound_hdr hdr = {
- .nops = 0,
+ .minorversion = nfs4_xdr_minorversion(&args->seq_args),
};
xdr_init_encode(&xdr, &req->rq_snd_buf, p);
- encode_compound_hdr(&xdr, &hdr);
+ encode_compound_hdr(&xdr, req, &hdr);
+ encode_sequence(&xdr, &args->seq_args, &hdr);
encode_putfh(&xdr, args->fh, &hdr);
encode_write(&xdr, args, &hdr);
req->rq_snd_buf.flags |= XDRBUF_WRITE;
@@ -1829,11 +2177,12 @@
{
struct xdr_stream xdr;
struct compound_hdr hdr = {
- .nops = 0,
+ .minorversion = nfs4_xdr_minorversion(&args->seq_args),
};
xdr_init_encode(&xdr, &req->rq_snd_buf, p);
- encode_compound_hdr(&xdr, &hdr);
+ encode_compound_hdr(&xdr, req, &hdr);
+ encode_sequence(&xdr, &args->seq_args, &hdr);
encode_putfh(&xdr, args->fh, &hdr);
encode_commit(&xdr, args, &hdr);
encode_getfattr(&xdr, args->bitmask, &hdr);
@@ -1848,11 +2197,12 @@
{
struct xdr_stream xdr;
struct compound_hdr hdr = {
- .nops = 0,
+ .minorversion = nfs4_xdr_minorversion(&args->seq_args),
};
xdr_init_encode(&xdr, &req->rq_snd_buf, p);
- encode_compound_hdr(&xdr, &hdr);
+ encode_compound_hdr(&xdr, req, &hdr);
+ encode_sequence(&xdr, &args->seq_args, &hdr);
encode_putfh(&xdr, args->fh, &hdr);
encode_fsinfo(&xdr, args->bitmask, &hdr);
encode_nops(&hdr);
@@ -1866,11 +2216,12 @@
{
struct xdr_stream xdr;
struct compound_hdr hdr = {
- .nops = 0,
+ .minorversion = nfs4_xdr_minorversion(&args->seq_args),
};
xdr_init_encode(&xdr, &req->rq_snd_buf, p);
- encode_compound_hdr(&xdr, &hdr);
+ encode_compound_hdr(&xdr, req, &hdr);
+ encode_sequence(&xdr, &args->seq_args, &hdr);
encode_putfh(&xdr, args->fh, &hdr);
encode_getattr_one(&xdr, args->bitmask[0] & nfs4_pathconf_bitmap[0],
&hdr);
@@ -1885,11 +2236,12 @@
{
struct xdr_stream xdr;
struct compound_hdr hdr = {
- .nops = 0,
+ .minorversion = nfs4_xdr_minorversion(&args->seq_args),
};
xdr_init_encode(&xdr, &req->rq_snd_buf, p);
- encode_compound_hdr(&xdr, &hdr);
+ encode_compound_hdr(&xdr, req, &hdr);
+ encode_sequence(&xdr, &args->seq_args, &hdr);
encode_putfh(&xdr, args->fh, &hdr);
encode_getattr_two(&xdr, args->bitmask[0] & nfs4_statfs_bitmap[0],
args->bitmask[1] & nfs4_statfs_bitmap[1], &hdr);
@@ -1900,16 +2252,18 @@
/*
* GETATTR_BITMAP request
*/
-static int nfs4_xdr_enc_server_caps(struct rpc_rqst *req, __be32 *p, const struct nfs_fh *fhandle)
+static int nfs4_xdr_enc_server_caps(struct rpc_rqst *req, __be32 *p,
+ struct nfs4_server_caps_arg *args)
{
struct xdr_stream xdr;
struct compound_hdr hdr = {
- .nops = 0,
+ .minorversion = nfs4_xdr_minorversion(&args->seq_args),
};
xdr_init_encode(&xdr, &req->rq_snd_buf, p);
- encode_compound_hdr(&xdr, &hdr);
- encode_putfh(&xdr, fhandle, &hdr);
+ encode_compound_hdr(&xdr, req, &hdr);
+ encode_sequence(&xdr, &args->seq_args, &hdr);
+ encode_putfh(&xdr, args->fhandle, &hdr);
encode_getattr_one(&xdr, FATTR4_WORD0_SUPPORTED_ATTRS|
FATTR4_WORD0_LINK_SUPPORT|
FATTR4_WORD0_SYMLINK_SUPPORT|
@@ -1929,7 +2283,7 @@
};
xdr_init_encode(&xdr, &req->rq_snd_buf, p);
- encode_compound_hdr(&xdr, &hdr);
+ encode_compound_hdr(&xdr, req, &hdr);
encode_renew(&xdr, clp, &hdr);
encode_nops(&hdr);
return 0;
@@ -1946,7 +2300,7 @@
};
xdr_init_encode(&xdr, &req->rq_snd_buf, p);
- encode_compound_hdr(&xdr, &hdr);
+ encode_compound_hdr(&xdr, req, &hdr);
encode_setclientid(&xdr, sc, &hdr);
encode_nops(&hdr);
return 0;
@@ -1964,7 +2318,7 @@
const u32 lease_bitmap[2] = { FATTR4_WORD0_LEASE_TIME, 0 };
xdr_init_encode(&xdr, &req->rq_snd_buf, p);
- encode_compound_hdr(&xdr, &hdr);
+ encode_compound_hdr(&xdr, req, &hdr);
encode_setclientid_confirm(&xdr, clp, &hdr);
encode_putrootfh(&xdr, &hdr);
encode_fsinfo(&xdr, lease_bitmap, &hdr);
@@ -1979,11 +2333,12 @@
{
struct xdr_stream xdr;
struct compound_hdr hdr = {
- .nops = 0,
+ .minorversion = nfs4_xdr_minorversion(&args->seq_args),
};
xdr_init_encode(&xdr, &req->rq_snd_buf, p);
- encode_compound_hdr(&xdr, &hdr);
+ encode_compound_hdr(&xdr, req, &hdr);
+ encode_sequence(&xdr, &args->seq_args, &hdr);
encode_putfh(&xdr, args->fhandle, &hdr);
encode_delegreturn(&xdr, args->stateid, &hdr);
encode_getfattr(&xdr, args->bitmask, &hdr);
@@ -1998,28 +2353,119 @@
{
struct xdr_stream xdr;
struct compound_hdr hdr = {
- .nops = 0,
+ .minorversion = nfs4_xdr_minorversion(&args->seq_args),
};
- struct rpc_auth *auth = req->rq_task->tk_msg.rpc_cred->cr_auth;
- int replen;
+ uint32_t replen;
xdr_init_encode(&xdr, &req->rq_snd_buf, p);
- encode_compound_hdr(&xdr, &hdr);
+ encode_compound_hdr(&xdr, req, &hdr);
+ encode_sequence(&xdr, &args->seq_args, &hdr);
encode_putfh(&xdr, args->dir_fh, &hdr);
encode_lookup(&xdr, args->name, &hdr);
+ replen = hdr.replen; /* get the attribute into args->page */
encode_fs_locations(&xdr, args->bitmask, &hdr);
- /* set up reply
- * toplevel_status + OP_PUTFH + status
- * + OP_LOOKUP + status + OP_GETATTR + status = 7
- */
- replen = (RPC_REPHDRSIZE + auth->au_rslack + 7) << 2;
- xdr_inline_pages(&req->rq_rcv_buf, replen, &args->page,
+ xdr_inline_pages(&req->rq_rcv_buf, replen << 2, &args->page,
0, PAGE_SIZE);
encode_nops(&hdr);
return 0;
}
+#if defined(CONFIG_NFS_V4_1)
+/*
+ * EXCHANGE_ID request
+ */
+static int nfs4_xdr_enc_exchange_id(struct rpc_rqst *req, uint32_t *p,
+ struct nfs41_exchange_id_args *args)
+{
+ struct xdr_stream xdr;
+ struct compound_hdr hdr = {
+ .minorversion = args->client->cl_minorversion,
+ };
+
+ xdr_init_encode(&xdr, &req->rq_snd_buf, p);
+ encode_compound_hdr(&xdr, req, &hdr);
+ encode_exchange_id(&xdr, args, &hdr);
+ encode_nops(&hdr);
+ return 0;
+}
+
+/*
+ * a CREATE_SESSION request
+ */
+static int nfs4_xdr_enc_create_session(struct rpc_rqst *req, uint32_t *p,
+ struct nfs41_create_session_args *args)
+{
+ struct xdr_stream xdr;
+ struct compound_hdr hdr = {
+ .minorversion = args->client->cl_minorversion,
+ };
+
+ xdr_init_encode(&xdr, &req->rq_snd_buf, p);
+ encode_compound_hdr(&xdr, req, &hdr);
+ encode_create_session(&xdr, args, &hdr);
+ encode_nops(&hdr);
+ return 0;
+}
+
+/*
+ * a DESTROY_SESSION request
+ */
+static int nfs4_xdr_enc_destroy_session(struct rpc_rqst *req, uint32_t *p,
+ struct nfs4_session *session)
+{
+ struct xdr_stream xdr;
+ struct compound_hdr hdr = {
+ .minorversion = session->clp->cl_minorversion,
+ };
+
+ xdr_init_encode(&xdr, &req->rq_snd_buf, p);
+ encode_compound_hdr(&xdr, req, &hdr);
+ encode_destroy_session(&xdr, session, &hdr);
+ encode_nops(&hdr);
+ return 0;
+}
+
+/*
+ * a SEQUENCE request
+ */
+static int nfs4_xdr_enc_sequence(struct rpc_rqst *req, uint32_t *p,
+ struct nfs4_sequence_args *args)
+{
+ struct xdr_stream xdr;
+ struct compound_hdr hdr = {
+ .minorversion = nfs4_xdr_minorversion(args),
+ };
+
+ xdr_init_encode(&xdr, &req->rq_snd_buf, p);
+ encode_compound_hdr(&xdr, req, &hdr);
+ encode_sequence(&xdr, args, &hdr);
+ encode_nops(&hdr);
+ return 0;
+}
+
+/*
+ * a GET_LEASE_TIME request
+ */
+static int nfs4_xdr_enc_get_lease_time(struct rpc_rqst *req, uint32_t *p,
+ struct nfs4_get_lease_time_args *args)
+{
+ struct xdr_stream xdr;
+ struct compound_hdr hdr = {
+ .minorversion = nfs4_xdr_minorversion(&args->la_seq_args),
+ };
+ const u32 lease_bitmap[2] = { FATTR4_WORD0_LEASE_TIME, 0 };
+
+ xdr_init_encode(&xdr, &req->rq_snd_buf, p);
+ encode_compound_hdr(&xdr, req, &hdr);
+ encode_sequence(&xdr, &args->la_seq_args, &hdr);
+ encode_putrootfh(&xdr, &hdr);
+ encode_fsinfo(&xdr, lease_bitmap, &hdr);
+ encode_nops(&hdr);
+ return 0;
+}
+#endif /* CONFIG_NFS_V4_1 */
+
/*
* START OF "GENERIC" DECODE ROUTINES.
* These may look a little ugly since they are imported from a "generic"
@@ -3657,7 +4103,7 @@
return decode_op_hdr(xdr, OP_SAVEFH);
}
-static int decode_setattr(struct xdr_stream *xdr, struct nfs_setattrres *res)
+static int decode_setattr(struct xdr_stream *xdr)
{
__be32 *p;
uint32_t bmlen;
@@ -3735,6 +4181,169 @@
return decode_op_hdr(xdr, OP_DELEGRETURN);
}
+#if defined(CONFIG_NFS_V4_1)
+static int decode_exchange_id(struct xdr_stream *xdr,
+ struct nfs41_exchange_id_res *res)
+{
+ __be32 *p;
+ uint32_t dummy;
+ int status;
+ struct nfs_client *clp = res->client;
+
+ status = decode_op_hdr(xdr, OP_EXCHANGE_ID);
+ if (status)
+ return status;
+
+ READ_BUF(8);
+ READ64(clp->cl_ex_clid);
+ READ_BUF(12);
+ READ32(clp->cl_seqid);
+ READ32(clp->cl_exchange_flags);
+
+ /* We ask for SP4_NONE */
+ READ32(dummy);
+ if (dummy != SP4_NONE)
+ return -EIO;
+
+ /* Throw away minor_id */
+ READ_BUF(8);
+
+ /* Throw away Major id */
+ READ_BUF(4);
+ READ32(dummy);
+ READ_BUF(dummy);
+
+ /* Throw away server_scope */
+ READ_BUF(4);
+ READ32(dummy);
+ READ_BUF(dummy);
+
+ /* Throw away Implementation id array */
+ READ_BUF(4);
+ READ32(dummy);
+ READ_BUF(dummy);
+
+ return 0;
+}
+
+static int decode_chan_attrs(struct xdr_stream *xdr,
+ struct nfs4_channel_attrs *attrs)
+{
+ __be32 *p;
+ u32 nr_attrs;
+
+ READ_BUF(28);
+ READ32(attrs->headerpadsz);
+ READ32(attrs->max_rqst_sz);
+ READ32(attrs->max_resp_sz);
+ READ32(attrs->max_resp_sz_cached);
+ READ32(attrs->max_ops);
+ READ32(attrs->max_reqs);
+ READ32(nr_attrs);
+ if (unlikely(nr_attrs > 1)) {
+ printk(KERN_WARNING "%s: Invalid rdma channel attrs count %u\n",
+ __func__, nr_attrs);
+ return -EINVAL;
+ }
+ if (nr_attrs == 1)
+ READ_BUF(4); /* skip rdma_attrs */
+ return 0;
+}
+
+static int decode_create_session(struct xdr_stream *xdr,
+ struct nfs41_create_session_res *res)
+{
+ __be32 *p;
+ int status;
+ struct nfs_client *clp = res->client;
+ struct nfs4_session *session = clp->cl_session;
+
+ status = decode_op_hdr(xdr, OP_CREATE_SESSION);
+
+ if (status)
+ return status;
+
+ /* sessionid */
+ READ_BUF(NFS4_MAX_SESSIONID_LEN);
+ COPYMEM(&session->sess_id, NFS4_MAX_SESSIONID_LEN);
+
+ /* seqid, flags */
+ READ_BUF(8);
+ READ32(clp->cl_seqid);
+ READ32(session->flags);
+
+ /* Channel attributes */
+ status = decode_chan_attrs(xdr, &session->fc_attrs);
+ if (!status)
+ status = decode_chan_attrs(xdr, &session->bc_attrs);
+ return status;
+}
+
+static int decode_destroy_session(struct xdr_stream *xdr, void *dummy)
+{
+ return decode_op_hdr(xdr, OP_DESTROY_SESSION);
+}
+#endif /* CONFIG_NFS_V4_1 */
+
+static int decode_sequence(struct xdr_stream *xdr,
+ struct nfs4_sequence_res *res,
+ struct rpc_rqst *rqstp)
+{
+#if defined(CONFIG_NFS_V4_1)
+ struct nfs4_slot *slot;
+ struct nfs4_sessionid id;
+ u32 dummy;
+ int status;
+ __be32 *p;
+
+ if (!res->sr_session)
+ return 0;
+
+ status = decode_op_hdr(xdr, OP_SEQUENCE);
+ if (status)
+ goto out_err;
+
+ /*
+ * If the server returns different values for sessionID, slotID or
+ * sequence number, the server is looney tunes.
+ */
+ status = -ESERVERFAULT;
+
+ slot = &res->sr_session->fc_slot_table.slots[res->sr_slotid];
+ READ_BUF(NFS4_MAX_SESSIONID_LEN + 20);
+ COPYMEM(id.data, NFS4_MAX_SESSIONID_LEN);
+ if (memcmp(id.data, res->sr_session->sess_id.data,
+ NFS4_MAX_SESSIONID_LEN)) {
+ dprintk("%s Invalid session id\n", __func__);
+ goto out_err;
+ }
+ /* seqid */
+ READ32(dummy);
+ if (dummy != slot->seq_nr) {
+ dprintk("%s Invalid sequence number\n", __func__);
+ goto out_err;
+ }
+ /* slot id */
+ READ32(dummy);
+ if (dummy != res->sr_slotid) {
+ dprintk("%s Invalid slot id\n", __func__);
+ goto out_err;
+ }
+ /* highest slot id - currently not processed */
+ READ32(dummy);
+ /* target highest slot id - currently not processed */
+ READ32(dummy);
+ /* result flags - currently not processed */
+ READ32(dummy);
+ status = 0;
+out_err:
+ res->sr_status = status;
+ return status;
+#else /* CONFIG_NFS_V4_1 */
+ return 0;
+#endif /* CONFIG_NFS_V4_1 */
+}
+
/*
* END OF "GENERIC" DECODE ROUTINES.
*/
@@ -3752,6 +4361,9 @@
status = decode_compound_hdr(&xdr, &hdr);
if (status)
goto out;
+ status = decode_sequence(&xdr, &res->seq_res, rqstp);
+ if (status)
+ goto out;
status = decode_putfh(&xdr);
if (status)
goto out;
@@ -3773,7 +4385,11 @@
int status;
xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
- if ((status = decode_compound_hdr(&xdr, &hdr)) != 0)
+ status = decode_compound_hdr(&xdr, &hdr);
+ if (status)
+ goto out;
+ status = decode_sequence(&xdr, &res->seq_res, rqstp);
+ if (status)
goto out;
status = decode_putfh(&xdr);
if (status != 0)
@@ -3796,7 +4412,11 @@
int status;
xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
- if ((status = decode_compound_hdr(&xdr, &hdr)) != 0)
+ status = decode_compound_hdr(&xdr, &hdr);
+ if (status)
+ goto out;
+ status = decode_sequence(&xdr, &res->seq_res, rqstp);
+ if (status)
goto out;
if ((status = decode_putfh(&xdr)) != 0)
goto out;
@@ -3819,7 +4439,11 @@
int status;
xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
- if ((status = decode_compound_hdr(&xdr, &hdr)) != 0)
+ status = decode_compound_hdr(&xdr, &hdr);
+ if (status)
+ goto out;
+ status = decode_sequence(&xdr, &res->seq_res, rqstp);
+ if (status)
goto out;
if ((status = decode_putrootfh(&xdr)) != 0)
goto out;
@@ -3839,7 +4463,11 @@
int status;
xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
- if ((status = decode_compound_hdr(&xdr, &hdr)) != 0)
+ status = decode_compound_hdr(&xdr, &hdr);
+ if (status)
+ goto out;
+ status = decode_sequence(&xdr, &res->seq_res, rqstp);
+ if (status)
goto out;
if ((status = decode_putfh(&xdr)) != 0)
goto out;
@@ -3860,7 +4488,11 @@
int status;
xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
- if ((status = decode_compound_hdr(&xdr, &hdr)) != 0)
+ status = decode_compound_hdr(&xdr, &hdr);
+ if (status)
+ goto out;
+ status = decode_sequence(&xdr, &res->seq_res, rqstp);
+ if (status)
goto out;
if ((status = decode_putfh(&xdr)) != 0)
goto out;
@@ -3890,7 +4522,11 @@
int status;
xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
- if ((status = decode_compound_hdr(&xdr, &hdr)) != 0)
+ status = decode_compound_hdr(&xdr, &hdr);
+ if (status)
+ goto out;
+ status = decode_sequence(&xdr, &res->seq_res, rqstp);
+ if (status)
goto out;
if ((status = decode_putfh(&xdr)) != 0)
goto out;
@@ -3923,7 +4559,11 @@
int status;
xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
- if ((status = decode_compound_hdr(&xdr, &hdr)) != 0)
+ status = decode_compound_hdr(&xdr, &hdr);
+ if (status)
+ goto out;
+ status = decode_sequence(&xdr, &res->seq_res, rqstp);
+ if (status)
goto out;
if ((status = decode_putfh(&xdr)) != 0)
goto out;
@@ -3963,6 +4603,9 @@
status = decode_compound_hdr(&xdr, &hdr);
if (status)
goto out;
+ status = decode_sequence(&xdr, &res->seq_res, rqstp);
+ if (status)
+ goto out;
status = decode_putfh(&xdr);
if (status)
goto out;
@@ -3979,12 +4622,13 @@
{
struct xdr_stream xdr;
struct compound_hdr hdr = {
- .nops = 0,
+ .minorversion = nfs4_xdr_minorversion(&args->seq_args),
};
int status;
xdr_init_encode(&xdr, &req->rq_snd_buf, p);
- encode_compound_hdr(&xdr, &hdr);
+ encode_compound_hdr(&xdr, req, &hdr);
+ encode_sequence(&xdr, &args->seq_args, &hdr);
encode_putfh(&xdr, args->fh, &hdr);
status = encode_setacl(&xdr, args, &hdr);
encode_nops(&hdr);
@@ -3995,7 +4639,8 @@
* Decode SETACL response
*/
static int
-nfs4_xdr_dec_setacl(struct rpc_rqst *rqstp, __be32 *p, void *res)
+nfs4_xdr_dec_setacl(struct rpc_rqst *rqstp, __be32 *p,
+ struct nfs_setaclres *res)
{
struct xdr_stream xdr;
struct compound_hdr hdr;
@@ -4005,10 +4650,13 @@
status = decode_compound_hdr(&xdr, &hdr);
if (status)
goto out;
+ status = decode_sequence(&xdr, &res->seq_res, rqstp);
+ if (status)
+ goto out;
status = decode_putfh(&xdr);
if (status)
goto out;
- status = decode_setattr(&xdr, res);
+ status = decode_setattr(&xdr);
out:
return status;
}
@@ -4017,7 +4665,8 @@
* Decode GETACL response
*/
static int
-nfs4_xdr_dec_getacl(struct rpc_rqst *rqstp, __be32 *p, size_t *acl_len)
+nfs4_xdr_dec_getacl(struct rpc_rqst *rqstp, __be32 *p,
+ struct nfs_getaclres *res)
{
struct xdr_stream xdr;
struct compound_hdr hdr;
@@ -4027,10 +4676,13 @@
status = decode_compound_hdr(&xdr, &hdr);
if (status)
goto out;
+ status = decode_sequence(&xdr, &res->seq_res, rqstp);
+ if (status)
+ goto out;
status = decode_putfh(&xdr);
if (status)
goto out;
- status = decode_getacl(&xdr, rqstp, acl_len);
+ status = decode_getacl(&xdr, rqstp, &res->acl_len);
out:
return status;
@@ -4049,6 +4701,9 @@
status = decode_compound_hdr(&xdr, &hdr);
if (status)
goto out;
+ status = decode_sequence(&xdr, &res->seq_res, rqstp);
+ if (status)
+ goto out;
status = decode_putfh(&xdr);
if (status)
goto out;
@@ -4079,6 +4734,9 @@
status = decode_compound_hdr(&xdr, &hdr);
if (status)
goto out;
+ status = decode_sequence(&xdr, &res->seq_res, rqstp);
+ if (status)
+ goto out;
status = decode_putfh(&xdr);
if (status)
goto out;
@@ -4133,6 +4791,9 @@
status = decode_compound_hdr(&xdr, &hdr);
if (status)
goto out;
+ status = decode_sequence(&xdr, &res->seq_res, rqstp);
+ if (status)
+ goto out;
status = decode_putfh(&xdr);
if (status)
goto out;
@@ -4157,10 +4818,13 @@
status = decode_compound_hdr(&xdr, &hdr);
if (status)
goto out;
+ status = decode_sequence(&xdr, &res->seq_res, rqstp);
+ if (status)
+ goto out;
status = decode_putfh(&xdr);
if (status)
goto out;
- status = decode_setattr(&xdr, res);
+ status = decode_setattr(&xdr);
if (status)
goto out;
decode_getfattr(&xdr, res->fattr, res->server);
@@ -4181,6 +4845,9 @@
status = decode_compound_hdr(&xdr, &hdr);
if (status)
goto out;
+ status = decode_sequence(&xdr, &res->seq_res, rqstp);
+ if (status)
+ goto out;
status = decode_putfh(&xdr);
if (status)
goto out;
@@ -4202,6 +4869,9 @@
status = decode_compound_hdr(&xdr, &hdr);
if (status)
goto out;
+ status = decode_sequence(&xdr, &res->seq_res, rqstp);
+ if (status)
+ goto out;
status = decode_putfh(&xdr);
if (status)
goto out;
@@ -4223,6 +4893,9 @@
status = decode_compound_hdr(&xdr, &hdr);
if (status)
goto out;
+ status = decode_sequence(&xdr, &res->seq_res, rqstp);
+ if (status)
+ goto out;
status = decode_putfh(&xdr);
if (status)
goto out;
@@ -4234,7 +4907,8 @@
/*
* Decode READLINK response
*/
-static int nfs4_xdr_dec_readlink(struct rpc_rqst *rqstp, __be32 *p, void *res)
+static int nfs4_xdr_dec_readlink(struct rpc_rqst *rqstp, __be32 *p,
+ struct nfs4_readlink_res *res)
{
struct xdr_stream xdr;
struct compound_hdr hdr;
@@ -4244,6 +4918,9 @@
status = decode_compound_hdr(&xdr, &hdr);
if (status)
goto out;
+ status = decode_sequence(&xdr, &res->seq_res, rqstp);
+ if (status)
+ goto out;
status = decode_putfh(&xdr);
if (status)
goto out;
@@ -4265,6 +4942,9 @@
status = decode_compound_hdr(&xdr, &hdr);
if (status)
goto out;
+ status = decode_sequence(&xdr, &res->seq_res, rqstp);
+ if (status)
+ goto out;
status = decode_putfh(&xdr);
if (status)
goto out;
@@ -4286,6 +4966,9 @@
status = decode_compound_hdr(&xdr, &hdr);
if (status)
goto out;
+ status = decode_sequence(&xdr, &res->seq_res, rqstp);
+ if (status)
+ goto out;
status = decode_putfh(&xdr);
if (status)
goto out;
@@ -4309,6 +4992,9 @@
status = decode_compound_hdr(&xdr, &hdr);
if (status)
goto out;
+ status = decode_sequence(&xdr, &res->seq_res, rqstp);
+ if (status)
+ goto out;
status = decode_putfh(&xdr);
if (status)
goto out;
@@ -4335,6 +5021,9 @@
status = decode_compound_hdr(&xdr, &hdr);
if (status)
goto out;
+ status = decode_sequence(&xdr, &res->seq_res, rqstp);
+ if (status)
+ goto out;
status = decode_putfh(&xdr);
if (status)
goto out;
@@ -4349,7 +5038,8 @@
/*
* FSINFO request
*/
-static int nfs4_xdr_dec_fsinfo(struct rpc_rqst *req, __be32 *p, struct nfs_fsinfo *fsinfo)
+static int nfs4_xdr_dec_fsinfo(struct rpc_rqst *req, __be32 *p,
+ struct nfs4_fsinfo_res *res)
{
struct xdr_stream xdr;
struct compound_hdr hdr;
@@ -4358,16 +5048,19 @@
xdr_init_decode(&xdr, &req->rq_rcv_buf, p);
status = decode_compound_hdr(&xdr, &hdr);
if (!status)
+ status = decode_sequence(&xdr, &res->seq_res, req);
+ if (!status)
status = decode_putfh(&xdr);
if (!status)
- status = decode_fsinfo(&xdr, fsinfo);
+ status = decode_fsinfo(&xdr, res->fsinfo);
return status;
}
/*
* PATHCONF request
*/
-static int nfs4_xdr_dec_pathconf(struct rpc_rqst *req, __be32 *p, struct nfs_pathconf *pathconf)
+static int nfs4_xdr_dec_pathconf(struct rpc_rqst *req, __be32 *p,
+ struct nfs4_pathconf_res *res)
{
struct xdr_stream xdr;
struct compound_hdr hdr;
@@ -4376,16 +5069,19 @@
xdr_init_decode(&xdr, &req->rq_rcv_buf, p);
status = decode_compound_hdr(&xdr, &hdr);
if (!status)
+ status = decode_sequence(&xdr, &res->seq_res, req);
+ if (!status)
status = decode_putfh(&xdr);
if (!status)
- status = decode_pathconf(&xdr, pathconf);
+ status = decode_pathconf(&xdr, res->pathconf);
return status;
}
/*
* STATFS request
*/
-static int nfs4_xdr_dec_statfs(struct rpc_rqst *req, __be32 *p, struct nfs_fsstat *fsstat)
+static int nfs4_xdr_dec_statfs(struct rpc_rqst *req, __be32 *p,
+ struct nfs4_statfs_res *res)
{
struct xdr_stream xdr;
struct compound_hdr hdr;
@@ -4394,9 +5090,11 @@
xdr_init_decode(&xdr, &req->rq_rcv_buf, p);
status = decode_compound_hdr(&xdr, &hdr);
if (!status)
+ status = decode_sequence(&xdr, &res->seq_res, req);
+ if (!status)
status = decode_putfh(&xdr);
if (!status)
- status = decode_statfs(&xdr, fsstat);
+ status = decode_statfs(&xdr, res->fsstat);
return status;
}
@@ -4410,7 +5108,11 @@
int status;
xdr_init_decode(&xdr, &req->rq_rcv_buf, p);
- if ((status = decode_compound_hdr(&xdr, &hdr)) != 0)
+ status = decode_compound_hdr(&xdr, &hdr);
+ if (status)
+ goto out;
+ status = decode_sequence(&xdr, &res->seq_res, req);
+ if (status)
goto out;
if ((status = decode_putfh(&xdr)) != 0)
goto out;
@@ -4483,7 +5185,10 @@
xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
status = decode_compound_hdr(&xdr, &hdr);
- if (status != 0)
+ if (status)
+ goto out;
+ status = decode_sequence(&xdr, &res->seq_res, rqstp);
+ if (status)
goto out;
status = decode_putfh(&xdr);
if (status != 0)
@@ -4497,7 +5202,8 @@
/*
* FS_LOCATIONS request
*/
-static int nfs4_xdr_dec_fs_locations(struct rpc_rqst *req, __be32 *p, struct nfs4_fs_locations *res)
+static int nfs4_xdr_dec_fs_locations(struct rpc_rqst *req, __be32 *p,
+ struct nfs4_fs_locations_res *res)
{
struct xdr_stream xdr;
struct compound_hdr hdr;
@@ -4505,18 +5211,113 @@
xdr_init_decode(&xdr, &req->rq_rcv_buf, p);
status = decode_compound_hdr(&xdr, &hdr);
- if (status != 0)
+ if (status)
+ goto out;
+ status = decode_sequence(&xdr, &res->seq_res, req);
+ if (status)
goto out;
if ((status = decode_putfh(&xdr)) != 0)
goto out;
if ((status = decode_lookup(&xdr)) != 0)
goto out;
xdr_enter_page(&xdr, PAGE_SIZE);
- status = decode_getfattr(&xdr, &res->fattr, res->server);
+ status = decode_getfattr(&xdr, &res->fs_locations->fattr,
+ res->fs_locations->server);
out:
return status;
}
+#if defined(CONFIG_NFS_V4_1)
+/*
+ * EXCHANGE_ID request
+ */
+static int nfs4_xdr_dec_exchange_id(struct rpc_rqst *rqstp, uint32_t *p,
+ void *res)
+{
+ struct xdr_stream xdr;
+ struct compound_hdr hdr;
+ int status;
+
+ xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
+ status = decode_compound_hdr(&xdr, &hdr);
+ if (!status)
+ status = decode_exchange_id(&xdr, res);
+ return status;
+}
+
+/*
+ * a CREATE_SESSION request
+ */
+static int nfs4_xdr_dec_create_session(struct rpc_rqst *rqstp, uint32_t *p,
+ struct nfs41_create_session_res *res)
+{
+ struct xdr_stream xdr;
+ struct compound_hdr hdr;
+ int status;
+
+ xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
+ status = decode_compound_hdr(&xdr, &hdr);
+ if (!status)
+ status = decode_create_session(&xdr, res);
+ return status;
+}
+
+/*
+ * a DESTROY_SESSION request
+ */
+static int nfs4_xdr_dec_destroy_session(struct rpc_rqst *rqstp, uint32_t *p,
+ void *dummy)
+{
+ struct xdr_stream xdr;
+ struct compound_hdr hdr;
+ int status;
+
+ xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
+ status = decode_compound_hdr(&xdr, &hdr);
+ if (!status)
+ status = decode_destroy_session(&xdr, dummy);
+ return status;
+}
+
+/*
+ * a SEQUENCE request
+ */
+static int nfs4_xdr_dec_sequence(struct rpc_rqst *rqstp, uint32_t *p,
+ struct nfs4_sequence_res *res)
+{
+ struct xdr_stream xdr;
+ struct compound_hdr hdr;
+ int status;
+
+ xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
+ status = decode_compound_hdr(&xdr, &hdr);
+ if (!status)
+ status = decode_sequence(&xdr, res, rqstp);
+ return status;
+}
+
+/*
+ * a GET_LEASE_TIME request
+ */
+static int nfs4_xdr_dec_get_lease_time(struct rpc_rqst *rqstp, uint32_t *p,
+ struct nfs4_get_lease_time_res *res)
+{
+ struct xdr_stream xdr;
+ struct compound_hdr hdr;
+ int status;
+
+ xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
+ status = decode_compound_hdr(&xdr, &hdr);
+ if (!status)
+ status = decode_sequence(&xdr, &res->lr_seq_res, rqstp);
+ if (!status)
+ status = decode_putrootfh(&xdr);
+ if (!status)
+ status = decode_fsinfo(&xdr, res->lr_fsinfo);
+ return status;
+}
+#endif /* CONFIG_NFS_V4_1 */
+
__be32 *nfs4_decode_dirent(__be32 *p, struct nfs_entry *entry, int plus)
{
uint32_t bitmap[2] = {0};
@@ -4686,6 +5487,13 @@
PROC(GETACL, enc_getacl, dec_getacl),
PROC(SETACL, enc_setacl, dec_setacl),
PROC(FS_LOCATIONS, enc_fs_locations, dec_fs_locations),
+#if defined(CONFIG_NFS_V4_1)
+ PROC(EXCHANGE_ID, enc_exchange_id, dec_exchange_id),
+ PROC(CREATE_SESSION, enc_create_session, dec_create_session),
+ PROC(DESTROY_SESSION, enc_destroy_session, dec_destroy_session),
+ PROC(SEQUENCE, enc_sequence, dec_sequence),
+ PROC(GET_LEASE_TIME, enc_get_lease_time, dec_get_lease_time),
+#endif /* CONFIG_NFS_V4_1 */
};
struct rpc_version nfs_version4 = {
diff --git a/fs/nfs/nfsroot.c b/fs/nfs/nfsroot.c
index e3ed590..8c55b27 100644
--- a/fs/nfs/nfsroot.c
+++ b/fs/nfs/nfsroot.c
@@ -92,6 +92,9 @@
#undef NFSROOT_DEBUG
#define NFSDBG_FACILITY NFSDBG_ROOT
+/* Default port to use if server is not running a portmapper */
+#define NFS_MNT_PORT 627
+
/* Default path we try to mount. "%s" gets replaced by our IP address */
#define NFS_ROOT "/tftpboot/%s"
@@ -487,6 +490,7 @@
{
struct nfs_fh fh;
struct sockaddr_in sin;
+ unsigned int auth_flav_len = 0;
struct nfs_mount_request request = {
.sap = (struct sockaddr *)&sin,
.salen = sizeof(sin),
@@ -496,6 +500,7 @@
.protocol = (nfs_data.flags & NFS_MOUNT_TCP) ?
XPRT_TRANSPORT_TCP : XPRT_TRANSPORT_UDP,
.fh = &fh,
+ .auth_flav_len = &auth_flav_len,
};
int status;
diff --git a/fs/nfs/read.c b/fs/nfs/read.c
index 4ace3c5..96c4ebf 100644
--- a/fs/nfs/read.c
+++ b/fs/nfs/read.c
@@ -22,6 +22,7 @@
#include <asm/system.h>
+#include "nfs4_fs.h"
#include "internal.h"
#include "iostat.h"
#include "fscache.h"
@@ -46,6 +47,7 @@
memset(p, 0, sizeof(*p));
INIT_LIST_HEAD(&p->pages);
p->npages = pagecount;
+ p->res.seq_res.sr_slotid = NFS4_MAX_SLOT_TABLE;
if (pagecount <= ARRAY_SIZE(p->page_array))
p->pagevec = p->page_array;
else {
@@ -357,19 +359,25 @@
struct nfs_readres *resp = &data->res;
if (resp->eof || resp->count == argp->count)
- return;
+ goto out;
/* This is a short read! */
nfs_inc_stats(data->inode, NFSIOS_SHORTREAD);
/* Has the server at least made some progress? */
if (resp->count == 0)
- return;
+ goto out;
/* Yes, so retry the read at the end of the data */
argp->offset += resp->count;
argp->pgbase += resp->count;
argp->count -= resp->count;
- rpc_restart_call(task);
+ nfs4_restart_rpc(task, NFS_SERVER(data->inode)->nfs_client);
+ return;
+out:
+ nfs4_sequence_free_slot(NFS_SERVER(data->inode)->nfs_client,
+ &data->res.seq_res);
+ return;
+
}
/*
@@ -406,7 +414,23 @@
nfs_readdata_release(calldata);
}
+#if defined(CONFIG_NFS_V4_1)
+void nfs_read_prepare(struct rpc_task *task, void *calldata)
+{
+ struct nfs_read_data *data = calldata;
+
+ if (nfs4_setup_sequence(NFS_SERVER(data->inode)->nfs_client,
+ &data->args.seq_args, &data->res.seq_res,
+ 0, task))
+ return;
+ rpc_call_start(task);
+}
+#endif /* CONFIG_NFS_V4_1 */
+
static const struct rpc_call_ops nfs_read_partial_ops = {
+#if defined(CONFIG_NFS_V4_1)
+ .rpc_call_prepare = nfs_read_prepare,
+#endif /* CONFIG_NFS_V4_1 */
.rpc_call_done = nfs_readpage_result_partial,
.rpc_release = nfs_readpage_release_partial,
};
@@ -470,6 +494,9 @@
}
static const struct rpc_call_ops nfs_read_full_ops = {
+#if defined(CONFIG_NFS_V4_1)
+ .rpc_call_prepare = nfs_read_prepare,
+#endif /* CONFIG_NFS_V4_1 */
.rpc_call_done = nfs_readpage_result_full,
.rpc_release = nfs_readpage_release_full,
};
diff --git a/fs/nfs/super.c b/fs/nfs/super.c
index 26127b6..0b4cbdc 100644
--- a/fs/nfs/super.c
+++ b/fs/nfs/super.c
@@ -42,6 +42,8 @@
#include <linux/smp_lock.h>
#include <linux/seq_file.h>
#include <linux/mount.h>
+#include <linux/mnt_namespace.h>
+#include <linux/namei.h>
#include <linux/nfs_idmap.h>
#include <linux/vfs.h>
#include <linux/inet.h>
@@ -90,6 +92,7 @@
Opt_mountport,
Opt_mountvers,
Opt_nfsvers,
+ Opt_minorversion,
/* Mount options that take string arguments */
Opt_sec, Opt_proto, Opt_mountproto, Opt_mounthost,
@@ -139,22 +142,23 @@
{ Opt_fscache_uniq, "fsc=%s" },
{ Opt_nofscache, "nofsc" },
- { Opt_port, "port=%u" },
- { Opt_rsize, "rsize=%u" },
- { Opt_wsize, "wsize=%u" },
- { Opt_bsize, "bsize=%u" },
- { Opt_timeo, "timeo=%u" },
- { Opt_retrans, "retrans=%u" },
- { Opt_acregmin, "acregmin=%u" },
- { Opt_acregmax, "acregmax=%u" },
- { Opt_acdirmin, "acdirmin=%u" },
- { Opt_acdirmax, "acdirmax=%u" },
- { Opt_actimeo, "actimeo=%u" },
- { Opt_namelen, "namlen=%u" },
- { Opt_mountport, "mountport=%u" },
- { Opt_mountvers, "mountvers=%u" },
- { Opt_nfsvers, "nfsvers=%u" },
- { Opt_nfsvers, "vers=%u" },
+ { Opt_port, "port=%s" },
+ { Opt_rsize, "rsize=%s" },
+ { Opt_wsize, "wsize=%s" },
+ { Opt_bsize, "bsize=%s" },
+ { Opt_timeo, "timeo=%s" },
+ { Opt_retrans, "retrans=%s" },
+ { Opt_acregmin, "acregmin=%s" },
+ { Opt_acregmax, "acregmax=%s" },
+ { Opt_acdirmin, "acdirmin=%s" },
+ { Opt_acdirmax, "acdirmax=%s" },
+ { Opt_actimeo, "actimeo=%s" },
+ { Opt_namelen, "namlen=%s" },
+ { Opt_mountport, "mountport=%s" },
+ { Opt_mountvers, "mountvers=%s" },
+ { Opt_nfsvers, "nfsvers=%s" },
+ { Opt_nfsvers, "vers=%s" },
+ { Opt_minorversion, "minorversion=%u" },
{ Opt_sec, "sec=%s" },
{ Opt_proto, "proto=%s" },
@@ -270,10 +274,14 @@
#ifdef CONFIG_NFS_V4
static int nfs4_get_sb(struct file_system_type *fs_type,
int flags, const char *dev_name, void *raw_data, struct vfsmount *mnt);
+static int nfs4_remote_get_sb(struct file_system_type *fs_type,
+ int flags, const char *dev_name, void *raw_data, struct vfsmount *mnt);
static int nfs4_xdev_get_sb(struct file_system_type *fs_type,
int flags, const char *dev_name, void *raw_data, struct vfsmount *mnt);
static int nfs4_referral_get_sb(struct file_system_type *fs_type,
int flags, const char *dev_name, void *raw_data, struct vfsmount *mnt);
+static int nfs4_remote_referral_get_sb(struct file_system_type *fs_type,
+ int flags, const char *dev_name, void *raw_data, struct vfsmount *mnt);
static void nfs4_kill_super(struct super_block *sb);
static struct file_system_type nfs4_fs_type = {
@@ -284,6 +292,14 @@
.fs_flags = FS_RENAME_DOES_D_MOVE|FS_REVAL_DOT|FS_BINARY_MOUNTDATA,
};
+static struct file_system_type nfs4_remote_fs_type = {
+ .owner = THIS_MODULE,
+ .name = "nfs4",
+ .get_sb = nfs4_remote_get_sb,
+ .kill_sb = nfs4_kill_super,
+ .fs_flags = FS_RENAME_DOES_D_MOVE|FS_REVAL_DOT|FS_BINARY_MOUNTDATA,
+};
+
struct file_system_type nfs4_xdev_fs_type = {
.owner = THIS_MODULE,
.name = "nfs4",
@@ -292,6 +308,14 @@
.fs_flags = FS_RENAME_DOES_D_MOVE|FS_REVAL_DOT|FS_BINARY_MOUNTDATA,
};
+static struct file_system_type nfs4_remote_referral_fs_type = {
+ .owner = THIS_MODULE,
+ .name = "nfs4",
+ .get_sb = nfs4_remote_referral_get_sb,
+ .kill_sb = nfs4_kill_super,
+ .fs_flags = FS_RENAME_DOES_D_MOVE|FS_REVAL_DOT|FS_BINARY_MOUNTDATA,
+};
+
struct file_system_type nfs4_referral_fs_type = {
.owner = THIS_MODULE,
.name = "nfs4",
@@ -514,7 +538,6 @@
const char *nostr;
} nfs_info[] = {
{ NFS_MOUNT_SOFT, ",soft", ",hard" },
- { NFS_MOUNT_INTR, ",intr", ",nointr" },
{ NFS_MOUNT_POSIX, ",posix", "" },
{ NFS_MOUNT_NOCTO, ",nocto", "" },
{ NFS_MOUNT_NOAC, ",noac", "" },
@@ -943,11 +966,6 @@
return 1;
}
-static void nfs_parse_invalid_value(const char *option)
-{
- dfprintk(MOUNT, "NFS: bad value specified for %s option\n", option);
-}
-
/*
* Error-check and convert a string of mount options from user space into
* a data structure. The whole mount string is processed; bad options are
@@ -958,7 +976,7 @@
struct nfs_parsed_mount_data *mnt)
{
char *p, *string, *secdata;
- int rc, sloppy = 0, errors = 0;
+ int rc, sloppy = 0, invalid_option = 0;
if (!raw) {
dfprintk(MOUNT, "NFS: mount options string was NULL.\n");
@@ -982,7 +1000,9 @@
while ((p = strsep(&raw, ",")) != NULL) {
substring_t args[MAX_OPT_ARGS];
- int option, token;
+ unsigned long option;
+ int int_option;
+ int token;
if (!*p)
continue;
@@ -1091,114 +1111,156 @@
* options that take numeric values
*/
case Opt_port:
- if (match_int(args, &option) ||
- option < 0 || option > USHORT_MAX) {
- errors++;
- nfs_parse_invalid_value("port");
- } else
- mnt->nfs_server.port = option;
+ string = match_strdup(args);
+ if (string == NULL)
+ goto out_nomem;
+ rc = strict_strtoul(string, 10, &option);
+ kfree(string);
+ if (rc != 0 || option > USHORT_MAX)
+ goto out_invalid_value;
+ mnt->nfs_server.port = option;
break;
case Opt_rsize:
- if (match_int(args, &option) || option < 0) {
- errors++;
- nfs_parse_invalid_value("rsize");
- } else
- mnt->rsize = option;
+ string = match_strdup(args);
+ if (string == NULL)
+ goto out_nomem;
+ rc = strict_strtoul(string, 10, &option);
+ kfree(string);
+ if (rc != 0)
+ goto out_invalid_value;
+ mnt->rsize = option;
break;
case Opt_wsize:
- if (match_int(args, &option) || option < 0) {
- errors++;
- nfs_parse_invalid_value("wsize");
- } else
- mnt->wsize = option;
+ string = match_strdup(args);
+ if (string == NULL)
+ goto out_nomem;
+ rc = strict_strtoul(string, 10, &option);
+ kfree(string);
+ if (rc != 0)
+ goto out_invalid_value;
+ mnt->wsize = option;
break;
case Opt_bsize:
- if (match_int(args, &option) || option < 0) {
- errors++;
- nfs_parse_invalid_value("bsize");
- } else
- mnt->bsize = option;
+ string = match_strdup(args);
+ if (string == NULL)
+ goto out_nomem;
+ rc = strict_strtoul(string, 10, &option);
+ kfree(string);
+ if (rc != 0)
+ goto out_invalid_value;
+ mnt->bsize = option;
break;
case Opt_timeo:
- if (match_int(args, &option) || option <= 0) {
- errors++;
- nfs_parse_invalid_value("timeo");
- } else
- mnt->timeo = option;
+ string = match_strdup(args);
+ if (string == NULL)
+ goto out_nomem;
+ rc = strict_strtoul(string, 10, &option);
+ kfree(string);
+ if (rc != 0 || option == 0)
+ goto out_invalid_value;
+ mnt->timeo = option;
break;
case Opt_retrans:
- if (match_int(args, &option) || option <= 0) {
- errors++;
- nfs_parse_invalid_value("retrans");
- } else
- mnt->retrans = option;
+ string = match_strdup(args);
+ if (string == NULL)
+ goto out_nomem;
+ rc = strict_strtoul(string, 10, &option);
+ kfree(string);
+ if (rc != 0 || option == 0)
+ goto out_invalid_value;
+ mnt->retrans = option;
break;
case Opt_acregmin:
- if (match_int(args, &option) || option < 0) {
- errors++;
- nfs_parse_invalid_value("acregmin");
- } else
- mnt->acregmin = option;
+ string = match_strdup(args);
+ if (string == NULL)
+ goto out_nomem;
+ rc = strict_strtoul(string, 10, &option);
+ kfree(string);
+ if (rc != 0)
+ goto out_invalid_value;
+ mnt->acregmin = option;
break;
case Opt_acregmax:
- if (match_int(args, &option) || option < 0) {
- errors++;
- nfs_parse_invalid_value("acregmax");
- } else
- mnt->acregmax = option;
+ string = match_strdup(args);
+ if (string == NULL)
+ goto out_nomem;
+ rc = strict_strtoul(string, 10, &option);
+ kfree(string);
+ if (rc != 0)
+ goto out_invalid_value;
+ mnt->acregmax = option;
break;
case Opt_acdirmin:
- if (match_int(args, &option) || option < 0) {
- errors++;
- nfs_parse_invalid_value("acdirmin");
- } else
- mnt->acdirmin = option;
+ string = match_strdup(args);
+ if (string == NULL)
+ goto out_nomem;
+ rc = strict_strtoul(string, 10, &option);
+ kfree(string);
+ if (rc != 0)
+ goto out_invalid_value;
+ mnt->acdirmin = option;
break;
case Opt_acdirmax:
- if (match_int(args, &option) || option < 0) {
- errors++;
- nfs_parse_invalid_value("acdirmax");
- } else
- mnt->acdirmax = option;
+ string = match_strdup(args);
+ if (string == NULL)
+ goto out_nomem;
+ rc = strict_strtoul(string, 10, &option);
+ kfree(string);
+ if (rc != 0)
+ goto out_invalid_value;
+ mnt->acdirmax = option;
break;
case Opt_actimeo:
- if (match_int(args, &option) || option < 0) {
- errors++;
- nfs_parse_invalid_value("actimeo");
- } else
- mnt->acregmin = mnt->acregmax =
- mnt->acdirmin = mnt->acdirmax = option;
+ string = match_strdup(args);
+ if (string == NULL)
+ goto out_nomem;
+ rc = strict_strtoul(string, 10, &option);
+ kfree(string);
+ if (rc != 0)
+ goto out_invalid_value;
+ mnt->acregmin = mnt->acregmax =
+ mnt->acdirmin = mnt->acdirmax = option;
break;
case Opt_namelen:
- if (match_int(args, &option) || option < 0) {
- errors++;
- nfs_parse_invalid_value("namlen");
- } else
- mnt->namlen = option;
+ string = match_strdup(args);
+ if (string == NULL)
+ goto out_nomem;
+ rc = strict_strtoul(string, 10, &option);
+ kfree(string);
+ if (rc != 0)
+ goto out_invalid_value;
+ mnt->namlen = option;
break;
case Opt_mountport:
- if (match_int(args, &option) ||
- option < 0 || option > USHORT_MAX) {
- errors++;
- nfs_parse_invalid_value("mountport");
- } else
- mnt->mount_server.port = option;
+ string = match_strdup(args);
+ if (string == NULL)
+ goto out_nomem;
+ rc = strict_strtoul(string, 10, &option);
+ kfree(string);
+ if (rc != 0 || option > USHORT_MAX)
+ goto out_invalid_value;
+ mnt->mount_server.port = option;
break;
case Opt_mountvers:
- if (match_int(args, &option) ||
+ string = match_strdup(args);
+ if (string == NULL)
+ goto out_nomem;
+ rc = strict_strtoul(string, 10, &option);
+ kfree(string);
+ if (rc != 0 ||
option < NFS_MNT_VERSION ||
- option > NFS_MNT3_VERSION) {
- errors++;
- nfs_parse_invalid_value("mountvers");
- } else
- mnt->mount_server.version = option;
+ option > NFS_MNT3_VERSION)
+ goto out_invalid_value;
+ mnt->mount_server.version = option;
break;
case Opt_nfsvers:
- if (match_int(args, &option)) {
- errors++;
- nfs_parse_invalid_value("nfsvers");
- break;
- }
+ string = match_strdup(args);
+ if (string == NULL)
+ goto out_nomem;
+ rc = strict_strtoul(string, 10, &option);
+ kfree(string);
+ if (rc != 0)
+ goto out_invalid_value;
switch (option) {
case NFS2_VERSION:
mnt->flags &= ~NFS_MOUNT_VER3;
@@ -1207,10 +1269,16 @@
mnt->flags |= NFS_MOUNT_VER3;
break;
default:
- errors++;
- nfs_parse_invalid_value("nfsvers");
+ goto out_invalid_value;
}
break;
+ case Opt_minorversion:
+ if (match_int(args, &int_option))
+ return 0;
+ if (int_option < 0 || int_option > NFS4_MAX_MINOR_VERSION)
+ return 0;
+ mnt->minorversion = int_option;
+ break;
/*
* options that take text values
@@ -1222,9 +1290,9 @@
rc = nfs_parse_security_flavors(string, mnt);
kfree(string);
if (!rc) {
- errors++;
dfprintk(MOUNT, "NFS: unrecognized "
"security flavor\n");
+ return 0;
}
break;
case Opt_proto:
@@ -1238,23 +1306,25 @@
case Opt_xprt_udp:
mnt->flags &= ~NFS_MOUNT_TCP;
mnt->nfs_server.protocol = XPRT_TRANSPORT_UDP;
+ kfree(string);
break;
case Opt_xprt_tcp:
mnt->flags |= NFS_MOUNT_TCP;
mnt->nfs_server.protocol = XPRT_TRANSPORT_TCP;
+ kfree(string);
break;
case Opt_xprt_rdma:
/* vector side protocols to TCP */
mnt->flags |= NFS_MOUNT_TCP;
mnt->nfs_server.protocol = XPRT_TRANSPORT_RDMA;
xprt_load_transport(string);
+ kfree(string);
break;
default:
- errors++;
dfprintk(MOUNT, "NFS: unrecognized "
"transport protocol\n");
+ return 0;
}
- kfree(string);
break;
case Opt_mountproto:
string = match_strdup(args);
@@ -1273,9 +1343,9 @@
break;
case Opt_xprt_rdma: /* not used for side protocols */
default:
- errors++;
dfprintk(MOUNT, "NFS: unrecognized "
"transport protocol\n");
+ return 0;
}
break;
case Opt_addr:
@@ -1331,9 +1401,9 @@
mnt->flags |= NFS_MOUNT_LOOKUP_CACHE_NONEG|NFS_MOUNT_LOOKUP_CACHE_NONE;
break;
default:
- errors++;
dfprintk(MOUNT, "NFS: invalid "
"lookupcache argument\n");
+ return 0;
};
break;
@@ -1351,20 +1421,20 @@
break;
default:
- errors++;
+ invalid_option = 1;
dfprintk(MOUNT, "NFS: unrecognized mount option "
"'%s'\n", p);
}
}
- if (errors > 0) {
- dfprintk(MOUNT, "NFS: parsing encountered %d error%s\n",
- errors, (errors == 1 ? "" : "s"));
- if (!sloppy)
- return 0;
- }
+ if (!sloppy && invalid_option)
+ return 0;
+
return 1;
+out_invalid_value:
+ printk(KERN_INFO "NFS: bad mount option value specified: %s \n", p);
+ return 0;
out_nomem:
printk(KERN_INFO "NFS: not enough memory to parse option\n");
return 0;
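Editor's note: the option table above switches the numeric mount options from %u tokens to %s so the parser can validate the value itself with match_strdup() plus strict_strtoul() and range checks; a bad value now jumps to out_invalid_value and fails the mount, where previously it only bumped an error counter that a sloppy mount would tolerate. A rough user-space equivalent of that pattern, using strtoul() since strict_strtoul() is kernel-only (parse_uint_option is a hypothetical name):

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

/* Parse a "port=NNN"-style value: the whole string must be digits and <= max. */
static int parse_uint_option(const char *s, unsigned long max, unsigned long *out)
{
	char *end;
	unsigned long val;

	errno = 0;
	val = strtoul(s, &end, 10);
	if (errno || end == s || *end != '\0' || val > max)
		return -EINVAL;
	*out = val;
	return 0;
}

int main(void)
{
	unsigned long port;

	if (parse_uint_option("2049", 65535, &port) == 0)
		printf("port=%lu\n", port);
	if (parse_uint_option("70000", 65535, &port) != 0)
		printf("70000 rejected (out of range)\n");
	return 0;
}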
@@ -1381,6 +1451,7 @@
static int nfs_try_mount(struct nfs_parsed_mount_data *args,
struct nfs_fh *root_fh)
{
+ unsigned int auth_flavor_len = 0;
struct nfs_mount_request request = {
.sap = (struct sockaddr *)
&args->mount_server.address,
@@ -1388,6 +1459,7 @@
.protocol = args->mount_server.protocol,
.fh = root_fh,
.noresvport = args->flags & NFS_MOUNT_NORESVPORT,
+ .auth_flav_len = &auth_flavor_len,
};
int status;
@@ -2240,6 +2312,11 @@
nfs_initialise_sb(sb);
}
+static void nfs4_validate_mount_flags(struct nfs_parsed_mount_data *args)
+{
+ args->flags &= ~(NFS_MOUNT_NONLM|NFS_MOUNT_NOACL|NFS_MOUNT_VER3);
+}
+
/*
* Validate NFSv4 mount options
*/
@@ -2263,6 +2340,7 @@
args->nfs_server.port = NFS_PORT; /* 2049 unless user set port= */
args->auth_flavors[0] = RPC_AUTH_UNIX;
args->auth_flavor_len = 0;
+ args->minorversion = 0;
switch (data->version) {
case 1:
@@ -2336,6 +2414,8 @@
nfs_validate_transport_protocol(args);
+ nfs4_validate_mount_flags(args);
+
if (args->auth_flavor_len > 1)
goto out_inval_auth;
@@ -2375,12 +2455,12 @@
}
/*
- * Get the superblock for an NFS4 mountpoint
+ * Get the superblock for the NFS4 root partition
*/
-static int nfs4_get_sb(struct file_system_type *fs_type,
+static int nfs4_remote_get_sb(struct file_system_type *fs_type,
int flags, const char *dev_name, void *raw_data, struct vfsmount *mnt)
{
- struct nfs_parsed_mount_data *data;
+ struct nfs_parsed_mount_data *data = raw_data;
struct super_block *s;
struct nfs_server *server;
struct nfs_fh *mntfh;
@@ -2391,18 +2471,12 @@
};
int error = -ENOMEM;
- data = kzalloc(sizeof(*data), GFP_KERNEL);
mntfh = kzalloc(sizeof(*mntfh), GFP_KERNEL);
if (data == NULL || mntfh == NULL)
goto out_free_fh;
security_init_mnt_opts(&data->lsm_opts);
- /* Validate the mount data */
- error = nfs4_validate_mount_data(raw_data, data, dev_name);
- if (error < 0)
- goto out;
-
/* Get a volume representation */
server = nfs4_create_server(data, mntfh);
if (IS_ERR(server)) {
@@ -2415,7 +2489,7 @@
compare_super = NULL;
/* Get a superblock - note that we may end up sharing one that already exists */
- s = sget(fs_type, compare_super, nfs_set_super, &sb_mntdata);
+ s = sget(&nfs4_fs_type, compare_super, nfs_set_super, &sb_mntdata);
if (IS_ERR(s)) {
error = PTR_ERR(s);
goto out_free;
@@ -2452,14 +2526,9 @@
error = 0;
out:
- kfree(data->client_address);
- kfree(data->nfs_server.export_path);
- kfree(data->nfs_server.hostname);
- kfree(data->fscache_uniq);
security_free_mnt_opts(&data->lsm_opts);
out_free_fh:
kfree(mntfh);
- kfree(data);
return error;
out_free:
@@ -2473,16 +2542,137 @@
goto out;
}
+static struct vfsmount *nfs_do_root_mount(struct file_system_type *fs_type,
+ int flags, void *data, const char *hostname)
+{
+ struct vfsmount *root_mnt;
+ char *root_devname;
+ size_t len;
+
+ len = strlen(hostname) + 3;
+ root_devname = kmalloc(len, GFP_KERNEL);
+ if (root_devname == NULL)
+ return ERR_PTR(-ENOMEM);
+ snprintf(root_devname, len, "%s:/", hostname);
+ root_mnt = vfs_kern_mount(fs_type, flags, root_devname, data);
+ kfree(root_devname);
+ return root_mnt;
+}
+
+static void nfs_fix_devname(const struct path *path, struct vfsmount *mnt)
+{
+ char *page = (char *) __get_free_page(GFP_KERNEL);
+ char *devname, *tmp;
+
+ if (page == NULL)
+ return;
+ devname = nfs_path(path->mnt->mnt_devname,
+ path->mnt->mnt_root, path->dentry,
+ page, PAGE_SIZE);
+ if (devname == NULL)
+ goto out_freepage;
+ tmp = kstrdup(devname, GFP_KERNEL);
+ if (tmp == NULL)
+ goto out_freepage;
+ kfree(mnt->mnt_devname);
+ mnt->mnt_devname = tmp;
+out_freepage:
+ free_page((unsigned long)page);
+}
+
+static int nfs_follow_remote_path(struct vfsmount *root_mnt,
+ const char *export_path, struct vfsmount *mnt_target)
+{
+ struct mnt_namespace *ns_private;
+ struct nameidata nd;
+ struct super_block *s;
+ int ret;
+
+ ns_private = create_mnt_ns(root_mnt);
+ ret = PTR_ERR(ns_private);
+ if (IS_ERR(ns_private))
+ goto out_mntput;
+
+ ret = vfs_path_lookup(root_mnt->mnt_root, root_mnt,
+ export_path, LOOKUP_FOLLOW, &nd);
+
+ put_mnt_ns(ns_private);
+
+ if (ret != 0)
+ goto out_err;
+
+ s = nd.path.mnt->mnt_sb;
+ atomic_inc(&s->s_active);
+ mnt_target->mnt_sb = s;
+ mnt_target->mnt_root = dget(nd.path.dentry);
+
+ /* Correct the device pathname */
+ nfs_fix_devname(&nd.path, mnt_target);
+
+ path_put(&nd.path);
+ down_write(&s->s_umount);
+ return 0;
+out_mntput:
+ mntput(root_mnt);
+out_err:
+ return ret;
+}
+
+/*
+ * Get the superblock for an NFS4 mountpoint
+ */
+static int nfs4_get_sb(struct file_system_type *fs_type,
+ int flags, const char *dev_name, void *raw_data, struct vfsmount *mnt)
+{
+ struct nfs_parsed_mount_data *data;
+ char *export_path;
+ struct vfsmount *root_mnt;
+ int error = -ENOMEM;
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (data == NULL)
+ goto out_free_data;
+
+ /* Validate the mount data */
+ error = nfs4_validate_mount_data(raw_data, data, dev_name);
+ if (error < 0)
+ goto out;
+
+ export_path = data->nfs_server.export_path;
+ data->nfs_server.export_path = "/";
+ root_mnt = nfs_do_root_mount(&nfs4_remote_fs_type, flags, data,
+ data->nfs_server.hostname);
+ data->nfs_server.export_path = export_path;
+
+ error = PTR_ERR(root_mnt);
+ if (IS_ERR(root_mnt))
+ goto out;
+
+ error = nfs_follow_remote_path(root_mnt, export_path, mnt);
+
+out:
+ kfree(data->client_address);
+ kfree(data->nfs_server.export_path);
+ kfree(data->nfs_server.hostname);
+ kfree(data->fscache_uniq);
+out_free_data:
+ kfree(data);
+ dprintk("<-- nfs4_get_sb() = %d%s\n", error,
+ error != 0 ? " [error]" : "");
+ return error;
+}
+
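Editor's note: nfs4_get_sb() now performs the mount in two steps: nfs_do_root_mount() mounts "<hostname>:/" using the internal nfs4_remote type, then nfs_follow_remote_path() walks the requested export path inside that tree, reuses the superblock it finds, and nfs_fix_devname() rewrites the vfsmount's device name to the full path. The VFS pieces need kernel context, but the device-name handling can be sketched stand-alone (build_root_devname is a hypothetical name mirroring nfs_do_root_mount()'s snprintf):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static char *build_root_devname(const char *hostname)
{
	size_t len = strlen(hostname) + 3;	/* host + ":/" + NUL */
	char *name = malloc(len);

	if (!name)
		return NULL;
	snprintf(name, len, "%s:/", hostname);
	return name;
}

int main(void)
{
	char *root = build_root_devname("server.example.org");
	const char *export_path = "/export/home";

	if (!root)
		return 1;
	printf("step 1: mount %s with the internal nfs4 'remote' type\n", root);
	printf("step 2: walk \"%s\" inside that tree and reuse its superblock\n",
	       export_path);
	free(root);
	return 0;
}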
static void nfs4_kill_super(struct super_block *sb)
{
struct nfs_server *server = NFS_SB(sb);
+ dprintk("--> %s\n", __func__);
nfs_super_return_all_delegations(sb);
kill_anon_super(sb);
-
nfs4_renewd_prepare_shutdown(server);
nfs_fscache_release_super_cookie(sb);
nfs_free_server(server);
+ dprintk("<-- %s\n", __func__);
}
/*
@@ -2568,12 +2758,9 @@
return error;
}
-/*
- * Create an NFS4 server record on referral traversal
- */
-static int nfs4_referral_get_sb(struct file_system_type *fs_type, int flags,
- const char *dev_name, void *raw_data,
- struct vfsmount *mnt)
+static int nfs4_remote_referral_get_sb(struct file_system_type *fs_type,
+ int flags, const char *dev_name, void *raw_data,
+ struct vfsmount *mnt)
{
struct nfs_clone_mount *data = raw_data;
struct super_block *s;
@@ -2652,4 +2839,36 @@
return error;
}
+/*
+ * Create an NFS4 server record on referral traversal
+ */
+static int nfs4_referral_get_sb(struct file_system_type *fs_type,
+ int flags, const char *dev_name, void *raw_data,
+ struct vfsmount *mnt)
+{
+ struct nfs_clone_mount *data = raw_data;
+ char *export_path;
+ struct vfsmount *root_mnt;
+ int error;
+
+ dprintk("--> nfs4_referral_get_sb()\n");
+
+ export_path = data->mnt_path;
+ data->mnt_path = "/";
+
+ root_mnt = nfs_do_root_mount(&nfs4_remote_referral_fs_type,
+ flags, data, data->hostname);
+ data->mnt_path = export_path;
+
+ error = PTR_ERR(root_mnt);
+ if (IS_ERR(root_mnt))
+ goto out;
+
+ error = nfs_follow_remote_path(root_mnt, export_path, mnt);
+out:
+ dprintk("<-- nfs4_referral_get_sb() = %d%s\n", error,
+ error != 0 ? " [error]" : "");
+ return error;
+}
+
#endif /* CONFIG_NFS_V4 */
diff --git a/fs/nfs/unlink.c b/fs/nfs/unlink.c
index ecc2953..1064c91 100644
--- a/fs/nfs/unlink.c
+++ b/fs/nfs/unlink.c
@@ -15,6 +15,7 @@
#include <linux/wait.h>
#include "internal.h"
+#include "nfs4_fs.h"
struct nfs_unlinkdata {
struct hlist_node list;
@@ -82,7 +83,7 @@
struct inode *dir = data->dir;
if (!NFS_PROTO(dir)->unlink_done(task, dir))
- rpc_restart_call(task);
+ nfs4_restart_rpc(task, NFS_SERVER(dir)->nfs_client);
}
/**
@@ -102,9 +103,25 @@
nfs_sb_deactive(sb);
}
+#if defined(CONFIG_NFS_V4_1)
+void nfs_unlink_prepare(struct rpc_task *task, void *calldata)
+{
+ struct nfs_unlinkdata *data = calldata;
+ struct nfs_server *server = NFS_SERVER(data->dir);
+
+ if (nfs4_setup_sequence(server->nfs_client, &data->args.seq_args,
+ &data->res.seq_res, 1, task))
+ return;
+ rpc_call_start(task);
+}
+#endif /* CONFIG_NFS_V4_1 */
+
static const struct rpc_call_ops nfs_unlink_ops = {
.rpc_call_done = nfs_async_unlink_done,
.rpc_release = nfs_async_unlink_release,
+#if defined(CONFIG_NFS_V4_1)
+ .rpc_call_prepare = nfs_unlink_prepare,
+#endif /* CONFIG_NFS_V4_1 */
};
static int nfs_do_call_unlink(struct dentry *parent, struct inode *dir, struct nfs_unlinkdata *data)
@@ -241,6 +258,7 @@
status = PTR_ERR(data->cred);
goto out_free;
}
+ data->res.seq_res.sr_slotid = NFS4_MAX_SLOT_TABLE;
status = -EBUSY;
spin_lock(&dentry->d_lock);
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index e560a78..ce72882 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -25,6 +25,7 @@
#include "delegation.h"
#include "internal.h"
#include "iostat.h"
+#include "nfs4_fs.h"
#define NFSDBG_FACILITY NFSDBG_PAGECACHE
@@ -52,6 +53,7 @@
if (p) {
memset(p, 0, sizeof(*p));
INIT_LIST_HEAD(&p->pages);
+ p->res.seq_res.sr_slotid = NFS4_MAX_SLOT_TABLE;
}
return p;
}
@@ -71,6 +73,7 @@
memset(p, 0, sizeof(*p));
INIT_LIST_HEAD(&p->pages);
p->npages = pagecount;
+ p->res.seq_res.sr_slotid = NFS4_MAX_SLOT_TABLE;
if (pagecount <= ARRAY_SIZE(p->page_array))
p->pagevec = p->page_array;
else {
@@ -1048,7 +1051,23 @@
nfs_writedata_release(calldata);
}
+#if defined(CONFIG_NFS_V4_1)
+void nfs_write_prepare(struct rpc_task *task, void *calldata)
+{
+ struct nfs_write_data *data = calldata;
+ struct nfs_client *clp = (NFS_SERVER(data->inode))->nfs_client;
+
+ if (nfs4_setup_sequence(clp, &data->args.seq_args,
+ &data->res.seq_res, 1, task))
+ return;
+ rpc_call_start(task);
+}
+#endif /* CONFIG_NFS_V4_1 */
+
static const struct rpc_call_ops nfs_write_partial_ops = {
+#if defined(CONFIG_NFS_V4_1)
+ .rpc_call_prepare = nfs_write_prepare,
+#endif /* CONFIG_NFS_V4_1 */
.rpc_call_done = nfs_writeback_done_partial,
.rpc_release = nfs_writeback_release_partial,
};
@@ -1111,6 +1130,9 @@
}
static const struct rpc_call_ops nfs_write_full_ops = {
+#if defined(CONFIG_NFS_V4_1)
+ .rpc_call_prepare = nfs_write_prepare,
+#endif /* CONFIG_NFS_V4_1 */
.rpc_call_done = nfs_writeback_done_full,
.rpc_release = nfs_writeback_release_full,
};
@@ -1123,6 +1145,7 @@
{
struct nfs_writeargs *argp = &data->args;
struct nfs_writeres *resp = &data->res;
+ struct nfs_server *server = NFS_SERVER(data->inode);
int status;
dprintk("NFS: %5u nfs_writeback_done (status %d)\n",
@@ -1155,7 +1178,7 @@
if (time_before(complain, jiffies)) {
dprintk("NFS: faulty NFS server %s:"
" (committed = %d) != (stable = %d)\n",
- NFS_SERVER(data->inode)->nfs_client->cl_hostname,
+ server->nfs_client->cl_hostname,
resp->verf->committed, argp->stable);
complain = jiffies + 300 * HZ;
}
@@ -1181,7 +1204,7 @@
*/
argp->stable = NFS_FILE_SYNC;
}
- rpc_restart_call(task);
+ nfs4_restart_rpc(task, server->nfs_client);
return -EAGAIN;
}
if (time_before(complain, jiffies)) {
@@ -1193,6 +1216,7 @@
/* Can't do anything about it except throw an error. */
task->tk_status = -EIO;
}
+ nfs4_sequence_free_slot(server->nfs_client, &data->res.seq_res);
return 0;
}
@@ -1349,6 +1373,9 @@
}
static const struct rpc_call_ops nfs_commit_ops = {
+#if defined(CONFIG_NFS_V4_1)
+ .rpc_call_prepare = nfs_write_prepare,
+#endif /* CONFIG_NFS_V4_1 */
.rpc_call_done = nfs_commit_done,
.rpc_release = nfs_commit_release,
};
diff --git a/fs/nfsd/export.c b/fs/nfsd/export.c
index 8b1f8ef..b92a276 100644
--- a/fs/nfsd/export.c
+++ b/fs/nfsd/export.c
@@ -464,16 +464,11 @@
if (err)
return err;
/*
- * Just a quick sanity check; we could also try to check
- * whether this pseudoflavor is supported, but at worst
- * an unsupported pseudoflavor on the export would just
- * be a pseudoflavor that won't match the flavor of any
- * authenticated request. The administrator will
- * probably discover the problem when someone fails to
- * authenticate.
+ * XXX: It would be nice to also check whether this
+ * pseudoflavor is supported, so we can discover the
+ * problem at export time instead of when a client fails
+ * to authenticate.
*/
- if (f->pseudoflavor < 0)
- return -EINVAL;
err = get_int(mesg, &f->flags);
if (err)
return err;
diff --git a/fs/nfsd/nfs3proc.c b/fs/nfsd/nfs3proc.c
index 7c9fe83..a713c41 100644
--- a/fs/nfsd/nfs3proc.c
+++ b/fs/nfsd/nfs3proc.c
@@ -652,8 +652,6 @@
* NFSv3 Server procedures.
* Only the results of non-idempotent operations are cached.
*/
-#define nfs3svc_decode_voidargs NULL
-#define nfs3svc_release_void NULL
#define nfs3svc_decode_fhandleargs nfs3svc_decode_fhandle
#define nfs3svc_encode_attrstatres nfs3svc_encode_attrstat
#define nfs3svc_encode_wccstatres nfs3svc_encode_wccstat
@@ -686,28 +684,219 @@
#define WC (7+pAT) /* WCC attributes */
static struct svc_procedure nfsd_procedures3[22] = {
- PROC(null, void, void, void, RC_NOCACHE, ST),
- PROC(getattr, fhandle, attrstat, fhandle, RC_NOCACHE, ST+AT),
- PROC(setattr, sattr, wccstat, fhandle, RC_REPLBUFF, ST+WC),
- PROC(lookup, dirop, dirop, fhandle2, RC_NOCACHE, ST+FH+pAT+pAT),
- PROC(access, access, access, fhandle, RC_NOCACHE, ST+pAT+1),
- PROC(readlink, readlink, readlink, fhandle, RC_NOCACHE, ST+pAT+1+NFS3_MAXPATHLEN/4),
- PROC(read, read, read, fhandle, RC_NOCACHE, ST+pAT+4+NFSSVC_MAXBLKSIZE/4),
- PROC(write, write, write, fhandle, RC_REPLBUFF, ST+WC+4),
- PROC(create, create, create, fhandle2, RC_REPLBUFF, ST+(1+FH+pAT)+WC),
- PROC(mkdir, mkdir, create, fhandle2, RC_REPLBUFF, ST+(1+FH+pAT)+WC),
- PROC(symlink, symlink, create, fhandle2, RC_REPLBUFF, ST+(1+FH+pAT)+WC),
- PROC(mknod, mknod, create, fhandle2, RC_REPLBUFF, ST+(1+FH+pAT)+WC),
- PROC(remove, dirop, wccstat, fhandle, RC_REPLBUFF, ST+WC),
- PROC(rmdir, dirop, wccstat, fhandle, RC_REPLBUFF, ST+WC),
- PROC(rename, rename, rename, fhandle2, RC_REPLBUFF, ST+WC+WC),
- PROC(link, link, link, fhandle2, RC_REPLBUFF, ST+pAT+WC),
- PROC(readdir, readdir, readdir, fhandle, RC_NOCACHE, 0),
- PROC(readdirplus,readdirplus, readdir, fhandle, RC_NOCACHE, 0),
- PROC(fsstat, fhandle, fsstat, void, RC_NOCACHE, ST+pAT+2*6+1),
- PROC(fsinfo, fhandle, fsinfo, void, RC_NOCACHE, ST+pAT+12),
- PROC(pathconf, fhandle, pathconf, void, RC_NOCACHE, ST+pAT+6),
- PROC(commit, commit, commit, fhandle, RC_NOCACHE, ST+WC+2),
+ [NFS3PROC_NULL] = {
+ .pc_func = (svc_procfunc) nfsd3_proc_null,
+ .pc_encode = (kxdrproc_t) nfs3svc_encode_voidres,
+ .pc_argsize = sizeof(struct nfsd3_voidargs),
+ .pc_ressize = sizeof(struct nfsd3_voidres),
+ .pc_cachetype = RC_NOCACHE,
+ .pc_xdrressize = ST,
+ },
+ [NFS3PROC_GETATTR] = {
+ .pc_func = (svc_procfunc) nfsd3_proc_getattr,
+ .pc_decode = (kxdrproc_t) nfs3svc_decode_fhandleargs,
+ .pc_encode = (kxdrproc_t) nfs3svc_encode_attrstatres,
+ .pc_release = (kxdrproc_t) nfs3svc_release_fhandle,
+ .pc_argsize = sizeof(struct nfsd3_fhandleargs),
+ .pc_ressize = sizeof(struct nfsd3_attrstatres),
+ .pc_cachetype = RC_NOCACHE,
+ .pc_xdrressize = ST+AT,
+ },
+ [NFS3PROC_SETATTR] = {
+ .pc_func = (svc_procfunc) nfsd3_proc_setattr,
+ .pc_decode = (kxdrproc_t) nfs3svc_decode_sattrargs,
+ .pc_encode = (kxdrproc_t) nfs3svc_encode_wccstatres,
+ .pc_release = (kxdrproc_t) nfs3svc_release_fhandle,
+ .pc_argsize = sizeof(struct nfsd3_sattrargs),
+ .pc_ressize = sizeof(struct nfsd3_wccstatres),
+ .pc_cachetype = RC_REPLBUFF,
+ .pc_xdrressize = ST+WC,
+ },
+ [NFS3PROC_LOOKUP] = {
+ .pc_func = (svc_procfunc) nfsd3_proc_lookup,
+ .pc_decode = (kxdrproc_t) nfs3svc_decode_diropargs,
+ .pc_encode = (kxdrproc_t) nfs3svc_encode_diropres,
+ .pc_release = (kxdrproc_t) nfs3svc_release_fhandle2,
+ .pc_argsize = sizeof(struct nfsd3_diropargs),
+ .pc_ressize = sizeof(struct nfsd3_diropres),
+ .pc_cachetype = RC_NOCACHE,
+ .pc_xdrressize = ST+FH+pAT+pAT,
+ },
+ [NFS3PROC_ACCESS] = {
+ .pc_func = (svc_procfunc) nfsd3_proc_access,
+ .pc_decode = (kxdrproc_t) nfs3svc_decode_accessargs,
+ .pc_encode = (kxdrproc_t) nfs3svc_encode_accessres,
+ .pc_release = (kxdrproc_t) nfs3svc_release_fhandle,
+ .pc_argsize = sizeof(struct nfsd3_accessargs),
+ .pc_ressize = sizeof(struct nfsd3_accessres),
+ .pc_cachetype = RC_NOCACHE,
+ .pc_xdrressize = ST+pAT+1,
+ },
+ [NFS3PROC_READLINK] = {
+ .pc_func = (svc_procfunc) nfsd3_proc_readlink,
+ .pc_decode = (kxdrproc_t) nfs3svc_decode_readlinkargs,
+ .pc_encode = (kxdrproc_t) nfs3svc_encode_readlinkres,
+ .pc_release = (kxdrproc_t) nfs3svc_release_fhandle,
+ .pc_argsize = sizeof(struct nfsd3_readlinkargs),
+ .pc_ressize = sizeof(struct nfsd3_readlinkres),
+ .pc_cachetype = RC_NOCACHE,
+ .pc_xdrressize = ST+pAT+1+NFS3_MAXPATHLEN/4,
+ },
+ [NFS3PROC_READ] = {
+ .pc_func = (svc_procfunc) nfsd3_proc_read,
+ .pc_decode = (kxdrproc_t) nfs3svc_decode_readargs,
+ .pc_encode = (kxdrproc_t) nfs3svc_encode_readres,
+ .pc_release = (kxdrproc_t) nfs3svc_release_fhandle,
+ .pc_argsize = sizeof(struct nfsd3_readargs),
+ .pc_ressize = sizeof(struct nfsd3_readres),
+ .pc_cachetype = RC_NOCACHE,
+ .pc_xdrressize = ST+pAT+4+NFSSVC_MAXBLKSIZE/4,
+ },
+ [NFS3PROC_WRITE] = {
+ .pc_func = (svc_procfunc) nfsd3_proc_write,
+ .pc_decode = (kxdrproc_t) nfs3svc_decode_writeargs,
+ .pc_encode = (kxdrproc_t) nfs3svc_encode_writeres,
+ .pc_release = (kxdrproc_t) nfs3svc_release_fhandle,
+ .pc_argsize = sizeof(struct nfsd3_writeargs),
+ .pc_ressize = sizeof(struct nfsd3_writeres),
+ .pc_cachetype = RC_REPLBUFF,
+ .pc_xdrressize = ST+WC+4,
+ },
+ [NFS3PROC_CREATE] = {
+ .pc_func = (svc_procfunc) nfsd3_proc_create,
+ .pc_decode = (kxdrproc_t) nfs3svc_decode_createargs,
+ .pc_encode = (kxdrproc_t) nfs3svc_encode_createres,
+ .pc_release = (kxdrproc_t) nfs3svc_release_fhandle2,
+ .pc_argsize = sizeof(struct nfsd3_createargs),
+ .pc_ressize = sizeof(struct nfsd3_createres),
+ .pc_cachetype = RC_REPLBUFF,
+ .pc_xdrressize = ST+(1+FH+pAT)+WC,
+ },
+ [NFS3PROC_MKDIR] = {
+ .pc_func = (svc_procfunc) nfsd3_proc_mkdir,
+ .pc_decode = (kxdrproc_t) nfs3svc_decode_mkdirargs,
+ .pc_encode = (kxdrproc_t) nfs3svc_encode_createres,
+ .pc_release = (kxdrproc_t) nfs3svc_release_fhandle2,
+ .pc_argsize = sizeof(struct nfsd3_mkdirargs),
+ .pc_ressize = sizeof(struct nfsd3_createres),
+ .pc_cachetype = RC_REPLBUFF,
+ .pc_xdrressize = ST+(1+FH+pAT)+WC,
+ },
+ [NFS3PROC_SYMLINK] = {
+ .pc_func = (svc_procfunc) nfsd3_proc_symlink,
+ .pc_decode = (kxdrproc_t) nfs3svc_decode_symlinkargs,
+ .pc_encode = (kxdrproc_t) nfs3svc_encode_createres,
+ .pc_release = (kxdrproc_t) nfs3svc_release_fhandle2,
+ .pc_argsize = sizeof(struct nfsd3_symlinkargs),
+ .pc_ressize = sizeof(struct nfsd3_createres),
+ .pc_cachetype = RC_REPLBUFF,
+ .pc_xdrressize = ST+(1+FH+pAT)+WC,
+ },
+ [NFS3PROC_MKNOD] = {
+ .pc_func = (svc_procfunc) nfsd3_proc_mknod,
+ .pc_decode = (kxdrproc_t) nfs3svc_decode_mknodargs,
+ .pc_encode = (kxdrproc_t) nfs3svc_encode_createres,
+ .pc_release = (kxdrproc_t) nfs3svc_release_fhandle2,
+ .pc_argsize = sizeof(struct nfsd3_mknodargs),
+ .pc_ressize = sizeof(struct nfsd3_createres),
+ .pc_cachetype = RC_REPLBUFF,
+ .pc_xdrressize = ST+(1+FH+pAT)+WC,
+ },
+ [NFS3PROC_REMOVE] = {
+ .pc_func = (svc_procfunc) nfsd3_proc_remove,
+ .pc_decode = (kxdrproc_t) nfs3svc_decode_diropargs,
+ .pc_encode = (kxdrproc_t) nfs3svc_encode_wccstatres,
+ .pc_release = (kxdrproc_t) nfs3svc_release_fhandle,
+ .pc_argsize = sizeof(struct nfsd3_diropargs),
+ .pc_ressize = sizeof(struct nfsd3_wccstatres),
+ .pc_cachetype = RC_REPLBUFF,
+ .pc_xdrressize = ST+WC,
+ },
+ [NFS3PROC_RMDIR] = {
+ .pc_func = (svc_procfunc) nfsd3_proc_rmdir,
+ .pc_decode = (kxdrproc_t) nfs3svc_decode_diropargs,
+ .pc_encode = (kxdrproc_t) nfs3svc_encode_wccstatres,
+ .pc_release = (kxdrproc_t) nfs3svc_release_fhandle,
+ .pc_argsize = sizeof(struct nfsd3_diropargs),
+ .pc_ressize = sizeof(struct nfsd3_wccstatres),
+ .pc_cachetype = RC_REPLBUFF,
+ .pc_xdrressize = ST+WC,
+ },
+ [NFS3PROC_RENAME] = {
+ .pc_func = (svc_procfunc) nfsd3_proc_rename,
+ .pc_decode = (kxdrproc_t) nfs3svc_decode_renameargs,
+ .pc_encode = (kxdrproc_t) nfs3svc_encode_renameres,
+ .pc_release = (kxdrproc_t) nfs3svc_release_fhandle2,
+ .pc_argsize = sizeof(struct nfsd3_renameargs),
+ .pc_ressize = sizeof(struct nfsd3_renameres),
+ .pc_cachetype = RC_REPLBUFF,
+ .pc_xdrressize = ST+WC+WC,
+ },
+ [NFS3PROC_LINK] = {
+ .pc_func = (svc_procfunc) nfsd3_proc_link,
+ .pc_decode = (kxdrproc_t) nfs3svc_decode_linkargs,
+ .pc_encode = (kxdrproc_t) nfs3svc_encode_linkres,
+ .pc_release = (kxdrproc_t) nfs3svc_release_fhandle2,
+ .pc_argsize = sizeof(struct nfsd3_linkargs),
+ .pc_ressize = sizeof(struct nfsd3_linkres),
+ .pc_cachetype = RC_REPLBUFF,
+ .pc_xdrressize = ST+pAT+WC,
+ },
+ [NFS3PROC_READDIR] = {
+ .pc_func = (svc_procfunc) nfsd3_proc_readdir,
+ .pc_decode = (kxdrproc_t) nfs3svc_decode_readdirargs,
+ .pc_encode = (kxdrproc_t) nfs3svc_encode_readdirres,
+ .pc_release = (kxdrproc_t) nfs3svc_release_fhandle,
+ .pc_argsize = sizeof(struct nfsd3_readdirargs),
+ .pc_ressize = sizeof(struct nfsd3_readdirres),
+ .pc_cachetype = RC_NOCACHE,
+ },
+ [NFS3PROC_READDIRPLUS] = {
+ .pc_func = (svc_procfunc) nfsd3_proc_readdirplus,
+ .pc_decode = (kxdrproc_t) nfs3svc_decode_readdirplusargs,
+ .pc_encode = (kxdrproc_t) nfs3svc_encode_readdirres,
+ .pc_release = (kxdrproc_t) nfs3svc_release_fhandle,
+ .pc_argsize = sizeof(struct nfsd3_readdirplusargs),
+ .pc_ressize = sizeof(struct nfsd3_readdirres),
+ .pc_cachetype = RC_NOCACHE,
+ },
+ [NFS3PROC_FSSTAT] = {
+ .pc_func = (svc_procfunc) nfsd3_proc_fsstat,
+ .pc_decode = (kxdrproc_t) nfs3svc_decode_fhandleargs,
+ .pc_encode = (kxdrproc_t) nfs3svc_encode_fsstatres,
+ .pc_argsize = sizeof(struct nfsd3_fhandleargs),
+ .pc_ressize = sizeof(struct nfsd3_fsstatres),
+ .pc_cachetype = RC_NOCACHE,
+ .pc_xdrressize = ST+pAT+2*6+1,
+ },
+ [NFS3PROC_FSINFO] = {
+ .pc_func = (svc_procfunc) nfsd3_proc_fsinfo,
+ .pc_decode = (kxdrproc_t) nfs3svc_decode_fhandleargs,
+ .pc_encode = (kxdrproc_t) nfs3svc_encode_fsinfores,
+ .pc_argsize = sizeof(struct nfsd3_fhandleargs),
+ .pc_ressize = sizeof(struct nfsd3_fsinfores),
+ .pc_cachetype = RC_NOCACHE,
+ .pc_xdrressize = ST+pAT+12,
+ },
+ [NFS3PROC_PATHCONF] = {
+ .pc_func = (svc_procfunc) nfsd3_proc_pathconf,
+ .pc_decode = (kxdrproc_t) nfs3svc_decode_fhandleargs,
+ .pc_encode = (kxdrproc_t) nfs3svc_encode_pathconfres,
+ .pc_argsize = sizeof(struct nfsd3_fhandleargs),
+ .pc_ressize = sizeof(struct nfsd3_pathconfres),
+ .pc_cachetype = RC_NOCACHE,
+ .pc_xdrressize = ST+pAT+6,
+ },
+ [NFS3PROC_COMMIT] = {
+ .pc_func = (svc_procfunc) nfsd3_proc_commit,
+ .pc_decode = (kxdrproc_t) nfs3svc_decode_commitargs,
+ .pc_encode = (kxdrproc_t) nfs3svc_encode_commitres,
+ .pc_release = (kxdrproc_t) nfs3svc_release_fhandle,
+ .pc_argsize = sizeof(struct nfsd3_commitargs),
+ .pc_ressize = sizeof(struct nfsd3_commitres),
+ .pc_cachetype = RC_NOCACHE,
+ .pc_xdrressize = ST+WC+2,
+ },
};
struct svc_version nfsd_version3 = {
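Editor's note: the nfsd_procedures3 table is converted from positional PROC() macro rows to C99 designated initializers keyed by the NFS3PROC_* numbers, which is why the old nfs3svc_decode_voidargs/nfs3svc_release_void NULL placeholders could be dropped: unspecified fields of a designated entry simply stay zero. A minimal illustration of the idiom (types and names made up for the example):

#include <stdio.h>

struct proc_entry {
	const char *name;
	int cachetype;
};

enum { PROC_NULL, PROC_GETATTR, PROC_SETATTR, PROC_MAX };

static const struct proc_entry procs[PROC_MAX] = {
	[PROC_NULL]    = { .name = "null",    .cachetype = 0 },
	[PROC_GETATTR] = { .name = "getattr", .cachetype = 0 },
	[PROC_SETATTR] = { .name = "setattr", .cachetype = 1 },
};

int main(void)
{
	for (int i = 0; i < PROC_MAX; i++)
		printf("%d: %s (cachetype %d)\n", i, procs[i].name, procs[i].cachetype);
	return 0;
}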
diff --git a/fs/nfsd/nfs3xdr.c b/fs/nfsd/nfs3xdr.c
index 17d0dd9..01d4ec1 100644
--- a/fs/nfsd/nfs3xdr.c
+++ b/fs/nfsd/nfs3xdr.c
@@ -272,6 +272,7 @@
err = vfs_getattr(fhp->fh_export->ex_path.mnt, fhp->fh_dentry,
&fhp->fh_post_attr);
+ fhp->fh_post_change = fhp->fh_dentry->d_inode->i_version;
if (err)
fhp->fh_post_saved = 0;
else
diff --git a/fs/nfsd/nfs4callback.c b/fs/nfsd/nfs4callback.c
index 290289b..3fd23f7 100644
--- a/fs/nfsd/nfs4callback.c
+++ b/fs/nfsd/nfs4callback.c
@@ -140,8 +140,10 @@
int status;
u32 ident;
u32 nops;
+ __be32 *nops_p;
+ u32 minorversion;
u32 taglen;
- char * tag;
+ char *tag;
};
static struct {
@@ -201,33 +203,39 @@
* XDR encode
*/
-static int
+static void
encode_cb_compound_hdr(struct xdr_stream *xdr, struct nfs4_cb_compound_hdr *hdr)
{
__be32 * p;
RESERVE_SPACE(16);
WRITE32(0); /* tag length is always 0 */
- WRITE32(NFS4_MINOR_VERSION);
+ WRITE32(hdr->minorversion);
WRITE32(hdr->ident);
+ hdr->nops_p = p;
WRITE32(hdr->nops);
- return 0;
}
-static int
-encode_cb_recall(struct xdr_stream *xdr, struct nfs4_cb_recall *cb_rec)
+static void encode_cb_nops(struct nfs4_cb_compound_hdr *hdr)
+{
+ *hdr->nops_p = htonl(hdr->nops);
+}
+
+static void
+encode_cb_recall(struct xdr_stream *xdr, struct nfs4_delegation *dp,
+ struct nfs4_cb_compound_hdr *hdr)
{
__be32 *p;
- int len = cb_rec->cbr_fh.fh_size;
+ int len = dp->dl_fh.fh_size;
- RESERVE_SPACE(12+sizeof(cb_rec->cbr_stateid) + len);
+ RESERVE_SPACE(12+sizeof(dp->dl_stateid) + len);
WRITE32(OP_CB_RECALL);
- WRITE32(cb_rec->cbr_stateid.si_generation);
- WRITEMEM(&cb_rec->cbr_stateid.si_opaque, sizeof(stateid_opaque_t));
- WRITE32(cb_rec->cbr_trunc);
+ WRITE32(dp->dl_stateid.si_generation);
+ WRITEMEM(&dp->dl_stateid.si_opaque, sizeof(stateid_opaque_t));
+ WRITE32(0); /* truncate optimization not implemented */
WRITE32(len);
- WRITEMEM(&cb_rec->cbr_fh.fh_base, len);
- return 0;
+ WRITEMEM(&dp->dl_fh.fh_base, len);
+ hdr->nops++;
}
static int
@@ -241,17 +249,18 @@
}
static int
-nfs4_xdr_enc_cb_recall(struct rpc_rqst *req, __be32 *p, struct nfs4_cb_recall *args)
+nfs4_xdr_enc_cb_recall(struct rpc_rqst *req, __be32 *p, struct nfs4_delegation *args)
{
struct xdr_stream xdr;
struct nfs4_cb_compound_hdr hdr = {
- .ident = args->cbr_ident,
- .nops = 1,
+ .ident = args->dl_ident,
};
xdr_init_encode(&xdr, &req->rq_snd_buf, p);
encode_cb_compound_hdr(&xdr, &hdr);
- return (encode_cb_recall(&xdr, args));
+ encode_cb_recall(&xdr, args, &hdr);
+ encode_cb_nops(&hdr);
+ return 0;
}
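Editor's note: nfs4_xdr_enc_cb_recall() above shows the new op-counting scheme: encode_cb_compound_hdr() writes a placeholder for nops and remembers its location in hdr->nops_p, each operation encoder (encode_cb_recall() here) increments hdr->nops, and encode_cb_nops() back-patches the real count once everything has been emitted. A stand-alone sketch of that reserve-and-patch pattern, with a hypothetical flat buffer standing in for the kernel's xdr_stream:

#include <arpa/inet.h>
#include <stdint.h>
#include <stddef.h>
#include <string.h>

struct enc {
	uint8_t buf[64];
	size_t off;
	uint8_t *nops_p;	/* points at the reserved nops word */
	uint32_t nops;
};

static void put_u32(struct enc *e, uint32_t v)
{
	uint32_t be = htonl(v);

	memcpy(e->buf + e->off, &be, 4);
	e->off += 4;
}

static void encode_hdr(struct enc *e)
{
	put_u32(e, 0);			/* tag length */
	put_u32(e, 0);			/* minorversion */
	put_u32(e, 0);			/* ident */
	e->nops_p = e->buf + e->off;	/* remember where nops will live */
	put_u32(e, 0);			/* nops placeholder */
}

static void encode_op(struct enc *e, uint32_t opnum)
{
	put_u32(e, opnum);		/* a real encoder writes the op body too */
	e->nops++;
}

static void encode_nops(struct enc *e)
{
	uint32_t be = htonl(e->nops);

	memcpy(e->nops_p, &be, 4);	/* back-patch the final count */
}

int main(void)
{
	struct enc e = { .off = 0 };

	encode_hdr(&e);
	encode_op(&e, 4);		/* e.g. one CB_RECALL-style op */
	encode_nops(&e);
	return 0;
}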
@@ -358,18 +367,21 @@
.pipe_dir_name = "/nfsd4_cb",
};
+static int max_cb_time(void)
+{
+ return max(NFSD_LEASE_TIME/10, (time_t)1) * HZ;
+}
+
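Editor's note: max_cb_time() replaces the old exponential retry settings visible below in the removed lines (initial timeout of a quarter of the lease period, capped at half a lease, five retries) with a single attempt whose timeout is one tenth of the lease period, clamped to at least one second. As a worked example with a hypothetical 90-second lease: max(90/10, 1) * HZ = 9 * HZ, i.e. nine seconds per callback RPC.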
/* Reference counting, callback cleanup, etc., all look racy as heck.
* And why is cb_set an atomic? */
-static struct rpc_clnt *setup_callback_client(struct nfs4_client *clp)
+int setup_callback_client(struct nfs4_client *clp)
{
struct sockaddr_in addr;
- struct nfs4_callback *cb = &clp->cl_callback;
+ struct nfs4_cb_conn *cb = &clp->cl_cb_conn;
struct rpc_timeout timeparms = {
- .to_initval = (NFSD_LEASE_TIME/4) * HZ,
- .to_retries = 5,
- .to_maxval = (NFSD_LEASE_TIME/2) * HZ,
- .to_exponential = 1,
+ .to_initval = max_cb_time(),
+ .to_retries = 0,
};
struct rpc_create_args args = {
.protocol = IPPROTO_TCP,
@@ -386,7 +398,7 @@
struct rpc_clnt *client;
if (!clp->cl_principal && (clp->cl_flavor >= RPC_AUTH_GSS_KRB5))
- return ERR_PTR(-EINVAL);
+ return -EINVAL;
/* Initialize address */
memset(&addr, 0, sizeof(addr));
@@ -396,48 +408,77 @@
/* Create RPC client */
client = rpc_create(&args);
- if (IS_ERR(client))
+ if (IS_ERR(client)) {
dprintk("NFSD: couldn't create callback client: %ld\n",
PTR_ERR(client));
- return client;
+ return PTR_ERR(client);
+ }
+ cb->cb_client = client;
+ return 0;
}
-static int do_probe_callback(void *data)
+static void warn_no_callback_path(struct nfs4_client *clp, int reason)
{
- struct nfs4_client *clp = data;
- struct nfs4_callback *cb = &clp->cl_callback;
+ dprintk("NFSD: warning: no callback path to client %.*s: error %d\n",
+ (int)clp->cl_name.len, clp->cl_name.data, reason);
+}
+
+static void nfsd4_cb_probe_done(struct rpc_task *task, void *calldata)
+{
+ struct nfs4_client *clp = calldata;
+
+ if (task->tk_status)
+ warn_no_callback_path(clp, task->tk_status);
+ else
+ atomic_set(&clp->cl_cb_conn.cb_set, 1);
+ put_nfs4_client(clp);
+}
+
+static const struct rpc_call_ops nfsd4_cb_probe_ops = {
+ .rpc_call_done = nfsd4_cb_probe_done,
+};
+
+static struct rpc_cred *lookup_cb_cred(struct nfs4_cb_conn *cb)
+{
+ struct auth_cred acred = {
+ .machine_cred = 1
+ };
+
+ /*
+ * Note in the gss case this doesn't actually have to wait for a
+ * gss upcall (or any calls to the client); this just creates a
+ * non-uptodate cred which the rpc state machine will fill in with
+ * a refresh_upcall later.
+ */
+ return rpcauth_lookup_credcache(cb->cb_client->cl_auth, &acred,
+ RPCAUTH_LOOKUP_NEW);
+}
+
+void do_probe_callback(struct nfs4_client *clp)
+{
+ struct nfs4_cb_conn *cb = &clp->cl_cb_conn;
struct rpc_message msg = {
.rpc_proc = &nfs4_cb_procedures[NFSPROC4_CLNT_CB_NULL],
.rpc_argp = clp,
};
- struct rpc_clnt *client;
+ struct rpc_cred *cred;
int status;
- client = setup_callback_client(clp);
- if (IS_ERR(client)) {
- status = PTR_ERR(client);
- dprintk("NFSD: couldn't create callback client: %d\n",
- status);
- goto out_err;
+ cred = lookup_cb_cred(cb);
+ if (IS_ERR(cred)) {
+ status = PTR_ERR(cred);
+ goto out;
}
-
- status = rpc_call_sync(client, &msg, RPC_TASK_SOFT);
-
- if (status)
- goto out_release_client;
-
- cb->cb_client = client;
- atomic_set(&cb->cb_set, 1);
- put_nfs4_client(clp);
- return 0;
-out_release_client:
- rpc_shutdown_client(client);
-out_err:
- dprintk("NFSD: warning: no callback path to client %.*s: error %d\n",
- (int)clp->cl_name.len, clp->cl_name.data, status);
- put_nfs4_client(clp);
- return 0;
+ cb->cb_cred = cred;
+ msg.rpc_cred = cb->cb_cred;
+ status = rpc_call_async(cb->cb_client, &msg, RPC_TASK_SOFT,
+ &nfsd4_cb_probe_ops, (void *)clp);
+out:
+ if (status) {
+ warn_no_callback_path(clp, status);
+ put_nfs4_client(clp);
+ }
}
/*
@@ -446,21 +487,65 @@
void
nfsd4_probe_callback(struct nfs4_client *clp)
{
- struct task_struct *t;
+ int status;
- BUG_ON(atomic_read(&clp->cl_callback.cb_set));
+ BUG_ON(atomic_read(&clp->cl_cb_conn.cb_set));
+
+ status = setup_callback_client(clp);
+ if (status) {
+ warn_no_callback_path(clp, status);
+ return;
+ }
/* the task holds a reference to the nfs4_client struct */
atomic_inc(&clp->cl_count);
- t = kthread_run(do_probe_callback, clp, "nfs4_cb_probe");
-
- if (IS_ERR(t))
- atomic_dec(&clp->cl_count);
-
- return;
+ do_probe_callback(clp);
}
+static void nfsd4_cb_recall_done(struct rpc_task *task, void *calldata)
+{
+ struct nfs4_delegation *dp = calldata;
+ struct nfs4_client *clp = dp->dl_client;
+
+ switch (task->tk_status) {
+ case -EIO:
+ /* Network partition? */
+ atomic_set(&clp->cl_cb_conn.cb_set, 0);
+ warn_no_callback_path(clp, task->tk_status);
+ case -EBADHANDLE:
+ case -NFS4ERR_BAD_STATEID:
+ /* Race: client probably got cb_recall
+ * before open reply granting delegation */
+ break;
+ default:
+ /* success, or error we can't handle */
+ return;
+ }
+ if (dp->dl_retries--) {
+ rpc_delay(task, 2*HZ);
+ task->tk_status = 0;
+ rpc_restart_call(task);
+ } else {
+ atomic_set(&clp->cl_cb_conn.cb_set, 0);
+ warn_no_callback_path(clp, task->tk_status);
+ }
+}
+
+static void nfsd4_cb_recall_release(void *calldata)
+{
+ struct nfs4_delegation *dp = calldata;
+ struct nfs4_client *clp = dp->dl_client;
+
+ nfs4_put_delegation(dp);
+ put_nfs4_client(clp);
+}
+
+static const struct rpc_call_ops nfsd4_cb_recall_ops = {
+ .rpc_call_done = nfsd4_cb_recall_done,
+ .rpc_release = nfsd4_cb_recall_release,
+};
+
/*
* called with dp->dl_count inc'ed.
*/
@@ -468,41 +553,19 @@
nfsd4_cb_recall(struct nfs4_delegation *dp)
{
struct nfs4_client *clp = dp->dl_client;
- struct rpc_clnt *clnt = clp->cl_callback.cb_client;
- struct nfs4_cb_recall *cbr = &dp->dl_recall;
+ struct rpc_clnt *clnt = clp->cl_cb_conn.cb_client;
struct rpc_message msg = {
.rpc_proc = &nfs4_cb_procedures[NFSPROC4_CLNT_CB_RECALL],
- .rpc_argp = cbr,
+ .rpc_argp = dp,
+ .rpc_cred = clp->cl_cb_conn.cb_cred
};
- int retries = 1;
- int status = 0;
+ int status;
- cbr->cbr_trunc = 0; /* XXX need to implement truncate optimization */
- cbr->cbr_dp = dp;
-
- status = rpc_call_sync(clnt, &msg, RPC_TASK_SOFT);
- while (retries--) {
- switch (status) {
- case -EIO:
- /* Network partition? */
- atomic_set(&clp->cl_callback.cb_set, 0);
- case -EBADHANDLE:
- case -NFS4ERR_BAD_STATEID:
- /* Race: client probably got cb_recall
- * before open reply granting delegation */
- break;
- default:
- goto out_put_cred;
- }
- ssleep(2);
- status = rpc_call_sync(clnt, &msg, RPC_TASK_SOFT);
+ dp->dl_retries = 1;
+ status = rpc_call_async(clnt, &msg, RPC_TASK_SOFT,
+ &nfsd4_cb_recall_ops, dp);
+ if (status) {
+ put_nfs4_client(clp);
+ nfs4_put_delegation(dp);
}
-out_put_cred:
- /*
- * Success or failure, now we're either waiting for lease expiration
- * or deleg_return.
- */
- put_nfs4_client(clp);
- nfs4_put_delegation(dp);
- return;
}
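
For reference, a minimal userspace sketch of the callback timeout policy introduced above: the probe and recall RPCs now get a single soft attempt whose timeout is a tenth of the lease period, but never less than one second. NFSD_LEASE_TIME and HZ below are illustrative stand-ins, not the kernel's values.

/*
 * Sketch of max_cb_time(): one soft RPC attempt, timeout of a tenth
 * of the lease period, floored at one second, expressed in jiffies.
 */
#include <stdio.h>

#define NFSD_LEASE_TIME	90	/* assumed lease, in seconds */
#define HZ		100	/* assumed tick rate */

static long max_cb_time(void)
{
	long t = NFSD_LEASE_TIME / 10;

	return (t > 1 ? t : 1) * HZ;
}

int main(void)
{
	printf("callback timeout: %ld jiffies (%ld s), retries: 0\n",
	       max_cb_time(), max_cb_time() / HZ);
	return 0;
}
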
diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
index b2883e9..7c88017 100644
--- a/fs/nfsd/nfs4proc.c
+++ b/fs/nfsd/nfs4proc.c
@@ -51,6 +51,78 @@
#define NFSDDBG_FACILITY NFSDDBG_PROC
+static u32 nfsd_attrmask[] = {
+ NFSD_WRITEABLE_ATTRS_WORD0,
+ NFSD_WRITEABLE_ATTRS_WORD1,
+ NFSD_WRITEABLE_ATTRS_WORD2
+};
+
+static u32 nfsd41_ex_attrmask[] = {
+ NFSD_SUPPATTR_EXCLCREAT_WORD0,
+ NFSD_SUPPATTR_EXCLCREAT_WORD1,
+ NFSD_SUPPATTR_EXCLCREAT_WORD2
+};
+
+static __be32
+check_attr_support(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
+ u32 *bmval, u32 *writable)
+{
+ struct dentry *dentry = cstate->current_fh.fh_dentry;
+ struct svc_export *exp = cstate->current_fh.fh_export;
+
+ /*
+	 * Check whether the attributes in bmval are supported by the NFSv4 server.
+ * According to spec, unsupported attributes return ERR_ATTRNOTSUPP.
+ */
+ if ((bmval[0] & ~nfsd_suppattrs0(cstate->minorversion)) ||
+ (bmval[1] & ~nfsd_suppattrs1(cstate->minorversion)) ||
+ (bmval[2] & ~nfsd_suppattrs2(cstate->minorversion)))
+ return nfserr_attrnotsupp;
+
+ /*
+	 * Check whether FATTR4_WORD0_ACL & FATTR4_WORD0_FS_LOCATIONS can be
+	 * supported in the current environment.
+ */
+ if (bmval[0] & FATTR4_WORD0_ACL) {
+ if (!IS_POSIXACL(dentry->d_inode))
+ return nfserr_attrnotsupp;
+ }
+ if (bmval[0] & FATTR4_WORD0_FS_LOCATIONS) {
+ if (exp->ex_fslocs.locations == NULL)
+ return nfserr_attrnotsupp;
+ }
+
+ /*
+ * According to spec, read-only attributes return ERR_INVAL.
+ */
+ if (writable) {
+ if ((bmval[0] & ~writable[0]) || (bmval[1] & ~writable[1]) ||
+ (bmval[2] & ~writable[2]))
+ return nfserr_inval;
+ }
+
+ return nfs_ok;
+}
+
+static __be32
+nfsd4_check_open_attributes(struct svc_rqst *rqstp,
+ struct nfsd4_compound_state *cstate, struct nfsd4_open *open)
+{
+ __be32 status = nfs_ok;
+
+ if (open->op_create == NFS4_OPEN_CREATE) {
+ if (open->op_createmode == NFS4_CREATE_UNCHECKED
+ || open->op_createmode == NFS4_CREATE_GUARDED)
+ status = check_attr_support(rqstp, cstate,
+ open->op_bmval, nfsd_attrmask);
+ else if (open->op_createmode == NFS4_CREATE_EXCLUSIVE4_1)
+ status = check_attr_support(rqstp, cstate,
+ open->op_bmval, nfsd41_ex_attrmask);
+ }
+
+ return status;
+}
+
static inline void
fh_dup2(struct svc_fh *dst, struct svc_fh *src)
{
@@ -225,6 +297,10 @@
if (status)
goto out;
+ status = nfsd4_check_open_attributes(rqstp, cstate, open);
+ if (status)
+ goto out;
+
/* Openowner is now set, so sequence id will get bumped. Now we need
* these checks before we do any creates: */
status = nfserr_grace;
@@ -395,6 +471,11 @@
if (status)
return status;
+ status = check_attr_support(rqstp, cstate, create->cr_bmval,
+ nfsd_attrmask);
+ if (status)
+ return status;
+
switch (create->cr_type) {
case NF4LNK:
/* ugh! we have to null-terminate the linktext, or
@@ -689,6 +770,12 @@
if (status)
return status;
status = nfs_ok;
+
+ status = check_attr_support(rqstp, cstate, setattr->sa_bmval,
+ nfsd_attrmask);
+ if (status)
+ goto out;
+
if (setattr->sa_acl != NULL)
status = nfsd4_set_nfs4_acl(rqstp, &cstate->current_fh,
setattr->sa_acl);
@@ -763,10 +850,10 @@
if (status)
return status;
- if ((verify->ve_bmval[0] & ~nfsd_suppattrs0(cstate->minorversion))
- || (verify->ve_bmval[1] & ~nfsd_suppattrs1(cstate->minorversion))
- || (verify->ve_bmval[2] & ~nfsd_suppattrs2(cstate->minorversion)))
- return nfserr_attrnotsupp;
+ status = check_attr_support(rqstp, cstate, verify->ve_bmval, NULL);
+ if (status)
+ return status;
+
if ((verify->ve_bmval[0] & FATTR4_WORD0_RDATTR_ERROR)
|| (verify->ve_bmval[1] & NFSD_WRITEONLY_ATTRS_WORD1))
return nfserr_inval;
@@ -1226,24 +1313,9 @@
return "unknown_operation";
}
-#define nfs4svc_decode_voidargs NULL
-#define nfs4svc_release_void NULL
#define nfsd4_voidres nfsd4_voidargs
-#define nfs4svc_release_compound NULL
struct nfsd4_voidargs { int dummy; };
-#define PROC(name, argt, rest, relt, cache, respsize) \
- { (svc_procfunc) nfsd4_proc_##name, \
- (kxdrproc_t) nfs4svc_decode_##argt##args, \
- (kxdrproc_t) nfs4svc_encode_##rest##res, \
- (kxdrproc_t) nfs4svc_release_##relt, \
- sizeof(struct nfsd4_##argt##args), \
- sizeof(struct nfsd4_##rest##res), \
- 0, \
- cache, \
- respsize, \
- }
-
/*
* TODO: At the present time, the NFSv4 server does not do XID caching
* of requests. Implementing XID caching would not be a serious problem,
@@ -1255,8 +1327,23 @@
* better XID's.
*/
static struct svc_procedure nfsd_procedures4[2] = {
- PROC(null, void, void, void, RC_NOCACHE, 1),
- PROC(compound, compound, compound, compound, RC_NOCACHE, NFSD_BUFSIZE/4)
+ [NFSPROC4_NULL] = {
+ .pc_func = (svc_procfunc) nfsd4_proc_null,
+ .pc_encode = (kxdrproc_t) nfs4svc_encode_voidres,
+ .pc_argsize = sizeof(struct nfsd4_voidargs),
+ .pc_ressize = sizeof(struct nfsd4_voidres),
+ .pc_cachetype = RC_NOCACHE,
+ .pc_xdrressize = 1,
+ },
+ [NFSPROC4_COMPOUND] = {
+ .pc_func = (svc_procfunc) nfsd4_proc_compound,
+ .pc_decode = (kxdrproc_t) nfs4svc_decode_compoundargs,
+ .pc_encode = (kxdrproc_t) nfs4svc_encode_compoundres,
+ .pc_argsize = sizeof(struct nfsd4_compoundargs),
+ .pc_ressize = sizeof(struct nfsd4_compoundres),
+ .pc_cachetype = RC_NOCACHE,
+ .pc_xdrressize = NFSD_BUFSIZE/4,
+ },
};
struct svc_version nfsd_version4 = {
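
A standalone sketch of the three-word bitmap validation that check_attr_support() now performs in nfs4proc.c: bits outside the supported set yield ATTRNOTSUPP, and supported but read-only bits yield INVAL when a writable mask is given. The mask and request values in main() are invented for illustration and are not the real NFSD_WRITEABLE_ATTRS_* constants.

#include <stdio.h>

enum { OK = 0, ATTRNOTSUPP, INVAL };

static int check_attrs(const unsigned *bmval, const unsigned *supported,
		       const unsigned *writable)
{
	int i;

	for (i = 0; i < 3; i++)
		if (bmval[i] & ~supported[i])
			return ATTRNOTSUPP;	/* attribute not supported at all */
	if (writable)
		for (i = 0; i < 3; i++)
			if (bmval[i] & ~writable[i])
				return INVAL;	/* supported, but read-only */
	return OK;
}

int main(void)
{
	unsigned supported[3] = { 0x000000ff, 0x0000000f, 0x00000000 };
	unsigned writable[3]  = { 0x0000000f, 0x0000000f, 0x00000000 };
	unsigned setattr[3]   = { 0x00000030, 0x00000000, 0x00000000 };

	/* bits 0x10 and 0x20 are supported but not writable -> INVAL (2) */
	printf("setattr check: %d\n", check_attrs(setattr, supported, writable));
	return 0;
}
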
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index 3b711f5..980a216 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -182,7 +182,7 @@
{
struct nfs4_delegation *dp;
struct nfs4_file *fp = stp->st_file;
- struct nfs4_callback *cb = &stp->st_stateowner->so_client->cl_callback;
+ struct nfs4_cb_conn *cb = &stp->st_stateowner->so_client->cl_cb_conn;
dprintk("NFSD alloc_init_deleg\n");
if (fp->fi_had_conflict)
@@ -203,10 +203,8 @@
get_file(stp->st_vfs_file);
dp->dl_vfs_file = stp->st_vfs_file;
dp->dl_type = type;
- dp->dl_recall.cbr_dp = NULL;
- dp->dl_recall.cbr_ident = cb->cb_ident;
- dp->dl_recall.cbr_trunc = 0;
- dp->dl_stateid.si_boot = boot_time;
+ dp->dl_ident = cb->cb_ident;
+ dp->dl_stateid.si_boot = get_seconds();
dp->dl_stateid.si_stateownerid = current_delegid++;
dp->dl_stateid.si_fileid = 0;
dp->dl_stateid.si_generation = 0;
@@ -427,6 +425,11 @@
{
int status = 0, np = fchan->maxreqs * NFSD_PAGES_PER_SLOT;
+ if (fchan->maxreqs < 1)
+ return nfserr_inval;
+ else if (fchan->maxreqs > NFSD_MAX_SLOTS_PER_SESSION)
+ fchan->maxreqs = NFSD_MAX_SLOTS_PER_SESSION;
+
spin_lock(&nfsd_serv->sv_lock);
if (np + nfsd_serv->sv_drc_pages_used > nfsd_serv->sv_drc_max_pages)
np = nfsd_serv->sv_drc_max_pages - nfsd_serv->sv_drc_pages_used;
@@ -446,8 +449,8 @@
* fchan holds the client values on input, and the server values on output
*/
static int init_forechannel_attrs(struct svc_rqst *rqstp,
- struct nfsd4_session *session,
- struct nfsd4_channel_attrs *fchan)
+ struct nfsd4_channel_attrs *session_fchan,
+ struct nfsd4_channel_attrs *fchan)
{
int status = 0;
__u32 maxcount = svc_max_payload(rqstp);
@@ -457,21 +460,21 @@
/* Use the client's max request and max response size if possible */
if (fchan->maxreq_sz > maxcount)
fchan->maxreq_sz = maxcount;
- session->se_fmaxreq_sz = fchan->maxreq_sz;
+ session_fchan->maxreq_sz = fchan->maxreq_sz;
if (fchan->maxresp_sz > maxcount)
fchan->maxresp_sz = maxcount;
- session->se_fmaxresp_sz = fchan->maxresp_sz;
+ session_fchan->maxresp_sz = fchan->maxresp_sz;
/* Set the max response cached size our default which is
* a multiple of PAGE_SIZE and small */
- session->se_fmaxresp_cached = NFSD_PAGES_PER_SLOT * PAGE_SIZE;
- fchan->maxresp_cached = session->se_fmaxresp_cached;
+ session_fchan->maxresp_cached = NFSD_PAGES_PER_SLOT * PAGE_SIZE;
+ fchan->maxresp_cached = session_fchan->maxresp_cached;
/* Use the client's maxops if possible */
if (fchan->maxops > NFSD_MAX_OPS_PER_COMPOUND)
fchan->maxops = NFSD_MAX_OPS_PER_COMPOUND;
- session->se_fmaxops = fchan->maxops;
+ session_fchan->maxops = fchan->maxops;
/* try to use the client requested number of slots */
if (fchan->maxreqs > NFSD_MAX_SLOTS_PER_SESSION)
@@ -483,7 +486,7 @@
*/
status = set_forechannel_maxreqs(fchan);
- session->se_fnumslots = fchan->maxreqs;
+ session_fchan->maxreqs = fchan->maxreqs;
return status;
}
@@ -497,12 +500,14 @@
memset(&tmp, 0, sizeof(tmp));
/* FIXME: For now, we just accept the client back channel attributes. */
- status = init_forechannel_attrs(rqstp, &tmp, &cses->fore_channel);
+ tmp.se_bchannel = cses->back_channel;
+ status = init_forechannel_attrs(rqstp, &tmp.se_fchannel,
+ &cses->fore_channel);
if (status)
goto out;
/* allocate struct nfsd4_session and slot table in one piece */
- slotsize = tmp.se_fnumslots * sizeof(struct nfsd4_slot);
+ slotsize = tmp.se_fchannel.maxreqs * sizeof(struct nfsd4_slot);
new = kzalloc(sizeof(*new) + slotsize, GFP_KERNEL);
if (!new)
goto out;
@@ -576,7 +581,7 @@
int i;
ses = container_of(kref, struct nfsd4_session, se_ref);
- for (i = 0; i < ses->se_fnumslots; i++) {
+ for (i = 0; i < ses->se_fchannel.maxreqs; i++) {
struct nfsd4_cache_entry *e = &ses->se_slots[i].sl_cache_entry;
nfsd4_release_respages(e->ce_respages, e->ce_resused);
}
@@ -632,16 +637,20 @@
static void
shutdown_callback_client(struct nfs4_client *clp)
{
- struct rpc_clnt *clnt = clp->cl_callback.cb_client;
+ struct rpc_clnt *clnt = clp->cl_cb_conn.cb_client;
if (clnt) {
/*
* Callback threads take a reference on the client, so there
* should be no outstanding callbacks at this point.
*/
- clp->cl_callback.cb_client = NULL;
+ clp->cl_cb_conn.cb_client = NULL;
rpc_shutdown_client(clnt);
}
+ if (clp->cl_cb_conn.cb_cred) {
+ put_rpccred(clp->cl_cb_conn.cb_cred);
+ clp->cl_cb_conn.cb_cred = NULL;
+ }
}
static inline void
@@ -714,7 +723,7 @@
return NULL;
memcpy(clp->cl_recdir, recdir, HEXDIR_LEN);
atomic_set(&clp->cl_count, 1);
- atomic_set(&clp->cl_callback.cb_set, 0);
+ atomic_set(&clp->cl_cb_conn.cb_set, 0);
INIT_LIST_HEAD(&clp->cl_idhash);
INIT_LIST_HEAD(&clp->cl_strhash);
INIT_LIST_HEAD(&clp->cl_openowners);
@@ -966,7 +975,7 @@
static void
gen_callback(struct nfs4_client *clp, struct nfsd4_setclientid *se)
{
- struct nfs4_callback *cb = &clp->cl_callback;
+ struct nfs4_cb_conn *cb = &clp->cl_cb_conn;
/* Currently, we only support tcp for the callback channel */
if ((se->se_callback_netid_len != 3) || memcmp((char *)se->se_callback_netid_val, "tcp", 3))
@@ -975,6 +984,7 @@
if ( !(parse_ipv4(se->se_callback_addr_len, se->se_callback_addr_val,
&cb->cb_addr, &cb->cb_port)))
goto out_err;
+ cb->cb_minorversion = 0;
cb->cb_prog = se->se_callback_prog;
cb->cb_ident = se->se_callback_ident;
return;
@@ -1128,7 +1138,7 @@
* is sent (lease renewal).
*/
if (seq && nfsd4_not_cached(resp)) {
- seq->maxslots = resp->cstate.session->se_fnumslots;
+ seq->maxslots = resp->cstate.session->se_fchannel.maxreqs;
return nfs_ok;
}
@@ -1238,12 +1248,6 @@
expire_client(conf);
goto out_new;
}
- if (ip_addr != conf->cl_addr &&
- !(exid->flags & EXCHGID4_FLAG_UPD_CONFIRMED_REC_A)) {
- /* Client collision. 18.35.4 case 3 */
- status = nfserr_clid_inuse;
- goto out;
- }
/*
* Set bit when the owner id and verifier map to an already
* confirmed client id (18.35.3).
@@ -1257,12 +1261,12 @@
copy_verf(conf, &verf);
new = conf;
goto out_copy;
- } else {
- /* 18.35.4 case 7 */
- if (exid->flags & EXCHGID4_FLAG_UPD_CONFIRMED_REC_A) {
- status = nfserr_noent;
- goto out;
- }
+ }
+
+ /* 18.35.4 case 7 */
+ if (exid->flags & EXCHGID4_FLAG_UPD_CONFIRMED_REC_A) {
+ status = nfserr_noent;
+ goto out;
}
unconf = find_unconfirmed_client_by_str(dname, strhashval, true);
@@ -1471,7 +1475,7 @@
goto out;
status = nfserr_badslot;
- if (seq->slotid >= session->se_fnumslots)
+ if (seq->slotid >= session->se_fchannel.maxreqs)
goto out;
slot = &session->se_slots[seq->slotid];
@@ -1686,9 +1690,7 @@
else {
/* XXX: We just turn off callbacks until we can handle
* change request correctly. */
- atomic_set(&conf->cl_callback.cb_set, 0);
- gen_confirm(conf);
- nfsd4_remove_clid_dir(unconf);
+ atomic_set(&conf->cl_cb_conn.cb_set, 0);
expire_client(unconf);
status = nfs_ok;
@@ -1882,7 +1884,7 @@
stp->st_stateowner = sop;
get_nfs4_file(fp);
stp->st_file = fp;
- stp->st_stateid.si_boot = boot_time;
+ stp->st_stateid.si_boot = get_seconds();
stp->st_stateid.si_stateownerid = sop->so_id;
stp->st_stateid.si_fileid = fp->fi_id;
stp->st_stateid.si_generation = 0;
@@ -2059,19 +2061,6 @@
}
/*
- * Recall a delegation
- */
-static int
-do_recall(void *__dp)
-{
- struct nfs4_delegation *dp = __dp;
-
- dp->dl_file->fi_had_conflict = true;
- nfsd4_cb_recall(dp);
- return 0;
-}
-
-/*
* Spawn a thread to perform a recall on the delegation represented
* by the lease (file_lock)
*
@@ -2082,8 +2071,7 @@
static
void nfsd_break_deleg_cb(struct file_lock *fl)
{
- struct nfs4_delegation *dp= (struct nfs4_delegation *)fl->fl_owner;
- struct task_struct *t;
+ struct nfs4_delegation *dp = (struct nfs4_delegation *)fl->fl_owner;
dprintk("NFSD nfsd_break_deleg_cb: dp %p fl %p\n",dp,fl);
if (!dp)
@@ -2111,16 +2099,8 @@
*/
fl->fl_break_time = 0;
- t = kthread_run(do_recall, dp, "%s", "nfs4_cb_recall");
- if (IS_ERR(t)) {
- struct nfs4_client *clp = dp->dl_client;
-
- printk(KERN_INFO "NFSD: Callback thread failed for "
- "for client (clientid %08x/%08x)\n",
- clp->cl_clientid.cl_boot, clp->cl_clientid.cl_id);
- put_nfs4_client(dp->dl_client);
- nfs4_put_delegation(dp);
- }
+ dp->dl_file->fi_had_conflict = true;
+ nfsd4_cb_recall(dp);
}
/*
@@ -2422,7 +2402,7 @@
{
struct nfs4_delegation *dp;
struct nfs4_stateowner *sop = stp->st_stateowner;
- struct nfs4_callback *cb = &sop->so_client->cl_callback;
+ struct nfs4_cb_conn *cb = &sop->so_client->cl_cb_conn;
struct file_lock fl, *flp = &fl;
int status, flag = 0;
@@ -2614,7 +2594,7 @@
renew_client(clp);
status = nfserr_cb_path_down;
if (!list_empty(&clp->cl_delegations)
- && !atomic_read(&clp->cl_callback.cb_set))
+ && !atomic_read(&clp->cl_cb_conn.cb_set))
goto out;
status = nfs_ok;
out:
@@ -2738,12 +2718,42 @@
static int
STALE_STATEID(stateid_t *stateid)
{
- if (stateid->si_boot == boot_time)
- return 0;
- dprintk("NFSD: stale stateid (%08x/%08x/%08x/%08x)!\n",
- stateid->si_boot, stateid->si_stateownerid, stateid->si_fileid,
- stateid->si_generation);
- return 1;
+ if (time_after((unsigned long)boot_time,
+ (unsigned long)stateid->si_boot)) {
+ dprintk("NFSD: stale stateid (%08x/%08x/%08x/%08x)!\n",
+ stateid->si_boot, stateid->si_stateownerid,
+ stateid->si_fileid, stateid->si_generation);
+ return 1;
+ }
+ return 0;
+}
+
+static int
+EXPIRED_STATEID(stateid_t *stateid)
+{
+ if (time_before((unsigned long)boot_time,
+ ((unsigned long)stateid->si_boot)) &&
+ time_before((unsigned long)(stateid->si_boot + lease_time), get_seconds())) {
+ dprintk("NFSD: expired stateid (%08x/%08x/%08x/%08x)!\n",
+ stateid->si_boot, stateid->si_stateownerid,
+ stateid->si_fileid, stateid->si_generation);
+ return 1;
+ }
+ return 0;
+}
+
+static __be32
+stateid_error_map(stateid_t *stateid)
+{
+ if (STALE_STATEID(stateid))
+ return nfserr_stale_stateid;
+ if (EXPIRED_STATEID(stateid))
+ return nfserr_expired;
+
+ dprintk("NFSD: bad stateid (%08x/%08x/%08x/%08x)!\n",
+ stateid->si_boot, stateid->si_stateownerid,
+ stateid->si_fileid, stateid->si_generation);
+ return nfserr_bad_stateid;
}
static inline int
@@ -2867,8 +2877,10 @@
status = nfserr_bad_stateid;
if (is_delegation_stateid(stateid)) {
dp = find_delegation_stateid(ino, stateid);
- if (!dp)
+ if (!dp) {
+ status = stateid_error_map(stateid);
goto out;
+ }
status = check_stateid_generation(stateid, &dp->dl_stateid,
flags);
if (status)
@@ -2881,8 +2893,10 @@
*filpp = dp->dl_vfs_file;
} else { /* open or lock stateid */
stp = find_stateid(stateid, flags);
- if (!stp)
+ if (!stp) {
+ status = stateid_error_map(stateid);
goto out;
+ }
if (nfs4_check_fh(current_fh, stp))
goto out;
if (!stp->st_stateowner->so_confirmed)
@@ -2956,7 +2970,7 @@
*/
sop = search_close_lru(stateid->si_stateownerid, flags);
if (sop == NULL)
- return nfserr_bad_stateid;
+ return stateid_error_map(stateid);
*sopp = sop;
goto check_replay;
}
@@ -3227,8 +3241,10 @@
if (!is_delegation_stateid(stateid))
goto out;
dp = find_delegation_stateid(inode, stateid);
- if (!dp)
+ if (!dp) {
+ status = stateid_error_map(stateid);
goto out;
+ }
status = check_stateid_generation(stateid, &dp->dl_stateid, flags);
if (status)
goto out;
@@ -3455,7 +3471,7 @@
stp->st_stateowner = sop;
get_nfs4_file(fp);
stp->st_file = fp;
- stp->st_stateid.si_boot = boot_time;
+ stp->st_stateid.si_boot = get_seconds();
stp->st_stateid.si_stateownerid = sop->so_id;
stp->st_stateid.si_fileid = fp->fi_id;
stp->st_stateid.si_generation = 0;
@@ -3987,6 +4003,7 @@
INIT_LIST_HEAD(&conf_str_hashtbl[i]);
INIT_LIST_HEAD(&unconf_str_hashtbl[i]);
INIT_LIST_HEAD(&unconf_id_hashtbl[i]);
+ INIT_LIST_HEAD(&reclaim_str_hashtbl[i]);
}
for (i = 0; i < SESSION_HASH_SIZE; i++)
INIT_LIST_HEAD(&sessionid_hashtbl[i]);
@@ -4009,8 +4026,6 @@
INIT_LIST_HEAD(&close_lru);
INIT_LIST_HEAD(&client_lru);
INIT_LIST_HEAD(&del_recall_lru);
- for (i = 0; i < CLIENT_HASH_SIZE; i++)
- INIT_LIST_HEAD(&reclaim_str_hashtbl[i]);
reclaim_str_hashtbl_size = 0;
return 0;
}
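
The stateid change above replaces the boot-time stamp in si_boot with the creation time (get_seconds()), so a stateid can now be classified as stale (minted before the current server boot) or expired (minted during this boot, but older than the lease). A userspace sketch of the same classification, using invented boot and lease values:

#include <stdio.h>
#include <time.h>

static time_t boot_time;
static const time_t lease_time = 90;	/* assumed lease, in seconds */

static int stateid_is_stale(time_t si_boot)
{
	return si_boot < boot_time;		/* minted before this boot */
}

static int stateid_is_expired(time_t si_boot, time_t now)
{
	return si_boot > boot_time && si_boot + lease_time < now;
}

int main(void)
{
	time_t now = time(NULL);

	boot_time = now - 600;			/* pretend we booted 10 min ago */

	printf("pre-boot stateid stale:  %d\n", stateid_is_stale(boot_time - 5));
	printf("fresh stateid stale:     %d\n", stateid_is_stale(now - 10));
	printf("fresh stateid expired:   %d\n", stateid_is_expired(now - 10, now));
	printf("aged stateid expired:    %d\n", stateid_is_expired(now - 200, now));
	return 0;
}
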
diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
index b73549d..2dcc7fe 100644
--- a/fs/nfsd/nfs4xdr.c
+++ b/fs/nfsd/nfs4xdr.c
@@ -83,16 +83,6 @@
return 0;
}
-/*
- * START OF "GENERIC" DECODE ROUTINES.
- * These may look a little ugly since they are imported from a "generic"
- * set of XDR encode/decode routines which are intended to be shared by
- * all of our NFSv4 implementations (OpenBSD, MacOS X...).
- *
- * If the pain of reading these is too great, it should be a straightforward
- * task to translate them into Linux-specific versions which are more
- * consistent with the style used in NFSv2/v3...
- */
#define DECODE_HEAD \
__be32 *p; \
__be32 status
@@ -254,20 +244,8 @@
DECODE_TAIL;
}
-static u32 nfsd_attrmask[] = {
- NFSD_WRITEABLE_ATTRS_WORD0,
- NFSD_WRITEABLE_ATTRS_WORD1,
- NFSD_WRITEABLE_ATTRS_WORD2
-};
-
-static u32 nfsd41_ex_attrmask[] = {
- NFSD_SUPPATTR_EXCLCREAT_WORD0,
- NFSD_SUPPATTR_EXCLCREAT_WORD1,
- NFSD_SUPPATTR_EXCLCREAT_WORD2
-};
-
static __be32
-nfsd4_decode_fattr(struct nfsd4_compoundargs *argp, u32 *bmval, u32 *writable,
+nfsd4_decode_fattr(struct nfsd4_compoundargs *argp, u32 *bmval,
struct iattr *iattr, struct nfs4_acl **acl)
{
int expected_len, len = 0;
@@ -280,18 +258,6 @@
if ((status = nfsd4_decode_bitmap(argp, bmval)))
return status;
- /*
- * According to spec, unsupported attributes return ERR_ATTRNOTSUPP;
- * read-only attributes return ERR_INVAL.
- */
- if ((bmval[0] & ~nfsd_suppattrs0(argp->minorversion)) ||
- (bmval[1] & ~nfsd_suppattrs1(argp->minorversion)) ||
- (bmval[2] & ~nfsd_suppattrs2(argp->minorversion)))
- return nfserr_attrnotsupp;
- if ((bmval[0] & ~writable[0]) || (bmval[1] & ~writable[1]) ||
- (bmval[2] & ~writable[2]))
- return nfserr_inval;
-
READ_BUF(4);
READ32(expected_len);
@@ -424,8 +390,11 @@
goto xdr_error;
}
}
- BUG_ON(bmval[2]); /* no such writeable attr supported yet */
- if (len != expected_len)
+ if (bmval[0] & ~NFSD_WRITEABLE_ATTRS_WORD0
+ || bmval[1] & ~NFSD_WRITEABLE_ATTRS_WORD1
+ || bmval[2] & ~NFSD_WRITEABLE_ATTRS_WORD2)
+ READ_BUF(expected_len - len);
+ else if (len != expected_len)
goto xdr_error;
DECODE_TAIL;
@@ -518,8 +487,8 @@
if ((status = check_filename(create->cr_name, create->cr_namelen, nfserr_inval)))
return status;
- status = nfsd4_decode_fattr(argp, create->cr_bmval, nfsd_attrmask,
- &create->cr_iattr, &create->cr_acl);
+ status = nfsd4_decode_fattr(argp, create->cr_bmval, &create->cr_iattr,
+ &create->cr_acl);
if (status)
goto out;
@@ -682,7 +651,7 @@
case NFS4_CREATE_UNCHECKED:
case NFS4_CREATE_GUARDED:
status = nfsd4_decode_fattr(argp, open->op_bmval,
- nfsd_attrmask, &open->op_iattr, &open->op_acl);
+ &open->op_iattr, &open->op_acl);
if (status)
goto out;
break;
@@ -696,8 +665,7 @@
READ_BUF(8);
COPYMEM(open->op_verf.data, 8);
status = nfsd4_decode_fattr(argp, open->op_bmval,
- nfsd41_ex_attrmask, &open->op_iattr,
- &open->op_acl);
+ &open->op_iattr, &open->op_acl);
if (status)
goto out;
break;
@@ -893,8 +861,8 @@
status = nfsd4_decode_stateid(argp, &setattr->sa_stateid);
if (status)
return status;
- return nfsd4_decode_fattr(argp, setattr->sa_bmval, nfsd_attrmask,
- &setattr->sa_iattr, &setattr->sa_acl);
+ return nfsd4_decode_fattr(argp, setattr->sa_bmval, &setattr->sa_iattr,
+ &setattr->sa_acl);
}
static __be32
@@ -1328,64 +1296,64 @@
};
static nfsd4_dec nfsd41_dec_ops[] = {
- [OP_ACCESS] (nfsd4_dec)nfsd4_decode_access,
- [OP_CLOSE] (nfsd4_dec)nfsd4_decode_close,
- [OP_COMMIT] (nfsd4_dec)nfsd4_decode_commit,
- [OP_CREATE] (nfsd4_dec)nfsd4_decode_create,
- [OP_DELEGPURGE] (nfsd4_dec)nfsd4_decode_notsupp,
- [OP_DELEGRETURN] (nfsd4_dec)nfsd4_decode_delegreturn,
- [OP_GETATTR] (nfsd4_dec)nfsd4_decode_getattr,
- [OP_GETFH] (nfsd4_dec)nfsd4_decode_noop,
- [OP_LINK] (nfsd4_dec)nfsd4_decode_link,
- [OP_LOCK] (nfsd4_dec)nfsd4_decode_lock,
- [OP_LOCKT] (nfsd4_dec)nfsd4_decode_lockt,
- [OP_LOCKU] (nfsd4_dec)nfsd4_decode_locku,
- [OP_LOOKUP] (nfsd4_dec)nfsd4_decode_lookup,
- [OP_LOOKUPP] (nfsd4_dec)nfsd4_decode_noop,
- [OP_NVERIFY] (nfsd4_dec)nfsd4_decode_verify,
- [OP_OPEN] (nfsd4_dec)nfsd4_decode_open,
- [OP_OPENATTR] (nfsd4_dec)nfsd4_decode_notsupp,
- [OP_OPEN_CONFIRM] (nfsd4_dec)nfsd4_decode_notsupp,
- [OP_OPEN_DOWNGRADE] (nfsd4_dec)nfsd4_decode_open_downgrade,
- [OP_PUTFH] (nfsd4_dec)nfsd4_decode_putfh,
- [OP_PUTPUBFH] (nfsd4_dec)nfsd4_decode_notsupp,
- [OP_PUTROOTFH] (nfsd4_dec)nfsd4_decode_noop,
- [OP_READ] (nfsd4_dec)nfsd4_decode_read,
- [OP_READDIR] (nfsd4_dec)nfsd4_decode_readdir,
- [OP_READLINK] (nfsd4_dec)nfsd4_decode_noop,
- [OP_REMOVE] (nfsd4_dec)nfsd4_decode_remove,
- [OP_RENAME] (nfsd4_dec)nfsd4_decode_rename,
- [OP_RENEW] (nfsd4_dec)nfsd4_decode_notsupp,
- [OP_RESTOREFH] (nfsd4_dec)nfsd4_decode_noop,
- [OP_SAVEFH] (nfsd4_dec)nfsd4_decode_noop,
- [OP_SECINFO] (nfsd4_dec)nfsd4_decode_secinfo,
- [OP_SETATTR] (nfsd4_dec)nfsd4_decode_setattr,
- [OP_SETCLIENTID] (nfsd4_dec)nfsd4_decode_notsupp,
- [OP_SETCLIENTID_CONFIRM](nfsd4_dec)nfsd4_decode_notsupp,
- [OP_VERIFY] (nfsd4_dec)nfsd4_decode_verify,
- [OP_WRITE] (nfsd4_dec)nfsd4_decode_write,
- [OP_RELEASE_LOCKOWNER] (nfsd4_dec)nfsd4_decode_notsupp,
+ [OP_ACCESS] = (nfsd4_dec)nfsd4_decode_access,
+ [OP_CLOSE] = (nfsd4_dec)nfsd4_decode_close,
+ [OP_COMMIT] = (nfsd4_dec)nfsd4_decode_commit,
+ [OP_CREATE] = (nfsd4_dec)nfsd4_decode_create,
+ [OP_DELEGPURGE] = (nfsd4_dec)nfsd4_decode_notsupp,
+ [OP_DELEGRETURN] = (nfsd4_dec)nfsd4_decode_delegreturn,
+ [OP_GETATTR] = (nfsd4_dec)nfsd4_decode_getattr,
+ [OP_GETFH] = (nfsd4_dec)nfsd4_decode_noop,
+ [OP_LINK] = (nfsd4_dec)nfsd4_decode_link,
+ [OP_LOCK] = (nfsd4_dec)nfsd4_decode_lock,
+ [OP_LOCKT] = (nfsd4_dec)nfsd4_decode_lockt,
+ [OP_LOCKU] = (nfsd4_dec)nfsd4_decode_locku,
+ [OP_LOOKUP] = (nfsd4_dec)nfsd4_decode_lookup,
+ [OP_LOOKUPP] = (nfsd4_dec)nfsd4_decode_noop,
+ [OP_NVERIFY] = (nfsd4_dec)nfsd4_decode_verify,
+ [OP_OPEN] = (nfsd4_dec)nfsd4_decode_open,
+ [OP_OPENATTR] = (nfsd4_dec)nfsd4_decode_notsupp,
+ [OP_OPEN_CONFIRM] = (nfsd4_dec)nfsd4_decode_notsupp,
+ [OP_OPEN_DOWNGRADE] = (nfsd4_dec)nfsd4_decode_open_downgrade,
+ [OP_PUTFH] = (nfsd4_dec)nfsd4_decode_putfh,
+ [OP_PUTPUBFH] = (nfsd4_dec)nfsd4_decode_notsupp,
+ [OP_PUTROOTFH] = (nfsd4_dec)nfsd4_decode_noop,
+ [OP_READ] = (nfsd4_dec)nfsd4_decode_read,
+ [OP_READDIR] = (nfsd4_dec)nfsd4_decode_readdir,
+ [OP_READLINK] = (nfsd4_dec)nfsd4_decode_noop,
+ [OP_REMOVE] = (nfsd4_dec)nfsd4_decode_remove,
+ [OP_RENAME] = (nfsd4_dec)nfsd4_decode_rename,
+ [OP_RENEW] = (nfsd4_dec)nfsd4_decode_notsupp,
+ [OP_RESTOREFH] = (nfsd4_dec)nfsd4_decode_noop,
+ [OP_SAVEFH] = (nfsd4_dec)nfsd4_decode_noop,
+ [OP_SECINFO] = (nfsd4_dec)nfsd4_decode_secinfo,
+ [OP_SETATTR] = (nfsd4_dec)nfsd4_decode_setattr,
+ [OP_SETCLIENTID] = (nfsd4_dec)nfsd4_decode_notsupp,
+ [OP_SETCLIENTID_CONFIRM]= (nfsd4_dec)nfsd4_decode_notsupp,
+ [OP_VERIFY] = (nfsd4_dec)nfsd4_decode_verify,
+ [OP_WRITE] = (nfsd4_dec)nfsd4_decode_write,
+ [OP_RELEASE_LOCKOWNER] = (nfsd4_dec)nfsd4_decode_notsupp,
/* new operations for NFSv4.1 */
- [OP_BACKCHANNEL_CTL] (nfsd4_dec)nfsd4_decode_notsupp,
- [OP_BIND_CONN_TO_SESSION](nfsd4_dec)nfsd4_decode_notsupp,
- [OP_EXCHANGE_ID] (nfsd4_dec)nfsd4_decode_exchange_id,
- [OP_CREATE_SESSION] (nfsd4_dec)nfsd4_decode_create_session,
- [OP_DESTROY_SESSION] (nfsd4_dec)nfsd4_decode_destroy_session,
- [OP_FREE_STATEID] (nfsd4_dec)nfsd4_decode_notsupp,
- [OP_GET_DIR_DELEGATION] (nfsd4_dec)nfsd4_decode_notsupp,
- [OP_GETDEVICEINFO] (nfsd4_dec)nfsd4_decode_notsupp,
- [OP_GETDEVICELIST] (nfsd4_dec)nfsd4_decode_notsupp,
- [OP_LAYOUTCOMMIT] (nfsd4_dec)nfsd4_decode_notsupp,
- [OP_LAYOUTGET] (nfsd4_dec)nfsd4_decode_notsupp,
- [OP_LAYOUTRETURN] (nfsd4_dec)nfsd4_decode_notsupp,
- [OP_SECINFO_NO_NAME] (nfsd4_dec)nfsd4_decode_notsupp,
- [OP_SEQUENCE] (nfsd4_dec)nfsd4_decode_sequence,
- [OP_SET_SSV] (nfsd4_dec)nfsd4_decode_notsupp,
- [OP_TEST_STATEID] (nfsd4_dec)nfsd4_decode_notsupp,
- [OP_WANT_DELEGATION] (nfsd4_dec)nfsd4_decode_notsupp,
- [OP_DESTROY_CLIENTID] (nfsd4_dec)nfsd4_decode_notsupp,
- [OP_RECLAIM_COMPLETE] (nfsd4_dec)nfsd4_decode_notsupp,
+ [OP_BACKCHANNEL_CTL] = (nfsd4_dec)nfsd4_decode_notsupp,
+ [OP_BIND_CONN_TO_SESSION]= (nfsd4_dec)nfsd4_decode_notsupp,
+ [OP_EXCHANGE_ID] = (nfsd4_dec)nfsd4_decode_exchange_id,
+ [OP_CREATE_SESSION] = (nfsd4_dec)nfsd4_decode_create_session,
+ [OP_DESTROY_SESSION] = (nfsd4_dec)nfsd4_decode_destroy_session,
+ [OP_FREE_STATEID] = (nfsd4_dec)nfsd4_decode_notsupp,
+ [OP_GET_DIR_DELEGATION] = (nfsd4_dec)nfsd4_decode_notsupp,
+ [OP_GETDEVICEINFO] = (nfsd4_dec)nfsd4_decode_notsupp,
+ [OP_GETDEVICELIST] = (nfsd4_dec)nfsd4_decode_notsupp,
+ [OP_LAYOUTCOMMIT] = (nfsd4_dec)nfsd4_decode_notsupp,
+ [OP_LAYOUTGET] = (nfsd4_dec)nfsd4_decode_notsupp,
+ [OP_LAYOUTRETURN] = (nfsd4_dec)nfsd4_decode_notsupp,
+ [OP_SECINFO_NO_NAME] = (nfsd4_dec)nfsd4_decode_notsupp,
+ [OP_SEQUENCE] = (nfsd4_dec)nfsd4_decode_sequence,
+ [OP_SET_SSV] = (nfsd4_dec)nfsd4_decode_notsupp,
+ [OP_TEST_STATEID] = (nfsd4_dec)nfsd4_decode_notsupp,
+ [OP_WANT_DELEGATION] = (nfsd4_dec)nfsd4_decode_notsupp,
+ [OP_DESTROY_CLIENTID] = (nfsd4_dec)nfsd4_decode_notsupp,
+ [OP_RECLAIM_COMPLETE] = (nfsd4_dec)nfsd4_decode_notsupp,
};
struct nfsd4_minorversion_ops {
@@ -1489,21 +1457,6 @@
DECODE_TAIL;
}
-/*
- * END OF "GENERIC" DECODE ROUTINES.
- */
-
-/*
- * START OF "GENERIC" ENCODE ROUTINES.
- * These may look a little ugly since they are imported from a "generic"
- * set of XDR encode/decode routines which are intended to be shared by
- * all of our NFSv4 implementations (OpenBSD, MacOS X...).
- *
- * If the pain of reading these is too great, it should be a straightforward
- * task to translate them into Linux-specific versions which are more
- * consistent with the style used in NFSv2/v3...
- */
-#define ENCODE_HEAD __be32 *p
#define WRITE32(n) *p++ = htonl(n)
#define WRITE64(n) do { \
@@ -1515,13 +1468,41 @@
memcpy(p, ptr, nbytes); \
p += XDR_QUADLEN(nbytes); \
}} while (0)
-#define WRITECINFO(c) do { \
- *p++ = htonl(c.atomic); \
- *p++ = htonl(c.before_ctime_sec); \
- *p++ = htonl(c.before_ctime_nsec); \
- *p++ = htonl(c.after_ctime_sec); \
- *p++ = htonl(c.after_ctime_nsec); \
-} while (0)
+
+static void write32(__be32 **p, u32 n)
+{
+ *(*p)++ = n;
+}
+
+static void write64(__be32 **p, u64 n)
+{
+ write32(p, (u32)(n >> 32));
+ write32(p, (u32)n);
+}
+
+static void write_change(__be32 **p, struct kstat *stat, struct inode *inode)
+{
+ if (IS_I_VERSION(inode)) {
+ write64(p, inode->i_version);
+ } else {
+ write32(p, stat->ctime.tv_sec);
+ write32(p, stat->ctime.tv_nsec);
+ }
+}
+
+static void write_cinfo(__be32 **p, struct nfsd4_change_info *c)
+{
+ write32(p, c->atomic);
+ if (c->change_supported) {
+ write64(p, c->before_change);
+ write64(p, c->after_change);
+ } else {
+ write32(p, c->before_ctime_sec);
+ write32(p, c->before_ctime_nsec);
+ write32(p, c->after_ctime_sec);
+ write32(p, c->after_ctime_nsec);
+ }
+}
#define RESERVE_SPACE(nbytes) do { \
p = resp->p; \
@@ -1874,16 +1855,9 @@
WRITE32(NFS4_FH_PERSISTENT|NFS4_FH_VOL_RENAME);
}
if (bmval0 & FATTR4_WORD0_CHANGE) {
- /*
- * Note: This _must_ be consistent with the scheme for writing
- * change_info, so any changes made here must be reflected there
- * as well. (See xdr4.h:set_change_info() and the WRITECINFO()
- * macro above.)
- */
if ((buflen -= 8) < 0)
goto out_resource;
- WRITE32(stat.ctime.tv_sec);
- WRITE32(stat.ctime.tv_nsec);
+ write_change(&p, &stat, dentry->d_inode);
}
if (bmval0 & FATTR4_WORD0_SIZE) {
if ((buflen -= 8) < 0)
@@ -2348,7 +2322,7 @@
static void
nfsd4_encode_stateid(struct nfsd4_compoundres *resp, stateid_t *sid)
{
- ENCODE_HEAD;
+ __be32 *p;
RESERVE_SPACE(sizeof(stateid_t));
WRITE32(sid->si_generation);
@@ -2359,7 +2333,7 @@
static __be32
nfsd4_encode_access(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_access *access)
{
- ENCODE_HEAD;
+ __be32 *p;
if (!nfserr) {
RESERVE_SPACE(8);
@@ -2386,7 +2360,7 @@
static __be32
nfsd4_encode_commit(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_commit *commit)
{
- ENCODE_HEAD;
+ __be32 *p;
if (!nfserr) {
RESERVE_SPACE(8);
@@ -2399,11 +2373,11 @@
static __be32
nfsd4_encode_create(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_create *create)
{
- ENCODE_HEAD;
+ __be32 *p;
if (!nfserr) {
RESERVE_SPACE(32);
- WRITECINFO(create->cr_cinfo);
+ write_cinfo(&p, &create->cr_cinfo);
WRITE32(2);
WRITE32(create->cr_bmval[0]);
WRITE32(create->cr_bmval[1]);
@@ -2435,7 +2409,7 @@
{
struct svc_fh *fhp = *fhpp;
unsigned int len;
- ENCODE_HEAD;
+ __be32 *p;
if (!nfserr) {
len = fhp->fh_handle.fh_size;
@@ -2454,7 +2428,7 @@
static void
nfsd4_encode_lock_denied(struct nfsd4_compoundres *resp, struct nfsd4_lock_denied *ld)
{
- ENCODE_HEAD;
+ __be32 *p;
RESERVE_SPACE(32 + XDR_LEN(ld->ld_sop ? ld->ld_sop->so_owner.len : 0));
WRITE64(ld->ld_start);
@@ -2510,11 +2484,11 @@
static __be32
nfsd4_encode_link(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_link *link)
{
- ENCODE_HEAD;
+ __be32 *p;
if (!nfserr) {
RESERVE_SPACE(20);
- WRITECINFO(link->li_cinfo);
+ write_cinfo(&p, &link->li_cinfo);
ADJUST_ARGS();
}
return nfserr;
@@ -2524,7 +2498,7 @@
static __be32
nfsd4_encode_open(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_open *open)
{
- ENCODE_HEAD;
+ __be32 *p;
ENCODE_SEQID_OP_HEAD;
if (nfserr)
@@ -2532,7 +2506,7 @@
nfsd4_encode_stateid(resp, &open->op_stateid);
RESERVE_SPACE(40);
- WRITECINFO(open->op_cinfo);
+ write_cinfo(&p, &open->op_cinfo);
WRITE32(open->op_rflags);
WRITE32(2);
WRITE32(open->op_bmval[0]);
@@ -2619,7 +2593,7 @@
int v, pn;
unsigned long maxcount;
long len;
- ENCODE_HEAD;
+ __be32 *p;
if (nfserr)
return nfserr;
@@ -2681,7 +2655,7 @@
{
int maxcount;
char *page;
- ENCODE_HEAD;
+ __be32 *p;
if (nfserr)
return nfserr;
@@ -2730,7 +2704,7 @@
int maxcount;
loff_t offset;
__be32 *page, *savep, *tailbase;
- ENCODE_HEAD;
+ __be32 *p;
if (nfserr)
return nfserr;
@@ -2806,11 +2780,11 @@
static __be32
nfsd4_encode_remove(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_remove *remove)
{
- ENCODE_HEAD;
+ __be32 *p;
if (!nfserr) {
RESERVE_SPACE(20);
- WRITECINFO(remove->rm_cinfo);
+ write_cinfo(&p, &remove->rm_cinfo);
ADJUST_ARGS();
}
return nfserr;
@@ -2819,12 +2793,12 @@
static __be32
nfsd4_encode_rename(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_rename *rename)
{
- ENCODE_HEAD;
+ __be32 *p;
if (!nfserr) {
RESERVE_SPACE(40);
- WRITECINFO(rename->rn_sinfo);
- WRITECINFO(rename->rn_tinfo);
+ write_cinfo(&p, &rename->rn_sinfo);
+ write_cinfo(&p, &rename->rn_tinfo);
ADJUST_ARGS();
}
return nfserr;
@@ -2839,7 +2813,7 @@
u32 nflavs;
struct exp_flavor_info *flavs;
struct exp_flavor_info def_flavs[2];
- ENCODE_HEAD;
+ __be32 *p;
if (nfserr)
goto out;
@@ -2904,7 +2878,7 @@
static __be32
nfsd4_encode_setattr(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_setattr *setattr)
{
- ENCODE_HEAD;
+ __be32 *p;
RESERVE_SPACE(12);
if (nfserr) {
@@ -2924,7 +2898,7 @@
static __be32
nfsd4_encode_setclientid(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_setclientid *scd)
{
- ENCODE_HEAD;
+ __be32 *p;
if (!nfserr) {
RESERVE_SPACE(8 + sizeof(nfs4_verifier));
@@ -2944,7 +2918,7 @@
static __be32
nfsd4_encode_write(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_write *write)
{
- ENCODE_HEAD;
+ __be32 *p;
if (!nfserr) {
RESERVE_SPACE(16);
@@ -2960,7 +2934,7 @@
nfsd4_encode_exchange_id(struct nfsd4_compoundres *resp, int nfserr,
struct nfsd4_exchange_id *exid)
{
- ENCODE_HEAD;
+ __be32 *p;
char *major_id;
char *server_scope;
int major_id_sz;
@@ -3015,7 +2989,7 @@
nfsd4_encode_create_session(struct nfsd4_compoundres *resp, int nfserr,
struct nfsd4_create_session *sess)
{
- ENCODE_HEAD;
+ __be32 *p;
if (nfserr)
return nfserr;
@@ -3071,7 +3045,7 @@
nfsd4_encode_sequence(struct nfsd4_compoundres *resp, int nfserr,
struct nfsd4_sequence *seq)
{
- ENCODE_HEAD;
+ __be32 *p;
if (nfserr)
return nfserr;
@@ -3209,7 +3183,7 @@
dprintk("%s length %u, xb->page_len %u tlen %u pad %u\n", __func__,
length, xb->page_len, tlen, pad);
- if (length <= session->se_fmaxresp_cached)
+ if (length <= session->se_fchannel.maxresp_cached)
return status;
else
return nfserr_rep_too_big_to_cache;
@@ -3219,7 +3193,7 @@
nfsd4_encode_operation(struct nfsd4_compoundres *resp, struct nfsd4_op *op)
{
__be32 *statp;
- ENCODE_HEAD;
+ __be32 *p;
RESERVE_SPACE(8);
WRITE32(op->opnum);
@@ -3253,7 +3227,7 @@
void
nfsd4_encode_replay(struct nfsd4_compoundres *resp, struct nfsd4_op *op)
{
- ENCODE_HEAD;
+ __be32 *p;
struct nfs4_replay *rp = op->replay;
BUG_ON(!rp);
@@ -3268,10 +3242,6 @@
ADJUST_ARGS();
}
-/*
- * END OF "GENERIC" ENCODE ROUTINES.
- */
-
int
nfs4svc_encode_voidres(struct svc_rqst *rqstp, __be32 *p, void *dummy)
{
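
The WRITECINFO macro is replaced above by the write32()/write64()/write_cinfo() helpers so the change attribute can be a full 64-bit i_version when the filesystem maintains one, falling back to ctime seconds/nanoseconds otherwise. A userspace sketch of splitting such a 64-bit value into two 32-bit XDR words; unlike the kernel helpers it applies htonl() explicitly, and the i_version value is made up.

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>

static void write32(uint32_t **p, uint32_t n)
{
	*(*p)++ = htonl(n);		/* one network-order XDR word */
}

static void write64(uint32_t **p, uint64_t n)
{
	write32(p, (uint32_t)(n >> 32));
	write32(p, (uint32_t)n);
}

int main(void)
{
	uint32_t buf[2], *p = buf;
	uint64_t i_version = 0x0000000500000007ULL;	/* made-up example */

	write64(&p, i_version);
	printf("xdr words: %08x %08x\n", ntohl(buf[0]), ntohl(buf[1]));
	return 0;
}
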
diff --git a/fs/nfsd/nfscache.c b/fs/nfsd/nfscache.c
index 5bfc2ac..4638635 100644
--- a/fs/nfsd/nfscache.c
+++ b/fs/nfsd/nfscache.c
@@ -29,15 +29,24 @@
*/
#define CACHESIZE 1024
#define HASHSIZE 64
-#define REQHASH(xid) (((((__force __u32)xid) >> 24) ^ ((__force __u32)xid)) & (HASHSIZE-1))
-static struct hlist_head * hash_list;
+static struct hlist_head * cache_hash;
static struct list_head lru_head;
static int cache_disabled = 1;
+/*
+ * Calculate the hash index from an XID.
+ */
+static inline u32 request_hash(u32 xid)
+{
+ u32 h = xid;
+ h ^= (xid >> 24);
+ return h & (HASHSIZE-1);
+}
+
static int nfsd_cache_append(struct svc_rqst *rqstp, struct kvec *vec);
-/*
+/*
* locking for the reply cache:
* A cache entry is "single use" if c_state == RC_INPROG
* Otherwise, it when accessing _prev or _next, the lock must be held.
@@ -62,8 +71,8 @@
i--;
}
- hash_list = kcalloc (HASHSIZE, sizeof(struct hlist_head), GFP_KERNEL);
- if (!hash_list)
+ cache_hash = kcalloc (HASHSIZE, sizeof(struct hlist_head), GFP_KERNEL);
+ if (!cache_hash)
goto out_nomem;
cache_disabled = 0;
@@ -88,8 +97,8 @@
cache_disabled = 1;
- kfree (hash_list);
- hash_list = NULL;
+ kfree (cache_hash);
+ cache_hash = NULL;
}
/*
@@ -108,7 +117,7 @@
hash_refile(struct svc_cacherep *rp)
{
hlist_del_init(&rp->c_hash);
- hlist_add_head(&rp->c_hash, hash_list + REQHASH(rp->c_xid));
+ hlist_add_head(&rp->c_hash, cache_hash + request_hash(rp->c_xid));
}
/*
@@ -138,7 +147,7 @@
spin_lock(&cache_lock);
rtn = RC_DOIT;
- rh = &hash_list[REQHASH(xid)];
+ rh = &cache_hash[request_hash(xid)];
hlist_for_each_entry(rp, hn, rh, c_hash) {
if (rp->c_state != RC_UNUSED &&
xid == rp->c_xid && proc == rp->c_proc &&
@@ -165,8 +174,8 @@
}
}
- /* This should not happen */
- if (rp == NULL) {
+ /* All entries on the LRU are in-progress. This should not happen */
+ if (&rp->c_lru == &lru_head) {
static int complaints;
printk(KERN_WARNING "nfsd: all repcache entries locked!\n");
@@ -264,7 +273,7 @@
len = resv->iov_len - ((char*)statp - (char*)resv->iov_base);
len >>= 2;
-
+
/* Don't cache excessive amounts of data and XDR failures */
if (!statp || len > (256 >> 2)) {
rp->c_state = RC_UNUSED;
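
The REQHASH macro becomes the request_hash() helper above: fold the top byte of the XID into the low bits and mask to the table size. A standalone copy for experimentation; HASHSIZE matches the nfscache.c value and the sample XIDs are arbitrary.

#include <stdio.h>
#include <stdint.h>

#define HASHSIZE 64

static inline uint32_t request_hash(uint32_t xid)
{
	uint32_t h = xid;

	h ^= (xid >> 24);
	return h & (HASHSIZE - 1);
}

int main(void)
{
	uint32_t xids[] = { 0x12345678, 0x12345679, 0xdeadbeef };
	size_t i;

	for (i = 0; i < sizeof(xids) / sizeof(xids[0]); i++)
		printf("xid %08x -> bucket %u\n", xids[i], request_hash(xids[i]));
	return 0;
}
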
diff --git a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c
index af16849..1250fb9 100644
--- a/fs/nfsd/nfsctl.c
+++ b/fs/nfsd/nfsctl.c
@@ -207,10 +207,14 @@
static ssize_t write_svc(struct file *file, char *buf, size_t size)
{
struct nfsctl_svc *data;
+ int err;
if (size < sizeof(*data))
return -EINVAL;
data = (struct nfsctl_svc*) buf;
- return nfsd_svc(data->svc_port, data->svc_nthreads);
+ err = nfsd_svc(data->svc_port, data->svc_nthreads);
+ if (err < 0)
+ return err;
+ return 0;
}
/**
@@ -692,11 +696,12 @@
if (newthreads < 0)
return -EINVAL;
rv = nfsd_svc(NFS_PORT, newthreads);
- if (rv)
+ if (rv < 0)
return rv;
- }
- sprintf(buf, "%d\n", nfsd_nrthreads());
- return strlen(buf);
+ } else
+ rv = nfsd_nrthreads();
+
+ return scnprintf(buf, SIMPLE_TRANSACTION_LIMIT, "%d\n", rv);
}
/**
@@ -793,7 +798,7 @@
{
char *mesg = buf;
char *vers, *minorp, sign;
- int len, num;
+ int len, num, remaining;
unsigned minor;
ssize_t tlen = 0;
char *sep;
@@ -840,32 +845,50 @@
}
next:
vers += len + 1;
- tlen += len;
} while ((len = qword_get(&mesg, vers, size)) > 0);
/* If all get turned off, turn them back on, as
* having no versions is BAD
*/
nfsd_reset_versions();
}
+
/* Now write current state into reply buffer */
len = 0;
sep = "";
+ remaining = SIMPLE_TRANSACTION_LIMIT;
for (num=2 ; num <= 4 ; num++)
if (nfsd_vers(num, NFSD_AVAIL)) {
- len += sprintf(buf+len, "%s%c%d", sep,
+ len = snprintf(buf, remaining, "%s%c%d", sep,
nfsd_vers(num, NFSD_TEST)?'+':'-',
num);
sep = " ";
+
+ if (len > remaining)
+ break;
+ remaining -= len;
+ buf += len;
+ tlen += len;
}
if (nfsd_vers(4, NFSD_AVAIL))
- for (minor = 1; minor <= NFSD_SUPPORTED_MINOR_VERSION; minor++)
- len += sprintf(buf+len, " %c4.%u",
+ for (minor = 1; minor <= NFSD_SUPPORTED_MINOR_VERSION;
+ minor++) {
+ len = snprintf(buf, remaining, " %c4.%u",
(nfsd_vers(4, NFSD_TEST) &&
nfsd_minorversion(minor, NFSD_TEST)) ?
'+' : '-',
minor);
- len += sprintf(buf+len, "\n");
- return len;
+
+ if (len > remaining)
+ break;
+ remaining -= len;
+ buf += len;
+ tlen += len;
+ }
+
+ len = snprintf(buf, remaining, "\n");
+ if (len > remaining)
+ return -EINVAL;
+ return tlen + len;
}
/**
@@ -910,104 +933,143 @@
return rv;
}
+/*
+ * Zero-length write. Return a list of NFSD's current listener
+ * transports.
+ */
+static ssize_t __write_ports_names(char *buf)
+{
+ if (nfsd_serv == NULL)
+ return 0;
+ return svc_xprt_names(nfsd_serv, buf, SIMPLE_TRANSACTION_LIMIT);
+}
+
+/*
+ * A single 'fd' number was written, in which case it must be for
+ * a socket of a supported family/protocol, and we use it as an
+ * nfsd listener.
+ */
+static ssize_t __write_ports_addfd(char *buf)
+{
+ char *mesg = buf;
+ int fd, err;
+
+ err = get_int(&mesg, &fd);
+ if (err != 0 || fd < 0)
+ return -EINVAL;
+
+ err = nfsd_create_serv();
+ if (err != 0)
+ return err;
+
+ err = lockd_up();
+ if (err != 0)
+ goto out;
+
+ err = svc_addsock(nfsd_serv, fd, buf, SIMPLE_TRANSACTION_LIMIT);
+ if (err < 0)
+ lockd_down();
+
+out:
+ /* Decrease the count, but don't shut down the service */
+ nfsd_serv->sv_nrthreads--;
+ return err;
+}
+
+/*
+ * A '-' followed by the 'name' of a socket means we close the socket.
+ */
+static ssize_t __write_ports_delfd(char *buf)
+{
+ char *toclose;
+ int len = 0;
+
+ toclose = kstrdup(buf + 1, GFP_KERNEL);
+ if (toclose == NULL)
+ return -ENOMEM;
+
+ if (nfsd_serv != NULL)
+ len = svc_sock_names(nfsd_serv, buf,
+ SIMPLE_TRANSACTION_LIMIT, toclose);
+ if (len >= 0)
+ lockd_down();
+
+ kfree(toclose);
+ return len;
+}
+
+/*
+ * A transport listener is added by writing it's transport name and
+ * a port number.
+ */
+static ssize_t __write_ports_addxprt(char *buf)
+{
+ char transport[16];
+ int port, err;
+
+ if (sscanf(buf, "%15s %4u", transport, &port) != 2)
+ return -EINVAL;
+
+ if (port < 1 || port > USHORT_MAX)
+ return -EINVAL;
+
+ err = nfsd_create_serv();
+ if (err != 0)
+ return err;
+
+ err = svc_create_xprt(nfsd_serv, transport,
+ PF_INET, port, SVC_SOCK_ANONYMOUS);
+ if (err < 0) {
+ /* Give a reasonable perror msg for bad transport string */
+ if (err == -ENOENT)
+ err = -EPROTONOSUPPORT;
+ return err;
+ }
+ return 0;
+}
+
+/*
+ * A transport listener is removed by writing a "-", it's transport
+ * name, and it's port number.
+ */
+static ssize_t __write_ports_delxprt(char *buf)
+{
+ struct svc_xprt *xprt;
+ char transport[16];
+ int port;
+
+ if (sscanf(&buf[1], "%15s %4u", transport, &port) != 2)
+ return -EINVAL;
+
+ if (port < 1 || port > USHORT_MAX || nfsd_serv == NULL)
+ return -EINVAL;
+
+ xprt = svc_find_xprt(nfsd_serv, transport, AF_UNSPEC, port);
+ if (xprt == NULL)
+ return -ENOTCONN;
+
+ svc_close_xprt(xprt);
+ svc_xprt_put(xprt);
+ return 0;
+}
+
static ssize_t __write_ports(struct file *file, char *buf, size_t size)
{
- if (size == 0) {
- int len = 0;
+ if (size == 0)
+ return __write_ports_names(buf);
- if (nfsd_serv)
- len = svc_xprt_names(nfsd_serv, buf, 0);
- return len;
- }
- /* Either a single 'fd' number is written, in which
- * case it must be for a socket of a supported family/protocol,
- * and we use it as an nfsd socket, or
- * A '-' followed by the 'name' of a socket in which case
- * we close the socket.
- */
- if (isdigit(buf[0])) {
- char *mesg = buf;
- int fd;
- int err;
- err = get_int(&mesg, &fd);
- if (err)
- return -EINVAL;
- if (fd < 0)
- return -EINVAL;
- err = nfsd_create_serv();
- if (!err) {
- err = svc_addsock(nfsd_serv, fd, buf);
- if (err >= 0) {
- err = lockd_up();
- if (err < 0)
- svc_sock_names(buf+strlen(buf)+1, nfsd_serv, buf);
- }
- /* Decrease the count, but don't shutdown the
- * the service
- */
- nfsd_serv->sv_nrthreads--;
- }
- return err < 0 ? err : 0;
- }
- if (buf[0] == '-' && isdigit(buf[1])) {
- char *toclose = kstrdup(buf+1, GFP_KERNEL);
- int len = 0;
- if (!toclose)
- return -ENOMEM;
- if (nfsd_serv)
- len = svc_sock_names(buf, nfsd_serv, toclose);
- if (len >= 0)
- lockd_down();
- kfree(toclose);
- return len;
- }
- /*
- * Add a transport listener by writing it's transport name
- */
- if (isalpha(buf[0])) {
- int err;
- char transport[16];
- int port;
- if (sscanf(buf, "%15s %4d", transport, &port) == 2) {
- if (port < 1 || port > 65535)
- return -EINVAL;
- err = nfsd_create_serv();
- if (!err) {
- err = svc_create_xprt(nfsd_serv,
- transport, PF_INET, port,
- SVC_SOCK_ANONYMOUS);
- if (err == -ENOENT)
- /* Give a reasonable perror msg for
- * bad transport string */
- err = -EPROTONOSUPPORT;
- }
- return err < 0 ? err : 0;
- }
- }
- /*
- * Remove a transport by writing it's transport name and port number
- */
- if (buf[0] == '-' && isalpha(buf[1])) {
- struct svc_xprt *xprt;
- int err = -EINVAL;
- char transport[16];
- int port;
- if (sscanf(&buf[1], "%15s %4d", transport, &port) == 2) {
- if (port < 1 || port > 65535)
- return -EINVAL;
- if (nfsd_serv) {
- xprt = svc_find_xprt(nfsd_serv, transport,
- AF_UNSPEC, port);
- if (xprt) {
- svc_close_xprt(xprt);
- svc_xprt_put(xprt);
- err = 0;
- } else
- err = -ENOTCONN;
- }
- return err < 0 ? err : 0;
- }
- }
+ if (isdigit(buf[0]))
+ return __write_ports_addfd(buf);
+
+ if (buf[0] == '-' && isdigit(buf[1]))
+ return __write_ports_delfd(buf);
+
+ if (isalpha(buf[0]))
+ return __write_ports_addxprt(buf);
+
+ if (buf[0] == '-' && isalpha(buf[1]))
+ return __write_ports_delxprt(buf);
+
return -EINVAL;
}
@@ -1030,7 +1092,9 @@
* buf: C string containing an unsigned
* integer value representing a bound
* but unconnected socket that is to be
- * used as an NFSD listener
+ * used as an NFSD listener; listen(3)
+ * must be called for a SOCK_STREAM
+ * socket, otherwise it is ignored
* size: non-zero length of C string in @buf
* Output:
* On success: NFS service is started;
@@ -1138,7 +1202,9 @@
nfsd_max_blksize = bsize;
mutex_unlock(&nfsd_mutex);
}
- return sprintf(buf, "%d\n", nfsd_max_blksize);
+
+ return scnprintf(buf, SIMPLE_TRANSACTION_LIMIT, "%d\n",
+ nfsd_max_blksize);
}
#ifdef CONFIG_NFSD_V4
@@ -1162,8 +1228,9 @@
return -EINVAL;
nfs4_reset_lease(lease);
}
- sprintf(buf, "%ld\n", nfs4_lease_time());
- return strlen(buf);
+
+ return scnprintf(buf, SIMPLE_TRANSACTION_LIMIT, "%ld\n",
+ nfs4_lease_time());
}
/**
@@ -1219,8 +1286,9 @@
status = nfs4_reset_recoverydir(recdir);
}
- sprintf(buf, "%s\n", nfs4_recoverydir());
- return strlen(buf);
+
+ return scnprintf(buf, SIMPLE_TRANSACTION_LIMIT, "%s\n",
+ nfs4_recoverydir());
}
/**
diff --git a/fs/nfsd/nfsfh.c b/fs/nfsd/nfsfh.c
index 9f1ca17..8847f3f 100644
--- a/fs/nfsd/nfsfh.c
+++ b/fs/nfsd/nfsfh.c
@@ -27,9 +27,6 @@
#define NFSDDBG_FACILITY NFSDDBG_FH
-static int nfsd_nr_verified;
-static int nfsd_nr_put;
-
/*
* our acceptability function.
* if NOSUBTREECHECK, accept anything
@@ -251,7 +248,6 @@
fhp->fh_dentry = dentry;
fhp->fh_export = exp;
- nfsd_nr_verified++;
return 0;
out:
exp_put(exp);
@@ -552,7 +548,6 @@
return nfserr_opnotsupp;
}
- nfsd_nr_verified++;
return 0;
}
@@ -609,7 +604,6 @@
fhp->fh_pre_saved = 0;
fhp->fh_post_saved = 0;
#endif
- nfsd_nr_put++;
}
if (exp) {
cache_put(&exp->h, &svc_export_cache);
diff --git a/fs/nfsd/nfsproc.c b/fs/nfsd/nfsproc.c
index e298e26..0eb9c82 100644
--- a/fs/nfsd/nfsproc.c
+++ b/fs/nfsd/nfsproc.c
@@ -533,45 +533,179 @@
* NFSv2 Server procedures.
* Only the results of non-idempotent operations are cached.
*/
-#define nfsd_proc_none NULL
-#define nfssvc_release_none NULL
struct nfsd_void { int dummy; };
-#define PROC(name, argt, rest, relt, cache, respsize) \
- { (svc_procfunc) nfsd_proc_##name, \
- (kxdrproc_t) nfssvc_decode_##argt, \
- (kxdrproc_t) nfssvc_encode_##rest, \
- (kxdrproc_t) nfssvc_release_##relt, \
- sizeof(struct nfsd_##argt), \
- sizeof(struct nfsd_##rest), \
- 0, \
- cache, \
- respsize, \
- }
-
#define ST 1 /* status */
#define FH 8 /* filehandle */
#define AT 18 /* attributes */
static struct svc_procedure nfsd_procedures2[18] = {
- PROC(null, void, void, none, RC_NOCACHE, ST),
- PROC(getattr, fhandle, attrstat, fhandle, RC_NOCACHE, ST+AT),
- PROC(setattr, sattrargs, attrstat, fhandle, RC_REPLBUFF, ST+AT),
- PROC(none, void, void, none, RC_NOCACHE, ST),
- PROC(lookup, diropargs, diropres, fhandle, RC_NOCACHE, ST+FH+AT),
- PROC(readlink, readlinkargs, readlinkres, none, RC_NOCACHE, ST+1+NFS_MAXPATHLEN/4),
- PROC(read, readargs, readres, fhandle, RC_NOCACHE, ST+AT+1+NFSSVC_MAXBLKSIZE_V2/4),
- PROC(none, void, void, none, RC_NOCACHE, ST),
- PROC(write, writeargs, attrstat, fhandle, RC_REPLBUFF, ST+AT),
- PROC(create, createargs, diropres, fhandle, RC_REPLBUFF, ST+FH+AT),
- PROC(remove, diropargs, void, none, RC_REPLSTAT, ST),
- PROC(rename, renameargs, void, none, RC_REPLSTAT, ST),
- PROC(link, linkargs, void, none, RC_REPLSTAT, ST),
- PROC(symlink, symlinkargs, void, none, RC_REPLSTAT, ST),
- PROC(mkdir, createargs, diropres, fhandle, RC_REPLBUFF, ST+FH+AT),
- PROC(rmdir, diropargs, void, none, RC_REPLSTAT, ST),
- PROC(readdir, readdirargs, readdirres, none, RC_NOCACHE, 0),
- PROC(statfs, fhandle, statfsres, none, RC_NOCACHE, ST+5),
+ [NFSPROC_NULL] = {
+ .pc_func = (svc_procfunc) nfsd_proc_null,
+ .pc_decode = (kxdrproc_t) nfssvc_decode_void,
+ .pc_encode = (kxdrproc_t) nfssvc_encode_void,
+ .pc_argsize = sizeof(struct nfsd_void),
+ .pc_ressize = sizeof(struct nfsd_void),
+ .pc_cachetype = RC_NOCACHE,
+ .pc_xdrressize = ST,
+ },
+ [NFSPROC_GETATTR] = {
+ .pc_func = (svc_procfunc) nfsd_proc_getattr,
+ .pc_decode = (kxdrproc_t) nfssvc_decode_fhandle,
+ .pc_encode = (kxdrproc_t) nfssvc_encode_attrstat,
+ .pc_release = (kxdrproc_t) nfssvc_release_fhandle,
+ .pc_argsize = sizeof(struct nfsd_fhandle),
+ .pc_ressize = sizeof(struct nfsd_attrstat),
+ .pc_cachetype = RC_NOCACHE,
+ .pc_xdrressize = ST+AT,
+ },
+ [NFSPROC_SETATTR] = {
+ .pc_func = (svc_procfunc) nfsd_proc_setattr,
+ .pc_decode = (kxdrproc_t) nfssvc_decode_sattrargs,
+ .pc_encode = (kxdrproc_t) nfssvc_encode_attrstat,
+ .pc_release = (kxdrproc_t) nfssvc_release_fhandle,
+ .pc_argsize = sizeof(struct nfsd_sattrargs),
+ .pc_ressize = sizeof(struct nfsd_attrstat),
+ .pc_cachetype = RC_REPLBUFF,
+ .pc_xdrressize = ST+AT,
+ },
+ [NFSPROC_ROOT] = {
+ .pc_decode = (kxdrproc_t) nfssvc_decode_void,
+ .pc_encode = (kxdrproc_t) nfssvc_encode_void,
+ .pc_argsize = sizeof(struct nfsd_void),
+ .pc_ressize = sizeof(struct nfsd_void),
+ .pc_cachetype = RC_NOCACHE,
+ .pc_xdrressize = ST,
+ },
+ [NFSPROC_LOOKUP] = {
+ .pc_func = (svc_procfunc) nfsd_proc_lookup,
+ .pc_decode = (kxdrproc_t) nfssvc_decode_diropargs,
+ .pc_encode = (kxdrproc_t) nfssvc_encode_diropres,
+ .pc_release = (kxdrproc_t) nfssvc_release_fhandle,
+ .pc_argsize = sizeof(struct nfsd_diropargs),
+ .pc_ressize = sizeof(struct nfsd_diropres),
+ .pc_cachetype = RC_NOCACHE,
+ .pc_xdrressize = ST+FH+AT,
+ },
+ [NFSPROC_READLINK] = {
+ .pc_func = (svc_procfunc) nfsd_proc_readlink,
+ .pc_decode = (kxdrproc_t) nfssvc_decode_readlinkargs,
+ .pc_encode = (kxdrproc_t) nfssvc_encode_readlinkres,
+ .pc_argsize = sizeof(struct nfsd_readlinkargs),
+ .pc_ressize = sizeof(struct nfsd_readlinkres),
+ .pc_cachetype = RC_NOCACHE,
+ .pc_xdrressize = ST+1+NFS_MAXPATHLEN/4,
+ },
+ [NFSPROC_READ] = {
+ .pc_func = (svc_procfunc) nfsd_proc_read,
+ .pc_decode = (kxdrproc_t) nfssvc_decode_readargs,
+ .pc_encode = (kxdrproc_t) nfssvc_encode_readres,
+ .pc_release = (kxdrproc_t) nfssvc_release_fhandle,
+ .pc_argsize = sizeof(struct nfsd_readargs),
+ .pc_ressize = sizeof(struct nfsd_readres),
+ .pc_cachetype = RC_NOCACHE,
+ .pc_xdrressize = ST+AT+1+NFSSVC_MAXBLKSIZE_V2/4,
+ },
+ [NFSPROC_WRITECACHE] = {
+ .pc_decode = (kxdrproc_t) nfssvc_decode_void,
+ .pc_encode = (kxdrproc_t) nfssvc_encode_void,
+ .pc_argsize = sizeof(struct nfsd_void),
+ .pc_ressize = sizeof(struct nfsd_void),
+ .pc_cachetype = RC_NOCACHE,
+ .pc_xdrressize = ST,
+ },
+ [NFSPROC_WRITE] = {
+ .pc_func = (svc_procfunc) nfsd_proc_write,
+ .pc_decode = (kxdrproc_t) nfssvc_decode_writeargs,
+ .pc_encode = (kxdrproc_t) nfssvc_encode_attrstat,
+ .pc_release = (kxdrproc_t) nfssvc_release_fhandle,
+ .pc_argsize = sizeof(struct nfsd_writeargs),
+ .pc_ressize = sizeof(struct nfsd_attrstat),
+ .pc_cachetype = RC_REPLBUFF,
+ .pc_xdrressize = ST+AT,
+ },
+ [NFSPROC_CREATE] = {
+ .pc_func = (svc_procfunc) nfsd_proc_create,
+ .pc_decode = (kxdrproc_t) nfssvc_decode_createargs,
+ .pc_encode = (kxdrproc_t) nfssvc_encode_diropres,
+ .pc_release = (kxdrproc_t) nfssvc_release_fhandle,
+ .pc_argsize = sizeof(struct nfsd_createargs),
+ .pc_ressize = sizeof(struct nfsd_diropres),
+ .pc_cachetype = RC_REPLBUFF,
+ .pc_xdrressize = ST+FH+AT,
+ },
+ [NFSPROC_REMOVE] = {
+ .pc_func = (svc_procfunc) nfsd_proc_remove,
+ .pc_decode = (kxdrproc_t) nfssvc_decode_diropargs,
+ .pc_encode = (kxdrproc_t) nfssvc_encode_void,
+ .pc_argsize = sizeof(struct nfsd_diropargs),
+ .pc_ressize = sizeof(struct nfsd_void),
+ .pc_cachetype = RC_REPLSTAT,
+ .pc_xdrressize = ST,
+ },
+ [NFSPROC_RENAME] = {
+ .pc_func = (svc_procfunc) nfsd_proc_rename,
+ .pc_decode = (kxdrproc_t) nfssvc_decode_renameargs,
+ .pc_encode = (kxdrproc_t) nfssvc_encode_void,
+ .pc_argsize = sizeof(struct nfsd_renameargs),
+ .pc_ressize = sizeof(struct nfsd_void),
+ .pc_cachetype = RC_REPLSTAT,
+ .pc_xdrressize = ST,
+ },
+ [NFSPROC_LINK] = {
+ .pc_func = (svc_procfunc) nfsd_proc_link,
+ .pc_decode = (kxdrproc_t) nfssvc_decode_linkargs,
+ .pc_encode = (kxdrproc_t) nfssvc_encode_void,
+ .pc_argsize = sizeof(struct nfsd_linkargs),
+ .pc_ressize = sizeof(struct nfsd_void),
+ .pc_cachetype = RC_REPLSTAT,
+ .pc_xdrressize = ST,
+ },
+ [NFSPROC_SYMLINK] = {
+ .pc_func = (svc_procfunc) nfsd_proc_symlink,
+ .pc_decode = (kxdrproc_t) nfssvc_decode_symlinkargs,
+ .pc_encode = (kxdrproc_t) nfssvc_encode_void,
+ .pc_argsize = sizeof(struct nfsd_symlinkargs),
+ .pc_ressize = sizeof(struct nfsd_void),
+ .pc_cachetype = RC_REPLSTAT,
+ .pc_xdrressize = ST,
+ },
+ [NFSPROC_MKDIR] = {
+ .pc_func = (svc_procfunc) nfsd_proc_mkdir,
+ .pc_decode = (kxdrproc_t) nfssvc_decode_createargs,
+ .pc_encode = (kxdrproc_t) nfssvc_encode_diropres,
+ .pc_release = (kxdrproc_t) nfssvc_release_fhandle,
+ .pc_argsize = sizeof(struct nfsd_createargs),
+ .pc_ressize = sizeof(struct nfsd_diropres),
+ .pc_cachetype = RC_REPLBUFF,
+ .pc_xdrressize = ST+FH+AT,
+ },
+ [NFSPROC_RMDIR] = {
+ .pc_func = (svc_procfunc) nfsd_proc_rmdir,
+ .pc_decode = (kxdrproc_t) nfssvc_decode_diropargs,
+ .pc_encode = (kxdrproc_t) nfssvc_encode_void,
+ .pc_argsize = sizeof(struct nfsd_diropargs),
+ .pc_ressize = sizeof(struct nfsd_void),
+ .pc_cachetype = RC_REPLSTAT,
+ .pc_xdrressize = ST,
+ },
+ [NFSPROC_READDIR] = {
+ .pc_func = (svc_procfunc) nfsd_proc_readdir,
+ .pc_decode = (kxdrproc_t) nfssvc_decode_readdirargs,
+ .pc_encode = (kxdrproc_t) nfssvc_encode_readdirres,
+ .pc_argsize = sizeof(struct nfsd_readdirargs),
+ .pc_ressize = sizeof(struct nfsd_readdirres),
+ .pc_cachetype = RC_NOCACHE,
+ },
+ [NFSPROC_STATFS] = {
+ .pc_func = (svc_procfunc) nfsd_proc_statfs,
+ .pc_decode = (kxdrproc_t) nfssvc_decode_fhandle,
+ .pc_encode = (kxdrproc_t) nfssvc_encode_statfsres,
+ .pc_argsize = sizeof(struct nfsd_fhandle),
+ .pc_ressize = sizeof(struct nfsd_statfsres),
+ .pc_cachetype = RC_NOCACHE,
+ .pc_xdrressize = ST+5,
+ },
};
diff --git a/fs/nfsd/nfssvc.c b/fs/nfsd/nfssvc.c
index cbba4a9..d4c9884 100644
--- a/fs/nfsd/nfssvc.c
+++ b/fs/nfsd/nfssvc.c
@@ -390,12 +390,14 @@
mutex_lock(&nfsd_mutex);
dprintk("nfsd: creating service\n");
- error = -EINVAL;
if (nrservs <= 0)
nrservs = 0;
if (nrservs > NFSD_MAXSERVS)
nrservs = NFSD_MAXSERVS;
-
+ error = 0;
+ if (nrservs == 0 && nfsd_serv == NULL)
+ goto out;
+
/* Readahead param cache - will no-op if it already exists */
error = nfsd_racache_init(2*nrservs);
if (error<0)
@@ -413,6 +415,12 @@
goto failure;
error = svc_set_num_threads(nfsd_serv, NULL, nrservs);
+ if (error == 0)
+ /* We are holding a reference to nfsd_serv which
+ * we don't want to count in the return value,
+ * so subtract 1
+ */
+ error = nfsd_serv->sv_nrthreads - 1;
failure:
svc_destroy(nfsd_serv); /* Release server */
out:
diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
index 99f8357..4145083 100644
--- a/fs/nfsd/vfs.c
+++ b/fs/nfsd/vfs.c
@@ -966,6 +966,43 @@
mutex_unlock(&dentry->d_inode->i_mutex);
}
+/*
+ * Gathered writes: If another process is currently writing to the file,
+ * there's a high chance this is another nfsd (triggered by a bulk write
+ * from a client's biod). Rather than syncing the file with each write
+ * request, we sleep for 10 msec.
+ *
+ * I don't know if this roughly approximates C. Juszak's idea of
+ * gathered writes, but it's a nice and simple solution (IMHO), and it
+ * seems to work:-)
+ *
+ * Note: we do this only in the NFSv2 case, since v3 and higher have a
+ * better tool (separate unstable writes and commits) for solving this
+ * problem.
+ */
+static int wait_for_concurrent_writes(struct file *file)
+{
+ struct inode *inode = file->f_path.dentry->d_inode;
+ static ino_t last_ino;
+ static dev_t last_dev;
+ int err = 0;
+
+ if (atomic_read(&inode->i_writecount) > 1
+ || (last_ino == inode->i_ino && last_dev == inode->i_sb->s_dev)) {
+ dprintk("nfsd: write defer %d\n", task_pid_nr(current));
+ msleep(10);
+ dprintk("nfsd: write resume %d\n", task_pid_nr(current));
+ }
+
+ if (inode->i_state & I_DIRTY) {
+ dprintk("nfsd: write sync %d\n", task_pid_nr(current));
+ err = nfsd_sync(file);
+ }
+ last_ino = inode->i_ino;
+ last_dev = inode->i_sb->s_dev;
+ return err;
+}
+
static __be32
nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
loff_t offset, struct kvec *vec, int vlen,
@@ -978,6 +1015,7 @@
__be32 err = 0;
int host_err;
int stable = *stablep;
+ int use_wgather;
#ifdef MSNFS
err = nfserr_perm;
@@ -996,9 +1034,10 @@
* - the sync export option has been set, or
* - the client requested O_SYNC behavior (NFSv3 feature).
* - The file system doesn't support fsync().
- * When gathered writes have been configured for this volume,
+ * When NFSv2 gathered writes have been configured for this volume,
* flushing the data to disk is handled separately below.
*/
+ use_wgather = (rqstp->rq_vers == 2) && EX_WGATHER(exp);
if (!file->f_op->fsync) {/* COMMIT3 cannot work */
stable = 2;
@@ -1007,7 +1046,7 @@
if (!EX_ISSYNC(exp))
stable = 0;
- if (stable && !EX_WGATHER(exp)) {
+ if (stable && !use_wgather) {
spin_lock(&file->f_lock);
file->f_flags |= O_SYNC;
spin_unlock(&file->f_lock);
@@ -1017,52 +1056,20 @@
oldfs = get_fs(); set_fs(KERNEL_DS);
host_err = vfs_writev(file, (struct iovec __user *)vec, vlen, &offset);
set_fs(oldfs);
- if (host_err >= 0) {
- *cnt = host_err;
- nfsdstats.io_write += host_err;
- fsnotify_modify(file->f_path.dentry);
- }
+ if (host_err < 0)
+ goto out_nfserr;
+ *cnt = host_err;
+ nfsdstats.io_write += host_err;
+ fsnotify_modify(file->f_path.dentry);
/* clear setuid/setgid flag after write */
- if (host_err >= 0 && (inode->i_mode & (S_ISUID | S_ISGID)))
+ if (inode->i_mode & (S_ISUID | S_ISGID))
kill_suid(dentry);
- if (host_err >= 0 && stable) {
- static ino_t last_ino;
- static dev_t last_dev;
+ if (stable && use_wgather)
+ host_err = wait_for_concurrent_writes(file);
- /*
- * Gathered writes: If another process is currently
- * writing to the file, there's a high chance
- * this is another nfsd (triggered by a bulk write
- * from a client's biod). Rather than syncing the
- * file with each write request, we sleep for 10 msec.
- *
- * I don't know if this roughly approximates
- * C. Juszak's idea of gathered writes, but it's a
- * nice and simple solution (IMHO), and it seems to
- * work:-)
- */
- if (EX_WGATHER(exp)) {
- if (atomic_read(&inode->i_writecount) > 1
- || (last_ino == inode->i_ino && last_dev == inode->i_sb->s_dev)) {
- dprintk("nfsd: write defer %d\n", task_pid_nr(current));
- msleep(10);
- dprintk("nfsd: write resume %d\n", task_pid_nr(current));
- }
-
- if (inode->i_state & I_DIRTY) {
- dprintk("nfsd: write sync %d\n", task_pid_nr(current));
- host_err=nfsd_sync(file);
- }
-#if 0
- wake_up(&inode->i_wait);
-#endif
- }
- last_ino = inode->i_ino;
- last_dev = inode->i_sb->s_dev;
- }
-
+out_nfserr:
dprintk("nfsd: write complete host_err=%d\n", host_err);
if (host_err >= 0)
err = 0;
diff --git a/include/asm-generic/kmap_types.h b/include/asm-generic/kmap_types.h
index 54e8b3d..eddbce0 100644
--- a/include/asm-generic/kmap_types.h
+++ b/include/asm-generic/kmap_types.h
@@ -24,7 +24,10 @@
D(13) KM_SYNC_ICACHE,
D(14) KM_SYNC_DCACHE,
D(15) KM_UML_USERCOPY, /* UML specific, for copy_*_user - used in do_op_one_page */
-D(16) KM_TYPE_NR
+D(16) KM_IRQ_PTE,
+D(17) KM_NMI,
+D(18) KM_NMI_PTE,
+D(19) KM_TYPE_NR
};
#undef D
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index 55413e5..92b73b6 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -625,7 +625,7 @@
*(.init.ramfs) \
VMLINUX_SYMBOL(__initramfs_end) = .;
#else
-#define INITRAMFS
+#define INIT_RAM_FS
#endif
/**
diff --git a/include/linux/cpu.h b/include/linux/cpu.h
index 2643d84..4d668e0 100644
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
@@ -69,7 +69,6 @@
int cpu_up(unsigned int cpu);
void notify_cpu_starting(unsigned int cpu);
-extern void cpu_hotplug_init(void);
extern void cpu_maps_update_begin(void);
extern void cpu_maps_update_done(void);
@@ -84,10 +83,6 @@
{
}
-static inline void cpu_hotplug_init(void)
-{
-}
-
static inline void cpu_maps_update_begin(void)
{
}
diff --git a/include/linux/dma_remapping.h b/include/linux/dma_remapping.h
index 1a455f1..5619f85 100644
--- a/include/linux/dma_remapping.h
+++ b/include/linux/dma_remapping.h
@@ -13,6 +13,10 @@
#define DMA_PTE_WRITE (2)
#define DMA_PTE_SNP (1 << 11)
+#define CONTEXT_TT_MULTI_LEVEL 0
+#define CONTEXT_TT_DEV_IOTLB 1
+#define CONTEXT_TT_PASS_THROUGH 2
+
struct intel_iommu;
struct dmar_domain;
struct root_entry;
@@ -21,11 +25,16 @@
#ifdef CONFIG_DMAR
extern int iommu_calculate_agaw(struct intel_iommu *iommu);
+extern int iommu_calculate_max_sagaw(struct intel_iommu *iommu);
#else
static inline int iommu_calculate_agaw(struct intel_iommu *iommu)
{
return 0;
}
+static inline int iommu_calculate_max_sagaw(struct intel_iommu *iommu)
+{
+ return 0;
+}
#endif
extern int dmar_disabled;
diff --git a/include/linux/dmar.h b/include/linux/dmar.h
index 10ff5c4..1731fb5 100644
--- a/include/linux/dmar.h
+++ b/include/linux/dmar.h
@@ -188,6 +188,15 @@
#define for_each_rmrr_units(rmrr) \
list_for_each_entry(rmrr, &dmar_rmrr_units, list)
+
+struct dmar_atsr_unit {
+ struct list_head list; /* list of ATSR units */
+ struct acpi_dmar_header *hdr; /* ACPI header */
+ struct pci_dev **devices; /* target devices */
+ int devices_cnt; /* target device count */
+ u8 include_all:1; /* include all ports */
+};
+
/* Intel DMAR initialization functions */
extern int intel_iommu_init(void);
#else
diff --git a/include/linux/firewire.h b/include/linux/firewire.h
index e584b72..9823946 100644
--- a/include/linux/firewire.h
+++ b/include/linux/firewire.h
@@ -3,6 +3,7 @@
#include <linux/completion.h>
#include <linux/device.h>
+#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/list.h>
@@ -355,4 +356,90 @@
int generation, int speed, unsigned long long offset,
void *payload, size_t length);
+static inline int fw_stream_packet_destination_id(int tag, int channel, int sy)
+{
+ return tag << 14 | channel << 8 | sy;
+}
+
+struct fw_descriptor {
+ struct list_head link;
+ size_t length;
+ u32 immediate;
+ u32 key;
+ const u32 *data;
+};
+
+int fw_core_add_descriptor(struct fw_descriptor *desc);
+void fw_core_remove_descriptor(struct fw_descriptor *desc);
+
+/*
+ * The iso packet format allows for an immediate header/payload part
+ * stored in 'header' immediately after the packet info plus an
+ * indirect payload part that is pointed to by the 'payload' field.
+ * Applications can use one or the other or both to implement simple
+ * low-bandwidth streaming (e.g. audio) or more advanced
+ * scatter-gather streaming (e.g. assembling video frames automatically).
+ */
+struct fw_iso_packet {
+ u16 payload_length; /* Length of indirect payload. */
+ u32 interrupt:1; /* Generate interrupt on this packet */
+ u32 skip:1; /* Set to not send packet at all. */
+ u32 tag:2;
+ u32 sy:4;
+ u32 header_length:8; /* Length of immediate header. */
+ u32 header[0];
+};
+
+#define FW_ISO_CONTEXT_TRANSMIT 0
+#define FW_ISO_CONTEXT_RECEIVE 1
+
+#define FW_ISO_CONTEXT_MATCH_TAG0 1
+#define FW_ISO_CONTEXT_MATCH_TAG1 2
+#define FW_ISO_CONTEXT_MATCH_TAG2 4
+#define FW_ISO_CONTEXT_MATCH_TAG3 8
+#define FW_ISO_CONTEXT_MATCH_ALL_TAGS 15
+
+/*
+ * An iso buffer is just a set of pages mapped for DMA in the
+ * specified direction. Since the pages are to be used for DMA, they
+ * are not mapped into the kernel virtual address space. We store the
+ * DMA address in the page private. The helper function
+ * fw_iso_buffer_map() will map the pages into a given vma.
+ */
+struct fw_iso_buffer {
+ enum dma_data_direction direction;
+ struct page **pages;
+ int page_count;
+};
+
+int fw_iso_buffer_init(struct fw_iso_buffer *buffer, struct fw_card *card,
+ int page_count, enum dma_data_direction direction);
+void fw_iso_buffer_destroy(struct fw_iso_buffer *buffer, struct fw_card *card);
+
+struct fw_iso_context;
+typedef void (*fw_iso_callback_t)(struct fw_iso_context *context,
+ u32 cycle, size_t header_length,
+ void *header, void *data);
+struct fw_iso_context {
+ struct fw_card *card;
+ int type;
+ int channel;
+ int speed;
+ size_t header_size;
+ fw_iso_callback_t callback;
+ void *callback_data;
+};
+
+struct fw_iso_context *fw_iso_context_create(struct fw_card *card,
+ int type, int channel, int speed, size_t header_size,
+ fw_iso_callback_t callback, void *callback_data);
+int fw_iso_context_queue(struct fw_iso_context *ctx,
+ struct fw_iso_packet *packet,
+ struct fw_iso_buffer *buffer,
+ unsigned long payload);
+int fw_iso_context_start(struct fw_iso_context *ctx,
+ int cycle, int sync, int tags);
+int fw_iso_context_stop(struct fw_iso_context *ctx);
+void fw_iso_context_destroy(struct fw_iso_context *ctx);
+
#endif /* _LINUX_FIREWIRE_H */
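The isochronous interface declared above is used roughly as follows. This is a hedged, hypothetical transmit sketch rather than code from this patch: the channel, speed and payload sizes are placeholders, error handling is abbreviated, and it assumes fw_iso_context_create() reports failure via ERR_PTR(), as in the firewire core.

#include <linux/err.h>
#include <linux/firewire.h>

/* Hypothetical callback: invoked after packets marked .interrupt complete. */
static void example_iso_callback(struct fw_iso_context *context, u32 cycle,
				 size_t header_length, void *header, void *data)
{
	/* e.g. refill the context with more packets here */
}

/* Hypothetical: create a transmit context, queue one packet, start streaming. */
static int example_iso_tx(struct fw_card *card)
{
	struct fw_iso_buffer buffer;
	struct fw_iso_context *ctx;
	struct fw_iso_packet packet = {
		.payload_length	= 64,	/* indirect payload, lives in 'buffer' */
		.interrupt	= 1,	/* call example_iso_callback() when sent */
		.tag		= 1,
		.sy		= 0,
		.header_length	= 0,	/* no immediate header in this sketch */
	};
	int ret;

	ret = fw_iso_buffer_init(&buffer, card, 1, DMA_TO_DEVICE);
	if (ret < 0)
		return ret;

	ctx = fw_iso_context_create(card, FW_ISO_CONTEXT_TRANSMIT,
				    1 /* channel */, 0 /* speed */,
				    0 /* header_size */,
				    example_iso_callback, NULL);
	if (IS_ERR(ctx)) {
		fw_iso_buffer_destroy(&buffer, card);
		return PTR_ERR(ctx);
	}

	ret = fw_iso_context_queue(ctx, &packet, &buffer, 0 /* payload offset */);
	if (ret == 0)
		ret = fw_iso_context_start(ctx, -1 /* start ASAP */, 0, 0);

	/* Teardown (elsewhere, once streaming is done):
	 *   fw_iso_context_stop(ctx);
	 *   fw_iso_context_destroy(ctx);
	 *   fw_iso_buffer_destroy(&buffer, card);
	 */
	return ret;
}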
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 74a5793..1ff5e4e 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -1107,6 +1107,7 @@
extern void __locks_copy_lock(struct file_lock *, const struct file_lock *);
extern void locks_remove_posix(struct file *, fl_owner_t);
extern void locks_remove_flock(struct file *);
+extern void locks_release_private(struct file_lock *);
extern void posix_test_lock(struct file *, struct file_lock *);
extern int posix_lock_file(struct file *, struct file_lock *, struct file_lock *);
extern int posix_lock_file_wait(struct file *, struct file_lock *);
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index 39b95c5..dc3b132 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -362,6 +362,7 @@
unsigned long func;
unsigned long long calltime;
unsigned long long subtime;
+ unsigned long fp;
};
/*
@@ -372,7 +373,8 @@
extern void return_to_handler(void);
extern int
-ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth);
+ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth,
+ unsigned long frame_pointer);
/*
* Sometimes we don't want to trace a function with the function
diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h
index aa8c531..482dc91 100644
--- a/include/linux/intel-iommu.h
+++ b/include/linux/intel-iommu.h
@@ -53,6 +53,7 @@
#define DMAR_PHMLIMIT_REG 0x78 /* pmrr high limit */
#define DMAR_IQH_REG 0x80 /* Invalidation queue head register */
#define DMAR_IQT_REG 0x88 /* Invalidation queue tail register */
+#define DMAR_IQ_SHIFT 4 /* Invalidation queue head/tail shift */
#define DMAR_IQA_REG 0x90 /* Invalidation queue addr register */
#define DMAR_ICS_REG 0x98 /* Invalidation complete status register */
#define DMAR_IRTA_REG 0xb8 /* Interrupt remapping table addr register */
@@ -120,8 +121,10 @@
(ecap_iotlb_offset(e) + ecap_niotlb_iunits(e) * 16)
#define ecap_coherent(e) ((e) & 0x1)
#define ecap_qis(e) ((e) & 0x2)
+#define ecap_pass_through(e) ((e >> 6) & 0x1)
#define ecap_eim_support(e) ((e >> 4) & 0x1)
#define ecap_ir_support(e) ((e >> 3) & 0x1)
+#define ecap_dev_iotlb_support(e) (((e) >> 2) & 0x1)
#define ecap_max_handle_mask(e) ((e >> 20) & 0xf)
#define ecap_sc_support(e) ((e >> 7) & 0x1) /* Snooping Control */
@@ -197,6 +200,8 @@
#define DMA_FSTS_PPF ((u32)2)
#define DMA_FSTS_PFO ((u32)1)
#define DMA_FSTS_IQE (1 << 4)
+#define DMA_FSTS_ICE (1 << 5)
+#define DMA_FSTS_ITE (1 << 6)
#define dma_fsts_fault_record_index(s) (((s) >> 8) & 0xff)
/* FRCD_REG, 32 bits access */
@@ -225,7 +230,8 @@
enum {
QI_FREE,
QI_IN_USE,
- QI_DONE
+ QI_DONE,
+ QI_ABORT
};
#define QI_CC_TYPE 0x1
@@ -254,6 +260,12 @@
#define QI_CC_DID(did) (((u64)did) << 16)
#define QI_CC_GRAN(gran) (((u64)gran) >> (DMA_CCMD_INVL_GRANU_OFFSET-4))
+#define QI_DEV_IOTLB_SID(sid) ((u64)((sid) & 0xffff) << 32)
+#define QI_DEV_IOTLB_QDEP(qdep) (((qdep) & 0x1f) << 16)
+#define QI_DEV_IOTLB_ADDR(addr) ((u64)(addr) & VTD_PAGE_MASK)
+#define QI_DEV_IOTLB_SIZE 1
+#define QI_DEV_IOTLB_MAX_INVS 32
+
struct qi_desc {
u64 low, high;
};
@@ -280,10 +292,10 @@
#endif
struct iommu_flush {
- int (*flush_context)(struct intel_iommu *iommu, u16 did, u16 sid, u8 fm,
- u64 type, int non_present_entry_flush);
- int (*flush_iotlb)(struct intel_iommu *iommu, u16 did, u64 addr,
- unsigned int size_order, u64 type, int non_present_entry_flush);
+ void (*flush_context)(struct intel_iommu *iommu, u16 did, u16 sid,
+ u8 fm, u64 type);
+ void (*flush_iotlb)(struct intel_iommu *iommu, u16 did, u64 addr,
+ unsigned int size_order, u64 type);
};
enum {
@@ -302,6 +314,7 @@
spinlock_t register_lock; /* protect register handling */
int seq_id; /* sequence id of the iommu */
int agaw; /* agaw of this iommu */
+ int msagaw; /* max sagaw of this iommu */
unsigned int irq;
unsigned char name[13]; /* Device Name */
@@ -329,6 +342,7 @@
}
extern struct dmar_drhd_unit * dmar_find_matched_drhd_unit(struct pci_dev *dev);
+extern int dmar_find_matched_atsr_unit(struct pci_dev *dev);
extern int alloc_iommu(struct dmar_drhd_unit *drhd);
extern void free_iommu(struct intel_iommu *iommu);
@@ -337,11 +351,12 @@
extern int dmar_reenable_qi(struct intel_iommu *iommu);
extern void qi_global_iec(struct intel_iommu *iommu);
-extern int qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid,
- u8 fm, u64 type, int non_present_entry_flush);
-extern int qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
- unsigned int size_order, u64 type,
- int non_present_entry_flush);
+extern void qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid,
+ u8 fm, u64 type);
+extern void qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
+ unsigned int size_order, u64 type);
+extern void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 qdep,
+ u64 addr, unsigned mask);
extern int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu);
diff --git a/include/linux/ioport.h b/include/linux/ioport.h
index 32e4b2f..786e7b8 100644
--- a/include/linux/ioport.h
+++ b/include/linux/ioport.h
@@ -49,6 +49,8 @@
#define IORESOURCE_SIZEALIGN 0x00020000 /* size indicates alignment */
#define IORESOURCE_STARTALIGN 0x00040000 /* start field is alignment */
+#define IORESOURCE_MEM_64 0x00100000
+
#define IORESOURCE_EXCLUSIVE 0x08000000 /* Userland may not map this resource */
#define IORESOURCE_DISABLED 0x10000000
#define IORESOURCE_UNSET 0x20000000
diff --git a/include/linux/irq.h b/include/linux/irq.h
index 1e50c34..cb2e77a 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -157,7 +157,7 @@
* @irqs_unhandled: stats field for spurious unhandled interrupts
* @lock: locking for SMP
* @affinity: IRQ affinity on SMP
- * @cpu: cpu index useful for balancing
+ * @node: node index useful for balancing
* @pending_mask: pending rebalanced interrupts
* @threads_active: number of irqaction threads currently running
* @wait_for_threads: wait queue for sync_irq to wait for threaded handlers
@@ -423,7 +423,7 @@
/**
* alloc_desc_masks - allocate cpumasks for irq_desc
* @desc: pointer to irq_desc struct
- * @cpu: cpu which will be handling the cpumasks
+ * @node: node which will be handling the cpumasks
* @boot: true if need bootmem
*
* Allocates affinity and pending_mask cpumask if required.
diff --git a/include/linux/lockd/lockd.h b/include/linux/lockd/lockd.h
index 51855df..c325b18 100644
--- a/include/linux/lockd/lockd.h
+++ b/include/linux/lockd/lockd.h
@@ -195,7 +195,7 @@
extern int nlmsvc_grace_period;
extern unsigned long nlmsvc_timeout;
extern int nsm_use_hostnames;
-extern int nsm_local_state;
+extern u32 nsm_local_state;
/*
* Lockd client functions
diff --git a/include/linux/mm.h b/include/linux/mm.h
index d88d6fc..d006e93 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -810,11 +810,11 @@
#ifdef CONFIG_MMU
extern int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
- unsigned long address, int write_access);
+ unsigned long address, unsigned int flags);
#else
static inline int handle_mm_fault(struct mm_struct *mm,
struct vm_area_struct *vma, unsigned long address,
- int write_access)
+ unsigned int flags)
{
/* should never happen if there's no MMU */
BUG();
@@ -854,6 +854,12 @@
unsigned long end, unsigned long newflags);
/*
+ * Like get_user_pages_fast(), but doesn't attempt to fault and may return short.
+ */
+int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
+ struct page **pages);
+
+/*
* A callback you can register to apply pressure to ageable caches.
*
* 'shrink' is passed a count 'nr_to_scan' and a 'gfpmask'. It should
diff --git a/include/linux/mnt_namespace.h b/include/linux/mnt_namespace.h
index 3a05929..3beb259 100644
--- a/include/linux/mnt_namespace.h
+++ b/include/linux/mnt_namespace.h
@@ -24,16 +24,10 @@
struct fs_struct;
+extern struct mnt_namespace *create_mnt_ns(struct vfsmount *mnt);
extern struct mnt_namespace *copy_mnt_ns(unsigned long, struct mnt_namespace *,
struct fs_struct *);
-extern void __put_mnt_ns(struct mnt_namespace *ns);
-
-static inline void put_mnt_ns(struct mnt_namespace *ns)
-{
- if (atomic_dec_and_lock(&ns->count, &vfsmount_lock))
- /* releases vfsmount_lock */
- __put_mnt_ns(ns);
-}
+extern void put_mnt_ns(struct mnt_namespace *ns);
static inline void exit_mnt_ns(struct task_struct *p)
{
diff --git a/include/linux/mtd/nand.h b/include/linux/mtd/nand.h
index 7efb9be..4030eba 100644
--- a/include/linux/mtd/nand.h
+++ b/include/linux/mtd/nand.h
@@ -563,6 +563,7 @@
* @options: Option flags, e.g. 16bit buswidth
* @ecclayout: ecc layout info structure
* @part_probe_types: NULL-terminated array of probe types
+ * @set_parts: platform specific function to set partitions
* @priv: hardware controller specific settings
*/
struct platform_nand_chip {
@@ -574,26 +575,41 @@
int chip_delay;
unsigned int options;
const char **part_probe_types;
+ void (*set_parts)(uint64_t size,
+ struct platform_nand_chip *chip);
void *priv;
};
+/* Keep gcc happy */
+struct platform_device;
+
/**
* struct platform_nand_ctrl - controller level device structure
+ * @probe: platform specific function to probe/setup hardware
+ * @remove: platform specific function to remove/teardown hardware
* @hwcontrol: platform specific hardware control structure
* @dev_ready: platform specific function to read ready/busy pin
* @select_chip: platform specific chip select function
* @cmd_ctrl: platform specific function for controlling
* ALE/CLE/nCE. Also used to write command and address
+ * @write_buf: platform specific function for write buffer
+ * @read_buf: platform specific function for read buffer
* @priv: private data to transport driver specific settings
*
* All fields are optional and depend on the hardware driver requirements
*/
struct platform_nand_ctrl {
+ int (*probe)(struct platform_device *pdev);
+ void (*remove)(struct platform_device *pdev);
void (*hwcontrol)(struct mtd_info *mtd, int cmd);
int (*dev_ready)(struct mtd_info *mtd);
void (*select_chip)(struct mtd_info *mtd, int chip);
void (*cmd_ctrl)(struct mtd_info *mtd, int dat,
unsigned int ctrl);
+ void (*write_buf)(struct mtd_info *mtd,
+ const uint8_t *buf, int len);
+ void (*read_buf)(struct mtd_info *mtd,
+ uint8_t *buf, int len);
void *priv;
};
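For illustration, a board file could populate the new controller hooks roughly as below. The board_nand_*() helpers and what they touch are assumptions of this sketch, and the structure would normally be handed to the generic plat_nand driver through the board's platform data.

#include <linux/mtd/mtd.h>
#include <linux/mtd/nand.h>

/* Hypothetical board hooks; real implementations would claim/release
 * clocks, GPIOs or chip selects and move data over the board's bus. */
static int board_nand_probe(struct platform_device *pdev)
{
	return 0;	/* nothing to set up in this sketch */
}

static void board_nand_remove(struct platform_device *pdev)
{
}

static void board_nand_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
{
	/* board-specific write path */
}

static void board_nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
{
	/* board-specific read path */
}

static struct platform_nand_ctrl board_nand_ctrl = {
	.probe		= board_nand_probe,
	.remove		= board_nand_remove,
	.write_buf	= board_nand_write_buf,
	.read_buf	= board_nand_read_buf,
	/* hwcontrol/dev_ready/select_chip/cmd_ctrl remain optional */
};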
diff --git a/include/linux/mtd/onenand.h b/include/linux/mtd/onenand.h
index 9aa2a91..8ed8733 100644
--- a/include/linux/mtd/onenand.h
+++ b/include/linux/mtd/onenand.h
@@ -17,6 +17,7 @@
#include <linux/mtd/onenand_regs.h>
#include <linux/mtd/bbm.h>
+#define MAX_DIES 2
#define MAX_BUFFERRAM 2
/* Scan and identify a OneNAND device */
@@ -51,7 +52,12 @@
/**
* struct onenand_chip - OneNAND Private Flash Chip Data
* @base: [BOARDSPECIFIC] address to access OneNAND
+ * @dies: [INTERN][FLEX-ONENAND] number of dies on chip
+ * @boundary: [INTERN][FLEX-ONENAND] Boundary of the dies
+ * @diesize: [INTERN][FLEX-ONENAND] Size of the dies
* @chipsize: [INTERN] the size of one chip for multichip arrays
+ * FIXME: For Flex-OneNAND, chipsize holds the maximum possible
+ * device size, i.e. when all blocks are considered MLC
* @device_id: [INTERN] device ID
* @density_mask: chip density, used for DDP devices
* @version_id: [INTERN] version ID
@@ -68,6 +74,8 @@
* @command: [REPLACEABLE] hardware specific function for writing
* commands to the chip
* @wait: [REPLACEABLE] hardware specific function for wait on ready
+ * @bbt_wait: [REPLACEABLE] hardware specific function for bbt wait on ready
+ * @unlock_all: [REPLACEABLE] hardware specific function for unlock all
* @read_bufferram: [REPLACEABLE] hardware specific function for BufferRAM Area
* @write_bufferram: [REPLACEABLE] hardware specific function for BufferRAM Area
* @read_word: [REPLACEABLE] hardware specific function for read
@@ -92,9 +100,13 @@
*/
struct onenand_chip {
void __iomem *base;
+ unsigned dies;
+ unsigned boundary[MAX_DIES];
+ loff_t diesize[MAX_DIES];
unsigned int chipsize;
unsigned int device_id;
unsigned int version_id;
+ unsigned int technology;
unsigned int density_mask;
unsigned int options;
@@ -108,6 +120,8 @@
int (*command)(struct mtd_info *mtd, int cmd, loff_t address, size_t len);
int (*wait)(struct mtd_info *mtd, int state);
+ int (*bbt_wait)(struct mtd_info *mtd, int state);
+ void (*unlock_all)(struct mtd_info *mtd);
int (*read_bufferram)(struct mtd_info *mtd, int area,
unsigned char *buffer, int offset, size_t count);
int (*write_bufferram)(struct mtd_info *mtd, int area,
@@ -145,6 +159,8 @@
#define ONENAND_SET_BUFFERRAM0(this) (this->bufferram_index = 0)
#define ONENAND_SET_BUFFERRAM1(this) (this->bufferram_index = 1)
+#define FLEXONENAND(this) \
+ (this->device_id & DEVICE_IS_FLEXONENAND)
#define ONENAND_GET_SYS_CFG1(this) \
(this->read_word(this->base + ONENAND_REG_SYS_CFG1))
#define ONENAND_SET_SYS_CFG1(v, this) \
@@ -153,6 +169,9 @@
#define ONENAND_IS_DDP(this) \
(this->device_id & ONENAND_DEVICE_IS_DDP)
+#define ONENAND_IS_MLC(this) \
+ (this->technology & ONENAND_TECHNOLOGY_IS_MLC)
+
#ifdef CONFIG_MTD_ONENAND_2X_PROGRAM
#define ONENAND_IS_2PLANE(this) \
(this->options & ONENAND_HAS_2PLANE)
@@ -169,6 +188,7 @@
#define ONENAND_HAS_CONT_LOCK (0x0001)
#define ONENAND_HAS_UNLOCK_ALL (0x0002)
#define ONENAND_HAS_2PLANE (0x0004)
+#define ONENAND_SKIP_UNLOCK_CHECK (0x0100)
#define ONENAND_PAGEBUF_ALLOC (0x1000)
#define ONENAND_OOBBUF_ALLOC (0x2000)
@@ -176,6 +196,7 @@
* OneNAND Flash Manufacturer ID Codes
*/
#define ONENAND_MFR_SAMSUNG 0xec
+#define ONENAND_MFR_NUMONYX 0x20
/**
* struct onenand_manufacturers - NAND Flash Manufacturer ID Structure
@@ -189,5 +210,8 @@
int onenand_bbt_read_oob(struct mtd_info *mtd, loff_t from,
struct mtd_oob_ops *ops);
+unsigned onenand_block(struct onenand_chip *this, loff_t addr);
+loff_t onenand_addr(struct onenand_chip *this, int block);
+int flexonenand_region(struct mtd_info *mtd, loff_t addr);
#endif /* __LINUX_MTD_ONENAND_H */
diff --git a/include/linux/mtd/onenand_regs.h b/include/linux/mtd/onenand_regs.h
index 0c6bbe2..86a6bbe 100644
--- a/include/linux/mtd/onenand_regs.h
+++ b/include/linux/mtd/onenand_regs.h
@@ -67,6 +67,9 @@
/*
* Device ID Register F001h (R)
*/
+#define DEVICE_IS_FLEXONENAND (1 << 9)
+#define FLEXONENAND_PI_MASK (0x3ff)
+#define FLEXONENAND_PI_UNLOCK_SHIFT (14)
#define ONENAND_DEVICE_DENSITY_MASK (0xf)
#define ONENAND_DEVICE_DENSITY_SHIFT (4)
#define ONENAND_DEVICE_IS_DDP (1 << 3)
@@ -84,6 +87,11 @@
#define ONENAND_VERSION_PROCESS_SHIFT (8)
/*
+ * Technology Register F006h (R)
+ */
+#define ONENAND_TECHNOLOGY_IS_MLC (1 << 0)
+
+/*
* Start Address 1 F100h (R/W) & Start Address 2 F101h (R/W)
*/
#define ONENAND_DDP_SHIFT (15)
@@ -93,7 +101,8 @@
/*
* Start Address 8 F107h (R/W)
*/
-#define ONENAND_FPA_MASK (0x3f)
+/* Note: It's actually 0x3f in case of SLC */
+#define ONENAND_FPA_MASK (0x7f)
#define ONENAND_FPA_SHIFT (2)
#define ONENAND_FSA_MASK (0x03)
@@ -105,7 +114,8 @@
#define ONENAND_BSA_BOOTRAM (0 << 2)
#define ONENAND_BSA_DATARAM0 (2 << 2)
#define ONENAND_BSA_DATARAM1 (3 << 2)
-#define ONENAND_BSC_MASK (0x03)
+/* Note: It's actually 0x03 in case of SLC */
+#define ONENAND_BSC_MASK (0x07)
/*
* Command Register F220h (R/W)
@@ -124,9 +134,13 @@
#define ONENAND_CMD_RESET (0xF0)
#define ONENAND_CMD_OTP_ACCESS (0x65)
#define ONENAND_CMD_READID (0x90)
+#define FLEXONENAND_CMD_PI_UPDATE (0x05)
+#define FLEXONENAND_CMD_PI_ACCESS (0x66)
+#define FLEXONENAND_CMD_RECOVER_LSB (0x05)
/* NOTE: Those are not *REAL* commands */
#define ONENAND_CMD_BUFFERRAM (0x1978)
+#define FLEXONENAND_CMD_READ_PI (0x1985)
/*
* System Configuration 1 Register F221h (R, R/W)
@@ -192,10 +206,12 @@
#define ONENAND_ECC_1BIT_ALL (0x5555)
#define ONENAND_ECC_2BIT (1 << 1)
#define ONENAND_ECC_2BIT_ALL (0xAAAA)
+#define FLEXONENAND_UNCORRECTABLE_ERROR (0x1010)
/*
* One-Time Programmable (OTP)
*/
+#define FLEXONENAND_OTP_LOCK_OFFSET (2048)
#define ONENAND_OTP_LOCK_OFFSET (14)
#endif /* __ONENAND_REG_H */
diff --git a/include/linux/mtd/partitions.h b/include/linux/mtd/partitions.h
index 7535a74..af6dcb9 100644
--- a/include/linux/mtd/partitions.h
+++ b/include/linux/mtd/partitions.h
@@ -40,7 +40,6 @@
uint64_t offset; /* offset within the master MTD space */
uint32_t mask_flags; /* master MTD flags to mask out for this partition */
struct nand_ecclayout *ecclayout; /* out of band layout for this partition (NAND only)*/
- struct mtd_info **mtdp; /* pointer to store the MTD object */
};
#define MTDPART_OFS_NXTBLK (-2)
diff --git a/include/linux/nfs.h b/include/linux/nfs.h
index 214d499..f387919 100644
--- a/include/linux/nfs.h
+++ b/include/linux/nfs.h
@@ -25,8 +25,9 @@
#define NFSMODE_SOCK 0140000
#define NFSMODE_FIFO 0010000
-#define NFS_MNT_PROGRAM 100005
-#define NFS_MNT_PORT 627
+#define NFS_MNT_PROGRAM 100005
+#define NFS_MNT_VERSION 1
+#define NFS_MNT3_VERSION 3
/*
* NFS stats. The good thing with these values is that NFSv3 errors are
diff --git a/include/linux/nfs2.h b/include/linux/nfs2.h
index 0ed9517..fde24b3 100644
--- a/include/linux/nfs2.h
+++ b/include/linux/nfs2.h
@@ -64,11 +64,4 @@
#define NFSPROC_READDIR 16
#define NFSPROC_STATFS 17
-#define NFS_MNT_PROGRAM 100005
-#define NFS_MNT_VERSION 1
-#define MNTPROC_NULL 0
-#define MNTPROC_MNT 1
-#define MNTPROC_UMNT 3
-#define MNTPROC_UMNTALL 4
-
#endif /* _LINUX_NFS2_H */
diff --git a/include/linux/nfs3.h b/include/linux/nfs3.h
index 539f3b5..ac33806 100644
--- a/include/linux/nfs3.h
+++ b/include/linux/nfs3.h
@@ -88,12 +88,7 @@
#define NFS3PROC_PATHCONF 20
#define NFS3PROC_COMMIT 21
-#define NFS_MNT3_PROGRAM 100005
#define NFS_MNT3_VERSION 3
-#define MOUNTPROC3_NULL 0
-#define MOUNTPROC3_MNT 1
-#define MOUNTPROC3_UMNT 3
-#define MOUNTPROC3_UMNTALL 4
#if defined(__KERNEL__)
diff --git a/include/linux/nfs4.h b/include/linux/nfs4.h
index e3f0cbc..bd2eba5 100644
--- a/include/linux/nfs4.h
+++ b/include/linux/nfs4.h
@@ -21,6 +21,7 @@
#define NFS4_FHSIZE 128
#define NFS4_MAXPATHLEN PATH_MAX
#define NFS4_MAXNAMLEN NAME_MAX
+#define NFS4_OPAQUE_LIMIT 1024
#define NFS4_MAX_SESSIONID_LEN 16
#define NFS4_ACCESS_READ 0x0001
@@ -130,6 +131,16 @@
#define NFS4_MAX_UINT64 (~(u64)0)
+/* An NFS4 sessions server must support at least NFS4_MAX_OPS operations.
+ * If a compound requires more operations, adjust NFS4_MAX_OPS accordingly.
+ */
+#define NFS4_MAX_OPS 8
+
+/* Our NFS4 client back channel server only wants the cb_sequence and the
+ * actual operation per compound
+ */
+#define NFS4_MAX_BACK_CHANNEL_OPS 2
+
enum nfs4_acl_whotype {
NFS4_ACL_WHO_NAMED = 0,
NFS4_ACL_WHO_OWNER,
@@ -462,6 +473,13 @@
#define NFSPROC4_NULL 0
#define NFSPROC4_COMPOUND 1
#define NFS4_MINOR_VERSION 0
+
+#if defined(CONFIG_NFS_V4_1)
+#define NFS4_MAX_MINOR_VERSION 1
+#else
+#define NFS4_MAX_MINOR_VERSION 0
+#endif /* CONFIG_NFS_V4_1 */
+
#define NFS4_DEBUG 1
/* Index of predefined Linux client operations */
diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h
index 6ad7594..19fe15d 100644
--- a/include/linux/nfs_fs_sb.h
+++ b/include/linux/nfs_fs_sb.h
@@ -4,11 +4,17 @@
#include <linux/list.h>
#include <linux/backing-dev.h>
#include <linux/wait.h>
+#include <linux/nfs_xdr.h>
+#include <linux/sunrpc/xprt.h>
#include <asm/atomic.h>
+struct nfs4_session;
struct nfs_iostats;
struct nlm_host;
+struct nfs4_sequence_args;
+struct nfs4_sequence_res;
+struct nfs_server;
/*
* The nfs_client identifies our client state to the server.
@@ -18,6 +24,7 @@
int cl_cons_state; /* current construction state (-ve: init error) */
#define NFS_CS_READY 0 /* ready to be used */
#define NFS_CS_INITING 1 /* busy initialising */
+#define NFS_CS_SESSION_INITING 2 /* busy initialising session */
unsigned long cl_res_state; /* NFS resources state */
#define NFS_CS_CALLBACK 1 /* - callback started */
#define NFS_CS_IDMAP 2 /* - idmap started */
@@ -32,6 +39,7 @@
const struct nfs_rpc_ops *rpc_ops; /* NFS protocol vector */
int cl_proto; /* Network transport protocol */
+ u32 cl_minorversion;/* NFSv4 minorversion */
struct rpc_cred *cl_machine_cred;
#ifdef CONFIG_NFS_V4
@@ -63,7 +71,22 @@
*/
char cl_ipaddr[48];
unsigned char cl_id_uniquifier;
-#endif
+ int (* cl_call_sync)(struct nfs_server *server,
+ struct rpc_message *msg,
+ struct nfs4_sequence_args *args,
+ struct nfs4_sequence_res *res,
+ int cache_reply);
+#endif /* CONFIG_NFS_V4 */
+
+#ifdef CONFIG_NFS_V4_1
+ /* clientid returned from EXCHANGE_ID, used by session operations */
+ u64 cl_ex_clid;
+ /* The sequence id to use for the next CREATE_SESSION */
+ u32 cl_seqid;
+ /* The flags used for obtaining the clientid during EXCHANGE_ID */
+ u32 cl_exchange_flags;
+ struct nfs4_session *cl_session; /* shared session */
+#endif /* CONFIG_NFS_V4_1 */
#ifdef CONFIG_NFS_FSCACHE
struct fscache_cookie *fscache; /* client index cache cookie */
@@ -145,4 +168,46 @@
#define NFS_CAP_ACLS (1U << 3)
#define NFS_CAP_ATOMIC_OPEN (1U << 4)
+
+/* maximum number of slots to use */
+#define NFS4_MAX_SLOT_TABLE RPC_MAX_SLOT_TABLE
+
+#if defined(CONFIG_NFS_V4_1)
+
+/* Sessions */
+#define SLOT_TABLE_SZ (NFS4_MAX_SLOT_TABLE/(8*sizeof(long)))
+struct nfs4_slot_table {
+ struct nfs4_slot *slots; /* seqid per slot */
+ unsigned long used_slots[SLOT_TABLE_SZ]; /* used/unused bitmap */
+ spinlock_t slot_tbl_lock;
+ struct rpc_wait_queue slot_tbl_waitq; /* allocators may wait here */
+ int max_slots; /* # slots in table */
+ int highest_used_slotid; /* sent to server on each SEQ.
+ * op for dynamic resizing */
+};
+
+static inline int slot_idx(struct nfs4_slot_table *tbl, struct nfs4_slot *sp)
+{
+ return sp - tbl->slots;
+}
+
+/*
+ * Session related parameters
+ */
+struct nfs4_session {
+ struct nfs4_sessionid sess_id;
+ u32 flags;
+ unsigned long session_state;
+ u32 hash_alg;
+ u32 ssv_len;
+
+ /* The fore and back channel */
+ struct nfs4_channel_attrs fc_attrs;
+ struct nfs4_slot_table fc_slot_table;
+ struct nfs4_channel_attrs bc_attrs;
+ struct nfs4_slot_table bc_slot_table;
+ struct nfs_client *clp;
+};
+
+#endif /* CONFIG_NFS_V4_1 */
#endif
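The slot table above is essentially a small bitmap allocator over 'slots', guarded by slot_tbl_lock. A hedged sketch of claiming the lowest free slot is shown below; this is illustrative only and is not the allocator the NFS client patches actually add.

/* Illustrative only: claim the lowest free slot, or return NULL if the
 * table is full.  highest_used_slotid is what gets reported to the server
 * on each SEQUENCE op for dynamic resizing. */
static struct nfs4_slot *example_claim_slot(struct nfs4_slot_table *tbl)
{
	struct nfs4_slot *slot = NULL;
	int slotid;

	spin_lock(&tbl->slot_tbl_lock);
	slotid = find_first_zero_bit(tbl->used_slots, tbl->max_slots);
	if (slotid < tbl->max_slots) {
		__set_bit(slotid, tbl->used_slots);
		if (slotid > tbl->highest_used_slotid)
			tbl->highest_used_slotid = slotid;
		slot = tbl->slots + slotid;
	}
	spin_unlock(&tbl->slot_tbl_lock);
	return slot;
}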
diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h
index b89c34e..62f63fb 100644
--- a/include/linux/nfs_xdr.h
+++ b/include/linux/nfs_xdr.h
@@ -145,6 +145,44 @@
};
struct nfs_seqid;
+
+/* nfs41 sessions channel attributes */
+struct nfs4_channel_attrs {
+ u32 headerpadsz;
+ u32 max_rqst_sz;
+ u32 max_resp_sz;
+ u32 max_resp_sz_cached;
+ u32 max_ops;
+ u32 max_reqs;
+};
+
+/* nfs41 sessions slot seqid */
+struct nfs4_slot {
+ u32 seq_nr;
+};
+
+struct nfs4_sequence_args {
+ struct nfs4_session *sa_session;
+ u8 sa_slotid;
+ u8 sa_cache_this;
+};
+
+struct nfs4_sequence_res {
+ struct nfs4_session *sr_session;
+ u8 sr_slotid; /* slot used to send request */
+ unsigned long sr_renewal_time;
+ int sr_status; /* sequence operation status */
+};
+
+struct nfs4_get_lease_time_args {
+ struct nfs4_sequence_args la_seq_args;
+};
+
+struct nfs4_get_lease_time_res {
+ struct nfs_fsinfo *lr_fsinfo;
+ struct nfs4_sequence_res lr_seq_res;
+};
+
/*
* Arguments to the open call.
*/
@@ -165,6 +203,7 @@
const struct nfs_server *server; /* Needed for ID mapping */
const u32 * bitmask;
__u32 claim;
+ struct nfs4_sequence_args seq_args;
};
struct nfs_openres {
@@ -181,6 +220,7 @@
__u32 do_recall;
__u64 maxsize;
__u32 attrset[NFS4_BITMAP_SIZE];
+ struct nfs4_sequence_res seq_res;
};
/*
@@ -206,6 +246,7 @@
struct nfs_seqid * seqid;
fmode_t fmode;
const u32 * bitmask;
+ struct nfs4_sequence_args seq_args;
};
struct nfs_closeres {
@@ -213,6 +254,7 @@
struct nfs_fattr * fattr;
struct nfs_seqid * seqid;
const struct nfs_server *server;
+ struct nfs4_sequence_res seq_res;
};
/*
* Arguments to the lock, lockt, and locku call.
@@ -233,12 +275,14 @@
unsigned char block : 1;
unsigned char reclaim : 1;
unsigned char new_lock_owner : 1;
+ struct nfs4_sequence_args seq_args;
};
struct nfs_lock_res {
nfs4_stateid stateid;
struct nfs_seqid * lock_seqid;
struct nfs_seqid * open_seqid;
+ struct nfs4_sequence_res seq_res;
};
struct nfs_locku_args {
@@ -246,32 +290,38 @@
struct file_lock * fl;
struct nfs_seqid * seqid;
nfs4_stateid * stateid;
+ struct nfs4_sequence_args seq_args;
};
struct nfs_locku_res {
nfs4_stateid stateid;
struct nfs_seqid * seqid;
+ struct nfs4_sequence_res seq_res;
};
struct nfs_lockt_args {
struct nfs_fh * fh;
struct file_lock * fl;
struct nfs_lowner lock_owner;
+ struct nfs4_sequence_args seq_args;
};
struct nfs_lockt_res {
struct file_lock * denied; /* LOCK, LOCKT failed */
+ struct nfs4_sequence_res seq_res;
};
struct nfs4_delegreturnargs {
const struct nfs_fh *fhandle;
const nfs4_stateid *stateid;
const u32 * bitmask;
+ struct nfs4_sequence_args seq_args;
};
struct nfs4_delegreturnres {
struct nfs_fattr * fattr;
const struct nfs_server *server;
+ struct nfs4_sequence_res seq_res;
};
/*
@@ -284,12 +334,14 @@
__u32 count;
unsigned int pgbase;
struct page ** pages;
+ struct nfs4_sequence_args seq_args;
};
struct nfs_readres {
struct nfs_fattr * fattr;
__u32 count;
int eof;
+ struct nfs4_sequence_res seq_res;
};
/*
@@ -304,6 +356,7 @@
unsigned int pgbase;
struct page ** pages;
const u32 * bitmask;
+ struct nfs4_sequence_args seq_args;
};
struct nfs_writeverf {
@@ -316,6 +369,7 @@
struct nfs_writeverf * verf;
__u32 count;
const struct nfs_server *server;
+ struct nfs4_sequence_res seq_res;
};
/*
@@ -325,12 +379,14 @@
const struct nfs_fh *fh;
struct qstr name;
const u32 * bitmask;
+ struct nfs4_sequence_args seq_args;
};
struct nfs_removeres {
const struct nfs_server *server;
struct nfs4_change_info cinfo;
struct nfs_fattr dir_attr;
+ struct nfs4_sequence_res seq_res;
};
/*
@@ -383,6 +439,7 @@
struct iattr * iap;
const struct nfs_server * server; /* Needed for name mapping */
const u32 * bitmask;
+ struct nfs4_sequence_args seq_args;
};
struct nfs_setaclargs {
@@ -390,6 +447,11 @@
size_t acl_len;
unsigned int acl_pgbase;
struct page ** acl_pages;
+ struct nfs4_sequence_args seq_args;
+};
+
+struct nfs_setaclres {
+ struct nfs4_sequence_res seq_res;
};
struct nfs_getaclargs {
@@ -397,11 +459,18 @@
size_t acl_len;
unsigned int acl_pgbase;
struct page ** acl_pages;
+ struct nfs4_sequence_args seq_args;
+};
+
+struct nfs_getaclres {
+ size_t acl_len;
+ struct nfs4_sequence_res seq_res;
};
struct nfs_setattrres {
struct nfs_fattr * fattr;
const struct nfs_server * server;
+ struct nfs4_sequence_res seq_res;
};
struct nfs_linkargs {
@@ -583,6 +652,7 @@
const struct nfs_fh * fh;
const u32 * bitmask;
u32 access;
+ struct nfs4_sequence_args seq_args;
};
struct nfs4_accessres {
@@ -590,6 +660,7 @@
struct nfs_fattr * fattr;
u32 supported;
u32 access;
+ struct nfs4_sequence_res seq_res;
};
struct nfs4_create_arg {
@@ -609,6 +680,7 @@
const struct iattr * attrs;
const struct nfs_fh * dir_fh;
const u32 * bitmask;
+ struct nfs4_sequence_args seq_args;
};
struct nfs4_create_res {
@@ -617,21 +689,30 @@
struct nfs_fattr * fattr;
struct nfs4_change_info dir_cinfo;
struct nfs_fattr * dir_fattr;
+ struct nfs4_sequence_res seq_res;
};
struct nfs4_fsinfo_arg {
const struct nfs_fh * fh;
const u32 * bitmask;
+ struct nfs4_sequence_args seq_args;
+};
+
+struct nfs4_fsinfo_res {
+ struct nfs_fsinfo *fsinfo;
+ struct nfs4_sequence_res seq_res;
};
struct nfs4_getattr_arg {
const struct nfs_fh * fh;
const u32 * bitmask;
+ struct nfs4_sequence_args seq_args;
};
struct nfs4_getattr_res {
const struct nfs_server * server;
struct nfs_fattr * fattr;
+ struct nfs4_sequence_res seq_res;
};
struct nfs4_link_arg {
@@ -639,6 +720,7 @@
const struct nfs_fh * dir_fh;
const struct qstr * name;
const u32 * bitmask;
+ struct nfs4_sequence_args seq_args;
};
struct nfs4_link_res {
@@ -646,6 +728,7 @@
struct nfs_fattr * fattr;
struct nfs4_change_info cinfo;
struct nfs_fattr * dir_attr;
+ struct nfs4_sequence_res seq_res;
};
@@ -653,21 +736,30 @@
const struct nfs_fh * dir_fh;
const struct qstr * name;
const u32 * bitmask;
+ struct nfs4_sequence_args seq_args;
};
struct nfs4_lookup_res {
const struct nfs_server * server;
struct nfs_fattr * fattr;
struct nfs_fh * fh;
+ struct nfs4_sequence_res seq_res;
};
struct nfs4_lookup_root_arg {
const u32 * bitmask;
+ struct nfs4_sequence_args seq_args;
};
struct nfs4_pathconf_arg {
const struct nfs_fh * fh;
const u32 * bitmask;
+ struct nfs4_sequence_args seq_args;
+};
+
+struct nfs4_pathconf_res {
+ struct nfs_pathconf *pathconf;
+ struct nfs4_sequence_res seq_res;
};
struct nfs4_readdir_arg {
@@ -678,11 +770,13 @@
struct page ** pages; /* zero-copy data */
unsigned int pgbase; /* zero-copy data */
const u32 * bitmask;
+ struct nfs4_sequence_args seq_args;
};
struct nfs4_readdir_res {
nfs4_verifier verifier;
unsigned int pgbase;
+ struct nfs4_sequence_res seq_res;
};
struct nfs4_readlink {
@@ -690,6 +784,11 @@
unsigned int pgbase;
unsigned int pglen; /* zero-copy data */
struct page ** pages; /* zero-copy data */
+ struct nfs4_sequence_args seq_args;
+};
+
+struct nfs4_readlink_res {
+ struct nfs4_sequence_res seq_res;
};
struct nfs4_rename_arg {
@@ -698,6 +797,7 @@
const struct qstr * old_name;
const struct qstr * new_name;
const u32 * bitmask;
+ struct nfs4_sequence_args seq_args;
};
struct nfs4_rename_res {
@@ -706,6 +806,7 @@
struct nfs_fattr * old_fattr;
struct nfs4_change_info new_cinfo;
struct nfs_fattr * new_fattr;
+ struct nfs4_sequence_res seq_res;
};
#define NFS4_SETCLIENTID_NAMELEN (127)
@@ -724,6 +825,17 @@
struct nfs4_statfs_arg {
const struct nfs_fh * fh;
const u32 * bitmask;
+ struct nfs4_sequence_args seq_args;
+};
+
+struct nfs4_statfs_res {
+ struct nfs_fsstat *fsstat;
+ struct nfs4_sequence_res seq_res;
+};
+
+struct nfs4_server_caps_arg {
+ struct nfs_fh *fhandle;
+ struct nfs4_sequence_args seq_args;
};
struct nfs4_server_caps_res {
@@ -731,6 +843,7 @@
u32 acl_bitmask;
u32 has_links;
u32 has_symlinks;
+ struct nfs4_sequence_res seq_res;
};
struct nfs4_string {
@@ -765,10 +878,68 @@
const struct qstr *name;
struct page *page;
const u32 *bitmask;
+ struct nfs4_sequence_args seq_args;
+};
+
+struct nfs4_fs_locations_res {
+ struct nfs4_fs_locations *fs_locations;
+ struct nfs4_sequence_res seq_res;
};
#endif /* CONFIG_NFS_V4 */
+struct nfstime4 {
+ u64 seconds;
+ u32 nseconds;
+};
+
+#ifdef CONFIG_NFS_V4_1
+struct nfs_impl_id4 {
+ u32 domain_len;
+ char *domain;
+ u32 name_len;
+ char *name;
+ struct nfstime4 date;
+};
+
+#define NFS4_EXCHANGE_ID_LEN (48)
+struct nfs41_exchange_id_args {
+ struct nfs_client *client;
+ nfs4_verifier *verifier;
+ unsigned int id_len;
+ char id[NFS4_EXCHANGE_ID_LEN];
+ u32 flags;
+};
+
+struct server_owner {
+ uint64_t minor_id;
+ uint32_t major_id_sz;
+ char major_id[NFS4_OPAQUE_LIMIT];
+};
+
+struct server_scope {
+ uint32_t server_scope_sz;
+ char server_scope[NFS4_OPAQUE_LIMIT];
+};
+
+struct nfs41_exchange_id_res {
+ struct nfs_client *client;
+ u32 flags;
+};
+
+struct nfs41_create_session_args {
+ struct nfs_client *client;
+ uint32_t flags;
+ uint32_t cb_program;
+ struct nfs4_channel_attrs fc_attrs; /* Fore Channel */
+ struct nfs4_channel_attrs bc_attrs; /* Back Channel */
+};
+
+struct nfs41_create_session_res {
+ struct nfs_client *client;
+};
+#endif /* CONFIG_NFS_V4_1 */
+
struct nfs_page;
#define NFS_PAGEVEC_SIZE (8U)
diff --git a/include/linux/nfsd/cache.h b/include/linux/nfsd/cache.h
index 5bccaab..3a3f589 100644
--- a/include/linux/nfsd/cache.h
+++ b/include/linux/nfsd/cache.h
@@ -14,8 +14,7 @@
#include <linux/uio.h>
/*
- * Representation of a reply cache entry. The first two members *must*
- * be hash_next and hash_prev.
+ * Representation of a reply cache entry.
*/
struct svc_cacherep {
struct hlist_node c_hash;
diff --git a/include/linux/nfsd/nfsfh.h b/include/linux/nfsd/nfsfh.h
index afa1901..8f641c9 100644
--- a/include/linux/nfsd/nfsfh.h
+++ b/include/linux/nfsd/nfsfh.h
@@ -151,9 +151,15 @@
__u64 fh_pre_size; /* size before operation */
struct timespec fh_pre_mtime; /* mtime before oper */
struct timespec fh_pre_ctime; /* ctime before oper */
+ /*
+ * pre-op nfsv4 change attr: note must check IS_I_VERSION(inode)
+ * to find out if it is valid.
+ */
+ u64 fh_pre_change;
/* Post-op attributes saved in fh_unlock */
struct kstat fh_post_attr; /* full attrs after operation */
+ u64 fh_post_change; /* nfsv4 change; see above */
#endif /* CONFIG_NFSD_V3 */
} svc_fh;
@@ -298,6 +304,7 @@
fhp->fh_pre_mtime = inode->i_mtime;
fhp->fh_pre_ctime = inode->i_ctime;
fhp->fh_pre_size = inode->i_size;
+ fhp->fh_pre_change = inode->i_version;
fhp->fh_pre_saved = 1;
}
}
diff --git a/include/linux/nfsd/state.h b/include/linux/nfsd/state.h
index 4d61c87..57ab2ed 100644
--- a/include/linux/nfsd/state.h
+++ b/include/linux/nfsd/state.h
@@ -41,7 +41,6 @@
#include <linux/kref.h>
#include <linux/sunrpc/clnt.h>
-#define NFS4_OPAQUE_LIMIT 1024
typedef struct {
u32 cl_boot;
u32 cl_id;
@@ -61,15 +60,6 @@
#define si_stateownerid si_opaque.so_stateownerid
#define si_fileid si_opaque.so_fileid
-
-struct nfs4_cb_recall {
- u32 cbr_ident;
- int cbr_trunc;
- stateid_t cbr_stateid;
- struct knfsd_fh cbr_fh;
- struct nfs4_delegation *cbr_dp;
-};
-
struct nfs4_delegation {
struct list_head dl_perfile;
struct list_head dl_perclnt;
@@ -81,22 +71,25 @@
struct file *dl_vfs_file;
u32 dl_type;
time_t dl_time;
- struct nfs4_cb_recall dl_recall;
+/* For recall: */
+ u32 dl_ident;
+ stateid_t dl_stateid;
+ struct knfsd_fh dl_fh;
+ int dl_retries;
};
-#define dl_stateid dl_recall.cbr_stateid
-#define dl_fh dl_recall.cbr_fh
-
/* client delegation callback info */
-struct nfs4_callback {
+struct nfs4_cb_conn {
/* SETCLIENTID info */
u32 cb_addr;
unsigned short cb_port;
u32 cb_prog;
- u32 cb_ident;
+ u32 cb_minorversion;
+ u32 cb_ident; /* minorversion 0 only */
/* RPC client info */
atomic_t cb_set; /* successful CB_NULL call */
struct rpc_clnt * cb_client;
+ struct rpc_cred * cb_cred;
};
/* Maximum number of slots per session. 128 is useful for long haul TCP */
@@ -122,6 +115,17 @@
struct nfsd4_cache_entry sl_cache_entry;
};
+struct nfsd4_channel_attrs {
+ u32 headerpadsz;
+ u32 maxreq_sz;
+ u32 maxresp_sz;
+ u32 maxresp_cached;
+ u32 maxops;
+ u32 maxreqs;
+ u32 nr_rdma_attrs;
+ u32 rdma_attrs;
+};
+
struct nfsd4_session {
struct kref se_ref;
struct list_head se_hash; /* hash by sessionid */
@@ -129,11 +133,8 @@
u32 se_flags;
struct nfs4_client *se_client; /* for expire_client */
struct nfs4_sessionid se_sessionid;
- u32 se_fmaxreq_sz;
- u32 se_fmaxresp_sz;
- u32 se_fmaxresp_cached;
- u32 se_fmaxops;
- u32 se_fnumslots;
+ struct nfsd4_channel_attrs se_fchannel;
+ struct nfsd4_channel_attrs se_bchannel;
struct nfsd4_slot se_slots[]; /* forward channel slots */
};
@@ -185,7 +186,7 @@
struct svc_cred cl_cred; /* setclientid principal */
clientid_t cl_clientid; /* generated by server */
nfs4_verifier cl_confirm; /* generated by server */
- struct nfs4_callback cl_callback; /* callback info */
+ struct nfs4_cb_conn cl_cb_conn; /* callback info */
atomic_t cl_count; /* ref count */
u32 cl_firststate; /* recovery dir creation */
diff --git a/include/linux/nfsd/xdr4.h b/include/linux/nfsd/xdr4.h
index f80d601..2bacf75 100644
--- a/include/linux/nfsd/xdr4.h
+++ b/include/linux/nfsd/xdr4.h
@@ -64,10 +64,13 @@
struct nfsd4_change_info {
u32 atomic;
+ bool change_supported;
u32 before_ctime_sec;
u32 before_ctime_nsec;
+ u64 before_change;
u32 after_ctime_sec;
u32 after_ctime_nsec;
+ u64 after_change;
};
struct nfsd4_access {
@@ -363,17 +366,6 @@
int spa_how;
};
-struct nfsd4_channel_attrs {
- u32 headerpadsz;
- u32 maxreq_sz;
- u32 maxresp_sz;
- u32 maxresp_cached;
- u32 maxops;
- u32 maxreqs;
- u32 nr_rdma_attrs;
- u32 rdma_attrs;
-};
-
struct nfsd4_create_session {
clientid_t clientid;
struct nfs4_sessionid sessionid;
@@ -503,10 +495,16 @@
{
BUG_ON(!fhp->fh_pre_saved || !fhp->fh_post_saved);
cinfo->atomic = 1;
- cinfo->before_ctime_sec = fhp->fh_pre_ctime.tv_sec;
- cinfo->before_ctime_nsec = fhp->fh_pre_ctime.tv_nsec;
- cinfo->after_ctime_sec = fhp->fh_post_attr.ctime.tv_sec;
- cinfo->after_ctime_nsec = fhp->fh_post_attr.ctime.tv_nsec;
+ cinfo->change_supported = IS_I_VERSION(fhp->fh_dentry->d_inode);
+ if (cinfo->change_supported) {
+ cinfo->before_change = fhp->fh_pre_change;
+ cinfo->after_change = fhp->fh_post_change;
+ } else {
+ cinfo->before_ctime_sec = fhp->fh_pre_ctime.tv_sec;
+ cinfo->before_ctime_nsec = fhp->fh_pre_ctime.tv_nsec;
+ cinfo->after_ctime_sec = fhp->fh_post_attr.ctime.tv_sec;
+ cinfo->after_ctime_nsec = fhp->fh_post_attr.ctime.tv_nsec;
+ }
}
int nfs4svc_encode_voidres(struct svc_rqst *, __be32 *, void *);
diff --git a/include/linux/pci-acpi.h b/include/linux/pci-acpi.h
index 092e82e..93a7c08 100644
--- a/include/linux/pci-acpi.h
+++ b/include/linux/pci-acpi.h
@@ -15,7 +15,7 @@
{
struct pci_bus *pbus = pdev->bus;
/* Find a PCI root bus */
- while (pbus->parent)
+ while (!pci_is_root_bus(pbus))
pbus = pbus->parent;
return acpi_get_pci_rootbridge_handle(pci_domain_nr(pbus),
pbus->number);
@@ -23,7 +23,7 @@
static inline acpi_handle acpi_pci_get_bridge_handle(struct pci_bus *pbus)
{
- if (pbus->parent)
+ if (!pci_is_root_bus(pbus))
return DEVICE_ACPI_HANDLE(&(pbus->self->dev));
return acpi_get_pci_rootbridge_handle(pci_domain_nr(pbus),
pbus->number);
diff --git a/include/linux/pci.h b/include/linux/pci.h
index 8e366bb..d304ddf 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -196,6 +196,7 @@
struct pcie_link_state;
struct pci_vpd;
struct pci_sriov;
+struct pci_ats;
/*
* The pci_dev structure is used to describe PCI devices.
@@ -293,6 +294,7 @@
struct pci_sriov *sriov; /* SR-IOV capability related */
struct pci_dev *physfn; /* the PF this VF is associated with */
};
+ struct pci_ats *ats; /* Address Translation Service */
#endif
};
@@ -607,8 +609,6 @@
struct pci_dev __deprecated *pci_find_device(unsigned int vendor,
unsigned int device,
struct pci_dev *from);
-struct pci_dev __deprecated *pci_find_slot(unsigned int bus,
- unsigned int devfn);
#endif /* CONFIG_PCI_LEGACY */
enum pci_lost_interrupt_reason {
@@ -647,6 +647,7 @@
int where, u16 val);
int pci_bus_write_config_dword(struct pci_bus *bus, unsigned int devfn,
int where, u32 val);
+struct pci_ops *pci_bus_set_ops(struct pci_bus *bus, struct pci_ops *ops);
static inline int pci_read_config_byte(struct pci_dev *dev, int where, u8 *val)
{
@@ -711,8 +712,8 @@
int pcix_set_mmrbc(struct pci_dev *dev, int mmrbc);
int pcie_get_readrq(struct pci_dev *dev);
int pcie_set_readrq(struct pci_dev *dev, int rq);
+int __pci_reset_function(struct pci_dev *dev);
int pci_reset_function(struct pci_dev *dev);
-int pci_execute_reset_function(struct pci_dev *dev);
void pci_update_resource(struct pci_dev *dev, int resno);
int __must_check pci_assign_resource(struct pci_dev *dev, int i);
int pci_select_bars(struct pci_dev *dev, unsigned long flags);
@@ -732,7 +733,7 @@
pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state);
bool pci_pme_capable(struct pci_dev *dev, pci_power_t state);
void pci_pme_active(struct pci_dev *dev, bool enable);
-int pci_enable_wake(struct pci_dev *dev, pci_power_t state, int enable);
+int pci_enable_wake(struct pci_dev *dev, pci_power_t state, bool enable);
int pci_wake_from_d3(struct pci_dev *dev, bool enable);
pci_power_t pci_target_state(struct pci_dev *dev);
int pci_prepare_to_sleep(struct pci_dev *dev);
@@ -798,7 +799,7 @@
int pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max,
int pass);
-void pci_walk_bus(struct pci_bus *top, void (*cb)(struct pci_dev *, void *),
+void pci_walk_bus(struct pci_bus *top, int (*cb)(struct pci_dev *, void *),
void *userdata);
int pci_cfg_space_size_ext(struct pci_dev *dev);
int pci_cfg_space_size(struct pci_dev *dev);
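The pci_walk_bus() callback now returns int; a non-zero return is taken as a request to stop the walk early (assumed semantics of the updated helper). A trivial caller might look like:

/* Hypothetical: count the devices below a bus with the int-returning
 * callback.  Returning non-zero from the callback would stop the walk. */
static int example_count_dev(struct pci_dev *pdev, void *data)
{
	(*(unsigned int *)data)++;
	return 0;	/* keep walking */
}

static unsigned int example_count_bus(struct pci_bus *bus)
{
	unsigned int count = 0;

	pci_walk_bus(bus, example_count_dev, &count);
	return count;
}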
@@ -888,6 +889,17 @@
extern int pcie_aspm_enabled(void);
#endif
+#ifndef CONFIG_PCIE_ECRC
+static inline void pcie_set_ecrc_checking(struct pci_dev *dev)
+{
+ return;
+}
+static inline void pcie_ecrc_get_policy(char *str) {};
+#else
+extern void pcie_set_ecrc_checking(struct pci_dev *dev);
+extern void pcie_ecrc_get_policy(char *str);
+#endif
+
#define pci_enable_msi(pdev) pci_enable_msi_block(pdev, 1)
#ifdef CONFIG_HT_IRQ
@@ -944,12 +956,6 @@
return NULL;
}
-static inline struct pci_dev *pci_find_slot(unsigned int bus,
- unsigned int devfn)
-{
- return NULL;
-}
-
static inline struct pci_dev *pci_get_device(unsigned int vendor,
unsigned int device,
struct pci_dev *from)
@@ -1105,6 +1111,10 @@
#include <asm/pci.h>
+#ifndef PCIBIOS_MAX_MEM_32
+#define PCIBIOS_MAX_MEM_32 (-1)
+#endif
+
/* these helpers provide future and backwards compatibility
* for accessing popular PCI BAR info */
#define pci_resource_start(dev, bar) ((dev)->resource[(bar)].start)
@@ -1261,5 +1271,10 @@
}
#endif
+#if defined(CONFIG_HOTPLUG_PCI) || defined(CONFIG_HOTPLUG_PCI_MODULE)
+extern void pci_hp_create_module_link(struct pci_slot *pci_slot);
+extern void pci_hp_remove_module_link(struct pci_slot *pci_slot);
+#endif
+
#endif /* __KERNEL__ */
#endif /* LINUX_PCI_H */
diff --git a/include/linux/pci_hotplug.h b/include/linux/pci_hotplug.h
index 2099874..b3646cd 100644
--- a/include/linux/pci_hotplug.h
+++ b/include/linux/pci_hotplug.h
@@ -66,17 +66,10 @@
PCIE_LNK_SPEED_UNKNOWN = 0xFF,
};
-struct hotplug_slot;
-struct hotplug_slot_attribute {
- struct attribute attr;
- ssize_t (*show)(struct hotplug_slot *, char *);
- ssize_t (*store)(struct hotplug_slot *, const char *, size_t);
-};
-#define to_hotplug_attr(n) container_of(n, struct hotplug_slot_attribute, attr);
-
/**
* struct hotplug_slot_ops - the callbacks that the hotplug pci core can use
* @owner: The module owner of this structure
+ * @mod_name: The module name (KBUILD_MODNAME) of this structure
* @enable_slot: Called when the user wants to enable a specific pci slot
* @disable_slot: Called when the user wants to disable a specific pci slot
* @set_attention_status: Called to set the specific slot's attention LED to
@@ -109,6 +102,7 @@
*/
struct hotplug_slot_ops {
struct module *owner;
+ const char *mod_name;
int (*enable_slot) (struct hotplug_slot *slot);
int (*disable_slot) (struct hotplug_slot *slot);
int (*set_attention_status) (struct hotplug_slot *slot, u8 value);
@@ -167,12 +161,21 @@
return pci_slot_name(slot->pci_slot);
}
-extern int pci_hp_register(struct hotplug_slot *, struct pci_bus *, int nr,
- const char *name);
+extern int __pci_hp_register(struct hotplug_slot *slot, struct pci_bus *pbus,
+ int nr, const char *name,
+ struct module *owner, const char *mod_name);
extern int pci_hp_deregister(struct hotplug_slot *slot);
extern int __must_check pci_hp_change_slot_info (struct hotplug_slot *slot,
struct hotplug_slot_info *info);
+static inline int pci_hp_register(struct hotplug_slot *slot,
+ struct pci_bus *pbus,
+ int devnr, const char *name)
+{
+ return __pci_hp_register(slot, pbus, devnr, name,
+ THIS_MODULE, KBUILD_MODNAME);
+}
+
/* PCI Setting Record (Type 0) */
struct hpp_type0 {
u32 revision;
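With the wrapper above, existing hotplug drivers keep calling pci_hp_register() unchanged while THIS_MODULE and KBUILD_MODNAME are recorded for them, which is what lets the core create the slot's module symlink via pci_hp_create_module_link(). A hypothetical call site (slot, bus and name are placeholders):

/* Hypothetical registration from a hotplug controller driver. */
static int example_register_slot(struct hotplug_slot *slot,
				 struct pci_bus *bus, int devnr)
{
	/* The pci_hp_register() wrapper passes THIS_MODULE and
	 * KBUILD_MODNAME on to __pci_hp_register() for us. */
	return pci_hp_register(slot, bus, devnr, "example-slot-1");
}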
diff --git a/include/linux/pci_regs.h b/include/linux/pci_regs.h
index 616bf8b..fcaee42 100644
--- a/include/linux/pci_regs.h
+++ b/include/linux/pci_regs.h
@@ -295,8 +295,9 @@
#define PCI_MSI_ADDRESS_LO 4 /* Lower 32 bits */
#define PCI_MSI_ADDRESS_HI 8 /* Upper 32 bits (if PCI_MSI_FLAGS_64BIT set) */
#define PCI_MSI_DATA_32 8 /* 16 bits of data for 32-bit devices */
+#define PCI_MSI_MASK_32 12 /* Mask bits register for 32-bit devices */
#define PCI_MSI_DATA_64 12 /* 16 bits of data for 64-bit devices */
-#define PCI_MSI_MASK_BIT 16 /* Mask bits register */
+#define PCI_MSI_MASK_64 16 /* Mask bits register for 64-bit devices */
/* MSI-X registers (these are at offset PCI_MSIX_FLAGS) */
#define PCI_MSIX_FLAGS 2
@@ -304,7 +305,6 @@
#define PCI_MSIX_FLAGS_ENABLE (1 << 15)
#define PCI_MSIX_FLAGS_MASKALL (1 << 14)
#define PCI_MSIX_FLAGS_BIRMASK (7 << 0)
-#define PCI_MSIX_FLAGS_BITMASK (1 << 0)
/* CompactPCI Hotswap Register */
@@ -502,6 +502,7 @@
#define PCI_EXT_CAP_ID_DSN 3
#define PCI_EXT_CAP_ID_PWR 4
#define PCI_EXT_CAP_ID_ARI 14
+#define PCI_EXT_CAP_ID_ATS 15
#define PCI_EXT_CAP_ID_SRIOV 16
/* Advanced Error Reporting */
@@ -620,6 +621,15 @@
#define PCI_ARI_CTRL_ACS 0x0002 /* ACS Function Groups Enable */
#define PCI_ARI_CTRL_FG(x) (((x) >> 4) & 7) /* Function Group */
+/* Address Translation Service */
+#define PCI_ATS_CAP 0x04 /* ATS Capability Register */
+#define PCI_ATS_CAP_QDEP(x) ((x) & 0x1f) /* Invalidate Queue Depth */
+#define PCI_ATS_MAX_QDEP 32 /* Max Invalidate Queue Depth */
+#define PCI_ATS_CTRL 0x06 /* ATS Control Register */
+#define PCI_ATS_CTRL_ENABLE 0x8000 /* ATS Enable */
+#define PCI_ATS_CTRL_STU(x) ((x) & 0x1f) /* Smallest Translation Unit */
+#define PCI_ATS_MIN_STU 12 /* shift of minimum STU block */
+
/* Single Root I/O Virtualization */
#define PCI_SRIOV_CAP 0x04 /* SR-IOV Capabilities */
#define PCI_SRIOV_CAP_VFM 0x01 /* VF Migration Capable */
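A hedged sketch of using the new ATS definitions to look up a device's invalidate queue depth; the capability walk uses pci_find_ext_capability(), and treating a zero queue-depth field as "maximum depth" is an assumption of this sketch (it matches the ATS spec's encoding).

/* Hypothetical helper: report the ATS invalidate queue depth of a device,
 * or a negative errno if the device has no ATS capability. */
static int example_ats_queue_depth(struct pci_dev *dev)
{
	int pos;
	u16 cap;

	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ATS);
	if (!pos)
		return -ENODEV;

	pci_read_config_word(dev, pos + PCI_ATS_CAP, &cap);

	/* A queue-depth field of 0 encodes the maximum depth. */
	return PCI_ATS_CAP_QDEP(cap) ? PCI_ATS_CAP_QDEP(cap) : PCI_ATS_MAX_QDEP;
}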
diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h
index 1b3118a..89698d8 100644
--- a/include/linux/perf_counter.h
+++ b/include/linux/perf_counter.h
@@ -236,10 +236,16 @@
/*
* Control data for the mmap() data buffer.
*
- * User-space reading this value should issue an rmb(), on SMP capable
- * platforms, after reading this value -- see perf_counter_wakeup().
+ * User-space reading the @data_head value should issue an rmb(), on
+ * SMP capable platforms, after reading this value -- see
+ * perf_counter_wakeup().
+ *
+ * When the mapping is PROT_WRITE the @data_tail value should be
+ * written by userspace to reflect the last read data. In this case
+ * the kernel will not over-write unread data.
*/
__u64 data_head; /* head in the data section */
+ __u64 data_tail; /* user-space written tail */
};
#define PERF_EVENT_MISC_CPUMODE_MASK (3 << 0)
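A user-space sketch of the read protocol described above: read data_head, issue a read barrier, consume records, then (for a PROT_WRITE mapping) publish the new data_tail. The barrier macros and the omitted wrap-around handling are assumptions of this sketch; real tooling uses architecture-appropriate barriers.

#include <stdint.h>

/* Placeholder barriers for the sketch; real code uses arch-specific ones. */
#define example_rmb()	__sync_synchronize()
#define example_mb()	__sync_synchronize()

/* 'pc' is the mmap'ed perf_counter_mmap_page, 'data' the following data
 * pages of 'size' bytes.  Wrap-around of individual records across the
 * end of the buffer is ignored here for brevity. */
static void example_drain(struct perf_counter_mmap_page *pc,
			  unsigned char *data, uint64_t size)
{
	uint64_t head = pc->data_head;
	uint64_t tail = pc->data_tail;

	example_rmb();		/* order record reads after reading data_head */

	while (tail < head) {
		struct perf_event_header *hdr =
			(struct perf_event_header *)&data[tail % size];

		/* ... decode one record of hdr->size bytes here ... */
		tail += hdr->size;
	}

	example_mb();		/* finish reading before publishing tail */
	pc->data_tail = tail;	/* kernel may now overwrite what we consumed */
}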
@@ -275,6 +281,15 @@
/*
* struct {
+ * struct perf_event_header header;
+ * u64 id;
+ * u64 lost;
+ * };
+ */
+ PERF_EVENT_LOST = 2,
+
+ /*
+ * struct {
* struct perf_event_header header;
*
* u32 pid, tid;
@@ -313,30 +328,39 @@
/*
* When header.misc & PERF_EVENT_MISC_OVERFLOW the event_type field
- * will be PERF_RECORD_*
+ * will be PERF_SAMPLE_*
*
* struct {
* struct perf_event_header header;
*
- * { u64 ip; } && PERF_RECORD_IP
- * { u32 pid, tid; } && PERF_RECORD_TID
- * { u64 time; } && PERF_RECORD_TIME
- * { u64 addr; } && PERF_RECORD_ADDR
- * { u64 config; } && PERF_RECORD_CONFIG
- * { u32 cpu, res; } && PERF_RECORD_CPU
+ * { u64 ip; } && PERF_SAMPLE_IP
+ * { u32 pid, tid; } && PERF_SAMPLE_TID
+ * { u64 time; } && PERF_SAMPLE_TIME
+ * { u64 addr; } && PERF_SAMPLE_ADDR
+ * { u64 config; } && PERF_SAMPLE_CONFIG
+ * { u32 cpu, res; } && PERF_SAMPLE_CPU
*
* { u64 nr;
- * { u64 id, val; } cnt[nr]; } && PERF_RECORD_GROUP
+ * { u64 id, val; } cnt[nr]; } && PERF_SAMPLE_GROUP
*
- * { u16 nr,
- * hv,
- * kernel,
- * user;
- * u64 ips[nr]; } && PERF_RECORD_CALLCHAIN
+ * { u64 nr,
+ * u64 ips[nr]; } && PERF_SAMPLE_CALLCHAIN
* };
*/
};
+enum perf_callchain_context {
+ PERF_CONTEXT_HV = (__u64)-32,
+ PERF_CONTEXT_KERNEL = (__u64)-128,
+ PERF_CONTEXT_USER = (__u64)-512,
+
+ PERF_CONTEXT_GUEST = (__u64)-2048,
+ PERF_CONTEXT_GUEST_KERNEL = (__u64)-2176,
+ PERF_CONTEXT_GUEST_USER = (__u64)-2560,
+
+ PERF_CONTEXT_MAX = (__u64)-4095,
+};
+
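Since PERF_SAMPLE_CALLCHAIN records are now just nr u64 entries, the PERF_CONTEXT_* values above act as in-band markers: any entry at or above PERF_CONTEXT_MAX (as an unsigned value) switches the context for the addresses that follow instead of being an address itself. A hypothetical user-space decoder:

#include <stdio.h>
#include <linux/types.h>

/* Hypothetical: print one callchain, labelling each ip with its context. */
static void demo_walk_callchain(const __u64 *ips, __u64 nr)
{
	const char *ctx = "unknown";
	__u64 i;

	for (i = 0; i < nr; i++) {
		if (ips[i] >= PERF_CONTEXT_MAX) {
			if (ips[i] == PERF_CONTEXT_KERNEL)
				ctx = "kernel";
			else if (ips[i] == PERF_CONTEXT_USER)
				ctx = "user";
			else if (ips[i] == PERF_CONTEXT_HV)
				ctx = "hv";
			continue;	/* marker, not an address */
		}
		printf("%s: %#llx\n", ctx, (unsigned long long)ips[i]);
	}
}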
#ifdef __KERNEL__
/*
* Kernel-internal data types and definitions:
@@ -356,6 +380,13 @@
#include <linux/pid_namespace.h>
#include <asm/atomic.h>
+#define PERF_MAX_STACK_DEPTH 255
+
+struct perf_callchain_entry {
+ __u64 nr;
+ __u64 ip[PERF_MAX_STACK_DEPTH];
+};
+
struct task_struct;
/**
@@ -414,6 +445,7 @@
struct perf_mmap_data {
struct rcu_head rcu_head;
int nr_pages; /* nr of data pages */
+ int writable; /* are we writable */
int nr_locked; /* nr pages mlocked */
atomic_t poll; /* POLL_ for wakeups */
@@ -423,8 +455,8 @@
atomic_long_t done_head; /* completed head */
atomic_t lock; /* concurrent writes */
-
atomic_t wakeup; /* needs a wakeup */
+ atomic_t lost; /* nr records lost */
struct perf_counter_mmap_page *user_page;
void *data_pages[0];
@@ -604,6 +636,7 @@
extern int perf_counter_init_task(struct task_struct *child);
extern void perf_counter_exit_task(struct task_struct *child);
extern void perf_counter_free_task(struct task_struct *task);
+extern void set_perf_counter_pending(void);
extern void perf_counter_do_pending(void);
extern void perf_counter_print_debug(void);
extern void __perf_disable(void);
@@ -649,18 +682,6 @@
extern void perf_counter_comm(struct task_struct *tsk);
extern void perf_counter_fork(struct task_struct *tsk);
-extern void perf_counter_task_migration(struct task_struct *task, int cpu);
-
-#define MAX_STACK_DEPTH 255
-
-struct perf_callchain_entry {
- u16 nr;
- u16 hv;
- u16 kernel;
- u16 user;
- u64 ip[MAX_STACK_DEPTH];
-};
-
extern struct perf_callchain_entry *perf_callchain(struct pt_regs *regs);
extern int sysctl_perf_counter_paranoid;
@@ -701,8 +722,6 @@
static inline void perf_counter_comm(struct task_struct *tsk) { }
static inline void perf_counter_fork(struct task_struct *tsk) { }
static inline void perf_counter_init(void) { }
-static inline void perf_counter_task_migration(struct task_struct *task,
- int cpu) { }
#endif
#endif /* __KERNEL__ */
diff --git a/include/linux/rfkill.h b/include/linux/rfkill.h
index 16e39c7..e73e242 100644
--- a/include/linux/rfkill.h
+++ b/include/linux/rfkill.h
@@ -160,8 +160,9 @@
* the rfkill structure. Before calling this function the driver needs
* to be ready to service method calls from rfkill.
*
- * If the software blocked state is not set before registration,
- * set_block will be called to initialize it to a default value.
+ * If rfkill_init_sw_state() is not called before registration,
+ * set_block() will be called to initialize the software blocked state
+ * to a default value.
*
* If the hardware blocked state is not set before registration,
* it is assumed to be unblocked.
@@ -234,9 +235,11 @@
* rfkill drivers that get events when the soft-blocked state changes
* (yes, some platforms directly act on input but allow changing again)
* use this function to notify the rfkill core (and through that also
- * userspace) of the current state. It is not necessary to notify on
- * resume; since hibernation can always change the soft-blocked state,
- * the rfkill core will unconditionally restore the previous state.
+ * userspace) of the current state.
+ *
+ * Drivers should also call this function after resume if the state has
+ * been changed by the user. This only makes sense for "persistent"
+ * devices (see rfkill_init_sw_state()).
*
* This function can be called in any context, even from within rfkill
* callbacks.
@@ -247,6 +250,22 @@
bool rfkill_set_sw_state(struct rfkill *rfkill, bool blocked);
/**
+ * rfkill_init_sw_state - Initialize persistent software block state
+ * @rfkill: pointer to the rfkill class to modify.
+ * @state: the current software block state to set
+ *
+ * rfkill drivers that preserve their software block state over power off
+ * use this function to notify the rfkill core (and through that also
+ * userspace) of their initial state. It should only be used before
+ * registration.
+ *
+ * In addition, it marks the device as "persistent", an attribute which
+ * can be read by userspace. Persistent devices are expected to preserve
+ * their own state when suspended.
+ */
+void rfkill_init_sw_state(struct rfkill *rfkill, bool blocked);
+
+/**
* rfkill_set_states - Set the internal rfkill block states
* @rfkill: pointer to the rfkill class to modify.
* @sw: the current software block state to set
@@ -307,6 +326,10 @@
return blocked;
}
+static inline void rfkill_init_sw_state(struct rfkill *rfkill, bool blocked)
+{
+}
+
static inline void rfkill_set_states(struct rfkill *rfkill, bool sw, bool hw)
{
}
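Reading the two kernel-doc updates together: a driver whose hardware remembers its soft-block setting across power off should seed that state with rfkill_init_sw_state() before registration, and report later changes (including those discovered after resume) with rfkill_set_sw_state(). A minimal, hypothetical probe path under that assumption:

#include <linux/rfkill.h>

/* demo_hw_soft_blocked() stands in for however the hardware reports its
 * persistent soft-block setting; all names here are illustrative. */
static int demo_rfkill_probe(struct device *dev, const struct rfkill_ops *ops,
			     bool (*demo_hw_soft_blocked)(void))
{
	struct rfkill *rfk;
	int err;

	rfk = rfkill_alloc("demo-wlan", dev, RFKILL_TYPE_WLAN, ops, NULL);
	if (!rfk)
		return -ENOMEM;

	/* seed the persistent state; this also marks the device "persistent" */
	rfkill_init_sw_state(rfk, demo_hw_soft_blocked());

	err = rfkill_register(rfk);
	if (err)
		rfkill_destroy(rfk);
	return err;
}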
diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h
index 6fd80c4..23d2fb0 100644
--- a/include/linux/serial_core.h
+++ b/include/linux/serial_core.h
@@ -171,6 +171,9 @@
/* Timberdale UART */
#define PORT_TIMBUART 87
+/* Qualcomm MSM SoCs */
+#define PORT_MSM 88
+
#ifdef __KERNEL__
#include <linux/compiler.h>
diff --git a/include/linux/serial_reg.h b/include/linux/serial_reg.h
index 96c0d93..850db2e 100644
--- a/include/linux/serial_reg.h
+++ b/include/linux/serial_reg.h
@@ -323,6 +323,7 @@
#define UART_OMAP_MVER 0x14 /* Module version register */
#define UART_OMAP_SYSC 0x15 /* System configuration register */
#define UART_OMAP_SYSS 0x16 /* System status register */
+#define UART_OMAP_WER 0x17 /* Wake-up enable register */
#endif /* _LINUX_SERIAL_REG_H */
diff --git a/include/linux/sunrpc/bc_xprt.h b/include/linux/sunrpc/bc_xprt.h
new file mode 100644
index 0000000..6508f0d
--- /dev/null
+++ b/include/linux/sunrpc/bc_xprt.h
@@ -0,0 +1,49 @@
+/******************************************************************************
+
+(c) 2008 NetApp. All Rights Reserved.
+
+NetApp provides this source code under the GPL v2 License.
+The GPL v2 license is available at
+http://opensource.org/licenses/gpl-license.php.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+******************************************************************************/
+
+/*
+ * Functions to create and manage the backchannel
+ */
+
+#ifndef _LINUX_SUNRPC_BC_XPRT_H
+#define _LINUX_SUNRPC_BC_XPRT_H
+
+#include <linux/sunrpc/svcsock.h>
+#include <linux/sunrpc/xprt.h>
+#include <linux/sunrpc/sched.h>
+
+#ifdef CONFIG_NFS_V4_1
+struct rpc_rqst *xprt_alloc_bc_request(struct rpc_xprt *xprt);
+void xprt_free_bc_request(struct rpc_rqst *req);
+int xprt_setup_backchannel(struct rpc_xprt *, unsigned int min_reqs);
+void xprt_destroy_backchannel(struct rpc_xprt *, int max_reqs);
+void bc_release_request(struct rpc_task *);
+int bc_send(struct rpc_rqst *req);
+#else /* CONFIG_NFS_V4_1 */
+static inline int xprt_setup_backchannel(struct rpc_xprt *xprt,
+ unsigned int min_reqs)
+{
+ return 0;
+}
+#endif /* CONFIG_NFS_V4_1 */
+#endif /* _LINUX_SUNRPC_BC_XPRT_H */
+
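Note that only xprt_setup_backchannel() gets a !CONFIG_NFS_V4_1 stub; the other declarations, including xprt_destroy_backchannel(), are visible to NFSv4.1 code alone, so the sketch below assumes CONFIG_NFS_V4_1. How a session setup/teardown path might use the pair (the slot count is illustrative):

#include <linux/sunrpc/xprt.h>
#include <linux/sunrpc/bc_xprt.h>

#define DEMO_BC_SLOTS	1	/* hypothetical number of preallocated requests */

static int demo_session_bc_setup(struct rpc_xprt *xprt)
{
	/* preallocate rpc_rqst's for callbacks arriving on the fore channel */
	return xprt_setup_backchannel(xprt, DEMO_BC_SLOTS);
}

static void demo_session_bc_teardown(struct rpc_xprt *xprt)
{
	xprt_destroy_backchannel(xprt, DEMO_BC_SLOTS);
}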
diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
index c39a210..37881f1a 100644
--- a/include/linux/sunrpc/clnt.h
+++ b/include/linux/sunrpc/clnt.h
@@ -143,6 +143,7 @@
const struct rpc_message *msg, int flags);
struct rpc_task *rpc_call_null(struct rpc_clnt *clnt, struct rpc_cred *cred,
int flags);
+void rpc_restart_call_prepare(struct rpc_task *);
void rpc_restart_call(struct rpc_task *);
void rpc_setbufsize(struct rpc_clnt *, unsigned int, unsigned int);
size_t rpc_max_payload(struct rpc_clnt *);
diff --git a/include/linux/sunrpc/sched.h b/include/linux/sunrpc/sched.h
index 64981a2..4010977 100644
--- a/include/linux/sunrpc/sched.h
+++ b/include/linux/sunrpc/sched.h
@@ -210,6 +210,8 @@
*/
struct rpc_task *rpc_new_task(const struct rpc_task_setup *);
struct rpc_task *rpc_run_task(const struct rpc_task_setup *);
+struct rpc_task *rpc_run_bc_task(struct rpc_rqst *req,
+ const struct rpc_call_ops *ops);
void rpc_put_task(struct rpc_task *);
void rpc_exit_task(struct rpc_task *);
void rpc_release_calldata(const struct rpc_call_ops *, void *);
@@ -237,6 +239,7 @@
int rpc_init_mempool(void);
void rpc_destroy_mempool(void);
extern struct workqueue_struct *rpciod_workqueue;
+void rpc_prepare_task(struct rpc_task *task);
static inline void rpc_exit(struct rpc_task *task, int status)
{
diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h
index 2a30775..ea80096 100644
--- a/include/linux/sunrpc/svc.h
+++ b/include/linux/sunrpc/svc.h
@@ -96,6 +96,15 @@
svc_thread_fn sv_function; /* main function for threads */
unsigned int sv_drc_max_pages; /* Total pages for DRC */
unsigned int sv_drc_pages_used;/* DRC pages used */
+#if defined(CONFIG_NFS_V4_1)
+ struct list_head sv_cb_list; /* queue for callback requests
+ * that arrive over the same
+ * connection */
+ spinlock_t sv_cb_lock; /* protects the svc_cb_list */
+ wait_queue_head_t sv_cb_waitq; /* sleep here if there are no
+ * entries in the svc_cb_list */
+ struct svc_xprt *bc_xprt;
+#endif /* CONFIG_NFS_V4_1 */
};
/*
@@ -411,6 +420,8 @@
int svc_pool_stats_open(struct svc_serv *serv, struct file *file);
void svc_destroy(struct svc_serv *);
int svc_process(struct svc_rqst *);
+int bc_svc_process(struct svc_serv *, struct rpc_rqst *,
+ struct svc_rqst *);
int svc_register(const struct svc_serv *, const int,
const unsigned short, const unsigned short);
diff --git a/include/linux/sunrpc/svc_xprt.h b/include/linux/sunrpc/svc_xprt.h
index 0d9cb6e..2223ae0 100644
--- a/include/linux/sunrpc/svc_xprt.h
+++ b/include/linux/sunrpc/svc_xprt.h
@@ -83,7 +83,7 @@
int svc_print_xprts(char *buf, int maxlen);
struct svc_xprt *svc_find_xprt(struct svc_serv *serv, const char *xcl_name,
const sa_family_t af, const unsigned short port);
-int svc_xprt_names(struct svc_serv *serv, char *buf, int buflen);
+int svc_xprt_names(struct svc_serv *serv, char *buf, const int buflen);
static inline void svc_xprt_get(struct svc_xprt *xprt)
{
@@ -118,7 +118,7 @@
return 0;
}
-static inline size_t svc_addr_len(struct sockaddr *sa)
+static inline size_t svc_addr_len(const struct sockaddr *sa)
{
switch (sa->sa_family) {
case AF_INET:
@@ -126,7 +126,8 @@
case AF_INET6:
return sizeof(struct sockaddr_in6);
}
- return -EAFNOSUPPORT;
+
+ return 0;
}
static inline unsigned short svc_xprt_local_port(const struct svc_xprt *xprt)
diff --git a/include/linux/sunrpc/svcsock.h b/include/linux/sunrpc/svcsock.h
index 483e103..04dba23 100644
--- a/include/linux/sunrpc/svcsock.h
+++ b/include/linux/sunrpc/svcsock.h
@@ -38,10 +38,15 @@
int svc_send(struct svc_rqst *);
void svc_drop(struct svc_rqst *);
void svc_sock_update_bufs(struct svc_serv *serv);
-int svc_sock_names(char *buf, struct svc_serv *serv, char *toclose);
-int svc_addsock(struct svc_serv *serv, int fd, char *name_return);
+int svc_sock_names(struct svc_serv *serv, char *buf,
+ const size_t buflen,
+ const char *toclose);
+int svc_addsock(struct svc_serv *serv, const int fd,
+ char *name_return, const size_t len);
void svc_init_xprt_sock(void);
void svc_cleanup_xprt_sock(void);
+struct svc_xprt *svc_sock_create(struct svc_serv *serv, int prot);
+void svc_sock_destroy(struct svc_xprt *);
/*
* svc_makesock socket characteristics
diff --git a/include/linux/sunrpc/xprt.h b/include/linux/sunrpc/xprt.h
index 08afe43..1175d58 100644
--- a/include/linux/sunrpc/xprt.h
+++ b/include/linux/sunrpc/xprt.h
@@ -67,7 +67,8 @@
struct rpc_task * rq_task; /* RPC task data */
__be32 rq_xid; /* request XID */
int rq_cong; /* has incremented xprt->cong */
- int rq_received; /* receive completed */
+ int rq_reply_bytes_recvd; /* number of reply */
+ /* bytes received */
u32 rq_seqno; /* gss seq no. used on req. */
int rq_enc_pages_num;
struct page **rq_enc_pages; /* scratch pages for use by
@@ -97,6 +98,12 @@
unsigned long rq_xtime; /* when transmitted */
int rq_ntrans;
+
+#if defined(CONFIG_NFS_V4_1)
+ struct list_head rq_bc_list; /* Callback service list */
+ unsigned long rq_bc_pa_state; /* Backchannel prealloc state */
+ struct list_head rq_bc_pa_list; /* Backchannel prealloc list */
+#endif /* CONFIG_NFS_V4_1 */
};
#define rq_svec rq_snd_buf.head
#define rq_slen rq_snd_buf.len
@@ -174,6 +181,15 @@
spinlock_t reserve_lock; /* lock slot table */
u32 xid; /* Next XID value to use */
struct rpc_task * snd_task; /* Task blocked in send */
+#if defined(CONFIG_NFS_V4_1)
+ struct svc_serv *bc_serv; /* The RPC service which will */
+ /* process the callback */
+ unsigned int bc_alloc_count; /* Total number of preallocs */
+ spinlock_t bc_pa_lock; /* Protects the preallocated
+ * items */
+ struct list_head bc_pa_list; /* List of preallocated
+ * backchannel rpc_rqst's */
+#endif /* CONFIG_NFS_V4_1 */
struct list_head recv;
struct {
@@ -192,6 +208,26 @@
const char *address_strings[RPC_DISPLAY_MAX];
};
+#if defined(CONFIG_NFS_V4_1)
+/*
+ * Backchannel flags
+ */
+#define RPC_BC_PA_IN_USE 0x0001 /* Preallocated backchannel */
+ /* buffer in use */
+#endif /* CONFIG_NFS_V4_1 */
+
+#if defined(CONFIG_NFS_V4_1)
+static inline int bc_prealloc(struct rpc_rqst *req)
+{
+ return test_bit(RPC_BC_PA_IN_USE, &req->rq_bc_pa_state);
+}
+#else
+static inline int bc_prealloc(struct rpc_rqst *req)
+{
+ return 0;
+}
+#endif /* CONFIG_NFS_V4_1 */
+
struct xprt_create {
int ident; /* XPRT_TRANSPORT identifier */
struct sockaddr * srcaddr; /* optional local address */
diff --git a/include/linux/trace_seq.h b/include/linux/trace_seq.h
index c68bccb..c134dd1 100644
--- a/include/linux/trace_seq.h
+++ b/include/linux/trace_seq.h
@@ -3,6 +3,8 @@
#include <linux/fs.h>
+#include <asm/page.h>
+
/*
* Trace sequences are used to allow a function to call several other functions
* to create a string of data to use (up to a max of PAGE_SIZE.
diff --git a/include/linux/videodev2.h b/include/linux/videodev2.h
index f24ecee..8a025d5 100644
--- a/include/linux/videodev2.h
+++ b/include/linux/videodev2.h
@@ -348,6 +348,7 @@
#define V4L2_PIX_FMT_SQ905C v4l2_fourcc('9', '0', '5', 'C') /* compressed RGGB bayer */
#define V4L2_PIX_FMT_PJPG v4l2_fourcc('P', 'J', 'P', 'G') /* Pixart 73xx JPEG */
#define V4L2_PIX_FMT_YVYU v4l2_fourcc('Y', 'V', 'Y', 'U') /* 16 YVU 4:2:2 */
+#define V4L2_PIX_FMT_OV511 v4l2_fourcc('O', '5', '1', '1') /* ov511 JPEG */
#define V4L2_PIX_FMT_OV518 v4l2_fourcc('O', '5', '1', '8') /* ov518 JPEG */
/*
@@ -894,9 +895,10 @@
V4L2_COLORFX_BW = 1,
V4L2_COLORFX_SEPIA = 2,
};
+#define V4L2_CID_AUTOBRIGHTNESS (V4L2_CID_BASE+32)
/* last CID + 1 */
-#define V4L2_CID_LASTP1 (V4L2_CID_BASE+32)
+#define V4L2_CID_LASTP1 (V4L2_CID_BASE+33)
/* MPEG-class control IDs defined by V4L2 */
#define V4L2_CID_MPEG_BASE (V4L2_CTRL_CLASS_MPEG | 0x900)
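Because V4L2_CID_AUTOBRIGHTNESS lands in the ordinary user-control range (with V4L2_CID_LASTP1 bumped accordingly), applications reach it through the standard control ioctls; nothing new is needed beyond the ID. A user-space sketch (hypothetical helper, error handling omitted):

#include <sys/ioctl.h>
#include <linux/videodev2.h>

/* Enable automatic brightness on an already-open video device. */
static int demo_enable_autobrightness(int fd)
{
	struct v4l2_control ctrl = {
		.id	= V4L2_CID_AUTOBRIGHTNESS,
		.value	= 1,
	};

	return ioctl(fd, VIDIOC_S_CTRL, &ctrl);
}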
diff --git a/include/media/ir-common.h b/include/media/ir-common.h
index 7b5b91f..9dcb632 100644
--- a/include/media/ir-common.h
+++ b/include/media/ir-common.h
@@ -162,6 +162,8 @@
extern IR_KEYTAB_TYPE ir_codes_kworld_plus_tv_analog[IR_KEYTAB_SIZE];
extern IR_KEYTAB_TYPE ir_codes_kaiomy[IR_KEYTAB_SIZE];
extern IR_KEYTAB_TYPE ir_codes_dm1105_nec[IR_KEYTAB_SIZE];
+extern IR_KEYTAB_TYPE ir_codes_evga_indtube[IR_KEYTAB_SIZE];
+
#endif
/*
diff --git a/include/media/v4l2-common.h b/include/media/v4l2-common.h
index c48c24e..33a1842 100644
--- a/include/media/v4l2-common.h
+++ b/include/media/v4l2-common.h
@@ -153,6 +153,22 @@
struct v4l2_subdev *v4l2_i2c_new_probed_subdev_addr(struct v4l2_device *v4l2_dev,
struct i2c_adapter *adapter,
const char *module_name, const char *client_type, u8 addr);
+
+/* Load an i2c module and return an initialized v4l2_subdev struct.
+ Only call request_module if module_name != NULL.
+ The client_type argument is the name of the chip that's on the adapter. */
+struct v4l2_subdev *v4l2_i2c_new_subdev_cfg(struct v4l2_device *v4l2_dev,
+ struct i2c_adapter *adapter,
+ const char *module_name, const char *client_type,
+ int irq, void *platform_data,
+ u8 addr, const unsigned short *probe_addrs);
+
+struct i2c_board_info;
+
+struct v4l2_subdev *v4l2_i2c_new_subdev_board(struct v4l2_device *v4l2_dev,
+ struct i2c_adapter *adapter, const char *module_name,
+ struct i2c_board_info *info, const unsigned short *probe_addrs);
+
/* Initialize an v4l2_subdev with data from an i2c_client struct */
void v4l2_i2c_subdev_init(struct v4l2_subdev *sd, struct i2c_client *client,
const struct v4l2_subdev_ops *ops);
@@ -193,4 +209,14 @@
u32 output;
};
+/* ------------------------------------------------------------------------- */
+
+/* Miscellaneous helper functions */
+
+void v4l_bound_align_image(unsigned int *w, unsigned int wmin,
+ unsigned int wmax, unsigned int walign,
+ unsigned int *h, unsigned int hmin,
+ unsigned int hmax, unsigned int halign,
+ unsigned int salign);
+
#endif /* V4L2_COMMON_H_ */
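v4l_bound_align_image() is the new shared helper for clamping a requested frame size to hardware limits; the walign/halign/salign arguments are bit shifts (an alignment of 1 << n). A hypothetical try_fmt-style use with made-up limits:

#include <media/v4l2-common.h>

/* Illustrative limits: width 48..720 aligned to 4 (walign = 2),
 * height 32..576 aligned to 2 (halign = 1), no combined alignment. */
static void demo_try_size(unsigned int *width, unsigned int *height)
{
	v4l_bound_align_image(width, 48, 720, 2,
			      height, 32, 576, 1, 0);
}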
diff --git a/include/media/v4l2-i2c-drv.h b/include/media/v4l2-i2c-drv.h
index 10a2882..74bf741 100644
--- a/include/media/v4l2-i2c-drv.h
+++ b/include/media/v4l2-i2c-drv.h
@@ -22,7 +22,7 @@
*/
/* NOTE: the full version of this header is in the v4l-dvb repository
- * and allows v4l i2c drivers to be compiled on older kernels as well.
+ * and allows v4l i2c drivers to be compiled on pre-2.6.26 kernels.
* The version of this header as it appears in the kernel is a stripped
* version (without all the backwards compatibility stuff) and so it
* looks a bit odd.
@@ -30,6 +30,9 @@
* If you look at the full version then you will understand the reason
* for introducing this header since you really don't want to have all
* the tricky backwards compatibility code in each and every i2c driver.
+ *
+ * If the i2c driver will never be compiled for pre-2.6.26 kernels, then
+ * DO NOT USE this header! Just write it as a regular i2c driver.
*/
#ifndef __V4L2_I2C_DRV_H__
diff --git a/include/media/v4l2-subdev.h b/include/media/v4l2-subdev.h
index a503e1c..5dcb367 100644
--- a/include/media/v4l2-subdev.h
+++ b/include/media/v4l2-subdev.h
@@ -79,7 +79,11 @@
not yet implemented) since ops provide proper type-checking.
*/
-/* init: initialize the sensor registors to some sort of reasonable default
+/* s_config: if set, then it is always called by the v4l2_i2c_new_subdev*
+ functions after the v4l2_subdev was registered. It is used to pass
+ platform data to the subdev which can be used during initialization.
+
+ init: initialize the sensor registers to some sort of reasonable default
values. Do not use for new drivers and should be removed in existing
drivers.
@@ -96,6 +100,7 @@
struct v4l2_subdev_core_ops {
int (*g_chip_ident)(struct v4l2_subdev *sd, struct v4l2_dbg_chip_ident *chip);
int (*log_status)(struct v4l2_subdev *sd);
+ int (*s_config)(struct v4l2_subdev *sd, int irq, void *platform_data);
int (*init)(struct v4l2_subdev *sd, u32 val);
int (*load_fw)(struct v4l2_subdev *sd);
int (*reset)(struct v4l2_subdev *sd, u32 val);
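A sub-device driver that wants the IRQ number and platform data pushed to it through the new v4l2_i2c_new_subdev_cfg()/v4l2_i2c_new_subdev_board() paths implements the s_config core op described above. A hedged sketch (state layout and names are invented):

#include <media/v4l2-subdev.h>

struct demo_state {
	struct v4l2_subdev sd;
	int irq;
	void *pdata;
};

static int demo_s_config(struct v4l2_subdev *sd, int irq, void *platform_data)
{
	struct demo_state *state = container_of(sd, struct demo_state, sd);

	/* stash what the bridge driver handed us for use during init */
	state->irq = irq;
	state->pdata = platform_data;
	return 0;
}

static const struct v4l2_subdev_core_ops demo_core_ops = {
	.s_config = demo_s_config,
};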
diff --git a/include/mtd/Kbuild b/include/mtd/Kbuild
index 8eb018f..192f8fb 100644
--- a/include/mtd/Kbuild
+++ b/include/mtd/Kbuild
@@ -1,5 +1,4 @@
header-y += inftl-user.h
-header-y += jffs2-user.h
header-y += mtd-abi.h
header-y += mtd-user.h
header-y += nftl-user.h
diff --git a/include/mtd/jffs2-user.h b/include/mtd/jffs2-user.h
deleted file mode 100644
index fa94b0e..0000000
--- a/include/mtd/jffs2-user.h
+++ /dev/null
@@ -1,34 +0,0 @@
-/*
- * JFFS2 definitions for use in user space only
- */
-
-#ifndef __JFFS2_USER_H__
-#define __JFFS2_USER_H__
-
-/* This file is blessed for inclusion by userspace */
-#include <linux/jffs2.h>
-#include <linux/types.h>
-#include <endian.h>
-#include <byteswap.h>
-
-#undef cpu_to_je16
-#undef cpu_to_je32
-#undef cpu_to_jemode
-#undef je16_to_cpu
-#undef je32_to_cpu
-#undef jemode_to_cpu
-
-extern int target_endian;
-
-#define t16(x) ({ __u16 __b = (x); (target_endian==__BYTE_ORDER)?__b:bswap_16(__b); })
-#define t32(x) ({ __u32 __b = (x); (target_endian==__BYTE_ORDER)?__b:bswap_32(__b); })
-
-#define cpu_to_je16(x) ((jint16_t){t16(x)})
-#define cpu_to_je32(x) ((jint32_t){t32(x)})
-#define cpu_to_jemode(x) ((jmode_t){t32(x)})
-
-#define je16_to_cpu(x) (t16((x).v16))
-#define je32_to_cpu(x) (t32((x).v32))
-#define jemode_to_cpu(x) (t32((x).m))
-
-#endif /* __JFFS2_USER_H__ */
diff --git a/include/mtd/mtd-abi.h b/include/mtd/mtd-abi.h
index b6595b3..be51ae2 100644
--- a/include/mtd/mtd-abi.h
+++ b/include/mtd/mtd-abi.h
@@ -12,12 +12,24 @@
__u32 length;
};
+struct erase_info_user64 {
+ __u64 start;
+ __u64 length;
+};
+
struct mtd_oob_buf {
__u32 start;
__u32 length;
unsigned char __user *ptr;
};
+struct mtd_oob_buf64 {
+ __u64 start;
+ __u32 pad;
+ __u32 length;
+ __u64 usr_ptr;
+};
+
#define MTD_ABSENT 0
#define MTD_RAM 1
#define MTD_ROM 2
@@ -95,6 +107,9 @@
#define ECCGETLAYOUT _IOR('M', 17, struct nand_ecclayout)
#define ECCGETSTATS _IOR('M', 18, struct mtd_ecc_stats)
#define MTDFILEMODE _IO('M', 19)
+#define MEMERASE64 _IOW('M', 20, struct erase_info_user64)
+#define MEMWRITEOOB64 _IOWR('M', 21, struct mtd_oob_buf64)
+#define MEMREADOOB64 _IOWR('M', 22, struct mtd_oob_buf64)
/*
* Obsolete legacy interface. Keep it in order not to break userspace
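The 64-bit variants let user space address flash offsets beyond 4 GiB, and struct mtd_oob_buf64 carries the buffer pointer as a __u64 (usr_ptr) so the layout does not differ between 32- and 64-bit callers. A user-space sketch of MEMERASE64 (offset and block size are illustrative):

#include <sys/ioctl.h>
#include <mtd/mtd-abi.h>

/* Hypothetical: erase one 128 KiB block located past the 4 GiB mark. */
static int demo_erase_block(int fd)
{
	struct erase_info_user64 erase = {
		.start	= 5ULL << 30,		/* 5 GiB into the device */
		.length	= 128 * 1024,
	};

	return ioctl(fd, MEMERASE64, &erase);
}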
diff --git a/include/net/iucv/af_iucv.h b/include/net/iucv/af_iucv.h
index 21ee49f..f82a1e8 100644
--- a/include/net/iucv/af_iucv.h
+++ b/include/net/iucv/af_iucv.h
@@ -94,8 +94,6 @@
poll_table *wait);
void iucv_sock_link(struct iucv_sock_list *l, struct sock *s);
void iucv_sock_unlink(struct iucv_sock_list *l, struct sock *s);
-int iucv_sock_wait_state(struct sock *sk, int state, int state2,
- unsigned long timeo);
int iucv_sock_wait_cnt(struct sock *sk, unsigned long timeo);
void iucv_accept_enqueue(struct sock *parent, struct sock *sk);
void iucv_accept_unlink(struct sock *sk);
diff --git a/init/main.c b/init/main.c
index 09131ec..4870dfe 100644
--- a/init/main.c
+++ b/init/main.c
@@ -678,7 +678,6 @@
#endif
page_cgroup_init();
enable_debug_pagealloc();
- cpu_hotplug_init();
kmemtrace_init();
kmemleak_init();
debug_objects_mem_init();
diff --git a/ipc/util.h b/ipc/util.h
index ab3ebf2..764b51a 100644
--- a/ipc/util.h
+++ b/ipc/util.h
@@ -10,6 +10,7 @@
#ifndef _IPC_UTIL_H
#define _IPC_UTIL_H
+#include <linux/unistd.h>
#include <linux/err.h>
#define SEQ_MULTIPLIER (IPCMNI)
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 395b697..8ce1004 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -34,14 +34,11 @@
* an ongoing cpu hotplug operation.
*/
int refcount;
-} cpu_hotplug;
-
-void __init cpu_hotplug_init(void)
-{
- cpu_hotplug.active_writer = NULL;
- mutex_init(&cpu_hotplug.lock);
- cpu_hotplug.refcount = 0;
-}
+} cpu_hotplug = {
+ .active_writer = NULL,
+ .lock = __MUTEX_INITIALIZER(cpu_hotplug.lock),
+ .refcount = 0,
+};
#ifdef CONFIG_HOTPLUG_CPU
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index aaf5c9d..50da676 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -856,7 +856,7 @@
* still called in hard interrupt context and has to check
* whether the interrupt originates from the device. If yes it
* needs to disable the interrupt on the device and return
- * IRQ_THREAD_WAKE which will wake up the handler thread and run
+ * IRQ_WAKE_THREAD which will wake up the handler thread and run
* @thread_fn. This split handler design is necessary to support
* shared interrupts.
*
diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c
index 29b685f..1a933a2 100644
--- a/kernel/perf_counter.c
+++ b/kernel/perf_counter.c
@@ -124,7 +124,7 @@
static void get_ctx(struct perf_counter_context *ctx)
{
- atomic_inc(&ctx->refcount);
+ WARN_ON(!atomic_inc_not_zero(&ctx->refcount));
}
static void free_ctx(struct rcu_head *head)
@@ -175,6 +175,11 @@
spin_unlock_irqrestore(&ctx->lock, *flags);
goto retry;
}
+
+ if (!atomic_inc_not_zero(&ctx->refcount)) {
+ spin_unlock_irqrestore(&ctx->lock, *flags);
+ ctx = NULL;
+ }
}
rcu_read_unlock();
return ctx;
@@ -193,7 +198,6 @@
ctx = perf_lock_task_context(task, &flags);
if (ctx) {
++ctx->pin_count;
- get_ctx(ctx);
spin_unlock_irqrestore(&ctx->lock, flags);
}
return ctx;
@@ -1283,7 +1287,7 @@
if (!interrupts) {
perf_disable();
counter->pmu->disable(counter);
- atomic_set(&hwc->period_left, 0);
+ atomic64_set(&hwc->period_left, 0);
counter->pmu->enable(counter);
perf_enable();
}
@@ -1459,11 +1463,6 @@
put_ctx(parent_ctx);
ctx->parent_ctx = NULL; /* no longer a clone */
}
- /*
- * Get an extra reference before dropping the lock so that
- * this context won't get freed if the task exits.
- */
- get_ctx(ctx);
spin_unlock_irqrestore(&ctx->lock, flags);
}
@@ -1553,7 +1552,7 @@
static ssize_t
perf_read_hw(struct perf_counter *counter, char __user *buf, size_t count)
{
- u64 values[3];
+ u64 values[4];
int n;
/*
@@ -1620,22 +1619,6 @@
perf_counter_update_userpage(counter);
}
-static void perf_counter_for_each_sibling(struct perf_counter *counter,
- void (*func)(struct perf_counter *))
-{
- struct perf_counter_context *ctx = counter->ctx;
- struct perf_counter *sibling;
-
- WARN_ON_ONCE(ctx->parent_ctx);
- mutex_lock(&ctx->mutex);
- counter = counter->group_leader;
-
- func(counter);
- list_for_each_entry(sibling, &counter->sibling_list, list_entry)
- func(sibling);
- mutex_unlock(&ctx->mutex);
-}
-
/*
* Holding the top-level counter's child_mutex means that any
* descendant process that has inherited this counter will block
@@ -1658,14 +1641,18 @@
static void perf_counter_for_each(struct perf_counter *counter,
void (*func)(struct perf_counter *))
{
- struct perf_counter *child;
+ struct perf_counter_context *ctx = counter->ctx;
+ struct perf_counter *sibling;
- WARN_ON_ONCE(counter->ctx->parent_ctx);
- mutex_lock(&counter->child_mutex);
- perf_counter_for_each_sibling(counter, func);
- list_for_each_entry(child, &counter->child_list, child_list)
- perf_counter_for_each_sibling(child, func);
- mutex_unlock(&counter->child_mutex);
+ WARN_ON_ONCE(ctx->parent_ctx);
+ mutex_lock(&ctx->mutex);
+ counter = counter->group_leader;
+
+ perf_counter_for_each_child(counter, func);
+ func(counter);
+ list_for_each_entry(sibling, &counter->sibling_list, list_entry)
+ perf_counter_for_each_child(sibling, func);
+ mutex_unlock(&ctx->mutex);
}
static int perf_counter_period(struct perf_counter *counter, u64 __user *arg)
@@ -1806,6 +1793,12 @@
struct perf_mmap_data *data;
int ret = VM_FAULT_SIGBUS;
+ if (vmf->flags & FAULT_FLAG_MKWRITE) {
+ if (vmf->pgoff == 0)
+ ret = 0;
+ return ret;
+ }
+
rcu_read_lock();
data = rcu_dereference(counter->data);
if (!data)
@@ -1819,9 +1812,16 @@
if ((unsigned)nr > data->nr_pages)
goto unlock;
+ if (vmf->flags & FAULT_FLAG_WRITE)
+ goto unlock;
+
vmf->page = virt_to_page(data->data_pages[nr]);
}
+
get_page(vmf->page);
+ vmf->page->mapping = vma->vm_file->f_mapping;
+ vmf->page->index = vmf->pgoff;
+
ret = 0;
unlock:
rcu_read_unlock();
@@ -1874,6 +1874,14 @@
return -ENOMEM;
}
+static void perf_mmap_free_page(unsigned long addr)
+{
+ struct page *page = virt_to_page(addr);
+
+ page->mapping = NULL;
+ __free_page(page);
+}
+
static void __perf_mmap_data_free(struct rcu_head *rcu_head)
{
struct perf_mmap_data *data;
@@ -1881,9 +1889,10 @@
data = container_of(rcu_head, struct perf_mmap_data, rcu_head);
- free_page((unsigned long)data->user_page);
+ perf_mmap_free_page((unsigned long)data->user_page);
for (i = 0; i < data->nr_pages; i++)
- free_page((unsigned long)data->data_pages[i]);
+ perf_mmap_free_page((unsigned long)data->data_pages[i]);
+
kfree(data);
}
@@ -1920,9 +1929,10 @@
}
static struct vm_operations_struct perf_mmap_vmops = {
- .open = perf_mmap_open,
- .close = perf_mmap_close,
- .fault = perf_mmap_fault,
+ .open = perf_mmap_open,
+ .close = perf_mmap_close,
+ .fault = perf_mmap_fault,
+ .page_mkwrite = perf_mmap_fault,
};
static int perf_mmap(struct file *file, struct vm_area_struct *vma)
@@ -1936,7 +1946,7 @@
long user_extra, extra;
int ret = 0;
- if (!(vma->vm_flags & VM_SHARED) || (vma->vm_flags & VM_WRITE))
+ if (!(vma->vm_flags & VM_SHARED))
return -EINVAL;
vma_size = vma->vm_end - vma->vm_start;
@@ -1995,10 +2005,12 @@
atomic_long_add(user_extra, &user->locked_vm);
vma->vm_mm->locked_vm += extra;
counter->data->nr_locked = extra;
+ if (vma->vm_flags & VM_WRITE)
+ counter->data->writable = 1;
+
unlock:
mutex_unlock(&counter->mmap_mutex);
- vma->vm_flags &= ~VM_MAYWRITE;
vma->vm_flags |= VM_RESERVED;
vma->vm_ops = &perf_mmap_vmops;
@@ -2175,11 +2187,38 @@
unsigned long head;
unsigned long offset;
int nmi;
- int overflow;
+ int sample;
int locked;
unsigned long flags;
};
+static bool perf_output_space(struct perf_mmap_data *data,
+ unsigned int offset, unsigned int head)
+{
+ unsigned long tail;
+ unsigned long mask;
+
+ if (!data->writable)
+ return true;
+
+ mask = (data->nr_pages << PAGE_SHIFT) - 1;
+ /*
+ * Userspace could choose to issue a mb() before updating the tail
+ * pointer, so that all reads are completed before the write is
+ * issued.
+ */
+ tail = ACCESS_ONCE(data->user_page->data_tail);
+ smp_rmb();
+
+ offset = (offset - tail) & mask;
+ head = (head - tail) & mask;
+
+ if ((int)(head - offset) < 0)
+ return false;
+
+ return true;
+}
+
static void perf_output_wakeup(struct perf_output_handle *handle)
{
atomic_set(&handle->data->poll, POLL_IN);
@@ -2270,55 +2309,6 @@
local_irq_restore(handle->flags);
}
-static int perf_output_begin(struct perf_output_handle *handle,
- struct perf_counter *counter, unsigned int size,
- int nmi, int overflow)
-{
- struct perf_mmap_data *data;
- unsigned int offset, head;
-
- /*
- * For inherited counters we send all the output towards the parent.
- */
- if (counter->parent)
- counter = counter->parent;
-
- rcu_read_lock();
- data = rcu_dereference(counter->data);
- if (!data)
- goto out;
-
- handle->data = data;
- handle->counter = counter;
- handle->nmi = nmi;
- handle->overflow = overflow;
-
- if (!data->nr_pages)
- goto fail;
-
- perf_output_lock(handle);
-
- do {
- offset = head = atomic_long_read(&data->head);
- head += size;
- } while (atomic_long_cmpxchg(&data->head, offset, head) != offset);
-
- handle->offset = offset;
- handle->head = head;
-
- if ((offset >> PAGE_SHIFT) != (head >> PAGE_SHIFT))
- atomic_set(&data->wakeup, 1);
-
- return 0;
-
-fail:
- perf_output_wakeup(handle);
-out:
- rcu_read_unlock();
-
- return -ENOSPC;
-}
-
static void perf_output_copy(struct perf_output_handle *handle,
const void *buf, unsigned int len)
{
@@ -2358,6 +2348,78 @@
#define perf_output_put(handle, x) \
perf_output_copy((handle), &(x), sizeof(x))
+static int perf_output_begin(struct perf_output_handle *handle,
+ struct perf_counter *counter, unsigned int size,
+ int nmi, int sample)
+{
+ struct perf_mmap_data *data;
+ unsigned int offset, head;
+ int have_lost;
+ struct {
+ struct perf_event_header header;
+ u64 id;
+ u64 lost;
+ } lost_event;
+
+ /*
+ * For inherited counters we send all the output towards the parent.
+ */
+ if (counter->parent)
+ counter = counter->parent;
+
+ rcu_read_lock();
+ data = rcu_dereference(counter->data);
+ if (!data)
+ goto out;
+
+ handle->data = data;
+ handle->counter = counter;
+ handle->nmi = nmi;
+ handle->sample = sample;
+
+ if (!data->nr_pages)
+ goto fail;
+
+ have_lost = atomic_read(&data->lost);
+ if (have_lost)
+ size += sizeof(lost_event);
+
+ perf_output_lock(handle);
+
+ do {
+ offset = head = atomic_long_read(&data->head);
+ head += size;
+ if (unlikely(!perf_output_space(data, offset, head)))
+ goto fail;
+ } while (atomic_long_cmpxchg(&data->head, offset, head) != offset);
+
+ handle->offset = offset;
+ handle->head = head;
+
+ if ((offset >> PAGE_SHIFT) != (head >> PAGE_SHIFT))
+ atomic_set(&data->wakeup, 1);
+
+ if (have_lost) {
+ lost_event.header.type = PERF_EVENT_LOST;
+ lost_event.header.misc = 0;
+ lost_event.header.size = sizeof(lost_event);
+ lost_event.id = counter->id;
+ lost_event.lost = atomic_xchg(&data->lost, 0);
+
+ perf_output_put(handle, lost_event);
+ }
+
+ return 0;
+
+fail:
+ atomic_inc(&data->lost);
+ perf_output_unlock(handle);
+out:
+ rcu_read_unlock();
+
+ return -ENOSPC;
+}
+
static void perf_output_end(struct perf_output_handle *handle)
{
struct perf_counter *counter = handle->counter;
@@ -2365,7 +2427,7 @@
int wakeup_events = counter->attr.wakeup_events;
- if (handle->overflow && wakeup_events) {
+ if (handle->sample && wakeup_events) {
int events = atomic_inc_return(&data->events);
if (events >= wakeup_events) {
atomic_sub(wakeup_events, &data->events);
@@ -2970,7 +3032,7 @@
}
/*
- * Generic counter overflow handling.
+ * Generic counter overflow handling, sampling.
*/
int perf_counter_overflow(struct perf_counter *counter, int nmi,
@@ -3109,20 +3171,15 @@
}
static void perf_swcounter_overflow(struct perf_counter *counter,
- int nmi, struct pt_regs *regs, u64 addr)
+ int nmi, struct perf_sample_data *data)
{
- struct perf_sample_data data = {
- .regs = regs,
- .addr = addr,
- .period = counter->hw.last_period,
- };
+ data->period = counter->hw.last_period;
perf_swcounter_update(counter);
perf_swcounter_set_period(counter);
- if (perf_counter_overflow(counter, nmi, &data))
+ if (perf_counter_overflow(counter, nmi, data))
/* soft-disable the counter */
;
-
}
static int perf_swcounter_is_counting(struct perf_counter *counter)
@@ -3187,18 +3244,18 @@
}
static void perf_swcounter_add(struct perf_counter *counter, u64 nr,
- int nmi, struct pt_regs *regs, u64 addr)
+ int nmi, struct perf_sample_data *data)
{
int neg = atomic64_add_negative(nr, &counter->hw.count);
- if (counter->hw.sample_period && !neg && regs)
- perf_swcounter_overflow(counter, nmi, regs, addr);
+ if (counter->hw.sample_period && !neg && data->regs)
+ perf_swcounter_overflow(counter, nmi, data);
}
static void perf_swcounter_ctx_event(struct perf_counter_context *ctx,
- enum perf_type_id type, u32 event,
- u64 nr, int nmi, struct pt_regs *regs,
- u64 addr)
+ enum perf_type_id type,
+ u32 event, u64 nr, int nmi,
+ struct perf_sample_data *data)
{
struct perf_counter *counter;
@@ -3207,8 +3264,8 @@
rcu_read_lock();
list_for_each_entry_rcu(counter, &ctx->event_list, event_entry) {
- if (perf_swcounter_match(counter, type, event, regs))
- perf_swcounter_add(counter, nr, nmi, regs, addr);
+ if (perf_swcounter_match(counter, type, event, data->regs))
+ perf_swcounter_add(counter, nr, nmi, data);
}
rcu_read_unlock();
}
@@ -3227,9 +3284,9 @@
return &cpuctx->recursion[0];
}
-static void __perf_swcounter_event(enum perf_type_id type, u32 event,
- u64 nr, int nmi, struct pt_regs *regs,
- u64 addr)
+static void do_perf_swcounter_event(enum perf_type_id type, u32 event,
+ u64 nr, int nmi,
+ struct perf_sample_data *data)
{
struct perf_cpu_context *cpuctx = &get_cpu_var(perf_cpu_context);
int *recursion = perf_swcounter_recursion_context(cpuctx);
@@ -3242,7 +3299,7 @@
barrier();
perf_swcounter_ctx_event(&cpuctx->ctx, type, event,
- nr, nmi, regs, addr);
+ nr, nmi, data);
rcu_read_lock();
/*
* doesn't really matter which of the child contexts the
@@ -3250,7 +3307,7 @@
*/
ctx = rcu_dereference(current->perf_counter_ctxp);
if (ctx)
- perf_swcounter_ctx_event(ctx, type, event, nr, nmi, regs, addr);
+ perf_swcounter_ctx_event(ctx, type, event, nr, nmi, data);
rcu_read_unlock();
barrier();
@@ -3263,7 +3320,12 @@
void
perf_swcounter_event(u32 event, u64 nr, int nmi, struct pt_regs *regs, u64 addr)
{
- __perf_swcounter_event(PERF_TYPE_SOFTWARE, event, nr, nmi, regs, addr);
+ struct perf_sample_data data = {
+ .regs = regs,
+ .addr = addr,
+ };
+
+ do_perf_swcounter_event(PERF_TYPE_SOFTWARE, event, nr, nmi, &data);
}
static void perf_swcounter_read(struct perf_counter *counter)
@@ -3404,36 +3466,18 @@
.read = task_clock_perf_counter_read,
};
-/*
- * Software counter: cpu migrations
- */
-void perf_counter_task_migration(struct task_struct *task, int cpu)
-{
- struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
- struct perf_counter_context *ctx;
-
- perf_swcounter_ctx_event(&cpuctx->ctx, PERF_TYPE_SOFTWARE,
- PERF_COUNT_SW_CPU_MIGRATIONS,
- 1, 1, NULL, 0);
-
- ctx = perf_pin_task_context(task);
- if (ctx) {
- perf_swcounter_ctx_event(ctx, PERF_TYPE_SOFTWARE,
- PERF_COUNT_SW_CPU_MIGRATIONS,
- 1, 1, NULL, 0);
- perf_unpin_context(ctx);
- }
-}
-
#ifdef CONFIG_EVENT_PROFILE
void perf_tpcounter_event(int event_id)
{
- struct pt_regs *regs = get_irq_regs();
+ struct perf_sample_data data = {
+ .regs = get_irq_regs(),
+ .addr = 0,
+ };
- if (!regs)
- regs = task_pt_regs(current);
+ if (!data.regs)
+ data.regs = task_pt_regs(current);
- __perf_swcounter_event(PERF_TYPE_TRACEPOINT, event_id, 1, 1, regs, 0);
+ do_perf_swcounter_event(PERF_TYPE_TRACEPOINT, event_id, 1, 1, &data);
}
EXPORT_SYMBOL_GPL(perf_tpcounter_event);
diff --git a/kernel/sched.c b/kernel/sched.c
index 247fd0f..7c9098d 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -1978,7 +1978,8 @@
if (task_hot(p, old_rq->clock, NULL))
schedstat_inc(p, se.nr_forced2_migrations);
#endif
- perf_counter_task_migration(p, new_cpu);
+ perf_swcounter_event(PERF_COUNT_SW_CPU_MIGRATIONS,
+ 1, 1, NULL, 0);
}
p->se.vruntime -= old_cfsrq->min_vruntime -
new_cfsrq->min_vruntime;
@@ -7822,7 +7823,7 @@
free_rootdomain(old_rd);
}
-static int __init_refok init_rootdomain(struct root_domain *rd, bool bootmem)
+static int init_rootdomain(struct root_domain *rd, bool bootmem)
{
gfp_t gfp = GFP_KERNEL;
diff --git a/kernel/sched_cpupri.c b/kernel/sched_cpupri.c
index 7deffc9..e6c2517 100644
--- a/kernel/sched_cpupri.c
+++ b/kernel/sched_cpupri.c
@@ -152,7 +152,7 @@
*
* Returns: -ENOMEM if memory fails.
*/
-int __init_refok cpupri_init(struct cpupri *cp, bool bootmem)
+int cpupri_init(struct cpupri *cp, bool bootmem)
{
gfp_t gfp = GFP_KERNEL;
int i;
diff --git a/kernel/sched_debug.c b/kernel/sched_debug.c
index 467ca72..70c7e0b 100644
--- a/kernel/sched_debug.c
+++ b/kernel/sched_debug.c
@@ -162,7 +162,7 @@
{
s64 MIN_vruntime = -1, min_vruntime, max_vruntime = -1,
spread, rq0_min_vruntime, spread0;
- struct rq *rq = &per_cpu(runqueues, cpu);
+ struct rq *rq = cpu_rq(cpu);
struct sched_entity *last;
unsigned long flags;
@@ -191,7 +191,7 @@
if (last)
max_vruntime = last->vruntime;
min_vruntime = cfs_rq->min_vruntime;
- rq0_min_vruntime = per_cpu(runqueues, 0).cfs.min_vruntime;
+ rq0_min_vruntime = cpu_rq(0)->cfs.min_vruntime;
spin_unlock_irqrestore(&rq->lock, flags);
SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "MIN_vruntime",
SPLIT_NS(MIN_vruntime));
@@ -248,7 +248,7 @@
static void print_cpu(struct seq_file *m, int cpu)
{
- struct rq *rq = &per_cpu(runqueues, cpu);
+ struct rq *rq = cpu_rq(cpu);
#ifdef CONFIG_X86
{
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 5f9650e..ba7fd6e 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -430,12 +430,13 @@
for_each_sched_entity(se) {
struct load_weight *load;
+ struct load_weight lw;
cfs_rq = cfs_rq_of(se);
load = &cfs_rq->load;
if (unlikely(!se->on_rq)) {
- struct load_weight lw = cfs_rq->load;
+ lw = cfs_rq->load;
update_load_add(&lw, se->load.weight);
load = &lw;
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index 2aff39c..e0f59a2 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -222,6 +222,15 @@
cpu = smp_processor_id();
ts = &per_cpu(tick_cpu_sched, cpu);
+
+ /*
+ * Call to tick_nohz_start_idle stops the last_update_time from being
+ * updated. Thus, it must not be called in the event we are called from
+ * irq_exit() with the prior state different than idle.
+ */
+ if (!inidle && !ts->inidle)
+ goto end;
+
now = tick_nohz_start_idle(ts);
/*
@@ -239,9 +248,6 @@
if (unlikely(ts->nohz_mode == NOHZ_MODE_INACTIVE))
goto end;
- if (!inidle && !ts->inidle)
- goto end;
-
ts->inidle = 1;
if (need_resched())
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index 61071fe..1551f47 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -18,6 +18,13 @@
config HAVE_FUNCTION_GRAPH_TRACER
bool
+config HAVE_FUNCTION_GRAPH_FP_TEST
+ bool
+ help
+ An arch may pass in a unique value (frame pointer) to both the
+ entering and exiting of a function. On exit, the value is compared
+ and if it does not match, then it will panic the kernel.
+
config HAVE_FUNCTION_TRACE_MCOUNT_TEST
bool
help
@@ -121,6 +128,7 @@
bool "Kernel Function Graph Tracer"
depends on HAVE_FUNCTION_GRAPH_TRACER
depends on FUNCTION_TRACER
+ depends on !X86_32 || !CC_OPTIMIZE_FOR_SIZE
default y
help
Enable the kernel to trace a function at both its return
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index bb60732..3718d55 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -1224,6 +1224,13 @@
return;
ftrace_start_up--;
+ /*
+ * Just warn in case of unbalance, no need to kill ftrace, it's not
+ * critical but the ftrace_call callers may never be nopped again after
+ * further ftrace uses.
+ */
+ WARN_ON_ONCE(ftrace_start_up < 0);
+
if (!ftrace_start_up)
command |= FTRACE_DISABLE_CALLS;
diff --git a/kernel/trace/kmemtrace.c b/kernel/trace/kmemtrace.c
index 86cdf67..1edaa95 100644
--- a/kernel/trace/kmemtrace.c
+++ b/kernel/trace/kmemtrace.c
@@ -186,7 +186,7 @@
int cpu;
kmemtrace_array = tr;
- for_each_cpu_mask(cpu, cpu_possible_map)
+ for_each_cpu(cpu, cpu_possible_mask)
tracing_reset(tr, cpu);
kmemtrace_start_probes();
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index dc4dc70..04dac26 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -206,6 +206,7 @@
#define RB_EVNT_HDR_SIZE (offsetof(struct ring_buffer_event, array))
#define RB_ALIGNMENT 4U
#define RB_MAX_SMALL_DATA (RB_ALIGNMENT * RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
+#define RB_EVNT_MIN_SIZE 8U /* two 32bit words */
/* define RINGBUF_TYPE_DATA for 'case RINGBUF_TYPE_DATA:' */
#define RINGBUF_TYPE_DATA 0 ... RINGBUF_TYPE_DATA_TYPE_LEN_MAX
@@ -415,6 +416,8 @@
unsigned long overrun;
unsigned long read;
local_t entries;
+ local_t committing;
+ local_t commits;
u64 write_stamp;
u64 read_stamp;
atomic_t record_disabled;
@@ -618,12 +621,6 @@
kfree(cpu_buffer);
}
-/*
- * Causes compile errors if the struct buffer_page gets bigger
- * than the struct page.
- */
-extern int ring_buffer_page_too_big(void);
-
#ifdef CONFIG_HOTPLUG_CPU
static int rb_cpu_notify(struct notifier_block *self,
unsigned long action, void *hcpu);
@@ -646,11 +643,6 @@
int bsize;
int cpu;
- /* Paranoid! Optimizes out when all is well */
- if (sizeof(struct buffer_page) > sizeof(struct page))
- ring_buffer_page_too_big();
-
-
/* keep it in its own cache line */
buffer = kzalloc(ALIGN(sizeof(*buffer), cache_line_size()),
GFP_KERNEL);
@@ -666,8 +658,8 @@
buffer->reader_lock_key = key;
/* need at least two pages */
- if (buffer->pages == 1)
- buffer->pages++;
+ if (buffer->pages < 2)
+ buffer->pages = 2;
/*
* In case of non-hotplug cpu, if the ring-buffer is allocated
@@ -1011,12 +1003,12 @@
{
unsigned long addr = (unsigned long)event;
- return (addr & ~PAGE_MASK) - (PAGE_SIZE - BUF_PAGE_SIZE);
+ return (addr & ~PAGE_MASK) - BUF_PAGE_HDR_SIZE;
}
static inline int
-rb_is_commit(struct ring_buffer_per_cpu *cpu_buffer,
- struct ring_buffer_event *event)
+rb_event_is_commit(struct ring_buffer_per_cpu *cpu_buffer,
+ struct ring_buffer_event *event)
{
unsigned long addr = (unsigned long)event;
unsigned long index;
@@ -1029,31 +1021,6 @@
}
static void
-rb_set_commit_event(struct ring_buffer_per_cpu *cpu_buffer,
- struct ring_buffer_event *event)
-{
- unsigned long addr = (unsigned long)event;
- unsigned long index;
-
- index = rb_event_index(event);
- addr &= PAGE_MASK;
-
- while (cpu_buffer->commit_page->page != (void *)addr) {
- if (RB_WARN_ON(cpu_buffer,
- cpu_buffer->commit_page == cpu_buffer->tail_page))
- return;
- cpu_buffer->commit_page->page->commit =
- cpu_buffer->commit_page->write;
- rb_inc_page(cpu_buffer, &cpu_buffer->commit_page);
- cpu_buffer->write_stamp =
- cpu_buffer->commit_page->page->time_stamp;
- }
-
- /* Now set the commit to the event's index */
- local_set(&cpu_buffer->commit_page->page->commit, index);
-}
-
-static void
rb_set_commit_to_write(struct ring_buffer_per_cpu *cpu_buffer)
{
/*
@@ -1171,6 +1138,60 @@
return length;
}
+static inline void
+rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
+ struct buffer_page *tail_page,
+ unsigned long tail, unsigned long length)
+{
+ struct ring_buffer_event *event;
+
+ /*
+ * Only the event that crossed the page boundary
+ * must fill the old tail_page with padding.
+ */
+ if (tail >= BUF_PAGE_SIZE) {
+ local_sub(length, &tail_page->write);
+ return;
+ }
+
+ event = __rb_page_index(tail_page, tail);
+ kmemcheck_annotate_bitfield(event, bitfield);
+
+ /*
+ * If this event is bigger than the minimum size, then
+ * we need to be careful that we don't subtract the
+ * write counter enough to allow another writer to slip
+ * in on this page.
+ * We put in a discarded commit instead, to make sure
+ * that this space is not used again.
+ *
+ * If we are less than the minimum size, we don't need to
+ * worry about it.
+ */
+ if (tail > (BUF_PAGE_SIZE - RB_EVNT_MIN_SIZE)) {
+ /* No room for any events */
+
+ /* Mark the rest of the page with padding */
+ rb_event_set_padding(event);
+
+ /* Set the write back to the previous setting */
+ local_sub(length, &tail_page->write);
+ return;
+ }
+
+ /* Put in a discarded event */
+ event->array[0] = (BUF_PAGE_SIZE - tail) - RB_EVNT_HDR_SIZE;
+ event->type_len = RINGBUF_TYPE_PADDING;
+ /* time delta must be non zero */
+ event->time_delta = 1;
+ /* Account for this as an entry */
+ local_inc(&tail_page->entries);
+ local_inc(&cpu_buffer->entries);
+
+ /* Set write to end of buffer */
+ length = (tail + length) - BUF_PAGE_SIZE;
+ local_sub(length, &tail_page->write);
+}
static struct ring_buffer_event *
rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
@@ -1180,7 +1201,6 @@
{
struct buffer_page *next_page, *head_page, *reader_page;
struct ring_buffer *buffer = cpu_buffer->buffer;
- struct ring_buffer_event *event;
bool lock_taken = false;
unsigned long flags;
@@ -1265,27 +1285,7 @@
cpu_buffer->tail_page->page->time_stamp = *ts;
}
- /*
- * The actual tail page has moved forward.
- */
- if (tail < BUF_PAGE_SIZE) {
- /* Mark the rest of the page with padding */
- event = __rb_page_index(tail_page, tail);
- kmemcheck_annotate_bitfield(event, bitfield);
- rb_event_set_padding(event);
- }
-
- /* Set the write back to the previous setting */
- local_sub(length, &tail_page->write);
-
- /*
- * If this was a commit entry that failed,
- * increment that too
- */
- if (tail_page == cpu_buffer->commit_page &&
- tail == rb_commit_index(cpu_buffer)) {
- rb_set_commit_to_write(cpu_buffer);
- }
+ rb_reset_tail(cpu_buffer, tail_page, tail, length);
__raw_spin_unlock(&cpu_buffer->lock);
local_irq_restore(flags);
@@ -1295,7 +1295,7 @@
out_reset:
/* reset write */
- local_sub(length, &tail_page->write);
+ rb_reset_tail(cpu_buffer, tail_page, tail, length);
if (likely(lock_taken))
__raw_spin_unlock(&cpu_buffer->lock);
@@ -1325,9 +1325,6 @@
/* We reserved something on the buffer */
- if (RB_WARN_ON(cpu_buffer, write > BUF_PAGE_SIZE))
- return NULL;
-
event = __rb_page_index(tail_page, tail);
kmemcheck_annotate_bitfield(event, bitfield);
rb_update_event(event, type, length);
@@ -1337,11 +1334,11 @@
local_inc(&tail_page->entries);
/*
- * If this is a commit and the tail is zero, then update
- * this page's time stamp.
+ * If this is the first commit on the page, then update
+ * its timestamp.
*/
- if (!tail && rb_is_commit(cpu_buffer, event))
- cpu_buffer->commit_page->page->time_stamp = *ts;
+ if (!tail)
+ tail_page->page->time_stamp = *ts;
return event;
}
@@ -1410,16 +1407,16 @@
return -EAGAIN;
/* Only a committed time event can update the write stamp */
- if (rb_is_commit(cpu_buffer, event)) {
+ if (rb_event_is_commit(cpu_buffer, event)) {
/*
- * If this is the first on the page, then we need to
- * update the page itself, and just put in a zero.
+ * If this is the first on the page, then it was
+ * updated with the page itself. Try to discard it
+ * and if we can't just make it zero.
*/
if (rb_event_index(event)) {
event->time_delta = *delta & TS_MASK;
event->array[0] = *delta >> TS_SHIFT;
} else {
- cpu_buffer->commit_page->page->time_stamp = *ts;
/* try to discard, since we do not need this */
if (!rb_try_to_discard(cpu_buffer, event)) {
/* nope, just zero it */
@@ -1445,6 +1442,44 @@
return ret;
}
+static void rb_start_commit(struct ring_buffer_per_cpu *cpu_buffer)
+{
+ local_inc(&cpu_buffer->committing);
+ local_inc(&cpu_buffer->commits);
+}
+
+static void rb_end_commit(struct ring_buffer_per_cpu *cpu_buffer)
+{
+ unsigned long commits;
+
+ if (RB_WARN_ON(cpu_buffer,
+ !local_read(&cpu_buffer->committing)))
+ return;
+
+ again:
+ commits = local_read(&cpu_buffer->commits);
+ /* synchronize with interrupts */
+ barrier();
+ if (local_read(&cpu_buffer->committing) == 1)
+ rb_set_commit_to_write(cpu_buffer);
+
+ local_dec(&cpu_buffer->committing);
+
+ /* synchronize with interrupts */
+ barrier();
+
+ /*
+ * Need to account for interrupts coming in between the
+ * updating of the commit page and the clearing of the
+ * committing counter.
+ */
+ if (unlikely(local_read(&cpu_buffer->commits) != commits) &&
+ !local_read(&cpu_buffer->committing)) {
+ local_inc(&cpu_buffer->committing);
+ goto again;
+ }
+}
+
static struct ring_buffer_event *
rb_reserve_next_event(struct ring_buffer_per_cpu *cpu_buffer,
unsigned long length)
@@ -1454,6 +1489,8 @@
int commit = 0;
int nr_loops = 0;
+ rb_start_commit(cpu_buffer);
+
length = rb_calculate_event_length(length);
again:
/*
@@ -1466,7 +1503,7 @@
* Bail!
*/
if (RB_WARN_ON(cpu_buffer, ++nr_loops > 1000))
- return NULL;
+ goto out_fail;
ts = rb_time_stamp(cpu_buffer->buffer, cpu_buffer->cpu);
@@ -1497,7 +1534,7 @@
commit = rb_add_time_stamp(cpu_buffer, &ts, &delta);
if (commit == -EBUSY)
- return NULL;
+ goto out_fail;
if (commit == -EAGAIN)
goto again;
@@ -1511,28 +1548,19 @@
if (unlikely(PTR_ERR(event) == -EAGAIN))
goto again;
- if (!event) {
- if (unlikely(commit))
- /*
- * Ouch! We needed a timestamp and it was commited. But
- * we didn't get our event reserved.
- */
- rb_set_commit_to_write(cpu_buffer);
- return NULL;
- }
+ if (!event)
+ goto out_fail;
- /*
- * If the timestamp was commited, make the commit our entry
- * now so that we will update it when needed.
- */
- if (unlikely(commit))
- rb_set_commit_event(cpu_buffer, event);
- else if (!rb_is_commit(cpu_buffer, event))
+ if (!rb_event_is_commit(cpu_buffer, event))
delta = 0;
event->time_delta = delta;
return event;
+
+ out_fail:
+ rb_end_commit(cpu_buffer);
+ return NULL;
}
#define TRACE_RECURSIVE_DEPTH 16
@@ -1642,13 +1670,14 @@
{
local_inc(&cpu_buffer->entries);
- /* Only process further if we own the commit */
- if (!rb_is_commit(cpu_buffer, event))
- return;
+ /*
+ * The event first in the commit queue updates the
+ * time stamp.
+ */
+ if (rb_event_is_commit(cpu_buffer, event))
+ cpu_buffer->write_stamp += event->time_delta;
- cpu_buffer->write_stamp += event->time_delta;
-
- rb_set_commit_to_write(cpu_buffer);
+ rb_end_commit(cpu_buffer);
}
/**
@@ -1737,15 +1766,15 @@
/* The event is discarded regardless */
rb_event_discard(event);
+ cpu = smp_processor_id();
+ cpu_buffer = buffer->buffers[cpu];
+
/*
* This must only be called if the event has not been
* committed yet. Thus we can assume that preemption
* is still disabled.
*/
- RB_WARN_ON(buffer, preemptible());
-
- cpu = smp_processor_id();
- cpu_buffer = buffer->buffers[cpu];
+ RB_WARN_ON(buffer, !local_read(&cpu_buffer->committing));
if (!rb_try_to_discard(cpu_buffer, event))
goto out;
@@ -1756,13 +1785,7 @@
*/
local_inc(&cpu_buffer->entries);
out:
- /*
- * If a write came in and pushed the tail page
- * we still need to update the commit pointer
- * if we were the commit.
- */
- if (rb_is_commit(cpu_buffer, event))
- rb_set_commit_to_write(cpu_buffer);
+ rb_end_commit(cpu_buffer);
trace_recursive_unlock();
@@ -2446,6 +2469,21 @@
}
EXPORT_SYMBOL_GPL(ring_buffer_iter_peek);
+static inline int rb_ok_to_lock(void)
+{
+ /*
+ * If an NMI die dumps out the content of the ring buffer
+ * do not grab locks. We also permanently disable the ring
+ * buffer too. A one time deal is all you get from reading
+ * the ring buffer from an NMI.
+ */
+ if (likely(!in_nmi() && !oops_in_progress))
+ return 1;
+
+ tracing_off_permanent();
+ return 0;
+}
+
/**
* ring_buffer_peek - peek at the next event to be read
* @buffer: The ring buffer to read
@@ -2461,14 +2499,20 @@
struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
struct ring_buffer_event *event;
unsigned long flags;
+ int dolock;
if (!cpumask_test_cpu(cpu, buffer->cpumask))
return NULL;
+ dolock = rb_ok_to_lock();
again:
- spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
+ local_irq_save(flags);
+ if (dolock)
+ spin_lock(&cpu_buffer->reader_lock);
event = rb_buffer_peek(buffer, cpu, ts);
- spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
+ if (dolock)
+ spin_unlock(&cpu_buffer->reader_lock);
+ local_irq_restore(flags);
if (event && event->type_len == RINGBUF_TYPE_PADDING) {
cpu_relax();
@@ -2520,6 +2564,9 @@
struct ring_buffer_per_cpu *cpu_buffer;
struct ring_buffer_event *event = NULL;
unsigned long flags;
+ int dolock;
+
+ dolock = rb_ok_to_lock();
again:
/* might be called in atomic */
@@ -2529,7 +2576,9 @@
goto out;
cpu_buffer = buffer->buffers[cpu];
- spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
+ local_irq_save(flags);
+ if (dolock)
+ spin_lock(&cpu_buffer->reader_lock);
event = rb_buffer_peek(buffer, cpu, ts);
if (!event)
@@ -2538,7 +2587,9 @@
rb_advance_reader(cpu_buffer);
out_unlock:
- spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
+ if (dolock)
+ spin_unlock(&cpu_buffer->reader_lock);
+ local_irq_restore(flags);
out:
preempt_enable();
@@ -2680,6 +2731,8 @@
cpu_buffer->overrun = 0;
cpu_buffer->read = 0;
local_set(&cpu_buffer->entries, 0);
+ local_set(&cpu_buffer->committing, 0);
+ local_set(&cpu_buffer->commits, 0);
cpu_buffer->write_stamp = 0;
cpu_buffer->read_stamp = 0;
@@ -2734,12 +2787,25 @@
int ring_buffer_empty(struct ring_buffer *buffer)
{
struct ring_buffer_per_cpu *cpu_buffer;
+ unsigned long flags;
+ int dolock;
int cpu;
+ int ret;
+
+ dolock = rb_ok_to_lock();
/* yes this is racy, but if you don't like the race, lock the buffer */
for_each_buffer_cpu(buffer, cpu) {
cpu_buffer = buffer->buffers[cpu];
- if (!rb_per_cpu_empty(cpu_buffer))
+ local_irq_save(flags);
+ if (dolock)
+ spin_lock(&cpu_buffer->reader_lock);
+ ret = rb_per_cpu_empty(cpu_buffer);
+ if (dolock)
+ spin_unlock(&cpu_buffer->reader_lock);
+ local_irq_restore(flags);
+
+ if (!ret)
return 0;
}
@@ -2755,14 +2821,23 @@
int ring_buffer_empty_cpu(struct ring_buffer *buffer, int cpu)
{
struct ring_buffer_per_cpu *cpu_buffer;
+ unsigned long flags;
+ int dolock;
int ret;
if (!cpumask_test_cpu(cpu, buffer->cpumask))
return 1;
- cpu_buffer = buffer->buffers[cpu];
- ret = rb_per_cpu_empty(cpu_buffer);
+ dolock = rb_ok_to_lock();
+ cpu_buffer = buffer->buffers[cpu];
+ local_irq_save(flags);
+ if (dolock)
+ spin_lock(&cpu_buffer->reader_lock);
+ ret = rb_per_cpu_empty(cpu_buffer);
+ if (dolock)
+ spin_unlock(&cpu_buffer->reader_lock);
+ local_irq_restore(flags);
return ret;
}
@@ -3108,7 +3183,7 @@
switch (action) {
case CPU_UP_PREPARE:
case CPU_UP_PREPARE_FROZEN:
- if (cpu_isset(cpu, *buffer->cpumask))
+ if (cpumask_test_cpu(cpu, buffer->cpumask))
return NOTIFY_OK;
buffer->buffers[cpu] =
@@ -3119,7 +3194,7 @@
return NOTIFY_OK;
}
smp_wmb();
- cpu_set(cpu, *buffer->cpumask);
+ cpumask_set_cpu(cpu, buffer->cpumask);
break;
case CPU_DOWN_PREPARE:
case CPU_DOWN_PREPARE_FROZEN:
diff --git a/kernel/trace/ring_buffer_benchmark.c b/kernel/trace/ring_buffer_benchmark.c
index 8d68e14..573d3cc 100644
--- a/kernel/trace/ring_buffer_benchmark.c
+++ b/kernel/trace/ring_buffer_benchmark.c
@@ -102,8 +102,10 @@
event = (void *)&rpage->data[i];
switch (event->type_len) {
case RINGBUF_TYPE_PADDING:
- /* We don't expect any padding */
- KILL_TEST();
+ /* failed writes may be discarded events */
+ if (!event->time_delta)
+ KILL_TEST();
+ inc = event->array[0] + 4;
break;
case RINGBUF_TYPE_TIME_EXTEND:
inc = 8;
@@ -119,7 +121,7 @@
KILL_TEST();
break;
}
- inc = event->array[0];
+ inc = event->array[0] + 4;
break;
default:
entry = ring_buffer_event_data(event);
@@ -201,7 +203,7 @@
* Hammer the buffer for 10 secs (this may
* make the system stall)
*/
- pr_info("Starting ring buffer hammer\n");
+ trace_printk("Starting ring buffer hammer\n");
do_gettimeofday(&start_tv);
do {
struct ring_buffer_event *event;
@@ -237,7 +239,7 @@
#endif
} while (end_tv.tv_sec < (start_tv.tv_sec + RUN_TIME) && !kill_test);
- pr_info("End ring buffer hammer\n");
+ trace_printk("End ring buffer hammer\n");
if (consumer) {
/* Init both completions here to avoid races */
@@ -260,49 +262,50 @@
overruns = ring_buffer_overruns(buffer);
if (kill_test)
- pr_info("ERROR!\n");
- pr_info("Time: %lld (usecs)\n", time);
- pr_info("Overruns: %lld\n", overruns);
+ trace_printk("ERROR!\n");
+ trace_printk("Time: %lld (usecs)\n", time);
+ trace_printk("Overruns: %lld\n", overruns);
if (disable_reader)
- pr_info("Read: (reader disabled)\n");
+ trace_printk("Read: (reader disabled)\n");
else
- pr_info("Read: %ld (by %s)\n", read,
+ trace_printk("Read: %ld (by %s)\n", read,
read_events ? "events" : "pages");
- pr_info("Entries: %lld\n", entries);
- pr_info("Total: %lld\n", entries + overruns + read);
- pr_info("Missed: %ld\n", missed);
- pr_info("Hit: %ld\n", hit);
+ trace_printk("Entries: %lld\n", entries);
+ trace_printk("Total: %lld\n", entries + overruns + read);
+ trace_printk("Missed: %ld\n", missed);
+ trace_printk("Hit: %ld\n", hit);
/* Convert time from usecs to millisecs */
do_div(time, USEC_PER_MSEC);
if (time)
hit /= (long)time;
else
- pr_info("TIME IS ZERO??\n");
+ trace_printk("TIME IS ZERO??\n");
- pr_info("Entries per millisec: %ld\n", hit);
+ trace_printk("Entries per millisec: %ld\n", hit);
if (hit) {
/* Calculate the average time in nanosecs */
avg = NSEC_PER_MSEC / hit;
- pr_info("%ld ns per entry\n", avg);
+ trace_printk("%ld ns per entry\n", avg);
}
if (missed) {
if (time)
missed /= (long)time;
- pr_info("Total iterations per millisec: %ld\n", hit + missed);
+ trace_printk("Total iterations per millisec: %ld\n",
+ hit + missed);
/* it is possible that hit + missed will overflow and be zero */
if (!(hit + missed)) {
- pr_info("hit + missed overflowed and totalled zero!\n");
+ trace_printk("hit + missed overflowed and totalled zero!\n");
hit--; /* make it non zero */
}
/* Calculate the average time in nanosecs */
avg = NSEC_PER_MSEC / (hit + missed);
- pr_info("%ld ns per entry\n", avg);
+ trace_printk("%ld ns per entry\n", avg);
}
}
@@ -353,7 +356,7 @@
ring_buffer_producer();
- pr_info("Sleeping for 10 secs\n");
+ trace_printk("Sleeping for 10 secs\n");
set_current_state(TASK_INTERRUPTIBLE);
schedule_timeout(HZ * SLEEP_TIME);
__set_current_state(TASK_RUNNING);
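
The benchmark changes above stop treating padding events as fatal and add the 4-byte event header to the computed increment. A sketch of how the per-event step could be derived when walking a read page, assuming array[0] carries the payload length for padding and over-length events and that type_len otherwise encodes the payload length in 4-byte units (illustrative helper, not part of the patch):

	/* Sketch: how far to advance over one event in a read page. */
	static int example_event_increment(struct ring_buffer_event *event)
	{
		switch (event->type_len) {
		case RINGBUF_TYPE_PADDING:
			/* discarded event: payload length kept in array[0] */
			return event->array[0] + 4;
		case RINGBUF_TYPE_TIME_EXTEND:
			return 8;
		case 0:
			/* payload too large for type_len: length in array[0] */
			return event->array[0] + 4;
		default:
			/* type_len encodes the payload length in 4-byte units */
			return (event->type_len + 1) * 4;
		}
	}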
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index c1878bf..076fa6f 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -2191,11 +2191,12 @@
if (!alloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL))
return -ENOMEM;
- mutex_lock(&tracing_cpumask_update_lock);
err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
if (err)
goto err_unlock;
+ mutex_lock(&tracing_cpumask_update_lock);
+
local_irq_disable();
__raw_spin_lock(&ftrace_max_lock);
for_each_tracing_cpu(cpu) {
@@ -2223,8 +2224,7 @@
return count;
err_unlock:
- mutex_unlock(&tracing_cpumask_update_lock);
- free_cpumask_var(tracing_cpumask);
+ free_cpumask_var(tracing_cpumask_new);
return err;
}
@@ -3626,7 +3626,7 @@
struct trace_seq *s;
unsigned long cnt;
- s = kmalloc(sizeof(*s), GFP_ATOMIC);
+ s = kmalloc(sizeof(*s), GFP_KERNEL);
if (!s)
return -ENOMEM;
diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c
index db6e54b..936c621 100644
--- a/kernel/trace/trace_events_filter.c
+++ b/kernel/trace/trace_events_filter.c
@@ -27,8 +27,6 @@
#include "trace.h"
#include "trace_output.h"
-static DEFINE_MUTEX(filter_mutex);
-
enum filter_op_ids
{
OP_OR,
@@ -178,7 +176,7 @@
static int filter_pred_strloc(struct filter_pred *pred, void *event,
int val1, int val2)
{
- int str_loc = *(int *)(event + pred->offset);
+ unsigned short str_loc = *(unsigned short *)(event + pred->offset);
char *addr = (char *)(event + str_loc);
int cmp, match;
@@ -294,12 +292,12 @@
{
struct event_filter *filter = call->filter;
- mutex_lock(&filter_mutex);
+ mutex_lock(&event_mutex);
if (filter->filter_string)
trace_seq_printf(s, "%s\n", filter->filter_string);
else
trace_seq_printf(s, "none\n");
- mutex_unlock(&filter_mutex);
+ mutex_unlock(&event_mutex);
}
void print_subsystem_event_filter(struct event_subsystem *system,
@@ -307,12 +305,12 @@
{
struct event_filter *filter = system->filter;
- mutex_lock(&filter_mutex);
+ mutex_lock(&event_mutex);
if (filter->filter_string)
trace_seq_printf(s, "%s\n", filter->filter_string);
else
trace_seq_printf(s, "none\n");
- mutex_unlock(&filter_mutex);
+ mutex_unlock(&event_mutex);
}
static struct ftrace_event_field *
@@ -381,6 +379,7 @@
filter_free_pred(filter->preds[i]);
}
kfree(filter->preds);
+ kfree(filter->filter_string);
kfree(filter);
call->filter = NULL;
}
@@ -433,7 +432,6 @@
filter->n_preds = 0;
}
- mutex_lock(&event_mutex);
list_for_each_entry(call, &ftrace_events, list) {
if (!call->define_fields)
continue;
@@ -443,7 +441,6 @@
remove_filter_string(call->filter);
}
}
- mutex_unlock(&event_mutex);
}
static int filter_add_pred_fn(struct filter_parse_state *ps,
@@ -546,6 +543,7 @@
filter_pred_fn_t fn;
unsigned long long val;
int string_type;
+ int ret;
pred->fn = filter_pred_none;
@@ -581,7 +579,11 @@
pred->not = 1;
return filter_add_pred_fn(ps, call, pred, fn);
} else {
- if (strict_strtoull(pred->str_val, 0, &val)) {
+ if (field->is_signed)
+ ret = strict_strtoll(pred->str_val, 0, &val);
+ else
+ ret = strict_strtoull(pred->str_val, 0, &val);
+ if (ret) {
parse_error(ps, FILT_ERR_ILLEGAL_INTVAL, 0);
return -EINVAL;
}
@@ -625,7 +627,6 @@
filter->preds[filter->n_preds] = pred;
filter->n_preds++;
- mutex_lock(&event_mutex);
list_for_each_entry(call, &ftrace_events, list) {
if (!call->define_fields)
@@ -636,14 +637,12 @@
err = filter_add_pred(ps, call, pred);
if (err) {
- mutex_unlock(&event_mutex);
filter_free_subsystem_preds(system);
parse_error(ps, FILT_ERR_BAD_SUBSYS_FILTER, 0);
goto out;
}
replace_filter_string(call->filter, filter_string);
}
- mutex_unlock(&event_mutex);
out:
return err;
}
@@ -1070,12 +1069,12 @@
struct filter_parse_state *ps;
- mutex_lock(&filter_mutex);
+ mutex_lock(&event_mutex);
if (!strcmp(strstrip(filter_string), "0")) {
filter_disable_preds(call);
remove_filter_string(call->filter);
- mutex_unlock(&filter_mutex);
+ mutex_unlock(&event_mutex);
return 0;
}
@@ -1103,7 +1102,7 @@
postfix_clear(ps);
kfree(ps);
out_unlock:
- mutex_unlock(&filter_mutex);
+ mutex_unlock(&event_mutex);
return err;
}
@@ -1115,12 +1114,12 @@
struct filter_parse_state *ps;
- mutex_lock(&filter_mutex);
+ mutex_lock(&event_mutex);
if (!strcmp(strstrip(filter_string), "0")) {
filter_free_subsystem_preds(system);
remove_filter_string(system->filter);
- mutex_unlock(&filter_mutex);
+ mutex_unlock(&event_mutex);
return 0;
}
@@ -1148,7 +1147,7 @@
postfix_clear(ps);
kfree(ps);
out_unlock:
- mutex_unlock(&filter_mutex);
+ mutex_unlock(&event_mutex);
return err;
}
diff --git a/kernel/trace/trace_functions.c b/kernel/trace/trace_functions.c
index c9a0b7d..90f1347 100644
--- a/kernel/trace/trace_functions.c
+++ b/kernel/trace/trace_functions.c
@@ -193,9 +193,11 @@
static void tracing_stop_function_trace(void)
{
ftrace_function_enabled = 0;
- /* OK if they are not registered */
- unregister_ftrace_function(&trace_stack_ops);
- unregister_ftrace_function(&trace_ops);
+
+ if (func_flags.val & TRACE_FUNC_OPT_STACK)
+ unregister_ftrace_function(&trace_stack_ops);
+ else
+ unregister_ftrace_function(&trace_ops);
}
static int func_set_flag(u32 old_flags, u32 bit, int set)
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
index 8b59241..d2249ab 100644
--- a/kernel/trace/trace_functions_graph.c
+++ b/kernel/trace/trace_functions_graph.c
@@ -57,7 +57,8 @@
/* Add a function return address to the trace stack on thread info.*/
int
-ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth)
+ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth,
+ unsigned long frame_pointer)
{
unsigned long long calltime;
int index;
@@ -85,6 +86,7 @@
current->ret_stack[index].func = func;
current->ret_stack[index].calltime = calltime;
current->ret_stack[index].subtime = 0;
+ current->ret_stack[index].fp = frame_pointer;
*depth = index;
return 0;
@@ -92,7 +94,8 @@
/* Retrieve a function return address to the trace stack on thread info.*/
static void
-ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret)
+ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret,
+ unsigned long frame_pointer)
{
int index;
@@ -106,6 +109,31 @@
return;
}
+#ifdef CONFIG_HAVE_FUNCTION_GRAPH_FP_TEST
+ /*
+ * The arch may choose to record the frame pointer used
+ * and check it here to make sure that it is what we expect it
+ * to be. If gcc does not set the place holder of the return
+ * address in the frame pointer, and does a copy instead, then
+ * the function graph trace will fail. This test detects this
+ * case.
+ *
+ * Currently, x86_32 compiled with optimization for size (-Os) makes the latest
+ * gcc do the above.
+ */
+ if (unlikely(current->ret_stack[index].fp != frame_pointer)) {
+ ftrace_graph_stop();
+ WARN(1, "Bad frame pointer: expected %lx, received %lx\n"
+ " from func %pF return to %lx\n",
+ current->ret_stack[index].fp,
+ frame_pointer,
+ (void *)current->ret_stack[index].func,
+ current->ret_stack[index].ret);
+ *ret = (unsigned long)panic;
+ return;
+ }
+#endif
+
*ret = current->ret_stack[index].ret;
trace->func = current->ret_stack[index].func;
trace->calltime = current->ret_stack[index].calltime;
@@ -117,12 +145,12 @@
* Send the trace to the ring-buffer.
* @return the original return address.
*/
-unsigned long ftrace_return_to_handler(void)
+unsigned long ftrace_return_to_handler(unsigned long frame_pointer)
{
struct ftrace_graph_ret trace;
unsigned long ret;
- ftrace_pop_return_trace(&trace, &ret);
+ ftrace_pop_return_trace(&trace, &ret, frame_pointer);
trace.rettime = trace_clock_local();
ftrace_graph_return(&trace);
barrier();
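
The new frame_pointer argument only pays off when an architecture records one on entry and hands it back on return. A conceptual sketch of the arch side, assuming the usual prepare/return hook split; the function names are illustrative and the real callers live in arch entry and trampoline code:

	/* Sketch: entry hook records the frame pointer it saw ... */
	static void example_prepare_return(unsigned long *parent,
					   unsigned long self_addr,
					   unsigned long frame_pointer)
	{
		int depth;

		if (ftrace_push_return_trace(*parent, self_addr, &depth,
					     frame_pointer))
			return;	/* ret_stack full: leave return address alone */
		/* arch code would now redirect *parent to its trampoline */
	}

	/* ... and the return trampoline hands it back for the FP test. */
	static unsigned long example_return_handler(unsigned long frame_pointer)
	{
		return ftrace_return_to_handler(frame_pointer);
	}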
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 6b0c2d8a..23067ab 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -472,7 +472,7 @@
bool
depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
select STACKTRACE
- select FRAME_POINTER if !X86 && !MIPS && !PPC && !ARM_UNWIND && !S390
+ select FRAME_POINTER if !MIPS && !PPC && !ARM_UNWIND && !S390
select KALLSYMS
select KALLSYMS_ALL
diff --git a/lib/dma-debug.c b/lib/dma-debug.c
index ad65fc0..3b93129 100644
--- a/lib/dma-debug.c
+++ b/lib/dma-debug.c
@@ -262,11 +262,12 @@
*/
matches += 1;
match_lvl = 0;
- entry->size == ref->size ? ++match_lvl : match_lvl;
- entry->type == ref->type ? ++match_lvl : match_lvl;
- entry->direction == ref->direction ? ++match_lvl : match_lvl;
+ entry->size == ref->size ? ++match_lvl : 0;
+ entry->type == ref->type ? ++match_lvl : 0;
+ entry->direction == ref->direction ? ++match_lvl : 0;
+ entry->sg_call_ents == ref->sg_call_ents ? ++match_lvl : 0;
- if (match_lvl == 3) {
+ if (match_lvl == 4) {
/* perfect-fit - return the result */
return entry;
} else if (match_lvl > last_lvl) {
@@ -873,72 +874,68 @@
"[addr=%p] [size=%llu]\n", addr, size);
}
-static void check_sync(struct device *dev, dma_addr_t addr,
- u64 size, u64 offset, int direction, bool to_cpu)
+static void check_sync(struct device *dev,
+ struct dma_debug_entry *ref,
+ bool to_cpu)
{
- struct dma_debug_entry ref = {
- .dev = dev,
- .dev_addr = addr,
- .size = size,
- .direction = direction,
- };
struct dma_debug_entry *entry;
struct hash_bucket *bucket;
unsigned long flags;
- bucket = get_hash_bucket(&ref, &flags);
+ bucket = get_hash_bucket(ref, &flags);
- entry = hash_bucket_find(bucket, &ref);
+ entry = hash_bucket_find(bucket, ref);
if (!entry) {
err_printk(dev, NULL, "DMA-API: device driver tries "
"to sync DMA memory it has not allocated "
"[device address=0x%016llx] [size=%llu bytes]\n",
- (unsigned long long)addr, size);
+ (unsigned long long)ref->dev_addr, ref->size);
goto out;
}
- if ((offset + size) > entry->size) {
+ if (ref->size > entry->size) {
err_printk(dev, entry, "DMA-API: device driver syncs"
" DMA memory outside allocated range "
"[device address=0x%016llx] "
- "[allocation size=%llu bytes] [sync offset=%llu] "
- "[sync size=%llu]\n", entry->dev_addr, entry->size,
- offset, size);
+ "[allocation size=%llu bytes] "
+ "[sync offset+size=%llu]\n",
+ entry->dev_addr, entry->size,
+ ref->size);
}
- if (direction != entry->direction) {
+ if (ref->direction != entry->direction) {
err_printk(dev, entry, "DMA-API: device driver syncs "
"DMA memory with different direction "
"[device address=0x%016llx] [size=%llu bytes] "
"[mapped with %s] [synced with %s]\n",
- (unsigned long long)addr, entry->size,
+ (unsigned long long)ref->dev_addr, entry->size,
dir2name[entry->direction],
- dir2name[direction]);
+ dir2name[ref->direction]);
}
if (entry->direction == DMA_BIDIRECTIONAL)
goto out;
if (to_cpu && !(entry->direction == DMA_FROM_DEVICE) &&
- !(direction == DMA_TO_DEVICE))
+ !(ref->direction == DMA_TO_DEVICE))
err_printk(dev, entry, "DMA-API: device driver syncs "
"device read-only DMA memory for cpu "
"[device address=0x%016llx] [size=%llu bytes] "
"[mapped with %s] [synced with %s]\n",
- (unsigned long long)addr, entry->size,
+ (unsigned long long)ref->dev_addr, entry->size,
dir2name[entry->direction],
- dir2name[direction]);
+ dir2name[ref->direction]);
if (!to_cpu && !(entry->direction == DMA_TO_DEVICE) &&
- !(direction == DMA_FROM_DEVICE))
+ !(ref->direction == DMA_FROM_DEVICE))
err_printk(dev, entry, "DMA-API: device driver syncs "
"device write-only DMA memory to device "
"[device address=0x%016llx] [size=%llu bytes] "
"[mapped with %s] [synced with %s]\n",
- (unsigned long long)addr, entry->size,
+ (unsigned long long)ref->dev_addr, entry->size,
dir2name[entry->direction],
- dir2name[direction]);
+ dir2name[ref->direction]);
out:
put_hash_bucket(bucket, &flags);
@@ -1036,19 +1033,16 @@
}
EXPORT_SYMBOL(debug_dma_map_sg);
-static int get_nr_mapped_entries(struct device *dev, struct scatterlist *s)
+static int get_nr_mapped_entries(struct device *dev,
+ struct dma_debug_entry *ref)
{
- struct dma_debug_entry *entry, ref;
+ struct dma_debug_entry *entry;
struct hash_bucket *bucket;
unsigned long flags;
int mapped_ents;
- ref.dev = dev;
- ref.dev_addr = sg_dma_address(s);
- ref.size = sg_dma_len(s),
-
- bucket = get_hash_bucket(&ref, &flags);
- entry = hash_bucket_find(bucket, &ref);
+ bucket = get_hash_bucket(ref, &flags);
+ entry = hash_bucket_find(bucket, ref);
mapped_ents = 0;
if (entry)
@@ -1076,16 +1070,14 @@
.dev_addr = sg_dma_address(s),
.size = sg_dma_len(s),
.direction = dir,
- .sg_call_ents = 0,
+ .sg_call_ents = nelems,
};
if (mapped_ents && i >= mapped_ents)
break;
- if (!i) {
- ref.sg_call_ents = nelems;
- mapped_ents = get_nr_mapped_entries(dev, s);
- }
+ if (!i)
+ mapped_ents = get_nr_mapped_entries(dev, &ref);
check_unmap(&ref);
}
@@ -1140,10 +1132,19 @@
void debug_dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
size_t size, int direction)
{
+ struct dma_debug_entry ref;
+
if (unlikely(global_disable))
return;
- check_sync(dev, dma_handle, size, 0, direction, true);
+ ref.type = dma_debug_single;
+ ref.dev = dev;
+ ref.dev_addr = dma_handle;
+ ref.size = size;
+ ref.direction = direction;
+ ref.sg_call_ents = 0;
+
+ check_sync(dev, &ref, true);
}
EXPORT_SYMBOL(debug_dma_sync_single_for_cpu);
@@ -1151,10 +1152,19 @@
dma_addr_t dma_handle, size_t size,
int direction)
{
+ struct dma_debug_entry ref;
+
if (unlikely(global_disable))
return;
- check_sync(dev, dma_handle, size, 0, direction, false);
+ ref.type = dma_debug_single;
+ ref.dev = dev;
+ ref.dev_addr = dma_handle;
+ ref.size = size;
+ ref.direction = direction;
+ ref.sg_call_ents = 0;
+
+ check_sync(dev, &ref, false);
}
EXPORT_SYMBOL(debug_dma_sync_single_for_device);
@@ -1163,10 +1173,19 @@
unsigned long offset, size_t size,
int direction)
{
+ struct dma_debug_entry ref;
+
if (unlikely(global_disable))
return;
- check_sync(dev, dma_handle, size, offset, direction, true);
+ ref.type = dma_debug_single;
+ ref.dev = dev;
+ ref.dev_addr = dma_handle;
+ ref.size = offset + size;
+ ref.direction = direction;
+ ref.sg_call_ents = 0;
+
+ check_sync(dev, &ref, true);
}
EXPORT_SYMBOL(debug_dma_sync_single_range_for_cpu);
@@ -1175,10 +1194,19 @@
unsigned long offset,
size_t size, int direction)
{
+ struct dma_debug_entry ref;
+
if (unlikely(global_disable))
return;
- check_sync(dev, dma_handle, size, offset, direction, false);
+ ref.type = dma_debug_single;
+ ref.dev = dev;
+ ref.dev_addr = dma_handle;
+ ref.size = offset + size;
+ ref.direction = direction;
+ ref.sg_call_ents = 0;
+
+ check_sync(dev, &ref, false);
}
EXPORT_SYMBOL(debug_dma_sync_single_range_for_device);
@@ -1192,14 +1220,24 @@
return;
for_each_sg(sg, s, nelems, i) {
+
+ struct dma_debug_entry ref = {
+ .type = dma_debug_sg,
+ .dev = dev,
+ .paddr = sg_phys(s),
+ .dev_addr = sg_dma_address(s),
+ .size = sg_dma_len(s),
+ .direction = direction,
+ .sg_call_ents = nelems,
+ };
+
if (!i)
- mapped_ents = get_nr_mapped_entries(dev, s);
+ mapped_ents = get_nr_mapped_entries(dev, &ref);
if (i >= mapped_ents)
break;
- check_sync(dev, sg_dma_address(s), sg_dma_len(s), 0,
- direction, true);
+ check_sync(dev, &ref, true);
}
}
EXPORT_SYMBOL(debug_dma_sync_sg_for_cpu);
@@ -1214,14 +1252,23 @@
return;
for_each_sg(sg, s, nelems, i) {
+
+ struct dma_debug_entry ref = {
+ .type = dma_debug_sg,
+ .dev = dev,
+ .paddr = sg_phys(s),
+ .dev_addr = sg_dma_address(s),
+ .size = sg_dma_len(s),
+ .direction = direction,
+ .sg_call_ents = nelems,
+ };
if (!i)
- mapped_ents = get_nr_mapped_entries(dev, s);
+ mapped_ents = get_nr_mapped_entries(dev, &ref);
if (i >= mapped_ents)
break;
- check_sync(dev, sg_dma_address(s), sg_dma_len(s), 0,
- direction, false);
+ check_sync(dev, &ref, false);
}
}
EXPORT_SYMBOL(debug_dma_sync_sg_for_device);
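
With check_sync() now taking a fully built dma_debug_entry, every sync path constructs a reference entry and lets the four-property best-fit match in hash_bucket_find() locate the stored mapping. A sketch of how such a reference could be filled for a ranged single-buffer sync, where size is taken as the end of the synced range; the helper name is illustrative and not part of the patch:

	static void example_build_sync_ref(struct device *dev, dma_addr_t handle,
					   unsigned long offset, size_t size,
					   int dir, struct dma_debug_entry *ref)
	{
		ref->type         = dma_debug_single;
		ref->dev          = dev;
		ref->dev_addr     = handle;
		ref->size         = offset + size;	/* end of synced range */
		ref->direction    = dir;
		ref->sg_call_ents = 0;			/* not a scatter-gather sync */
	}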
diff --git a/mm/memory.c b/mm/memory.c
index d5d1653..98bcb90 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1310,8 +1310,9 @@
cond_resched();
while (!(page = follow_page(vma, start, foll_flags))) {
int ret;
- ret = handle_mm_fault(mm, vma, start,
- foll_flags & FOLL_WRITE);
+
+ /* FOLL_WRITE matches FAULT_FLAG_WRITE! */
+ ret = handle_mm_fault(mm, vma, start, foll_flags & FOLL_WRITE);
if (ret & VM_FAULT_ERROR) {
if (ret & VM_FAULT_OOM)
return i ? i : -ENOMEM;
@@ -2496,7 +2497,7 @@
*/
static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
unsigned long address, pte_t *page_table, pmd_t *pmd,
- int write_access, pte_t orig_pte)
+ unsigned int flags, pte_t orig_pte)
{
spinlock_t *ptl;
struct page *page;
@@ -2572,9 +2573,9 @@
inc_mm_counter(mm, anon_rss);
pte = mk_pte(page, vma->vm_page_prot);
- if (write_access && reuse_swap_page(page)) {
+ if ((flags & FAULT_FLAG_WRITE) && reuse_swap_page(page)) {
pte = maybe_mkwrite(pte_mkdirty(pte), vma);
- write_access = 0;
+ flags &= ~FAULT_FLAG_WRITE;
}
flush_icache_page(vma, page);
set_pte_at(mm, address, page_table, pte);
@@ -2587,7 +2588,7 @@
try_to_free_swap(page);
unlock_page(page);
- if (write_access) {
+ if (flags & FAULT_FLAG_WRITE) {
ret |= do_wp_page(mm, vma, address, page_table, pmd, ptl, pte);
if (ret & VM_FAULT_ERROR)
ret &= VM_FAULT_ERROR;
@@ -2616,7 +2617,7 @@
*/
static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
unsigned long address, pte_t *page_table, pmd_t *pmd,
- int write_access)
+ unsigned int flags)
{
struct page *page;
spinlock_t *ptl;
@@ -2776,7 +2777,7 @@
* due to the bad i386 page protection. But it's valid
* for other architectures too.
*
- * Note that if write_access is true, we either now have
+ * Note that if FAULT_FLAG_WRITE is set, we either now have
* an exclusive copy of the page, or this is a shared mapping,
* so we can make it writable and dirty to avoid having to
* handle that later.
@@ -2847,11 +2848,10 @@
static int do_linear_fault(struct mm_struct *mm, struct vm_area_struct *vma,
unsigned long address, pte_t *page_table, pmd_t *pmd,
- int write_access, pte_t orig_pte)
+ unsigned int flags, pte_t orig_pte)
{
pgoff_t pgoff = (((address & PAGE_MASK)
- vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
- unsigned int flags = (write_access ? FAULT_FLAG_WRITE : 0);
pte_unmap(page_table);
return __do_fault(mm, vma, address, pmd, pgoff, flags, orig_pte);
@@ -2868,12 +2868,12 @@
*/
static int do_nonlinear_fault(struct mm_struct *mm, struct vm_area_struct *vma,
unsigned long address, pte_t *page_table, pmd_t *pmd,
- int write_access, pte_t orig_pte)
+ unsigned int flags, pte_t orig_pte)
{
- unsigned int flags = FAULT_FLAG_NONLINEAR |
- (write_access ? FAULT_FLAG_WRITE : 0);
pgoff_t pgoff;
+ flags |= FAULT_FLAG_NONLINEAR;
+
if (!pte_unmap_same(mm, pmd, page_table, orig_pte))
return 0;
@@ -2904,7 +2904,7 @@
*/
static inline int handle_pte_fault(struct mm_struct *mm,
struct vm_area_struct *vma, unsigned long address,
- pte_t *pte, pmd_t *pmd, int write_access)
+ pte_t *pte, pmd_t *pmd, unsigned int flags)
{
pte_t entry;
spinlock_t *ptl;
@@ -2915,30 +2915,30 @@
if (vma->vm_ops) {
if (likely(vma->vm_ops->fault))
return do_linear_fault(mm, vma, address,
- pte, pmd, write_access, entry);
+ pte, pmd, flags, entry);
}
return do_anonymous_page(mm, vma, address,
- pte, pmd, write_access);
+ pte, pmd, flags);
}
if (pte_file(entry))
return do_nonlinear_fault(mm, vma, address,
- pte, pmd, write_access, entry);
+ pte, pmd, flags, entry);
return do_swap_page(mm, vma, address,
- pte, pmd, write_access, entry);
+ pte, pmd, flags, entry);
}
ptl = pte_lockptr(mm, pmd);
spin_lock(ptl);
if (unlikely(!pte_same(*pte, entry)))
goto unlock;
- if (write_access) {
+ if (flags & FAULT_FLAG_WRITE) {
if (!pte_write(entry))
return do_wp_page(mm, vma, address,
pte, pmd, ptl, entry);
entry = pte_mkdirty(entry);
}
entry = pte_mkyoung(entry);
- if (ptep_set_access_flags(vma, address, pte, entry, write_access)) {
+ if (ptep_set_access_flags(vma, address, pte, entry, flags & FAULT_FLAG_WRITE)) {
update_mmu_cache(vma, address, entry);
} else {
/*
@@ -2947,7 +2947,7 @@
* This still avoids useless tlb flushes for .text page faults
* with threads.
*/
- if (write_access)
+ if (flags & FAULT_FLAG_WRITE)
flush_tlb_page(vma, address);
}
unlock:
@@ -2959,7 +2959,7 @@
* By the time we get here, we already hold the mm semaphore
*/
int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
- unsigned long address, int write_access)
+ unsigned long address, unsigned int flags)
{
pgd_t *pgd;
pud_t *pud;
@@ -2971,7 +2971,7 @@
count_vm_event(PGFAULT);
if (unlikely(is_vm_hugetlb_page(vma)))
- return hugetlb_fault(mm, vma, address, write_access);
+ return hugetlb_fault(mm, vma, address, flags);
pgd = pgd_offset(mm, address);
pud = pud_alloc(mm, pgd, address);
@@ -2984,7 +2984,7 @@
if (!pte)
return VM_FAULT_OOM;
- return handle_pte_fault(mm, vma, address, pte, pmd, write_access);
+ return handle_pte_fault(mm, vma, address, pte, pmd, flags);
}
#ifndef __PAGETABLE_PUD_FOLDED
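
Callers of handle_mm_fault() now pass a FAULT_FLAG_* bitmask instead of a write_access boolean; FOLL_WRITE happens to share the value of FAULT_FLAG_WRITE, which is why the get_user_pages path above can pass foll_flags & FOLL_WRITE straight through. For other callers the conversion looks roughly like this illustrative wrapper (not part of the patch):

	static int example_fault(struct mm_struct *mm, struct vm_area_struct *vma,
				 unsigned long address, int is_write)
	{
		unsigned int flags = is_write ? FAULT_FLAG_WRITE : 0;

		return handle_mm_fault(mm, vma, address, flags);
	}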
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 6f0753f..30d5093 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -488,7 +488,6 @@
*/
static inline void free_page_mlock(struct page *page)
{
- __ClearPageMlocked(page);
__dec_zone_page_state(page, NR_MLOCK);
__count_vm_event(UNEVICTABLE_MLOCKFREED);
}
@@ -558,7 +557,7 @@
unsigned long flags;
int i;
int bad = 0;
- int clearMlocked = PageMlocked(page);
+ int wasMlocked = TestClearPageMlocked(page);
kmemcheck_free_shadow(page, order);
@@ -576,7 +575,7 @@
kernel_map_pages(page, 1 << order, 0);
local_irq_save(flags);
- if (unlikely(clearMlocked))
+ if (unlikely(wasMlocked))
free_page_mlock(page);
__count_vm_events(PGFREE, 1 << order);
free_one_page(page_zone(page), page, order,
@@ -1022,7 +1021,7 @@
struct zone *zone = page_zone(page);
struct per_cpu_pages *pcp;
unsigned long flags;
- int clearMlocked = PageMlocked(page);
+ int wasMlocked = TestClearPageMlocked(page);
kmemcheck_free_shadow(page, 0);
@@ -1041,7 +1040,7 @@
pcp = &zone_pcp(zone, get_cpu())->pcp;
set_page_private(page, get_pageblock_migratetype(page));
local_irq_save(flags);
- if (unlikely(clearMlocked))
+ if (unlikely(wasMlocked))
free_page_mlock(page);
__count_vm_event(PGFREE);
diff --git a/mm/vmscan.c b/mm/vmscan.c
index e8fa2d9..5415526 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -932,7 +932,7 @@
continue;
if (__isolate_lru_page(cursor_page, mode, file) == 0) {
list_move(&cursor_page->lru, dst);
- mem_cgroup_del_lru(page);
+ mem_cgroup_del_lru(cursor_page);
nr_taken++;
scan++;
}
diff --git a/net/ieee802154/af_ieee802154.c b/net/ieee802154/af_ieee802154.c
index 882a927..3bb6bdb 100644
--- a/net/ieee802154/af_ieee802154.c
+++ b/net/ieee802154/af_ieee802154.c
@@ -39,14 +39,6 @@
#include "af802154.h"
-#define DBG_DUMP(data, len) { \
- int i; \
- pr_debug("function: %s: data: len %d:\n", __func__, len); \
- for (i = 0; i < len; i++) {\
- pr_debug("%02x: %02x\n", i, (data)[i]); \
- } \
-}
-
/*
* Utility function for families
*/
@@ -302,10 +294,12 @@
static int ieee802154_rcv(struct sk_buff *skb, struct net_device *dev,
struct packet_type *pt, struct net_device *orig_dev)
{
- DBG_DUMP(skb->data, skb->len);
if (!netif_running(dev))
return -ENODEV;
pr_debug("got frame, type %d, dev %p\n", dev->type, dev);
+#ifdef DEBUG
+ print_hex_dump_bytes("ieee802154_rcv ", DUMP_PREFIX_NONE, skb->data, skb->len);
+#endif
if (!net_eq(dev_net(dev), &init_net))
goto drop;
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index cd76b3c..65b3a8b 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -1085,8 +1085,16 @@
now = jiffies;
if (!rt_caching(dev_net(rt->u.dst.dev))) {
- rt_drop(rt);
- return 0;
+ /*
+ * If we're not caching, just tell the caller we
+ * were successful and don't touch the route. The
+ * caller holds the sole reference to the cache entry, and
+ * it will be released when the caller is done with it.
+ * If we drop it here, the callers have no way to resolve routes
+ * when we're not caching. Instead, just point *rp at rt, so
+ * the caller gets a single use out of the route
+ */
+ goto report_and_exit;
}
rthp = &rt_hash_table[hash].chain;
@@ -1217,6 +1225,8 @@
rcu_assign_pointer(rt_hash_table[hash].chain, rt);
spin_unlock_bh(rt_hash_lock_addr(hash));
+
+report_and_exit:
if (rp)
*rp = rt;
else
diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
index 656cbd1..6be5f92 100644
--- a/net/iucv/af_iucv.c
+++ b/net/iucv/af_iucv.c
@@ -54,6 +54,38 @@
#define CB_TRGCLS(skb) ((skb)->cb + CB_TAG_LEN) /* iucv msg target class */
#define CB_TRGCLS_LEN (TRGCLS_SIZE)
+#define __iucv_sock_wait(sk, condition, timeo, ret) \
+do { \
+ DEFINE_WAIT(__wait); \
+ long __timeo = timeo; \
+ ret = 0; \
+ while (!(condition)) { \
+ prepare_to_wait(sk->sk_sleep, &__wait, TASK_INTERRUPTIBLE); \
+ if (!__timeo) { \
+ ret = -EAGAIN; \
+ break; \
+ } \
+ if (signal_pending(current)) { \
+ ret = sock_intr_errno(__timeo); \
+ break; \
+ } \
+ release_sock(sk); \
+ __timeo = schedule_timeout(__timeo); \
+ lock_sock(sk); \
+ ret = sock_error(sk); \
+ if (ret) \
+ break; \
+ } \
+ finish_wait(sk->sk_sleep, &__wait); \
+} while (0)
+
+#define iucv_sock_wait(sk, condition, timeo) \
+({ \
+ int __ret = 0; \
+ if (!(condition)) \
+ __iucv_sock_wait(sk, condition, timeo, __ret); \
+ __ret; \
+})
static void iucv_sock_kill(struct sock *sk);
static void iucv_sock_close(struct sock *sk);
@@ -238,6 +270,48 @@
return msg->length;
}
+/**
+ * iucv_sock_in_state() - check for specific states
+ * @sk: sock structure
+ * @state: first iucv sk state
+ * @state2: second iucv sk state
+ *
+ * Returns true if the socket is in either the first or the second state.
+ */
+static int iucv_sock_in_state(struct sock *sk, int state, int state2)
+{
+ return (sk->sk_state == state || sk->sk_state == state2);
+}
+
+/**
+ * iucv_below_msglim() - function to check if messages can be sent
+ * @sk: sock structure
+ *
+ * Returns true if the send queue length is lower than the message limit.
+ * Always returns true if the socket is not connected (no iucv path for
+ * checking the message limit).
+ */
+static inline int iucv_below_msglim(struct sock *sk)
+{
+ struct iucv_sock *iucv = iucv_sk(sk);
+
+ if (sk->sk_state != IUCV_CONNECTED)
+ return 1;
+ return (skb_queue_len(&iucv->send_skb_q) < iucv->path->msglim);
+}
+
+/**
+ * iucv_sock_wake_msglim() - Wake up thread waiting on msg limit
+ */
+static void iucv_sock_wake_msglim(struct sock *sk)
+{
+ read_lock(&sk->sk_callback_lock);
+ if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
+ wake_up_interruptible_all(sk->sk_sleep);
+ sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
+ read_unlock(&sk->sk_callback_lock);
+}
+
/* Timers */
static void iucv_sock_timeout(unsigned long arg)
{
@@ -329,7 +403,9 @@
timeo = sk->sk_lingertime;
else
timeo = IUCV_DISCONN_TIMEOUT;
- err = iucv_sock_wait_state(sk, IUCV_CLOSED, 0, timeo);
+ err = iucv_sock_wait(sk,
+ iucv_sock_in_state(sk, IUCV_CLOSED, 0),
+ timeo);
}
case IUCV_CLOSING: /* fall through */
@@ -510,39 +586,6 @@
return NULL;
}
-int iucv_sock_wait_state(struct sock *sk, int state, int state2,
- unsigned long timeo)
-{
- DECLARE_WAITQUEUE(wait, current);
- int err = 0;
-
- add_wait_queue(sk->sk_sleep, &wait);
- while (sk->sk_state != state && sk->sk_state != state2) {
- set_current_state(TASK_INTERRUPTIBLE);
-
- if (!timeo) {
- err = -EAGAIN;
- break;
- }
-
- if (signal_pending(current)) {
- err = sock_intr_errno(timeo);
- break;
- }
-
- release_sock(sk);
- timeo = schedule_timeout(timeo);
- lock_sock(sk);
-
- err = sock_error(sk);
- if (err)
- break;
- }
- set_current_state(TASK_RUNNING);
- remove_wait_queue(sk->sk_sleep, &wait);
- return err;
-}
-
/* Bind an unbound socket */
static int iucv_sock_bind(struct socket *sock, struct sockaddr *addr,
int addr_len)
@@ -687,8 +730,9 @@
}
if (sk->sk_state != IUCV_CONNECTED) {
- err = iucv_sock_wait_state(sk, IUCV_CONNECTED, IUCV_DISCONN,
- sock_sndtimeo(sk, flags & O_NONBLOCK));
+ err = iucv_sock_wait(sk, iucv_sock_in_state(sk, IUCV_CONNECTED,
+ IUCV_DISCONN),
+ sock_sndtimeo(sk, flags & O_NONBLOCK));
}
if (sk->sk_state == IUCV_DISCONN) {
@@ -842,9 +886,11 @@
struct iucv_message txmsg;
struct cmsghdr *cmsg;
int cmsg_done;
+ long timeo;
char user_id[9];
char appl_id[9];
int err;
+ int noblock = msg->msg_flags & MSG_DONTWAIT;
err = sock_error(sk);
if (err)
@@ -864,108 +910,119 @@
goto out;
}
- if (sk->sk_state == IUCV_CONNECTED) {
- /* initialize defaults */
- cmsg_done = 0; /* check for duplicate headers */
- txmsg.class = 0;
+ /* Return if the socket is not in connected state */
+ if (sk->sk_state != IUCV_CONNECTED) {
+ err = -ENOTCONN;
+ goto out;
+ }
- /* iterate over control messages */
- for (cmsg = CMSG_FIRSTHDR(msg); cmsg;
- cmsg = CMSG_NXTHDR(msg, cmsg)) {
+ /* initialize defaults */
+ cmsg_done = 0; /* check for duplicate headers */
+ txmsg.class = 0;
- if (!CMSG_OK(msg, cmsg)) {
- err = -EINVAL;
- goto out;
- }
+ /* iterate over control messages */
+ for (cmsg = CMSG_FIRSTHDR(msg); cmsg;
+ cmsg = CMSG_NXTHDR(msg, cmsg)) {
- if (cmsg->cmsg_level != SOL_IUCV)
- continue;
-
- if (cmsg->cmsg_type & cmsg_done) {
- err = -EINVAL;
- goto out;
- }
- cmsg_done |= cmsg->cmsg_type;
-
- switch (cmsg->cmsg_type) {
- case SCM_IUCV_TRGCLS:
- if (cmsg->cmsg_len != CMSG_LEN(TRGCLS_SIZE)) {
- err = -EINVAL;
- goto out;
- }
-
- /* set iucv message target class */
- memcpy(&txmsg.class,
- (void *) CMSG_DATA(cmsg), TRGCLS_SIZE);
-
- break;
-
- default:
- err = -EINVAL;
- goto out;
- break;
- }
- }
-
- /* allocate one skb for each iucv message:
- * this is fine for SOCK_SEQPACKET (unless we want to support
- * segmented records using the MSG_EOR flag), but
- * for SOCK_STREAM we might want to improve it in future */
- if (!(skb = sock_alloc_send_skb(sk, len,
- msg->msg_flags & MSG_DONTWAIT,
- &err)))
+ if (!CMSG_OK(msg, cmsg)) {
+ err = -EINVAL;
goto out;
-
- if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) {
- err = -EFAULT;
- goto fail;
}
- /* increment and save iucv message tag for msg_completion cbk */
- txmsg.tag = iucv->send_tag++;
- memcpy(CB_TAG(skb), &txmsg.tag, CB_TAG_LEN);
- skb_queue_tail(&iucv->send_skb_q, skb);
+ if (cmsg->cmsg_level != SOL_IUCV)
+ continue;
- if (((iucv->path->flags & IUCV_IPRMDATA) & iucv->flags)
- && skb->len <= 7) {
- err = iucv_send_iprm(iucv->path, &txmsg, skb);
+ if (cmsg->cmsg_type & cmsg_done) {
+ err = -EINVAL;
+ goto out;
+ }
+ cmsg_done |= cmsg->cmsg_type;
- /* on success: there is no message_complete callback
- * for an IPRMDATA msg; remove skb from send queue */
- if (err == 0) {
- skb_unlink(skb, &iucv->send_skb_q);
- kfree_skb(skb);
+ switch (cmsg->cmsg_type) {
+ case SCM_IUCV_TRGCLS:
+ if (cmsg->cmsg_len != CMSG_LEN(TRGCLS_SIZE)) {
+ err = -EINVAL;
+ goto out;
}
- /* this error should never happen since the
- * IUCV_IPRMDATA path flag is set... sever path */
- if (err == 0x15) {
- iucv_path_sever(iucv->path, NULL);
- skb_unlink(skb, &iucv->send_skb_q);
- err = -EPIPE;
- goto fail;
- }
- } else
- err = iucv_message_send(iucv->path, &txmsg, 0, 0,
- (void *) skb->data, skb->len);
- if (err) {
- if (err == 3) {
- user_id[8] = 0;
- memcpy(user_id, iucv->dst_user_id, 8);
- appl_id[8] = 0;
- memcpy(appl_id, iucv->dst_name, 8);
- pr_err("Application %s on z/VM guest %s"
- " exceeds message limit\n",
- user_id, appl_id);
- }
+ /* set iucv message target class */
+ memcpy(&txmsg.class,
+ (void *) CMSG_DATA(cmsg), TRGCLS_SIZE);
+
+ break;
+
+ default:
+ err = -EINVAL;
+ goto out;
+ break;
+ }
+ }
+
+ /* allocate one skb for each iucv message:
+ * this is fine for SOCK_SEQPACKET (unless we want to support
+ * segmented records using the MSG_EOR flag), but
+ * for SOCK_STREAM we might want to improve it in future */
+ skb = sock_alloc_send_skb(sk, len, noblock, &err);
+ if (!skb)
+ goto out;
+ if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) {
+ err = -EFAULT;
+ goto fail;
+ }
+
+ /* wait until the number of outstanding messages on the iucv path is below the message limit */
+ timeo = sock_sndtimeo(sk, noblock);
+ err = iucv_sock_wait(sk, iucv_below_msglim(sk), timeo);
+ if (err)
+ goto fail;
+
+ /* return -ECONNRESET if the socket is no longer connected */
+ if (sk->sk_state != IUCV_CONNECTED) {
+ err = -ECONNRESET;
+ goto fail;
+ }
+
+ /* increment and save iucv message tag for msg_completion cbk */
+ txmsg.tag = iucv->send_tag++;
+ memcpy(CB_TAG(skb), &txmsg.tag, CB_TAG_LEN);
+ skb_queue_tail(&iucv->send_skb_q, skb);
+
+ if (((iucv->path->flags & IUCV_IPRMDATA) & iucv->flags)
+ && skb->len <= 7) {
+ err = iucv_send_iprm(iucv->path, &txmsg, skb);
+
+ /* on success: there is no message_complete callback
+ * for an IPRMDATA msg; remove skb from send queue */
+ if (err == 0) {
+ skb_unlink(skb, &iucv->send_skb_q);
+ kfree_skb(skb);
+ }
+
+ /* this error should never happen since the
+ * IUCV_IPRMDATA path flag is set... sever path */
+ if (err == 0x15) {
+ iucv_path_sever(iucv->path, NULL);
skb_unlink(skb, &iucv->send_skb_q);
err = -EPIPE;
goto fail;
}
-
- } else {
- err = -ENOTCONN;
- goto out;
+ } else
+ err = iucv_message_send(iucv->path, &txmsg, 0, 0,
+ (void *) skb->data, skb->len);
+ if (err) {
+ if (err == 3) {
+ user_id[8] = 0;
+ memcpy(user_id, iucv->dst_user_id, 8);
+ appl_id[8] = 0;
+ memcpy(appl_id, iucv->dst_name, 8);
+ pr_err("Application %s on z/VM guest %s"
+ " exceeds message limit\n",
+ appl_id, user_id);
+ err = -EAGAIN;
+ } else
+ err = -EPIPE;
+ skb_unlink(skb, &iucv->send_skb_q);
+ goto fail;
}
release_sock(sk);
@@ -1581,7 +1638,11 @@
spin_unlock_irqrestore(&list->lock, flags);
- kfree_skb(this);
+ if (this) {
+ kfree_skb(this);
+ /* wake up any process waiting for sending */
+ iucv_sock_wake_msglim(sk);
+ }
}
BUG_ON(!this);
diff --git a/net/rfkill/core.c b/net/rfkill/core.c
index 4e68ab4..79693fe 100644
--- a/net/rfkill/core.c
+++ b/net/rfkill/core.c
@@ -56,7 +56,6 @@
u32 idx;
bool registered;
- bool suspended;
bool persistent;
const struct rfkill_ops *ops;
@@ -224,7 +223,7 @@
static void rfkill_event(struct rfkill *rfkill)
{
- if (!rfkill->registered || rfkill->suspended)
+ if (!rfkill->registered)
return;
kobject_uevent(&rfkill->dev.kobj, KOBJ_CHANGE);
@@ -270,6 +269,9 @@
unsigned long flags;
int err;
+ if (unlikely(rfkill->dev.power.power_state.event & PM_EVENT_SLEEP))
+ return;
+
/*
* Some platforms (...!) generate input events which affect the
* _hard_ kill state -- whenever something tries to change the
@@ -292,9 +294,6 @@
rfkill->state |= RFKILL_BLOCK_SW_SETCALL;
spin_unlock_irqrestore(&rfkill->lock, flags);
- if (unlikely(rfkill->dev.power.power_state.event & PM_EVENT_SLEEP))
- return;
-
err = rfkill->ops->set_block(rfkill->data, blocked);
spin_lock_irqsave(&rfkill->lock, flags);
@@ -508,19 +507,32 @@
blocked = blocked || hwblock;
spin_unlock_irqrestore(&rfkill->lock, flags);
- if (!rfkill->registered) {
- rfkill->persistent = true;
- } else {
- if (prev != blocked && !hwblock)
- schedule_work(&rfkill->uevent_work);
+ if (!rfkill->registered)
+ return blocked;
- rfkill_led_trigger_event(rfkill);
- }
+ if (prev != blocked && !hwblock)
+ schedule_work(&rfkill->uevent_work);
+
+ rfkill_led_trigger_event(rfkill);
return blocked;
}
EXPORT_SYMBOL(rfkill_set_sw_state);
+void rfkill_init_sw_state(struct rfkill *rfkill, bool blocked)
+{
+ unsigned long flags;
+
+ BUG_ON(!rfkill);
+ BUG_ON(rfkill->registered);
+
+ spin_lock_irqsave(&rfkill->lock, flags);
+ __rfkill_set_sw_state(rfkill, blocked);
+ rfkill->persistent = true;
+ spin_unlock_irqrestore(&rfkill->lock, flags);
+}
+EXPORT_SYMBOL(rfkill_init_sw_state);
+
void rfkill_set_states(struct rfkill *rfkill, bool sw, bool hw)
{
unsigned long flags;
@@ -598,6 +610,15 @@
return sprintf(buf, "%d\n", rfkill->idx);
}
+static ssize_t rfkill_persistent_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct rfkill *rfkill = to_rfkill(dev);
+
+ return sprintf(buf, "%d\n", rfkill->persistent);
+}
+
static u8 user_state_from_blocked(unsigned long state)
{
if (state & RFKILL_BLOCK_HW)
@@ -656,6 +677,7 @@
__ATTR(name, S_IRUGO, rfkill_name_show, NULL),
__ATTR(type, S_IRUGO, rfkill_type_show, NULL),
__ATTR(index, S_IRUGO, rfkill_idx_show, NULL),
+ __ATTR(persistent, S_IRUGO, rfkill_persistent_show, NULL),
__ATTR(state, S_IRUGO|S_IWUSR, rfkill_state_show, rfkill_state_store),
__ATTR(claim, S_IRUGO|S_IWUSR, rfkill_claim_show, rfkill_claim_store),
__ATTR_NULL
@@ -718,8 +740,6 @@
rfkill_pause_polling(rfkill);
- rfkill->suspended = true;
-
return 0;
}
@@ -728,10 +748,10 @@
struct rfkill *rfkill = to_rfkill(dev);
bool cur;
- cur = !!(rfkill->state & RFKILL_BLOCK_SW);
- rfkill_set_block(rfkill, cur);
-
- rfkill->suspended = false;
+ if (!rfkill->persistent) {
+ cur = !!(rfkill->state & RFKILL_BLOCK_SW);
+ rfkill_set_block(rfkill, cur);
+ }
rfkill_resume_polling(rfkill);
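
rfkill_init_sw_state() is intended for drivers that know their initial soft-block state before registration; it marks the state persistent so that it is neither re-applied over the hardware's own state on resume nor reported through a spurious uevent. A sketch of the expected driver usage; the ops structure and the helper reading the initial state are hypothetical, only the rfkill_* calls come from the API:

	static int example_platform_rfkill_probe(struct platform_device *pdev)
	{
		struct rfkill *rfkill;
		int err;

		rfkill = rfkill_alloc("example-wlan", &pdev->dev, RFKILL_TYPE_WLAN,
				      &example_rfkill_ops, pdev);
		if (!rfkill)
			return -ENOMEM;

		/* must be called before rfkill_register() */
		rfkill_init_sw_state(rfkill, example_read_initial_block(pdev));

		err = rfkill_register(rfkill);
		if (err)
			rfkill_destroy(rfkill);
		return err;
	}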
diff --git a/net/sunrpc/Makefile b/net/sunrpc/Makefile
index 5369aa3..db73fd2 100644
--- a/net/sunrpc/Makefile
+++ b/net/sunrpc/Makefile
@@ -13,5 +13,6 @@
rpcb_clnt.o timer.o xdr.o \
sunrpc_syms.o cache.o rpc_pipe.o \
svc_xprt.o
+sunrpc-$(CONFIG_NFS_V4_1) += backchannel_rqst.o bc_svc.o
sunrpc-$(CONFIG_PROC_FS) += stats.o
sunrpc-$(CONFIG_SYSCTL) += sysctl.o
diff --git a/net/sunrpc/backchannel_rqst.c b/net/sunrpc/backchannel_rqst.c
new file mode 100644
index 0000000..553621f
--- /dev/null
+++ b/net/sunrpc/backchannel_rqst.c
@@ -0,0 +1,281 @@
+/******************************************************************************
+
+(c) 2007 Network Appliance, Inc. All Rights Reserved.
+(c) 2009 NetApp. All Rights Reserved.
+
+NetApp provides this source code under the GPL v2 License.
+The GPL v2 license is available at
+http://opensource.org/licenses/gpl-license.php.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+******************************************************************************/
+
+#include <linux/tcp.h>
+#include <linux/sunrpc/xprt.h>
+
+#ifdef RPC_DEBUG
+#define RPCDBG_FACILITY RPCDBG_TRANS
+#endif
+
+#if defined(CONFIG_NFS_V4_1)
+
+/*
+ * Helper routines that track the number of preallocation elements
+ * on the transport.
+ */
+static inline int xprt_need_to_requeue(struct rpc_xprt *xprt)
+{
+ return xprt->bc_alloc_count > 0;
+}
+
+static inline void xprt_inc_alloc_count(struct rpc_xprt *xprt, unsigned int n)
+{
+ xprt->bc_alloc_count += n;
+}
+
+static inline int xprt_dec_alloc_count(struct rpc_xprt *xprt, unsigned int n)
+{
+ return xprt->bc_alloc_count -= n;
+}
+
+/*
+ * Free the preallocated rpc_rqst structure and the memory
+ * buffers hanging off of it.
+ */
+static void xprt_free_allocation(struct rpc_rqst *req)
+{
+ struct xdr_buf *xbufp;
+
+ dprintk("RPC: free allocations for req= %p\n", req);
+ BUG_ON(test_bit(RPC_BC_PA_IN_USE, &req->rq_bc_pa_state));
+ xbufp = &req->rq_private_buf;
+ free_page((unsigned long)xbufp->head[0].iov_base);
+ xbufp = &req->rq_snd_buf;
+ free_page((unsigned long)xbufp->head[0].iov_base);
+ list_del(&req->rq_bc_pa_list);
+ kfree(req);
+}
+
+/*
+ * Preallocate up to min_reqs structures and related buffers for use
+ * by the backchannel. This function can be called multiple times
+ * when creating new sessions that use the same rpc_xprt. The
+ * preallocated buffers are added to the pool of resources used by
+ * the rpc_xprt. Any one of these resources may be used by an
+ * incoming callback request. It's up to the higher levels in the
+ * stack to enforce that the maximum number of session slots is not
+ * being exceeded.
+ *
+ * Some callback arguments can be large. For example, a pNFS server
+ * using multiple deviceids. The list can be unbounded, but the client
+ * has the ability to tell the server the maximum size of the callback
+ * requests. Each deviceID is 16 bytes, so allocate one page
+ * for the arguments to have enough room to receive a number of these
+ * deviceIDs. The NFS client indicates to the pNFS server that its
+ * callback requests can be up to 4096 bytes in size.
+ */
+int xprt_setup_backchannel(struct rpc_xprt *xprt, unsigned int min_reqs)
+{
+ struct page *page_rcv = NULL, *page_snd = NULL;
+ struct xdr_buf *xbufp = NULL;
+ struct rpc_rqst *req, *tmp;
+ struct list_head tmp_list;
+ int i;
+
+ dprintk("RPC: setup backchannel transport\n");
+
+ /*
+ * We use a temporary list to keep track of the preallocated
+ * buffers. Once we're done building the list we splice it
+ * into the backchannel preallocation list off of the rpc_xprt
+ * struct. This helps minimize the amount of time the list
+ * lock is held on the rpc_xprt struct. It also makes cleanup
+ * easier in case of memory allocation errors.
+ */
+ INIT_LIST_HEAD(&tmp_list);
+ for (i = 0; i < min_reqs; i++) {
+ /* Pre-allocate one backchannel rpc_rqst */
+ req = kzalloc(sizeof(struct rpc_rqst), GFP_KERNEL);
+ if (req == NULL) {
+ printk(KERN_ERR "Failed to create bc rpc_rqst\n");
+ goto out_free;
+ }
+
+ /* Add the allocated buffer to the tmp list */
+ dprintk("RPC: adding req= %p\n", req);
+ list_add(&req->rq_bc_pa_list, &tmp_list);
+
+ req->rq_xprt = xprt;
+ INIT_LIST_HEAD(&req->rq_list);
+ INIT_LIST_HEAD(&req->rq_bc_list);
+
+ /* Preallocate one XDR receive buffer */
+ page_rcv = alloc_page(GFP_KERNEL);
+ if (page_rcv == NULL) {
+ printk(KERN_ERR "Failed to create bc receive xbuf\n");
+ goto out_free;
+ }
+ xbufp = &req->rq_rcv_buf;
+ xbufp->head[0].iov_base = page_address(page_rcv);
+ xbufp->head[0].iov_len = PAGE_SIZE;
+ xbufp->tail[0].iov_base = NULL;
+ xbufp->tail[0].iov_len = 0;
+ xbufp->page_len = 0;
+ xbufp->len = PAGE_SIZE;
+ xbufp->buflen = PAGE_SIZE;
+
+ /* Preallocate one XDR send buffer */
+ page_snd = alloc_page(GFP_KERNEL);
+ if (page_snd == NULL) {
+ printk(KERN_ERR "Failed to create bc snd xbuf\n");
+ goto out_free;
+ }
+
+ xbufp = &req->rq_snd_buf;
+ xbufp->head[0].iov_base = page_address(page_snd);
+ xbufp->head[0].iov_len = 0;
+ xbufp->tail[0].iov_base = NULL;
+ xbufp->tail[0].iov_len = 0;
+ xbufp->page_len = 0;
+ xbufp->len = 0;
+ xbufp->buflen = PAGE_SIZE;
+ }
+
+ /*
+ * Add the temporary list to the backchannel preallocation list
+ */
+ spin_lock_bh(&xprt->bc_pa_lock);
+ list_splice(&tmp_list, &xprt->bc_pa_list);
+ xprt_inc_alloc_count(xprt, min_reqs);
+ spin_unlock_bh(&xprt->bc_pa_lock);
+
+ dprintk("RPC: setup backchannel transport done\n");
+ return 0;
+
+out_free:
+ /*
+ * Memory allocation failed, free the temporary list
+ */
+ list_for_each_entry_safe(req, tmp, &tmp_list, rq_bc_pa_list)
+ xprt_free_allocation(req);
+
+ dprintk("RPC: setup backchannel transport failed\n");
+ return -1;
+}
+EXPORT_SYMBOL(xprt_setup_backchannel);
+
+/*
+ * Destroys the backchannel preallocated structures.
+ * Since these structures may have been allocated by multiple calls
+ * to xprt_setup_backchannel, we only destroy up to the maximum number
+ * of reqs specified by the caller.
+ * @xprt: the transport holding the preallocated structures
+ * @max_reqs: the maximum number of preallocated structures to destroy
+ */
+void xprt_destroy_backchannel(struct rpc_xprt *xprt, unsigned int max_reqs)
+{
+ struct rpc_rqst *req = NULL, *tmp = NULL;
+
+ dprintk("RPC: destroy backchannel transport\n");
+
+ BUG_ON(max_reqs == 0);
+ spin_lock_bh(&xprt->bc_pa_lock);
+ xprt_dec_alloc_count(xprt, max_reqs);
+ list_for_each_entry_safe(req, tmp, &xprt->bc_pa_list, rq_bc_pa_list) {
+ dprintk("RPC: req=%p\n", req);
+ xprt_free_allocation(req);
+ if (--max_reqs == 0)
+ break;
+ }
+ spin_unlock_bh(&xprt->bc_pa_lock);
+
+ dprintk("RPC: backchannel list empty= %s\n",
+ list_empty(&xprt->bc_pa_list) ? "true" : "false");
+}
+EXPORT_SYMBOL(xprt_destroy_backchannel);
+
+/*
+ * One or more rpc_rqst structures have been preallocated during the
+ * backchannel setup. Buffer space for the send and private XDR buffers
+ * has been preallocated as well. Use xprt_alloc_bc_request to allocate
+ * one of these preallocated requests. Use xprt_free_bc_request to return it.
+ *
+ * We know that we're called in soft interrupt context, so we take the plain
+ * spin_lock since there is no need for the bottom-half (_bh) variant.
+ *
+ * Return an available rpc_rqst, otherwise NULL if none are available.
+ */
+struct rpc_rqst *xprt_alloc_bc_request(struct rpc_xprt *xprt)
+{
+ struct rpc_rqst *req;
+
+ dprintk("RPC: allocate a backchannel request\n");
+ spin_lock(&xprt->bc_pa_lock);
+ if (!list_empty(&xprt->bc_pa_list)) {
+ req = list_first_entry(&xprt->bc_pa_list, struct rpc_rqst,
+ rq_bc_pa_list);
+ list_del(&req->rq_bc_pa_list);
+ } else {
+ req = NULL;
+ }
+ spin_unlock(&xprt->bc_pa_lock);
+
+ if (req != NULL) {
+ set_bit(RPC_BC_PA_IN_USE, &req->rq_bc_pa_state);
+ req->rq_reply_bytes_recvd = 0;
+ req->rq_bytes_sent = 0;
+ memcpy(&req->rq_private_buf, &req->rq_rcv_buf,
+ sizeof(req->rq_private_buf));
+ }
+ dprintk("RPC: backchannel req=%p\n", req);
+ return req;
+}
+
+/*
+ * Return the preallocated rpc_rqst structure and XDR buffers
+ * associated with this rpc_task.
+ */
+void xprt_free_bc_request(struct rpc_rqst *req)
+{
+ struct rpc_xprt *xprt = req->rq_xprt;
+
+ dprintk("RPC: free backchannel req=%p\n", req);
+
+ smp_mb__before_clear_bit();
+ BUG_ON(!test_bit(RPC_BC_PA_IN_USE, &req->rq_bc_pa_state));
+ clear_bit(RPC_BC_PA_IN_USE, &req->rq_bc_pa_state);
+ smp_mb__after_clear_bit();
+
+ if (!xprt_need_to_requeue(xprt)) {
+ /*
+ * The last remaining session was destroyed while this
+ * entry was in use. Free the entry and don't attempt
+ * to add back to the list because there is no need to
+ * have anymore preallocated entries.
+ */
+ dprintk("RPC: Last session removed req=%p\n", req);
+ xprt_free_allocation(req);
+ return;
+ }
+
+ /*
+ * Return it to the list of preallocations so that it
+ * may be reused by a new callback request.
+ */
+ spin_lock_bh(&xprt->bc_pa_lock);
+ list_add(&req->rq_bc_pa_list, &xprt->bc_pa_list);
+ spin_unlock_bh(&xprt->bc_pa_lock);
+}
+
+#endif /* CONFIG_NFS_V4_1 */
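
The API above is used in three phases: preallocation while sleeping is still allowed, allocation of one of the preallocated rqsts from softirq context when a callback arrives, and teardown when the last session that used the transport goes away. A compressed sketch of that lifecycle; the request count and the surrounding session handling are illustrative, and in reality the phases run in different contexts:

	static int example_bc_session(struct rpc_xprt *xprt)
	{
		struct rpc_rqst *req;

		/* preallocate rqsts and buffers while we may still sleep */
		if (xprt_setup_backchannel(xprt, 1) < 0)
			return -ENOMEM;

		/* later, from softirq context, when a callback request arrives */
		req = xprt_alloc_bc_request(xprt);
		if (req) {
			/* ... decode and service the callback ... */
			xprt_free_bc_request(req);
		}

		/* on teardown of the last session using this transport */
		xprt_destroy_backchannel(xprt, 1);
		return 0;
	}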
diff --git a/net/sunrpc/bc_svc.c b/net/sunrpc/bc_svc.c
new file mode 100644
index 0000000..13f214f
--- /dev/null
+++ b/net/sunrpc/bc_svc.c
@@ -0,0 +1,81 @@
+/******************************************************************************
+
+(c) 2007 Network Appliance, Inc. All Rights Reserved.
+(c) 2009 NetApp. All Rights Reserved.
+
+NetApp provides this source code under the GPL v2 License.
+The GPL v2 license is available at
+http://opensource.org/licenses/gpl-license.php.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+******************************************************************************/
+
+/*
+ * The NFSv4.1 callback service helper routines.
+ * They implement the transport level processing required to send the
+ * reply over an existing open connection previously established by the client.
+ */
+
+#if defined(CONFIG_NFS_V4_1)
+
+#include <linux/module.h>
+
+#include <linux/sunrpc/xprt.h>
+#include <linux/sunrpc/sched.h>
+#include <linux/sunrpc/bc_xprt.h>
+
+#define RPCDBG_FACILITY RPCDBG_SVCDSP
+
+void bc_release_request(struct rpc_task *task)
+{
+ struct rpc_rqst *req = task->tk_rqstp;
+
+ dprintk("RPC: bc_release_request: task= %p\n", task);
+
+ /*
+ * Release this request only if it's a backchannel
+ * preallocated request
+ */
+ if (!bc_prealloc(req))
+ return;
+ xprt_free_bc_request(req);
+}
+
+/* Empty callback ops */
+static const struct rpc_call_ops nfs41_callback_ops = {
+};
+
+
+/*
+ * Send the callback reply
+ */
+int bc_send(struct rpc_rqst *req)
+{
+ struct rpc_task *task;
+ int ret;
+
+ dprintk("RPC: bc_send req= %p\n", req);
+ task = rpc_run_bc_task(req, &nfs41_callback_ops);
+ if (IS_ERR(task))
+ ret = PTR_ERR(task);
+ else {
+ BUG_ON(atomic_read(&task->tk_count) != 1);
+ ret = task->tk_status;
+ rpc_put_task(task);
+ }
+ dprintk("RPC: bc_send ret= %d\n", ret);
+ return ret;
+}
+
+#endif /* CONFIG_NFS_V4_1 */
diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c
index 20029a7..ff0c230 100644
--- a/net/sunrpc/cache.c
+++ b/net/sunrpc/cache.c
@@ -488,7 +488,7 @@
{
int delay = 5;
if (cache_clean() == -1)
- delay = 30*HZ;
+ delay = round_jiffies_relative(30*HZ);
if (list_empty(&cache_list))
delay = 0;
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
index 5abab09..5bc2f45 100644
--- a/net/sunrpc/clnt.c
+++ b/net/sunrpc/clnt.c
@@ -36,7 +36,9 @@
#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/rpc_pipe_fs.h>
#include <linux/sunrpc/metrics.h>
+#include <linux/sunrpc/bc_xprt.h>
+#include "sunrpc.h"
#ifdef RPC_DEBUG
# define RPCDBG_FACILITY RPCDBG_CALL
@@ -63,6 +65,9 @@
static void call_bind(struct rpc_task *task);
static void call_bind_status(struct rpc_task *task);
static void call_transmit(struct rpc_task *task);
+#if defined(CONFIG_NFS_V4_1)
+static void call_bc_transmit(struct rpc_task *task);
+#endif /* CONFIG_NFS_V4_1 */
static void call_status(struct rpc_task *task);
static void call_transmit_status(struct rpc_task *task);
static void call_refresh(struct rpc_task *task);
@@ -613,6 +618,50 @@
}
EXPORT_SYMBOL_GPL(rpc_call_async);
+#if defined(CONFIG_NFS_V4_1)
+/**
+ * rpc_run_bc_task - Allocate a new RPC task for backchannel use, then run
+ * rpc_execute against it
+ * @req: RPC request to send
+ * @tk_ops: RPC call ops
+ */
+struct rpc_task *rpc_run_bc_task(struct rpc_rqst *req,
+ const struct rpc_call_ops *tk_ops)
+{
+ struct rpc_task *task;
+ struct xdr_buf *xbufp = &req->rq_snd_buf;
+ struct rpc_task_setup task_setup_data = {
+ .callback_ops = tk_ops,
+ };
+
+ dprintk("RPC: rpc_run_bc_task req= %p\n", req);
+ /*
+ * Create an rpc_task to send the data
+ */
+ task = rpc_new_task(&task_setup_data);
+ if (!task) {
+ xprt_free_bc_request(req);
+ goto out;
+ }
+ task->tk_rqstp = req;
+
+ /*
+ * Set up the xdr_buf length.
+ * This also indicates that the buffer is XDR encoded already.
+ */
+ xbufp->len = xbufp->head[0].iov_len + xbufp->page_len +
+ xbufp->tail[0].iov_len;
+
+ task->tk_action = call_bc_transmit;
+ atomic_inc(&task->tk_count);
+ BUG_ON(atomic_read(&task->tk_count) != 2);
+ rpc_execute(task);
+
+out:
+ dprintk("RPC: rpc_run_bc_task: task= %p\n", task);
+ return task;
+}
+#endif /* CONFIG_NFS_V4_1 */
+
void
rpc_call_start(struct rpc_task *task)
{
@@ -695,6 +744,19 @@
EXPORT_SYMBOL_GPL(rpc_force_rebind);
/*
+ * Restart an (async) RPC call from the call_prepare state.
+ * Usually called from within the exit handler.
+ */
+void
+rpc_restart_call_prepare(struct rpc_task *task)
+{
+ if (RPC_ASSASSINATED(task))
+ return;
+ task->tk_action = rpc_prepare_task;
+}
+EXPORT_SYMBOL_GPL(rpc_restart_call_prepare);
+
+/*
* Restart an (async) RPC call. Usually called from within the
* exit handler.
*/
@@ -1085,7 +1147,7 @@
* in order to allow access to the socket to other RPC requests.
*/
call_transmit_status(task);
- if (task->tk_msg.rpc_proc->p_decode != NULL)
+ if (rpc_reply_expected(task))
return;
task->tk_action = rpc_exit_task;
rpc_wake_up_queued_task(&task->tk_xprt->pending, task);
@@ -1120,6 +1182,72 @@
}
}
+#if defined(CONFIG_NFS_V4_1)
+/*
+ * 5b. Send the backchannel RPC reply. On error, drop the reply. In
+ * addition, disconnect on connectivity errors.
+ */
+static void
+call_bc_transmit(struct rpc_task *task)
+{
+ struct rpc_rqst *req = task->tk_rqstp;
+
+ BUG_ON(task->tk_status != 0);
+ task->tk_status = xprt_prepare_transmit(task);
+ if (task->tk_status == -EAGAIN) {
+ /*
+ * Could not reserve the transport. Try again after the
+ * transport is released.
+ */
+ task->tk_status = 0;
+ task->tk_action = call_bc_transmit;
+ return;
+ }
+
+ task->tk_action = rpc_exit_task;
+ if (task->tk_status < 0) {
+ printk(KERN_NOTICE "RPC: Could not send backchannel reply "
+ "error: %d\n", task->tk_status);
+ return;
+ }
+
+ xprt_transmit(task);
+ xprt_end_transmit(task);
+ dprint_status(task);
+ switch (task->tk_status) {
+ case 0:
+ /* Success */
+ break;
+ case -EHOSTDOWN:
+ case -EHOSTUNREACH:
+ case -ENETUNREACH:
+ case -ETIMEDOUT:
+ /*
+ * Problem reaching the server. Disconnect and let the
+ * forechannel reestablish the connection. The server will
+ * have to retransmit the backchannel request and we'll
+ * reprocess it. Since these ops are idempotent, there's no
+ * need to cache our reply at this time.
+ */
+ printk(KERN_NOTICE "RPC: Could not send backchannel reply "
+ "error: %d\n", task->tk_status);
+ xprt_conditional_disconnect(task->tk_xprt,
+ req->rq_connect_cookie);
+ break;
+ default:
+ /*
+ * We were unable to reply and will have to drop the
+ * request. The server should reconnect and retransmit.
+ */
+ BUG_ON(task->tk_status == -EAGAIN);
+ printk(KERN_NOTICE "RPC: Could not send backchannel reply "
+ "error: %d\n", task->tk_status);
+ break;
+ }
+ rpc_wake_up_queued_task(&req->rq_xprt->pending, task);
+}
+#endif /* CONFIG_NFS_V4_1 */
+
/*
* 6. Sort out the RPC call status
*/
@@ -1130,8 +1258,8 @@
struct rpc_rqst *req = task->tk_rqstp;
int status;
- if (req->rq_received > 0 && !req->rq_bytes_sent)
- task->tk_status = req->rq_received;
+ if (req->rq_reply_bytes_recvd > 0 && !req->rq_bytes_sent)
+ task->tk_status = req->rq_reply_bytes_recvd;
dprint_status(task);
@@ -1248,7 +1376,7 @@
/*
* Ensure that we see all writes made by xprt_complete_rqst()
- * before it changed req->rq_received.
+ * before it changed req->rq_reply_bytes_recvd.
*/
smp_rmb();
req->rq_rcv_buf.len = req->rq_private_buf.len;
@@ -1289,7 +1417,7 @@
task->tk_status = 0;
/* Note: rpc_verify_header() may have freed the RPC slot */
if (task->tk_rqstp == req) {
- req->rq_received = req->rq_rcv_buf.len = 0;
+ req->rq_reply_bytes_recvd = req->rq_rcv_buf.len = 0;
if (task->tk_client->cl_discrtry)
xprt_conditional_disconnect(task->tk_xprt,
req->rq_connect_cookie);
@@ -1377,13 +1505,14 @@
}
if ((len -= 3) < 0)
goto out_overflow;
- p += 1; /* skip XID */
+ p += 1; /* skip XID */
if ((n = ntohl(*p++)) != RPC_REPLY) {
dprintk("RPC: %5u %s: not an RPC reply: %x\n",
- task->tk_pid, __func__, n);
+ task->tk_pid, __func__, n);
goto out_garbage;
}
+
if ((n = ntohl(*p++)) != RPC_MSG_ACCEPTED) {
if (--len < 0)
goto out_overflow;
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
index ff50a05..1102ce1 100644
--- a/net/sunrpc/sched.c
+++ b/net/sunrpc/sched.c
@@ -569,7 +569,7 @@
/*
* Helper to call task->tk_ops->rpc_call_prepare
*/
-static void rpc_prepare_task(struct rpc_task *task)
+void rpc_prepare_task(struct rpc_task *task)
{
task->tk_ops->rpc_call_prepare(task, task->tk_calldata);
}
diff --git a/net/sunrpc/stats.c b/net/sunrpc/stats.c
index 1ef6e46..1b4e679 100644
--- a/net/sunrpc/stats.c
+++ b/net/sunrpc/stats.c
@@ -141,12 +141,14 @@
void rpc_count_iostats(struct rpc_task *task)
{
struct rpc_rqst *req = task->tk_rqstp;
- struct rpc_iostats *stats = task->tk_client->cl_metrics;
+ struct rpc_iostats *stats;
struct rpc_iostats *op_metrics;
long rtt, execute, queue;
- if (!stats || !req)
+ if (!task->tk_client || !task->tk_client->cl_metrics || !req)
return;
+
+ stats = task->tk_client->cl_metrics;
op_metrics = &stats[task->tk_msg.rpc_proc->p_statidx];
op_metrics->om_ops++;
@@ -154,7 +156,7 @@
op_metrics->om_timeouts += task->tk_timeouts;
op_metrics->om_bytes_sent += task->tk_bytes_sent;
- op_metrics->om_bytes_recv += req->rq_received;
+ op_metrics->om_bytes_recv += req->rq_reply_bytes_recvd;
queue = (long)req->rq_xtime - task->tk_start;
if (queue < 0)
diff --git a/net/sunrpc/sunrpc.h b/net/sunrpc/sunrpc.h
new file mode 100644
index 0000000..5d9dd74
--- /dev/null
+++ b/net/sunrpc/sunrpc.h
@@ -0,0 +1,37 @@
+/******************************************************************************
+
+(c) 2008 NetApp. All Rights Reserved.
+
+NetApp provides this source code under the GPL v2 License.
+The GPL v2 license is available at
+http://opensource.org/licenses/gpl-license.php.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+******************************************************************************/
+
+/*
+ * Functions and macros used internally by RPC
+ */
+
+#ifndef _NET_SUNRPC_SUNRPC_H
+#define _NET_SUNRPC_SUNRPC_H
+
+static inline int rpc_reply_expected(struct rpc_task *task)
+{
+ return (task->tk_msg.rpc_proc != NULL) &&
+ (task->tk_msg.rpc_proc->p_decode != NULL);
+}
+
+#endif /* _NET_SUNRPC_SUNRPC_H */
+
diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
index 5ed8931..952f206 100644
--- a/net/sunrpc/svc.c
+++ b/net/sunrpc/svc.c
@@ -25,6 +25,7 @@
#include <linux/sunrpc/stats.h>
#include <linux/sunrpc/svcsock.h>
#include <linux/sunrpc/clnt.h>
+#include <linux/sunrpc/bc_xprt.h>
#define RPCDBG_FACILITY RPCDBG_SVCDSP
@@ -486,6 +487,10 @@
if (svc_serv_is_pooled(serv))
svc_pool_map_put();
+#if defined(CONFIG_NFS_V4_1)
+ svc_sock_destroy(serv->bc_xprt);
+#endif /* CONFIG_NFS_V4_1 */
+
svc_unregister(serv);
kfree(serv->sv_pools);
kfree(serv);
@@ -970,20 +975,18 @@
}
/*
- * Process the RPC request.
+ * Common routine for processing the RPC request.
*/
-int
-svc_process(struct svc_rqst *rqstp)
+static int
+svc_process_common(struct svc_rqst *rqstp, struct kvec *argv, struct kvec *resv)
{
struct svc_program *progp;
struct svc_version *versp = NULL; /* compiler food */
struct svc_procedure *procp = NULL;
- struct kvec * argv = &rqstp->rq_arg.head[0];
- struct kvec * resv = &rqstp->rq_res.head[0];
struct svc_serv *serv = rqstp->rq_server;
kxdrproc_t xdr;
__be32 *statp;
- u32 dir, prog, vers, proc;
+ u32 prog, vers, proc;
__be32 auth_stat, rpc_stat;
int auth_res;
__be32 *reply_statp;
@@ -993,19 +996,6 @@
if (argv->iov_len < 6*4)
goto err_short_len;
- /* setup response xdr_buf.
- * Initially it has just one page
- */
- rqstp->rq_resused = 1;
- resv->iov_base = page_address(rqstp->rq_respages[0]);
- resv->iov_len = 0;
- rqstp->rq_res.pages = rqstp->rq_respages + 1;
- rqstp->rq_res.len = 0;
- rqstp->rq_res.page_base = 0;
- rqstp->rq_res.page_len = 0;
- rqstp->rq_res.buflen = PAGE_SIZE;
- rqstp->rq_res.tail[0].iov_base = NULL;
- rqstp->rq_res.tail[0].iov_len = 0;
/* Will be turned off only in gss privacy case: */
rqstp->rq_splice_ok = 1;
/* Will be turned off only when NFSv4 Sessions are used */
@@ -1014,17 +1004,13 @@
/* Setup reply header */
rqstp->rq_xprt->xpt_ops->xpo_prep_reply_hdr(rqstp);
- rqstp->rq_xid = svc_getu32(argv);
svc_putu32(resv, rqstp->rq_xid);
- dir = svc_getnl(argv);
vers = svc_getnl(argv);
/* First words of reply: */
svc_putnl(resv, 1); /* REPLY */
- if (dir != 0) /* direction != CALL */
- goto err_bad_dir;
if (vers != 2) /* RPC version number */
goto err_bad_rpc;
@@ -1147,7 +1133,7 @@
sendit:
if (svc_authorise(rqstp))
goto dropit;
- return svc_send(rqstp);
+ return 1; /* Caller can now send it */
dropit:
svc_authorise(rqstp); /* doesn't hurt to call this twice */
@@ -1161,12 +1147,6 @@
goto dropit; /* drop request */
-err_bad_dir:
- svc_printk(rqstp, "bad direction %d, dropping request\n", dir);
-
- serv->sv_stats->rpcbadfmt++;
- goto dropit; /* drop request */
-
err_bad_rpc:
serv->sv_stats->rpcbadfmt++;
svc_putnl(resv, 1); /* REJECT */
@@ -1220,6 +1200,100 @@
EXPORT_SYMBOL_GPL(svc_process);
/*
+ * Process the RPC request.
+ */
+int
+svc_process(struct svc_rqst *rqstp)
+{
+ struct kvec *argv = &rqstp->rq_arg.head[0];
+ struct kvec *resv = &rqstp->rq_res.head[0];
+ struct svc_serv *serv = rqstp->rq_server;
+ u32 dir;
+ int error;
+
+ /*
+ * Setup response xdr_buf.
+ * Initially it has just one page
+ */
+ rqstp->rq_resused = 1;
+ resv->iov_base = page_address(rqstp->rq_respages[0]);
+ resv->iov_len = 0;
+ rqstp->rq_res.pages = rqstp->rq_respages + 1;
+ rqstp->rq_res.len = 0;
+ rqstp->rq_res.page_base = 0;
+ rqstp->rq_res.page_len = 0;
+ rqstp->rq_res.buflen = PAGE_SIZE;
+ rqstp->rq_res.tail[0].iov_base = NULL;
+ rqstp->rq_res.tail[0].iov_len = 0;
+
+ rqstp->rq_xid = svc_getu32(argv);
+
+ dir = svc_getnl(argv);
+ if (dir != 0) {
+ /* direction != CALL */
+ svc_printk(rqstp, "bad direction %d, dropping request\n", dir);
+ serv->sv_stats->rpcbadfmt++;
+ svc_drop(rqstp);
+ return 0;
+ }
+
+ error = svc_process_common(rqstp, argv, resv);
+ if (error <= 0)
+ return error;
+
+ return svc_send(rqstp);
+}
+
+#if defined(CONFIG_NFS_V4_1)
+/*
+ * Process a backchannel RPC request that arrived over an existing
+ * outbound connection
+ */
+int
+bc_svc_process(struct svc_serv *serv, struct rpc_rqst *req,
+ struct svc_rqst *rqstp)
+{
+ struct kvec *argv = &rqstp->rq_arg.head[0];
+ struct kvec *resv = &rqstp->rq_res.head[0];
+ int error;
+
+ /* Build the svc_rqst used by the common processing routine */
+ rqstp->rq_xprt = serv->bc_xprt;
+ rqstp->rq_xid = req->rq_xid;
+ rqstp->rq_prot = req->rq_xprt->prot;
+ rqstp->rq_server = serv;
+
+ rqstp->rq_addrlen = sizeof(req->rq_xprt->addr);
+ memcpy(&rqstp->rq_addr, &req->rq_xprt->addr, rqstp->rq_addrlen);
+ memcpy(&rqstp->rq_arg, &req->rq_rcv_buf, sizeof(rqstp->rq_arg));
+ memcpy(&rqstp->rq_res, &req->rq_snd_buf, sizeof(rqstp->rq_res));
+
+ /* reset result send buffer "put" position */
+ resv->iov_len = 0;
+
+ if (rqstp->rq_prot != IPPROTO_TCP) {
+ printk(KERN_ERR "No support for Non-TCP transports!\n");
+ BUG();
+ }
+
+ /*
+ * Skip the next two words because they've already been
+ * processed in the trasport
+ */
+ svc_getu32(argv); /* XID */
+ svc_getnl(argv); /* CALLDIR */
+
+ error = svc_process_common(rqstp, argv, resv);
+ if (error <= 0)
+ return error;
+
+ memcpy(&req->rq_snd_buf, &rqstp->rq_res, sizeof(req->rq_snd_buf));
+ return bc_send(req);
+}
+EXPORT_SYMBOL(bc_svc_process);
+#endif /* CONFIG_NFS_V4_1 */
+
+/*
* Return (transport-specific) limit on the rpc payload.
*/
u32 svc_max_payload(const struct svc_rqst *rqstp)
diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
index c200d92..6f33d33 100644
--- a/net/sunrpc/svc_xprt.c
+++ b/net/sunrpc/svc_xprt.c
@@ -11,6 +11,7 @@
#include <net/sock.h>
#include <linux/sunrpc/stats.h>
#include <linux/sunrpc/svc_xprt.h>
+#include <linux/sunrpc/svcsock.h>
#define RPCDBG_FACILITY RPCDBG_SVCXPRT
@@ -1097,36 +1098,58 @@
}
EXPORT_SYMBOL_GPL(svc_find_xprt);
-/*
- * Format a buffer with a list of the active transports. A zero for
- * the buflen parameter disables target buffer overflow checking.
+static int svc_one_xprt_name(const struct svc_xprt *xprt,
+ char *pos, int remaining)
+{
+ int len;
+
+ len = snprintf(pos, remaining, "%s %u\n",
+ xprt->xpt_class->xcl_name,
+ svc_xprt_local_port(xprt));
+ if (len >= remaining)
+ return -ENAMETOOLONG;
+ return len;
+}
+
+/**
+ * svc_xprt_names - format a buffer with a list of transport names
+ * @serv: pointer to an RPC service
+ * @buf: pointer to a buffer to be filled in
+ * @buflen: length of buffer to be filled in
+ *
+ * Fills in @buf with a string containing a list of transport names,
+ * each name terminated with '\n'.
+ *
+ * Returns positive length of the filled-in string on success; otherwise
+ * a negative errno value is returned if an error occurs.
*/
-int svc_xprt_names(struct svc_serv *serv, char *buf, int buflen)
+int svc_xprt_names(struct svc_serv *serv, char *buf, const int buflen)
{
struct svc_xprt *xprt;
- char xprt_str[64];
- int totlen = 0;
- int len;
+ int len, totlen;
+ char *pos;
/* Sanity check args */
if (!serv)
return 0;
spin_lock_bh(&serv->sv_lock);
+
+ pos = buf;
+ totlen = 0;
list_for_each_entry(xprt, &serv->sv_permsocks, xpt_list) {
- len = snprintf(xprt_str, sizeof(xprt_str),
- "%s %d\n", xprt->xpt_class->xcl_name,
- svc_xprt_local_port(xprt));
- /* If the string was truncated, replace with error string */
- if (len >= sizeof(xprt_str))
- strcpy(xprt_str, "name-too-long\n");
- /* Don't overflow buffer */
- len = strlen(xprt_str);
- if (buflen && (len + totlen >= buflen))
+ len = svc_one_xprt_name(xprt, pos, buflen - totlen);
+ if (len < 0) {
+ *buf = '\0';
+ totlen = len;
+ }
+ if (len <= 0)
break;
- strcpy(buf+totlen, xprt_str);
+
+ pos += len;
totlen += len;
}
+
spin_unlock_bh(&serv->sv_lock);
return totlen;
}
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
index 9d50423..23128ee 100644
--- a/net/sunrpc/svcsock.c
+++ b/net/sunrpc/svcsock.c
@@ -240,42 +240,76 @@
/*
* Report socket names for nfsdfs
*/
-static int one_sock_name(char *buf, struct svc_sock *svsk)
+static int svc_one_sock_name(struct svc_sock *svsk, char *buf, int remaining)
{
+ const struct sock *sk = svsk->sk_sk;
+ const char *proto_name = sk->sk_protocol == IPPROTO_UDP ?
+ "udp" : "tcp";
int len;
- switch(svsk->sk_sk->sk_family) {
- case AF_INET:
- len = sprintf(buf, "ipv4 %s %pI4 %d\n",
- svsk->sk_sk->sk_protocol == IPPROTO_UDP ?
- "udp" : "tcp",
- &inet_sk(svsk->sk_sk)->rcv_saddr,
- inet_sk(svsk->sk_sk)->num);
+ switch (sk->sk_family) {
+ case PF_INET:
+ len = snprintf(buf, remaining, "ipv4 %s %pI4 %d\n",
+ proto_name,
+ &inet_sk(sk)->rcv_saddr,
+ inet_sk(sk)->num);
+ break;
+ case PF_INET6:
+ len = snprintf(buf, remaining, "ipv6 %s %pI6 %d\n",
+ proto_name,
+ &inet6_sk(sk)->rcv_saddr,
+ inet_sk(sk)->num);
break;
default:
- len = sprintf(buf, "*unknown-%d*\n",
- svsk->sk_sk->sk_family);
+ len = snprintf(buf, remaining, "*unknown-%d*\n",
+ sk->sk_family);
+ }
+
+ if (len >= remaining) {
+ *buf = '\0';
+ return -ENAMETOOLONG;
}
return len;
}
-int
-svc_sock_names(char *buf, struct svc_serv *serv, char *toclose)
+/**
+ * svc_sock_names - construct a list of listener names in a string
+ * @serv: pointer to RPC service
+ * @buf: pointer to a buffer to fill in with socket names
+ * @buflen: size of the buffer to be filled
+ * @toclose: pointer to '\0'-terminated C string containing the name
+ * of a listener to be closed
+ *
+ * Fills in @buf with a '\n'-separated list of names of listener
+ * sockets. If @toclose is not NULL, the socket named by @toclose
+ * is closed, and is not included in the output list.
+ *
+ * Returns positive length of the socket name string, or a negative
+ * errno value on error.
+ */
+int svc_sock_names(struct svc_serv *serv, char *buf, const size_t buflen,
+ const char *toclose)
{
struct svc_sock *svsk, *closesk = NULL;
int len = 0;
if (!serv)
return 0;
+
spin_lock_bh(&serv->sv_lock);
list_for_each_entry(svsk, &serv->sv_permsocks, sk_xprt.xpt_list) {
- int onelen = one_sock_name(buf+len, svsk);
- if (toclose && strcmp(toclose, buf+len) == 0)
+ int onelen = svc_one_sock_name(svsk, buf + len, buflen - len);
+ if (onelen < 0) {
+ len = onelen;
+ break;
+ }
+ if (toclose && strcmp(toclose, buf + len) == 0)
closesk = svsk;
else
len += onelen;
}
spin_unlock_bh(&serv->sv_lock);
+
if (closesk)
/* Should unregister with portmap, but you cannot
* unregister just one protocol...
@@ -346,6 +380,7 @@
sock->sk->sk_sndbuf = snd * 2;
sock->sk->sk_rcvbuf = rcv * 2;
sock->sk->sk_userlocks |= SOCK_SNDBUF_LOCK|SOCK_RCVBUF_LOCK;
+ sock->sk->sk_write_space(sock->sk);
release_sock(sock->sk);
#endif
}
@@ -387,6 +422,15 @@
}
}
+static void svc_tcp_write_space(struct sock *sk)
+{
+ struct socket *sock = sk->sk_socket;
+
+ if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk) && sock)
+ clear_bit(SOCK_NOSPACE, &sock->flags);
+ svc_write_space(sk);
+}
+
/*
* Copy the UDP datagram's destination address to the rqstp structure.
* The 'destination' address in this case is the address to which the
@@ -427,13 +471,14 @@
long all[SVC_PKTINFO_SPACE / sizeof(long)];
} buffer;
struct cmsghdr *cmh = &buffer.hdr;
- int err, len;
struct msghdr msg = {
.msg_name = svc_addr(rqstp),
.msg_control = cmh,
.msg_controllen = sizeof(buffer),
.msg_flags = MSG_DONTWAIT,
};
+ size_t len;
+ int err;
if (test_and_clear_bit(XPT_CHNGBUF, &svsk->sk_xprt.xpt_flags))
/* udp sockets need large rcvbuf as all pending
@@ -465,8 +510,8 @@
return -EAGAIN;
}
len = svc_addr_len(svc_addr(rqstp));
- if (len < 0)
- return len;
+ if (len == 0)
+ return -EAFNOSUPPORT;
rqstp->rq_addrlen = len;
if (skb->tstamp.tv64 == 0) {
skb->tstamp = ktime_get_real();
@@ -980,25 +1025,16 @@
static int svc_tcp_has_wspace(struct svc_xprt *xprt)
{
struct svc_sock *svsk = container_of(xprt, struct svc_sock, sk_xprt);
- struct svc_serv *serv = svsk->sk_xprt.xpt_server;
+ struct svc_serv *serv = svsk->sk_xprt.xpt_server;
int required;
- int wspace;
- /*
- * Set the SOCK_NOSPACE flag before checking the available
- * sock space.
- */
+ if (test_bit(XPT_LISTENER, &xprt->xpt_flags))
+ return 1;
+ required = atomic_read(&xprt->xpt_reserved) + serv->sv_max_mesg;
+ if (sk_stream_wspace(svsk->sk_sk) >= required)
+ return 1;
set_bit(SOCK_NOSPACE, &svsk->sk_sock->flags);
- required = atomic_read(&svsk->sk_xprt.xpt_reserved) + serv->sv_max_mesg;
- wspace = sk_stream_wspace(svsk->sk_sk);
-
- if (wspace < sk_stream_min_wspace(svsk->sk_sk))
- return 0;
- if (required * 2 > wspace)
- return 0;
-
- clear_bit(SOCK_NOSPACE, &svsk->sk_sock->flags);
- return 1;
+ return 0;
}
static struct svc_xprt *svc_tcp_create(struct svc_serv *serv,
@@ -1054,7 +1090,7 @@
dprintk("setting up TCP socket for reading\n");
sk->sk_state_change = svc_tcp_state_change;
sk->sk_data_ready = svc_tcp_data_ready;
- sk->sk_write_space = svc_write_space;
+ sk->sk_write_space = svc_tcp_write_space;
svsk->sk_reclen = 0;
svsk->sk_tcplen = 0;
@@ -1148,9 +1184,19 @@
return svsk;
}
-int svc_addsock(struct svc_serv *serv,
- int fd,
- char *name_return)
+/**
+ * svc_addsock - add a listener socket to an RPC service
+ * @serv: pointer to RPC service to which to add a new listener
+ * @fd: file descriptor of the new listener
+ * @name_return: pointer to buffer to fill in with name of listener
+ * @len: size of the buffer
+ *
+ * Fills in socket name and returns positive length of name if successful.
+ * Name is terminated with '\n'. On error, returns a negative errno
+ * value.
+ */
+int svc_addsock(struct svc_serv *serv, const int fd, char *name_return,
+ const size_t len)
{
int err = 0;
struct socket *so = sockfd_lookup(fd, &err);
@@ -1190,7 +1236,7 @@
sockfd_put(so);
return err;
}
- return one_sock_name(name_return, svsk);
+ return svc_one_sock_name(svsk, name_return, len);
}
EXPORT_SYMBOL_GPL(svc_addsock);
@@ -1327,3 +1373,42 @@
sock_release(svsk->sk_sock);
kfree(svsk);
}
+
+/*
+ * Create a svc_xprt.
+ *
+ * For internal use only (e.g. nfsv4.1 backchannel).
+ * Callers should typically use the xpo_create() method.
+ */
+struct svc_xprt *svc_sock_create(struct svc_serv *serv, int prot)
+{
+ struct svc_sock *svsk;
+ struct svc_xprt *xprt = NULL;
+
+ dprintk("svc: %s\n", __func__);
+ svsk = kzalloc(sizeof(*svsk), GFP_KERNEL);
+ if (!svsk)
+ goto out;
+
+ xprt = &svsk->sk_xprt;
+ if (prot == IPPROTO_TCP)
+ svc_xprt_init(&svc_tcp_class, xprt, serv);
+ else if (prot == IPPROTO_UDP)
+ svc_xprt_init(&svc_udp_class, xprt, serv);
+ else
+ BUG();
+out:
+ dprintk("svc: %s return %p\n", __func__, xprt);
+ return xprt;
+}
+EXPORT_SYMBOL_GPL(svc_sock_create);
+
+/*
+ * Destroy a svc_sock.
+ */
+void svc_sock_destroy(struct svc_xprt *xprt)
+{
+ if (xprt)
+ kfree(container_of(xprt, struct svc_sock, sk_xprt));
+}
+EXPORT_SYMBOL_GPL(svc_sock_destroy);
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
index 06ca058..f412a85 100644
--- a/net/sunrpc/xprt.c
+++ b/net/sunrpc/xprt.c
@@ -12,8 +12,9 @@
* - Next, the caller puts together the RPC message, stuffs it into
* the request struct, and calls xprt_transmit().
* - xprt_transmit sends the message and installs the caller on the
- * transport's wait list. At the same time, it installs a timer that
- * is run after the packet's timeout has expired.
+ * transport's wait list. At the same time, if a reply is expected,
+ * it installs a timer that is run after the packet's timeout has
+ * expired.
* - When a packet arrives, the data_ready handler walks the list of
* pending requests for that transport. If a matching XID is found, the
* caller is woken up, and the timer removed.
@@ -46,6 +47,8 @@
#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/metrics.h>
+#include "sunrpc.h"
+
/*
* Local variables
*/
@@ -192,8 +195,8 @@
*/
int xprt_reserve_xprt(struct rpc_task *task)
{
- struct rpc_xprt *xprt = task->tk_xprt;
struct rpc_rqst *req = task->tk_rqstp;
+ struct rpc_xprt *xprt = req->rq_xprt;
if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) {
if (task == xprt->snd_task)
@@ -803,9 +806,10 @@
list_del_init(&req->rq_list);
req->rq_private_buf.len = copied;
- /* Ensure all writes are done before we update req->rq_received */
+ /* Ensure all writes are done before we update */
+ /* req->rq_reply_bytes_recvd */
smp_wmb();
- req->rq_received = copied;
+ req->rq_reply_bytes_recvd = copied;
rpc_wake_up_queued_task(&xprt->pending, task);
}
EXPORT_SYMBOL_GPL(xprt_complete_rqst);
@@ -820,7 +824,7 @@
dprintk("RPC: %5u xprt_timer\n", task->tk_pid);
spin_lock_bh(&xprt->transport_lock);
- if (!req->rq_received) {
+ if (!req->rq_reply_bytes_recvd) {
if (xprt->ops->timer)
xprt->ops->timer(task);
} else
@@ -842,8 +846,8 @@
dprintk("RPC: %5u xprt_prepare_transmit\n", task->tk_pid);
spin_lock_bh(&xprt->transport_lock);
- if (req->rq_received && !req->rq_bytes_sent) {
- err = req->rq_received;
+ if (req->rq_reply_bytes_recvd && !req->rq_bytes_sent) {
+ err = req->rq_reply_bytes_recvd;
goto out_unlock;
}
if (!xprt->ops->reserve_xprt(task))
@@ -855,7 +859,7 @@
void xprt_end_transmit(struct rpc_task *task)
{
- xprt_release_write(task->tk_xprt, task);
+ xprt_release_write(task->tk_rqstp->rq_xprt, task);
}
/**
@@ -872,8 +876,11 @@
dprintk("RPC: %5u xprt_transmit(%u)\n", task->tk_pid, req->rq_slen);
- if (!req->rq_received) {
- if (list_empty(&req->rq_list)) {
+ if (!req->rq_reply_bytes_recvd) {
+ if (list_empty(&req->rq_list) && rpc_reply_expected(task)) {
+ /*
+ * Add to the list only if we're expecting a reply
+ */
spin_lock_bh(&xprt->transport_lock);
/* Update the softirq receive buffer */
memcpy(&req->rq_private_buf, &req->rq_rcv_buf,
@@ -908,8 +915,13 @@
/* Don't race with disconnect */
if (!xprt_connected(xprt))
task->tk_status = -ENOTCONN;
- else if (!req->rq_received)
+ else if (!req->rq_reply_bytes_recvd && rpc_reply_expected(task)) {
+ /*
+ * Sleep on the pending queue since
+ * we're expecting a reply.
+ */
rpc_sleep_on(&xprt->pending, task, xprt_timer);
+ }
spin_unlock_bh(&xprt->transport_lock);
}
@@ -982,11 +994,17 @@
*/
void xprt_release(struct rpc_task *task)
{
- struct rpc_xprt *xprt = task->tk_xprt;
+ struct rpc_xprt *xprt;
struct rpc_rqst *req;
+ int is_bc_request;
if (!(req = task->tk_rqstp))
return;
+
+ /* Preallocated backchannel request? */
+ is_bc_request = bc_prealloc(req);
+
+ xprt = req->rq_xprt;
rpc_count_iostats(task);
spin_lock_bh(&xprt->transport_lock);
xprt->ops->release_xprt(xprt, task);
@@ -999,10 +1017,19 @@
mod_timer(&xprt->timer,
xprt->last_used + xprt->idle_timeout);
spin_unlock_bh(&xprt->transport_lock);
- xprt->ops->buf_free(req->rq_buffer);
+ if (!bc_prealloc(req))
+ xprt->ops->buf_free(req->rq_buffer);
task->tk_rqstp = NULL;
if (req->rq_release_snd_buf)
req->rq_release_snd_buf(req);
+
+ /*
+ * Early exit if this is a backchannel preallocated request.
+ * There is no need to have it added to the RPC slot list.
+ */
+ if (is_bc_request)
+ return;
+
memset(req, 0, sizeof(*req)); /* mark unused */
dprintk("RPC: %5u release request %p\n", task->tk_pid, req);
@@ -1049,6 +1076,11 @@
INIT_LIST_HEAD(&xprt->free);
INIT_LIST_HEAD(&xprt->recv);
+#if defined(CONFIG_NFS_V4_1)
+ spin_lock_init(&xprt->bc_pa_lock);
+ INIT_LIST_HEAD(&xprt->bc_pa_list);
+#endif /* CONFIG_NFS_V4_1 */
+
INIT_WORK(&xprt->task_cleanup, xprt_autoclose);
setup_timer(&xprt->timer, xprt_init_autodisconnect,
(unsigned long)xprt);
diff --git a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
index 42a6f9f..9e88438 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
@@ -397,14 +397,14 @@
if (!ch)
return 0;
- /* Allocate temporary reply and chunk maps */
- rpl_map = svc_rdma_get_req_map();
- chl_map = svc_rdma_get_req_map();
-
svc_rdma_rcl_chunk_counts(ch, &ch_count, &byte_count);
if (ch_count > RPCSVC_MAXPAGES)
return -EINVAL;
+ /* Allocate temporary reply and chunk maps */
+ rpl_map = svc_rdma_get_req_map();
+ chl_map = svc_rdma_get_req_map();
+
if (!xprt->sc_frmr_pg_list_len)
sge_count = map_read_chunks(xprt, rqstp, hdr_ctxt, rmsgp,
rpl_map, chl_map, ch_count,
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index 6c2d615..83c73c4 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -34,6 +34,9 @@
#include <linux/sunrpc/sched.h>
#include <linux/sunrpc/xprtsock.h>
#include <linux/file.h>
+#ifdef CONFIG_NFS_V4_1
+#include <linux/sunrpc/bc_xprt.h>
+#endif
#include <net/sock.h>
#include <net/checksum.h>
@@ -270,6 +273,13 @@
#define TCP_RCV_COPY_FRAGHDR (1UL << 1)
#define TCP_RCV_COPY_XID (1UL << 2)
#define TCP_RCV_COPY_DATA (1UL << 3)
+#define TCP_RCV_READ_CALLDIR (1UL << 4)
+#define TCP_RCV_COPY_CALLDIR (1UL << 5)
+
+/*
+ * TCP RPC flags
+ */
+#define TCP_RPC_REPLY (1UL << 6)
static inline struct sockaddr *xs_addr(struct rpc_xprt *xprt)
{
@@ -956,7 +966,7 @@
transport->tcp_offset = 0;
/* Sanity check of the record length */
- if (unlikely(transport->tcp_reclen < 4)) {
+ if (unlikely(transport->tcp_reclen < 8)) {
dprintk("RPC: invalid TCP record fragment length\n");
xprt_force_disconnect(xprt);
return;
@@ -991,33 +1001,77 @@
if (used != len)
return;
transport->tcp_flags &= ~TCP_RCV_COPY_XID;
- transport->tcp_flags |= TCP_RCV_COPY_DATA;
+ transport->tcp_flags |= TCP_RCV_READ_CALLDIR;
transport->tcp_copied = 4;
- dprintk("RPC: reading reply for XID %08x\n",
+ dprintk("RPC: reading %s XID %08x\n",
+ (transport->tcp_flags & TCP_RPC_REPLY) ? "reply for"
+ : "request with",
ntohl(transport->tcp_xid));
xs_tcp_check_fraghdr(transport);
}
-static inline void xs_tcp_read_request(struct rpc_xprt *xprt, struct xdr_skb_reader *desc)
+static inline void xs_tcp_read_calldir(struct sock_xprt *transport,
+ struct xdr_skb_reader *desc)
{
- struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
- struct rpc_rqst *req;
+ size_t len, used;
+ u32 offset;
+ __be32 calldir;
+
+ /*
+ * We want transport->tcp_offset to be 8 at the end of this routine
+ * (4 bytes for the xid and 4 bytes for the call/reply flag).
+ * When this function is called for the first time,
+ * transport->tcp_offset is 4 (after having already read the xid).
+ */
+ offset = transport->tcp_offset - sizeof(transport->tcp_xid);
+ len = sizeof(calldir) - offset;
+ dprintk("RPC: reading CALL/REPLY flag (%Zu bytes)\n", len);
+ used = xdr_skb_read_bits(desc, &calldir, len);
+ transport->tcp_offset += used;
+ if (used != len)
+ return;
+ transport->tcp_flags &= ~TCP_RCV_READ_CALLDIR;
+ transport->tcp_flags |= TCP_RCV_COPY_CALLDIR;
+ transport->tcp_flags |= TCP_RCV_COPY_DATA;
+ /*
+ * We don't yet have the XDR buffer, so we will write the calldir
+ * out after we get the buffer from the 'struct rpc_rqst'
+ */
+ if (ntohl(calldir) == RPC_REPLY)
+ transport->tcp_flags |= TCP_RPC_REPLY;
+ else
+ transport->tcp_flags &= ~TCP_RPC_REPLY;
+ dprintk("RPC: reading %s CALL/REPLY flag %08x\n",
+ (transport->tcp_flags & TCP_RPC_REPLY) ?
+ "reply for" : "request with", calldir);
+ xs_tcp_check_fraghdr(transport);
+}
+
+static inline void xs_tcp_read_common(struct rpc_xprt *xprt,
+ struct xdr_skb_reader *desc,
+ struct rpc_rqst *req)
+{
+ struct sock_xprt *transport =
+ container_of(xprt, struct sock_xprt, xprt);
struct xdr_buf *rcvbuf;
size_t len;
ssize_t r;
- /* Find and lock the request corresponding to this xid */
- spin_lock(&xprt->transport_lock);
- req = xprt_lookup_rqst(xprt, transport->tcp_xid);
- if (!req) {
- transport->tcp_flags &= ~TCP_RCV_COPY_DATA;
- dprintk("RPC: XID %08x request not found!\n",
- ntohl(transport->tcp_xid));
- spin_unlock(&xprt->transport_lock);
- return;
+ rcvbuf = &req->rq_private_buf;
+
+ if (transport->tcp_flags & TCP_RCV_COPY_CALLDIR) {
+ /*
+ * Save the RPC direction in the XDR buffer
+ */
+ __be32 calldir = transport->tcp_flags & TCP_RPC_REPLY ?
+ htonl(RPC_REPLY) : 0;
+
+ memcpy(rcvbuf->head[0].iov_base + transport->tcp_copied,
+ &calldir, sizeof(calldir));
+ transport->tcp_copied += sizeof(calldir);
+ transport->tcp_flags &= ~TCP_RCV_COPY_CALLDIR;
}
- rcvbuf = &req->rq_private_buf;
len = desc->count;
if (len > transport->tcp_reclen - transport->tcp_offset) {
struct xdr_skb_reader my_desc;
@@ -1054,7 +1108,7 @@
"tcp_offset = %u, tcp_reclen = %u\n",
xprt, transport->tcp_copied,
transport->tcp_offset, transport->tcp_reclen);
- goto out;
+ return;
}
dprintk("RPC: XID %08x read %Zd bytes\n",
@@ -1070,11 +1124,125 @@
transport->tcp_flags &= ~TCP_RCV_COPY_DATA;
}
-out:
+ return;
+}
+
+/*
+ * Finds the request corresponding to the RPC xid and invokes the common
+ * tcp read code to read the data.
+ */
+static inline int xs_tcp_read_reply(struct rpc_xprt *xprt,
+ struct xdr_skb_reader *desc)
+{
+ struct sock_xprt *transport =
+ container_of(xprt, struct sock_xprt, xprt);
+ struct rpc_rqst *req;
+
+ dprintk("RPC: read reply XID %08x\n", ntohl(transport->tcp_xid));
+
+ /* Find and lock the request corresponding to this xid */
+ spin_lock(&xprt->transport_lock);
+ req = xprt_lookup_rqst(xprt, transport->tcp_xid);
+ if (!req) {
+ dprintk("RPC: XID %08x request not found!\n",
+ ntohl(transport->tcp_xid));
+ spin_unlock(&xprt->transport_lock);
+ return -1;
+ }
+
+ xs_tcp_read_common(xprt, desc, req);
+
if (!(transport->tcp_flags & TCP_RCV_COPY_DATA))
xprt_complete_rqst(req->rq_task, transport->tcp_copied);
+
spin_unlock(&xprt->transport_lock);
- xs_tcp_check_fraghdr(transport);
+ return 0;
+}
+
+#if defined(CONFIG_NFS_V4_1)
+/*
+ * Obtains an rpc_rqst previously allocated and invokes the common
+ * tcp read code to read the data. The result is placed in the callback
+ * queue.
+ * If we're unable to obtain the rpc_rqst we schedule the closing of the
+ * connection and return -1.
+ */
+static inline int xs_tcp_read_callback(struct rpc_xprt *xprt,
+ struct xdr_skb_reader *desc)
+{
+ struct sock_xprt *transport =
+ container_of(xprt, struct sock_xprt, xprt);
+ struct rpc_rqst *req;
+
+ req = xprt_alloc_bc_request(xprt);
+ if (req == NULL) {
+ printk(KERN_WARNING "Callback slot table overflowed\n");
+ xprt_force_disconnect(xprt);
+ return -1;
+ }
+
+ req->rq_xid = transport->tcp_xid;
+ dprintk("RPC: read callback XID %08x\n", ntohl(req->rq_xid));
+ xs_tcp_read_common(xprt, desc, req);
+
+ if (!(transport->tcp_flags & TCP_RCV_COPY_DATA)) {
+ struct svc_serv *bc_serv = xprt->bc_serv;
+
+ /*
+ * Add callback request to callback list. The callback
+ * service sleeps on the sv_cb_waitq waiting for new
+ * requests. Wake it up after enqueuing the
+ * request.
+ */
+ dprintk("RPC: add callback request to list\n");
+ spin_lock(&bc_serv->sv_cb_lock);
+ list_add(&req->rq_bc_list, &bc_serv->sv_cb_list);
+ spin_unlock(&bc_serv->sv_cb_lock);
+ wake_up(&bc_serv->sv_cb_waitq);
+ }
+
+ req->rq_private_buf.len = transport->tcp_copied;
+
+ return 0;
+}
+
+static inline int _xs_tcp_read_data(struct rpc_xprt *xprt,
+ struct xdr_skb_reader *desc)
+{
+ struct sock_xprt *transport =
+ container_of(xprt, struct sock_xprt, xprt);
+
+ return (transport->tcp_flags & TCP_RPC_REPLY) ?
+ xs_tcp_read_reply(xprt, desc) :
+ xs_tcp_read_callback(xprt, desc);
+}
+#else
+static inline int _xs_tcp_read_data(struct rpc_xprt *xprt,
+ struct xdr_skb_reader *desc)
+{
+ return xs_tcp_read_reply(xprt, desc);
+}
+#endif /* CONFIG_NFS_V4_1 */
+
+/*
+ * Read data off the transport. This can be either an RPC_CALL or an
+ * RPC_REPLY. Relay the processing to helper functions.
+ */
+static void xs_tcp_read_data(struct rpc_xprt *xprt,
+ struct xdr_skb_reader *desc)
+{
+ struct sock_xprt *transport =
+ container_of(xprt, struct sock_xprt, xprt);
+
+ if (_xs_tcp_read_data(xprt, desc) == 0)
+ xs_tcp_check_fraghdr(transport);
+ else {
+ /*
+ * The transport_lock protects the request handling.
+ * There's no need to hold it to update the tcp_flags.
+ */
+ transport->tcp_flags &= ~TCP_RCV_COPY_DATA;
+ }
}
static inline void xs_tcp_read_discard(struct sock_xprt *transport, struct xdr_skb_reader *desc)
@@ -1114,9 +1282,14 @@
xs_tcp_read_xid(transport, &desc);
continue;
}
+ /* Read in the call/reply flag */
+ if (transport->tcp_flags & TCP_RCV_READ_CALLDIR) {
+ xs_tcp_read_calldir(transport, &desc);
+ continue;
+ }
/* Read in the request data */
if (transport->tcp_flags & TCP_RCV_COPY_DATA) {
- xs_tcp_read_request(xprt, &desc);
+ xs_tcp_read_data(xprt, &desc);
continue;
}
/* Skip over any trailing bytes on short reads */
@@ -1792,6 +1965,7 @@
*/
set_bit(XPRT_CONNECTION_CLOSE, &xprt->state);
xprt_force_disconnect(xprt);
+ break;
case -ECONNREFUSED:
case -ECONNRESET:
case -ENETUNREACH:
@@ -2010,6 +2184,9 @@
.buf_free = rpc_free,
.send_request = xs_tcp_send_request,
.set_retrans_timeout = xprt_set_retrans_timeout_def,
+#if defined(CONFIG_NFS_V4_1)
+ .release_request = bc_release_request,
+#endif /* CONFIG_NFS_V4_1 */
.close = xs_tcp_close,
.destroy = xs_destroy,
.print_stats = xs_tcp_print_stats,
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index 2416856..241bddd 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -1687,13 +1687,52 @@
if (err)
goto out_rtnl;
- if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP &&
- dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP_VLAN) {
- err = -EINVAL;
+ err = get_vlan(info->attrs[NL80211_ATTR_STA_VLAN], drv, &params.vlan);
+ if (err)
goto out;
+
+ /* validate settings */
+ err = 0;
+
+ switch (dev->ieee80211_ptr->iftype) {
+ case NL80211_IFTYPE_AP:
+ case NL80211_IFTYPE_AP_VLAN:
+ /* disallow mesh-specific things */
+ if (params.plink_action)
+ err = -EINVAL;
+ break;
+ case NL80211_IFTYPE_STATION:
+ /* disallow everything but AUTHORIZED flag */
+ if (params.plink_action)
+ err = -EINVAL;
+ if (params.vlan)
+ err = -EINVAL;
+ if (params.supported_rates)
+ err = -EINVAL;
+ if (params.ht_capa)
+ err = -EINVAL;
+ if (params.listen_interval >= 0)
+ err = -EINVAL;
+ if (params.sta_flags_mask & ~BIT(NL80211_STA_FLAG_AUTHORIZED))
+ err = -EINVAL;
+ break;
+ case NL80211_IFTYPE_MESH_POINT:
+ /* disallow things mesh doesn't support */
+ if (params.vlan)
+ err = -EINVAL;
+ if (params.ht_capa)
+ err = -EINVAL;
+ if (params.listen_interval >= 0)
+ err = -EINVAL;
+ if (params.supported_rates)
+ err = -EINVAL;
+ if (params.sta_flags_mask)
+ err = -EINVAL;
+ break;
+ default:
+ err = -EINVAL;
}
- err = get_vlan(info->attrs[NL80211_ATTR_STA_VLAN], drv, &params.vlan);
if (err)
goto out;
@@ -1728,9 +1767,6 @@
if (!info->attrs[NL80211_ATTR_MAC])
return -EINVAL;
- if (!info->attrs[NL80211_ATTR_STA_AID])
- return -EINVAL;
-
if (!info->attrs[NL80211_ATTR_STA_LISTEN_INTERVAL])
return -EINVAL;
@@ -1745,9 +1781,11 @@
params.listen_interval =
nla_get_u16(info->attrs[NL80211_ATTR_STA_LISTEN_INTERVAL]);
- params.aid = nla_get_u16(info->attrs[NL80211_ATTR_STA_AID]);
- if (!params.aid || params.aid > IEEE80211_MAX_AID)
- return -EINVAL;
+ if (info->attrs[NL80211_ATTR_STA_AID]) {
+ params.aid = nla_get_u16(info->attrs[NL80211_ATTR_STA_AID]);
+ if (!params.aid || params.aid > IEEE80211_MAX_AID)
+ return -EINVAL;
+ }
if (info->attrs[NL80211_ATTR_HT_CAPABILITY])
params.ht_capa =
@@ -1762,13 +1800,39 @@
if (err)
goto out_rtnl;
- if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP &&
- dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP_VLAN) {
- err = -EINVAL;
+ err = get_vlan(info->attrs[NL80211_ATTR_STA_VLAN], drv, &params.vlan);
+ if (err)
goto out;
+
+ /* validate settings */
+ err = 0;
+
+ switch (dev->ieee80211_ptr->iftype) {
+ case NL80211_IFTYPE_AP:
+ case NL80211_IFTYPE_AP_VLAN:
+ /* all ok but must have AID */
+ if (!params.aid)
+ err = -EINVAL;
+ break;
+ case NL80211_IFTYPE_MESH_POINT:
+ /* disallow things mesh doesn't support */
+ if (params.vlan)
+ err = -EINVAL;
+ if (params.aid)
+ err = -EINVAL;
+ if (params.ht_capa)
+ err = -EINVAL;
+ if (params.listen_interval >= 0)
+ err = -EINVAL;
+ if (params.supported_rates)
+ err = -EINVAL;
+ if (params.sta_flags_mask)
+ err = -EINVAL;
+ break;
+ default:
+ err = -EINVAL;
}
- err = get_vlan(info->attrs[NL80211_ATTR_STA_VLAN], drv, &params.vlan);
if (err)
goto out;
@@ -1812,7 +1876,8 @@
goto out_rtnl;
if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP &&
- dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP_VLAN) {
+ dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP_VLAN &&
+ dev->ieee80211_ptr->iftype != NL80211_IFTYPE_MESH_POINT) {
err = -EINVAL;
goto out;
}
diff --git a/samples/trace_events/Makefile b/samples/trace_events/Makefile
index 0d428dc..0f8d921 100644
--- a/samples/trace_events/Makefile
+++ b/samples/trace_events/Makefile
@@ -1,6 +1,14 @@
# builds the trace events example kernel modules;
# then to use one (as root): insmod <module_name.ko>
+# If you include a trace header outside of include/trace/events
+# then the file that does the #define CREATE_TRACE_POINTS must
+# have that tracer file in its main search path. This is because
+# define_trace.h will include it, and must be able to find it from
+# the include/trace directory.
+#
+# Here trace-events-sample.c does the CREATE_TRACE_POINTS.
+#
CFLAGS_trace-events-sample.o := -I$(src)
obj-$(CONFIG_SAMPLE_TRACE_EVENTS) += trace-events-sample.o
diff --git a/samples/trace_events/trace-events-sample.h b/samples/trace_events/trace-events-sample.h
index 128a897..9977a75 100644
--- a/samples/trace_events/trace-events-sample.h
+++ b/samples/trace_events/trace-events-sample.h
@@ -19,16 +19,21 @@
* If TRACE_SYSTEM is defined, that will be the directory created
* in the ftrace directory under /debugfs/tracing/events/<system>
*
- * The define_trace.h belowe will also look for a file name of
+ * The define_trace.h below will also look for a file name of
* TRACE_SYSTEM.h where TRACE_SYSTEM is what is defined here.
+ * In this case, it would look for sample.h
*
- * If you want a different system than file name, you can override
- * the header name by defining TRACE_INCLUDE_FILE
+ * If the header name will be different from the system name
+ * (as in this case), then you can override the header name that
+ * define_trace.h will look up by defining TRACE_INCLUDE_FILE
*
- * If this file was called, goofy.h, then we would define:
+ * This file is called trace-events-sample.h but we want the system
+ * to be called "sample". Therefore we must define the name of this
+ * file:
*
- * #define TRACE_INCLUDE_FILE goofy
+ * #define TRACE_INCLUDE_FILE trace-events-sample
*
+ * As we do at the bottom of this file.
*/
#undef TRACE_SYSTEM
#define TRACE_SYSTEM sample
@@ -99,13 +104,13 @@
*
* #define TRACE_INCLUDE_PATH ../../samples/trace_events
*
- * But I chose to simply make it use the current directory and then in
- * the Makefile I added:
+ * But the safest and easiest way is simply to make it use the directory
+ * that the file is in, by adding in the Makefile:
*
- * CFLAGS_trace-events-sample.o := -I$(PWD)/samples/trace_events/
+ * CFLAGS_trace-events-sample.o := -I$(src)
*
* This will make sure the current path is part of the include
- * structure for our file so that we can find it.
+ * structure for our file so that define_trace.h can find it.
*
* I could have made only the top level directory the include:
*
@@ -115,8 +120,8 @@
*
* #define TRACE_INCLUDE_PATH samples/trace_events
*
- * But then if something defines "samples" or "trace_events" then we
- * could risk that being converted too, and give us an unexpected
+ * But then if something defines "samples" or "trace_events" as a macro
+ * then we could risk that being converted too, and give us an unexpected
* result.
*/
#undef TRACE_INCLUDE_PATH
diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c
index 562403a..462e2ce 100644
--- a/sound/pci/hda/hda_codec.c
+++ b/sound/pci/hda/hda_codec.c
@@ -972,8 +972,6 @@
snd_hda_codec_read(codec, nid, 0,
AC_VERB_GET_SUBSYSTEM_ID, 0);
}
- if (bus->modelname)
- codec->modelname = kstrdup(bus->modelname, GFP_KERNEL);
/* power-up all before initialization */
hda_set_power_state(codec,
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index d22b260..bf4b78a 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -224,6 +224,7 @@
ALC883_ACER,
ALC883_ACER_ASPIRE,
ALC888_ACER_ASPIRE_4930G,
+ ALC888_ACER_ASPIRE_6530G,
ALC888_ACER_ASPIRE_8930G,
ALC883_MEDION,
ALC883_MEDION_MD2,
@@ -970,7 +971,7 @@
}
}
-#if 0 /* it's broken in some acses -- temporarily disabled */
+#if 0 /* it's broken in some cases -- temporarily disabled */
static void alc_mic_automute(struct hda_codec *codec)
{
struct alc_spec *spec = codec->spec;
@@ -1170,7 +1171,7 @@
/* invalid SSID, check the special NID pin defcfg instead */
/*
- * 31~30 : port conetcivity
+ * 31~30 : port connectivity
* 29~21 : reserve
* 20 : PCBEEP input
* 19~16 : Check sum (15:1)
@@ -1471,6 +1472,25 @@
};
/*
+ * ALC888 Acer Aspire 6530G model
+ */
+
+static struct hda_verb alc888_acer_aspire_6530g_verbs[] = {
+/* Bias voltage on for external mic port */
+ {0x18, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_IN | PIN_VREF80},
+/* Enable unsolicited event for HP jack */
+ {0x15, AC_VERB_SET_UNSOLICITED_ENABLE, ALC880_HP_EVENT | AC_USRSP_EN},
+/* Enable speaker output */
+ {0x14, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT},
+ {0x14, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE},
+/* Enable headphone output */
+ {0x15, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT | PIN_HP},
+ {0x15, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE},
+ {0x15, AC_VERB_SET_CONNECT_SEL, 0x00},
+ { }
+};
+
+/*
* ALC889 Acer Aspire 8930G model
*/
@@ -1544,6 +1564,25 @@
}
};
+static struct hda_input_mux alc888_acer_aspire_6530_sources[2] = {
+ /* Internal mic only available on one ADC */
+ {
+ .num_items = 3,
+ .items = {
+ { "Ext Mic", 0x0 },
+ { "CD", 0x4 },
+ { "Int Mic", 0xb },
+ },
+ },
+ {
+ .num_items = 2,
+ .items = {
+ { "Ext Mic", 0x0 },
+ { "CD", 0x4 },
+ },
+ }
+};
+
static struct hda_input_mux alc889_capture_sources[3] = {
/* Digital mic only available on first "ADC" */
{
@@ -6347,7 +6386,7 @@
};
/*
- * macbook pro ALC885 can switch LineIn to LineOut without loosing Mic
+ * macbook pro ALC885 can switch LineIn to LineOut without losing Mic
*/
/*
@@ -7047,7 +7086,7 @@
#define alc882_loopbacks alc880_loopbacks
#endif
-/* pcm configuration: identiacal with ALC880 */
+/* pcm configuration: identical with ALC880 */
#define alc882_pcm_analog_playback alc880_pcm_analog_playback
#define alc882_pcm_analog_capture alc880_pcm_analog_capture
#define alc882_pcm_digital_playback alc880_pcm_digital_playback
@@ -8068,7 +8107,7 @@
{ } /* end */
};
-static struct snd_kcontrol_new alc883_tagra_mixer[] = {
+static struct snd_kcontrol_new alc883_targa_mixer[] = {
HDA_CODEC_VOLUME("Front Playback Volume", 0x0c, 0x0, HDA_OUTPUT),
HDA_CODEC_MUTE("Headphone Playback Switch", 0x14, 0x0, HDA_OUTPUT),
HDA_CODEC_MUTE("Front Playback Switch", 0x1b, 0x0, HDA_OUTPUT),
@@ -8088,7 +8127,7 @@
{ } /* end */
};
-static struct snd_kcontrol_new alc883_tagra_2ch_mixer[] = {
+static struct snd_kcontrol_new alc883_targa_2ch_mixer[] = {
HDA_CODEC_VOLUME("Front Playback Volume", 0x0c, 0x0, HDA_OUTPUT),
HDA_CODEC_MUTE("Headphone Playback Switch", 0x14, 0x0, HDA_OUTPUT),
HDA_CODEC_MUTE("Front Playback Switch", 0x1b, 0x0, HDA_OUTPUT),
@@ -8153,6 +8192,19 @@
{ } /* end */
};
+static struct snd_kcontrol_new alc888_acer_aspire_6530_mixer[] = {
+ HDA_CODEC_VOLUME("Front Playback Volume", 0x0c, 0x0, HDA_OUTPUT),
+ HDA_BIND_MUTE("Front Playback Switch", 0x0c, 2, HDA_INPUT),
+ HDA_CODEC_VOLUME("LFE Playback Volume", 0x0f, 0x0, HDA_OUTPUT),
+ HDA_BIND_MUTE("LFE Playback Switch", 0x0f, 2, HDA_INPUT),
+ HDA_CODEC_VOLUME("CD Playback Volume", 0x0b, 0x04, HDA_INPUT),
+ HDA_CODEC_MUTE("CD Playback Switch", 0x0b, 0x04, HDA_INPUT),
+ HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x0, HDA_INPUT),
+ HDA_CODEC_VOLUME("Mic Boost", 0x18, 0, HDA_INPUT),
+ HDA_CODEC_MUTE("Mic Playback Switch", 0x0b, 0x0, HDA_INPUT),
+ { } /* end */
+};
+
static struct snd_kcontrol_new alc888_lenovo_sky_mixer[] = {
HDA_CODEC_VOLUME("Front Playback Volume", 0x0c, 0x0, HDA_OUTPUT),
HDA_BIND_MUTE("Front Playback Switch", 0x0c, 2, HDA_INPUT),
@@ -8417,7 +8469,7 @@
{ } /* end */
};
-static struct hda_verb alc883_tagra_verbs[] = {
+static struct hda_verb alc883_targa_verbs[] = {
{0x0c, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(0)},
{0x0c, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(1)},
@@ -8626,8 +8678,8 @@
}
/* toggle speaker-output according to the hp-jack state */
-#define alc883_tagra_init_hook alc882_targa_init_hook
-#define alc883_tagra_unsol_event alc882_targa_unsol_event
+#define alc883_targa_init_hook alc882_targa_init_hook
+#define alc883_targa_unsol_event alc882_targa_unsol_event
static void alc883_clevo_m720_mic_automute(struct hda_codec *codec)
{
@@ -8957,7 +9009,7 @@
#define alc883_loopbacks alc880_loopbacks
#endif
-/* pcm configuration: identiacal with ALC880 */
+/* pcm configuration: identical with ALC880 */
#define alc883_pcm_analog_playback alc880_pcm_analog_playback
#define alc883_pcm_analog_capture alc880_pcm_analog_capture
#define alc883_pcm_analog_alt_capture alc880_pcm_analog_alt_capture
@@ -8978,6 +9030,7 @@
[ALC883_ACER] = "acer",
[ALC883_ACER_ASPIRE] = "acer-aspire",
[ALC888_ACER_ASPIRE_4930G] = "acer-aspire-4930g",
+ [ALC888_ACER_ASPIRE_6530G] = "acer-aspire-6530g",
[ALC888_ACER_ASPIRE_8930G] = "acer-aspire-8930g",
[ALC883_MEDION] = "medion",
[ALC883_MEDION_MD2] = "medion-md2",
@@ -9021,7 +9074,7 @@
SND_PCI_QUIRK(0x1025, 0x015e, "Acer Aspire 6930G",
ALC888_ACER_ASPIRE_4930G),
SND_PCI_QUIRK(0x1025, 0x0166, "Acer Aspire 6530G",
- ALC888_ACER_ASPIRE_4930G),
+ ALC888_ACER_ASPIRE_6530G),
/* default Acer -- disabled as it causes more problems.
* model=auto should work fine now
*/
@@ -9069,6 +9122,7 @@
SND_PCI_QUIRK(0x1462, 0x7267, "MSI", ALC883_3ST_6ch_DIG),
SND_PCI_QUIRK(0x1462, 0x7280, "MSI", ALC883_6ST_DIG),
SND_PCI_QUIRK(0x1462, 0x7327, "MSI", ALC883_6ST_DIG),
+ SND_PCI_QUIRK(0x1462, 0x7350, "MSI", ALC883_6ST_DIG),
SND_PCI_QUIRK(0x1462, 0xa422, "MSI", ALC883_TARGA_2ch_DIG),
SND_PCI_QUIRK(0x147b, 0x1083, "Abit IP35-PRO", ALC883_6ST_DIG),
SND_PCI_QUIRK(0x1558, 0x0721, "Clevo laptop M720R", ALC883_CLEVO_M720),
@@ -9165,8 +9219,8 @@
.input_mux = &alc883_capture_source,
},
[ALC883_TARGA_DIG] = {
- .mixers = { alc883_tagra_mixer, alc883_chmode_mixer },
- .init_verbs = { alc883_init_verbs, alc883_tagra_verbs},
+ .mixers = { alc883_targa_mixer, alc883_chmode_mixer },
+ .init_verbs = { alc883_init_verbs, alc883_targa_verbs},
.num_dacs = ARRAY_SIZE(alc883_dac_nids),
.dac_nids = alc883_dac_nids,
.dig_out_nid = ALC883_DIGOUT_NID,
@@ -9174,12 +9228,12 @@
.channel_mode = alc883_3ST_6ch_modes,
.need_dac_fix = 1,
.input_mux = &alc883_capture_source,
- .unsol_event = alc883_tagra_unsol_event,
- .init_hook = alc883_tagra_init_hook,
+ .unsol_event = alc883_targa_unsol_event,
+ .init_hook = alc883_targa_init_hook,
},
[ALC883_TARGA_2ch_DIG] = {
- .mixers = { alc883_tagra_2ch_mixer},
- .init_verbs = { alc883_init_verbs, alc883_tagra_verbs},
+ .mixers = { alc883_targa_2ch_mixer},
+ .init_verbs = { alc883_init_verbs, alc883_targa_verbs},
.num_dacs = ARRAY_SIZE(alc883_dac_nids),
.dac_nids = alc883_dac_nids,
.adc_nids = alc883_adc_nids_alt,
@@ -9188,13 +9242,13 @@
.num_channel_mode = ARRAY_SIZE(alc883_3ST_2ch_modes),
.channel_mode = alc883_3ST_2ch_modes,
.input_mux = &alc883_capture_source,
- .unsol_event = alc883_tagra_unsol_event,
- .init_hook = alc883_tagra_init_hook,
+ .unsol_event = alc883_targa_unsol_event,
+ .init_hook = alc883_targa_init_hook,
},
[ALC883_TARGA_8ch_DIG] = {
.mixers = { alc883_base_mixer, alc883_chmode_mixer },
.init_verbs = { alc883_init_verbs, alc880_gpio3_init_verbs,
- alc883_tagra_verbs },
+ alc883_targa_verbs },
.num_dacs = ARRAY_SIZE(alc883_dac_nids),
.dac_nids = alc883_dac_nids,
.num_adc_nids = ARRAY_SIZE(alc883_adc_nids_rev),
@@ -9206,8 +9260,8 @@
.channel_mode = alc883_4ST_8ch_modes,
.need_dac_fix = 1,
.input_mux = &alc883_capture_source,
- .unsol_event = alc883_tagra_unsol_event,
- .init_hook = alc883_tagra_init_hook,
+ .unsol_event = alc883_targa_unsol_event,
+ .init_hook = alc883_targa_init_hook,
},
[ALC883_ACER] = {
.mixers = { alc883_base_mixer },
@@ -9255,6 +9309,24 @@
.unsol_event = alc_automute_amp_unsol_event,
.init_hook = alc888_acer_aspire_4930g_init_hook,
},
+ [ALC888_ACER_ASPIRE_6530G] = {
+ .mixers = { alc888_acer_aspire_6530_mixer },
+ .init_verbs = { alc883_init_verbs, alc880_gpio1_init_verbs,
+ alc888_acer_aspire_6530g_verbs },
+ .num_dacs = ARRAY_SIZE(alc883_dac_nids),
+ .dac_nids = alc883_dac_nids,
+ .num_adc_nids = ARRAY_SIZE(alc883_adc_nids_rev),
+ .adc_nids = alc883_adc_nids_rev,
+ .capsrc_nids = alc883_capsrc_nids_rev,
+ .dig_out_nid = ALC883_DIGOUT_NID,
+ .num_channel_mode = ARRAY_SIZE(alc883_3ST_2ch_modes),
+ .channel_mode = alc883_3ST_2ch_modes,
+ .num_mux_defs =
+ ARRAY_SIZE(alc888_2_capture_sources),
+ .input_mux = alc888_acer_aspire_6530_sources,
+ .unsol_event = alc_automute_amp_unsol_event,
+ .init_hook = alc888_acer_aspire_4930g_init_hook,
+ },
[ALC888_ACER_ASPIRE_8930G] = {
.mixers = { alc888_base_mixer,
alc883_chmode_mixer },
@@ -9361,7 +9433,7 @@
.init_hook = alc888_lenovo_ms7195_front_automute,
},
[ALC883_HAIER_W66] = {
- .mixers = { alc883_tagra_2ch_mixer},
+ .mixers = { alc883_targa_2ch_mixer},
.init_verbs = { alc883_init_verbs, alc883_haier_w66_verbs},
.num_dacs = ARRAY_SIZE(alc883_dac_nids),
.dac_nids = alc883_dac_nids,
@@ -11131,7 +11203,7 @@
#define alc262_loopbacks alc880_loopbacks
#endif
-/* pcm configuration: identiacal with ALC880 */
+/* pcm configuration: identical with ALC880 */
#define alc262_pcm_analog_playback alc880_pcm_analog_playback
#define alc262_pcm_analog_capture alc880_pcm_analog_capture
#define alc262_pcm_digital_playback alc880_pcm_digital_playback
@@ -12286,7 +12358,7 @@
AC_VERB_SET_AMP_GAIN_MUTE, dac_vol2);
}
-/* pcm configuration: identiacal with ALC880 */
+/* pcm configuration: identical with ALC880 */
#define alc268_pcm_analog_playback alc880_pcm_analog_playback
#define alc268_pcm_analog_capture alc880_pcm_analog_capture
#define alc268_pcm_analog_alt_capture alc880_pcm_analog_alt_capture
@@ -13197,7 +13269,7 @@
#define alc269_loopbacks alc880_loopbacks
#endif
-/* pcm configuration: identiacal with ALC880 */
+/* pcm configuration: identical with ALC880 */
#define alc269_pcm_analog_playback alc880_pcm_analog_playback
#define alc269_pcm_analog_capture alc880_pcm_analog_capture
#define alc269_pcm_digital_playback alc880_pcm_digital_playback
@@ -14059,7 +14131,7 @@
alc861_toshiba_automute(codec);
}
-/* pcm configuration: identiacal with ALC880 */
+/* pcm configuration: identical with ALC880 */
#define alc861_pcm_analog_playback alc880_pcm_analog_playback
#define alc861_pcm_analog_capture alc880_pcm_analog_capture
#define alc861_pcm_digital_playback alc880_pcm_digital_playback
@@ -14582,7 +14654,7 @@
/* dac_nids for ALC660vd are in a different order - according to
* Realtek's driver.
- * This should probably tesult in a different mixer for 6stack models
+ * This should probably result in a different mixer for 6stack models
* of ALC660vd codecs, but for now there is only 3stack mixer
* - and it is the same as in 861vd.
* adc_nids in ALC660vd are (is) the same as in 861vd
@@ -15027,7 +15099,7 @@
#define alc861vd_loopbacks alc880_loopbacks
#endif
-/* pcm configuration: identiacal with ALC880 */
+/* pcm configuration: identical with ALC880 */
#define alc861vd_pcm_analog_playback alc880_pcm_analog_playback
#define alc861vd_pcm_analog_capture alc880_pcm_analog_capture
#define alc861vd_pcm_digital_playback alc880_pcm_digital_playback
@@ -15206,7 +15278,7 @@
hda_nid_t pin;
pin = spec->autocfg.hp_pins[0];
- if (pin) /* connect to front and use dac 0 */
+ if (pin) /* connect to front and use dac 0 */
alc861vd_auto_set_output_and_unmute(codec, pin, PIN_HP, 0);
pin = spec->autocfg.speaker_pins[0];
if (pin)
@@ -16669,7 +16741,7 @@
#endif
-/* pcm configuration: identiacal with ALC880 */
+/* pcm configuration: identical with ALC880 */
#define alc662_pcm_analog_playback alc880_pcm_analog_playback
#define alc662_pcm_analog_capture alc880_pcm_analog_capture
#define alc662_pcm_digital_playback alc880_pcm_digital_playback
diff --git a/sound/soc/pxa/corgi.c b/sound/soc/pxa/corgi.c
index d5be2b3..fefe1a5 100644
--- a/sound/soc/pxa/corgi.c
+++ b/sound/soc/pxa/corgi.c
@@ -320,38 +320,6 @@
.codec_dev = &soc_codec_dev_wm8731,
};
-/*
- * FIXME: This is a temporary bodge to avoid cross-tree merge issues.
- * New drivers should register the wm8731 I2C device in the machine
- * setup code (under arch/arm for ARM systems).
- */
-static int wm8731_i2c_register(void)
-{
- struct i2c_board_info info;
- struct i2c_adapter *adapter;
- struct i2c_client *client;
-
- memset(&info, 0, sizeof(struct i2c_board_info));
- info.addr = 0x1b;
- strlcpy(info.type, "wm8731", I2C_NAME_SIZE);
-
- adapter = i2c_get_adapter(0);
- if (!adapter) {
- printk(KERN_ERR "can't get i2c adapter 0\n");
- return -ENODEV;
- }
-
- client = i2c_new_device(adapter, &info);
- i2c_put_adapter(adapter);
- if (!client) {
- printk(KERN_ERR "can't add i2c device at 0x%x\n",
- (unsigned int)info.addr);
- return -ENODEV;
- }
-
- return 0;
-}
-
static struct platform_device *corgi_snd_device;
static int __init corgi_init(void)
@@ -362,10 +330,6 @@
machine_is_husky()))
return -ENODEV;
- ret = wm8731_i2c_register();
- if (ret != 0)
- return ret;
-
corgi_snd_device = platform_device_alloc("soc-audio", -1);
if (!corgi_snd_device)
return -ENOMEM;
diff --git a/sound/soc/pxa/poodle.c b/sound/soc/pxa/poodle.c
index a51058f..c5f36e0 100644
--- a/sound/soc/pxa/poodle.c
+++ b/sound/soc/pxa/poodle.c
@@ -280,38 +280,6 @@
.num_links = 1,
};
-/*
- * FIXME: This is a temporary bodge to avoid cross-tree merge issues.
- * New drivers should register the wm8731 I2C device in the machine
- * setup code (under arch/arm for ARM systems).
- */
-static int wm8731_i2c_register(void)
-{
- struct i2c_board_info info;
- struct i2c_adapter *adapter;
- struct i2c_client *client;
-
- memset(&info, 0, sizeof(struct i2c_board_info));
- info.addr = 0x1b;
- strlcpy(info.type, "wm8731", I2C_NAME_SIZE);
-
- adapter = i2c_get_adapter(0);
- if (!adapter) {
- printk(KERN_ERR "can't get i2c adapter 0\n");
- return -ENODEV;
- }
-
- client = i2c_new_device(adapter, &info);
- i2c_put_adapter(adapter);
- if (!client) {
- printk(KERN_ERR "can't add i2c device at 0x%x\n",
- (unsigned int)info.addr);
- return -ENODEV;
- }
-
- return 0;
-}
-
/* poodle audio subsystem */
static struct snd_soc_device poodle_snd_devdata = {
.card = &snd_soc_poodle,
@@ -327,10 +295,6 @@
if (!machine_is_poodle())
return -ENODEV;
- ret = wm8731_i2c_register();
- if (ret != 0)
- return ret;
-
locomo_gpio_set_dir(&poodle_locomo_device.dev,
POODLE_LOCOMO_GPIO_AMP_ON, 0);
/* should we mute HP at startup - burning power ?*/
diff --git a/sound/soc/txx9/txx9aclc.c b/sound/soc/txx9/txx9aclc.c
index fa33661..938a58a 100644
--- a/sound/soc/txx9/txx9aclc.c
+++ b/sound/soc/txx9/txx9aclc.c
@@ -297,9 +297,9 @@
static bool filter(struct dma_chan *chan, void *param)
{
struct txx9aclc_dmadata *dmadata = param;
- char devname[BUS_ID_SIZE + 2];
+ char devname[20 + 2]; /* FIXME: old BUS_ID_SIZE + 2 */
- sprintf(devname, "%s.%d", dmadata->dma_res->name,
+ snprintf(devname, sizeof(devname), "%s.%d", dmadata->dma_res->name,
(int)dmadata->dma_res->start);
if (strcmp(dev_name(chan->device->dev), devname) == 0) {
chan->private = &dmadata->dma_slave;
diff --git a/sound/usb/caiaq/audio.c b/sound/usb/caiaq/audio.c
index b144513..8f9b60c 100644
--- a/sound/usb/caiaq/audio.c
+++ b/sound/usb/caiaq/audio.c
@@ -199,8 +199,9 @@
dev->period_out_count[index] = BYTES_PER_SAMPLE + 1;
dev->audio_out_buf_pos[index] = BYTES_PER_SAMPLE + 1;
} else {
- dev->period_in_count[index] = BYTES_PER_SAMPLE;
- dev->audio_in_buf_pos[index] = BYTES_PER_SAMPLE;
+ int in_pos = (dev->spec.data_alignment == 2) ? 0 : 2;
+ dev->period_in_count[index] = BYTES_PER_SAMPLE + in_pos;
+ dev->audio_in_buf_pos[index] = BYTES_PER_SAMPLE + in_pos;
}
if (dev->streaming)
diff --git a/sound/usb/caiaq/device.c b/sound/usb/caiaq/device.c
index 2240624..0e5db71 100644
--- a/sound/usb/caiaq/device.c
+++ b/sound/usb/caiaq/device.c
@@ -35,7 +35,7 @@
#include "input.h"
MODULE_AUTHOR("Daniel Mack <daniel@caiaq.de>");
-MODULE_DESCRIPTION("caiaq USB audio, version 1.3.16");
+MODULE_DESCRIPTION("caiaq USB audio, version 1.3.17");
MODULE_LICENSE("GPL");
MODULE_SUPPORTED_DEVICE("{{Native Instruments, RigKontrol2},"
"{Native Instruments, RigKontrol3},"
diff --git a/tools/perf/Makefile b/tools/perf/Makefile
index 0cbd5d6..36d7eef 100644
--- a/tools/perf/Makefile
+++ b/tools/perf/Makefile
@@ -157,10 +157,15 @@
uname_P := $(shell sh -c 'uname -p 2>/dev/null || echo not')
uname_V := $(shell sh -c 'uname -v 2>/dev/null || echo not')
+# If we're on a 64-bit kernel, use -m64
+ifneq ($(patsubst %64,%,$(uname_M)),$(uname_M))
+ M64 := -m64
+endif
+
# CFLAGS and LDFLAGS are for the users to override from the command line.
-CFLAGS = -ggdb3 -Wall -Werror -Wstrict-prototypes -Wmissing-declarations -Wmissing-prototypes -std=gnu99 -Wdeclaration-after-statement -O6
-LDFLAGS = -lpthread -lrt -lelf
+CFLAGS = $(M64) -ggdb3 -Wall -Wstrict-prototypes -Wmissing-declarations -Wmissing-prototypes -std=gnu99 -Wdeclaration-after-statement -Werror -O6
+LDFLAGS = -lpthread -lrt -lelf -lm
ALL_CFLAGS = $(CFLAGS)
ALL_LDFLAGS = $(LDFLAGS)
STRIP ?= strip
@@ -285,6 +290,7 @@
LIB_H += ../../include/linux/perf_counter.h
LIB_H += perf.h
+LIB_H += types.h
LIB_H += util/list.h
LIB_H += util/rbtree.h
LIB_H += util/levenshtein.h
diff --git a/tools/perf/builtin-annotate.c b/tools/perf/builtin-annotate.c
index b1ed5f7..7e58e3a 100644
--- a/tools/perf/builtin-annotate.c
+++ b/tools/perf/builtin-annotate.c
@@ -25,6 +25,10 @@
#define SHOW_USER 2
#define SHOW_HV 4
+#define MIN_GREEN 0.5
+#define MIN_RED 5.0
+
+
static char const *input_name = "perf.data";
static char *vmlinux = "vmlinux";
@@ -39,40 +43,42 @@
static int verbose;
+static int print_line;
+
static unsigned long page_size;
static unsigned long mmap_window = 32;
struct ip_event {
struct perf_event_header header;
- __u64 ip;
- __u32 pid, tid;
+ u64 ip;
+ u32 pid, tid;
};
struct mmap_event {
struct perf_event_header header;
- __u32 pid, tid;
- __u64 start;
- __u64 len;
- __u64 pgoff;
+ u32 pid, tid;
+ u64 start;
+ u64 len;
+ u64 pgoff;
char filename[PATH_MAX];
};
struct comm_event {
struct perf_event_header header;
- __u32 pid, tid;
+ u32 pid, tid;
char comm[16];
};
struct fork_event {
struct perf_event_header header;
- __u32 pid, ppid;
+ u32 pid, ppid;
};
struct period_event {
struct perf_event_header header;
- __u64 time;
- __u64 id;
- __u64 sample_period;
+ u64 time;
+ u64 id;
+ u64 sample_period;
};
typedef union event_union {
@@ -84,6 +90,13 @@
struct period_event period;
} event_t;
+
+struct sym_ext {
+ struct rb_node node;
+ double percent;
+ char *path;
+};
+
static LIST_HEAD(dsos);
static struct dso *kernel_dso;
static struct dso *vdso;
@@ -145,7 +158,7 @@
dso__fprintf(pos, fp);
}
-static struct symbol *vdso__find_symbol(struct dso *dso, __u64 ip)
+static struct symbol *vdso__find_symbol(struct dso *dso, u64 ip)
{
return dso__find_symbol(kernel_dso, ip);
}
@@ -178,19 +191,19 @@
struct map {
struct list_head node;
- __u64 start;
- __u64 end;
- __u64 pgoff;
- __u64 (*map_ip)(struct map *, __u64);
+ u64 start;
+ u64 end;
+ u64 pgoff;
+ u64 (*map_ip)(struct map *, u64);
struct dso *dso;
};
-static __u64 map__map_ip(struct map *map, __u64 ip)
+static u64 map__map_ip(struct map *map, u64 ip)
{
return ip - map->start + map->pgoff;
}
-static __u64 vdso__map_ip(struct map *map, __u64 ip)
+static u64 vdso__map_ip(struct map *map, u64 ip)
{
return ip;
}
@@ -373,7 +386,7 @@
return 0;
}
-static struct map *thread__find_map(struct thread *self, __u64 ip)
+static struct map *thread__find_map(struct thread *self, u64 ip)
{
struct map *pos;
@@ -414,7 +427,7 @@
struct map *map;
struct dso *dso;
struct symbol *sym;
- __u64 ip;
+ u64 ip;
char level;
uint32_t count;
@@ -519,7 +532,7 @@
if (self->dso)
return fprintf(fp, "%-25s", self->dso->name);
- return fprintf(fp, "%016llx ", (__u64)self->ip);
+ return fprintf(fp, "%016llx ", (u64)self->ip);
}
static struct sort_entry sort_dso = {
@@ -533,7 +546,7 @@
static int64_t
sort__sym_cmp(struct hist_entry *left, struct hist_entry *right)
{
- __u64 ip_l, ip_r;
+ u64 ip_l, ip_r;
if (left->sym == right->sym)
return 0;
@@ -550,13 +563,13 @@
size_t ret = 0;
if (verbose)
- ret += fprintf(fp, "%#018llx ", (__u64)self->ip);
+ ret += fprintf(fp, "%#018llx ", (u64)self->ip);
if (self->sym) {
ret += fprintf(fp, "[%c] %s",
self->dso == kernel_dso ? 'k' : '.', self->sym->name);
} else {
- ret += fprintf(fp, "%#016llx", (__u64)self->ip);
+ ret += fprintf(fp, "%#016llx", (u64)self->ip);
}
return ret;
@@ -647,7 +660,7 @@
/*
* collect histogram counts
*/
-static void hist_hit(struct hist_entry *he, __u64 ip)
+static void hist_hit(struct hist_entry *he, u64 ip)
{
unsigned int sym_size, offset;
struct symbol *sym = he->sym;
@@ -676,7 +689,7 @@
static int
hist_entry__add(struct thread *thread, struct map *map, struct dso *dso,
- struct symbol *sym, __u64 ip, char level)
+ struct symbol *sym, u64 ip, char level)
{
struct rb_node **p = &hist.rb_node;
struct rb_node *parent = NULL;
@@ -848,7 +861,7 @@
int show = 0;
struct dso *dso = NULL;
struct thread *thread = threads__findnew(event->ip.pid);
- __u64 ip = event->ip.ip;
+ u64 ip = event->ip.ip;
struct map *map = NULL;
dprintf("%p [%p]: PERF_EVENT (IP, %d): %d: %p\n",
@@ -1030,13 +1043,33 @@
return 0;
}
+static char *get_color(double percent)
+{
+ char *color = PERF_COLOR_NORMAL;
+
+ /*
+ * We color high-overhead entries in red, mid-overhead
+ * entries in green - and keep the low overhead places
+ * normal:
+ */
+ if (percent >= MIN_RED)
+ color = PERF_COLOR_RED;
+ else {
+ if (percent > MIN_GREEN)
+ color = PERF_COLOR_GREEN;
+ }
+ return color;
+}
+
static int
-parse_line(FILE *file, struct symbol *sym, __u64 start, __u64 len)
+parse_line(FILE *file, struct symbol *sym, u64 start, u64 len)
{
char *line = NULL, *tmp, *tmp2;
+ static const char *prev_line;
+ static const char *prev_color;
unsigned int offset;
size_t line_len;
- __u64 line_ip;
+ u64 line_ip;
int ret;
char *c;
@@ -1073,27 +1106,36 @@
}
if (line_ip != -1) {
+ const char *path = NULL;
unsigned int hits = 0;
double percent = 0.0;
- char *color = PERF_COLOR_NORMAL;
+ char *color;
+ struct sym_ext *sym_ext = sym->priv;
offset = line_ip - start;
if (offset < len)
hits = sym->hist[offset];
- if (sym->hist_sum)
+ if (offset < len && sym_ext) {
+ path = sym_ext[offset].path;
+ percent = sym_ext[offset].percent;
+ } else if (sym->hist_sum)
percent = 100.0 * hits / sym->hist_sum;
+ color = get_color(percent);
+
/*
- * We color high-overhead entries in red, mid-overhead
- * entries in green - and keep the low overhead places
- * normal:
+ * Also color the filename and line if needed, with
+	 * the same color as the percentage. Don't print it
+	 * twice for nearby colored IPs that share the same filename:line
*/
- if (percent >= 5.0)
- color = PERF_COLOR_RED;
- else {
- if (percent > 0.5)
- color = PERF_COLOR_GREEN;
+ if (path) {
+ if (!prev_line || strcmp(prev_line, path)
+ || color != prev_color) {
+ color_fprintf(stdout, color, " %s", path);
+ prev_line = path;
+ prev_color = color;
+ }
}
color_fprintf(stdout, color, " %7.2f", percent);
@@ -1109,10 +1151,125 @@
return 0;
}
+static struct rb_root root_sym_ext;
+
+static void insert_source_line(struct sym_ext *sym_ext)
+{
+ struct sym_ext *iter;
+ struct rb_node **p = &root_sym_ext.rb_node;
+ struct rb_node *parent = NULL;
+
+ while (*p != NULL) {
+ parent = *p;
+ iter = rb_entry(parent, struct sym_ext, node);
+
+ if (sym_ext->percent > iter->percent)
+ p = &(*p)->rb_left;
+ else
+ p = &(*p)->rb_right;
+ }
+
+ rb_link_node(&sym_ext->node, parent, p);
+ rb_insert_color(&sym_ext->node, &root_sym_ext);
+}
+
+static void free_source_line(struct symbol *sym, int len)
+{
+ struct sym_ext *sym_ext = sym->priv;
+ int i;
+
+ if (!sym_ext)
+ return;
+
+ for (i = 0; i < len; i++)
+ free(sym_ext[i].path);
+ free(sym_ext);
+
+ sym->priv = NULL;
+ root_sym_ext = RB_ROOT;
+}
+
+/* Get the filename:line for the colored entries */
+static void
+get_source_line(struct symbol *sym, u64 start, int len, char *filename)
+{
+ int i;
+ char cmd[PATH_MAX * 2];
+ struct sym_ext *sym_ext;
+
+ if (!sym->hist_sum)
+ return;
+
+ sym->priv = calloc(len, sizeof(struct sym_ext));
+ if (!sym->priv)
+ return;
+
+ sym_ext = sym->priv;
+
+ for (i = 0; i < len; i++) {
+ char *path = NULL;
+ size_t line_len;
+ u64 offset;
+ FILE *fp;
+
+ sym_ext[i].percent = 100.0 * sym->hist[i] / sym->hist_sum;
+ if (sym_ext[i].percent <= 0.5)
+ continue;
+
+ offset = start + i;
+ sprintf(cmd, "addr2line -e %s %016llx", filename, offset);
+ fp = popen(cmd, "r");
+ if (!fp)
+ continue;
+
+ if (getline(&path, &line_len, fp) < 0 || !line_len)
+ goto next;
+
+ sym_ext[i].path = malloc(sizeof(char) * line_len + 1);
+ if (!sym_ext[i].path)
+ goto next;
+
+ strcpy(sym_ext[i].path, path);
+ insert_source_line(&sym_ext[i]);
+
+ next:
+ pclose(fp);
+ }
+}
+
+static void print_summary(char *filename)
+{
+ struct sym_ext *sym_ext;
+ struct rb_node *node;
+
+ printf("\nSorted summary for file %s\n", filename);
+ printf("----------------------------------------------\n\n");
+
+ if (RB_EMPTY_ROOT(&root_sym_ext)) {
+ printf(" Nothing higher than %1.1f%%\n", MIN_GREEN);
+ return;
+ }
+
+ node = rb_first(&root_sym_ext);
+ while (node) {
+ double percent;
+ char *color;
+ char *path;
+
+ sym_ext = rb_entry(node, struct sym_ext, node);
+ percent = sym_ext->percent;
+ color = get_color(percent);
+ path = sym_ext->path;
+
+ color_fprintf(stdout, color, " %7.2f %s", percent, path);
+ node = rb_next(node);
+ }
+}
+
static void annotate_sym(struct dso *dso, struct symbol *sym)
{
char *filename = dso->name;
- __u64 start, end, len;
+ u64 start, end, len;
char command[PATH_MAX*2];
FILE *file;
@@ -1121,13 +1278,6 @@
if (dso == kernel_dso)
filename = vmlinux;
- printf("\n------------------------------------------------\n");
- printf(" Percent | Source code & Disassembly of %s\n", filename);
- printf("------------------------------------------------\n");
-
- if (verbose >= 2)
- printf("annotating [%p] %30s : [%p] %30s\n", dso, dso->name, sym, sym->name);
-
start = sym->obj_start;
if (!start)
start = sym->start;
@@ -1135,7 +1285,19 @@
end = start + sym->end - sym->start + 1;
len = sym->end - sym->start;
- sprintf(command, "objdump --start-address=0x%016Lx --stop-address=0x%016Lx -dS %s", (__u64)start, (__u64)end, filename);
+ if (print_line) {
+ get_source_line(sym, start, len, filename);
+ print_summary(filename);
+ }
+
+ printf("\n\n------------------------------------------------\n");
+ printf(" Percent | Source code & Disassembly of %s\n", filename);
+ printf("------------------------------------------------\n");
+
+ if (verbose >= 2)
+ printf("annotating [%p] %30s : [%p] %30s\n", dso, dso->name, sym, sym->name);
+
+ sprintf(command, "objdump --start-address=0x%016Lx --stop-address=0x%016Lx -dS %s", (u64)start, (u64)end, filename);
if (verbose >= 3)
printf("doing: %s\n", command);
@@ -1150,6 +1312,8 @@
}
pclose(file);
+ if (print_line)
+ free_source_line(sym, len);
}
static void find_annotations(void)
@@ -1308,6 +1472,8 @@
OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
"dump raw trace in ASCII"),
OPT_STRING('k', "vmlinux", &vmlinux, "file", "vmlinux pathname"),
+ OPT_BOOLEAN('l', "print-line", &print_line,
+ "print matching source lines (may be slow)"),
OPT_END()
};
diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
index 0f5771f..d7ebbd7 100644
--- a/tools/perf/builtin-record.c
+++ b/tools/perf/builtin-record.c
@@ -37,33 +37,37 @@
static int inherit = 1;
static int force = 0;
static int append_file = 0;
+static int call_graph = 0;
static int verbose = 0;
static long samples;
static struct timeval last_read;
static struct timeval this_read;
-static __u64 bytes_written;
+static u64 bytes_written;
static struct pollfd event_array[MAX_NR_CPUS * MAX_COUNTERS];
static int nr_poll;
static int nr_cpu;
+static int file_new = 1;
+static struct perf_file_header file_header;
+
struct mmap_event {
struct perf_event_header header;
- __u32 pid;
- __u32 tid;
- __u64 start;
- __u64 len;
- __u64 pgoff;
+ u32 pid;
+ u32 tid;
+ u64 start;
+ u64 len;
+ u64 pgoff;
char filename[PATH_MAX];
};
struct comm_event {
struct perf_event_header header;
- __u32 pid;
- __u32 tid;
+ u32 pid;
+ u32 tid;
char comm[16];
};
@@ -77,10 +81,10 @@
static struct mmap_data mmap_array[MAX_NR_CPUS][MAX_COUNTERS];
-static unsigned int mmap_read_head(struct mmap_data *md)
+static unsigned long mmap_read_head(struct mmap_data *md)
{
struct perf_counter_mmap_page *pc = md->base;
- int head;
+ long head;
head = pc->data_head;
rmb();
@@ -88,6 +92,32 @@
return head;
}
+static void mmap_write_tail(struct mmap_data *md, unsigned long tail)
+{
+ struct perf_counter_mmap_page *pc = md->base;
+
+ /*
+ * ensure all reads are done before we write the tail out.
+ */
+ /* mb(); */
+ pc->data_tail = tail;
+}
+
+static void write_output(void *buf, size_t size)
+{
+ while (size) {
+ int ret = write(output, buf, size);
+
+ if (ret < 0)
+ die("failed to write");
+
+ size -= ret;
+ buf += ret;
+
+ bytes_written += ret;
+ }
+}
+
static void mmap_read(struct mmap_data *md)
{
unsigned int head = mmap_read_head(md);
@@ -108,7 +138,7 @@
* In either case, truncate and restart at head.
*/
diff = head - old;
- if (diff > md->mask / 2 || diff < 0) {
+ if (diff < 0) {
struct timeval iv;
unsigned long msecs;
@@ -136,36 +166,17 @@
size = md->mask + 1 - (old & md->mask);
old += size;
- while (size) {
- int ret = write(output, buf, size);
-
- if (ret < 0)
- die("failed to write");
-
- size -= ret;
- buf += ret;
-
- bytes_written += ret;
- }
+ write_output(buf, size);
}
buf = &data[old & md->mask];
size = head - old;
old += size;
- while (size) {
- int ret = write(output, buf, size);
-
- if (ret < 0)
- die("failed to write");
-
- size -= ret;
- buf += ret;
-
- bytes_written += ret;
- }
+ write_output(buf, size);
md->prev = old;
+ mmap_write_tail(md, old);
}
static volatile int done = 0;
@@ -191,7 +202,7 @@
struct comm_event comm_ev;
char filename[PATH_MAX];
char bf[BUFSIZ];
- int fd, ret;
+ int fd;
size_t size;
char *field, *sep;
DIR *tasks;
@@ -201,8 +212,12 @@
fd = open(filename, O_RDONLY);
if (fd < 0) {
- fprintf(stderr, "couldn't open %s\n", filename);
- exit(EXIT_FAILURE);
+ /*
+ * We raced with a task exiting - just return:
+ */
+ if (verbose)
+ fprintf(stderr, "couldn't open %s\n", filename);
+ return;
}
if (read(fd, bf, sizeof(bf)) < 0) {
fprintf(stderr, "couldn't read %s\n", filename);
@@ -223,17 +238,13 @@
comm_ev.pid = pid;
comm_ev.header.type = PERF_EVENT_COMM;
- size = ALIGN(size, sizeof(__u64));
+ size = ALIGN(size, sizeof(u64));
comm_ev.header.size = sizeof(comm_ev) - (sizeof(comm_ev.comm) - size);
if (!full) {
comm_ev.tid = pid;
- ret = write(output, &comm_ev, comm_ev.header.size);
- if (ret < 0) {
- perror("failed to write");
- exit(-1);
- }
+ write_output(&comm_ev, comm_ev.header.size);
return;
}
@@ -248,11 +259,7 @@
comm_ev.tid = pid;
- ret = write(output, &comm_ev, comm_ev.header.size);
- if (ret < 0) {
- perror("failed to write");
- exit(-1);
- }
+ write_output(&comm_ev, comm_ev.header.size);
}
closedir(tasks);
return;
@@ -272,8 +279,12 @@
fp = fopen(filename, "r");
if (fp == NULL) {
- fprintf(stderr, "couldn't open %s\n", filename);
- exit(EXIT_FAILURE);
+ /*
+ * We raced with a task exiting - just return:
+ */
+ if (verbose)
+ fprintf(stderr, "couldn't open %s\n", filename);
+ return;
}
while (1) {
char bf[BUFSIZ], *pbf = bf;
@@ -304,17 +315,14 @@
size = strlen(execname);
execname[size - 1] = '\0'; /* Remove \n */
memcpy(mmap_ev.filename, execname, size);
- size = ALIGN(size, sizeof(__u64));
+ size = ALIGN(size, sizeof(u64));
mmap_ev.len -= mmap_ev.start;
mmap_ev.header.size = (sizeof(mmap_ev) -
(sizeof(mmap_ev.filename) - size));
mmap_ev.pid = pid;
mmap_ev.tid = pid;
- if (write(output, &mmap_ev, mmap_ev.header.size) < 0) {
- perror("failed to write");
- exit(-1);
- }
+ write_output(&mmap_ev, mmap_ev.header.size);
}
}
@@ -351,11 +359,25 @@
int track = 1;
attr->sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_TID;
+
if (freq) {
attr->sample_type |= PERF_SAMPLE_PERIOD;
attr->freq = 1;
attr->sample_freq = freq;
}
+
+ if (call_graph)
+ attr->sample_type |= PERF_SAMPLE_CALLCHAIN;
+
+ if (file_new) {
+ file_header.sample_type = attr->sample_type;
+ } else {
+ if (file_header.sample_type != attr->sample_type) {
+ fprintf(stderr, "incompatible append\n");
+ exit(-1);
+ }
+ }
+
attr->mmap = track;
attr->comm = track;
attr->inherit = (cpu < 0) && inherit;
@@ -410,7 +432,7 @@
mmap_array[nr_cpu][counter].prev = 0;
mmap_array[nr_cpu][counter].mask = mmap_pages*page_size - 1;
mmap_array[nr_cpu][counter].base = mmap(NULL, (mmap_pages+1)*page_size,
- PROT_READ, MAP_SHARED, fd[nr_cpu][counter], 0);
+ PROT_READ|PROT_WRITE, MAP_SHARED, fd[nr_cpu][counter], 0);
if (mmap_array[nr_cpu][counter].base == MAP_FAILED) {
error("failed to mmap with %d (%s)\n", errno, strerror(errno));
exit(-1);
@@ -435,6 +457,14 @@
nr_cpu++;
}
+static void atexit_header(void)
+{
+ file_header.data_size += bytes_written;
+
+ if (pwrite(output, &file_header, sizeof(file_header), 0) == -1)
+ perror("failed to write on file headers");
+}
+
static int __cmd_record(int argc, const char **argv)
{
int i, counter;
@@ -448,6 +478,10 @@
assert(nr_cpus <= MAX_NR_CPUS);
assert(nr_cpus >= 0);
+ atexit(sig_atexit);
+ signal(SIGCHLD, sig_handler);
+ signal(SIGINT, sig_handler);
+
if (!stat(output_name, &st) && !force && !append_file) {
fprintf(stderr, "Error, output file %s exists, use -A to append or -f to overwrite.\n",
output_name);
@@ -456,7 +490,7 @@
flags = O_CREAT|O_RDWR;
if (append_file)
- flags |= O_APPEND;
+ file_new = 0;
else
flags |= O_TRUNC;
@@ -466,15 +500,22 @@
exit(-1);
}
+ if (!file_new) {
+ if (read(output, &file_header, sizeof(file_header)) == -1) {
+ perror("failed to read file headers");
+ exit(-1);
+ }
+
+ lseek(output, file_header.data_size, SEEK_CUR);
+ }
+
+ atexit(atexit_header);
+
if (!system_wide) {
open_counters(-1, target_pid != -1 ? target_pid : getpid());
} else for (i = 0; i < nr_cpus; i++)
open_counters(i, target_pid);
- atexit(sig_atexit);
- signal(SIGCHLD, sig_handler);
- signal(SIGINT, sig_handler);
-
if (target_pid == -1 && argc) {
pid = fork();
if (pid < 0)
@@ -555,6 +596,8 @@
"profile at this frequency"),
OPT_INTEGER('m', "mmap-pages", &mmap_pages,
"number of mmap data pages"),
+ OPT_BOOLEAN('g', "call-graph", &call_graph,
+ "do call-graph (stack chain/backtrace) recording"),
OPT_BOOLEAN('v', "verbose", &verbose,
"be more verbose (show counter open errors, etc)"),
OPT_END()
diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c
index 82fa93b..5eb5566 100644
--- a/tools/perf/builtin-report.c
+++ b/tools/perf/builtin-report.c
@@ -36,45 +36,65 @@
static int dump_trace = 0;
#define dprintf(x...) do { if (dump_trace) printf(x); } while (0)
+#define cdprintf(x...) do { if (dump_trace) color_fprintf(stdout, color, x); } while (0)
static int verbose;
+#define eprintf(x...) do { if (verbose) fprintf(stderr, x); } while (0)
+
static int full_paths;
static unsigned long page_size;
static unsigned long mmap_window = 32;
+static char default_parent_pattern[] = "^sys_|^do_page_fault";
+static char *parent_pattern = default_parent_pattern;
+static regex_t parent_regex;
+
+static int exclude_other = 1;
+
struct ip_event {
struct perf_event_header header;
- __u64 ip;
- __u32 pid, tid;
- __u64 period;
+ u64 ip;
+ u32 pid, tid;
+ unsigned char __more_data[];
+};
+
+struct ip_callchain {
+ u64 nr;
+ u64 ips[0];
};
struct mmap_event {
struct perf_event_header header;
- __u32 pid, tid;
- __u64 start;
- __u64 len;
- __u64 pgoff;
+ u32 pid, tid;
+ u64 start;
+ u64 len;
+ u64 pgoff;
char filename[PATH_MAX];
};
struct comm_event {
struct perf_event_header header;
- __u32 pid, tid;
+ u32 pid, tid;
char comm[16];
};
struct fork_event {
struct perf_event_header header;
- __u32 pid, ppid;
+ u32 pid, ppid;
};
struct period_event {
struct perf_event_header header;
- __u64 time;
- __u64 id;
- __u64 sample_period;
+ u64 time;
+ u64 id;
+ u64 sample_period;
+};
+
+struct lost_event {
+ struct perf_event_header header;
+ u64 id;
+ u64 lost;
};
typedef union event_union {
@@ -84,6 +104,7 @@
struct comm_event comm;
struct fork_event fork;
struct period_event period;
+ struct lost_event lost;
} event_t;
static LIST_HEAD(dsos);
@@ -119,15 +140,11 @@
nr = dso__load(dso, NULL, verbose);
if (nr < 0) {
- if (verbose)
- fprintf(stderr, "Failed to open: %s\n", name);
+ eprintf("Failed to open: %s\n", name);
goto out_delete_dso;
}
- if (!nr && verbose) {
- fprintf(stderr,
- "No symbols found in: %s, maybe install a debug package?\n",
- name);
- }
+ if (!nr)
+ eprintf("No symbols found in: %s, maybe install a debug package?\n", name);
dsos__add(dso);
@@ -146,7 +163,7 @@
dso__fprintf(pos, fp);
}
-static struct symbol *vdso__find_symbol(struct dso *dso, __u64 ip)
+static struct symbol *vdso__find_symbol(struct dso *dso, u64 ip)
{
return dso__find_symbol(kernel_dso, ip);
}
@@ -193,19 +210,19 @@
struct map {
struct list_head node;
- __u64 start;
- __u64 end;
- __u64 pgoff;
- __u64 (*map_ip)(struct map *, __u64);
+ u64 start;
+ u64 end;
+ u64 pgoff;
+ u64 (*map_ip)(struct map *, u64);
struct dso *dso;
};
-static __u64 map__map_ip(struct map *map, __u64 ip)
+static u64 map__map_ip(struct map *map, u64 ip)
{
return ip - map->start + map->pgoff;
}
-static __u64 vdso__map_ip(struct map *map, __u64 ip)
+static u64 vdso__map_ip(struct map *map, u64 ip)
{
return ip;
}
@@ -412,7 +429,7 @@
return 0;
}
-static struct map *thread__find_map(struct thread *self, __u64 ip)
+static struct map *thread__find_map(struct thread *self, u64 ip)
{
struct map *pos;
@@ -453,10 +470,11 @@
struct map *map;
struct dso *dso;
struct symbol *sym;
- __u64 ip;
+ struct symbol *parent;
+ u64 ip;
char level;
- __u64 count;
+ u64 count;
};
/*
@@ -473,6 +491,16 @@
size_t (*print)(FILE *fp, struct hist_entry *);
};
+static int64_t cmp_null(void *l, void *r)
+{
+ if (!l && !r)
+ return 0;
+ else if (!l)
+ return -1;
+ else
+ return 1;
+}
+
/* --sort pid */
static int64_t
@@ -507,14 +535,8 @@
char *comm_l = left->thread->comm;
char *comm_r = right->thread->comm;
- if (!comm_l || !comm_r) {
- if (!comm_l && !comm_r)
- return 0;
- else if (!comm_l)
- return -1;
- else
- return 1;
- }
+ if (!comm_l || !comm_r)
+ return cmp_null(comm_l, comm_r);
return strcmp(comm_l, comm_r);
}
@@ -540,14 +562,8 @@
struct dso *dso_l = left->dso;
struct dso *dso_r = right->dso;
- if (!dso_l || !dso_r) {
- if (!dso_l && !dso_r)
- return 0;
- else if (!dso_l)
- return -1;
- else
- return 1;
- }
+ if (!dso_l || !dso_r)
+ return cmp_null(dso_l, dso_r);
return strcmp(dso_l->name, dso_r->name);
}
@@ -558,7 +574,7 @@
if (self->dso)
return fprintf(fp, "%-25s", self->dso->name);
- return fprintf(fp, "%016llx ", (__u64)self->ip);
+ return fprintf(fp, "%016llx ", (u64)self->ip);
}
static struct sort_entry sort_dso = {
@@ -572,7 +588,7 @@
static int64_t
sort__sym_cmp(struct hist_entry *left, struct hist_entry *right)
{
- __u64 ip_l, ip_r;
+ u64 ip_l, ip_r;
if (left->sym == right->sym)
return 0;
@@ -589,13 +605,13 @@
size_t ret = 0;
if (verbose)
- ret += fprintf(fp, "%#018llx ", (__u64)self->ip);
+ ret += fprintf(fp, "%#018llx ", (u64)self->ip);
if (self->sym) {
ret += fprintf(fp, "[%c] %s",
self->dso == kernel_dso ? 'k' : '.', self->sym->name);
} else {
- ret += fprintf(fp, "%#016llx", (__u64)self->ip);
+ ret += fprintf(fp, "%#016llx", (u64)self->ip);
}
return ret;
@@ -607,7 +623,38 @@
.print = sort__sym_print,
};
+/* --sort parent */
+
+static int64_t
+sort__parent_cmp(struct hist_entry *left, struct hist_entry *right)
+{
+ struct symbol *sym_l = left->parent;
+ struct symbol *sym_r = right->parent;
+
+ if (!sym_l || !sym_r)
+ return cmp_null(sym_l, sym_r);
+
+ return strcmp(sym_l->name, sym_r->name);
+}
+
+static size_t
+sort__parent_print(FILE *fp, struct hist_entry *self)
+{
+ size_t ret = 0;
+
+ ret += fprintf(fp, "%-20s", self->parent ? self->parent->name : "[other]");
+
+ return ret;
+}
+
+static struct sort_entry sort_parent = {
+ .header = "Parent symbol ",
+ .cmp = sort__parent_cmp,
+ .print = sort__parent_print,
+};
+
static int sort__need_collapse = 0;
+static int sort__has_parent = 0;
struct sort_dimension {
char *name;
@@ -620,6 +667,7 @@
{ .name = "comm", .entry = &sort_comm, },
{ .name = "dso", .entry = &sort_dso, },
{ .name = "symbol", .entry = &sort_sym, },
+ { .name = "parent", .entry = &sort_parent, },
};
static LIST_HEAD(hist_entry__sort_list);
@@ -640,6 +688,19 @@
if (sd->entry->collapse)
sort__need_collapse = 1;
+ if (sd->entry == &sort_parent) {
+ int ret = regcomp(&parent_regex, parent_pattern, REG_EXTENDED);
+ if (ret) {
+ char err[BUFSIZ];
+
+ regerror(ret, &parent_regex, err, sizeof(err));
+ fprintf(stderr, "Invalid regex: %s\n%s",
+ parent_pattern, err);
+ exit(-1);
+ }
+ sort__has_parent = 1;
+ }
+
list_add_tail(&sd->entry->list, &hist_entry__sort_list);
sd->taken = 1;
@@ -684,11 +745,14 @@
}
static size_t
-hist_entry__fprintf(FILE *fp, struct hist_entry *self, __u64 total_samples)
+hist_entry__fprintf(FILE *fp, struct hist_entry *self, u64 total_samples)
{
struct sort_entry *se;
size_t ret;
+ if (exclude_other && !self->parent)
+ return 0;
+
if (total_samples) {
double percent = self->count * 100.0 / total_samples;
char *color = PERF_COLOR_NORMAL;
@@ -711,6 +775,9 @@
ret = fprintf(fp, "%12Ld ", self->count);
list_for_each_entry(se, &hist_entry__sort_list, list) {
+ if (exclude_other && (se == &sort_parent))
+ continue;
+
fprintf(fp, " ");
ret += se->print(fp, self);
}
@@ -721,12 +788,72 @@
}
/*
+ *
+ */
+
+static struct symbol *
+resolve_symbol(struct thread *thread, struct map **mapp,
+ struct dso **dsop, u64 *ipp)
+{
+ struct dso *dso = dsop ? *dsop : NULL;
+ struct map *map = mapp ? *mapp : NULL;
+ uint64_t ip = *ipp;
+
+ if (!thread)
+ return NULL;
+
+ if (dso)
+ goto got_dso;
+
+ if (map)
+ goto got_map;
+
+ map = thread__find_map(thread, ip);
+ if (map != NULL) {
+ if (mapp)
+ *mapp = map;
+got_map:
+ ip = map->map_ip(map, ip);
+ *ipp = ip;
+
+ dso = map->dso;
+ } else {
+ /*
+ * If this is outside of all known maps,
+ * and is a negative address, try to look it
+ * up in the kernel dso, as it might be a
+ * vsyscall (which executes in user-mode):
+ */
+ if ((long long)ip < 0)
+ dso = kernel_dso;
+ }
+ dprintf(" ...... dso: %s\n", dso ? dso->name : "<not found>");
+
+ if (dsop)
+ *dsop = dso;
+
+ if (!dso)
+ return NULL;
+got_dso:
+ return dso->find_symbol(dso, ip);
+}
+
+static int call__match(struct symbol *sym)
+{
+ if (sym->name && !regexec(&parent_regex, sym->name, 0, NULL, 0))
+ return 1;
+
+ return 0;
+}
+
+/*
* collect histogram counts
*/
static int
hist_entry__add(struct thread *thread, struct map *map, struct dso *dso,
- struct symbol *sym, __u64 ip, char level, __u64 count)
+ struct symbol *sym, u64 ip, struct ip_callchain *chain,
+ char level, u64 count)
{
struct rb_node **p = &hist.rb_node;
struct rb_node *parent = NULL;
@@ -739,9 +866,41 @@
.ip = ip,
.level = level,
.count = count,
+ .parent = NULL,
};
int cmp;
+ if (sort__has_parent && chain) {
+ u64 context = PERF_CONTEXT_MAX;
+ int i;
+
+ for (i = 0; i < chain->nr; i++) {
+ u64 ip = chain->ips[i];
+ struct dso *dso = NULL;
+ struct symbol *sym;
+
+ if (ip >= PERF_CONTEXT_MAX) {
+ context = ip;
+ continue;
+ }
+
+ switch (context) {
+ case PERF_CONTEXT_KERNEL:
+ dso = kernel_dso;
+ break;
+ default:
+ break;
+ }
+
+ sym = resolve_symbol(thread, NULL, &dso, &ip);
+
+ if (sym && call__match(sym)) {
+ entry.parent = sym;
+ break;
+ }
+ }
+ }
+
while (*p != NULL) {
parent = *p;
he = rb_entry(parent, struct hist_entry, rb_node);
@@ -873,7 +1032,7 @@
}
}
-static size_t output__fprintf(FILE *fp, __u64 total_samples)
+static size_t output__fprintf(FILE *fp, u64 total_samples)
{
struct hist_entry *pos;
struct sort_entry *se;
@@ -882,18 +1041,24 @@
fprintf(fp, "\n");
fprintf(fp, "#\n");
- fprintf(fp, "# (%Ld samples)\n", (__u64)total_samples);
+ fprintf(fp, "# (%Ld samples)\n", (u64)total_samples);
fprintf(fp, "#\n");
fprintf(fp, "# Overhead");
- list_for_each_entry(se, &hist_entry__sort_list, list)
+ list_for_each_entry(se, &hist_entry__sort_list, list) {
+ if (exclude_other && (se == &sort_parent))
+ continue;
fprintf(fp, " %s", se->header);
+ }
fprintf(fp, "\n");
fprintf(fp, "# ........");
list_for_each_entry(se, &hist_entry__sort_list, list) {
int i;
+ if (exclude_other && (se == &sort_parent))
+ continue;
+
fprintf(fp, " ");
for (i = 0; i < strlen(se->header); i++)
fprintf(fp, ".");
@@ -907,7 +1072,8 @@
ret += hist_entry__fprintf(fp, pos, total_samples);
}
- if (!strcmp(sort_order, default_sort_order)) {
+ if (sort_order == default_sort_order &&
+ parent_pattern == default_parent_pattern) {
fprintf(fp, "#\n");
fprintf(fp, "# (For more details, try: perf report --sort comm,dso,symbol)\n");
fprintf(fp, "#\n");
@@ -932,7 +1098,21 @@
total_mmap = 0,
total_comm = 0,
total_fork = 0,
- total_unknown = 0;
+ total_unknown = 0,
+ total_lost = 0;
+
+static int validate_chain(struct ip_callchain *chain, event_t *event)
+{
+ unsigned int chain_size;
+
+ chain_size = event->header.size;
+ chain_size -= (unsigned long)&event->ip.__more_data - (unsigned long)event;
+
+ if (chain->nr*sizeof(u64) > chain_size)
+ return -1;
+
+ return 0;
+}
static int
process_overflow_event(event_t *event, unsigned long offset, unsigned long head)
@@ -941,12 +1121,16 @@
int show = 0;
struct dso *dso = NULL;
struct thread *thread = threads__findnew(event->ip.pid);
- __u64 ip = event->ip.ip;
- __u64 period = 1;
+ u64 ip = event->ip.ip;
+ u64 period = 1;
struct map *map = NULL;
+ void *more_data = event->ip.__more_data;
+ struct ip_callchain *chain = NULL;
- if (event->header.type & PERF_SAMPLE_PERIOD)
- period = event->ip.period;
+ if (event->header.type & PERF_SAMPLE_PERIOD) {
+ period = *(u64 *)more_data;
+ more_data += sizeof(u64);
+ }
dprintf("%p [%p]: PERF_EVENT (IP, %d): %d: %p period: %Ld\n",
(void *)(offset + head),
@@ -956,10 +1140,28 @@
(void *)(long)ip,
(long long)period);
+ if (event->header.type & PERF_SAMPLE_CALLCHAIN) {
+ int i;
+
+ chain = (void *)more_data;
+
+ dprintf("... chain: nr:%Lu\n", chain->nr);
+
+ if (validate_chain(chain, event) < 0) {
+ eprintf("call-chain problem with event, skipping it.\n");
+ return 0;
+ }
+
+ if (dump_trace) {
+ for (i = 0; i < chain->nr; i++)
+ dprintf("..... %2d: %016Lx\n", i, chain->ips[i]);
+ }
+ }
+
dprintf(" ... thread: %s:%d\n", thread->comm, thread->pid);
if (thread == NULL) {
- fprintf(stderr, "problem processing %d event, skipping it.\n",
+ eprintf("problem processing %d event, skipping it.\n",
event->header.type);
return -1;
}
@@ -977,22 +1179,6 @@
show = SHOW_USER;
level = '.';
- map = thread__find_map(thread, ip);
- if (map != NULL) {
- ip = map->map_ip(map, ip);
- dso = map->dso;
- } else {
- /*
- * If this is outside of all known maps,
- * and is a negative address, try to look it
- * up in the kernel dso, as it might be a
- * vsyscall (which executes in user-mode):
- */
- if ((long long)ip < 0)
- dso = kernel_dso;
- }
- dprintf(" ...... dso: %s\n", dso ? dso->name : "<not found>");
-
} else {
show = SHOW_HV;
level = 'H';
@@ -1000,14 +1186,10 @@
}
if (show & show_mask) {
- struct symbol *sym = NULL;
+ struct symbol *sym = resolve_symbol(thread, &map, &dso, &ip);
- if (dso)
- sym = dso->find_symbol(dso, ip);
-
- if (hist_entry__add(thread, map, dso, sym, ip, level, period)) {
- fprintf(stderr,
- "problem incrementing symbol count, skipping event\n");
+ if (hist_entry__add(thread, map, dso, sym, ip, chain, level, period)) {
+ eprintf("problem incrementing symbol count, skipping event\n");
return -1;
}
}
@@ -1096,8 +1278,60 @@
}
static int
+process_lost_event(event_t *event, unsigned long offset, unsigned long head)
+{
+ dprintf("%p [%p]: PERF_EVENT_LOST: id:%Ld: lost:%Ld\n",
+ (void *)(offset + head),
+ (void *)(long)(event->header.size),
+ event->lost.id,
+ event->lost.lost);
+
+ total_lost += event->lost.lost;
+
+ return 0;
+}
+
+static void trace_event(event_t *event)
+{
+ unsigned char *raw_event = (void *)event;
+ char *color = PERF_COLOR_BLUE;
+ int i, j;
+
+ if (!dump_trace)
+ return;
+
+ dprintf(".");
+ cdprintf("\n. ... raw event: size %d bytes\n", event->header.size);
+
+ for (i = 0; i < event->header.size; i++) {
+ if ((i & 15) == 0) {
+ dprintf(".");
+ cdprintf(" %04x: ", i);
+ }
+
+ cdprintf(" %02x", raw_event[i]);
+
+ if (((i & 15) == 15) || i == event->header.size-1) {
+ cdprintf(" ");
+ for (j = 0; j < 15-(i & 15); j++)
+ cdprintf(" ");
+ for (j = 0; j < (i & 15); j++) {
+ if (isprint(raw_event[i-15+j]))
+ cdprintf("%c", raw_event[i-15+j]);
+ else
+ cdprintf(".");
+ }
+ cdprintf("\n");
+ }
+ }
+ dprintf(".\n");
+}
+
+static int
process_event(event_t *event, unsigned long offset, unsigned long head)
{
+ trace_event(event);
+
if (event->header.misc & PERF_EVENT_MISC_OVERFLOW)
return process_overflow_event(event, offset, head);
@@ -1113,6 +1347,10 @@
case PERF_EVENT_PERIOD:
return process_period_event(event, offset, head);
+
+ case PERF_EVENT_LOST:
+ return process_lost_event(event, offset, head);
+
/*
* We dont process them right now but they are fine:
*/
@@ -1128,11 +1366,13 @@
return 0;
}
+static struct perf_file_header file_header;
+
static int __cmd_report(void)
{
int ret, rc = EXIT_FAILURE;
unsigned long offset = 0;
- unsigned long head = 0;
+ unsigned long head = sizeof(file_header);
struct stat stat;
event_t *event;
uint32_t size;
@@ -1160,6 +1400,17 @@
exit(0);
}
+ if (read(input, &file_header, sizeof(file_header)) == -1) {
+ perror("failed to read file headers");
+ exit(-1);
+ }
+
+ if (sort__has_parent &&
+ !(file_header.sample_type & PERF_SAMPLE_CALLCHAIN)) {
+ fprintf(stderr, "selected --sort parent, but no callchain data\n");
+ exit(-1);
+ }
+
if (load_kernel() < 0) {
perror("failed to load kernel symbols");
return EXIT_FAILURE;
@@ -1204,7 +1455,7 @@
size = event->header.size;
- dprintf("%p [%p]: event: %d\n",
+ dprintf("\n%p [%p]: event: %d\n",
(void *)(offset + head),
(void *)(long)event->header.size,
event->header.type);
@@ -1231,9 +1482,13 @@
head += size;
+ if (offset + head >= sizeof(file_header) + file_header.data_size)
+ goto done;
+
if (offset + head < stat.st_size)
goto more;
+done:
rc = EXIT_SUCCESS;
close(input);
@@ -1241,6 +1496,7 @@
dprintf(" mmap events: %10ld\n", total_mmap);
dprintf(" comm events: %10ld\n", total_comm);
dprintf(" fork events: %10ld\n", total_fork);
+ dprintf(" lost events: %10ld\n", total_lost);
dprintf(" unknown events: %10ld\n", total_unknown);
if (dump_trace)
@@ -1273,9 +1529,13 @@
"dump raw trace in ASCII"),
OPT_STRING('k', "vmlinux", &vmlinux, "file", "vmlinux pathname"),
OPT_STRING('s', "sort", &sort_order, "key[,key2...]",
- "sort by key(s): pid, comm, dso, symbol. Default: pid,symbol"),
+ "sort by key(s): pid, comm, dso, symbol, parent"),
OPT_BOOLEAN('P', "full-paths", &full_paths,
"Don't shorten the pathnames taking into account the cwd"),
+ OPT_STRING('p', "parent", &parent_pattern, "regex",
+ "regex filter to identify parent, see: '--sort parent'"),
+ OPT_BOOLEAN('x', "exclude-other", &exclude_other,
+ "Only display entries with parent-match"),
OPT_END()
};
@@ -1304,6 +1564,11 @@
setup_sorting();
+ if (parent_pattern != default_parent_pattern)
+ sort_dimension__add("parent");
+ else
+ exclude_other = 0;
+
/*
* Any (unrecognized) arguments left?
*/
diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
index c43e4a9..6d3eeac 100644
--- a/tools/perf/builtin-stat.c
+++ b/tools/perf/builtin-stat.c
@@ -43,6 +43,7 @@
#include "util/parse-events.h"
#include <sys/prctl.h>
+#include <math.h>
static struct perf_counter_attr default_attrs[MAX_COUNTERS] = {
@@ -79,12 +80,34 @@
10000,
};
-static __u64 event_res[MAX_COUNTERS][3];
-static __u64 event_scaled[MAX_COUNTERS];
+#define MAX_RUN 100
-static __u64 runtime_nsecs;
-static __u64 walltime_nsecs;
-static __u64 runtime_cycles;
+static int run_count = 1;
+static int run_idx = 0;
+
+static u64 event_res[MAX_RUN][MAX_COUNTERS][3];
+static u64 event_scaled[MAX_RUN][MAX_COUNTERS];
+
+//static u64 event_hist[MAX_RUN][MAX_COUNTERS][3];
+
+
+static u64 runtime_nsecs[MAX_RUN];
+static u64 walltime_nsecs[MAX_RUN];
+static u64 runtime_cycles[MAX_RUN];
+
+static u64 event_res_avg[MAX_COUNTERS][3];
+static u64 event_res_noise[MAX_COUNTERS][3];
+
+static u64 event_scaled_avg[MAX_COUNTERS];
+
+static u64 runtime_nsecs_avg;
+static u64 runtime_nsecs_noise;
+
+static u64 walltime_nsecs_avg;
+static u64 walltime_nsecs_noise;
+
+static u64 runtime_cycles_avg;
+static u64 runtime_cycles_noise;
static void create_perf_stat_counter(int counter)
{
@@ -135,12 +158,12 @@
*/
static void read_counter(int counter)
{
- __u64 *count, single_count[3];
+ u64 *count, single_count[3];
ssize_t res;
int cpu, nv;
int scaled;
- count = event_res[counter];
+ count = event_res[run_idx][counter];
count[0] = count[1] = count[2] = 0;
@@ -149,8 +172,10 @@
if (fd[cpu][counter] < 0)
continue;
- res = read(fd[cpu][counter], single_count, nv * sizeof(__u64));
- assert(res == nv * sizeof(__u64));
+ res = read(fd[cpu][counter], single_count, nv * sizeof(u64));
+ assert(res == nv * sizeof(u64));
+ close(fd[cpu][counter]);
+ fd[cpu][counter] = -1;
count[0] += single_count[0];
if (scale) {
@@ -162,13 +187,13 @@
scaled = 0;
if (scale) {
if (count[2] == 0) {
- event_scaled[counter] = -1;
+ event_scaled[run_idx][counter] = -1;
count[0] = 0;
return;
}
if (count[2] < count[1]) {
- event_scaled[counter] = 1;
+ event_scaled[run_idx][counter] = 1;
count[0] = (unsigned long long)
((double)count[0] * count[1] / count[2] + 0.5);
}
@@ -178,68 +203,18 @@
*/
if (attrs[counter].type == PERF_TYPE_SOFTWARE &&
attrs[counter].config == PERF_COUNT_SW_TASK_CLOCK)
- runtime_nsecs = count[0];
+ runtime_nsecs[run_idx] = count[0];
if (attrs[counter].type == PERF_TYPE_HARDWARE &&
attrs[counter].config == PERF_COUNT_HW_CPU_CYCLES)
- runtime_cycles = count[0];
+ runtime_cycles[run_idx] = count[0];
}
-/*
- * Print out the results of a single counter:
- */
-static void print_counter(int counter)
-{
- __u64 *count;
- int scaled;
-
- count = event_res[counter];
- scaled = event_scaled[counter];
-
- if (scaled == -1) {
- fprintf(stderr, " %14s %-20s\n",
- "<not counted>", event_name(counter));
- return;
- }
-
- if (nsec_counter(counter)) {
- double msecs = (double)count[0] / 1000000;
-
- fprintf(stderr, " %14.6f %-20s",
- msecs, event_name(counter));
- if (attrs[counter].type == PERF_TYPE_SOFTWARE &&
- attrs[counter].config == PERF_COUNT_SW_TASK_CLOCK) {
-
- if (walltime_nsecs)
- fprintf(stderr, " # %11.3f CPU utilization factor",
- (double)count[0] / (double)walltime_nsecs);
- }
- } else {
- fprintf(stderr, " %14Ld %-20s",
- count[0], event_name(counter));
- if (runtime_nsecs)
- fprintf(stderr, " # %11.3f M/sec",
- (double)count[0]/runtime_nsecs*1000.0);
- if (runtime_cycles &&
- attrs[counter].type == PERF_TYPE_HARDWARE &&
- attrs[counter].config == PERF_COUNT_HW_INSTRUCTIONS) {
-
- fprintf(stderr, " # %1.3f per cycle",
- (double)count[0] / (double)runtime_cycles);
- }
- }
- if (scaled)
- fprintf(stderr, " (scaled from %.2f%%)",
- (double) count[2] / count[1] * 100);
- fprintf(stderr, "\n");
-}
-
-static int do_perf_stat(int argc, const char **argv)
+static int run_perf_stat(int argc, const char **argv)
{
unsigned long long t0, t1;
+ int status = 0;
int counter;
- int status;
int pid;
- int i;
if (!system_wide)
nr_cpus = 1;
@@ -263,13 +238,180 @@
}
}
- while (wait(&status) >= 0)
- ;
+ wait(&status);
prctl(PR_TASK_PERF_COUNTERS_DISABLE);
t1 = rdclock();
- walltime_nsecs = t1 - t0;
+ walltime_nsecs[run_idx] = t1 - t0;
+
+ for (counter = 0; counter < nr_counters; counter++)
+ read_counter(counter);
+
+ return WEXITSTATUS(status);
+}
+
+static void print_noise(u64 *count, u64 *noise)
+{
+ if (run_count > 1)
+ fprintf(stderr, " ( +- %7.3f%% )",
+ (double)noise[0]/(count[0]+1)*100.0);
+}
+
+static void nsec_printout(int counter, u64 *count, u64 *noise)
+{
+ double msecs = (double)count[0] / 1000000;
+
+ fprintf(stderr, " %14.6f %-20s", msecs, event_name(counter));
+
+ if (attrs[counter].type == PERF_TYPE_SOFTWARE &&
+ attrs[counter].config == PERF_COUNT_SW_TASK_CLOCK) {
+
+ if (walltime_nsecs_avg)
+ fprintf(stderr, " # %10.3f CPUs ",
+ (double)count[0] / (double)walltime_nsecs_avg);
+ }
+ print_noise(count, noise);
+}
+
+static void abs_printout(int counter, u64 *count, u64 *noise)
+{
+ fprintf(stderr, " %14Ld %-20s", count[0], event_name(counter));
+
+ if (runtime_cycles_avg &&
+ attrs[counter].type == PERF_TYPE_HARDWARE &&
+ attrs[counter].config == PERF_COUNT_HW_INSTRUCTIONS) {
+
+ fprintf(stderr, " # %10.3f IPC ",
+ (double)count[0] / (double)runtime_cycles_avg);
+ } else {
+ if (runtime_nsecs_avg) {
+ fprintf(stderr, " # %10.3f M/sec",
+ (double)count[0]/runtime_nsecs_avg*1000.0);
+ }
+ }
+ print_noise(count, noise);
+}
+
+/*
+ * Print out the results of a single counter:
+ */
+static void print_counter(int counter)
+{
+ u64 *count, *noise;
+ int scaled;
+
+ count = event_res_avg[counter];
+ noise = event_res_noise[counter];
+ scaled = event_scaled_avg[counter];
+
+ if (scaled == -1) {
+ fprintf(stderr, " %14s %-20s\n",
+ "<not counted>", event_name(counter));
+ return;
+ }
+
+ if (nsec_counter(counter))
+ nsec_printout(counter, count, noise);
+ else
+ abs_printout(counter, count, noise);
+
+ if (scaled)
+ fprintf(stderr, " (scaled from %.2f%%)",
+ (double) count[2] / count[1] * 100);
+
+ fprintf(stderr, "\n");
+}
+
+/*
+ * normalize_noise noise values down to stddev:
+ */
+static void normalize_noise(u64 *val)
+{
+ double res;
+
+ res = (double)*val / (run_count * sqrt((double)run_count));
+
+ *val = (u64)res;
+}
+
+static void update_avg(const char *name, int idx, u64 *avg, u64 *val)
+{
+ *avg += *val;
+
+ if (verbose > 1)
+ fprintf(stderr, "debug: %20s[%d]: %Ld\n", name, idx, *val);
+}
+/*
+ * Calculate the averages and noises:
+ */
+static void calc_avg(void)
+{
+ int i, j;
+
+ if (verbose > 1)
+ fprintf(stderr, "\n");
+
+ for (i = 0; i < run_count; i++) {
+ update_avg("runtime", 0, &runtime_nsecs_avg, runtime_nsecs + i);
+ update_avg("walltime", 0, &walltime_nsecs_avg, walltime_nsecs + i);
+ update_avg("runtime_cycles", 0, &runtime_cycles_avg, runtime_cycles + i);
+
+ for (j = 0; j < nr_counters; j++) {
+ update_avg("counter/0", j,
+ event_res_avg[j]+0, event_res[i][j]+0);
+ update_avg("counter/1", j,
+ event_res_avg[j]+1, event_res[i][j]+1);
+ update_avg("counter/2", j,
+ event_res_avg[j]+2, event_res[i][j]+2);
+ update_avg("scaled", j,
+ event_scaled_avg + j, event_scaled[i]+j);
+ }
+ }
+ runtime_nsecs_avg /= run_count;
+ walltime_nsecs_avg /= run_count;
+ runtime_cycles_avg /= run_count;
+
+ for (j = 0; j < nr_counters; j++) {
+ event_res_avg[j][0] /= run_count;
+ event_res_avg[j][1] /= run_count;
+ event_res_avg[j][2] /= run_count;
+ }
+
+ for (i = 0; i < run_count; i++) {
+ runtime_nsecs_noise +=
+ abs((s64)(runtime_nsecs[i] - runtime_nsecs_avg));
+ walltime_nsecs_noise +=
+ abs((s64)(walltime_nsecs[i] - walltime_nsecs_avg));
+ runtime_cycles_noise +=
+ abs((s64)(runtime_cycles[i] - runtime_cycles_avg));
+
+ for (j = 0; j < nr_counters; j++) {
+ event_res_noise[j][0] +=
+ abs((s64)(event_res[i][j][0] - event_res_avg[j][0]));
+ event_res_noise[j][1] +=
+ abs((s64)(event_res[i][j][1] - event_res_avg[j][1]));
+ event_res_noise[j][2] +=
+ abs((s64)(event_res[i][j][2] - event_res_avg[j][2]));
+ }
+ }
+
+ normalize_noise(&runtime_nsecs_noise);
+ normalize_noise(&walltime_nsecs_noise);
+ normalize_noise(&runtime_cycles_noise);
+
+ for (j = 0; j < nr_counters; j++) {
+ normalize_noise(&event_res_noise[j][0]);
+ normalize_noise(&event_res_noise[j][1]);
+ normalize_noise(&event_res_noise[j][2]);
+ }
+}
+
+static void print_stat(int argc, const char **argv)
+{
+ int i, counter;
+
+ calc_avg();
fflush(stdout);
@@ -279,22 +421,19 @@
for (i = 1; i < argc; i++)
fprintf(stderr, " %s", argv[i]);
- fprintf(stderr, "\':\n");
- fprintf(stderr, "\n");
-
- for (counter = 0; counter < nr_counters; counter++)
- read_counter(counter);
+ fprintf(stderr, "\'");
+ if (run_count > 1)
+ fprintf(stderr, " (%d runs)", run_count);
+ fprintf(stderr, ":\n\n");
for (counter = 0; counter < nr_counters; counter++)
print_counter(counter);
fprintf(stderr, "\n");
- fprintf(stderr, " Wall-clock time elapsed: %12.6f msecs\n",
- (double)(t1-t0)/1e6);
+ fprintf(stderr, " %14.9f seconds time elapsed.\n",
+ (double)walltime_nsecs_avg/1e9);
fprintf(stderr, "\n");
-
- return 0;
}
static volatile int signr = -1;
@@ -332,11 +471,15 @@
"scale/normalize counters"),
OPT_BOOLEAN('v', "verbose", &verbose,
"be more verbose (show counter open errors, etc)"),
+ OPT_INTEGER('r', "repeat", &run_count,
+ "repeat command and print average + stddev (max: 100)"),
OPT_END()
};
int cmd_stat(int argc, const char **argv, const char *prefix)
{
+ int status;
+
page_size = sysconf(_SC_PAGE_SIZE);
memcpy(attrs, default_attrs, sizeof(attrs));
@@ -344,6 +487,8 @@
argc = parse_options(argc, argv, options, stat_usage, 0);
if (!argc)
usage_with_options(stat_usage, options);
+ if (run_count <= 0 || run_count > MAX_RUN)
+ usage_with_options(stat_usage, options);
if (!nr_counters)
nr_counters = 8;
@@ -363,5 +508,14 @@
signal(SIGALRM, skip_signal);
signal(SIGABRT, skip_signal);
- return do_perf_stat(argc, argv);
+ status = 0;
+ for (run_idx = 0; run_idx < run_count; run_idx++) {
+ if (run_count != 1 && verbose)
+ fprintf(stderr, "[ perf stat: executing run #%d ... ]\n", run_idx+1);
+ status = run_perf_stat(argc, argv);
+ }
+
+ print_stat(argc, argv);
+
+ return status;
}
diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c
index fe338d3..5352b5e 100644
--- a/tools/perf/builtin-top.c
+++ b/tools/perf/builtin-top.c
@@ -54,7 +54,7 @@
static int default_interval = 100000;
-static __u64 count_filter = 5;
+static u64 count_filter = 5;
static int print_entries = 15;
static int target_pid = -1;
@@ -79,8 +79,8 @@
* Symbols
*/
-static __u64 min_ip;
-static __u64 max_ip = -1ll;
+static u64 min_ip;
+static u64 max_ip = -1ll;
struct sym_entry {
struct rb_node rb_node;
@@ -194,7 +194,7 @@
100.0 - (100.0*((samples_per_sec-ksamples_per_sec)/samples_per_sec)));
if (nr_counters == 1) {
- printf("%Ld", attrs[0].sample_period);
+ printf("%Ld", (u64)attrs[0].sample_period);
if (freq)
printf("Hz ");
else
@@ -372,7 +372,7 @@
/*
* Binary search in the histogram table and record the hit:
*/
-static void record_ip(__u64 ip, int counter)
+static void record_ip(u64 ip, int counter)
{
struct symbol *sym = dso__find_symbol(kernel_dso, ip);
@@ -392,7 +392,7 @@
samples--;
}
-static void process_event(__u64 ip, int counter)
+static void process_event(u64 ip, int counter)
{
samples++;
@@ -463,15 +463,15 @@
for (; old != head;) {
struct ip_event {
struct perf_event_header header;
- __u64 ip;
- __u32 pid, target_pid;
+ u64 ip;
+ u32 pid, target_pid;
};
struct mmap_event {
struct perf_event_header header;
- __u32 pid, target_pid;
- __u64 start;
- __u64 len;
- __u64 pgoff;
+ u32 pid, target_pid;
+ u64 start;
+ u64 len;
+ u64 pgoff;
char filename[PATH_MAX];
};
diff --git a/tools/perf/perf.h b/tools/perf/perf.h
index 87a1aca..ceb68aa 100644
--- a/tools/perf/perf.h
+++ b/tools/perf/perf.h
@@ -13,12 +13,19 @@
#define cpu_relax() asm volatile ("" ::: "memory");
#endif
+#ifdef __s390__
+#include "../../arch/s390/include/asm/unistd.h"
+#define rmb() asm volatile("bcr 15,0" ::: "memory")
+#define cpu_relax() asm volatile("" ::: "memory");
+#endif
+
#include <time.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/syscall.h>
#include "../../include/linux/perf_counter.h"
+#include "types.h"
/*
* prctl(PR_TASK_PERF_COUNTERS_DISABLE) will (cheaply) disable all
@@ -65,4 +72,10 @@
#define MAX_COUNTERS 256
#define MAX_NR_CPUS 256
+struct perf_file_header {
+ u64 version;
+ u64 sample_type;
+ u64 data_size;
+};
+
#endif
diff --git a/tools/perf/types.h b/tools/perf/types.h
new file mode 100644
index 0000000..5e75f90
--- /dev/null
+++ b/tools/perf/types.h
@@ -0,0 +1,17 @@
+#ifndef _PERF_TYPES_H
+#define _PERF_TYPES_H
+
+/*
+ * We define u64 as unsigned long long for every architecture
+ * so that we can print it with %Lx without getting warnings.
+ */
+typedef unsigned long long u64;
+typedef signed long long s64;
+typedef unsigned int u32;
+typedef signed int s32;
+typedef unsigned short u16;
+typedef signed short s16;
+typedef unsigned char u8;
+typedef signed char s8;
+
+#endif /* _PERF_TYPES_H */
diff --git a/tools/perf/util/ctype.c b/tools/perf/util/ctype.c
index b90ec00..0b791bd 100644
--- a/tools/perf/util/ctype.c
+++ b/tools/perf/util/ctype.c
@@ -11,16 +11,21 @@
D = GIT_DIGIT,
G = GIT_GLOB_SPECIAL, /* *, ?, [, \\ */
R = GIT_REGEX_SPECIAL, /* $, (, ), +, ., ^, {, | * */
+ P = GIT_PRINT_EXTRA, /* printable - alpha - digit - glob - regex */
+
+ PS = GIT_SPACE | GIT_PRINT_EXTRA,
};
unsigned char sane_ctype[256] = {
+/* 0 1 2 3 4 5 6 7 8 9 A B C D E F */
+
0, 0, 0, 0, 0, 0, 0, 0, 0, S, S, 0, 0, S, 0, 0, /* 0.. 15 */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 16.. 31 */
- S, 0, 0, 0, R, 0, 0, 0, R, R, G, R, 0, 0, R, 0, /* 32.. 47 */
- D, D, D, D, D, D, D, D, D, D, 0, 0, 0, 0, 0, G, /* 48.. 63 */
- 0, A, A, A, A, A, A, A, A, A, A, A, A, A, A, A, /* 64.. 79 */
- A, A, A, A, A, A, A, A, A, A, A, G, G, 0, R, 0, /* 80.. 95 */
- 0, A, A, A, A, A, A, A, A, A, A, A, A, A, A, A, /* 96..111 */
- A, A, A, A, A, A, A, A, A, A, A, R, R, 0, 0, 0, /* 112..127 */
+ PS,P, P, P, R, P, P, P, R, R, G, R, P, P, R, P, /* 32.. 47 */
+ D, D, D, D, D, D, D, D, D, D, P, P, P, P, P, G, /* 48.. 63 */
+ P, A, A, A, A, A, A, A, A, A, A, A, A, A, A, A, /* 64.. 79 */
+ A, A, A, A, A, A, A, A, A, A, A, G, G, P, R, P, /* 80.. 95 */
+ P, A, A, A, A, A, A, A, A, A, A, A, A, A, A, A, /* 96..111 */
+ A, A, A, A, A, A, A, A, A, A, A, R, R, P, P, 0, /* 112..127 */
/* Nothing in the 128.. range */
};
diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c
index 5a72586..35d04da 100644
--- a/tools/perf/util/parse-events.c
+++ b/tools/perf/util/parse-events.c
@@ -13,8 +13,8 @@
struct perf_counter_attr attrs[MAX_COUNTERS];
struct event_symbol {
- __u8 type;
- __u64 config;
+ u8 type;
+ u64 config;
char *symbol;
};
@@ -63,8 +63,8 @@
};
static char *sw_event_names[] = {
- "cpu-clock-ticks",
- "task-clock-ticks",
+ "cpu-clock-msecs",
+ "task-clock-msecs",
"page-faults",
"context-switches",
"CPU-migrations",
@@ -96,7 +96,7 @@
char *event_name(int counter)
{
- __u64 config = attrs[counter].config;
+ u64 config = attrs[counter].config;
int type = attrs[counter].type;
static char buf[32];
@@ -112,7 +112,7 @@
return "unknown-hardware";
case PERF_TYPE_HW_CACHE: {
- __u8 cache_type, cache_op, cache_result;
+ u8 cache_type, cache_op, cache_result;
static char name[100];
cache_type = (config >> 0) & 0xff;
@@ -202,7 +202,7 @@
*/
static int parse_event_symbols(const char *str, struct perf_counter_attr *attr)
{
- __u64 config, id;
+ u64 config, id;
int type;
unsigned int i;
const char *sep, *pstr;
diff --git a/tools/perf/util/string.c b/tools/perf/util/string.c
index ec33c0c..c93eca9 100644
--- a/tools/perf/util/string.c
+++ b/tools/perf/util/string.c
@@ -15,7 +15,7 @@
* While we find nice hex chars, build a long_val.
* Return number of chars processed.
*/
-int hex2u64(const char *ptr, __u64 *long_val)
+int hex2u64(const char *ptr, u64 *long_val)
{
const char *p = ptr;
*long_val = 0;
diff --git a/tools/perf/util/string.h b/tools/perf/util/string.h
index 72812c1..37b0325 100644
--- a/tools/perf/util/string.h
+++ b/tools/perf/util/string.h
@@ -1,8 +1,8 @@
#ifndef _PERF_STRING_H_
#define _PERF_STRING_H_
-#include <linux/types.h>
+#include "../types.h"
-int hex2u64(const char *ptr, __u64 *val);
+int hex2u64(const char *ptr, u64 *val);
#endif
diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c
index 49a55f8..86e1437 100644
--- a/tools/perf/util/symbol.c
+++ b/tools/perf/util/symbol.c
@@ -9,9 +9,9 @@
const char *sym_hist_filter;
-static struct symbol *symbol__new(__u64 start, __u64 len,
+static struct symbol *symbol__new(u64 start, u64 len,
const char *name, unsigned int priv_size,
- __u64 obj_start, int verbose)
+ u64 obj_start, int verbose)
{
size_t namelen = strlen(name) + 1;
struct symbol *self = calloc(1, priv_size + sizeof(*self) + namelen);
@@ -21,14 +21,14 @@
if (verbose >= 2)
printf("new symbol: %016Lx [%08lx]: %s, hist: %p, obj_start: %p\n",
- (__u64)start, (unsigned long)len, name, self->hist, (void *)(unsigned long)obj_start);
+ (u64)start, (unsigned long)len, name, self->hist, (void *)(unsigned long)obj_start);
self->obj_start= obj_start;
self->hist = NULL;
self->hist_sum = 0;
if (sym_hist_filter && !strcmp(name, sym_hist_filter))
- self->hist = calloc(sizeof(__u64), len);
+ self->hist = calloc(sizeof(u64), len);
if (priv_size) {
memset(self, 0, priv_size);
@@ -89,7 +89,7 @@
{
struct rb_node **p = &self->syms.rb_node;
struct rb_node *parent = NULL;
- const __u64 ip = sym->start;
+ const u64 ip = sym->start;
struct symbol *s;
while (*p != NULL) {
@@ -104,7 +104,7 @@
rb_insert_color(&sym->rb_node, &self->syms);
}
-struct symbol *dso__find_symbol(struct dso *self, __u64 ip)
+struct symbol *dso__find_symbol(struct dso *self, u64 ip)
{
struct rb_node *n;
@@ -151,7 +151,7 @@
goto out_failure;
while (!feof(file)) {
- __u64 start;
+ u64 start;
struct symbol *sym;
int line_len, len;
char symbol_type;
@@ -232,7 +232,7 @@
goto out_failure;
while (!feof(file)) {
- __u64 start, size;
+ u64 start, size;
struct symbol *sym;
int line_len, len;
@@ -353,7 +353,7 @@
{
uint32_t nr_rel_entries, idx;
GElf_Sym sym;
- __u64 plt_offset;
+ u64 plt_offset;
GElf_Shdr shdr_plt;
struct symbol *f;
GElf_Shdr shdr_rel_plt;
@@ -523,7 +523,7 @@
elf_symtab__for_each_symbol(syms, nr_syms, index, sym) {
struct symbol *f;
- __u64 obj_start;
+ u64 obj_start;
if (!elf_sym__is_function(&sym))
continue;
diff --git a/tools/perf/util/symbol.h b/tools/perf/util/symbol.h
index 0d1292b..ea332e5 100644
--- a/tools/perf/util/symbol.h
+++ b/tools/perf/util/symbol.h
@@ -2,16 +2,18 @@
#define _PERF_SYMBOL_ 1
#include <linux/types.h>
+#include "../types.h"
#include "list.h"
#include "rbtree.h"
struct symbol {
struct rb_node rb_node;
- __u64 start;
- __u64 end;
- __u64 obj_start;
- __u64 hist_sum;
- __u64 *hist;
+ u64 start;
+ u64 end;
+ u64 obj_start;
+ u64 hist_sum;
+ u64 *hist;
+ void *priv;
char name[0];
};
@@ -19,7 +21,7 @@
struct list_head node;
struct rb_root syms;
unsigned int sym_priv_size;
- struct symbol *(*find_symbol)(struct dso *, __u64 ip);
+ struct symbol *(*find_symbol)(struct dso *, u64 ip);
char name[0];
};
@@ -35,7 +37,7 @@
return ((void *)sym) - self->sym_priv_size;
}
-struct symbol *dso__find_symbol(struct dso *self, __u64 ip);
+struct symbol *dso__find_symbol(struct dso *self, u64 ip);
int dso__load_kernel(struct dso *self, const char *vmlinux,
symbol_filter_t filter, int verbose);
diff --git a/tools/perf/util/util.h b/tools/perf/util/util.h
index 76590a1..b8cfed7 100644
--- a/tools/perf/util/util.h
+++ b/tools/perf/util/util.h
@@ -100,11 +100,6 @@
#include <iconv.h>
#endif
-#ifndef NO_OPENSSL
-#include <openssl/ssl.h>
-#include <openssl/err.h>
-#endif
-
/* On most systems <limits.h> would have given us this, but
* not on some systems (e.g. GNU/Hurd).
*/
@@ -332,17 +327,20 @@
#undef tolower
#undef toupper
extern unsigned char sane_ctype[256];
-#define GIT_SPACE 0x01
-#define GIT_DIGIT 0x02
-#define GIT_ALPHA 0x04
-#define GIT_GLOB_SPECIAL 0x08
-#define GIT_REGEX_SPECIAL 0x10
+#define GIT_SPACE 0x01
+#define GIT_DIGIT 0x02
+#define GIT_ALPHA 0x04
+#define GIT_GLOB_SPECIAL 0x08
+#define GIT_REGEX_SPECIAL 0x10
+#define GIT_PRINT_EXTRA 0x20
+#define GIT_PRINT 0x3E
#define sane_istest(x,mask) ((sane_ctype[(unsigned char)(x)] & (mask)) != 0)
#define isascii(x) (((x) & ~0x7f) == 0)
#define isspace(x) sane_istest(x,GIT_SPACE)
#define isdigit(x) sane_istest(x,GIT_DIGIT)
#define isalpha(x) sane_istest(x,GIT_ALPHA)
#define isalnum(x) sane_istest(x,GIT_ALPHA | GIT_DIGIT)
+#define isprint(x) sane_istest(x,GIT_PRINT)
#define is_glob_special(x) sane_istest(x,GIT_GLOB_SPECIAL)
#define is_regex_special(x) sane_istest(x,GIT_GLOB_SPECIAL | GIT_REGEX_SPECIAL)
#define tolower(x) sane_case((unsigned char)(x), 0x20)