Merge branch 'x86-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'x86-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
x86, delay: tsc based udelay should have rdtsc_barrier
x86, setup: correct include file in <asm/boot.h>
x86, setup: Fix typo "CONFIG_x86_64" in <asm/boot.h>
x86, mce: percpu mcheck_timer should be pinned
x86: Add sysctl to allow panic on IOCK NMI error
x86: Fix uv bau sending buffer initialization
x86, mce: Fix mce resume on 32bit
x86: Move init_gbpages() to setup_arch()
x86: ensure percpu lpage doesn't consume too much vmalloc space
x86: implement percpu_alloc kernel parameter
x86: fix pageattr handling for lpage percpu allocator and re-enable it
x86: reorganize cpa_process_alias()
x86: prepare setup_pcpu_lpage() for pageattr fix
x86: rename remap percpu first chunk allocator to lpage
x86: fix duplicate free in setup_pcpu_remap() failure path
percpu: fix too lazy vunmap cache flushing
x86: Set cpu_llc_id on AMD CPUs
diff --git a/Documentation/ABI/testing/sysfs-bus-pci b/Documentation/ABI/testing/sysfs-bus-pci
index 97ad190..6bf6805 100644
--- a/Documentation/ABI/testing/sysfs-bus-pci
+++ b/Documentation/ABI/testing/sysfs-bus-pci
@@ -122,3 +122,10 @@
This symbolic link appears when a device is a Virtual Function.
The symbolic link points to the PCI device sysfs entry of the
Physical Function this device associates with.
+
+What: /sys/bus/pci/slots/.../module
+Date: June 2009
+Contact: linux-pci@vger.kernel.org
+Description:
+ This symbolic link points to the PCI hotplug controller driver
+ module that manages the hotplug slot.
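+
+		For example, the managing driver module for the hotplug
+		slots can be looked up from a shell (an illustrative
+		sketch; slot names are platform-dependent):
+
+		# readlink /sys/bus/pci/slots/*/module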
diff --git a/Documentation/ABI/testing/sysfs-class-mtd b/Documentation/ABI/testing/sysfs-class-mtd
new file mode 100644
index 0000000..4d55a18
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-class-mtd
@@ -0,0 +1,125 @@
+What: /sys/class/mtd/
+Date: April 2009
+KernelVersion: 2.6.29
+Contact: linux-mtd@lists.infradead.org
+Description:
+ The mtd/ class subdirectory belongs to the MTD subsystem
+ (MTD core).
+
+What: /sys/class/mtd/mtdX/
+Date: April 2009
+KernelVersion: 2.6.29
+Contact: linux-mtd@lists.infradead.org
+Description:
+ The /sys/class/mtd/mtd{0,1,2,3,...} directories correspond
+ to each /dev/mtdX character device. These may represent
+ physical/simulated flash devices, partitions on a flash
+ device, or concatenated flash devices. They exist regardless
+ of whether CONFIG_MTD_CHAR is actually enabled.
+
+What: /sys/class/mtd/mtdXro/
+Date: April 2009
+KernelVersion: 2.6.29
+Contact: linux-mtd@lists.infradead.org
+Description:
+ These directories provide the corresponding read-only device
+ nodes for /sys/class/mtd/mtdX/ . They are only created
+ (for the benefit of udev) if CONFIG_MTD_CHAR is enabled.
+
+What: /sys/class/mtd/mtdX/dev
+Date: April 2009
+KernelVersion: 2.6.29
+Contact: linux-mtd@lists.infradead.org
+Description:
+ Major and minor numbers of the character device corresponding
+ to this MTD device (in <major>:<minor> format). This is the
+ read-write device so <minor> will be even.
+
+What: /sys/class/mtd/mtdXro/dev
+Date: April 2009
+KernelVersion: 2.6.29
+Contact: linux-mtd@lists.infradead.org
+Description:
+ Major and minor numbers of the character device corresponding
+		to the read-only variant of this MTD device (in
+ <major>:<minor> format). In this case <minor> will be odd.
+
+What: /sys/class/mtd/mtdX/erasesize
+Date: April 2009
+KernelVersion: 2.6.29
+Contact: linux-mtd@lists.infradead.org
+Description:
+ "Major" erase size for the device. If numeraseregions is
+ zero, this is the eraseblock size for the entire device.
+ Otherwise, the MEMGETREGIONCOUNT/MEMGETREGIONINFO ioctls
+ can be used to determine the actual eraseblock layout.
+
+What: /sys/class/mtd/mtdX/flags
+Date: April 2009
+KernelVersion: 2.6.29
+Contact: linux-mtd@lists.infradead.org
+Description:
+ A hexadecimal value representing the device flags, ORed
+ together:
+
+ 0x0400: MTD_WRITEABLE - device is writable
+ 0x0800: MTD_BIT_WRITEABLE - single bits can be flipped
+ 0x1000: MTD_NO_ERASE - no erase necessary
+ 0x2000: MTD_POWERUP_LOCK - always locked after reset
+
+What: /sys/class/mtd/mtdX/name
+Date: April 2009
+KernelVersion: 2.6.29
+Contact: linux-mtd@lists.infradead.org
+Description:
+ A human-readable ASCII name for the device or partition.
+ This will match the name in /proc/mtd .
+
+What: /sys/class/mtd/mtdX/numeraseregions
+Date: April 2009
+KernelVersion: 2.6.29
+Contact: linux-mtd@lists.infradead.org
+Description:
+ For devices that have variable eraseblock sizes, this
+ provides the total number of erase regions. Otherwise,
+ it will read back as zero.
+
+What: /sys/class/mtd/mtdX/oobsize
+Date: April 2009
+KernelVersion: 2.6.29
+Contact: linux-mtd@lists.infradead.org
+Description:
+ Number of OOB bytes per page.
+
+What: /sys/class/mtd/mtdX/size
+Date: April 2009
+KernelVersion: 2.6.29
+Contact: linux-mtd@lists.infradead.org
+Description:
+ Total size of the device/partition, in bytes.
+
+What: /sys/class/mtd/mtdX/type
+Date: April 2009
+KernelVersion: 2.6.29
+Contact: linux-mtd@lists.infradead.org
+Description:
+ One of the following ASCII strings, representing the device
+ type:
+
+ absent, ram, rom, nor, nand, dataflash, ubi, unknown
+
+What: /sys/class/mtd/mtdX/writesize
+Date: April 2009
+KernelVersion: 2.6.29
+Contact: linux-mtd@lists.infradead.org
+Description:
+ Minimal writable flash unit size. This will always be
+ a positive integer.
+
+ In the case of NOR flash it is 1 (even though individual
+ bits can be cleared).
+
+ In the case of NAND flash it is one NAND page (or a
+ half page, or a quarter page).
+
+ In the case of ECC NOR, it is the ECC block size.
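+
+		As an illustration, these attributes can be read from
+		userspace with ordinary file I/O (a sketch, assuming a
+		registered device mtd0):
+
+		# cat /sys/class/mtd/mtd0/name
+		# cat /sys/class/mtd/mtd0/erasesize
+		# cat /sys/class/mtd/mtd0/flags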
diff --git a/Documentation/PCI/pcieaer-howto.txt b/Documentation/PCI/pcieaer-howto.txt
index ddeb14b..be21001 100644
--- a/Documentation/PCI/pcieaer-howto.txt
+++ b/Documentation/PCI/pcieaer-howto.txt
@@ -61,6 +61,10 @@
walkaround, pls. add aerdriver.forceload=y to kernel boot parameter line
when booting kernel. Note that forceload=n by default.
+nosourceid, another parameter of type bool, can be used when broken
+hardware (mostly chipsets) has root ports that cannot obtain the reporting
+source ID. nosourceid=n by default.
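+
+For example, as with forceload above, this can be turned on by adding
+aerdriver.nosourceid=y to the kernel boot parameter line.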
+
2.3 AER error output
When a PCI-E AER error is captured, an error message will be outputed to
console. If it's a correctable error, it is outputed as a warning.
@@ -246,3 +250,24 @@
A: It could call the helper functions to enable AER in devices and
cleanup uncorrectable status register. Pls. refer to section 3.3.
+
+4. Software error injection
+
+Debugging PCIE AER error recovery code is quite difficult because it
+is hard to trigger real hardware errors. Software based error
+injection can be used to fake various kinds of PCIE errors.
+
+First you should enable PCIE AER software error injection in the kernel
+configuration, that is, the following item should be in your .config:
+
+CONFIG_PCIEAER_INJECT=y or CONFIG_PCIEAER_INJECT=m
+
+After rebooting with the new kernel or inserting the module, a device
+file named /dev/aer_inject should be created.
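+
+For example (a sketch; the injector module is assumed here to be named
+aer_inject when built as a module):
+
+	# modprobe aer_inject
+	# ls -l /dev/aer_inject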
+
+Then, you need a user space tool named aer-inject, which can be
+obtained from:
+ http://www.kernel.org/pub/linux/utils/pci/aer-inject/
+
+More information about aer-inject can be found in the documentation
+that comes with its source code.
diff --git a/Documentation/device-mapper/dm-log.txt b/Documentation/device-mapper/dm-log.txt
new file mode 100644
index 0000000..994dd75
--- /dev/null
+++ b/Documentation/device-mapper/dm-log.txt
@@ -0,0 +1,54 @@
+Device-Mapper Logging
+=====================
+The device-mapper logging code is used by some of the device-mapper
+RAID targets to track regions of the disk that are not consistent.
+A region (or portion of the address space) of the disk may be
+inconsistent because a RAID stripe is currently being operated on or
+a machine died while the region was being altered. In the case of
+mirrors, a region would be considered dirty/inconsistent while you
+are writing to it because the writes need to be replicated for all
+the legs of the mirror and may not reach the legs at the same time.
+Once all writes are complete, the region is considered clean again.
+
+There is a generic logging interface that the device-mapper RAID
+implementations use to perform logging operations (see
+dm_dirty_log_type in include/linux/dm-dirty-log.h). Various different
+logging implementations are available and provide different
+capabilities. The list includes:
+
+Type Files
+==== =====
+disk drivers/md/dm-log.c
+core drivers/md/dm-log.c
+userspace drivers/md/dm-log-userspace* include/linux/dm-log-userspace.h
+
+The "disk" log type
+-------------------
+This log implementation commits the log state to disk. This way, the
+logging state survives reboots/crashes.
+
+The "core" log type
+-------------------
+This log implementation keeps the log state in memory. The log state
+will not survive a reboot or crash, but there may be a small boost in
+performance. This method can also be used if no storage device is
+available for storing log state.
+
+The "userspace" log type
+------------------------
+This log type simply provides a way to export the log API to userspace,
+so log implementations can be done there. This is done by forwarding most
+logging requests to userspace, where a daemon receives and processes the
+request.
+
+The structures used for communication between kernel and userspace are
+located in include/linux/dm-log-userspace.h. Due to the frequency,
+diversity, and 2-way communication nature of the exchanges between
+kernel and userspace, 'connector' is used as the interface for
+communication.
+
+There are currently two userspace log implementations that leverage this
+framework - "clustered_disk" and "clustered_core". These implementations
+provide a cluster-coherent log for shared-storage. Device-mapper mirroring
+can be used in a shared-storage environment when the cluster log implementations
+are employed.
diff --git a/Documentation/device-mapper/dm-queue-length.txt b/Documentation/device-mapper/dm-queue-length.txt
new file mode 100644
index 0000000..f4db256
--- /dev/null
+++ b/Documentation/device-mapper/dm-queue-length.txt
@@ -0,0 +1,39 @@
+dm-queue-length
+===============
+
+dm-queue-length is a path selector module for device-mapper targets,
+which selects a path with the least number of in-flight I/Os.
+The path selector name is 'queue-length'.
+
+Table parameters for each path: [<repeat_count>]
+ <repeat_count>: The number of I/Os to dispatch using the selected
+ path before switching to the next path.
+ If not given, internal default is used. To check
+ the default value, see the activated table.
+
+Status for each path: <status> <fail-count> <in-flight>
+ <status>: 'A' if the path is active, 'F' if the path is failed.
+ <fail-count>: The number of path failures.
+ <in-flight>: The number of in-flight I/Os on the path.
+
+
+Algorithm
+=========
+
+dm-queue-length increments/decrements 'in-flight' when an I/O is
+dispatched/completed respectively.
+dm-queue-length selects a path with the minimum 'in-flight'.
+
+
+Examples
+========
+In a case where 2 paths (sda and sdb) are used with repeat_count == 128:
+
+# echo "0 10 multipath 0 0 1 1 queue-length 0 2 1 8:0 128 8:16 128" \
+ dmsetup create test
+#
+# dmsetup table
+test: 0 10 multipath 0 0 1 1 queue-length 0 2 1 8:0 128 8:16 128
+#
+# dmsetup status
+test: 0 10 multipath 2 0 0 0 1 1 E 0 2 1 8:0 A 0 0 8:16 A 0 0
diff --git a/Documentation/device-mapper/dm-service-time.txt b/Documentation/device-mapper/dm-service-time.txt
new file mode 100644
index 0000000..7d00668
--- /dev/null
+++ b/Documentation/device-mapper/dm-service-time.txt
@@ -0,0 +1,91 @@
+dm-service-time
+===============
+
+dm-service-time is a path selector module for device-mapper targets,
+which selects a path with the shortest estimated service time for
+the incoming I/O.
+
+The service time for each path is estimated by dividing the total size
+of in-flight I/Os on a path with the performance value of the path.
+The performance value is a relative throughput value among all paths
+in a path-group, and it can be specified as a table argument.
+
+The path selector name is 'service-time'.
+
+Table parameters for each path: [<repeat_count> [<relative_throughput>]]
+ <repeat_count>: The number of I/Os to dispatch using the selected
+ path before switching to the next path.
+ If not given, internal default is used. To check
+ the default value, see the activated table.
+ <relative_throughput>: The relative throughput value of the path
+ among all paths in the path-group.
+ The valid range is 0-100.
+ If not given, minimum value '1' is used.
+ If '0' is given, the path isn't selected while
+ other paths having a positive value are available.
+
+Status for each path: <status> <fail-count> <in-flight-size> \
+ <relative_throughput>
+ <status>: 'A' if the path is active, 'F' if the path is failed.
+ <fail-count>: The number of path failures.
+ <in-flight-size>: The size of in-flight I/Os on the path.
+ <relative_throughput>: The relative throughput value of the path
+ among all paths in the path-group.
+
+
+Algorithm
+=========
+
+dm-service-time adds the I/O size to 'in-flight-size' when the I/O is
+dispatched and subtracts it when completed.
+Basically, dm-service-time selects a path having minimum service time
+which is calculated by:
+
+ ('in-flight-size' + 'size-of-incoming-io') / 'relative_throughput'
+
+However, some optimizations below are used to reduce the calculation
+as much as possible.
+
+ 1. If the paths have the same 'relative_throughput', skip
+ the division and just compare the 'in-flight-size'.
+
+ 2. If the paths have the same 'in-flight-size', skip the division
+ and just compare the 'relative_throughput'.
+
+ 3. If some paths have non-zero 'relative_throughput' and others
+ have zero 'relative_throughput', ignore those paths with zero
+ 'relative_throughput'.
+
+If none of these optimizations can be applied, the service time is
+calculated and compared.
+If the calculated service times are equal, the path having the maximum
+'relative_throughput' may be better, so 'relative_throughput' is
+compared in that case.
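+
+As an illustration with made-up numbers: for an incoming 64KB I/O, a
+path A with 'in-flight-size' 1024KB and 'relative_throughput' 1, and a
+path B with 'in-flight-size' 3072KB and 'relative_throughput' 4:
+
+    path A: (1024 + 64) / 1 = 1088
+    path B: (3072 + 64) / 4 = 784
+
+so path B is selected even though it has more I/O in flight.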
+
+
+Examples
+========
+In a case where 2 paths (sda and sdb) are used with repeat_count == 128,
+sda has an average throughput of 1GB/s and sdb has 4GB/s, the
+'relative_throughput' value may be '1' for sda and '4' for sdb.
+
+# echo "0 10 multipath 0 0 1 1 service-time 0 2 2 8:0 128 1 8:16 128 4" \
+ dmsetup create test
+#
+# dmsetup table
+test: 0 10 multipath 0 0 1 1 service-time 0 2 2 8:0 128 1 8:16 128 4
+#
+# dmsetup status
+test: 0 10 multipath 2 0 0 0 1 1 E 0 2 2 8:0 A 0 0 1 8:16 A 0 0 4
+
+
+Or '2' for sda and '8' for sdb would also work.
+
+# echo "0 10 multipath 0 0 1 1 service-time 0 2 2 8:0 128 2 8:16 128 8" \
+ dmsetup create test
+#
+# dmsetup table
+test: 0 10 multipath 0 0 1 1 service-time 0 2 2 8:0 128 2 8:16 128 8
+#
+# dmsetup status
+test: 0 10 multipath 2 0 0 0 1 1 E 0 2 2 8:0 A 0 0 2 8:16 A 0 0 8
diff --git a/Documentation/filesystems/00-INDEX b/Documentation/filesystems/00-INDEX
index 8dd6db7..f15621e 100644
--- a/Documentation/filesystems/00-INDEX
+++ b/Documentation/filesystems/00-INDEX
@@ -66,6 +66,10 @@
- info on the Linux implementation of Sys V mandatory file locking.
ncpfs.txt
- info on Novell Netware(tm) filesystem using NCP protocol.
+nfs41-server.txt
+ - info on the Linux server implementation of NFSv4 minor version 1.
+nfs-rdma.txt
+ - how to install and setup the Linux NFS/RDMA client and server software.
nfsroot.txt
- short guide on setting up a diskless box with NFS root filesystem.
nilfs2.txt
diff --git a/Documentation/filesystems/Locking b/Documentation/filesystems/Locking
index 229d7b7..18b9d0c 100644
--- a/Documentation/filesystems/Locking
+++ b/Documentation/filesystems/Locking
@@ -109,27 +109,28 @@
locking rules:
All may block.
- BKL s_lock s_umount
-alloc_inode: no no no
-destroy_inode: no
-dirty_inode: no (must not sleep)
-write_inode: no
-drop_inode: no !!!inode_lock!!!
-delete_inode: no
-put_super: yes yes no
-write_super: no yes read
-sync_fs: no no read
-freeze_fs: ?
-unfreeze_fs: ?
-statfs: no no no
-remount_fs: yes yes maybe (see below)
-clear_inode: no
-umount_begin: yes no no
-show_options: no (vfsmount->sem)
-quota_read: no no no (see below)
-quota_write: no no no (see below)
+ None have BKL
+ s_umount
+alloc_inode:
+destroy_inode:
+dirty_inode: (must not sleep)
+write_inode:
+drop_inode: !!!inode_lock!!!
+delete_inode:
+put_super: write
+write_super: read
+sync_fs: read
+freeze_fs: read
+unfreeze_fs: read
+statfs: no
+remount_fs: maybe (see below)
+clear_inode:
+umount_begin: no
+show_options: no (namespace_sem)
+quota_read: no (see below)
+quota_write: no (see below)
-->remount_fs() will have the s_umount lock if it's already mounted.
+->remount_fs() will have the s_umount exclusive lock if it's already mounted.
When called from get_sb_single, it does NOT have the s_umount lock.
->quota_read() and ->quota_write() functions are both guaranteed to
be the only ones operating on the quota file by the quota code (via
diff --git a/Documentation/isdn/00-INDEX b/Documentation/isdn/00-INDEX
index f6010a5..e87e336 100644
--- a/Documentation/isdn/00-INDEX
+++ b/Documentation/isdn/00-INDEX
@@ -14,25 +14,14 @@
- general info on what you need and what to do for Linux ISDN.
README.FAQ
- general info for FAQ.
-README.audio
- - info for running audio over ISDN.
-README.fax
- - info for using Fax over ISDN.
-README.gigaset
- - info on the drivers for Siemens Gigaset ISDN adapters.
-README.icn
- - info on the ICN-ISDN-card and its driver.
->>>>>>> 93af7aca44f0e82e67bda10a0fb73d383edcc8bd:Documentation/isdn/00-INDEX
README.HiSax
- info on the HiSax driver which replaces the old teles.
+README.act2000
+ - info on driver for IBM ACT-2000 card.
README.audio
- info for running audio over ISDN.
README.avmb1
- info on driver for AVM-B1 ISDN card.
-README.act2000
- - info on driver for IBM ACT-2000 card.
-README.eicon
- - info on driver for Eicon active cards.
README.concap
- info on "CONCAP" encapsulation protocol interface used for X.25.
README.diversion
@@ -59,7 +48,3 @@
- info for running X.25 over ISDN.
syncPPP.FAQ
- frequently asked questions about running PPP over ISDN.
-README.hysdn
- - info on driver for Hypercope active HYSDN cards
-README.mISDN
- - info on the Modular ISDN subsystem (mISDN).
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index ecad946..d77fbd8 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -229,14 +229,6 @@
to assume that this machine's pmtimer latches its value
and always returns good values.
- acpi.power_nocheck= [HW,ACPI]
- Format: 1/0 enable/disable the check of power state.
- On some bogus BIOS the _PSC object/_STA object of
- power resource can't return the correct device power
- state. In such case it is unneccessary to check its
- power state again in power transition.
- 1 : disable the power state check
-
acpi_sci= [HW,ACPI] ACPI System Control Interrupt trigger mode
Format: { level | edge | high | low }
@@ -1006,6 +998,7 @@
nomerge
forcesac
soft
+ pt [x86, IA64]
io7= [HW] IO7 for Marvel based alpha systems
See comment before marvel_specify_io7 in
@@ -1369,6 +1362,27 @@
min_addr=nn[KMG] [KNL,BOOT,ia64] All physical memory below this
physical address is ignored.
+ mini2440= [ARM,HW,KNL]
+ Format:[0..2][b][c][t]
+ Default: "0tb"
+ MINI2440 configuration specification:
+ 0 - The attached screen is the 3.5" TFT
+ 1 - The attached screen is the 7" TFT
+ 2 - The VGA Shield is attached (1024x768)
+ Leaving out the screen size parameter will not load
+ the TFT driver, and the framebuffer will be left
+ unconfigured.
+ b - Enable backlight. The TFT backlight pin will be
+ linked to the kernel VESA blanking code and a GPIO
+ LED. This parameter is not necessary when using the
+ VGA shield.
+ c - Enable the s3c camera interface.
+ t - Reserved for enabling touchscreen support. The
+ touchscreen support is not enabled in the mainstream
+		kernel as of 2.6.30; a preliminary port can be found
+ in the "bleeding edge" mini2440 support kernel at
+ http://repo.or.cz/w/linux-2.6/mini2440.git
+
mminit_loglevel=
[KNL] When CONFIG_DEBUG_MEMORY_INIT is set, this
parameter allows control of the logging verbosity for
@@ -1410,6 +1424,16 @@
mtdparts= [MTD]
See drivers/mtd/cmdlinepart.c.
+ onenand.bdry= [HW,MTD] Flex-OneNAND Boundary Configuration
+
+ Format: [die0_boundary][,die0_lock][,die1_boundary][,die1_lock]
+
+ boundary - index of last SLC block on Flex-OneNAND.
+ The remaining blocks are configured as MLC blocks.
+ lock - Configure if Flex-OneNAND boundary should be locked.
+ Once locked, the boundary cannot be changed.
+ 1 indicates lock status, 0 indicates unlock status.
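+
+		A hypothetical example: "onenand.bdry=2,1,3,0" configures
+		blocks 0-2 of die 0 as SLC with that boundary locked, and
+		blocks 0-3 of die 1 as SLC with the boundary left unlocked.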
+
mtdset= [ARM]
ARM/S3C2412 JIVE boot control
@@ -1776,6 +1800,9 @@
root domains (aka PCI segments, in ACPI-speak).
nommconf [X86] Disable use of MMCONFIG for PCI
Configuration
+ check_enable_amd_mmconf [X86] check for and enable
+ properly configured MMIO access to PCI
+ config space on AMD family 10h CPU
nomsi [MSI] If the PCI_MSI kernel config parameter is
enabled, this kernel boot option can be used to
disable the use of MSI interrupts system-wide.
@@ -1865,6 +1892,12 @@
PAGE_SIZE is used as alignment.
PCI-PCI bridge can be specified, if resource
windows need to be expanded.
+ ecrc= Enable/disable PCIe ECRC (transaction layer
+ end-to-end CRC checking).
+			bios: Use BIOS/firmware settings. This is the default.
+ off: Turn ECRC off
+ on: Turn ECRC on.
pcie_aspm= [PCIE] Forcibly enable or disable PCIe Active State Power
Management.
@@ -2440,7 +2473,8 @@
tp720= [HW,PS2]
- trace_buf_size=nn[KMG] [ftrace] will set tracing buffer size.
+ trace_buf_size=nn[KMG]
+ [FTRACE] will set tracing buffer size.
trix= [HW,OSS] MediaTrix AudioTrix Pro
Format:
diff --git a/Documentation/laptops/thinkpad-acpi.txt b/Documentation/laptops/thinkpad-acpi.txt
index 78e354b..f2296ec 100644
--- a/Documentation/laptops/thinkpad-acpi.txt
+++ b/Documentation/laptops/thinkpad-acpi.txt
@@ -920,7 +920,7 @@
echo '<LED number> off' >/proc/acpi/ibm/led
echo '<LED number> blink' >/proc/acpi/ibm/led
-The <LED number> range is 0 to 7. The set of LEDs that can be
+The <LED number> range is 0 to 15. The set of LEDs that can be
controlled varies from model to model. Here is the common ThinkPad
mapping:
@@ -932,6 +932,11 @@
5 - UltraBase battery slot
6 - (unknown)
7 - standby
+ 8 - dock status 1
+ 9 - dock status 2
+ 10, 11 - (unknown)
+ 12 - thinkvantage
+ 13, 14, 15 - (unknown)
All of the above can be turned on and off and can be made to blink.
@@ -940,10 +945,12 @@
The ThinkPad LED sysfs interface is described in detail by the LED class
documentation, in Documentation/leds-class.txt.
-The leds are named (in LED ID order, from 0 to 7):
+The LEDs are named (in LED ID order, from 0 to 12):
"tpacpi::power", "tpacpi:orange:batt", "tpacpi:green:batt",
"tpacpi::dock_active", "tpacpi::bay_active", "tpacpi::dock_batt",
-"tpacpi::unknown_led", "tpacpi::standby".
+"tpacpi::unknown_led", "tpacpi::standby", "tpacpi::dock_status1",
+"tpacpi::dock_status2", "tpacpi::unknown_led2", "tpacpi::unknown_led3",
+"tpacpi::thinkvantage".
Due to limitations in the sysfs LED class, if the status of the LED
indicators cannot be read due to an error, thinkpad-acpi will report it as
@@ -958,6 +965,12 @@
"timer" trigger, and leave the delay_on and delay_off parameters set to
zero (to request hardware acceleration autodetection).
+LEDs that are known not to exist in a given ThinkPad model are not
+made available through the sysfs interface. If you have a dock and you
+notice there are LEDs listed for your ThinkPad that do not exist (and
+are not in the dock), or if you notice that there are missing LEDs,
+a report to ibm-acpi-devel@lists.sourceforge.net is appreciated.
+
ACPI sounds -- /proc/acpi/ibm/beep
----------------------------------
@@ -1156,17 +1169,19 @@
display backlight brightness control methods have 16 levels, ranging
from 0 to 15.
-There are two interfaces to the firmware for direct brightness control,
-EC and UCMS (or CMOS). To select which one should be used, use the
-brightness_mode module parameter: brightness_mode=1 selects EC mode,
-brightness_mode=2 selects UCMS mode, brightness_mode=3 selects EC
-mode with NVRAM backing (so that brightness changes are remembered
-across shutdown/reboot).
+For IBM ThinkPads, there are two interfaces to the firmware for direct
+brightness control, EC and UCMS (or CMOS). To select which one should be
+used, use the brightness_mode module parameter: brightness_mode=1 selects
+EC mode, brightness_mode=2 selects UCMS mode, brightness_mode=3 selects EC
+mode with NVRAM backing (so that brightness changes are remembered across
+shutdown/reboot).
The driver tries to select which interface to use from a table of
defaults for each ThinkPad model. If it makes a wrong choice, please
report this as a bug, so that we can fix it.
+Lenovo ThinkPads only support brightness_mode=2 (UCMS).
+
When display backlight brightness controls are available through the
standard ACPI interface, it is best to use it instead of this direct
ThinkPad-specific interface. The driver will disable its native
@@ -1254,7 +1269,7 @@
procfs: /proc/acpi/ibm/fan
sysfs device attributes: (hwmon "thinkpad") fan1_input, pwm1,
- pwm1_enable
+ pwm1_enable, fan2_input
sysfs hwmon driver attributes: fan_watchdog
NOTE NOTE NOTE: fan control operations are disabled by default for
@@ -1267,6 +1282,9 @@
to work on later R, T, X and Z series ThinkPads but may show a bogus
value on other models.
+Some Lenovo ThinkPads support a secondary fan. This fan cannot be
+controlled separately; it shares the main fan control.
+
Fan levels:
Most ThinkPad fans work in "levels" at the firmware interface. Level 0
@@ -1397,6 +1415,11 @@
which can take up to two minutes. May return rubbish on older
ThinkPads.
+hwmon device attribute fan2_input:
+ Fan tachometer reading, in RPM, for the secondary fan.
+ Available only on some ThinkPads. If the secondary fan is
+ not installed, will always read 0.
+
hwmon driver attribute fan_watchdog:
Fan safety watchdog timer interval, in seconds. Minimum is
1 second, maximum is 120 seconds. 0 disables the watchdog.
@@ -1555,3 +1578,7 @@
0x020300: hotkey enable/disable support removed, attributes
hotkey_bios_enabled and hotkey_enable deprecated and
marked for removal.
+
+0x020400: Marker for 16 LEDs support. Also, LEDs that are known
+ to not exist in a given model are not registered with
+ the LED sysfs class anymore.
diff --git a/Documentation/leds-lp3944.txt b/Documentation/leds-lp3944.txt
new file mode 100644
index 0000000..c6eda18
--- /dev/null
+++ b/Documentation/leds-lp3944.txt
@@ -0,0 +1,50 @@
+Kernel driver lp3944
+====================
+
+ * National Semiconductor LP3944 Fun-light Chip
+ Prefix: 'lp3944'
+ Addresses scanned: None (see the Notes section below)
+ Datasheet: Publicly available at the National Semiconductor website
+ http://www.national.com/pf/LP/LP3944.html
+
+Authors:
+ Antonio Ospite <ospite@studenti.unina.it>
+
+
+Description
+-----------
+The LP3944 is a helper chip that can drive up to 8 LEDs, with two programmable
+DIM modes; it could even be used as a GPIO expander, but this driver assumes
+it is used as an LED controller.
+
+The DIM modes are used to set _blink_ patterns for LEDs; the pattern is
+specified by supplying two parameters:
+ - period: from 0s to 1.6s
+ - duty cycle: percentage of the period the led is on, from 0 to 100
+
+Setting an LED to DIM0 or DIM1 mode makes it blink according to the pattern.
+See the datasheet for details.
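+
+For example, once the driver has registered its LEDs with the LED class,
+a blink pattern can be requested from userspace through the standard
+timer trigger (a sketch; the LED name is hypothetical and comes from the
+platform data, and whether the pattern is offloaded to the DIM registers
+is up to the driver):
+
+	echo timer > /sys/class/leds/a910::red/trigger
+	echo 500   > /sys/class/leds/a910::red/delay_on
+	echo 1100  > /sys/class/leds/a910::red/delay_off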
+
+The LP3944 can be found on the Motorola A910 smartphone, where it drives the
+RGB LEDs, the camera flash light and the LCD power.
+
+
+Notes
+-----
+The chip is used mainly in embedded contexts, so this driver expects it to be
+registered using the i2c_board_info mechanism.
+
+To register the chip at address 0x60 on adapter 0, set the platform data
+according to include/linux/leds-lp3944.h and set the i2c board info:
+
+ static struct i2c_board_info __initdata a910_i2c_board_info[] = {
+ {
+ I2C_BOARD_INFO("lp3944", 0x60),
+ .platform_data = &a910_lp3944_leds,
+ },
+ };
+
+and register it in the platform init function
+
+ i2c_register_board_info(0, a910_i2c_board_info,
+ ARRAY_SIZE(a910_i2c_board_info));
diff --git a/Documentation/powerpc/booting-without-of.txt b/Documentation/powerpc/booting-without-of.txt
index 8d999d8..79f533f 100644
--- a/Documentation/powerpc/booting-without-of.txt
+++ b/Documentation/powerpc/booting-without-of.txt
@@ -1238,1122 +1238,7 @@
defined; this list will expand as more and more SOC-containing
platforms are moved over to use the flattened-device-tree model.
- a) PHY nodes
-
- Required properties:
-
- - device_type : Should be "ethernet-phy"
- - interrupts : <a b> where a is the interrupt number and b is a
- field that represents an encoding of the sense and level
- information for the interrupt. This should be encoded based on
- the information in section 2) depending on the type of interrupt
- controller you have.
- - interrupt-parent : the phandle for the interrupt controller that
- services interrupts for this device.
- - reg : The ID number for the phy, usually a small integer
- - linux,phandle : phandle for this node; likely referenced by an
- ethernet controller node.
-
-
- Example:
-
- ethernet-phy@0 {
- linux,phandle = <2452000>
- interrupt-parent = <40000>;
- interrupts = <35 1>;
- reg = <0>;
- device_type = "ethernet-phy";
- };
-
-
- b) Interrupt controllers
-
- Some SOC devices contain interrupt controllers that are different
- from the standard Open PIC specification. The SOC device nodes for
- these types of controllers should be specified just like a standard
- OpenPIC controller. Sense and level information should be encoded
- as specified in section 2) of this chapter for each device that
- specifies an interrupt.
-
- Example :
-
- pic@40000 {
- linux,phandle = <40000>;
- interrupt-controller;
- #address-cells = <0>;
- reg = <40000 40000>;
- compatible = "chrp,open-pic";
- device_type = "open-pic";
- };
-
- c) 4xx/Axon EMAC ethernet nodes
-
- The EMAC ethernet controller in IBM and AMCC 4xx chips, and also
- the Axon bridge. To operate this needs to interact with a ths
- special McMAL DMA controller, and sometimes an RGMII or ZMII
- interface. In addition to the nodes and properties described
- below, the node for the OPB bus on which the EMAC sits must have a
- correct clock-frequency property.
-
- i) The EMAC node itself
-
- Required properties:
- - device_type : "network"
-
- - compatible : compatible list, contains 2 entries, first is
- "ibm,emac-CHIP" where CHIP is the host ASIC (440gx,
- 405gp, Axon) and second is either "ibm,emac" or
- "ibm,emac4". For Axon, thus, we have: "ibm,emac-axon",
- "ibm,emac4"
- - interrupts : <interrupt mapping for EMAC IRQ and WOL IRQ>
- - interrupt-parent : optional, if needed for interrupt mapping
- - reg : <registers mapping>
- - local-mac-address : 6 bytes, MAC address
- - mal-device : phandle of the associated McMAL node
- - mal-tx-channel : 1 cell, index of the tx channel on McMAL associated
- with this EMAC
- - mal-rx-channel : 1 cell, index of the rx channel on McMAL associated
- with this EMAC
- - cell-index : 1 cell, hardware index of the EMAC cell on a given
- ASIC (typically 0x0 and 0x1 for EMAC0 and EMAC1 on
- each Axon chip)
- - max-frame-size : 1 cell, maximum frame size supported in bytes
- - rx-fifo-size : 1 cell, Rx fifo size in bytes for 10 and 100 Mb/sec
- operations.
- For Axon, 2048
- - tx-fifo-size : 1 cell, Tx fifo size in bytes for 10 and 100 Mb/sec
- operations.
- For Axon, 2048.
- - fifo-entry-size : 1 cell, size of a fifo entry (used to calculate
- thresholds).
- For Axon, 0x00000010
- - mal-burst-size : 1 cell, MAL burst size (used to calculate thresholds)
- in bytes.
- For Axon, 0x00000100 (I think ...)
- - phy-mode : string, mode of operations of the PHY interface.
- Supported values are: "mii", "rmii", "smii", "rgmii",
- "tbi", "gmii", rtbi", "sgmii".
- For Axon on CAB, it is "rgmii"
- - mdio-device : 1 cell, required iff using shared MDIO registers
- (440EP). phandle of the EMAC to use to drive the
- MDIO lines for the PHY used by this EMAC.
- - zmii-device : 1 cell, required iff connected to a ZMII. phandle of
- the ZMII device node
- - zmii-channel : 1 cell, required iff connected to a ZMII. Which ZMII
- channel or 0xffffffff if ZMII is only used for MDIO.
- - rgmii-device : 1 cell, required iff connected to an RGMII. phandle
- of the RGMII device node.
- For Axon: phandle of plb5/plb4/opb/rgmii
- - rgmii-channel : 1 cell, required iff connected to an RGMII. Which
- RGMII channel is used by this EMAC.
- Fox Axon: present, whatever value is appropriate for each
- EMAC, that is the content of the current (bogus) "phy-port"
- property.
-
- Optional properties:
- - phy-address : 1 cell, optional, MDIO address of the PHY. If absent,
- a search is performed.
- - phy-map : 1 cell, optional, bitmap of addresses to probe the PHY
- for, used if phy-address is absent. bit 0x00000001 is
- MDIO address 0.
- For Axon it can be absent, though my current driver
- doesn't handle phy-address yet so for now, keep
- 0x00ffffff in it.
- - rx-fifo-size-gige : 1 cell, Rx fifo size in bytes for 1000 Mb/sec
- operations (if absent the value is the same as
- rx-fifo-size). For Axon, either absent or 2048.
- - tx-fifo-size-gige : 1 cell, Tx fifo size in bytes for 1000 Mb/sec
- operations (if absent the value is the same as
- tx-fifo-size). For Axon, either absent or 2048.
- - tah-device : 1 cell, optional. If connected to a TAH engine for
- offload, phandle of the TAH device node.
- - tah-channel : 1 cell, optional. If appropriate, channel used on the
- TAH engine.
-
- Example:
-
- EMAC0: ethernet@40000800 {
- device_type = "network";
- compatible = "ibm,emac-440gp", "ibm,emac";
- interrupt-parent = <&UIC1>;
- interrupts = <1c 4 1d 4>;
- reg = <40000800 70>;
- local-mac-address = [00 04 AC E3 1B 1E];
- mal-device = <&MAL0>;
- mal-tx-channel = <0 1>;
- mal-rx-channel = <0>;
- cell-index = <0>;
- max-frame-size = <5dc>;
- rx-fifo-size = <1000>;
- tx-fifo-size = <800>;
- phy-mode = "rmii";
- phy-map = <00000001>;
- zmii-device = <&ZMII0>;
- zmii-channel = <0>;
- };
-
- ii) McMAL node
-
- Required properties:
- - device_type : "dma-controller"
- - compatible : compatible list, containing 2 entries, first is
- "ibm,mcmal-CHIP" where CHIP is the host ASIC (like
- emac) and the second is either "ibm,mcmal" or
- "ibm,mcmal2".
- For Axon, "ibm,mcmal-axon","ibm,mcmal2"
- - interrupts : <interrupt mapping for the MAL interrupts sources:
- 5 sources: tx_eob, rx_eob, serr, txde, rxde>.
- For Axon: This is _different_ from the current
- firmware. We use the "delayed" interrupts for txeob
- and rxeob. Thus we end up with mapping those 5 MPIC
- interrupts, all level positive sensitive: 10, 11, 32,
- 33, 34 (in decimal)
- - dcr-reg : < DCR registers range >
- - dcr-parent : if needed for dcr-reg
- - num-tx-chans : 1 cell, number of Tx channels
- - num-rx-chans : 1 cell, number of Rx channels
-
- iii) ZMII node
-
- Required properties:
- - compatible : compatible list, containing 2 entries, first is
- "ibm,zmii-CHIP" where CHIP is the host ASIC (like
- EMAC) and the second is "ibm,zmii".
- For Axon, there is no ZMII node.
- - reg : <registers mapping>
-
- iv) RGMII node
-
- Required properties:
- - compatible : compatible list, containing 2 entries, first is
- "ibm,rgmii-CHIP" where CHIP is the host ASIC (like
- EMAC) and the second is "ibm,rgmii".
- For Axon, "ibm,rgmii-axon","ibm,rgmii"
- - reg : <registers mapping>
- - revision : as provided by the RGMII new version register if
- available.
- For Axon: 0x0000012a
-
- d) Xilinx IP cores
-
- The Xilinx EDK toolchain ships with a set of IP cores (devices) for use
- in Xilinx Spartan and Virtex FPGAs. The devices cover the whole range
- of standard device types (network, serial, etc.) and miscellaneous
- devices (gpio, LCD, spi, etc). Also, since these devices are
- implemented within the fpga fabric every instance of the device can be
- synthesised with different options that change the behaviour.
-
- Each IP-core has a set of parameters which the FPGA designer can use to
- control how the core is synthesized. Historically, the EDK tool would
- extract the device parameters relevant to device drivers and copy them
- into an 'xparameters.h' in the form of #define symbols. This tells the
- device drivers how the IP cores are configured, but it requres the kernel
- to be recompiled every time the FPGA bitstream is resynthesized.
-
- The new approach is to export the parameters into the device tree and
- generate a new device tree each time the FPGA bitstream changes. The
- parameters which used to be exported as #defines will now become
- properties of the device node. In general, device nodes for IP-cores
- will take the following form:
-
- (name): (generic-name)@(base-address) {
- compatible = "xlnx,(ip-core-name)-(HW_VER)"
- [, (list of compatible devices), ...];
- reg = <(baseaddr) (size)>;
- interrupt-parent = <&interrupt-controller-phandle>;
- interrupts = < ... >;
- xlnx,(parameter1) = "(string-value)";
- xlnx,(parameter2) = <(int-value)>;
- };
-
- (generic-name): an open firmware-style name that describes the
- generic class of device. Preferably, this is one word, such
- as 'serial' or 'ethernet'.
- (ip-core-name): the name of the ip block (given after the BEGIN
- directive in system.mhs). Should be in lowercase
- and all underscores '_' converted to dashes '-'.
- (name): is derived from the "PARAMETER INSTANCE" value.
- (parameter#): C_* parameters from system.mhs. The C_ prefix is
- dropped from the parameter name, the name is converted
- to lowercase and all underscore '_' characters are
- converted to dashes '-'.
- (baseaddr): the baseaddr parameter value (often named C_BASEADDR).
- (HW_VER): from the HW_VER parameter.
- (size): the address range size (often C_HIGHADDR - C_BASEADDR + 1).
-
- Typically, the compatible list will include the exact IP core version
- followed by an older IP core version which implements the same
- interface or any other device with the same interface.
-
- 'reg', 'interrupt-parent' and 'interrupts' are all optional properties.
-
- For example, the following block from system.mhs:
-
- BEGIN opb_uartlite
- PARAMETER INSTANCE = opb_uartlite_0
- PARAMETER HW_VER = 1.00.b
- PARAMETER C_BAUDRATE = 115200
- PARAMETER C_DATA_BITS = 8
- PARAMETER C_ODD_PARITY = 0
- PARAMETER C_USE_PARITY = 0
- PARAMETER C_CLK_FREQ = 50000000
- PARAMETER C_BASEADDR = 0xEC100000
- PARAMETER C_HIGHADDR = 0xEC10FFFF
- BUS_INTERFACE SOPB = opb_7
- PORT OPB_Clk = CLK_50MHz
- PORT Interrupt = opb_uartlite_0_Interrupt
- PORT RX = opb_uartlite_0_RX
- PORT TX = opb_uartlite_0_TX
- PORT OPB_Rst = sys_bus_reset_0
- END
-
- becomes the following device tree node:
-
- opb_uartlite_0: serial@ec100000 {
- device_type = "serial";
- compatible = "xlnx,opb-uartlite-1.00.b";
- reg = <ec100000 10000>;
- interrupt-parent = <&opb_intc_0>;
- interrupts = <1 0>; // got this from the opb_intc parameters
- current-speed = <d#115200>; // standard serial device prop
- clock-frequency = <d#50000000>; // standard serial device prop
- xlnx,data-bits = <8>;
- xlnx,odd-parity = <0>;
- xlnx,use-parity = <0>;
- };
-
- Some IP cores actually implement 2 or more logical devices. In
- this case, the device should still describe the whole IP core with
- a single node and add a child node for each logical device. The
- ranges property can be used to translate from parent IP-core to the
- registers of each device. In addition, the parent node should be
- compatible with the bus type 'xlnx,compound', and should contain
- #address-cells and #size-cells, as with any other bus. (Note: this
- makes the assumption that both logical devices have the same bus
- binding. If this is not true, then separate nodes should be used
- for each logical device). The 'cell-index' property can be used to
- enumerate logical devices within an IP core. For example, the
- following is the system.mhs entry for the dual ps2 controller found
- on the ml403 reference design.
-
- BEGIN opb_ps2_dual_ref
- PARAMETER INSTANCE = opb_ps2_dual_ref_0
- PARAMETER HW_VER = 1.00.a
- PARAMETER C_BASEADDR = 0xA9000000
- PARAMETER C_HIGHADDR = 0xA9001FFF
- BUS_INTERFACE SOPB = opb_v20_0
- PORT Sys_Intr1 = ps2_1_intr
- PORT Sys_Intr2 = ps2_2_intr
- PORT Clkin1 = ps2_clk_rx_1
- PORT Clkin2 = ps2_clk_rx_2
- PORT Clkpd1 = ps2_clk_tx_1
- PORT Clkpd2 = ps2_clk_tx_2
- PORT Rx1 = ps2_d_rx_1
- PORT Rx2 = ps2_d_rx_2
- PORT Txpd1 = ps2_d_tx_1
- PORT Txpd2 = ps2_d_tx_2
- END
-
- It would result in the following device tree nodes:
-
- opb_ps2_dual_ref_0: opb-ps2-dual-ref@a9000000 {
- #address-cells = <1>;
- #size-cells = <1>;
- compatible = "xlnx,compound";
- ranges = <0 a9000000 2000>;
- // If this device had extra parameters, then they would
- // go here.
- ps2@0 {
- compatible = "xlnx,opb-ps2-dual-ref-1.00.a";
- reg = <0 40>;
- interrupt-parent = <&opb_intc_0>;
- interrupts = <3 0>;
- cell-index = <0>;
- };
- ps2@1000 {
- compatible = "xlnx,opb-ps2-dual-ref-1.00.a";
- reg = <1000 40>;
- interrupt-parent = <&opb_intc_0>;
- interrupts = <3 0>;
- cell-index = <0>;
- };
- };
-
- Also, the system.mhs file defines bus attachments from the processor
- to the devices. The device tree structure should reflect the bus
- attachments. Again an example; this system.mhs fragment:
-
- BEGIN ppc405_virtex4
- PARAMETER INSTANCE = ppc405_0
- PARAMETER HW_VER = 1.01.a
- BUS_INTERFACE DPLB = plb_v34_0
- BUS_INTERFACE IPLB = plb_v34_0
- END
-
- BEGIN opb_intc
- PARAMETER INSTANCE = opb_intc_0
- PARAMETER HW_VER = 1.00.c
- PARAMETER C_BASEADDR = 0xD1000FC0
- PARAMETER C_HIGHADDR = 0xD1000FDF
- BUS_INTERFACE SOPB = opb_v20_0
- END
-
- BEGIN opb_uart16550
- PARAMETER INSTANCE = opb_uart16550_0
- PARAMETER HW_VER = 1.00.d
- PARAMETER C_BASEADDR = 0xa0000000
- PARAMETER C_HIGHADDR = 0xa0001FFF
- BUS_INTERFACE SOPB = opb_v20_0
- END
-
- BEGIN plb_v34
- PARAMETER INSTANCE = plb_v34_0
- PARAMETER HW_VER = 1.02.a
- END
-
- BEGIN plb_bram_if_cntlr
- PARAMETER INSTANCE = plb_bram_if_cntlr_0
- PARAMETER HW_VER = 1.00.b
- PARAMETER C_BASEADDR = 0xFFFF0000
- PARAMETER C_HIGHADDR = 0xFFFFFFFF
- BUS_INTERFACE SPLB = plb_v34_0
- END
-
- BEGIN plb2opb_bridge
- PARAMETER INSTANCE = plb2opb_bridge_0
- PARAMETER HW_VER = 1.01.a
- PARAMETER C_RNG0_BASEADDR = 0x20000000
- PARAMETER C_RNG0_HIGHADDR = 0x3FFFFFFF
- PARAMETER C_RNG1_BASEADDR = 0x60000000
- PARAMETER C_RNG1_HIGHADDR = 0x7FFFFFFF
- PARAMETER C_RNG2_BASEADDR = 0x80000000
- PARAMETER C_RNG2_HIGHADDR = 0xBFFFFFFF
- PARAMETER C_RNG3_BASEADDR = 0xC0000000
- PARAMETER C_RNG3_HIGHADDR = 0xDFFFFFFF
- BUS_INTERFACE SPLB = plb_v34_0
- BUS_INTERFACE MOPB = opb_v20_0
- END
-
- Gives this device tree (some properties removed for clarity):
-
- plb@0 {
- #address-cells = <1>;
- #size-cells = <1>;
- compatible = "xlnx,plb-v34-1.02.a";
- device_type = "ibm,plb";
- ranges; // 1:1 translation
-
- plb_bram_if_cntrl_0: bram@ffff0000 {
- reg = <ffff0000 10000>;
- }
-
- opb@20000000 {
- #address-cells = <1>;
- #size-cells = <1>;
- ranges = <20000000 20000000 20000000
- 60000000 60000000 20000000
- 80000000 80000000 40000000
- c0000000 c0000000 20000000>;
-
- opb_uart16550_0: serial@a0000000 {
- reg = <a00000000 2000>;
- };
-
- opb_intc_0: interrupt-controller@d1000fc0 {
- reg = <d1000fc0 20>;
- };
- };
- };
-
- That covers the general approach to binding xilinx IP cores into the
- device tree. The following are bindings for specific devices:
-
- i) Xilinx ML300 Framebuffer
-
- Simple framebuffer device from the ML300 reference design (also on the
- ML403 reference design as well as others).
-
- Optional properties:
- - resolution = <xres yres> : pixel resolution of framebuffer. Some
- implementations use a different resolution.
- Default is <d#640 d#480>
- - virt-resolution = <xvirt yvirt> : Size of framebuffer in memory.
- Default is <d#1024 d#480>.
- - rotate-display (empty) : rotate display 180 degrees.
-
- ii) Xilinx SystemACE
-
- The Xilinx SystemACE device is used to program FPGAs from an FPGA
- bitstream stored on a CF card. It can also be used as a generic CF
- interface device.
-
- Optional properties:
- - 8-bit (empty) : Set this property for SystemACE in 8 bit mode
-
- iii) Xilinx EMAC and Xilinx TEMAC
-
- Xilinx Ethernet devices. In addition to general xilinx properties
- listed above, nodes for these devices should include a phy-handle
- property, and may include other common network device properties
- like local-mac-address.
-
- iv) Xilinx Uartlite
-
- Xilinx uartlite devices are simple fixed speed serial ports.
-
- Required properties:
- - current-speed : Baud rate of uartlite
-
- v) Xilinx hwicap
-
- Xilinx hwicap devices provide access to the configuration logic
- of the FPGA through the Internal Configuration Access Port
- (ICAP). The ICAP enables partial reconfiguration of the FPGA,
- readback of the configuration information, and some control over
- 'warm boots' of the FPGA fabric.
-
- Required properties:
- - xlnx,family : The family of the FPGA, necessary since the
- capabilities of the underlying ICAP hardware
- differ between different families. May be
- 'virtex2p', 'virtex4', or 'virtex5'.
-
- vi) Xilinx Uart 16550
-
- Xilinx UART 16550 devices are very similar to the NS16550 but with
- different register spacing and an offset from the base address.
-
- Required properties:
- - clock-frequency : Frequency of the clock input
- - reg-offset : A value of 3 is required
- - reg-shift : A value of 2 is required
-
- e) USB EHCI controllers
-
- Required properties:
- - compatible : should be "usb-ehci".
- - reg : should contain at least address and length of the standard EHCI
- register set for the device. Optional platform-dependent registers
- (debug-port or other) can be also specified here, but only after
- definition of standard EHCI registers.
- - interrupts : one EHCI interrupt should be described here.
- If device registers are implemented in big endian mode, the device
- node should have "big-endian-regs" property.
- If controller implementation operates with big endian descriptors,
- "big-endian-desc" property should be specified.
- If both big endian registers and descriptors are used by the controller
- implementation, "big-endian" property can be specified instead of having
- both "big-endian-regs" and "big-endian-desc".
-
- Example (Sequoia 440EPx):
- ehci@e0000300 {
- compatible = "ibm,usb-ehci-440epx", "usb-ehci";
- interrupt-parent = <&UIC0>;
- interrupts = <1a 4>;
- reg = <0 e0000300 90 0 e0000390 70>;
- big-endian;
- };
-
- f) MDIO on GPIOs
-
- Currently defined compatibles:
- - virtual,gpio-mdio
-
- MDC and MDIO lines connected to GPIO controllers are listed in the
- gpios property as described in section VIII.1 in the following order:
-
- MDC, MDIO.
-
- Example:
-
- mdio {
- compatible = "virtual,mdio-gpio";
- #address-cells = <1>;
- #size-cells = <0>;
- gpios = <&qe_pio_a 11
- &qe_pio_c 6>;
- };
-
- g) SPI (Serial Peripheral Interface) busses
-
- SPI busses can be described with a node for the SPI master device
- and a set of child nodes for each SPI slave on the bus. For this
- discussion, it is assumed that the system's SPI controller is in
- SPI master mode. This binding does not describe SPI controllers
- in slave mode.
-
- The SPI master node requires the following properties:
- - #address-cells - number of cells required to define a chip select
- address on the SPI bus.
- - #size-cells - should be zero.
- - compatible - name of SPI bus controller following generic names
- recommended practice.
- No other properties are required in the SPI bus node. It is assumed
- that a driver for an SPI bus device will understand that it is an SPI bus.
- However, the binding does not attempt to define the specific method for
- assigning chip select numbers. Since SPI chip select configuration is
- flexible and non-standardized, it is left out of this binding with the
- assumption that board specific platform code will be used to manage
- chip selects. Individual drivers can define additional properties to
- support describing the chip select layout.
-
- SPI slave nodes must be children of the SPI master node and can
- contain the following properties.
- - reg - (required) chip select address of device.
- - compatible - (required) name of SPI device following generic names
- recommended practice
- - spi-max-frequency - (required) Maximum SPI clocking speed of device in Hz
- - spi-cpol - (optional) Empty property indicating device requires
- inverse clock polarity (CPOL) mode
- - spi-cpha - (optional) Empty property indicating device requires
- shifted clock phase (CPHA) mode
- - spi-cs-high - (optional) Empty property indicating device requires
- chip select active high
-
- SPI example for an MPC5200 SPI bus:
- spi@f00 {
- #address-cells = <1>;
- #size-cells = <0>;
- compatible = "fsl,mpc5200b-spi","fsl,mpc5200-spi";
- reg = <0xf00 0x20>;
- interrupts = <2 13 0 2 14 0>;
- interrupt-parent = <&mpc5200_pic>;
-
- ethernet-switch@0 {
- compatible = "micrel,ks8995m";
- spi-max-frequency = <1000000>;
- reg = <0>;
- };
-
- codec@1 {
- compatible = "ti,tlv320aic26";
- spi-max-frequency = <100000>;
- reg = <1>;
- };
- };
-
-VII - Marvell Discovery mv64[345]6x System Controller chips
-===========================================================
-
-The Marvell mv64[345]60 series of system controller chips contain
-many of the peripherals needed to implement a complete computer
-system. In this section, we define device tree nodes to describe
-the system controller chip itself and each of the peripherals
-which it contains. Compatible string values for each node are
-prefixed with the string "marvell,", for Marvell Technology Group Ltd.
-
-1) The /system-controller node
-
- This node is used to represent the system-controller and must be
- present when the system uses a system controller chip. The top-level
- system-controller node contains information that is global to all
- devices within the system controller chip. The node name begins
- with "system-controller" followed by the unit address, which is
- the base address of the memory-mapped register set for the system
- controller chip.
-
- Required properties:
-
- - ranges : Describes the translation of system controller addresses
- for memory mapped registers.
- - clock-frequency: Contains the main clock frequency for the system
- controller chip.
- - reg : This property defines the address and size of the
- memory-mapped registers contained within the system controller
- chip. The address specified in the "reg" property should match
- the unit address of the system-controller node.
- - #address-cells : Address representation for system controller
- devices. This field represents the number of cells needed to
- represent the address of the memory-mapped registers of devices
- within the system controller chip.
- - #size-cells : Size representation for for the memory-mapped
- registers within the system controller chip.
- - #interrupt-cells : Defines the width of cells used to represent
- interrupts.
-
- Optional properties:
-
- - model : The specific model of the system controller chip. Such
- as, "mv64360", "mv64460", or "mv64560".
- - compatible : A string identifying the compatibility identifiers
- of the system controller chip.
-
- The system-controller node contains child nodes for each system
- controller device that the platform uses. Nodes should not be created
- for devices which exist on the system controller chip but are not used
-
- Example Marvell Discovery mv64360 system-controller node:
-
- system-controller@f1000000 { /* Marvell Discovery mv64360 */
- #address-cells = <1>;
- #size-cells = <1>;
- model = "mv64360"; /* Default */
- compatible = "marvell,mv64360";
- clock-frequency = <133333333>;
- reg = <0xf1000000 0x10000>;
- virtual-reg = <0xf1000000>;
- ranges = <0x88000000 0x88000000 0x1000000 /* PCI 0 I/O Space */
- 0x80000000 0x80000000 0x8000000 /* PCI 0 MEM Space */
- 0xa0000000 0xa0000000 0x4000000 /* User FLASH */
- 0x00000000 0xf1000000 0x0010000 /* Bridge's regs */
- 0xf2000000 0xf2000000 0x0040000>;/* Integrated SRAM */
-
- [ child node definitions... ]
- }
-
-2) Child nodes of /system-controller
-
- a) Marvell Discovery MDIO bus
-
- The MDIO is a bus to which the PHY devices are connected. For each
- device that exists on this bus, a child node should be created. See
- the definition of the PHY node below for an example of how to define
- a PHY.
-
- Required properties:
- - #address-cells : Should be <1>
- - #size-cells : Should be <0>
- - device_type : Should be "mdio"
- - compatible : Should be "marvell,mv64360-mdio"
-
- Example:
-
- mdio {
- #address-cells = <1>;
- #size-cells = <0>;
- device_type = "mdio";
- compatible = "marvell,mv64360-mdio";
-
- ethernet-phy@0 {
- ......
- };
- };
-
-
- b) Marvell Discovery ethernet controller
-
- The Discover ethernet controller is described with two levels
- of nodes. The first level describes an ethernet silicon block
- and the second level describes up to 3 ethernet nodes within
- that block. The reason for the multiple levels is that the
- registers for the node are interleaved within a single set
- of registers. The "ethernet-block" level describes the
- shared register set, and the "ethernet" nodes describe ethernet
- port-specific properties.
-
- Ethernet block node
-
- Required properties:
- - #address-cells : <1>
- - #size-cells : <0>
- - compatible : "marvell,mv64360-eth-block"
- - reg : Offset and length of the register set for this block
-
- Example Discovery Ethernet block node:
- ethernet-block@2000 {
- #address-cells = <1>;
- #size-cells = <0>;
- compatible = "marvell,mv64360-eth-block";
- reg = <0x2000 0x2000>;
- ethernet@0 {
- .......
- };
- };
-
- Ethernet port node
-
- Required properties:
- - device_type : Should be "network".
- - compatible : Should be "marvell,mv64360-eth".
- - reg : Should be <0>, <1>, or <2>, according to which registers
- within the silicon block the device uses.
- - interrupts : <a> where a is the interrupt number for the port.
- - interrupt-parent : the phandle for the interrupt controller
- that services interrupts for this device.
- - phy : the phandle for the PHY connected to this ethernet
- controller.
- - local-mac-address : 6 bytes, MAC address
-
- Example Discovery Ethernet port node:
- ethernet@0 {
- device_type = "network";
- compatible = "marvell,mv64360-eth";
- reg = <0>;
- interrupts = <32>;
- interrupt-parent = <&PIC>;
- phy = <&PHY0>;
- local-mac-address = [ 00 00 00 00 00 00 ];
- };
-
-
-
- c) Marvell Discovery PHY nodes
-
- Required properties:
- - device_type : Should be "ethernet-phy"
- - interrupts : <a> where a is the interrupt number for this phy.
- - interrupt-parent : the phandle for the interrupt controller that
- services interrupts for this device.
- - reg : The ID number for the phy, usually a small integer
-
- Example Discovery PHY node:
- ethernet-phy@1 {
- device_type = "ethernet-phy";
- compatible = "broadcom,bcm5421";
- interrupts = <76>; /* GPP 12 */
- interrupt-parent = <&PIC>;
- reg = <1>;
- };
-
-
- d) Marvell Discovery SDMA nodes
-
- Represent DMA hardware associated with the MPSC (multiprotocol
- serial controllers).
-
- Required properties:
- - compatible : "marvell,mv64360-sdma"
- - reg : Offset and length of the register set for this device
- - interrupts : <a> where a is the interrupt number for the DMA
- device.
- - interrupt-parent : the phandle for the interrupt controller
- that services interrupts for this device.
-
- Example Discovery SDMA node:
- sdma@4000 {
- compatible = "marvell,mv64360-sdma";
- reg = <0x4000 0xc18>;
- virtual-reg = <0xf1004000>;
- interrupts = <36>;
- interrupt-parent = <&PIC>;
- };
-
-
- e) Marvell Discovery BRG nodes
-
- Represent baud rate generator hardware associated with the MPSC
- (multiprotocol serial controllers).
-
- Required properties:
- - compatible : "marvell,mv64360-brg"
- - reg : Offset and length of the register set for this device
- - clock-src : A value from 0 to 15 which selects the clock
- source for the baud rate generator. This value corresponds
- to the CLKS value in the BRGx configuration register. See
- the mv64x60 User's Manual.
- - clock-frequence : The frequency (in Hz) of the baud rate
- generator's input clock.
- - current-speed : The current speed setting (presumably by
- firmware) of the baud rate generator.
-
- Example Discovery BRG node:
- brg@b200 {
- compatible = "marvell,mv64360-brg";
- reg = <0xb200 0x8>;
- clock-src = <8>;
- clock-frequency = <133333333>;
- current-speed = <9600>;
- };
-
-
- f) Marvell Discovery CUNIT nodes
-
- Represent the Serial Communications Unit device hardware.
-
- Required properties:
- - reg : Offset and length of the register set for this device
-
- Example Discovery CUNIT node:
- cunit@f200 {
- reg = <0xf200 0x200>;
- };
-
-
- g) Marvell Discovery MPSCROUTING nodes
-
- Represent the Discovery's MPSC routing hardware
-
- Required properties:
- - reg : Offset and length of the register set for this device
-
- Example Discovery CUNIT node:
- mpscrouting@b500 {
- reg = <0xb400 0xc>;
- };
-
-
- h) Marvell Discovery MPSCINTR nodes
-
- Represent the Discovery's MPSC DMA interrupt hardware registers
- (SDMA cause and mask registers).
-
- Required properties:
- - reg : Offset and length of the register set for this device
-
- Example Discovery MPSCINTR node:
- mpsintr@b800 {
- reg = <0xb800 0x100>;
- };
-
-
- i) Marvell Discovery MPSC nodes
-
- Represent the Discovery's MPSC (Multiprotocol Serial Controller)
- serial port.
-
- Required properties:
- - device_type : "serial"
- - compatible : "marvell,mv64360-mpsc"
- - reg : Offset and length of the register set for this device
- - sdma : the phandle for the SDMA node used by this port
- - brg : the phandle for the BRG node used by this port
- - cunit : the phandle for the CUNIT node used by this port
- - mpscrouting : the phandle for the MPSCROUTING node used by this port
- - mpscintr : the phandle for the MPSCINTR node used by this port
- - cell-index : the hardware index of this cell in the MPSC core
- - max_idle : value needed for MPSC CHR3 (Maximum Frame Length)
- register
- - interrupts : <a> where a is the interrupt number for the MPSC.
- - interrupt-parent : the phandle for the interrupt controller
- that services interrupts for this device.
-
- Example Discovery MPSCINTR node:
- mpsc@8000 {
- device_type = "serial";
- compatible = "marvell,mv64360-mpsc";
- reg = <0x8000 0x38>;
- virtual-reg = <0xf1008000>;
- sdma = <&SDMA0>;
- brg = <&BRG0>;
- cunit = <&CUNIT>;
- mpscrouting = <&MPSCROUTING>;
- mpscintr = <&MPSCINTR>;
- cell-index = <0>;
- max_idle = <40>;
- interrupts = <40>;
- interrupt-parent = <&PIC>;
- };
-
-
- j) Marvell Discovery Watch Dog Timer nodes
-
- Represent the Discovery's watchdog timer hardware
-
- Required properties:
- - compatible : "marvell,mv64360-wdt"
- - reg : Offset and length of the register set for this device
-
- Example Discovery Watch Dog Timer node:
- wdt@b410 {
- compatible = "marvell,mv64360-wdt";
- reg = <0xb410 0x8>;
- };
-
-
- k) Marvell Discovery I2C nodes
-
- Represent the Discovery's I2C hardware
-
- Required properties:
- - device_type : "i2c"
- - compatible : "marvell,mv64360-i2c"
- - reg : Offset and length of the register set for this device
- - interrupts : <a> where a is the interrupt number for the I2C.
- - interrupt-parent : the phandle for the interrupt controller
- that services interrupts for this device.
-
- Example Discovery I2C node:
- compatible = "marvell,mv64360-i2c";
- reg = <0xc000 0x20>;
- virtual-reg = <0xf100c000>;
- interrupts = <37>;
- interrupt-parent = <&PIC>;
- };
-
-
- l) Marvell Discovery PIC (Programmable Interrupt Controller) nodes
-
- Represent the Discovery's PIC hardware
-
- Required properties:
- - #interrupt-cells : <1>
- - #address-cells : <0>
- - compatible : "marvell,mv64360-pic"
- - reg : Offset and length of the register set for this device
- - interrupt-controller
-
- Example Discovery PIC node:
- pic {
- #interrupt-cells = <1>;
- #address-cells = <0>;
- compatible = "marvell,mv64360-pic";
- reg = <0x0 0x88>;
- interrupt-controller;
- };
-
-
- m) Marvell Discovery MPP (Multipurpose Pins) multiplexing nodes
-
- Represent the Discovery's MPP hardware
-
- Required properties:
- - compatible : "marvell,mv64360-mpp"
- - reg : Offset and length of the register set for this device
-
- Example Discovery MPP node:
- mpp@f000 {
- compatible = "marvell,mv64360-mpp";
- reg = <0xf000 0x10>;
- };
-
-
- n) Marvell Discovery GPP (General Purpose Pins) nodes
-
- Represent the Discovery's GPP hardware
-
- Required properties:
- - compatible : "marvell,mv64360-gpp"
- - reg : Offset and length of the register set for this device
-
- Example Discovery GPP node:
- gpp@f000 {
- compatible = "marvell,mv64360-gpp";
- reg = <0xf100 0x20>;
- };
-
-
- o) Marvell Discovery PCI host bridge node
-
- Represents the Discovery's PCI host bridge device. The properties
- for this node conform to Rev 2.1 of the PCI Bus Binding to IEEE
- 1275-1994. A typical value for the compatible property is
- "marvell,mv64360-pci".
-
- Example Discovery PCI host bridge node
- pci@80000000 {
- #address-cells = <3>;
- #size-cells = <2>;
- #interrupt-cells = <1>;
- device_type = "pci";
- compatible = "marvell,mv64360-pci";
- reg = <0xcf8 0x8>;
- ranges = <0x01000000 0x0 0x0
- 0x88000000 0x0 0x01000000
- 0x02000000 0x0 0x80000000
- 0x80000000 0x0 0x08000000>;
- bus-range = <0 255>;
- clock-frequency = <66000000>;
- interrupt-parent = <&PIC>;
- interrupt-map-mask = <0xf800 0x0 0x0 0x7>;
- interrupt-map = <
- /* IDSEL 0x0a */
- 0x5000 0 0 1 &PIC 80
- 0x5000 0 0 2 &PIC 81
- 0x5000 0 0 3 &PIC 91
- 0x5000 0 0 4 &PIC 93
-
- /* IDSEL 0x0b */
- 0x5800 0 0 1 &PIC 91
- 0x5800 0 0 2 &PIC 93
- 0x5800 0 0 3 &PIC 80
- 0x5800 0 0 4 &PIC 81
-
- /* IDSEL 0x0c */
- 0x6000 0 0 1 &PIC 91
- 0x6000 0 0 2 &PIC 93
- 0x6000 0 0 3 &PIC 80
- 0x6000 0 0 4 &PIC 81
-
- /* IDSEL 0x0d */
- 0x6800 0 0 1 &PIC 93
- 0x6800 0 0 2 &PIC 80
- 0x6800 0 0 3 &PIC 81
- 0x6800 0 0 4 &PIC 91
- >;
- };
-
-
- p) Marvell Discovery CPU Error nodes
-
- Represent the Discovery's CPU error handler device.
-
- Required properties:
- - compatible : "marvell,mv64360-cpu-error"
- - reg : Offset and length of the register set for this device
- - interrupts : the interrupt number for this device
- - interrupt-parent : the phandle for the interrupt controller
- that services interrupts for this device.
-
- Example Discovery CPU Error node:
- cpu-error@0070 {
- compatible = "marvell,mv64360-cpu-error";
- reg = <0x70 0x10 0x128 0x28>;
- interrupts = <3>;
- interrupt-parent = <&PIC>;
- };
-
-
- q) Marvell Discovery SRAM Controller nodes
-
- Represent the Discovery's SRAM controller device.
-
- Required properties:
- - compatible : "marvell,mv64360-sram-ctrl"
- - reg : Offset and length of the register set for this device
- - interrupts : the interrupt number for this device
- - interrupt-parent : the phandle for the interrupt controller
- that services interrupts for this device.
-
- Example Discovery SRAM Controller node:
- sram-ctrl@0380 {
- compatible = "marvell,mv64360-sram-ctrl";
- reg = <0x380 0x80>;
- interrupts = <13>;
- interrupt-parent = <&PIC>;
- };
-
-
- r) Marvell Discovery PCI Error Handler nodes
-
- Represent the Discovery's PCI error handler device.
-
- Required properties:
- - compatible : "marvell,mv64360-pci-error"
- - reg : Offset and length of the register set for this device
- - interrupts : the interrupt number for this device
- - interrupt-parent : the phandle for the interrupt controller
- that services interrupts for this device.
-
- Example Discovery PCI Error Handler node:
- pci-error@1d40 {
- compatible = "marvell,mv64360-pci-error";
- reg = <0x1d40 0x40 0xc28 0x4>;
- interrupts = <12>;
- interrupt-parent = <&PIC>;
- };
-
-
- s) Marvell Discovery Memory Controller nodes
-
- Represent the Discovery's memory controller device.
-
- Required properties:
- - compatible : "marvell,mv64360-mem-ctrl"
- - reg : Offset and length of the register set for this device
- - interrupts : the interrupt number for this device
- - interrupt-parent : the phandle for the interrupt controller
- that services interrupts for this device.
-
- Example Discovery Memory Controller node:
- mem-ctrl@1400 {
- compatible = "marvell,mv64360-mem-ctrl";
- reg = <0x1400 0x60>;
- interrupts = <17>;
- interrupt-parent = <&PIC>;
- };
-
-
-VIII - Specifying interrupt information for devices
+VII - Specifying interrupt information for devices
===================================================
The device tree represents the busses and devices of a hardware
@@ -2439,56 +1324,7 @@
2 = high to low edge sensitive type enabled
3 = low to high edge sensitive type enabled
-IX - Specifying GPIO information for devices
-============================================
-
-1) gpios property
------------------
-
-Nodes that makes use of GPIOs should define them using `gpios' property,
-format of which is: <&gpio-controller1-phandle gpio1-specifier
- &gpio-controller2-phandle gpio2-specifier
- 0 /* holes are permitted, means no GPIO 3 */
- &gpio-controller4-phandle gpio4-specifier
- ...>;
-
-Note that gpio-specifier length is controller dependent.
-
-gpio-specifier may encode: bank, pin position inside the bank,
-whether pin is open-drain and whether pin is logically inverted.
-
-Example of the node using GPIOs:
-
- node {
- gpios = <&qe_pio_e 18 0>;
- };
-
-In this example gpio-specifier is "18 0" and encodes GPIO pin number,
-and empty GPIO flags as accepted by the "qe_pio_e" gpio-controller.
-
-2) gpio-controller nodes
-------------------------
-
-Every GPIO controller node must have #gpio-cells property defined,
-this information will be used to translate gpio-specifiers.
-
-Example of two SOC GPIO banks defined as gpio-controller nodes:
-
- qe_pio_a: gpio-controller@1400 {
- #gpio-cells = <2>;
- compatible = "fsl,qe-pario-bank-a", "fsl,qe-pario-bank";
- reg = <0x1400 0x18>;
- gpio-controller;
- };
-
- qe_pio_e: gpio-controller@1460 {
- #gpio-cells = <2>;
- compatible = "fsl,qe-pario-bank-e", "fsl,qe-pario-bank";
- reg = <0x1460 0x18>;
- gpio-controller;
- };
-
-X - Specifying Device Power Management Information (sleep property)
+VIII - Specifying Device Power Management Information (sleep property)
===================================================================
Devices on SOCs often have mechanisms for placing devices into low-power
diff --git a/Documentation/powerpc/dts-bindings/4xx/emac.txt b/Documentation/powerpc/dts-bindings/4xx/emac.txt
new file mode 100644
index 0000000..2161334
--- /dev/null
+++ b/Documentation/powerpc/dts-bindings/4xx/emac.txt
@@ -0,0 +1,148 @@
+ 4xx/Axon EMAC ethernet nodes
+
+    The EMAC ethernet controller is found in IBM and AMCC 4xx chips, and
+    also in the Axon bridge. To operate, it needs to interact with the
+    McMAL DMA controller, and sometimes an RGMII or ZMII
+ interface. In addition to the nodes and properties described
+ below, the node for the OPB bus on which the EMAC sits must have a
+ correct clock-frequency property.
+
+ i) The EMAC node itself
+
+ Required properties:
+ - device_type : "network"
+
+ - compatible : compatible list, contains 2 entries, first is
+ "ibm,emac-CHIP" where CHIP is the host ASIC (440gx,
+ 405gp, Axon) and second is either "ibm,emac" or
+ "ibm,emac4". For Axon, thus, we have: "ibm,emac-axon",
+ "ibm,emac4"
+ - interrupts : <interrupt mapping for EMAC IRQ and WOL IRQ>
+ - interrupt-parent : optional, if needed for interrupt mapping
+ - reg : <registers mapping>
+ - local-mac-address : 6 bytes, MAC address
+ - mal-device : phandle of the associated McMAL node
+ - mal-tx-channel : 1 cell, index of the tx channel on McMAL associated
+ with this EMAC
+ - mal-rx-channel : 1 cell, index of the rx channel on McMAL associated
+ with this EMAC
+ - cell-index : 1 cell, hardware index of the EMAC cell on a given
+ ASIC (typically 0x0 and 0x1 for EMAC0 and EMAC1 on
+ each Axon chip)
+ - max-frame-size : 1 cell, maximum frame size supported in bytes
+ - rx-fifo-size : 1 cell, Rx fifo size in bytes for 10 and 100 Mb/sec
+ operations.
+ For Axon, 2048
+ - tx-fifo-size : 1 cell, Tx fifo size in bytes for 10 and 100 Mb/sec
+ operations.
+ For Axon, 2048.
+ - fifo-entry-size : 1 cell, size of a fifo entry (used to calculate
+ thresholds).
+ For Axon, 0x00000010
+ - mal-burst-size : 1 cell, MAL burst size (used to calculate thresholds)
+ in bytes.
+ For Axon, 0x00000100 (I think ...)
+ - phy-mode : string, mode of operations of the PHY interface.
+ Supported values are: "mii", "rmii", "smii", "rgmii",
+      "tbi", "gmii", "rtbi", "sgmii".
+ For Axon on CAB, it is "rgmii"
+ - mdio-device : 1 cell, required iff using shared MDIO registers
+ (440EP). phandle of the EMAC to use to drive the
+ MDIO lines for the PHY used by this EMAC.
+ - zmii-device : 1 cell, required iff connected to a ZMII. phandle of
+ the ZMII device node
+    - zmii-channel : 1 cell, required iff connected to a ZMII. Which ZMII
+      channel is used, or 0xffffffff if the ZMII is only used for MDIO.
+ - rgmii-device : 1 cell, required iff connected to an RGMII. phandle
+ of the RGMII device node.
+ For Axon: phandle of plb5/plb4/opb/rgmii
+ - rgmii-channel : 1 cell, required iff connected to an RGMII. Which
+ RGMII channel is used by this EMAC.
+      For Axon: present, whatever value is appropriate for each
+ EMAC, that is the content of the current (bogus) "phy-port"
+ property.
+
+ Optional properties:
+ - phy-address : 1 cell, optional, MDIO address of the PHY. If absent,
+ a search is performed.
+ - phy-map : 1 cell, optional, bitmap of addresses to probe the PHY
+ for, used if phy-address is absent. bit 0x00000001 is
+ MDIO address 0.
+ For Axon it can be absent, though my current driver
+ doesn't handle phy-address yet so for now, keep
+ 0x00ffffff in it.
+ - rx-fifo-size-gige : 1 cell, Rx fifo size in bytes for 1000 Mb/sec
+ operations (if absent the value is the same as
+ rx-fifo-size). For Axon, either absent or 2048.
+ - tx-fifo-size-gige : 1 cell, Tx fifo size in bytes for 1000 Mb/sec
+ operations (if absent the value is the same as
+ tx-fifo-size). For Axon, either absent or 2048.
+ - tah-device : 1 cell, optional. If connected to a TAH engine for
+ offload, phandle of the TAH device node.
+ - tah-channel : 1 cell, optional. If appropriate, channel used on the
+ TAH engine.
+
+ Example:
+
+ EMAC0: ethernet@40000800 {
+ device_type = "network";
+ compatible = "ibm,emac-440gp", "ibm,emac";
+ interrupt-parent = <&UIC1>;
+ interrupts = <1c 4 1d 4>;
+ reg = <40000800 70>;
+ local-mac-address = [00 04 AC E3 1B 1E];
+ mal-device = <&MAL0>;
+ mal-tx-channel = <0 1>;
+ mal-rx-channel = <0>;
+ cell-index = <0>;
+ max-frame-size = <5dc>;
+ rx-fifo-size = <1000>;
+ tx-fifo-size = <800>;
+ phy-mode = "rmii";
+ phy-map = <00000001>;
+ zmii-device = <&ZMII0>;
+ zmii-channel = <0>;
+ };
+
+ ii) McMAL node
+
+ Required properties:
+ - device_type : "dma-controller"
+ - compatible : compatible list, containing 2 entries, first is
+ "ibm,mcmal-CHIP" where CHIP is the host ASIC (like
+ emac) and the second is either "ibm,mcmal" or
+ "ibm,mcmal2".
+ For Axon, "ibm,mcmal-axon","ibm,mcmal2"
+ - interrupts : <interrupt mapping for the MAL interrupts sources:
+ 5 sources: tx_eob, rx_eob, serr, txde, rxde>.
+ For Axon: This is _different_ from the current
+ firmware. We use the "delayed" interrupts for txeob
+ and rxeob. Thus we end up with mapping those 5 MPIC
+ interrupts, all level positive sensitive: 10, 11, 32,
+ 33, 34 (in decimal)
+ - dcr-reg : < DCR registers range >
+ - dcr-parent : if needed for dcr-reg
+ - num-tx-chans : 1 cell, number of Tx channels
+ - num-rx-chans : 1 cell, number of Rx channels
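+
+    The binding itself does not include a McMAL example; a minimal sketch
+    is shown below (the compatible string follows the rule above for a
+    440GP host, and the DCR range, channel counts and interrupt numbers
+    are purely illustrative, not taken from any particular board):
+
+        MAL0: mcmal {
+                device_type = "dma-controller";
+                compatible = "ibm,mcmal-440gp", "ibm,mcmal";
+                dcr-reg = <180 62>;
+                num-tx-chans = <4>;
+                num-rx-chans = <4>;
+                interrupt-parent = <&UIC0>;
+                interrupts = <a 4 b 4 c 4 d 4 e 4>;
+        };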
+
+ iii) ZMII node
+
+ Required properties:
+ - compatible : compatible list, containing 2 entries, first is
+ "ibm,zmii-CHIP" where CHIP is the host ASIC (like
+ EMAC) and the second is "ibm,zmii".
+ For Axon, there is no ZMII node.
+ - reg : <registers mapping>
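+
+    A hedged example of a ZMII node for a 440GP-class chip (the register
+    offset and length are illustrative only):
+
+        ZMII0: emac-zmii@40000780 {
+                compatible = "ibm,zmii-440gp", "ibm,zmii";
+                reg = <40000780 c>;
+        };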
+
+ iv) RGMII node
+
+ Required properties:
+ - compatible : compatible list, containing 2 entries, first is
+ "ibm,rgmii-CHIP" where CHIP is the host ASIC (like
+ EMAC) and the second is "ibm,rgmii".
+ For Axon, "ibm,rgmii-axon","ibm,rgmii"
+ - reg : <registers mapping>
+ - revision : as provided by the RGMII new version register if
+ available.
+ For Axon: 0x0000012a
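+
+    A hedged RGMII example for Axon (the register offset and length are
+    illustrative assumptions; the revision value is the one quoted above):
+
+        RGMII0: emac-rgmii@ef601000 {
+                compatible = "ibm,rgmii-axon", "ibm,rgmii";
+                reg = <ef601000 8>;
+                revision = <12a>;
+        };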
+
diff --git a/Documentation/powerpc/dts-bindings/gpio/gpio.txt b/Documentation/powerpc/dts-bindings/gpio/gpio.txt
new file mode 100644
index 0000000..edaa84d
--- /dev/null
+++ b/Documentation/powerpc/dts-bindings/gpio/gpio.txt
@@ -0,0 +1,50 @@
+Specifying GPIO information for devices
+============================================
+
+1) gpios property
+-----------------
+
+Nodes that make use of GPIOs should define them using the `gpios' property,
+format of which is: <&gpio-controller1-phandle gpio1-specifier
+ &gpio-controller2-phandle gpio2-specifier
+ 0 /* holes are permitted, means no GPIO 3 */
+ &gpio-controller4-phandle gpio4-specifier
+ ...>;
+
+Note that gpio-specifier length is controller dependent.
+
+gpio-specifier may encode: bank, pin position inside the bank,
+whether pin is open-drain and whether pin is logically inverted.
+
+Example of the node using GPIOs:
+
+ node {
+ gpios = <&qe_pio_e 18 0>;
+ };
+
+In this example gpio-specifier is "18 0" and encodes GPIO pin number,
+and empty GPIO flags as accepted by the "qe_pio_e" gpio-controller.
+
+2) gpio-controller nodes
+------------------------
+
+Every GPIO controller node must have the #gpio-cells property defined;
+this information will be used to translate gpio-specifiers.
+
+Example of two SOC GPIO banks defined as gpio-controller nodes:
+
+ qe_pio_a: gpio-controller@1400 {
+ #gpio-cells = <2>;
+ compatible = "fsl,qe-pario-bank-a", "fsl,qe-pario-bank";
+ reg = <0x1400 0x18>;
+ gpio-controller;
+ };
+
+ qe_pio_e: gpio-controller@1460 {
+ #gpio-cells = <2>;
+ compatible = "fsl,qe-pario-bank-e", "fsl,qe-pario-bank";
+ reg = <0x1460 0x18>;
+ gpio-controller;
+ };
+
+
diff --git a/Documentation/powerpc/dts-bindings/gpio/led.txt b/Documentation/powerpc/dts-bindings/gpio/led.txt
index 4fe14de..064db92 100644
--- a/Documentation/powerpc/dts-bindings/gpio/led.txt
+++ b/Documentation/powerpc/dts-bindings/gpio/led.txt
@@ -16,10 +16,17 @@
string defining the trigger assigned to the LED. Current triggers are:
"backlight" - LED will act as a back-light, controlled by the framebuffer
system
- "default-on" - LED will turn on
+ "default-on" - LED will turn on, but see "default-state" below
"heartbeat" - LED "double" flashes at a load average based rate
"ide-disk" - LED indicates disk activity
"timer" - LED flashes at a fixed, configurable rate
+- default-state: (optional) The initial state of the LED. Valid
+ values are "on", "off", and "keep". If the LED is already on or off
+ and the default-state property is set the to same value, then no
+ glitch should be produced where the LED momentarily turns off (or
+ on). The "keep" setting will keep the LED at whatever its current
+ state is, without producing a glitch. The default is off if this
+ property is not present.
Examples:
@@ -30,14 +37,22 @@
gpios = <&mcu_pio 0 1>; /* Active low */
linux,default-trigger = "ide-disk";
};
+
+ fault {
+ gpios = <&mcu_pio 1 0>;
+ /* Keep LED on if BIOS detected hardware fault */
+ default-state = "keep";
+ };
};
run-control {
compatible = "gpio-leds";
red {
gpios = <&mpc8572 6 0>;
+ default-state = "off";
};
green {
gpios = <&mpc8572 7 0>;
+ default-state = "on";
};
}
diff --git a/Documentation/powerpc/dts-bindings/gpio/mdio.txt b/Documentation/powerpc/dts-bindings/gpio/mdio.txt
new file mode 100644
index 0000000..bc95495
--- /dev/null
+++ b/Documentation/powerpc/dts-bindings/gpio/mdio.txt
@@ -0,0 +1,19 @@
+MDIO on GPIOs
+
+Currently defined compatibles:
+- virtual,mdio-gpio
+
+MDC and MDIO lines connected to GPIO controllers are listed in the
+gpios property, as described in the GPIO bindings document (gpio.txt),
+in the following order:
+
+MDC, MDIO.
+
+Example:
+
+mdio {
+ compatible = "virtual,mdio-gpio";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ gpios = <&qe_pio_a 11
+ &qe_pio_c 6>;
+};
diff --git a/Documentation/powerpc/dts-bindings/marvell.txt b/Documentation/powerpc/dts-bindings/marvell.txt
new file mode 100644
index 0000000..3708a2f
--- /dev/null
+++ b/Documentation/powerpc/dts-bindings/marvell.txt
@@ -0,0 +1,521 @@
+Marvell Discovery mv64[345]6x System Controller chips
+===========================================================
+
+The Marvell mv64[345]60 series of system controller chips contain
+many of the peripherals needed to implement a complete computer
+system. In this section, we define device tree nodes to describe
+the system controller chip itself and each of the peripherals
+which it contains. Compatible string values for each node are
+prefixed with the string "marvell,", for Marvell Technology Group Ltd.
+
+1) The /system-controller node
+
+ This node is used to represent the system-controller and must be
+ present when the system uses a system controller chip. The top-level
+ system-controller node contains information that is global to all
+ devices within the system controller chip. The node name begins
+ with "system-controller" followed by the unit address, which is
+ the base address of the memory-mapped register set for the system
+ controller chip.
+
+ Required properties:
+
+ - ranges : Describes the translation of system controller addresses
+ for memory mapped registers.
+ - clock-frequency: Contains the main clock frequency for the system
+ controller chip.
+ - reg : This property defines the address and size of the
+ memory-mapped registers contained within the system controller
+ chip. The address specified in the "reg" property should match
+ the unit address of the system-controller node.
+ - #address-cells : Address representation for system controller
+ devices. This field represents the number of cells needed to
+ represent the address of the memory-mapped registers of devices
+ within the system controller chip.
+ - #size-cells : Size representation for the memory-mapped
+ registers within the system controller chip.
+ - #interrupt-cells : Defines the width of cells used to represent
+ interrupts.
+
+ Optional properties:
+
+ - model : The specific model of the system controller chip, such as
+   "mv64360", "mv64460", or "mv64560".
+ - compatible : A string identifying the compatibility identifiers
+ of the system controller chip.
+
+ The system-controller node contains child nodes for each system
+ controller device that the platform uses. Nodes should not be created
+ for devices which exist on the system controller chip but are not used.
+
+ Example Marvell Discovery mv64360 system-controller node:
+
+ system-controller@f1000000 { /* Marvell Discovery mv64360 */
+ #address-cells = <1>;
+ #size-cells = <1>;
+ model = "mv64360"; /* Default */
+ compatible = "marvell,mv64360";
+ clock-frequency = <133333333>;
+ reg = <0xf1000000 0x10000>;
+ virtual-reg = <0xf1000000>;
+ ranges = <0x88000000 0x88000000 0x1000000 /* PCI 0 I/O Space */
+ 0x80000000 0x80000000 0x8000000 /* PCI 0 MEM Space */
+ 0xa0000000 0xa0000000 0x4000000 /* User FLASH */
+ 0x00000000 0xf1000000 0x0010000 /* Bridge's regs */
+ 0xf2000000 0xf2000000 0x0040000>;/* Integrated SRAM */
+
+ [ child node definitions... ]
+ };
+
+2) Child nodes of /system-controller
+
+ a) Marvell Discovery MDIO bus
+
+ The MDIO is a bus to which the PHY devices are connected. For each
+ device that exists on this bus, a child node should be created. See
+ the definition of the PHY node below for an example of how to define
+ a PHY.
+
+ Required properties:
+ - #address-cells : Should be <1>
+ - #size-cells : Should be <0>
+ - device_type : Should be "mdio"
+ - compatible : Should be "marvell,mv64360-mdio"
+
+ Example:
+
+ mdio {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ device_type = "mdio";
+ compatible = "marvell,mv64360-mdio";
+
+ ethernet-phy@0 {
+ ......
+ };
+ };
+
+
+ b) Marvell Discovery ethernet controller
+
+ The Discovery ethernet controller is described with two levels
+ of nodes. The first level describes an ethernet silicon block
+ and the second level describes up to 3 ethernet nodes within
+ that block. The reason for the multiple levels is that the
+ registers for the node are interleaved within a single set
+ of registers. The "ethernet-block" level describes the
+ shared register set, and the "ethernet" nodes describe ethernet
+ port-specific properties.
+
+ Ethernet block node
+
+ Required properties:
+ - #address-cells : <1>
+ - #size-cells : <0>
+ - compatible : "marvell,mv64360-eth-block"
+ - reg : Offset and length of the register set for this block
+
+ Example Discovery Ethernet block node:
+ ethernet-block@2000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "marvell,mv64360-eth-block";
+ reg = <0x2000 0x2000>;
+ ethernet@0 {
+ .......
+ };
+ };
+
+ Ethernet port node
+
+ Required properties:
+ - device_type : Should be "network".
+ - compatible : Should be "marvell,mv64360-eth".
+ - reg : Should be <0>, <1>, or <2>, according to which registers
+ within the silicon block the device uses.
+ - interrupts : <a> where a is the interrupt number for the port.
+ - interrupt-parent : the phandle for the interrupt controller
+ that services interrupts for this device.
+ - phy : the phandle for the PHY connected to this ethernet
+ controller.
+ - local-mac-address : 6 bytes, MAC address
+
+ Example Discovery Ethernet port node:
+ ethernet@0 {
+ device_type = "network";
+ compatible = "marvell,mv64360-eth";
+ reg = <0>;
+ interrupts = <32>;
+ interrupt-parent = <&PIC>;
+ phy = <&PHY0>;
+ local-mac-address = [ 00 00 00 00 00 00 ];
+ };
+
+
+
+ c) Marvell Discovery PHY nodes
+
+ Required properties:
+ - device_type : Should be "ethernet-phy"
+ - interrupts : <a> where a is the interrupt number for this phy.
+ - interrupt-parent : the phandle for the interrupt controller that
+ services interrupts for this device.
+ - reg : The ID number for the phy, usually a small integer
+
+ Example Discovery PHY node:
+ ethernet-phy@1 {
+ device_type = "ethernet-phy";
+ compatible = "broadcom,bcm5421";
+ interrupts = <76>; /* GPP 12 */
+ interrupt-parent = <&PIC>;
+ reg = <1>;
+ };
+
+
+ d) Marvell Discovery SDMA nodes
+
+ Represent DMA hardware associated with the MPSC (multiprotocol
+ serial controllers).
+
+ Required properties:
+ - compatible : "marvell,mv64360-sdma"
+ - reg : Offset and length of the register set for this device
+ - interrupts : <a> where a is the interrupt number for the DMA
+ device.
+ - interrupt-parent : the phandle for the interrupt controller
+ that services interrupts for this device.
+
+ Example Discovery SDMA node:
+ sdma@4000 {
+ compatible = "marvell,mv64360-sdma";
+ reg = <0x4000 0xc18>;
+ virtual-reg = <0xf1004000>;
+ interrupts = <36>;
+ interrupt-parent = <&PIC>;
+ };
+
+
+ e) Marvell Discovery BRG nodes
+
+ Represent baud rate generator hardware associated with the MPSC
+ (multiprotocol serial controllers).
+
+ Required properties:
+ - compatible : "marvell,mv64360-brg"
+ - reg : Offset and length of the register set for this device
+ - clock-src : A value from 0 to 15 which selects the clock
+ source for the baud rate generator. This value corresponds
+ to the CLKS value in the BRGx configuration register. See
+ the mv64x60 User's Manual.
+ - clock-frequency : The frequency (in Hz) of the baud rate
+ generator's input clock.
+ - current-speed : The current speed setting (presumably set by
+ firmware) of the baud rate generator.
+
+ Example Discovery BRG node:
+ brg@b200 {
+ compatible = "marvell,mv64360-brg";
+ reg = <0xb200 0x8>;
+ clock-src = <8>;
+ clock-frequency = <133333333>;
+ current-speed = <9600>;
+ };
+
+
+ f) Marvell Discovery CUNIT nodes
+
+ Represent the Serial Communications Unit device hardware.
+
+ Required properties:
+ - reg : Offset and length of the register set for this device
+
+ Example Discovery CUNIT node:
+ cunit@f200 {
+ reg = <0xf200 0x200>;
+ };
+
+
+ g) Marvell Discovery MPSCROUTING nodes
+
+ Represent the Discovery's MPSC routing hardware
+
+ Required properties:
+ - reg : Offset and length of the register set for this device
+
+ Example Discovery MPSCROUTING node:
+ mpscrouting@b400 {
+ reg = <0xb400 0xc>;
+ };
+
+
+ h) Marvell Discovery MPSCINTR nodes
+
+ Represent the Discovery's MPSC DMA interrupt hardware registers
+ (SDMA cause and mask registers).
+
+ Required properties:
+ - reg : Offset and length of the register set for this device
+
+ Example Discovery MPSCINTR node:
+ mpscintr@b800 {
+ reg = <0xb800 0x100>;
+ };
+
+
+ i) Marvell Discovery MPSC nodes
+
+ Represent the Discovery's MPSC (Multiprotocol Serial Controller)
+ serial port.
+
+ Required properties:
+ - device_type : "serial"
+ - compatible : "marvell,mv64360-mpsc"
+ - reg : Offset and length of the register set for this device
+ - sdma : the phandle for the SDMA node used by this port
+ - brg : the phandle for the BRG node used by this port
+ - cunit : the phandle for the CUNIT node used by this port
+ - mpscrouting : the phandle for the MPSCROUTING node used by this port
+ - mpscintr : the phandle for the MPSCINTR node used by this port
+ - cell-index : the hardware index of this cell in the MPSC core
+ - max_idle : value needed for MPSC CHR3 (Maximum Frame Length)
+ register
+ - interrupts : <a> where a is the interrupt number for the MPSC.
+ - interrupt-parent : the phandle for the interrupt controller
+ that services interrupts for this device.
+
+ Example Discovery MPSC node:
+ mpsc@8000 {
+ device_type = "serial";
+ compatible = "marvell,mv64360-mpsc";
+ reg = <0x8000 0x38>;
+ virtual-reg = <0xf1008000>;
+ sdma = <&SDMA0>;
+ brg = <&BRG0>;
+ cunit = <&CUNIT>;
+ mpscrouting = <&MPSCROUTING>;
+ mpscintr = <&MPSCINTR>;
+ cell-index = <0>;
+ max_idle = <40>;
+ interrupts = <40>;
+ interrupt-parent = <&PIC>;
+ };
+
+
+ j) Marvell Discovery Watch Dog Timer nodes
+
+ Represent the Discovery's watchdog timer hardware
+
+ Required properties:
+ - compatible : "marvell,mv64360-wdt"
+ - reg : Offset and length of the register set for this device
+
+ Example Discovery Watch Dog Timer node:
+ wdt@b410 {
+ compatible = "marvell,mv64360-wdt";
+ reg = <0xb410 0x8>;
+ };
+
+
+ k) Marvell Discovery I2C nodes
+
+ Represent the Discovery's I2C hardware
+
+ Required properties:
+ - device_type : "i2c"
+ - compatible : "marvell,mv64360-i2c"
+ - reg : Offset and length of the register set for this device
+ - interrupts : <a> where a is the interrupt number for the I2C.
+ - interrupt-parent : the phandle for the interrupt controller
+ that services interrupts for this device.
+
+ Example Discovery I2C node:
+ i2c@c000 {
+ device_type = "i2c";
+ compatible = "marvell,mv64360-i2c";
+ reg = <0xc000 0x20>;
+ virtual-reg = <0xf100c000>;
+ interrupts = <37>;
+ interrupt-parent = <&PIC>;
+ };
+
+
+ l) Marvell Discovery PIC (Programmable Interrupt Controller) nodes
+
+ Represent the Discovery's PIC hardware
+
+ Required properties:
+ - #interrupt-cells : <1>
+ - #address-cells : <0>
+ - compatible : "marvell,mv64360-pic"
+ - reg : Offset and length of the register set for this device
+ - interrupt-controller
+
+ Example Discovery PIC node:
+ pic {
+ #interrupt-cells = <1>;
+ #address-cells = <0>;
+ compatible = "marvell,mv64360-pic";
+ reg = <0x0 0x88>;
+ interrupt-controller;
+ };
+
+
+ m) Marvell Discovery MPP (Multipurpose Pins) multiplexing nodes
+
+ Represent the Discovery's MPP hardware
+
+ Required properties:
+ - compatible : "marvell,mv64360-mpp"
+ - reg : Offset and length of the register set for this device
+
+ Example Discovery MPP node:
+ mpp@f000 {
+ compatible = "marvell,mv64360-mpp";
+ reg = <0xf000 0x10>;
+ };
+
+
+ n) Marvell Discovery GPP (General Purpose Pins) nodes
+
+ Represent the Discovery's GPP hardware
+
+ Required properties:
+ - compatible : "marvell,mv64360-gpp"
+ - reg : Offset and length of the register set for this device
+
+ Example Discovery GPP node:
+ gpp@f100 {
+ compatible = "marvell,mv64360-gpp";
+ reg = <0xf100 0x20>;
+ };
+
+
+ o) Marvell Discovery PCI host bridge node
+
+ Represents the Discovery's PCI host bridge device. The properties
+ for this node conform to Rev 2.1 of the PCI Bus Binding to IEEE
+ 1275-1994. A typical value for the compatible property is
+ "marvell,mv64360-pci".
+
+ Example Discovery PCI host bridge node
+ pci@80000000 {
+ #address-cells = <3>;
+ #size-cells = <2>;
+ #interrupt-cells = <1>;
+ device_type = "pci";
+ compatible = "marvell,mv64360-pci";
+ reg = <0xcf8 0x8>;
+ ranges = <0x01000000 0x0 0x0
+ 0x88000000 0x0 0x01000000
+ 0x02000000 0x0 0x80000000
+ 0x80000000 0x0 0x08000000>;
+ bus-range = <0 255>;
+ clock-frequency = <66000000>;
+ interrupt-parent = <&PIC>;
+ interrupt-map-mask = <0xf800 0x0 0x0 0x7>;
+ interrupt-map = <
+ /* IDSEL 0x0a */
+ 0x5000 0 0 1 &PIC 80
+ 0x5000 0 0 2 &PIC 81
+ 0x5000 0 0 3 &PIC 91
+ 0x5000 0 0 4 &PIC 93
+
+ /* IDSEL 0x0b */
+ 0x5800 0 0 1 &PIC 91
+ 0x5800 0 0 2 &PIC 93
+ 0x5800 0 0 3 &PIC 80
+ 0x5800 0 0 4 &PIC 81
+
+ /* IDSEL 0x0c */
+ 0x6000 0 0 1 &PIC 91
+ 0x6000 0 0 2 &PIC 93
+ 0x6000 0 0 3 &PIC 80
+ 0x6000 0 0 4 &PIC 81
+
+ /* IDSEL 0x0d */
+ 0x6800 0 0 1 &PIC 93
+ 0x6800 0 0 2 &PIC 80
+ 0x6800 0 0 3 &PIC 81
+ 0x6800 0 0 4 &PIC 91
+ >;
+ };
+
+
+ p) Marvell Discovery CPU Error nodes
+
+ Represent the Discovery's CPU error handler device.
+
+ Required properties:
+ - compatible : "marvell,mv64360-cpu-error"
+ - reg : Offset and length of the register set for this device
+ - interrupts : the interrupt number for this device
+ - interrupt-parent : the phandle for the interrupt controller
+ that services interrupts for this device.
+
+ Example Discovery CPU Error node:
+ cpu-error@0070 {
+ compatible = "marvell,mv64360-cpu-error";
+ reg = <0x70 0x10 0x128 0x28>;
+ interrupts = <3>;
+ interrupt-parent = <&PIC>;
+ };
+
+
+ q) Marvell Discovery SRAM Controller nodes
+
+ Represent the Discovery's SRAM controller device.
+
+ Required properties:
+ - compatible : "marvell,mv64360-sram-ctrl"
+ - reg : Offset and length of the register set for this device
+ - interrupts : the interrupt number for this device
+ - interrupt-parent : the phandle for the interrupt controller
+ that services interrupts for this device.
+
+ Example Discovery SRAM Controller node:
+ sram-ctrl@0380 {
+ compatible = "marvell,mv64360-sram-ctrl";
+ reg = <0x380 0x80>;
+ interrupts = <13>;
+ interrupt-parent = <&PIC>;
+ };
+
+
+ r) Marvell Discovery PCI Error Handler nodes
+
+ Represent the Discovery's PCI error handler device.
+
+ Required properties:
+ - compatible : "marvell,mv64360-pci-error"
+ - reg : Offset and length of the register set for this device
+ - interrupts : the interrupt number for this device
+ - interrupt-parent : the phandle for the interrupt controller
+ that services interrupts for this device.
+
+ Example Discovery PCI Error Handler node:
+ pci-error@1d40 {
+ compatible = "marvell,mv64360-pci-error";
+ reg = <0x1d40 0x40 0xc28 0x4>;
+ interrupts = <12>;
+ interrupt-parent = <&PIC>;
+ };
+
+
+ s) Marvell Discovery Memory Controller nodes
+
+ Represent the Discovery's memory controller device.
+
+ Required properties:
+ - compatible : "marvell,mv64360-mem-ctrl"
+ - reg : Offset and length of the register set for this device
+ - interrupts : the interrupt number for this device
+ - interrupt-parent : the phandle for the interrupt controller
+ that services interrupts for this device.
+
+ Example Discovery Memory Controller node:
+ mem-ctrl@1400 {
+ compatible = "marvell,mv64360-mem-ctrl";
+ reg = <0x1400 0x60>;
+ interrupts = <17>;
+ interrupt-parent = <&PIC>;
+ };
+
+
diff --git a/Documentation/powerpc/dts-bindings/phy.txt b/Documentation/powerpc/dts-bindings/phy.txt
new file mode 100644
index 0000000..bb8c742
--- /dev/null
+++ b/Documentation/powerpc/dts-bindings/phy.txt
@@ -0,0 +1,25 @@
+PHY nodes
+
+Required properties:
+
+ - device_type : Should be "ethernet-phy"
+ - interrupts : <a b> where a is the interrupt number and b is a
+ field that represents an encoding of the sense and level
+ information for the interrupt. This should be encoded based on
+ the information in section 2) depending on the type of interrupt
+ controller you have.
+ - interrupt-parent : the phandle for the interrupt controller that
+ services interrupts for this device.
+ - reg : The ID number for the phy, usually a small integer
+ - linux,phandle : phandle for this node; likely referenced by an
+ ethernet controller node.
+
+Example:
+
+ethernet-phy@0 {
+ linux,phandle = <2452000>
+ interrupt-parent = <40000>;
+ interrupts = <35 1>;
+ reg = <0>;
+ device_type = "ethernet-phy";
+};
diff --git a/Documentation/powerpc/dts-bindings/spi-bus.txt b/Documentation/powerpc/dts-bindings/spi-bus.txt
new file mode 100644
index 0000000..e782add
--- /dev/null
+++ b/Documentation/powerpc/dts-bindings/spi-bus.txt
@@ -0,0 +1,57 @@
+SPI (Serial Peripheral Interface) busses
+
+SPI busses can be described with a node for the SPI master device
+and a set of child nodes for each SPI slave on the bus. For this
+discussion, it is assumed that the system's SPI controller is in
+SPI master mode. This binding does not describe SPI controllers
+in slave mode.
+
+The SPI master node requires the following properties:
+- #address-cells - number of cells required to define a chip select
+ address on the SPI bus.
+- #size-cells - should be zero.
+- compatible - name of SPI bus controller following generic names
+ recommended practice.
+No other properties are required in the SPI bus node. It is assumed
+that a driver for an SPI bus device will understand that it is an SPI bus.
+However, the binding does not attempt to define the specific method for
+assigning chip select numbers. Since SPI chip select configuration is
+flexible and non-standardized, it is left out of this binding with the
+assumption that board specific platform code will be used to manage
+chip selects. Individual drivers can define additional properties to
+support describing the chip select layout.
+
+SPI slave nodes must be children of the SPI master node and can
+contain the following properties.
+- reg - (required) chip select address of device.
+- compatible - (required) name of SPI device following generic names
+ recommended practice
+- spi-max-frequency - (required) Maximum SPI clocking speed of device in Hz
+- spi-cpol - (optional) Empty property indicating device requires
+ inverse clock polarity (CPOL) mode
+- spi-cpha - (optional) Empty property indicating device requires
+ shifted clock phase (CPHA) mode
+- spi-cs-high - (optional) Empty property indicating device requires
+ chip select active high
+
+SPI example for an MPC5200 SPI bus:
+ spi@f00 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "fsl,mpc5200b-spi","fsl,mpc5200-spi";
+ reg = <0xf00 0x20>;
+ interrupts = <2 13 0 2 14 0>;
+ interrupt-parent = <&mpc5200_pic>;
+
+ ethernet-switch@0 {
+ compatible = "micrel,ks8995m";
+ spi-max-frequency = <1000000>;
+ reg = <0>;
+ };
+
+ codec@1 {
+ compatible = "ti,tlv320aic26";
+ spi-max-frequency = <100000>;
+ reg = <1>;
+ };
+ };
diff --git a/Documentation/powerpc/dts-bindings/usb-ehci.txt b/Documentation/powerpc/dts-bindings/usb-ehci.txt
new file mode 100644
index 0000000..fa18612
--- /dev/null
+++ b/Documentation/powerpc/dts-bindings/usb-ehci.txt
@@ -0,0 +1,25 @@
+USB EHCI controllers
+
+Required properties:
+ - compatible : should be "usb-ehci".
+ - reg : should contain at least address and length of the standard EHCI
+ register set for the device. Optional platform-dependent registers
+ (debug-port or other) can be also specified here, but only after
+ definition of standard EHCI registers.
+ - interrupts : one EHCI interrupt should be described here.
+If the device registers are implemented in big-endian mode, the device
+node should have a "big-endian-regs" property.
+If the controller implementation operates with big-endian descriptors,
+the "big-endian-desc" property should be specified.
+If both big-endian registers and descriptors are used by the controller
+implementation, the "big-endian" property can be specified instead of
+having both "big-endian-regs" and "big-endian-desc".
+
+Example (Sequoia 440EPx):
+ ehci@e0000300 {
+ compatible = "ibm,usb-ehci-440epx", "usb-ehci";
+ interrupt-parent = <&UIC0>;
+ interrupts = <1a 4>;
+ reg = <0 e0000300 90 0 e0000390 70>;
+ big-endian;
+ };
diff --git a/Documentation/powerpc/dts-bindings/xilinx.txt b/Documentation/powerpc/dts-bindings/xilinx.txt
new file mode 100644
index 0000000..80339fe
--- /dev/null
+++ b/Documentation/powerpc/dts-bindings/xilinx.txt
@@ -0,0 +1,295 @@
+ d) Xilinx IP cores
+
+ The Xilinx EDK toolchain ships with a set of IP cores (devices) for use
+ in Xilinx Spartan and Virtex FPGAs. The devices cover the whole range
+ of standard device types (network, serial, etc.) and miscellaneous
+ devices (gpio, LCD, spi, etc). Also, since these devices are
+ implemented within the fpga fabric every instance of the device can be
+ implemented within the FPGA fabric, every instance of the device can be
+ synthesised with different options that change its behaviour.
+ Each IP-core has a set of parameters which the FPGA designer can use to
+ control how the core is synthesized. Historically, the EDK tool would
+ extract the device parameters relevant to device drivers and copy them
+ into an 'xparameters.h' in the form of #define symbols. This tells the
+ device drivers how the IP cores are configured, but it requires the kernel
+ to be recompiled every time the FPGA bitstream is resynthesized.
+
+ The new approach is to export the parameters into the device tree and
+ generate a new device tree each time the FPGA bitstream changes. The
+ parameters which used to be exported as #defines will now become
+ properties of the device node. In general, device nodes for IP-cores
+ will take the following form:
+
+ (name): (generic-name)@(base-address) {
+ compatible = "xlnx,(ip-core-name)-(HW_VER)"
+ [, (list of compatible devices), ...];
+ reg = <(baseaddr) (size)>;
+ interrupt-parent = <&interrupt-controller-phandle>;
+ interrupts = < ... >;
+ xlnx,(parameter1) = "(string-value)";
+ xlnx,(parameter2) = <(int-value)>;
+ };
+
+ (generic-name): an open firmware-style name that describes the
+ generic class of device. Preferably, this is one word, such
+ as 'serial' or 'ethernet'.
+ (ip-core-name): the name of the ip block (given after the BEGIN
+ directive in system.mhs). Should be in lowercase
+ and all underscores '_' converted to dashes '-'.
+ (name): is derived from the "PARAMETER INSTANCE" value.
+ (parameter#): C_* parameters from system.mhs. The C_ prefix is
+ dropped from the parameter name, the name is converted
+ to lowercase and all underscore '_' characters are
+ converted to dashes '-'.
+ (baseaddr): the baseaddr parameter value (often named C_BASEADDR).
+ (HW_VER): from the HW_VER parameter.
+ (size): the address range size (often C_HIGHADDR - C_BASEADDR + 1).
+
+ Typically, the compatible list will include the exact IP core version
+ followed by an older IP core version which implements the same
+ interface or any other device with the same interface.
+
+ 'reg', 'interrupt-parent' and 'interrupts' are all optional properties.
+
+ For example, the following block from system.mhs:
+
+ BEGIN opb_uartlite
+ PARAMETER INSTANCE = opb_uartlite_0
+ PARAMETER HW_VER = 1.00.b
+ PARAMETER C_BAUDRATE = 115200
+ PARAMETER C_DATA_BITS = 8
+ PARAMETER C_ODD_PARITY = 0
+ PARAMETER C_USE_PARITY = 0
+ PARAMETER C_CLK_FREQ = 50000000
+ PARAMETER C_BASEADDR = 0xEC100000
+ PARAMETER C_HIGHADDR = 0xEC10FFFF
+ BUS_INTERFACE SOPB = opb_7
+ PORT OPB_Clk = CLK_50MHz
+ PORT Interrupt = opb_uartlite_0_Interrupt
+ PORT RX = opb_uartlite_0_RX
+ PORT TX = opb_uartlite_0_TX
+ PORT OPB_Rst = sys_bus_reset_0
+ END
+
+ becomes the following device tree node:
+
+ opb_uartlite_0: serial@ec100000 {
+ device_type = "serial";
+ compatible = "xlnx,opb-uartlite-1.00.b";
+ reg = <ec100000 10000>;
+ interrupt-parent = <&opb_intc_0>;
+ interrupts = <1 0>; // got this from the opb_intc parameters
+ current-speed = <d#115200>; // standard serial device prop
+ clock-frequency = <d#50000000>; // standard serial device prop
+ xlnx,data-bits = <8>;
+ xlnx,odd-parity = <0>;
+ xlnx,use-parity = <0>;
+ };
+
+ Some IP cores actually implement 2 or more logical devices. In
+ this case, the device should still describe the whole IP core with
+ a single node and add a child node for each logical device. The
+ ranges property can be used to translate from parent IP-core to the
+ registers of each device. In addition, the parent node should be
+ compatible with the bus type 'xlnx,compound', and should contain
+ #address-cells and #size-cells, as with any other bus. (Note: this
+ makes the assumption that both logical devices have the same bus
+ binding. If this is not true, then separate nodes should be used
+ for each logical device). The 'cell-index' property can be used to
+ enumerate logical devices within an IP core. For example, the
+ following is the system.mhs entry for the dual ps2 controller found
+ on the ml403 reference design.
+
+ BEGIN opb_ps2_dual_ref
+ PARAMETER INSTANCE = opb_ps2_dual_ref_0
+ PARAMETER HW_VER = 1.00.a
+ PARAMETER C_BASEADDR = 0xA9000000
+ PARAMETER C_HIGHADDR = 0xA9001FFF
+ BUS_INTERFACE SOPB = opb_v20_0
+ PORT Sys_Intr1 = ps2_1_intr
+ PORT Sys_Intr2 = ps2_2_intr
+ PORT Clkin1 = ps2_clk_rx_1
+ PORT Clkin2 = ps2_clk_rx_2
+ PORT Clkpd1 = ps2_clk_tx_1
+ PORT Clkpd2 = ps2_clk_tx_2
+ PORT Rx1 = ps2_d_rx_1
+ PORT Rx2 = ps2_d_rx_2
+ PORT Txpd1 = ps2_d_tx_1
+ PORT Txpd2 = ps2_d_tx_2
+ END
+
+ It would result in the following device tree nodes:
+
+ opb_ps2_dual_ref_0: opb-ps2-dual-ref@a9000000 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "xlnx,compound";
+ ranges = <0 a9000000 2000>;
+ // If this device had extra parameters, then they would
+ // go here.
+ ps2@0 {
+ compatible = "xlnx,opb-ps2-dual-ref-1.00.a";
+ reg = <0 40>;
+ interrupt-parent = <&opb_intc_0>;
+ interrupts = <3 0>;
+ cell-index = <0>;
+ };
+ ps2@1000 {
+ compatible = "xlnx,opb-ps2-dual-ref-1.00.a";
+ reg = <1000 40>;
+ interrupt-parent = <&opb_intc_0>;
+ interrupts = <3 0>;
+ cell-index = <1>;
+ };
+ };
+
+ Also, the system.mhs file defines bus attachments from the processor
+ to the devices. The device tree structure should reflect the bus
+ attachments. Again an example; this system.mhs fragment:
+
+ BEGIN ppc405_virtex4
+ PARAMETER INSTANCE = ppc405_0
+ PARAMETER HW_VER = 1.01.a
+ BUS_INTERFACE DPLB = plb_v34_0
+ BUS_INTERFACE IPLB = plb_v34_0
+ END
+
+ BEGIN opb_intc
+ PARAMETER INSTANCE = opb_intc_0
+ PARAMETER HW_VER = 1.00.c
+ PARAMETER C_BASEADDR = 0xD1000FC0
+ PARAMETER C_HIGHADDR = 0xD1000FDF
+ BUS_INTERFACE SOPB = opb_v20_0
+ END
+
+ BEGIN opb_uart16550
+ PARAMETER INSTANCE = opb_uart16550_0
+ PARAMETER HW_VER = 1.00.d
+ PARAMETER C_BASEADDR = 0xa0000000
+ PARAMETER C_HIGHADDR = 0xa0001FFF
+ BUS_INTERFACE SOPB = opb_v20_0
+ END
+
+ BEGIN plb_v34
+ PARAMETER INSTANCE = plb_v34_0
+ PARAMETER HW_VER = 1.02.a
+ END
+
+ BEGIN plb_bram_if_cntlr
+ PARAMETER INSTANCE = plb_bram_if_cntlr_0
+ PARAMETER HW_VER = 1.00.b
+ PARAMETER C_BASEADDR = 0xFFFF0000
+ PARAMETER C_HIGHADDR = 0xFFFFFFFF
+ BUS_INTERFACE SPLB = plb_v34_0
+ END
+
+ BEGIN plb2opb_bridge
+ PARAMETER INSTANCE = plb2opb_bridge_0
+ PARAMETER HW_VER = 1.01.a
+ PARAMETER C_RNG0_BASEADDR = 0x20000000
+ PARAMETER C_RNG0_HIGHADDR = 0x3FFFFFFF
+ PARAMETER C_RNG1_BASEADDR = 0x60000000
+ PARAMETER C_RNG1_HIGHADDR = 0x7FFFFFFF
+ PARAMETER C_RNG2_BASEADDR = 0x80000000
+ PARAMETER C_RNG2_HIGHADDR = 0xBFFFFFFF
+ PARAMETER C_RNG3_BASEADDR = 0xC0000000
+ PARAMETER C_RNG3_HIGHADDR = 0xDFFFFFFF
+ BUS_INTERFACE SPLB = plb_v34_0
+ BUS_INTERFACE MOPB = opb_v20_0
+ END
+
+ Gives this device tree (some properties removed for clarity):
+
+ plb@0 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "xlnx,plb-v34-1.02.a";
+ device_type = "ibm,plb";
+ ranges; // 1:1 translation
+
+ plb_bram_if_cntrl_0: bram@ffff0000 {
+ reg = <ffff0000 10000>;
+ };
+
+ opb@20000000 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <20000000 20000000 20000000
+ 60000000 60000000 20000000
+ 80000000 80000000 40000000
+ c0000000 c0000000 20000000>;
+
+ opb_uart16550_0: serial@a0000000 {
+ reg = <a0000000 2000>;
+ };
+
+ opb_intc_0: interrupt-controller@d1000fc0 {
+ reg = <d1000fc0 20>;
+ };
+ };
+ };
+
+ That covers the general approach to binding xilinx IP cores into the
+ device tree. The following are bindings for specific devices:
+
+ i) Xilinx ML300 Framebuffer
+
+ Simple framebuffer device from the ML300 reference design (also found
+ on the ML403 and other reference designs).
+
+ Optional properties:
+ - resolution = <xres yres> : pixel resolution of framebuffer. Some
+ implementations use a different resolution.
+ Default is <d#640 d#480>
+ - virt-resolution = <xvirt yvirt> : Size of framebuffer in memory.
+ Default is <d#1024 d#480>.
+ - rotate-display (empty) : rotate display 180 degrees.
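+
+ A sketch of a framebuffer node following the generic rules above (the
+ IP core name, HW_VER and base address are assumptions chosen for
+ illustration only):
+
+       plb_tft_cntlr_ref_0: tft@d0000000 {
+               compatible = "xlnx,plb-tft-cntlr-ref-1.00.a";
+               reg = <d0000000 2000>;
+               resolution = <d#640 d#480>;
+               virt-resolution = <d#1024 d#480>;
+       };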
+
+ ii) Xilinx SystemACE
+
+ The Xilinx SystemACE device is used to program FPGAs from an FPGA
+ bitstream stored on a CF card. It can also be used as a generic CF
+ interface device.
+
+ Optional properties:
+ - 8-bit (empty) : Set this property for SystemACE in 8 bit mode
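+
+ A sketch of a SystemACE node (the compatible string is derived from the
+ generic naming rule above; the address, size and interrupt values are
+ assumptions):
+
+       sysace@41800000 {
+               compatible = "xlnx,opb-sysace-1.00.b";
+               reg = <41800000 10000>;
+               interrupt-parent = <&opb_intc_0>;
+               interrupts = <2 0>;
+       };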
+
+ iii) Xilinx EMAC and Xilinx TEMAC
+
+ Xilinx Ethernet devices. In addition to general xilinx properties
+ listed above, nodes for these devices should include a phy-handle
+ property, and may include other common network device properties
+ like local-mac-address.
+
+ iv) Xilinx Uartlite
+
+ Xilinx uartlite devices are simple fixed-speed serial ports.
+
+ Required properties:
+ - current-speed : Baud rate of uartlite
+
+ v) Xilinx hwicap
+
+ Xilinx hwicap devices provide access to the configuration logic
+ of the FPGA through the Internal Configuration Access Port
+ (ICAP). The ICAP enables partial reconfiguration of the FPGA,
+ readback of the configuration information, and some control over
+ 'warm boots' of the FPGA fabric.
+
+ Required properties:
+ - xlnx,family : The family of the FPGA, necessary since the
+ capabilities of the underlying ICAP hardware
+ differ between different families. May be
+ 'virtex2p', 'virtex4', or 'virtex5'.
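+
+ A sketch of a hwicap node (the compatible string follows the generic
+ naming rule above; the address, size and family value are illustrative
+ assumptions):
+
+       hwicap@40200000 {
+               compatible = "xlnx,opb-hwicap-1.00.b";
+               reg = <40200000 10000>;
+               xlnx,family = "virtex4";
+       };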
+
+ vi) Xilinx Uart 16550
+
+ Xilinx UART 16550 devices are very similar to the NS16550 but with
+ different register spacing and an offset from the base address.
+
+ Required properties:
+ - clock-frequency : Frequency of the clock input
+ - reg-offset : A value of 3 is required
+ - reg-shift : A value of 2 is required
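+
+ A sketch of a Xilinx UART 16550 node (the compatible strings, address
+ and clock value are illustrative assumptions; only reg-offset and
+ reg-shift are mandated above):
+
+       opb_uart16550_0: serial@a0010000 {
+               device_type = "serial";
+               compatible = "xlnx,opb-uart16550-1.00.d", "ns16550";
+               reg = <a0010000 2000>;
+               reg-offset = <3>;
+               reg-shift = <2>;
+               clock-frequency = <d#100000000>;
+               current-speed = <d#115200>;
+       };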
+
+
diff --git a/Documentation/rfkill.txt b/Documentation/rfkill.txt
index c8acd86..b486050 100644
--- a/Documentation/rfkill.txt
+++ b/Documentation/rfkill.txt
@@ -111,6 +111,8 @@
name: Name assigned by driver to this key (interface or driver name).
type: Driver type string ("wlan", "bluetooth", etc).
+ persistent: Whether the soft blocked state is initialised from
+ non-volatile storage at startup.
state: Current state of the transmitter
0: RFKILL_STATE_SOFT_BLOCKED
transmitter is turned off by software
diff --git a/Documentation/video4linux/CARDLIST.cx88 b/Documentation/video4linux/CARDLIST.cx88
index 89093f5..0736518 100644
--- a/Documentation/video4linux/CARDLIST.cx88
+++ b/Documentation/video4linux/CARDLIST.cx88
@@ -6,8 +6,8 @@
5 -> Leadtek Winfast 2000XP Expert [107d:6611,107d:6613]
6 -> AverTV Studio 303 (M126) [1461:000b]
7 -> MSI TV-@nywhere Master [1462:8606]
- 8 -> Leadtek Winfast DV2000 [107d:6620]
- 9 -> Leadtek PVR 2000 [107d:663b,107d:663c,107d:6632]
+ 8 -> Leadtek Winfast DV2000 [107d:6620,107d:6621]
+ 9 -> Leadtek PVR 2000 [107d:663b,107d:663c,107d:6632,107d:6630,107d:6638,107d:6631,107d:6637,107d:663d]
10 -> IODATA GV-VCP3/PCI [10fc:d003]
11 -> Prolink PlayTV PVR
12 -> ASUS PVR-416 [1043:4823,1461:c111]
@@ -59,7 +59,7 @@
58 -> Pinnacle PCTV HD 800i [11bd:0051]
59 -> DViCO FusionHDTV 5 PCI nano [18ac:d530]
60 -> Pinnacle Hybrid PCTV [12ab:1788]
- 61 -> Winfast TV2000 XP Global [107d:6f18]
+ 61 -> Leadtek TV2000 XP Global [107d:6f18,107d:6618]
62 -> PowerColor RA330 [14f1:ea3d]
63 -> Geniatech X8000-MT DVBT [14f1:8852]
64 -> DViCO FusionHDTV DVB-T PRO [18ac:db30]
diff --git a/Documentation/video4linux/CARDLIST.em28xx b/Documentation/video4linux/CARDLIST.em28xx
index a98a688..873630e 100644
--- a/Documentation/video4linux/CARDLIST.em28xx
+++ b/Documentation/video4linux/CARDLIST.em28xx
@@ -65,3 +65,4 @@
67 -> Terratec Grabby (em2860) [0ccd:0096]
68 -> Terratec AV350 (em2860) [0ccd:0084]
69 -> KWorld ATSC 315U HDTV TV Box (em2882) [eb1a:a313]
+ 70 -> Evga inDtube (em2882)
diff --git a/Documentation/video4linux/v4l2-framework.txt b/Documentation/video4linux/v4l2-framework.txt
index d54c1e4..ba4706a 100644
--- a/Documentation/video4linux/v4l2-framework.txt
+++ b/Documentation/video4linux/v4l2-framework.txt
@@ -390,6 +390,30 @@
To see which chip variants are supported you can look in the i2c driver code
for the i2c_device_id table. This lists all the possibilities.
+There are two more helper functions:
+
+v4l2_i2c_new_subdev_cfg: this function adds new irq and platform_data
+arguments and has both 'addr' and 'probed_addrs' arguments: if addr is not
+0 then that will be used (non-probing variant), otherwise the probed_addrs
+are probed.
+
+For example: this will probe for address 0x10:
+
+struct v4l2_subdev *sd = v4l2_i2c_new_subdev_cfg(v4l2_dev, adapter,
+ "module_foo", "chipid", 0, NULL, 0, I2C_ADDRS(0x10));
+
+v4l2_i2c_new_subdev_board uses an i2c_board_info struct which is passed
+to the i2c driver and replaces the irq, platform_data and addr arguments.
+
+If the subdev supports the s_config core ops, then that op is called with
+the irq and platform_data arguments after the subdev was setup. The older
+v4l2_i2c_new_(probed_)subdev functions will call s_config as well, but with
+irq set to 0 and platform_data set to NULL.
+
+Note that in the next kernel release the functions v4l2_i2c_new_subdev,
+v4l2_i2c_new_probed_subdev and v4l2_i2c_new_probed_subdev_addr will all be
+replaced by a single v4l2_i2c_new_subdev that is identical to
+v4l2_i2c_new_subdev_cfg but without the irq and platform_data arguments.
struct video_device
-------------------
diff --git a/Documentation/watchdog/hpwdt.txt b/Documentation/watchdog/hpwdt.txt
index 127839e..9c24d5f 100644
--- a/Documentation/watchdog/hpwdt.txt
+++ b/Documentation/watchdog/hpwdt.txt
@@ -19,30 +19,41 @@
not be updated in a timely fashion and a hardware system reset (also known as
an Automatic Server Recovery (ASR)) event will occur.
- The hpwdt driver also has three (3) module parameters. They are the following:
+ The hpwdt driver also has four (4) module parameters. They are the following:
soft_margin - allows the user to set the watchdog timer value
allow_kdump - allows the user to save off a kernel dump image after an NMI
nowayout - basic watchdog parameter that does not allow the timer to
be restarted or an impending ASR to be escaped.
+ priority - determines whether the hpwdt driver is first or last on the
+            die_notify list to handle NMIs. The default value for this
+            module parameter is 0 (LAST). To enable NMI sourcing, reload
+            the hpwdt driver with priority=1 (and boot with
+            nmi_watchdog=0).
NOTE: More information about watchdog drivers in general, including the ioctl
interface to /dev/watchdog can be found in
Documentation/watchdog/watchdog-api.txt and Documentation/IPMI.txt.
- The NMI sourcing capability is disabled when the driver discovers that the
- nmi_watchdog is turned on (nmi_watchdog = 1). This is due to the inability to
+ The priority parameter was introduced due to other kernel software that relied
+ on handling NMIs (like oprofile). Keeping hpwdt's priority at 0 (or LAST)
+ enables users of NMIs for non-critical events to work as expected.
+
+ The NMI sourcing capability is disabled by default due to the inability to
distinguish between "NMI Watchdog Ticks" and "HW generated NMI events" in the
Linux kernel. What this means is that the hpwdt nmi handler code is called
each time the NMI signal fires off. This could amount to several thousands of
NMIs in a matter of seconds. If a user sees the Linux kernel's "dazed and
confused" message in the logs or if the system gets into a hung state, then
- the user should reboot with nmi_watchdog=0.
+ the hpwdt driver can be reloaded with the "priority" module parameter set
+ (priority=1).
1. If the kernel has not been booted with nmi_watchdog turned off then
edit /boot/grub/menu.lst and place the nmi_watchdog=0 at the end of the
currently booting kernel line.
2. reboot the server
+ 3. Once the system comes up, run rmmod hpwdt
+ 4. insmod /lib/modules/`uname -r`/kernel/drivers/char/watchdog/hpwdt.ko priority=1
Now, the hpwdt can successfully receive and source the NMI and provide a log
message that details the reason for the NMI (as determined by the HP BIOS).
diff --git a/MAINTAINERS b/MAINTAINERS
index 1d47043..fa2a16d 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -230,6 +230,13 @@
S: Maintained
F: drivers/net/acenic*
+ACER ASPIRE ONE TEMPERATURE AND FAN DRIVER
+P: Peter Feuerer
+M: peter@piie.net
+W: http://piie.net/?section=acerhdf
+S: Maintained
+F: drivers/platform/x86/acerhdf.c
+
ACER WMI LAPTOP EXTRAS
P: Carlos Corbacho
M: carlos@strangeworlds.co.uk
@@ -653,6 +660,8 @@
L: openezx-devel@lists.openezx.org (subscribers-only)
W: http://www.openezx.org/
S: Maintained
+T: topgit git://git.openezx.org/openezx.git
+F: arch/arm/mach-pxa/ezx.c
ARM/FARADAY FA526 PORT
P: Paulius Zaleckas
@@ -774,11 +783,25 @@
M: philipp.zabel@gmail.com
S: Maintained
+ARM/MIOA701 MACHINE SUPPORT
+P: Robert Jarzmik
+M: robert.jarzmik@free.fr
+L: linux-arm-kernel@lists.arm.linux.org.uk (subscribers-only)
+F: arch/arm/mach-pxa/mioa701.c
+S: Maintained
+
ARM/NEC MOBILEPRO 900/c MACHINE SUPPORT
P: Michael Petchkovsky
M: mkpetch@internode.on.net
S: Maintained
+ARM/OPENMOKO NEO FREERUNNER (GTA02) MACHINE SUPPORT
+P: Nelson Castillo
+M: arhuaco@freaks-unidos.net
+L: openmoko-kernel@lists.openmoko.org (subscribers-only)
+W: http://wiki.openmoko.org/wiki/Neo_FreeRunner
+S: Supported
+
ARM/TOSA MACHINE SUPPORT
P: Dmitry Eremin-Solenikov
M: dbaryshkov@gmail.com
@@ -792,6 +815,12 @@
W: http://hackndev.com
S: Maintained
+ARM/PALM TREO 680 SUPPORT
+P: Tomas Cech
+M: sleep_walker@suse.cz
+W: http://hackndev.com
+S: Maintained
+
ARM/PALMZ72 SUPPORT
P: Sergey Lapin
M: slapin@ossfans.org
@@ -891,8 +920,7 @@
P: Karol Kozimor
M: sziwan@users.sourceforge.net
L: acpi4asus-user@lists.sourceforge.net
-W: http://sourceforge.net/projects/acpi4asus
-W: http://xf.iksaif.net/acpi4asus
+W: http://acpi4asus.sf.net
S: Maintained
F: arch/x86/kernel/acpi/boot.c
F: drivers/platform/x86/asus_acpi.c
@@ -908,8 +936,7 @@
P: Corentin Chary
M: corentincj@iksaif.net
L: acpi4asus-user@lists.sourceforge.net
-W: http://sourceforge.net/projects/acpi4asus
-W: http://xf.iksaif.net/acpi4asus
+W: http://acpi4asus.sf.net
S: Maintained
F: drivers/platform/x86/asus-laptop.c
@@ -1614,7 +1641,7 @@
M: starvik@axis.com
P: Jesper Nilsson
M: jesper.nilsson@axis.com
-L: dev-etrax@axis.com
+L: linux-cris-kernel@axis.com
W: http://developer.axis.com
S: Maintained
F: arch/cris/
@@ -2088,7 +2115,7 @@
P: Corentin Chary
M: corentincj@iksaif.net
L: acpi4asus-user@lists.sourceforge.net
-W: http://sourceforge.net/projects/acpi4asus
+W: http://acpi4asus.sf.net
S: Maintained
F: drivers/platform/x86/eeepc-laptop.c
@@ -2460,6 +2487,14 @@
F: drivers/net/wan/pci200syn.c
F: drivers/net/wan/wanxl*
+GENERIC INCLUDE/ASM HEADER FILES
+P: Arnd Bergmann
+M: arnd@arndb.de
+L: linux-arch@vger.kernel.org
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/arnd/asm-generic.git
+S: Maintained
+F: include/asm-generic
+
GFS2 FILE SYSTEM
P: Steven Whitehouse
M: swhiteho@redhat.com
@@ -2802,10 +2837,10 @@
F: drivers/scsi/ips.*
IDE SUBSYSTEM
-P: Bartlomiej Zolnierkiewicz
-M: bzolnier@gmail.com
+P: David S. Miller
+M: davem@davemloft.net
L: linux-ide@vger.kernel.org
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/bart/ide-2.6.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/davem/ide-2.6.git
S: Maintained
F: Documentation/ide/
F: drivers/ide/
@@ -2853,7 +2888,7 @@
M: slapin@ossfans.org
L: linux-zigbee-devel@lists.sourceforge.net
W: http://apps.sourceforge.net/trac/linux-zigbee
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/lumag/lowpan.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/lowpan/lowpan.git
S: Maintained
F: net/ieee802154/
F: drivers/ieee802154/
@@ -3230,7 +3265,6 @@
S: Maintained
F: fs/jffs2/
F: include/linux/jffs2.h
-F: include/mtd/jffs2-user.h
JOURNALLING LAYER FOR BLOCK DEVICES (JBD)
P: Stephen Tweedie
diff --git a/Makefile b/Makefile
index 46e1c9d..d1216fe 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
VERSION = 2
PATCHLEVEL = 6
-SUBLEVEL = 30
-EXTRAVERSION =
+SUBLEVEL = 31
+EXTRAVERSION = -rc1
NAME = Man-Eating Seals of Antiquity
# *DOCUMENTATION*
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 2947510..aef63c8 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -1241,7 +1241,7 @@
menu "CPU Power Management"
-if (ARCH_SA1100 || ARCH_INTEGRATOR || ARCH_OMAP || ARCH_PXA)
+if (ARCH_SA1100 || ARCH_INTEGRATOR || ARCH_OMAP || ARCH_PXA || ARCH_S3C64XX)
source "drivers/cpufreq/Kconfig"
@@ -1272,6 +1272,10 @@
default y
select CPU_FREQ_DEFAULT_GOV_USERSPACE
+config CPU_FREQ_S3C64XX
+ bool "CPUfreq support for Samsung S3C64XX CPUs"
+ depends on CPU_FREQ && CPU_S3C6410
+
endif
source "drivers/cpuidle/Kconfig"
diff --git a/arch/arm/boot/compressed/head.S b/arch/arm/boot/compressed/head.S
index 01d49be..4515728 100644
--- a/arch/arm/boot/compressed/head.S
+++ b/arch/arm/boot/compressed/head.S
@@ -674,6 +674,15 @@
b __armv4_mmu_cache_off
b __armv5tej_mmu_cache_flush
+#ifdef CONFIG_CPU_FEROCEON_OLD_ID
+ /* this conflicts with the standard ARMv5TE entry */
+ .long 0x41009260 @ Old Feroceon
+ .long 0xff00fff0
+ b __armv4_mmu_cache_on
+ b __armv4_mmu_cache_off
+ b __armv5tej_mmu_cache_flush
+#endif
+
.word 0x66015261 @ FA526
.word 0xff01fff1
b __fa526_cache_on
diff --git a/arch/arm/common/gic.c b/arch/arm/common/gic.c
index 664c7b8..337741f 100644
--- a/arch/arm/common/gic.c
+++ b/arch/arm/common/gic.c
@@ -117,7 +117,7 @@
u32 val;
spin_lock(&irq_controller_lock);
- irq_desc[irq].cpu = cpu;
+ irq_desc[irq].node = cpu;
val = readl(reg) & ~(0xff << shift);
val |= 1 << (cpu + shift);
writel(val, reg);
diff --git a/arch/arm/common/vic.c b/arch/arm/common/vic.c
index 887c6eb..6ed8983 100644
--- a/arch/arm/common/vic.c
+++ b/arch/arm/common/vic.c
@@ -229,14 +229,18 @@
{
struct vic_device *v = vic_from_irq(irq);
unsigned int off = irq & 31;
+ u32 bit = 1 << off;
if (!v)
return -EINVAL;
+ if (!(bit & v->resume_sources))
+ return -EINVAL;
+
if (on)
- v->resume_irqs |= 1 << off;
+ v->resume_irqs |= bit;
else
- v->resume_irqs &= ~(1 << off);
+ v->resume_irqs &= ~bit;
return 0;
}
diff --git a/arch/arm/configs/mini2440_defconfig b/arch/arm/configs/mini2440_defconfig
new file mode 100644
index 0000000..e49ed40
--- /dev/null
+++ b/arch/arm/configs/mini2440_defconfig
@@ -0,0 +1,2097 @@
+#
+# Automatically generated make config: don't edit
+# Linux kernel version: 2.6.30-rc6
+# Wed May 20 12:29:51 2009
+#
+CONFIG_ARM=y
+CONFIG_HAVE_PWM=y
+CONFIG_SYS_SUPPORTS_APM_EMULATION=y
+CONFIG_GENERIC_GPIO=y
+# CONFIG_GENERIC_TIME is not set
+# CONFIG_GENERIC_CLOCKEVENTS is not set
+CONFIG_MMU=y
+CONFIG_NO_IOPORT=y
+CONFIG_GENERIC_HARDIRQS=y
+CONFIG_STACKTRACE_SUPPORT=y
+CONFIG_HAVE_LATENCYTOP_SUPPORT=y
+CONFIG_LOCKDEP_SUPPORT=y
+CONFIG_TRACE_IRQFLAGS_SUPPORT=y
+CONFIG_HARDIRQS_SW_RESEND=y
+CONFIG_GENERIC_IRQ_PROBE=y
+CONFIG_RWSEM_GENERIC_SPINLOCK=y
+# CONFIG_ARCH_HAS_ILOG2_U32 is not set
+# CONFIG_ARCH_HAS_ILOG2_U64 is not set
+CONFIG_GENERIC_HWEIGHT=y
+CONFIG_GENERIC_CALIBRATE_DELAY=y
+CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y
+CONFIG_VECTORS_BASE=0xffff0000
+CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
+
+#
+# General setup
+#
+CONFIG_EXPERIMENTAL=y
+CONFIG_BROKEN_ON_SMP=y
+CONFIG_INIT_ENV_ARG_LIMIT=32
+CONFIG_LOCALVERSION=""
+# CONFIG_LOCALVERSION_AUTO is not set
+CONFIG_SWAP=y
+CONFIG_SYSVIPC=y
+CONFIG_SYSVIPC_SYSCTL=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_POSIX_MQUEUE_SYSCTL=y
+# CONFIG_BSD_PROCESS_ACCT is not set
+# CONFIG_TASKSTATS is not set
+# CONFIG_AUDIT is not set
+
+#
+# RCU Subsystem
+#
+CONFIG_CLASSIC_RCU=y
+# CONFIG_TREE_RCU is not set
+# CONFIG_PREEMPT_RCU is not set
+# CONFIG_TREE_RCU_TRACE is not set
+# CONFIG_PREEMPT_RCU_TRACE is not set
+# CONFIG_IKCONFIG is not set
+CONFIG_LOG_BUF_SHIFT=17
+# CONFIG_GROUP_SCHED is not set
+# CONFIG_CGROUPS is not set
+# CONFIG_SYSFS_DEPRECATED_V2 is not set
+CONFIG_RELAY=y
+CONFIG_NAMESPACES=y
+CONFIG_UTS_NS=y
+CONFIG_IPC_NS=y
+# CONFIG_USER_NS is not set
+# CONFIG_PID_NS is not set
+# CONFIG_NET_NS is not set
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_INITRAMFS_SOURCE=""
+CONFIG_RD_GZIP=y
+CONFIG_RD_BZIP2=y
+CONFIG_RD_LZMA=y
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
+CONFIG_SYSCTL=y
+CONFIG_ANON_INODES=y
+# CONFIG_EMBEDDED is not set
+CONFIG_UID16=y
+CONFIG_SYSCTL_SYSCALL=y
+CONFIG_KALLSYMS=y
+# CONFIG_KALLSYMS_ALL is not set
+# CONFIG_KALLSYMS_EXTRA_PASS is not set
+CONFIG_STRIP_ASM_SYMS=y
+CONFIG_HOTPLUG=y
+CONFIG_PRINTK=y
+CONFIG_BUG=y
+CONFIG_ELF_CORE=y
+CONFIG_BASE_FULL=y
+CONFIG_FUTEX=y
+CONFIG_EPOLL=y
+CONFIG_SIGNALFD=y
+CONFIG_TIMERFD=y
+CONFIG_EVENTFD=y
+CONFIG_SHMEM=y
+CONFIG_AIO=y
+CONFIG_VM_EVENT_COUNTERS=y
+CONFIG_SLUB_DEBUG=y
+# CONFIG_COMPAT_BRK is not set
+# CONFIG_SLAB is not set
+CONFIG_SLUB=y
+# CONFIG_SLOB is not set
+# CONFIG_PROFILING is not set
+# CONFIG_MARKERS is not set
+CONFIG_HAVE_OPROFILE=y
+# CONFIG_KPROBES is not set
+CONFIG_HAVE_KPROBES=y
+CONFIG_HAVE_KRETPROBES=y
+CONFIG_HAVE_CLK=y
+# CONFIG_SLOW_WORK is not set
+CONFIG_HAVE_GENERIC_DMA_COHERENT=y
+CONFIG_SLABINFO=y
+CONFIG_RT_MUTEXES=y
+CONFIG_BASE_SMALL=0
+CONFIG_MODULES=y
+CONFIG_MODULE_FORCE_LOAD=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODULE_FORCE_UNLOAD=y
+# CONFIG_MODVERSIONS is not set
+# CONFIG_MODULE_SRCVERSION_ALL is not set
+CONFIG_BLOCK=y
+CONFIG_LBD=y
+# CONFIG_BLK_DEV_BSG is not set
+CONFIG_BLK_DEV_INTEGRITY=y
+
+#
+# IO Schedulers
+#
+CONFIG_IOSCHED_NOOP=y
+CONFIG_IOSCHED_AS=y
+CONFIG_IOSCHED_DEADLINE=y
+CONFIG_IOSCHED_CFQ=y
+CONFIG_DEFAULT_AS=y
+# CONFIG_DEFAULT_DEADLINE is not set
+# CONFIG_DEFAULT_CFQ is not set
+# CONFIG_DEFAULT_NOOP is not set
+CONFIG_DEFAULT_IOSCHED="anticipatory"
+CONFIG_FREEZER=y
+
+#
+# System Type
+#
+# CONFIG_ARCH_AAEC2000 is not set
+# CONFIG_ARCH_INTEGRATOR is not set
+# CONFIG_ARCH_REALVIEW is not set
+# CONFIG_ARCH_VERSATILE is not set
+# CONFIG_ARCH_AT91 is not set
+# CONFIG_ARCH_CLPS711X is not set
+# CONFIG_ARCH_EBSA110 is not set
+# CONFIG_ARCH_EP93XX is not set
+# CONFIG_ARCH_GEMINI is not set
+# CONFIG_ARCH_FOOTBRIDGE is not set
+# CONFIG_ARCH_NETX is not set
+# CONFIG_ARCH_H720X is not set
+# CONFIG_ARCH_IMX is not set
+# CONFIG_ARCH_IOP13XX is not set
+# CONFIG_ARCH_IOP32X is not set
+# CONFIG_ARCH_IOP33X is not set
+# CONFIG_ARCH_IXP23XX is not set
+# CONFIG_ARCH_IXP2000 is not set
+# CONFIG_ARCH_IXP4XX is not set
+# CONFIG_ARCH_L7200 is not set
+# CONFIG_ARCH_KIRKWOOD is not set
+# CONFIG_ARCH_KS8695 is not set
+# CONFIG_ARCH_NS9XXX is not set
+# CONFIG_ARCH_LOKI is not set
+# CONFIG_ARCH_MV78XX0 is not set
+# CONFIG_ARCH_MXC is not set
+# CONFIG_ARCH_ORION5X is not set
+# CONFIG_ARCH_PNX4008 is not set
+# CONFIG_ARCH_PXA is not set
+# CONFIG_ARCH_MMP is not set
+# CONFIG_ARCH_RPC is not set
+# CONFIG_ARCH_SA1100 is not set
+CONFIG_ARCH_S3C2410=y
+# CONFIG_ARCH_S3C64XX is not set
+# CONFIG_ARCH_SHARK is not set
+# CONFIG_ARCH_LH7A40X is not set
+# CONFIG_ARCH_DAVINCI is not set
+# CONFIG_ARCH_OMAP is not set
+# CONFIG_ARCH_MSM is not set
+# CONFIG_ARCH_W90X900 is not set
+CONFIG_PLAT_S3C24XX=y
+CONFIG_S3C2410_CLOCK=y
+CONFIG_CPU_S3C244X=y
+CONFIG_S3C24XX_PWM=y
+CONFIG_S3C24XX_GPIO_EXTRA=0
+CONFIG_S3C2410_DMA=y
+# CONFIG_S3C2410_DMA_DEBUG is not set
+CONFIG_S3C24XX_ADC=y
+CONFIG_PLAT_S3C=y
+CONFIG_CPU_LLSERIAL_S3C2440_ONLY=y
+CONFIG_CPU_LLSERIAL_S3C2440=y
+
+#
+# Boot options
+#
+# CONFIG_S3C_BOOT_WATCHDOG is not set
+# CONFIG_S3C_BOOT_ERROR_RESET is not set
+CONFIG_S3C_BOOT_UART_FORCE_FIFO=y
+
+#
+# Power management
+#
+# CONFIG_S3C2410_PM_DEBUG is not set
+# CONFIG_S3C2410_PM_CHECK is not set
+CONFIG_S3C_LOWLEVEL_UART_PORT=0
+CONFIG_S3C_GPIO_SPACE=0
+
+#
+# S3C2400 Machines
+#
+CONFIG_S3C2410_PM=y
+CONFIG_S3C2410_GPIO=y
+
+#
+# S3C2410 Machines
+#
+# CONFIG_ARCH_SMDK2410 is not set
+# CONFIG_ARCH_H1940 is not set
+# CONFIG_MACH_N30 is not set
+# CONFIG_ARCH_BAST is not set
+# CONFIG_MACH_OTOM is not set
+# CONFIG_MACH_AML_M5900 is not set
+# CONFIG_MACH_TCT_HAMMER is not set
+# CONFIG_MACH_VR1000 is not set
+# CONFIG_MACH_QT2410 is not set
+
+#
+# S3C2412 Machines
+#
+# CONFIG_MACH_JIVE is not set
+# CONFIG_MACH_SMDK2413 is not set
+# CONFIG_MACH_SMDK2412 is not set
+# CONFIG_MACH_VSTMS is not set
+CONFIG_CPU_S3C2440=y
+CONFIG_S3C2440_DMA=y
+
+#
+# S3C2440 Machines
+#
+# CONFIG_MACH_ANUBIS is not set
+# CONFIG_MACH_OSIRIS is not set
+# CONFIG_MACH_RX3715 is not set
+# CONFIG_ARCH_S3C2440 is not set
+# CONFIG_MACH_NEXCODER_2440 is not set
+# CONFIG_MACH_AT2440EVB is not set
+CONFIG_MACH_MINI2440=y
+
+#
+# S3C2442 Machines
+#
+
+#
+# S3C2443 Machines
+#
+# CONFIG_MACH_SMDK2443 is not set
+
+#
+# Processor Type
+#
+CONFIG_CPU_32=y
+CONFIG_CPU_ARM920T=y
+CONFIG_CPU_32v4T=y
+CONFIG_CPU_ABRT_EV4T=y
+CONFIG_CPU_PABRT_NOIFAR=y
+CONFIG_CPU_CACHE_V4WT=y
+CONFIG_CPU_CACHE_VIVT=y
+CONFIG_CPU_COPY_V4WB=y
+CONFIG_CPU_TLB_V4WBI=y
+CONFIG_CPU_CP15=y
+CONFIG_CPU_CP15_MMU=y
+
+#
+# Processor Features
+#
+CONFIG_ARM_THUMB=y
+# CONFIG_CPU_ICACHE_DISABLE is not set
+# CONFIG_CPU_DCACHE_DISABLE is not set
+# CONFIG_CPU_DCACHE_WRITETHROUGH is not set
+# CONFIG_OUTER_CACHE is not set
+
+#
+# Bus support
+#
+# CONFIG_PCI_SYSCALL is not set
+# CONFIG_ARCH_SUPPORTS_MSI is not set
+# CONFIG_PCCARD is not set
+
+#
+# Kernel Features
+#
+CONFIG_VMSPLIT_3G=y
+# CONFIG_VMSPLIT_2G is not set
+# CONFIG_VMSPLIT_1G is not set
+CONFIG_PAGE_OFFSET=0xC0000000
+# CONFIG_PREEMPT is not set
+CONFIG_HZ=200
+CONFIG_AEABI=y
+# CONFIG_OABI_COMPAT is not set
+CONFIG_ARCH_FLATMEM_HAS_HOLES=y
+# CONFIG_ARCH_SPARSEMEM_DEFAULT is not set
+# CONFIG_ARCH_SELECT_MEMORY_MODEL is not set
+# CONFIG_HIGHMEM is not set
+CONFIG_SELECT_MEMORY_MODEL=y
+CONFIG_FLATMEM_MANUAL=y
+# CONFIG_DISCONTIGMEM_MANUAL is not set
+# CONFIG_SPARSEMEM_MANUAL is not set
+CONFIG_FLATMEM=y
+CONFIG_FLAT_NODE_MEM_MAP=y
+CONFIG_PAGEFLAGS_EXTENDED=y
+CONFIG_SPLIT_PTLOCK_CPUS=4096
+# CONFIG_PHYS_ADDR_T_64BIT is not set
+CONFIG_ZONE_DMA_FLAG=0
+CONFIG_VIRT_TO_BUS=y
+CONFIG_UNEVICTABLE_LRU=y
+CONFIG_HAVE_MLOCK=y
+CONFIG_HAVE_MLOCKED_PAGE_BIT=y
+CONFIG_ALIGNMENT_TRAP=y
+
+#
+# Boot options
+#
+CONFIG_ZBOOT_ROM_TEXT=0
+CONFIG_ZBOOT_ROM_BSS=0
+CONFIG_CMDLINE=""
+# CONFIG_XIP_KERNEL is not set
+CONFIG_KEXEC=y
+CONFIG_ATAGS_PROC=y
+
+#
+# CPU Power Management
+#
+CONFIG_CPU_IDLE=y
+CONFIG_CPU_IDLE_GOV_LADDER=y
+
+#
+# Floating point emulation
+#
+
+#
+# At least one emulation must be selected
+#
+
+#
+# Userspace binary formats
+#
+CONFIG_BINFMT_ELF=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_HAVE_AOUT=y
+CONFIG_BINFMT_AOUT=m
+CONFIG_BINFMT_MISC=m
+
+#
+# Power management options
+#
+CONFIG_PM=y
+# CONFIG_PM_DEBUG is not set
+CONFIG_PM_SLEEP=y
+CONFIG_SUSPEND=y
+CONFIG_SUSPEND_FREEZER=y
+CONFIG_APM_EMULATION=y
+CONFIG_ARCH_SUSPEND_POSSIBLE=y
+CONFIG_NET=y
+
+#
+# Networking options
+#
+CONFIG_PACKET=y
+CONFIG_PACKET_MMAP=y
+CONFIG_UNIX=y
+CONFIG_XFRM=y
+CONFIG_XFRM_USER=m
+# CONFIG_XFRM_SUB_POLICY is not set
+# CONFIG_XFRM_MIGRATE is not set
+# CONFIG_XFRM_STATISTICS is not set
+CONFIG_NET_KEY=m
+# CONFIG_NET_KEY_MIGRATE is not set
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_ASK_IP_FIB_HASH=y
+# CONFIG_IP_FIB_TRIE is not set
+CONFIG_IP_FIB_HASH=y
+CONFIG_IP_MULTIPLE_TABLES=y
+CONFIG_IP_ROUTE_MULTIPATH=y
+CONFIG_IP_ROUTE_VERBOSE=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_IP_PNP_RARP=y
+# CONFIG_NET_IPIP is not set
+# CONFIG_NET_IPGRE is not set
+CONFIG_IP_MROUTE=y
+CONFIG_IP_PIMSM_V1=y
+CONFIG_IP_PIMSM_V2=y
+# CONFIG_ARPD is not set
+CONFIG_SYN_COOKIES=y
+# CONFIG_INET_AH is not set
+# CONFIG_INET_ESP is not set
+# CONFIG_INET_IPCOMP is not set
+# CONFIG_INET_XFRM_TUNNEL is not set
+# CONFIG_INET_TUNNEL is not set
+# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
+# CONFIG_INET_XFRM_MODE_TUNNEL is not set
+# CONFIG_INET_XFRM_MODE_BEET is not set
+# CONFIG_INET_LRO is not set
+CONFIG_INET_DIAG=m
+CONFIG_INET_TCP_DIAG=m
+# CONFIG_TCP_CONG_ADVANCED is not set
+CONFIG_TCP_CONG_CUBIC=y
+CONFIG_DEFAULT_TCP_CONG="cubic"
+# CONFIG_TCP_MD5SIG is not set
+# CONFIG_IPV6 is not set
+# CONFIG_NETWORK_SECMARK is not set
+CONFIG_NETFILTER=y
+# CONFIG_NETFILTER_DEBUG is not set
+CONFIG_NETFILTER_ADVANCED=y
+CONFIG_BRIDGE_NETFILTER=y
+
+#
+# Core Netfilter Configuration
+#
+# CONFIG_NETFILTER_NETLINK_QUEUE is not set
+# CONFIG_NETFILTER_NETLINK_LOG is not set
+# CONFIG_NF_CONNTRACK is not set
+# CONFIG_NETFILTER_XTABLES is not set
+# CONFIG_IP_VS is not set
+
+#
+# IP: Netfilter Configuration
+#
+# CONFIG_NF_DEFRAG_IPV4 is not set
+# CONFIG_IP_NF_QUEUE is not set
+# CONFIG_IP_NF_IPTABLES is not set
+# CONFIG_IP_NF_ARPTABLES is not set
+# CONFIG_BRIDGE_NF_EBTABLES is not set
+# CONFIG_IP_DCCP is not set
+# CONFIG_IP_SCTP is not set
+# CONFIG_TIPC is not set
+# CONFIG_ATM is not set
+CONFIG_STP=m
+CONFIG_GARP=m
+CONFIG_BRIDGE=m
+# CONFIG_NET_DSA is not set
+CONFIG_VLAN_8021Q=m
+CONFIG_VLAN_8021Q_GVRP=y
+# CONFIG_DECNET is not set
+CONFIG_LLC=m
+# CONFIG_LLC2 is not set
+# CONFIG_IPX is not set
+# CONFIG_ATALK is not set
+# CONFIG_X25 is not set
+# CONFIG_LAPB is not set
+# CONFIG_ECONET is not set
+# CONFIG_WAN_ROUTER is not set
+# CONFIG_PHONET is not set
+# CONFIG_NET_SCHED is not set
+# CONFIG_DCB is not set
+
+#
+# Network testing
+#
+CONFIG_NET_PKTGEN=m
+# CONFIG_HAMRADIO is not set
+# CONFIG_CAN is not set
+# CONFIG_IRDA is not set
+CONFIG_BT=m
+CONFIG_BT_L2CAP=m
+CONFIG_BT_SCO=m
+CONFIG_BT_RFCOMM=m
+CONFIG_BT_RFCOMM_TTY=y
+CONFIG_BT_BNEP=m
+CONFIG_BT_BNEP_MC_FILTER=y
+CONFIG_BT_BNEP_PROTO_FILTER=y
+CONFIG_BT_HIDP=m
+
+#
+# Bluetooth device drivers
+#
+CONFIG_BT_HCIBTUSB=m
+CONFIG_BT_HCIBTSDIO=m
+CONFIG_BT_HCIUART=m
+CONFIG_BT_HCIUART_H4=y
+CONFIG_BT_HCIUART_BCSP=y
+CONFIG_BT_HCIUART_LL=y
+CONFIG_BT_HCIBCM203X=m
+CONFIG_BT_HCIBPA10X=m
+CONFIG_BT_HCIBFUSB=m
+CONFIG_BT_HCIVHCI=m
+# CONFIG_AF_RXRPC is not set
+CONFIG_FIB_RULES=y
+CONFIG_WIRELESS=y
+CONFIG_CFG80211=m
+CONFIG_CFG80211_REG_DEBUG=y
+CONFIG_WIRELESS_OLD_REGULATORY=y
+CONFIG_WIRELESS_EXT=y
+CONFIG_WIRELESS_EXT_SYSFS=y
+CONFIG_LIB80211=m
+CONFIG_LIB80211_CRYPT_WEP=m
+CONFIG_LIB80211_CRYPT_CCMP=m
+CONFIG_LIB80211_CRYPT_TKIP=m
+# CONFIG_LIB80211_DEBUG is not set
+CONFIG_MAC80211=m
+
+#
+# Rate control algorithm selection
+#
+CONFIG_MAC80211_RC_MINSTREL=y
+# CONFIG_MAC80211_RC_DEFAULT_PID is not set
+CONFIG_MAC80211_RC_DEFAULT_MINSTREL=y
+CONFIG_MAC80211_RC_DEFAULT="minstrel"
+CONFIG_MAC80211_MESH=y
+CONFIG_MAC80211_LEDS=y
+# CONFIG_MAC80211_DEBUGFS is not set
+# CONFIG_MAC80211_DEBUG_MENU is not set
+# CONFIG_WIMAX is not set
+# CONFIG_RFKILL is not set
+# CONFIG_NET_9P is not set
+
+#
+# Device Drivers
+#
+
+#
+# Generic Driver Options
+#
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_STANDALONE=y
+CONFIG_PREVENT_FIRMWARE_BUILD=y
+CONFIG_FW_LOADER=y
+# CONFIG_FIRMWARE_IN_KERNEL is not set
+CONFIG_EXTRA_FIRMWARE=""
+# CONFIG_DEBUG_DRIVER is not set
+# CONFIG_DEBUG_DEVRES is not set
+# CONFIG_SYS_HYPERVISOR is not set
+CONFIG_CONNECTOR=m
+CONFIG_MTD=y
+# CONFIG_MTD_DEBUG is not set
+CONFIG_MTD_CONCAT=y
+CONFIG_MTD_PARTITIONS=y
+# CONFIG_MTD_TESTS is not set
+# CONFIG_MTD_REDBOOT_PARTS is not set
+CONFIG_MTD_CMDLINE_PARTS=y
+# CONFIG_MTD_AFS_PARTS is not set
+# CONFIG_MTD_AR7_PARTS is not set
+
+#
+# User Modules And Translation Layers
+#
+CONFIG_MTD_CHAR=y
+CONFIG_MTD_BLKDEVS=y
+CONFIG_MTD_BLOCK=y
+CONFIG_FTL=y
+CONFIG_NFTL=y
+CONFIG_NFTL_RW=y
+CONFIG_INFTL=y
+CONFIG_RFD_FTL=y
+# CONFIG_SSFDC is not set
+# CONFIG_MTD_OOPS is not set
+
+#
+# RAM/ROM/Flash chip drivers
+#
+CONFIG_MTD_CFI=y
+CONFIG_MTD_JEDECPROBE=y
+CONFIG_MTD_GEN_PROBE=y
+# CONFIG_MTD_CFI_ADV_OPTIONS is not set
+CONFIG_MTD_MAP_BANK_WIDTH_1=y
+CONFIG_MTD_MAP_BANK_WIDTH_2=y
+CONFIG_MTD_MAP_BANK_WIDTH_4=y
+# CONFIG_MTD_MAP_BANK_WIDTH_8 is not set
+# CONFIG_MTD_MAP_BANK_WIDTH_16 is not set
+# CONFIG_MTD_MAP_BANK_WIDTH_32 is not set
+CONFIG_MTD_CFI_I1=y
+CONFIG_MTD_CFI_I2=y
+# CONFIG_MTD_CFI_I4 is not set
+# CONFIG_MTD_CFI_I8 is not set
+# CONFIG_MTD_CFI_INTELEXT is not set
+CONFIG_MTD_CFI_AMDSTD=y
+CONFIG_MTD_CFI_STAA=y
+CONFIG_MTD_CFI_UTIL=y
+CONFIG_MTD_RAM=y
+CONFIG_MTD_ROM=y
+# CONFIG_MTD_ABSENT is not set
+
+#
+# Mapping drivers for chip access
+#
+# CONFIG_MTD_COMPLEX_MAPPINGS is not set
+# CONFIG_MTD_PHYSMAP is not set
+# CONFIG_MTD_ARM_INTEGRATOR is not set
+# CONFIG_MTD_IMPA7 is not set
+# CONFIG_MTD_PLATRAM is not set
+
+#
+# Self-contained MTD device drivers
+#
+# CONFIG_MTD_DATAFLASH is not set
+# CONFIG_MTD_M25P80 is not set
+# CONFIG_MTD_SLRAM is not set
+# CONFIG_MTD_PHRAM is not set
+# CONFIG_MTD_MTDRAM is not set
+# CONFIG_MTD_BLOCK2MTD is not set
+
+#
+# Disk-On-Chip Device Drivers
+#
+# CONFIG_MTD_DOC2000 is not set
+# CONFIG_MTD_DOC2001 is not set
+# CONFIG_MTD_DOC2001PLUS is not set
+CONFIG_MTD_NAND=y
+CONFIG_MTD_NAND_VERIFY_WRITE=y
+# CONFIG_MTD_NAND_ECC_SMC is not set
+# CONFIG_MTD_NAND_MUSEUM_IDS is not set
+# CONFIG_MTD_NAND_GPIO is not set
+CONFIG_MTD_NAND_IDS=y
+CONFIG_MTD_NAND_S3C2410=y
+# CONFIG_MTD_NAND_S3C2410_DEBUG is not set
+# CONFIG_MTD_NAND_S3C2410_HWECC is not set
+# CONFIG_MTD_NAND_S3C2410_CLKSTOP is not set
+# CONFIG_MTD_NAND_DISKONCHIP is not set
+# CONFIG_MTD_NAND_NANDSIM is not set
+CONFIG_MTD_NAND_PLATFORM=y
+# CONFIG_MTD_ALAUDA is not set
+# CONFIG_MTD_ONENAND is not set
+
+#
+# LPDDR flash memory drivers
+#
+CONFIG_MTD_LPDDR=y
+CONFIG_MTD_QINFO_PROBE=y
+
+#
+# UBI - Unsorted block images
+#
+# CONFIG_MTD_UBI is not set
+# CONFIG_PARPORT is not set
+CONFIG_BLK_DEV=y
+# CONFIG_BLK_DEV_COW_COMMON is not set
+CONFIG_BLK_DEV_LOOP=m
+# CONFIG_BLK_DEV_CRYPTOLOOP is not set
+CONFIG_BLK_DEV_NBD=m
+# CONFIG_BLK_DEV_UB is not set
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_COUNT=16
+CONFIG_BLK_DEV_RAM_SIZE=65536
+# CONFIG_BLK_DEV_XIP is not set
+CONFIG_CDROM_PKTCDVD=m
+CONFIG_CDROM_PKTCDVD_BUFFERS=8
+# CONFIG_CDROM_PKTCDVD_WCACHE is not set
+# CONFIG_ATA_OVER_ETH is not set
+CONFIG_MISC_DEVICES=y
+# CONFIG_ICS932S401 is not set
+# CONFIG_ENCLOSURE_SERVICES is not set
+# CONFIG_ISL29003 is not set
+# CONFIG_C2PORT is not set
+
+#
+# EEPROM support
+#
+CONFIG_EEPROM_AT24=y
+# CONFIG_EEPROM_AT25 is not set
+# CONFIG_EEPROM_LEGACY is not set
+# CONFIG_EEPROM_93CX6 is not set
+CONFIG_HAVE_IDE=y
+# CONFIG_IDE is not set
+
+#
+# SCSI device support
+#
+# CONFIG_RAID_ATTRS is not set
+CONFIG_SCSI=m
+CONFIG_SCSI_DMA=y
+# CONFIG_SCSI_TGT is not set
+# CONFIG_SCSI_NETLINK is not set
+# CONFIG_SCSI_PROC_FS is not set
+
+#
+# SCSI support type (disk, tape, CD-ROM)
+#
+CONFIG_BLK_DEV_SD=m
+# CONFIG_CHR_DEV_ST is not set
+# CONFIG_CHR_DEV_OSST is not set
+# CONFIG_BLK_DEV_SR is not set
+CONFIG_CHR_DEV_SG=m
+# CONFIG_CHR_DEV_SCH is not set
+
+#
+# Some SCSI devices (e.g. CD jukebox) support multiple LUNs
+#
+# CONFIG_SCSI_MULTI_LUN is not set
+# CONFIG_SCSI_CONSTANTS is not set
+# CONFIG_SCSI_LOGGING is not set
+# CONFIG_SCSI_SCAN_ASYNC is not set
+CONFIG_SCSI_WAIT_SCAN=m
+
+#
+# SCSI Transports
+#
+# CONFIG_SCSI_SPI_ATTRS is not set
+# CONFIG_SCSI_FC_ATTRS is not set
+# CONFIG_SCSI_ISCSI_ATTRS is not set
+# CONFIG_SCSI_SAS_LIBSAS is not set
+# CONFIG_SCSI_SRP_ATTRS is not set
+# CONFIG_SCSI_LOWLEVEL is not set
+# CONFIG_SCSI_DH is not set
+# CONFIG_SCSI_OSD_INITIATOR is not set
+# CONFIG_ATA is not set
+# CONFIG_MD is not set
+CONFIG_NETDEVICES=y
+CONFIG_COMPAT_NET_DEV_OPS=y
+# CONFIG_DUMMY is not set
+# CONFIG_BONDING is not set
+# CONFIG_MACVLAN is not set
+# CONFIG_EQUALIZER is not set
+CONFIG_TUN=m
+# CONFIG_VETH is not set
+# CONFIG_PHYLIB is not set
+CONFIG_NET_ETHERNET=y
+CONFIG_MII=y
+# CONFIG_AX88796 is not set
+# CONFIG_SMC91X is not set
+CONFIG_DM9000=y
+CONFIG_DM9000_DEBUGLEVEL=4
+# CONFIG_DM9000_FORCE_SIMPLE_PHY_POLL is not set
+# CONFIG_ENC28J60 is not set
+# CONFIG_ETHOC is not set
+# CONFIG_SMC911X is not set
+# CONFIG_SMSC911X is not set
+# CONFIG_DNET is not set
+# CONFIG_IBM_NEW_EMAC_ZMII is not set
+# CONFIG_IBM_NEW_EMAC_RGMII is not set
+# CONFIG_IBM_NEW_EMAC_TAH is not set
+# CONFIG_IBM_NEW_EMAC_EMAC4 is not set
+# CONFIG_IBM_NEW_EMAC_NO_FLOW_CTRL is not set
+# CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set
+# CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set
+# CONFIG_B44 is not set
+# CONFIG_NETDEV_1000 is not set
+# CONFIG_NETDEV_10000 is not set
+
+#
+# Wireless LAN
+#
+# CONFIG_WLAN_PRE80211 is not set
+CONFIG_WLAN_80211=y
+CONFIG_LIBERTAS=m
+# CONFIG_LIBERTAS_USB is not set
+CONFIG_LIBERTAS_SDIO=m
+# CONFIG_LIBERTAS_SPI is not set
+# CONFIG_LIBERTAS_DEBUG is not set
+# CONFIG_LIBERTAS_THINFIRM is not set
+# CONFIG_AT76C50X_USB is not set
+# CONFIG_USB_ZD1201 is not set
+# CONFIG_USB_NET_RNDIS_WLAN is not set
+# CONFIG_RTL8187 is not set
+# CONFIG_MAC80211_HWSIM is not set
+# CONFIG_P54_COMMON is not set
+# CONFIG_AR9170_USB is not set
+CONFIG_HOSTAP=m
+CONFIG_HOSTAP_FIRMWARE=y
+CONFIG_HOSTAP_FIRMWARE_NVRAM=y
+# CONFIG_B43 is not set
+# CONFIG_B43LEGACY is not set
+CONFIG_ZD1211RW=m
+CONFIG_ZD1211RW_DEBUG=y
+# CONFIG_RT2X00 is not set
+
+#
+# Enable WiMAX (Networking options) to see the WiMAX drivers
+#
+
+#
+# USB Network Adapters
+#
+# CONFIG_USB_CATC is not set
+# CONFIG_USB_KAWETH is not set
+# CONFIG_USB_PEGASUS is not set
+# CONFIG_USB_RTL8150 is not set
+# CONFIG_USB_USBNET is not set
+# CONFIG_WAN is not set
+CONFIG_PPP=m
+CONFIG_PPP_MULTILINK=y
+CONFIG_PPP_FILTER=y
+CONFIG_PPP_ASYNC=m
+CONFIG_PPP_SYNC_TTY=m
+CONFIG_PPP_DEFLATE=m
+CONFIG_PPP_BSDCOMP=m
+CONFIG_PPP_MPPE=m
+# CONFIG_PPPOE is not set
+# CONFIG_PPPOL2TP is not set
+# CONFIG_SLIP is not set
+CONFIG_SLHC=m
+# CONFIG_NETCONSOLE is not set
+# CONFIG_NETPOLL is not set
+# CONFIG_NET_POLL_CONTROLLER is not set
+# CONFIG_ISDN is not set
+
+#
+# Input device support
+#
+CONFIG_INPUT=y
+CONFIG_INPUT_FF_MEMLESS=y
+# CONFIG_INPUT_POLLDEV is not set
+
+#
+# Userland interfaces
+#
+CONFIG_INPUT_MOUSEDEV=y
+CONFIG_INPUT_MOUSEDEV_PSAUX=y
+CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024
+CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768
+# CONFIG_INPUT_JOYDEV is not set
+CONFIG_INPUT_EVDEV=y
+CONFIG_INPUT_EVBUG=m
+
+#
+# Input Device Drivers
+#
+CONFIG_INPUT_KEYBOARD=y
+# CONFIG_KEYBOARD_ATKBD is not set
+# CONFIG_KEYBOARD_SUNKBD is not set
+# CONFIG_KEYBOARD_LKKBD is not set
+# CONFIG_KEYBOARD_XTKBD is not set
+# CONFIG_KEYBOARD_NEWTON is not set
+# CONFIG_KEYBOARD_STOWAWAY is not set
+CONFIG_KEYBOARD_GPIO=y
+CONFIG_INPUT_MOUSE=y
+CONFIG_MOUSE_PS2=y
+CONFIG_MOUSE_PS2_ALPS=y
+CONFIG_MOUSE_PS2_LOGIPS2PP=y
+CONFIG_MOUSE_PS2_SYNAPTICS=y
+CONFIG_MOUSE_PS2_TRACKPOINT=y
+# CONFIG_MOUSE_PS2_ELANTECH is not set
+# CONFIG_MOUSE_PS2_TOUCHKIT is not set
+# CONFIG_MOUSE_SERIAL is not set
+# CONFIG_MOUSE_APPLETOUCH is not set
+# CONFIG_MOUSE_BCM5974 is not set
+# CONFIG_MOUSE_VSXXXAA is not set
+# CONFIG_MOUSE_GPIO is not set
+# CONFIG_INPUT_JOYSTICK is not set
+# CONFIG_INPUT_TABLET is not set
+CONFIG_INPUT_TOUCHSCREEN=y
+# CONFIG_TOUCHSCREEN_ADS7846 is not set
+# CONFIG_TOUCHSCREEN_AD7877 is not set
+# CONFIG_TOUCHSCREEN_AD7879_I2C is not set
+# CONFIG_TOUCHSCREEN_AD7879_SPI is not set
+# CONFIG_TOUCHSCREEN_AD7879 is not set
+# CONFIG_TOUCHSCREEN_FUJITSU is not set
+# CONFIG_TOUCHSCREEN_GUNZE is not set
+# CONFIG_TOUCHSCREEN_ELO is not set
+# CONFIG_TOUCHSCREEN_WACOM_W8001 is not set
+# CONFIG_TOUCHSCREEN_MTOUCH is not set
+# CONFIG_TOUCHSCREEN_INEXIO is not set
+# CONFIG_TOUCHSCREEN_MK712 is not set
+# CONFIG_TOUCHSCREEN_PENMOUNT is not set
+# CONFIG_TOUCHSCREEN_TOUCHRIGHT is not set
+# CONFIG_TOUCHSCREEN_TOUCHWIN is not set
+# CONFIG_TOUCHSCREEN_USB_COMPOSITE is not set
+# CONFIG_TOUCHSCREEN_TOUCHIT213 is not set
+# CONFIG_TOUCHSCREEN_TSC2007 is not set
+# CONFIG_INPUT_MISC is not set
+
+#
+# Hardware I/O ports
+#
+CONFIG_SERIO=y
+CONFIG_SERIO_SERPORT=y
+CONFIG_SERIO_LIBPS2=y
+CONFIG_SERIO_RAW=y
+# CONFIG_GAMEPORT is not set
+
+#
+# Character devices
+#
+CONFIG_VT=y
+CONFIG_CONSOLE_TRANSLATIONS=y
+CONFIG_VT_CONSOLE=y
+CONFIG_HW_CONSOLE=y
+CONFIG_VT_HW_CONSOLE_BINDING=y
+CONFIG_DEVKMEM=y
+# CONFIG_SERIAL_NONSTANDARD is not set
+
+#
+# Serial drivers
+#
+# CONFIG_SERIAL_8250 is not set
+
+#
+# Non-8250 serial port support
+#
+CONFIG_SERIAL_SAMSUNG=y
+CONFIG_SERIAL_SAMSUNG_UARTS=3
+CONFIG_SERIAL_SAMSUNG_CONSOLE=y
+CONFIG_SERIAL_S3C2440=y
+# CONFIG_SERIAL_MAX3100 is not set
+CONFIG_SERIAL_CORE=y
+CONFIG_SERIAL_CORE_CONSOLE=y
+CONFIG_UNIX98_PTYS=y
+# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
+CONFIG_LEGACY_PTYS=y
+CONFIG_LEGACY_PTY_COUNT=128
+CONFIG_IPMI_HANDLER=m
+# CONFIG_IPMI_PANIC_EVENT is not set
+CONFIG_IPMI_DEVICE_INTERFACE=m
+CONFIG_IPMI_SI=m
+CONFIG_IPMI_WATCHDOG=m
+CONFIG_IPMI_POWEROFF=m
+CONFIG_HW_RANDOM=y
+# CONFIG_HW_RANDOM_TIMERIOMEM is not set
+# CONFIG_R3964 is not set
+# CONFIG_RAW_DRIVER is not set
+# CONFIG_TCG_TPM is not set
+CONFIG_I2C=y
+CONFIG_I2C_BOARDINFO=y
+CONFIG_I2C_CHARDEV=y
+CONFIG_I2C_HELPER_AUTO=y
+CONFIG_I2C_ALGOBIT=y
+
+#
+# I2C Hardware Bus support
+#
+
+#
+# I2C system bus drivers (mostly embedded / system-on-chip)
+#
+# CONFIG_I2C_GPIO is not set
+# CONFIG_I2C_OCORES is not set
+CONFIG_I2C_S3C2410=y
+CONFIG_I2C_SIMTEC=y
+
+#
+# External I2C/SMBus adapter drivers
+#
+# CONFIG_I2C_PARPORT_LIGHT is not set
+# CONFIG_I2C_TAOS_EVM is not set
+# CONFIG_I2C_TINY_USB is not set
+
+#
+# Other I2C/SMBus bus drivers
+#
+# CONFIG_I2C_PCA_PLATFORM is not set
+# CONFIG_I2C_STUB is not set
+
+#
+# Miscellaneous I2C Chip support
+#
+# CONFIG_DS1682 is not set
+# CONFIG_SENSORS_PCF8574 is not set
+# CONFIG_PCF8575 is not set
+# CONFIG_SENSORS_PCA9539 is not set
+# CONFIG_SENSORS_MAX6875 is not set
+CONFIG_SENSORS_TSL2550=m
+# CONFIG_I2C_DEBUG_CORE is not set
+# CONFIG_I2C_DEBUG_ALGO is not set
+# CONFIG_I2C_DEBUG_BUS is not set
+# CONFIG_I2C_DEBUG_CHIP is not set
+CONFIG_SPI=y
+# CONFIG_SPI_DEBUG is not set
+CONFIG_SPI_MASTER=y
+
+#
+# SPI Master Controller Drivers
+#
+CONFIG_SPI_BITBANG=y
+# CONFIG_SPI_GPIO is not set
+CONFIG_SPI_S3C24XX=y
+# CONFIG_SPI_S3C24XX_GPIO is not set
+
+#
+# SPI Protocol Masters
+#
+CONFIG_SPI_SPIDEV=y
+# CONFIG_SPI_TLE62X0 is not set
+CONFIG_ARCH_REQUIRE_GPIOLIB=y
+CONFIG_GPIOLIB=y
+# CONFIG_DEBUG_GPIO is not set
+CONFIG_GPIO_SYSFS=y
+
+#
+# Memory mapped GPIO expanders:
+#
+
+#
+# I2C GPIO expanders:
+#
+# CONFIG_GPIO_MAX732X is not set
+# CONFIG_GPIO_PCA953X is not set
+# CONFIG_GPIO_PCF857X is not set
+
+#
+# PCI GPIO expanders:
+#
+
+#
+# SPI GPIO expanders:
+#
+# CONFIG_GPIO_MAX7301 is not set
+# CONFIG_GPIO_MCP23S08 is not set
+# CONFIG_W1 is not set
+# CONFIG_POWER_SUPPLY is not set
+CONFIG_HWMON=y
+# CONFIG_HWMON_VID is not set
+# CONFIG_SENSORS_AD7414 is not set
+# CONFIG_SENSORS_AD7418 is not set
+# CONFIG_SENSORS_ADCXX is not set
+# CONFIG_SENSORS_ADM1021 is not set
+# CONFIG_SENSORS_ADM1025 is not set
+# CONFIG_SENSORS_ADM1026 is not set
+# CONFIG_SENSORS_ADM1029 is not set
+# CONFIG_SENSORS_ADM1031 is not set
+# CONFIG_SENSORS_ADM9240 is not set
+# CONFIG_SENSORS_ADT7462 is not set
+# CONFIG_SENSORS_ADT7470 is not set
+# CONFIG_SENSORS_ADT7473 is not set
+# CONFIG_SENSORS_ADT7475 is not set
+# CONFIG_SENSORS_ATXP1 is not set
+# CONFIG_SENSORS_DS1621 is not set
+# CONFIG_SENSORS_F71805F is not set
+# CONFIG_SENSORS_F71882FG is not set
+# CONFIG_SENSORS_F75375S is not set
+# CONFIG_SENSORS_G760A is not set
+# CONFIG_SENSORS_GL518SM is not set
+# CONFIG_SENSORS_GL520SM is not set
+# CONFIG_SENSORS_IBMAEM is not set
+# CONFIG_SENSORS_IBMPEX is not set
+# CONFIG_SENSORS_IT87 is not set
+# CONFIG_SENSORS_LM63 is not set
+# CONFIG_SENSORS_LM70 is not set
+CONFIG_SENSORS_LM75=y
+# CONFIG_SENSORS_LM77 is not set
+# CONFIG_SENSORS_LM78 is not set
+# CONFIG_SENSORS_LM80 is not set
+# CONFIG_SENSORS_LM83 is not set
+# CONFIG_SENSORS_LM85 is not set
+# CONFIG_SENSORS_LM87 is not set
+# CONFIG_SENSORS_LM90 is not set
+# CONFIG_SENSORS_LM92 is not set
+# CONFIG_SENSORS_LM93 is not set
+# CONFIG_SENSORS_LTC4215 is not set
+# CONFIG_SENSORS_LTC4245 is not set
+# CONFIG_SENSORS_LM95241 is not set
+# CONFIG_SENSORS_MAX1111 is not set
+# CONFIG_SENSORS_MAX1619 is not set
+# CONFIG_SENSORS_MAX6650 is not set
+# CONFIG_SENSORS_PC87360 is not set
+# CONFIG_SENSORS_PC87427 is not set
+# CONFIG_SENSORS_PCF8591 is not set
+# CONFIG_SENSORS_SHT15 is not set
+# CONFIG_SENSORS_DME1737 is not set
+# CONFIG_SENSORS_SMSC47M1 is not set
+# CONFIG_SENSORS_SMSC47M192 is not set
+# CONFIG_SENSORS_SMSC47B397 is not set
+# CONFIG_SENSORS_ADS7828 is not set
+# CONFIG_SENSORS_THMC50 is not set
+# CONFIG_SENSORS_VT1211 is not set
+# CONFIG_SENSORS_W83781D is not set
+# CONFIG_SENSORS_W83791D is not set
+# CONFIG_SENSORS_W83792D is not set
+# CONFIG_SENSORS_W83793 is not set
+# CONFIG_SENSORS_W83L785TS is not set
+# CONFIG_SENSORS_W83L786NG is not set
+# CONFIG_SENSORS_W83627HF is not set
+# CONFIG_SENSORS_W83627EHF is not set
+# CONFIG_SENSORS_LIS3_SPI is not set
+# CONFIG_HWMON_DEBUG_CHIP is not set
+CONFIG_THERMAL=m
+# CONFIG_THERMAL_HWMON is not set
+CONFIG_WATCHDOG=y
+# CONFIG_WATCHDOG_NOWAYOUT is not set
+
+#
+# Watchdog Device Drivers
+#
+# CONFIG_SOFT_WATCHDOG is not set
+CONFIG_S3C2410_WATCHDOG=y
+
+#
+# USB-based Watchdog Cards
+#
+# CONFIG_USBPCWATCHDOG is not set
+CONFIG_SSB_POSSIBLE=y
+
+#
+# Sonics Silicon Backplane
+#
+# CONFIG_SSB is not set
+
+#
+# Multifunction device drivers
+#
+# CONFIG_MFD_CORE is not set
+# CONFIG_MFD_SM501 is not set
+# CONFIG_MFD_ASIC3 is not set
+# CONFIG_HTC_EGPIO is not set
+# CONFIG_HTC_PASIC3 is not set
+# CONFIG_TPS65010 is not set
+# CONFIG_TWL4030_CORE is not set
+# CONFIG_MFD_TMIO is not set
+# CONFIG_MFD_T7L66XB is not set
+# CONFIG_MFD_TC6387XB is not set
+# CONFIG_MFD_TC6393XB is not set
+# CONFIG_PMIC_DA903X is not set
+# CONFIG_MFD_WM8400 is not set
+# CONFIG_MFD_WM8350_I2C is not set
+# CONFIG_MFD_PCF50633 is not set
+
+#
+# Multimedia devices
+#
+
+#
+# Multimedia core support
+#
+CONFIG_VIDEO_DEV=m
+CONFIG_VIDEO_V4L2_COMMON=m
+CONFIG_VIDEO_ALLOW_V4L1=y
+CONFIG_VIDEO_V4L1_COMPAT=y
+CONFIG_DVB_CORE=m
+CONFIG_VIDEO_MEDIA=m
+
+#
+# Multimedia drivers
+#
+# CONFIG_MEDIA_ATTACH is not set
+CONFIG_MEDIA_TUNER=m
+# CONFIG_MEDIA_TUNER_CUSTOMISE is not set
+CONFIG_MEDIA_TUNER_SIMPLE=m
+CONFIG_MEDIA_TUNER_TDA8290=m
+CONFIG_MEDIA_TUNER_TDA9887=m
+CONFIG_MEDIA_TUNER_TEA5761=m
+CONFIG_MEDIA_TUNER_TEA5767=m
+CONFIG_MEDIA_TUNER_MT20XX=m
+CONFIG_MEDIA_TUNER_XC2028=m
+CONFIG_MEDIA_TUNER_XC5000=m
+CONFIG_MEDIA_TUNER_MC44S803=m
+CONFIG_VIDEO_V4L2=m
+CONFIG_VIDEO_V4L1=m
+CONFIG_VIDEOBUF_GEN=m
+CONFIG_VIDEO_CAPTURE_DRIVERS=y
+# CONFIG_VIDEO_ADV_DEBUG is not set
+# CONFIG_VIDEO_FIXED_MINOR_RANGES is not set
+CONFIG_VIDEO_HELPER_CHIPS_AUTO=y
+# CONFIG_VIDEO_VIVI is not set
+# CONFIG_VIDEO_CPIA is not set
+# CONFIG_VIDEO_CPIA2 is not set
+# CONFIG_VIDEO_SAA5246A is not set
+# CONFIG_VIDEO_SAA5249 is not set
+# CONFIG_VIDEO_AU0828 is not set
+CONFIG_SOC_CAMERA=m
+# CONFIG_SOC_CAMERA_MT9M001 is not set
+# CONFIG_SOC_CAMERA_MT9M111 is not set
+# CONFIG_SOC_CAMERA_MT9T031 is not set
+# CONFIG_SOC_CAMERA_MT9V022 is not set
+# CONFIG_SOC_CAMERA_TW9910 is not set
+CONFIG_SOC_CAMERA_PLATFORM=m
+# CONFIG_SOC_CAMERA_OV772X is not set
+# CONFIG_VIDEO_SH_MOBILE_CEU is not set
+CONFIG_V4L_USB_DRIVERS=y
+# CONFIG_USB_VIDEO_CLASS is not set
+CONFIG_USB_VIDEO_CLASS_INPUT_EVDEV=y
+CONFIG_USB_GSPCA=m
+# CONFIG_USB_M5602 is not set
+# CONFIG_USB_STV06XX is not set
+# CONFIG_USB_GSPCA_CONEX is not set
+# CONFIG_USB_GSPCA_ETOMS is not set
+# CONFIG_USB_GSPCA_FINEPIX is not set
+# CONFIG_USB_GSPCA_MARS is not set
+# CONFIG_USB_GSPCA_MR97310A is not set
+# CONFIG_USB_GSPCA_OV519 is not set
+# CONFIG_USB_GSPCA_OV534 is not set
+# CONFIG_USB_GSPCA_PAC207 is not set
+# CONFIG_USB_GSPCA_PAC7311 is not set
+# CONFIG_USB_GSPCA_SONIXB is not set
+# CONFIG_USB_GSPCA_SONIXJ is not set
+# CONFIG_USB_GSPCA_SPCA500 is not set
+# CONFIG_USB_GSPCA_SPCA501 is not set
+# CONFIG_USB_GSPCA_SPCA505 is not set
+# CONFIG_USB_GSPCA_SPCA506 is not set
+# CONFIG_USB_GSPCA_SPCA508 is not set
+# CONFIG_USB_GSPCA_SPCA561 is not set
+# CONFIG_USB_GSPCA_SQ905 is not set
+# CONFIG_USB_GSPCA_SQ905C is not set
+# CONFIG_USB_GSPCA_STK014 is not set
+# CONFIG_USB_GSPCA_SUNPLUS is not set
+# CONFIG_USB_GSPCA_T613 is not set
+# CONFIG_USB_GSPCA_TV8532 is not set
+# CONFIG_USB_GSPCA_VC032X is not set
+CONFIG_USB_GSPCA_ZC3XX=m
+# CONFIG_VIDEO_PVRUSB2 is not set
+# CONFIG_VIDEO_HDPVR is not set
+# CONFIG_VIDEO_EM28XX is not set
+# CONFIG_VIDEO_CX231XX is not set
+# CONFIG_VIDEO_USBVISION is not set
+# CONFIG_USB_VICAM is not set
+# CONFIG_USB_IBMCAM is not set
+# CONFIG_USB_KONICAWC is not set
+# CONFIG_USB_QUICKCAM_MESSENGER is not set
+# CONFIG_USB_ET61X251 is not set
+# CONFIG_VIDEO_OVCAMCHIP is not set
+# CONFIG_USB_OV511 is not set
+# CONFIG_USB_SE401 is not set
+# CONFIG_USB_SN9C102 is not set
+# CONFIG_USB_STV680 is not set
+# CONFIG_USB_ZC0301 is not set
+# CONFIG_USB_PWC is not set
+# CONFIG_USB_PWC_INPUT_EVDEV is not set
+# CONFIG_USB_ZR364XX is not set
+# CONFIG_USB_STKWEBCAM is not set
+# CONFIG_USB_S2255 is not set
+CONFIG_RADIO_ADAPTERS=y
+# CONFIG_USB_DSBR is not set
+# CONFIG_USB_SI470X is not set
+# CONFIG_USB_MR800 is not set
+# CONFIG_RADIO_TEA5764 is not set
+# CONFIG_DVB_DYNAMIC_MINORS is not set
+CONFIG_DVB_CAPTURE_DRIVERS=y
+# CONFIG_TTPCI_EEPROM is not set
+
+#
+# Supported USB Adapters
+#
+# CONFIG_DVB_USB is not set
+# CONFIG_DVB_SIANO_SMS1XXX is not set
+
+#
+# Supported FlexCopII (B2C2) Adapters
+#
+# CONFIG_DVB_B2C2_FLEXCOP is not set
+
+#
+# Supported DVB Frontends
+#
+# CONFIG_DVB_FE_CUSTOMISE is not set
+# CONFIG_DAB is not set
+
+#
+# Graphics support
+#
+# CONFIG_VGASTATE is not set
+CONFIG_VIDEO_OUTPUT_CONTROL=y
+CONFIG_FB=y
+CONFIG_FIRMWARE_EDID=y
+# CONFIG_FB_DDC is not set
+# CONFIG_FB_BOOT_VESA_SUPPORT is not set
+CONFIG_FB_CFB_FILLRECT=y
+CONFIG_FB_CFB_COPYAREA=y
+CONFIG_FB_CFB_IMAGEBLIT=y
+# CONFIG_FB_CFB_REV_PIXELS_IN_BYTE is not set
+# CONFIG_FB_SYS_FILLRECT is not set
+# CONFIG_FB_SYS_COPYAREA is not set
+# CONFIG_FB_SYS_IMAGEBLIT is not set
+# CONFIG_FB_FOREIGN_ENDIAN is not set
+# CONFIG_FB_SYS_FOPS is not set
+# CONFIG_FB_SVGALIB is not set
+# CONFIG_FB_MACMODES is not set
+# CONFIG_FB_BACKLIGHT is not set
+CONFIG_FB_MODE_HELPERS=y
+CONFIG_FB_TILEBLITTING=y
+
+#
+# Frame buffer hardware drivers
+#
+# CONFIG_FB_UVESA is not set
+# CONFIG_FB_S1D13XXX is not set
+CONFIG_FB_S3C2410=y
+# CONFIG_FB_S3C2410_DEBUG is not set
+# CONFIG_FB_VIRTUAL is not set
+# CONFIG_FB_METRONOME is not set
+# CONFIG_FB_MB862XX is not set
+# CONFIG_FB_BROADSHEET is not set
+CONFIG_BACKLIGHT_LCD_SUPPORT=y
+CONFIG_LCD_CLASS_DEVICE=y
+# CONFIG_LCD_LTV350QV is not set
+# CONFIG_LCD_ILI9320 is not set
+# CONFIG_LCD_TDO24M is not set
+# CONFIG_LCD_VGG2432A4 is not set
+CONFIG_LCD_PLATFORM=y
+CONFIG_BACKLIGHT_CLASS_DEVICE=y
+# CONFIG_BACKLIGHT_GENERIC is not set
+CONFIG_BACKLIGHT_PWM=y
+
+#
+# Display device support
+#
+CONFIG_DISPLAY_SUPPORT=y
+
+#
+# Display hardware drivers
+#
+
+#
+# Console display driver support
+#
+# CONFIG_VGA_CONSOLE is not set
+CONFIG_DUMMY_CONSOLE=y
+CONFIG_FRAMEBUFFER_CONSOLE=y
+CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y
+CONFIG_FRAMEBUFFER_CONSOLE_ROTATION=y
+CONFIG_FONTS=y
+CONFIG_FONT_8x8=y
+# CONFIG_FONT_8x16 is not set
+# CONFIG_FONT_6x11 is not set
+# CONFIG_FONT_7x14 is not set
+# CONFIG_FONT_PEARL_8x8 is not set
+# CONFIG_FONT_ACORN_8x8 is not set
+CONFIG_FONT_MINI_4x6=y
+# CONFIG_FONT_SUN8x16 is not set
+# CONFIG_FONT_SUN12x22 is not set
+# CONFIG_FONT_10x18 is not set
+CONFIG_LOGO=y
+# CONFIG_LOGO_LINUX_MONO is not set
+# CONFIG_LOGO_LINUX_VGA16 is not set
+CONFIG_LOGO_LINUX_CLUT224=y
+CONFIG_SOUND=y
+CONFIG_SOUND_OSS_CORE=y
+CONFIG_SND=y
+CONFIG_SND_TIMER=y
+CONFIG_SND_PCM=y
+CONFIG_SND_HWDEP=m
+CONFIG_SND_RAWMIDI=m
+CONFIG_SND_JACK=y
+CONFIG_SND_SEQUENCER=m
+CONFIG_SND_SEQ_DUMMY=m
+CONFIG_SND_OSSEMUL=y
+CONFIG_SND_MIXER_OSS=m
+CONFIG_SND_PCM_OSS=m
+CONFIG_SND_PCM_OSS_PLUGINS=y
+CONFIG_SND_SEQUENCER_OSS=y
+CONFIG_SND_DYNAMIC_MINORS=y
+CONFIG_SND_SUPPORT_OLD_API=y
+CONFIG_SND_VERBOSE_PROCFS=y
+# CONFIG_SND_VERBOSE_PRINTK is not set
+# CONFIG_SND_DEBUG is not set
+# CONFIG_SND_DRIVERS is not set
+# CONFIG_SND_ARM is not set
+# CONFIG_SND_SPI is not set
+CONFIG_SND_USB=y
+CONFIG_SND_USB_AUDIO=m
+CONFIG_SND_USB_CAIAQ=m
+CONFIG_SND_USB_CAIAQ_INPUT=y
+CONFIG_SND_SOC=y
+CONFIG_SND_S3C24XX_SOC=y
+CONFIG_SND_S3C24XX_SOC_I2S=y
+# CONFIG_SND_S3C24XX_SOC_LN2440SBC_ALC650 is not set
+CONFIG_SND_S3C24XX_SOC_S3C24XX_UDA134X=y
+CONFIG_SND_SOC_I2C_AND_SPI=y
+# CONFIG_SND_SOC_ALL_CODECS is not set
+CONFIG_SND_SOC_L3=y
+CONFIG_SND_SOC_UDA134X=y
+# CONFIG_SOUND_PRIME is not set
+CONFIG_HID_SUPPORT=y
+CONFIG_HID=y
+# CONFIG_HID_DEBUG is not set
+CONFIG_HIDRAW=y
+
+#
+# USB Input Devices
+#
+CONFIG_USB_HID=y
+CONFIG_HID_PID=y
+CONFIG_USB_HIDDEV=y
+
+#
+# Special HID drivers
+#
+CONFIG_HID_A4TECH=y
+CONFIG_HID_APPLE=y
+CONFIG_HID_BELKIN=y
+CONFIG_HID_CHERRY=y
+CONFIG_HID_CHICONY=y
+CONFIG_HID_CYPRESS=y
+# CONFIG_DRAGONRISE_FF is not set
+CONFIG_HID_EZKEY=y
+CONFIG_HID_KYE=y
+CONFIG_HID_GYRATION=y
+CONFIG_HID_KENSINGTON=y
+CONFIG_HID_LOGITECH=y
+# CONFIG_LOGITECH_FF is not set
+# CONFIG_LOGIRUMBLEPAD2_FF is not set
+CONFIG_HID_MICROSOFT=y
+CONFIG_HID_MONTEREY=y
+CONFIG_HID_NTRIG=y
+CONFIG_HID_PANTHERLORD=y
+# CONFIG_PANTHERLORD_FF is not set
+CONFIG_HID_PETALYNX=y
+CONFIG_HID_SAMSUNG=y
+CONFIG_HID_SONY=y
+CONFIG_HID_SUNPLUS=y
+# CONFIG_GREENASIA_FF is not set
+CONFIG_HID_TOPSEED=y
+# CONFIG_THRUSTMASTER_FF is not set
+# CONFIG_ZEROPLUS_FF is not set
+CONFIG_USB_SUPPORT=y
+CONFIG_USB_ARCH_HAS_HCD=y
+CONFIG_USB_ARCH_HAS_OHCI=y
+# CONFIG_USB_ARCH_HAS_EHCI is not set
+CONFIG_USB=y
+# CONFIG_USB_DEBUG is not set
+# CONFIG_USB_ANNOUNCE_NEW_DEVICES is not set
+
+#
+# Miscellaneous USB options
+#
+CONFIG_USB_DEVICEFS=y
+# CONFIG_USB_DEVICE_CLASS is not set
+# CONFIG_USB_DYNAMIC_MINORS is not set
+# CONFIG_USB_SUSPEND is not set
+# CONFIG_USB_OTG is not set
+# CONFIG_USB_MON is not set
+# CONFIG_USB_WUSB is not set
+# CONFIG_USB_WUSB_CBAF is not set
+
+#
+# USB Host Controller Drivers
+#
+# CONFIG_USB_C67X00_HCD is not set
+# CONFIG_USB_OXU210HP_HCD is not set
+# CONFIG_USB_ISP116X_HCD is not set
+# CONFIG_USB_ISP1760_HCD is not set
+CONFIG_USB_OHCI_HCD=y
+# CONFIG_USB_OHCI_BIG_ENDIAN_DESC is not set
+# CONFIG_USB_OHCI_BIG_ENDIAN_MMIO is not set
+CONFIG_USB_OHCI_LITTLE_ENDIAN=y
+# CONFIG_USB_SL811_HCD is not set
+# CONFIG_USB_R8A66597_HCD is not set
+# CONFIG_USB_HWA_HCD is not set
+# CONFIG_USB_MUSB_HDRC is not set
+# CONFIG_USB_GADGET_MUSB_HDRC is not set
+
+#
+# USB Device Class drivers
+#
+CONFIG_USB_ACM=m
+# CONFIG_USB_PRINTER is not set
+CONFIG_USB_WDM=m
+# CONFIG_USB_TMC is not set
+
+#
+# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may
+#
+
+#
+# also be needed; see USB_STORAGE Help for more info
+#
+CONFIG_USB_STORAGE=m
+# CONFIG_USB_STORAGE_DEBUG is not set
+CONFIG_USB_STORAGE_DATAFAB=m
+# CONFIG_USB_STORAGE_FREECOM is not set
+CONFIG_USB_STORAGE_ISD200=m
+CONFIG_USB_STORAGE_USBAT=m
+CONFIG_USB_STORAGE_SDDR09=m
+CONFIG_USB_STORAGE_SDDR55=m
+CONFIG_USB_STORAGE_JUMPSHOT=m
+CONFIG_USB_STORAGE_ALAUDA=m
+# CONFIG_USB_STORAGE_ONETOUCH is not set
+# CONFIG_USB_STORAGE_KARMA is not set
+# CONFIG_USB_STORAGE_CYPRESS_ATACB is not set
+CONFIG_USB_LIBUSUAL=y
+
+#
+# USB Imaging devices
+#
+# CONFIG_USB_MDC800 is not set
+# CONFIG_USB_MICROTEK is not set
+
+#
+# USB port drivers
+#
+CONFIG_USB_SERIAL=m
+# CONFIG_USB_EZUSB is not set
+# CONFIG_USB_SERIAL_GENERIC is not set
+# CONFIG_USB_SERIAL_AIRCABLE is not set
+# CONFIG_USB_SERIAL_ARK3116 is not set
+# CONFIG_USB_SERIAL_BELKIN is not set
+# CONFIG_USB_SERIAL_CH341 is not set
+# CONFIG_USB_SERIAL_WHITEHEAT is not set
+# CONFIG_USB_SERIAL_DIGI_ACCELEPORT is not set
+CONFIG_USB_SERIAL_CP210X=m
+# CONFIG_USB_SERIAL_CYPRESS_M8 is not set
+# CONFIG_USB_SERIAL_EMPEG is not set
+CONFIG_USB_SERIAL_FTDI_SIO=m
+# CONFIG_USB_SERIAL_FUNSOFT is not set
+# CONFIG_USB_SERIAL_VISOR is not set
+# CONFIG_USB_SERIAL_IPAQ is not set
+# CONFIG_USB_SERIAL_IR is not set
+# CONFIG_USB_SERIAL_EDGEPORT is not set
+# CONFIG_USB_SERIAL_EDGEPORT_TI is not set
+# CONFIG_USB_SERIAL_GARMIN is not set
+# CONFIG_USB_SERIAL_IPW is not set
+# CONFIG_USB_SERIAL_IUU is not set
+# CONFIG_USB_SERIAL_KEYSPAN_PDA is not set
+# CONFIG_USB_SERIAL_KEYSPAN is not set
+# CONFIG_USB_SERIAL_KLSI is not set
+# CONFIG_USB_SERIAL_KOBIL_SCT is not set
+# CONFIG_USB_SERIAL_MCT_U232 is not set
+# CONFIG_USB_SERIAL_MOS7720 is not set
+# CONFIG_USB_SERIAL_MOS7840 is not set
+# CONFIG_USB_SERIAL_MOTOROLA is not set
+# CONFIG_USB_SERIAL_NAVMAN is not set
+# CONFIG_USB_SERIAL_PL2303 is not set
+# CONFIG_USB_SERIAL_OTI6858 is not set
+# CONFIG_USB_SERIAL_QUALCOMM is not set
+CONFIG_USB_SERIAL_SPCP8X5=m
+# CONFIG_USB_SERIAL_HP4X is not set
+# CONFIG_USB_SERIAL_SAFE is not set
+# CONFIG_USB_SERIAL_SIEMENS_MPI is not set
+# CONFIG_USB_SERIAL_SIERRAWIRELESS is not set
+# CONFIG_USB_SERIAL_SYMBOL is not set
+# CONFIG_USB_SERIAL_TI is not set
+# CONFIG_USB_SERIAL_CYBERJACK is not set
+# CONFIG_USB_SERIAL_XIRCOM is not set
+# CONFIG_USB_SERIAL_OPTION is not set
+# CONFIG_USB_SERIAL_OMNINET is not set
+# CONFIG_USB_SERIAL_OPTICON is not set
+# CONFIG_USB_SERIAL_DEBUG is not set
+
+#
+# USB Miscellaneous drivers
+#
+# CONFIG_USB_EMI62 is not set
+# CONFIG_USB_EMI26 is not set
+# CONFIG_USB_ADUTUX is not set
+# CONFIG_USB_SEVSEG is not set
+# CONFIG_USB_RIO500 is not set
+# CONFIG_USB_LEGOTOWER is not set
+# CONFIG_USB_LCD is not set
+# CONFIG_USB_BERRY_CHARGE is not set
+# CONFIG_USB_LED is not set
+# CONFIG_USB_CYPRESS_CY7C63 is not set
+# CONFIG_USB_CYTHERM is not set
+# CONFIG_USB_IDMOUSE is not set
+# CONFIG_USB_FTDI_ELAN is not set
+# CONFIG_USB_APPLEDISPLAY is not set
+# CONFIG_USB_LD is not set
+# CONFIG_USB_TRANCEVIBRATOR is not set
+# CONFIG_USB_IOWARRIOR is not set
+# CONFIG_USB_TEST is not set
+# CONFIG_USB_ISIGHTFW is not set
+# CONFIG_USB_VST is not set
+CONFIG_USB_GADGET=y
+# CONFIG_USB_GADGET_DEBUG is not set
+# CONFIG_USB_GADGET_DEBUG_FILES is not set
+# CONFIG_USB_GADGET_DEBUG_FS is not set
+CONFIG_USB_GADGET_VBUS_DRAW=2
+CONFIG_USB_GADGET_SELECTED=y
+# CONFIG_USB_GADGET_AT91 is not set
+# CONFIG_USB_GADGET_ATMEL_USBA is not set
+# CONFIG_USB_GADGET_FSL_USB2 is not set
+# CONFIG_USB_GADGET_LH7A40X is not set
+# CONFIG_USB_GADGET_OMAP is not set
+# CONFIG_USB_GADGET_PXA25X is not set
+# CONFIG_USB_GADGET_PXA27X is not set
+CONFIG_USB_GADGET_S3C2410=y
+CONFIG_USB_S3C2410=y
+# CONFIG_USB_S3C2410_DEBUG is not set
+# CONFIG_USB_GADGET_IMX is not set
+# CONFIG_USB_GADGET_M66592 is not set
+# CONFIG_USB_GADGET_AMD5536UDC is not set
+# CONFIG_USB_GADGET_FSL_QE is not set
+# CONFIG_USB_GADGET_CI13XXX is not set
+# CONFIG_USB_GADGET_NET2280 is not set
+# CONFIG_USB_GADGET_GOKU is not set
+# CONFIG_USB_GADGET_DUMMY_HCD is not set
+# CONFIG_USB_GADGET_DUALSPEED is not set
+CONFIG_USB_ZERO=m
+CONFIG_USB_ETH=m
+CONFIG_USB_ETH_RNDIS=y
+CONFIG_USB_GADGETFS=m
+CONFIG_USB_FILE_STORAGE=m
+# CONFIG_USB_FILE_STORAGE_TEST is not set
+CONFIG_USB_G_SERIAL=m
+# CONFIG_USB_MIDI_GADGET is not set
+# CONFIG_USB_G_PRINTER is not set
+CONFIG_USB_CDC_COMPOSITE=m
+
+#
+# OTG and related infrastructure
+#
+# CONFIG_USB_GPIO_VBUS is not set
+# CONFIG_NOP_USB_XCEIV is not set
+CONFIG_MMC=y
+# CONFIG_MMC_DEBUG is not set
+# CONFIG_MMC_UNSAFE_RESUME is not set
+
+#
+# MMC/SD/SDIO Card Drivers
+#
+CONFIG_MMC_BLOCK=y
+CONFIG_MMC_BLOCK_BOUNCE=y
+CONFIG_SDIO_UART=y
+# CONFIG_MMC_TEST is not set
+
+#
+# MMC/SD/SDIO Host Controller Drivers
+#
+CONFIG_MMC_SDHCI=y
+CONFIG_MMC_SPI=y
+CONFIG_MMC_S3C=y
+# CONFIG_MEMSTICK is not set
+# CONFIG_ACCESSIBILITY is not set
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=y
+
+#
+# LED drivers
+#
+CONFIG_LEDS_S3C24XX=y
+# CONFIG_LEDS_PCA9532 is not set
+CONFIG_LEDS_GPIO=y
+CONFIG_LEDS_GPIO_PLATFORM=y
+# CONFIG_LEDS_LP5521 is not set
+# CONFIG_LEDS_PCA955X is not set
+# CONFIG_LEDS_DAC124S085 is not set
+# CONFIG_LEDS_PWM is not set
+# CONFIG_LEDS_BD2802 is not set
+
+#
+# LED Triggers
+#
+CONFIG_LEDS_TRIGGERS=y
+CONFIG_LEDS_TRIGGER_TIMER=y
+CONFIG_LEDS_TRIGGER_HEARTBEAT=y
+CONFIG_LEDS_TRIGGER_BACKLIGHT=y
+CONFIG_LEDS_TRIGGER_GPIO=y
+CONFIG_LEDS_TRIGGER_DEFAULT_ON=y
+
+#
+# iptables trigger is under Netfilter config (LED target)
+#
+CONFIG_RTC_LIB=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_HCTOSYS=y
+CONFIG_RTC_HCTOSYS_DEVICE="rtc0"
+# CONFIG_RTC_DEBUG is not set
+
+#
+# RTC interfaces
+#
+CONFIG_RTC_INTF_SYSFS=y
+CONFIG_RTC_INTF_PROC=y
+CONFIG_RTC_INTF_DEV=y
+CONFIG_RTC_INTF_DEV_UIE_EMUL=y
+# CONFIG_RTC_DRV_TEST is not set
+
+#
+# I2C RTC drivers
+#
+# CONFIG_RTC_DRV_DS1307 is not set
+# CONFIG_RTC_DRV_DS1374 is not set
+# CONFIG_RTC_DRV_DS1672 is not set
+# CONFIG_RTC_DRV_MAX6900 is not set
+# CONFIG_RTC_DRV_RS5C372 is not set
+# CONFIG_RTC_DRV_ISL1208 is not set
+# CONFIG_RTC_DRV_X1205 is not set
+# CONFIG_RTC_DRV_PCF8563 is not set
+# CONFIG_RTC_DRV_PCF8583 is not set
+# CONFIG_RTC_DRV_M41T80 is not set
+# CONFIG_RTC_DRV_S35390A is not set
+# CONFIG_RTC_DRV_FM3130 is not set
+# CONFIG_RTC_DRV_RX8581 is not set
+
+#
+# SPI RTC drivers
+#
+# CONFIG_RTC_DRV_M41T94 is not set
+# CONFIG_RTC_DRV_DS1305 is not set
+# CONFIG_RTC_DRV_DS1390 is not set
+# CONFIG_RTC_DRV_MAX6902 is not set
+# CONFIG_RTC_DRV_R9701 is not set
+# CONFIG_RTC_DRV_RS5C348 is not set
+# CONFIG_RTC_DRV_DS3234 is not set
+
+#
+# Platform RTC drivers
+#
+# CONFIG_RTC_DRV_CMOS is not set
+# CONFIG_RTC_DRV_DS1286 is not set
+# CONFIG_RTC_DRV_DS1511 is not set
+# CONFIG_RTC_DRV_DS1553 is not set
+# CONFIG_RTC_DRV_DS1742 is not set
+# CONFIG_RTC_DRV_STK17TA8 is not set
+# CONFIG_RTC_DRV_M48T86 is not set
+# CONFIG_RTC_DRV_M48T35 is not set
+# CONFIG_RTC_DRV_M48T59 is not set
+# CONFIG_RTC_DRV_BQ4802 is not set
+# CONFIG_RTC_DRV_V3020 is not set
+
+#
+# on-CPU RTC drivers
+#
+CONFIG_RTC_DRV_S3C=y
+CONFIG_DMADEVICES=y
+
+#
+# DMA Devices
+#
+# CONFIG_AUXDISPLAY is not set
+# CONFIG_REGULATOR is not set
+# CONFIG_UIO is not set
+# CONFIG_STAGING is not set
+
+#
+# File systems
+#
+CONFIG_EXT2_FS=m
+CONFIG_EXT2_FS_XATTR=y
+CONFIG_EXT2_FS_POSIX_ACL=y
+CONFIG_EXT2_FS_SECURITY=y
+# CONFIG_EXT2_FS_XIP is not set
+CONFIG_EXT3_FS=y
+# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
+CONFIG_EXT3_FS_XATTR=y
+CONFIG_EXT3_FS_POSIX_ACL=y
+CONFIG_EXT3_FS_SECURITY=y
+# CONFIG_EXT4_FS is not set
+CONFIG_JBD=y
+# CONFIG_JBD_DEBUG is not set
+CONFIG_FS_MBCACHE=y
+# CONFIG_REISERFS_FS is not set
+# CONFIG_JFS_FS is not set
+CONFIG_FS_POSIX_ACL=y
+CONFIG_FILE_LOCKING=y
+# CONFIG_XFS_FS is not set
+# CONFIG_GFS2_FS is not set
+# CONFIG_OCFS2_FS is not set
+# CONFIG_BTRFS_FS is not set
+CONFIG_DNOTIFY=y
+CONFIG_INOTIFY=y
+CONFIG_INOTIFY_USER=y
+# CONFIG_QUOTA is not set
+CONFIG_AUTOFS_FS=y
+CONFIG_AUTOFS4_FS=y
+# CONFIG_FUSE_FS is not set
+CONFIG_GENERIC_ACL=y
+
+#
+# Caches
+#
+# CONFIG_FSCACHE is not set
+
+#
+# CD-ROM/DVD Filesystems
+#
+# CONFIG_ISO9660_FS is not set
+# CONFIG_UDF_FS is not set
+
+#
+# DOS/FAT/NT Filesystems
+#
+CONFIG_FAT_FS=y
+CONFIG_MSDOS_FS=y
+CONFIG_VFAT_FS=y
+CONFIG_FAT_DEFAULT_CODEPAGE=437
+CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1"
+# CONFIG_NTFS_FS is not set
+
+#
+# Pseudo filesystems
+#
+CONFIG_PROC_FS=y
+CONFIG_PROC_SYSCTL=y
+CONFIG_PROC_PAGE_MONITOR=y
+CONFIG_SYSFS=y
+CONFIG_TMPFS=y
+CONFIG_TMPFS_POSIX_ACL=y
+# CONFIG_HUGETLB_PAGE is not set
+CONFIG_CONFIGFS_FS=m
+CONFIG_MISC_FILESYSTEMS=y
+# CONFIG_ADFS_FS is not set
+# CONFIG_AFFS_FS is not set
+# CONFIG_ECRYPT_FS is not set
+# CONFIG_HFS_FS is not set
+# CONFIG_HFSPLUS_FS is not set
+# CONFIG_BEFS_FS is not set
+# CONFIG_BFS_FS is not set
+# CONFIG_EFS_FS is not set
+CONFIG_JFFS2_FS=y
+CONFIG_JFFS2_FS_DEBUG=0
+CONFIG_JFFS2_FS_WRITEBUFFER=y
+# CONFIG_JFFS2_FS_WBUF_VERIFY is not set
+# CONFIG_JFFS2_SUMMARY is not set
+# CONFIG_JFFS2_FS_XATTR is not set
+# CONFIG_JFFS2_COMPRESSION_OPTIONS is not set
+CONFIG_JFFS2_ZLIB=y
+# CONFIG_JFFS2_LZO is not set
+CONFIG_JFFS2_RTIME=y
+# CONFIG_JFFS2_RUBIN is not set
+CONFIG_CRAMFS=y
+# CONFIG_SQUASHFS is not set
+# CONFIG_VXFS_FS is not set
+# CONFIG_MINIX_FS is not set
+# CONFIG_OMFS_FS is not set
+# CONFIG_HPFS_FS is not set
+# CONFIG_QNX4FS_FS is not set
+CONFIG_ROMFS_FS=y
+# CONFIG_ROMFS_BACKED_BY_BLOCK is not set
+# CONFIG_ROMFS_BACKED_BY_MTD is not set
+CONFIG_ROMFS_BACKED_BY_BOTH=y
+CONFIG_ROMFS_ON_BLOCK=y
+CONFIG_ROMFS_ON_MTD=y
+# CONFIG_SYSV_FS is not set
+# CONFIG_UFS_FS is not set
+# CONFIG_NILFS2_FS is not set
+CONFIG_NETWORK_FILESYSTEMS=y
+CONFIG_NFS_FS=y
+CONFIG_NFS_V3=y
+CONFIG_NFS_V3_ACL=y
+CONFIG_NFS_V4=y
+CONFIG_ROOT_NFS=y
+# CONFIG_NFSD is not set
+CONFIG_LOCKD=y
+CONFIG_LOCKD_V4=y
+CONFIG_NFS_ACL_SUPPORT=y
+CONFIG_NFS_COMMON=y
+CONFIG_SUNRPC=y
+CONFIG_SUNRPC_GSS=y
+CONFIG_RPCSEC_GSS_KRB5=y
+# CONFIG_RPCSEC_GSS_SPKM3 is not set
+# CONFIG_SMB_FS is not set
+# CONFIG_CIFS is not set
+# CONFIG_NCP_FS is not set
+# CONFIG_CODA_FS is not set
+# CONFIG_AFS_FS is not set
+
+#
+# Partition Types
+#
+CONFIG_PARTITION_ADVANCED=y
+# CONFIG_ACORN_PARTITION is not set
+# CONFIG_OSF_PARTITION is not set
+# CONFIG_AMIGA_PARTITION is not set
+# CONFIG_ATARI_PARTITION is not set
+# CONFIG_MAC_PARTITION is not set
+CONFIG_MSDOS_PARTITION=y
+CONFIG_BSD_DISKLABEL=y
+CONFIG_MINIX_SUBPARTITION=y
+CONFIG_SOLARIS_X86_PARTITION=y
+CONFIG_UNIXWARE_DISKLABEL=y
+CONFIG_LDM_PARTITION=y
+# CONFIG_LDM_DEBUG is not set
+# CONFIG_SGI_PARTITION is not set
+# CONFIG_ULTRIX_PARTITION is not set
+# CONFIG_SUN_PARTITION is not set
+# CONFIG_KARMA_PARTITION is not set
+CONFIG_EFI_PARTITION=y
+# CONFIG_SYSV68_PARTITION is not set
+CONFIG_NLS=y
+CONFIG_NLS_DEFAULT="cp437"
+CONFIG_NLS_CODEPAGE_437=m
+CONFIG_NLS_CODEPAGE_737=m
+CONFIG_NLS_CODEPAGE_775=m
+CONFIG_NLS_CODEPAGE_850=m
+CONFIG_NLS_CODEPAGE_852=m
+CONFIG_NLS_CODEPAGE_855=m
+CONFIG_NLS_CODEPAGE_857=m
+CONFIG_NLS_CODEPAGE_860=m
+CONFIG_NLS_CODEPAGE_861=m
+CONFIG_NLS_CODEPAGE_862=m
+CONFIG_NLS_CODEPAGE_863=m
+CONFIG_NLS_CODEPAGE_864=m
+CONFIG_NLS_CODEPAGE_865=m
+CONFIG_NLS_CODEPAGE_866=m
+CONFIG_NLS_CODEPAGE_869=m
+CONFIG_NLS_CODEPAGE_936=m
+CONFIG_NLS_CODEPAGE_950=m
+CONFIG_NLS_CODEPAGE_932=m
+CONFIG_NLS_CODEPAGE_949=m
+CONFIG_NLS_CODEPAGE_874=m
+CONFIG_NLS_ISO8859_8=m
+CONFIG_NLS_CODEPAGE_1250=m
+CONFIG_NLS_CODEPAGE_1251=m
+CONFIG_NLS_ASCII=m
+CONFIG_NLS_ISO8859_1=m
+CONFIG_NLS_ISO8859_2=m
+CONFIG_NLS_ISO8859_3=m
+CONFIG_NLS_ISO8859_4=m
+CONFIG_NLS_ISO8859_5=m
+CONFIG_NLS_ISO8859_6=m
+CONFIG_NLS_ISO8859_7=m
+CONFIG_NLS_ISO8859_9=m
+CONFIG_NLS_ISO8859_13=m
+CONFIG_NLS_ISO8859_14=m
+CONFIG_NLS_ISO8859_15=m
+CONFIG_NLS_KOI8_R=m
+CONFIG_NLS_KOI8_U=m
+CONFIG_NLS_UTF8=m
+# CONFIG_DLM is not set
+
+#
+# Kernel hacking
+#
+# CONFIG_PRINTK_TIME is not set
+# CONFIG_ENABLE_WARN_DEPRECATED is not set
+# CONFIG_ENABLE_MUST_CHECK is not set
+CONFIG_FRAME_WARN=1024
+# CONFIG_MAGIC_SYSRQ is not set
+# CONFIG_UNUSED_SYMBOLS is not set
+CONFIG_DEBUG_FS=y
+# CONFIG_HEADERS_CHECK is not set
+CONFIG_DEBUG_KERNEL=y
+# CONFIG_DEBUG_SHIRQ is not set
+CONFIG_DETECT_SOFTLOCKUP=y
+# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set
+CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=0
+CONFIG_DETECT_HUNG_TASK=y
+# CONFIG_BOOTPARAM_HUNG_TASK_PANIC is not set
+CONFIG_BOOTPARAM_HUNG_TASK_PANIC_VALUE=0
+# CONFIG_SCHED_DEBUG is not set
+# CONFIG_SCHEDSTATS is not set
+# CONFIG_TIMER_STATS is not set
+# CONFIG_DEBUG_OBJECTS is not set
+# CONFIG_SLUB_DEBUG_ON is not set
+# CONFIG_SLUB_STATS is not set
+# CONFIG_DEBUG_RT_MUTEXES is not set
+# CONFIG_RT_MUTEX_TESTER is not set
+# CONFIG_DEBUG_SPINLOCK is not set
+# CONFIG_DEBUG_MUTEXES is not set
+# CONFIG_DEBUG_LOCK_ALLOC is not set
+# CONFIG_PROVE_LOCKING is not set
+# CONFIG_LOCK_STAT is not set
+# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
+# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
+# CONFIG_DEBUG_KOBJECT is not set
+CONFIG_DEBUG_BUGVERBOSE=y
+CONFIG_DEBUG_INFO=y
+# CONFIG_DEBUG_VM is not set
+# CONFIG_DEBUG_WRITECOUNT is not set
+CONFIG_DEBUG_MEMORY_INIT=y
+# CONFIG_DEBUG_LIST is not set
+# CONFIG_DEBUG_SG is not set
+# CONFIG_DEBUG_NOTIFIERS is not set
+# CONFIG_BOOT_PRINTK_DELAY is not set
+# CONFIG_RCU_TORTURE_TEST is not set
+# CONFIG_RCU_CPU_STALL_DETECTOR is not set
+# CONFIG_BACKTRACE_SELF_TEST is not set
+# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set
+# CONFIG_FAULT_INJECTION is not set
+# CONFIG_LATENCYTOP is not set
+CONFIG_SYSCTL_SYSCALL_CHECK=y
+# CONFIG_PAGE_POISONING is not set
+CONFIG_HAVE_FUNCTION_TRACER=y
+CONFIG_TRACING_SUPPORT=y
+
+#
+# Tracers
+#
+# CONFIG_FUNCTION_TRACER is not set
+# CONFIG_SCHED_TRACER is not set
+# CONFIG_CONTEXT_SWITCH_TRACER is not set
+# CONFIG_EVENT_TRACER is not set
+# CONFIG_BOOT_TRACER is not set
+# CONFIG_TRACE_BRANCH_PROFILING is not set
+# CONFIG_STACK_TRACER is not set
+# CONFIG_KMEMTRACE is not set
+# CONFIG_WORKQUEUE_TRACER is not set
+# CONFIG_BLK_DEV_IO_TRACE is not set
+# CONFIG_DYNAMIC_DEBUG is not set
+# CONFIG_SAMPLES is not set
+CONFIG_HAVE_ARCH_KGDB=y
+# CONFIG_KGDB is not set
+CONFIG_ARM_UNWIND=y
+CONFIG_DEBUG_USER=y
+# CONFIG_DEBUG_ERRORS is not set
+# CONFIG_DEBUG_STACK_USAGE is not set
+# CONFIG_DEBUG_LL is not set
+CONFIG_DEBUG_S3C_UART=0
+
+#
+# Security options
+#
+CONFIG_KEYS=y
+# CONFIG_KEYS_DEBUG_PROC_KEYS is not set
+# CONFIG_SECURITY is not set
+# CONFIG_SECURITYFS is not set
+CONFIG_SECURITY_FILE_CAPABILITIES=y
+CONFIG_CRYPTO=y
+
+#
+# Crypto core or helper
+#
+CONFIG_CRYPTO_FIPS=y
+CONFIG_CRYPTO_ALGAPI=y
+CONFIG_CRYPTO_ALGAPI2=y
+CONFIG_CRYPTO_AEAD=m
+CONFIG_CRYPTO_AEAD2=y
+CONFIG_CRYPTO_BLKCIPHER=y
+CONFIG_CRYPTO_BLKCIPHER2=y
+CONFIG_CRYPTO_HASH=y
+CONFIG_CRYPTO_HASH2=y
+CONFIG_CRYPTO_RNG=m
+CONFIG_CRYPTO_RNG2=y
+CONFIG_CRYPTO_PCOMP=y
+CONFIG_CRYPTO_MANAGER=y
+CONFIG_CRYPTO_MANAGER2=y
+CONFIG_CRYPTO_GF128MUL=m
+CONFIG_CRYPTO_NULL=m
+CONFIG_CRYPTO_WORKQUEUE=y
+CONFIG_CRYPTO_CRYPTD=m
+CONFIG_CRYPTO_AUTHENC=m
+CONFIG_CRYPTO_TEST=m
+
+#
+# Authenticated Encryption with Associated Data
+#
+CONFIG_CRYPTO_CCM=m
+CONFIG_CRYPTO_GCM=m
+CONFIG_CRYPTO_SEQIV=m
+
+#
+# Block modes
+#
+CONFIG_CRYPTO_CBC=y
+CONFIG_CRYPTO_CTR=m
+CONFIG_CRYPTO_CTS=m
+CONFIG_CRYPTO_ECB=y
+CONFIG_CRYPTO_LRW=m
+CONFIG_CRYPTO_PCBC=m
+CONFIG_CRYPTO_XTS=m
+
+#
+# Hash modes
+#
+CONFIG_CRYPTO_HMAC=y
+CONFIG_CRYPTO_XCBC=m
+
+#
+# Digest
+#
+CONFIG_CRYPTO_CRC32C=m
+CONFIG_CRYPTO_MD4=m
+CONFIG_CRYPTO_MD5=y
+CONFIG_CRYPTO_MICHAEL_MIC=y
+CONFIG_CRYPTO_RMD128=m
+CONFIG_CRYPTO_RMD160=m
+CONFIG_CRYPTO_RMD256=m
+CONFIG_CRYPTO_RMD320=m
+CONFIG_CRYPTO_SHA1=m
+CONFIG_CRYPTO_SHA256=m
+CONFIG_CRYPTO_SHA512=m
+CONFIG_CRYPTO_TGR192=m
+CONFIG_CRYPTO_WP512=m
+
+#
+# Ciphers
+#
+CONFIG_CRYPTO_AES=y
+CONFIG_CRYPTO_ANUBIS=m
+CONFIG_CRYPTO_ARC4=y
+CONFIG_CRYPTO_BLOWFISH=m
+CONFIG_CRYPTO_CAMELLIA=m
+CONFIG_CRYPTO_CAST5=m
+CONFIG_CRYPTO_CAST6=m
+CONFIG_CRYPTO_DES=y
+CONFIG_CRYPTO_FCRYPT=m
+CONFIG_CRYPTO_KHAZAD=m
+CONFIG_CRYPTO_SALSA20=m
+CONFIG_CRYPTO_SEED=m
+CONFIG_CRYPTO_SERPENT=m
+CONFIG_CRYPTO_TEA=m
+CONFIG_CRYPTO_TWOFISH=m
+CONFIG_CRYPTO_TWOFISH_COMMON=m
+
+#
+# Compression
+#
+CONFIG_CRYPTO_DEFLATE=m
+CONFIG_CRYPTO_ZLIB=m
+CONFIG_CRYPTO_LZO=m
+
+#
+# Random Number Generation
+#
+CONFIG_CRYPTO_ANSI_CPRNG=m
+CONFIG_CRYPTO_HW=y
+# CONFIG_BINARY_PRINTF is not set
+
+#
+# Library routines
+#
+CONFIG_BITREVERSE=y
+CONFIG_GENERIC_FIND_LAST_BIT=y
+CONFIG_CRC_CCITT=m
+CONFIG_CRC16=m
+CONFIG_CRC_T10DIF=y
+CONFIG_CRC_ITU_T=y
+CONFIG_CRC32=y
+CONFIG_CRC7=y
+CONFIG_LIBCRC32C=m
+CONFIG_ZLIB_INFLATE=y
+CONFIG_ZLIB_DEFLATE=y
+CONFIG_LZO_COMPRESS=m
+CONFIG_LZO_DECOMPRESS=m
+CONFIG_DECOMPRESS_GZIP=y
+CONFIG_DECOMPRESS_BZIP2=y
+CONFIG_DECOMPRESS_LZMA=y
+CONFIG_HAS_IOMEM=y
+CONFIG_HAS_DMA=y
+CONFIG_NLATTR=y
diff --git a/arch/arm/include/asm/unistd.h b/arch/arm/include/asm/unistd.h
index 94cc58e..0e97b8c 100644
--- a/arch/arm/include/asm/unistd.h
+++ b/arch/arm/include/asm/unistd.h
@@ -389,6 +389,8 @@
#define __NR_inotify_init1 (__NR_SYSCALL_BASE+360)
#define __NR_preadv (__NR_SYSCALL_BASE+361)
#define __NR_pwritev (__NR_SYSCALL_BASE+362)
+#define __NR_rt_tgsigqueueinfo (__NR_SYSCALL_BASE+363)
+#define __NR_perf_counter_open (__NR_SYSCALL_BASE+364)
/*
* The following SWIs are ARM private.
diff --git a/arch/arm/kernel/calls.S b/arch/arm/kernel/calls.S
index 1680e9e..f776e72 100644
--- a/arch/arm/kernel/calls.S
+++ b/arch/arm/kernel/calls.S
@@ -372,6 +372,8 @@
/* 360 */ CALL(sys_inotify_init1)
CALL(sys_preadv)
CALL(sys_pwritev)
+ CALL(sys_rt_tgsigqueueinfo)
+ CALL(sys_perf_counter_open)
#ifndef syscalls_counted
.equ syscalls_padding, ((NR_syscalls + 3) & ~3) - NR_syscalls
#define syscalls_counted
diff --git a/arch/arm/kernel/irq.c b/arch/arm/kernel/irq.c
index 6874c7d..096f600 100644
--- a/arch/arm/kernel/irq.c
+++ b/arch/arm/kernel/irq.c
@@ -167,7 +167,7 @@
#ifdef CONFIG_SMP
cpumask_setall(bad_irq_desc.affinity);
- bad_irq_desc.cpu = smp_processor_id();
+ bad_irq_desc.node = smp_processor_id();
#endif
init_arch_irq();
}
@@ -176,7 +176,7 @@
static void route_irq(struct irq_desc *desc, unsigned int irq, unsigned int cpu)
{
- pr_debug("IRQ%u: moving from cpu%u to cpu%u\n", irq, desc->cpu, cpu);
+ pr_debug("IRQ%u: moving from cpu%u to cpu%u\n", irq, desc->node, cpu);
spin_lock_irq(&desc->lock);
desc->chip->set_affinity(irq, cpumask_of(cpu));
@@ -195,7 +195,7 @@
for (i = 0; i < NR_IRQS; i++) {
struct irq_desc *desc = irq_desc + i;
- if (desc->cpu == cpu) {
+ if (desc->node == cpu) {
unsigned int newcpu = cpumask_any_and(desc->affinity,
cpu_online_mask);
if (newcpu >= nr_cpu_ids) {
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
index 1585423..39196df 100644
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
@@ -114,9 +114,6 @@
/*
* Function pointers to optional machine specific functions
*/
-void (*pm_idle)(void);
-EXPORT_SYMBOL(pm_idle);
-
void (*pm_power_off)(void);
EXPORT_SYMBOL(pm_power_off);
@@ -130,20 +127,19 @@
*/
static void default_idle(void)
{
- if (hlt_counter)
- cpu_relax();
- else {
- local_irq_disable();
- if (!need_resched())
- arch_idle();
- local_irq_enable();
- }
+ if (!need_resched())
+ arch_idle();
+ local_irq_enable();
}
+void (*pm_idle)(void) = default_idle;
+EXPORT_SYMBOL(pm_idle);
+
/*
- * The idle thread. We try to conserve power, while trying to keep
- * overall latency low. The architecture specific idle is passed
- * a value to indicate the level of "idleness" of the system.
+ * The idle thread has rather strange semantics for calling pm_idle,
+ * but this is what x86 does and we need to do the same, so that
+ * things like cpuidle get called in the same way. The only difference
+ * is that we always respect 'hlt_counter' to prevent low power idle.
*/
void cpu_idle(void)
{
@@ -151,21 +147,31 @@
/* endless idle loop with no priority at all */
while (1) {
- void (*idle)(void) = pm_idle;
-
+ tick_nohz_stop_sched_tick(1);
+ leds_event(led_idle_start);
+ while (!need_resched()) {
#ifdef CONFIG_HOTPLUG_CPU
- if (cpu_is_offline(smp_processor_id())) {
- leds_event(led_idle_start);
- cpu_die();
- }
+ if (cpu_is_offline(smp_processor_id()))
+ cpu_die();
#endif
- if (!idle)
- idle = default_idle;
- leds_event(led_idle_start);
- tick_nohz_stop_sched_tick(1);
- while (!need_resched())
- idle();
+ local_irq_disable();
+ if (hlt_counter) {
+ local_irq_enable();
+ cpu_relax();
+ } else {
+ stop_critical_timings();
+ pm_idle();
+ start_critical_timings();
+ /*
+ * This will eventually be removed - pm_idle
+ * functions should always return with IRQs
+ * enabled.
+ */
+ WARN_ON(irqs_disabled());
+ local_irq_enable();
+ }
+ }
leds_event(led_idle_end);
tick_nohz_restart_sched_tick();
preempt_enable_no_resched();
@@ -352,6 +358,23 @@
" .size kernel_thread_helper, . - kernel_thread_helper\n"
" .previous");
+#ifdef CONFIG_ARM_UNWIND
+extern void kernel_thread_exit(long code);
+asm( ".section .text\n"
+" .align\n"
+" .type kernel_thread_exit, #function\n"
+"kernel_thread_exit:\n"
+" .fnstart\n"
+" .cantunwind\n"
+" bl do_exit\n"
+" nop\n"
+" .fnend\n"
+" .size kernel_thread_exit, . - kernel_thread_exit\n"
+" .previous");
+#else
+#define kernel_thread_exit do_exit
+#endif
+
/*
* Create a kernel thread.
*/
@@ -363,7 +386,7 @@
regs.ARM_r1 = (unsigned long)arg;
regs.ARM_r2 = (unsigned long)fn;
- regs.ARM_r3 = (unsigned long)do_exit;
+ regs.ARM_r3 = (unsigned long)kernel_thread_exit;
regs.ARM_pc = (unsigned long)kernel_thread_helper;
regs.ARM_cpsr = SVC_MODE | PSR_ENDSTATE;
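Note on the idle rework above: cpu_idle() now calls pm_idle() with IRQs disabled and, as the WARN_ON() comment says, expects it to return with IRQs enabled, which is exactly what the new default_idle() does. A board providing its own low-power hook would follow the same contract. The sketch below is purely illustrative and not from this patch; my_board_idle() and my_board_idle_init() are invented names, and the header that provides arch_idle() varies by platform (usually the machine's <mach/system.h>).

	#include <linux/kernel.h>
	#include <linux/init.h>
	#include <linux/sched.h>	/* need_resched() */
	#include <linux/irqflags.h>	/* local_irq_enable() */
	#include <mach/system.h>	/* arch_idle() on most ARM platforms */

	extern void (*pm_idle)(void);

	/* Entered by cpu_idle() with IRQs disabled; must return with IRQs enabled. */
	static void my_board_idle(void)
	{
		if (!need_resched())
			arch_idle();		/* sleep until the next interrupt */
		local_irq_enable();		/* keep the contract noted above */
	}

	static int __init my_board_idle_init(void)
	{
		pm_idle = my_board_idle;
		return 0;
	}
	arch_initcall(my_board_idle_init);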
diff --git a/arch/arm/kernel/unwind.c b/arch/arm/kernel/unwind.c
index 1dedc2c..dd56e11 100644
--- a/arch/arm/kernel/unwind.c
+++ b/arch/arm/kernel/unwind.c
@@ -212,7 +212,8 @@
ctrl->vrs[14] = *vsp++;
ctrl->vrs[SP] = (unsigned long)vsp;
} else if (insn == 0xb0) {
- ctrl->vrs[PC] = ctrl->vrs[LR];
+ if (ctrl->vrs[PC] == 0)
+ ctrl->vrs[PC] = ctrl->vrs[LR];
/* no further processing */
ctrl->entries = 0;
} else if (insn == 0xb1) {
@@ -309,18 +310,20 @@
}
while (ctrl.entries > 0) {
- int urc;
-
- if (ctrl.vrs[SP] < low || ctrl.vrs[SP] >= high)
- return -URC_FAILURE;
- urc = unwind_exec_insn(&ctrl);
+ int urc = unwind_exec_insn(&ctrl);
if (urc < 0)
return urc;
+ if (ctrl.vrs[SP] < low || ctrl.vrs[SP] >= high)
+ return -URC_FAILURE;
}
if (ctrl.vrs[PC] == 0)
ctrl.vrs[PC] = ctrl.vrs[LR];
+ /* check for infinite loop */
+ if (frame->pc == ctrl.vrs[PC])
+ return -URC_FAILURE;
+
frame->fp = ctrl.vrs[FP];
frame->sp = ctrl.vrs[SP];
frame->lr = ctrl.vrs[LR];
@@ -332,7 +335,6 @@
void unwind_backtrace(struct pt_regs *regs, struct task_struct *tsk)
{
struct stackframe frame;
- unsigned long high, low;
register unsigned long current_sp asm ("sp");
pr_debug("%s(regs = %p tsk = %p)\n", __func__, regs, tsk);
@@ -362,9 +364,6 @@
frame.pc = thread_saved_pc(tsk);
}
- low = frame.sp & ~(THREAD_SIZE - 1);
- high = low + THREAD_SIZE;
-
while (1) {
int urc;
unsigned long where = frame.pc;
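The unwind.c changes above tighten two things: the stack-pointer sanity check now runs after each unwind instruction has executed, and the walk aborts if a step makes no progress (the unwound PC equals the PC the frame started from), which previously could loop forever on a corrupt or unexpected entry. The user-space toy below is not kernel code and all names are invented; it only shows the effect of that no-progress guard on a fake frame chain.

	#include <stdio.h>

	/* next_pc[i] is the PC that "unwinding" a frame at PC i yields;
	 * entry 3 unwinds to itself, i.e. a step that makes no progress. */
	static const unsigned long next_pc[] = { 0, 0, 1, 3, 3 };

	static void backtrace(unsigned long pc)
	{
		for (;;) {
			unsigned long new_pc = next_pc[pc];

			printf("pc = %lu\n", pc);
			if (new_pc == pc) {		/* same idea as the new check */
				printf("unwind made no progress, stopping\n");
				return;
			}
			pc = new_pc;
			if (pc == 0)			/* reached the end of the chain */
				return;
		}
	}

	int main(void)
	{
		backtrace(4);	/* prints pc = 4, pc = 3, then stops */
		return 0;
	}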
diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S
index 6c07797..4340bf3 100644
--- a/arch/arm/kernel/vmlinux.lds.S
+++ b/arch/arm/kernel/vmlinux.lds.S
@@ -84,6 +84,14 @@
*(.exitcall.exit)
*(.ARM.exidx.exit.text)
*(.ARM.extab.exit.text)
+#ifndef CONFIG_HOTPLUG_CPU
+ *(.ARM.exidx.cpuexit.text)
+ *(.ARM.extab.cpuexit.text)
+#endif
+#ifndef CONFIG_HOTPLUG
+ *(.ARM.exidx.devexit.text)
+ *(.ARM.extab.devexit.text)
+#endif
#ifndef CONFIG_MMU
*(.fixup)
*(__ex_table)
diff --git a/arch/arm/mach-davinci/include/mach/nand.h b/arch/arm/mach-davinci/include/mach/nand.h
index aa48284..b520c4b 100644
--- a/arch/arm/mach-davinci/include/mach/nand.h
+++ b/arch/arm/mach-davinci/include/mach/nand.h
@@ -68,10 +68,14 @@
/* none == NAND_ECC_NONE (strongly *not* advised!!)
* soft == NAND_ECC_SOFT
- * 1-bit == NAND_ECC_HW
- * 4-bit == NAND_ECC_HW_SYNDROME (not on all chips)
+ * else == NAND_ECC_HW, according to ecc_bits
+ *
+ * All DaVinci-family chips support 1-bit hardware ECC.
+ * Newer ones also support 4-bit ECC, but using it with
+ * large page chips is awkward.
*/
nand_ecc_modes_t ecc_mode;
+ u8 ecc_bits;
/* e.g. NAND_BUSWIDTH_16 or NAND_USE_FLASH_BBT */
unsigned options;
diff --git a/arch/arm/mach-omap2/clock.c b/arch/arm/mach-omap2/clock.c
index ba528f8..b0665f1 100644
--- a/arch/arm/mach-omap2/clock.c
+++ b/arch/arm/mach-omap2/clock.c
@@ -302,7 +302,7 @@
udelay(1);
}
- if (i < MAX_CLOCK_ENABLE_WAIT)
+ if (i <= MAX_CLOCK_ENABLE_WAIT)
pr_debug("Clock %s stable after %d loops\n", name, i);
else
printk(KERN_ERR "Clock %s didn't enable in %d tries\n",
diff --git a/arch/arm/mach-omap2/clock34xx.c b/arch/arm/mach-omap2/clock34xx.c
index 9e43fe5..045da92 100644
--- a/arch/arm/mach-omap2/clock34xx.c
+++ b/arch/arm/mach-omap2/clock34xx.c
@@ -286,6 +286,20 @@
#define MIN_SDRC_DLL_LOCK_FREQ 83000000
+#define CYCLES_PER_MHZ 1000000
+
+/* Scale factor for fixed-point arith in omap3_core_dpll_m2_set_rate() */
+#define SDRC_MPURATE_SCALE 8
+
+/* 2^SDRC_MPURATE_BASE_SHIFT: MPU MHz that SDRC_MPURATE_LOOPS is defined for */
+#define SDRC_MPURATE_BASE_SHIFT 9
+
+/*
+ * SDRC_MPURATE_LOOPS: Number of MPU loops to execute at
+ * 2^SDRC_MPURATE_BASE_SHIFT MHz for SDRC to stabilize
+ */
+#define SDRC_MPURATE_LOOPS 96
+
/**
* omap3_dpll_recalc - recalculate DPLL rate
* @clk: DPLL struct clk
@@ -709,7 +723,8 @@
{
u32 new_div = 0;
u32 unlock_dll = 0;
- unsigned long validrate, sdrcrate;
+ u32 c;
+ unsigned long validrate, sdrcrate, mpurate;
struct omap_sdrc_params *sp;
if (!clk || !rate)
@@ -718,18 +733,15 @@
if (clk != &dpll3_m2_ck)
return -EINVAL;
- if (rate == clk->rate)
- return 0;
-
validrate = omap2_clksel_round_rate_div(clk, rate, &new_div);
if (validrate != rate)
return -EINVAL;
sdrcrate = sdrc_ick.rate;
if (rate > clk->rate)
- sdrcrate <<= ((rate / clk->rate) - 1);
+ sdrcrate <<= ((rate / clk->rate) >> 1);
else
- sdrcrate >>= ((clk->rate / rate) - 1);
+ sdrcrate >>= ((clk->rate / rate) >> 1);
sp = omap2_sdrc_get_params(sdrcrate);
if (!sp)
@@ -740,17 +752,25 @@
unlock_dll = 1;
}
+ /*
+ * XXX This only needs to be done when the CPU frequency changes
+ */
+ mpurate = arm_fck.rate / CYCLES_PER_MHZ;
+ c = (mpurate << SDRC_MPURATE_SCALE) >> SDRC_MPURATE_BASE_SHIFT;
+ c += 1; /* for safety */
+ c *= SDRC_MPURATE_LOOPS;
+ c >>= SDRC_MPURATE_SCALE;
+ if (c == 0)
+ c = 1;
+
pr_debug("clock: changing CORE DPLL rate from %lu to %lu\n", clk->rate,
validrate);
pr_debug("clock: SDRC timing params used: %08x %08x %08x\n",
sp->rfr_ctrl, sp->actim_ctrla, sp->actim_ctrlb);
- /* REVISIT: SRAM code doesn't support other M2 divisors yet */
- WARN_ON(new_div != 1 && new_div != 2);
-
- /* REVISIT: Add SDRC_MR changing to this code also */
omap3_configure_core_dpll(sp->rfr_ctrl, sp->actim_ctrla,
- sp->actim_ctrlb, new_div, unlock_dll);
+ sp->actim_ctrlb, new_div, unlock_dll, c,
+ sp->mr, rate > clk->rate);
return 0;
}
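The three SDRC_MPURATE_* constants added above implement a small fixed-point calculation: SDRC_MPURATE_LOOPS is specified for a 2^SDRC_MPURATE_BASE_SHIFT (512) MHz MPU and is rescaled to the actual MPU rate with SDRC_MPURATE_SCALE bits of fractional precision. The stand-alone sketch below is not part of the patch; it just replays that arithmetic so it can be checked in isolation, and the 500 MHz example rate and the function name are assumptions for illustration.

	#include <stdio.h>

	#define SDRC_MPURATE_SCALE	8
	#define SDRC_MPURATE_BASE_SHIFT	9	/* loops are defined for a 512 MHz MPU */
	#define SDRC_MPURATE_LOOPS	96

	/* Number of MPU wait loops for a given MPU clock rate in MHz. */
	static unsigned int sdrc_wait_loops(unsigned int mpurate_mhz)
	{
		unsigned int c;

		c = (mpurate_mhz << SDRC_MPURATE_SCALE) >> SDRC_MPURATE_BASE_SHIFT;
		c += 1;				/* round up, for safety */
		c *= SDRC_MPURATE_LOOPS;
		c >>= SDRC_MPURATE_SCALE;
		return c ? c : 1;		/* never wait zero loops */
	}

	int main(void)
	{
		/* 96 loops rescaled to a 500 MHz MPU: prints 94 (~96 * 500/512) */
		printf("%u\n", sdrc_wait_loops(500));
		return 0;
	}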
diff --git a/arch/arm/mach-omap2/io.c b/arch/arm/mach-omap2/io.c
index 32afd94..3a86b0f 100644
--- a/arch/arm/mach-omap2/io.c
+++ b/arch/arm/mach-omap2/io.c
@@ -21,6 +21,7 @@
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/io.h>
+#include <linux/clk.h>
#include <asm/tlb.h>
@@ -241,6 +242,40 @@
omapfb_reserve_sdram();
}
+/*
+ * omap2_init_reprogram_sdrc - reprogram SDRC timing parameters
+ *
+ * Sets the CORE DPLL3 M2 divider to the same value that it's at
+ * currently. This has the effect of setting the SDRC SDRAM AC timing
+ * registers to the values currently defined by the kernel. Only
+ * implemented for OMAP3. Returns 0 if called on OMAP2, -EINVAL if
+ * the dpll3_m2_ck cannot be found, or the return value of
+ * clk_set_rate() otherwise.
+ */
+static int __init _omap2_init_reprogram_sdrc(void)
+{
+ struct clk *dpll3_m2_ck;
+ int v = -EINVAL;
+ long rate;
+
+ if (!cpu_is_omap34xx())
+ return 0;
+
+ dpll3_m2_ck = clk_get(NULL, "dpll3_m2_ck");
+ if (!dpll3_m2_ck)
+ return -EINVAL;
+
+ rate = clk_get_rate(dpll3_m2_ck);
+ pr_info("Reprogramming SDRC clock to %ld Hz\n", rate);
+ v = clk_set_rate(dpll3_m2_ck, rate);
+ if (v)
+ pr_err("dpll3_m2_clk rate change failed: %d\n", v);
+
+ clk_put(dpll3_m2_ck);
+
+ return v;
+}
+
void __init omap2_init_common_hw(struct omap_sdrc_params *sp)
{
omap2_mux_init();
@@ -249,6 +284,7 @@
clkdm_init(clockdomains_omap, clkdm_pwrdm_autodeps);
omap2_clk_init();
omap2_sdrc_init(sp);
+ _omap2_init_reprogram_sdrc();
#endif
gpmc_init();
}
diff --git a/arch/arm/mach-omap2/powerdomain.c b/arch/arm/mach-omap2/powerdomain.c
index 73e2971..983f1cb 100644
--- a/arch/arm/mach-omap2/powerdomain.c
+++ b/arch/arm/mach-omap2/powerdomain.c
@@ -1099,7 +1099,7 @@
(c++ < PWRDM_TRANSITION_BAILOUT))
udelay(1);
- if (c >= PWRDM_TRANSITION_BAILOUT) {
+ if (c > PWRDM_TRANSITION_BAILOUT) {
printk(KERN_ERR "powerdomain: waited too long for "
"powerdomain %s to complete transition\n", pwrdm->name);
return -EAGAIN;
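Both bounded-wait fixes above follow from the same off-by-one: in a loop of the form "while (not_ready && (c++ < LIMIT)) udelay(1);", a timeout leaves the counter at LIMIT + 1, while success can legitimately leave it at exactly LIMIT, because the counter is not incremented on the final, successful poll. So the timeout test must be "c > LIMIT" (powerdomain.c) and the success test "i <= LIMIT" (clock.c, whose loop is not fully shown here but presumably has the same shape). The user-space toy below demonstrates the boundary case; all names are made up.

	#include <stdio.h>

	#define LIMIT 5

	/* Simulated bounded wait: the "hardware" becomes ready after
	 * 'ready_after' polls.  Returns the final value of the counter. */
	static int bounded_wait(int ready_after)
	{
		int polls = 0;
		int c = 0;

		while ((polls++ < ready_after) && (c++ < LIMIT))
			;	/* udelay(1) in the real code */
		return c;
	}

	int main(void)
	{
		int c;

		c = bounded_wait(LIMIT);	/* ready on the last allowed poll */
		printf("success: c = %d, old 'c >= LIMIT' test misfires: %d\n",
		       c, c >= LIMIT);

		c = bounded_wait(LIMIT + 10);	/* never becomes ready in time */
		printf("timeout: c = %d, new 'c > LIMIT' test fires: %d\n",
		       c, c > LIMIT);
		return 0;
	}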
diff --git a/arch/arm/mach-omap2/sram34xx.S b/arch/arm/mach-omap2/sram34xx.S
index c080c82..f41f8d9 100644
--- a/arch/arm/mach-omap2/sram34xx.S
+++ b/arch/arm/mach-omap2/sram34xx.S
@@ -3,13 +3,12 @@
*
* Omap3 specific functions that need to be run in internal SRAM
*
- * (C) Copyright 2007
- * Texas Instruments Inc.
- * Rajendra Nayak <rnayak@ti.com>
+ * Copyright (C) 2004, 2007, 2008 Texas Instruments, Inc.
+ * Copyright (C) 2008 Nokia Corporation
*
- * (C) Copyright 2004
- * Texas Instruments, <www.ti.com>
+ * Rajendra Nayak <rnayak@ti.com>
* Richard Woodruff <r-woodruff2@ti.com>
+ * Paul Walmsley
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
@@ -37,61 +36,112 @@
.text
+/* r4 parameters */
+#define SDRC_NO_UNLOCK_DLL 0x0
+#define SDRC_UNLOCK_DLL 0x1
+
+/* SDRC_DLLA_CTRL bit settings */
+#define FIXEDDELAY_SHIFT 24
+#define FIXEDDELAY_MASK (0xff << FIXEDDELAY_SHIFT)
+#define DLLIDLE_MASK 0x4
+
/*
- * Change frequency of core dpll
- * r0 = sdrc_rfr_ctrl r1 = sdrc_actim_ctrla r2 = sdrc_actim_ctrlb r3 = M2
- * r4 = Unlock SDRC DLL? (1 = yes, 0 = no) -- only unlock DLL for
+ * SDRC_DLLA_CTRL default values: TI hardware team indicates that
+ * FIXEDDELAY should be initialized to 0xf. This apparently was
+ * empirically determined during process testing, so no derivation
+ * was provided.
+ */
+#define FIXEDDELAY_DEFAULT (0x0f << FIXEDDELAY_SHIFT)
+
+/* SDRC_DLLA_STATUS bit settings */
+#define LOCKSTATUS_MASK 0x4
+
+/* SDRC_POWER bit settings */
+#define SRFRONIDLEREQ_MASK 0x40
+#define PWDENA_MASK 0x4
+
+/* CM_IDLEST1_CORE bit settings */
+#define ST_SDRC_MASK 0x2
+
+/* CM_ICLKEN1_CORE bit settings */
+#define EN_SDRC_MASK 0x2
+
+/* CM_CLKSEL1_PLL bit settings */
+#define CORE_DPLL_CLKOUT_DIV_SHIFT 0x1b
+
+/*
+ * omap3_sram_configure_core_dpll - change DPLL3 M2 divider
+ * r0 = new SDRC_RFR_CTRL register contents
+ * r1 = new SDRC_ACTIM_CTRLA register contents
+ * r2 = new SDRC_ACTIM_CTRLB register contents
+ * r3 = new M2 divider setting (only 1 and 2 supported right now)
+ * r4 = unlock SDRC DLL? (1 = yes, 0 = no). Only unlock DLL for
* SDRC rates < 83MHz
+ * r5 = number of MPU cycles to wait for SDRC to stabilize after
+ * reprogramming the SDRC when switching to a slower MPU speed
+ * r6 = new SDRC_MR_0 register value
+ * r7 = increasing SDRC rate? (1 = yes, 0 = no)
+ *
*/
ENTRY(omap3_sram_configure_core_dpll)
stmfd sp!, {r1-r12, lr} @ store regs to stack
ldr r4, [sp, #52] @ pull extra args off the stack
+ ldr r5, [sp, #56] @ load extra args from the stack
+ ldr r6, [sp, #60] @ load extra args from the stack
+ ldr r7, [sp, #64] @ load extra args from the stack
dsb @ flush buffered writes to interconnect
- cmp r3, #0x2
- blne configure_sdrc
- cmp r4, #0x1
+ cmp r7, #1 @ if increasing SDRC clk rate,
+ bleq configure_sdrc @ program the SDRC regs early (for RFR)
+ cmp r4, #SDRC_UNLOCK_DLL @ set the intended DLL state
bleq unlock_dll
blne lock_dll
- bl sdram_in_selfrefresh @ put the SDRAM in self refresh
- bl configure_core_dpll
- bl enable_sdrc
- cmp r4, #0x1
+ bl sdram_in_selfrefresh @ put SDRAM in self refresh, idle SDRC
+ bl configure_core_dpll @ change the DPLL3 M2 divider
+ bl enable_sdrc @ take SDRC out of idle
+ cmp r4, #SDRC_UNLOCK_DLL @ wait for DLL status to change
bleq wait_dll_unlock
blne wait_dll_lock
- cmp r3, #0x1
- blne configure_sdrc
+ cmp r7, #1 @ if increasing SDRC clk rate,
+ beq return_to_sdram @ return to SDRAM code, otherwise,
+ bl configure_sdrc @ reprogram SDRC regs now
+ mov r12, r5
+ bl wait_clk_stable @ wait for SDRC to stabilize
+return_to_sdram:
isb @ prevent speculative exec past here
mov r0, #0 @ return value
ldmfd sp!, {r1-r12, pc} @ restore regs and return
unlock_dll:
ldr r11, omap3_sdrc_dlla_ctrl
ldr r12, [r11]
- orr r12, r12, #0x4
+ and r12, r12, #FIXEDDELAY_MASK
+ orr r12, r12, #FIXEDDELAY_DEFAULT
+ orr r12, r12, #DLLIDLE_MASK
str r12, [r11] @ (no OCP barrier needed)
bx lr
lock_dll:
ldr r11, omap3_sdrc_dlla_ctrl
ldr r12, [r11]
- bic r12, r12, #0x4
+ bic r12, r12, #DLLIDLE_MASK
str r12, [r11] @ (no OCP barrier needed)
bx lr
sdram_in_selfrefresh:
ldr r11, omap3_sdrc_power @ read the SDRC_POWER register
ldr r12, [r11] @ read the contents of SDRC_POWER
mov r9, r12 @ keep a copy of SDRC_POWER bits
- orr r12, r12, #0x40 @ enable self refresh on idle req
- bic r12, r12, #0x4 @ clear PWDENA
+ orr r12, r12, #SRFRONIDLEREQ_MASK @ enable self refresh on idle
+ bic r12, r12, #PWDENA_MASK @ clear PWDENA
str r12, [r11] @ write back to SDRC_POWER register
ldr r12, [r11] @ posted-write barrier for SDRC
+idle_sdrc:
ldr r11, omap3_cm_iclken1_core @ read the CM_ICLKEN1_CORE reg
ldr r12, [r11]
- bic r12, r12, #0x2 @ disable iclk bit for SDRC
+ bic r12, r12, #EN_SDRC_MASK @ disable iclk bit for SDRC
str r12, [r11]
wait_sdrc_idle:
ldr r11, omap3_cm_idlest1_core
ldr r12, [r11]
- and r12, r12, #0x2 @ check for SDRC idle
- cmp r12, #2
+ and r12, r12, #ST_SDRC_MASK @ check for SDRC idle
+ cmp r12, #ST_SDRC_MASK
bne wait_sdrc_idle
bx lr
configure_core_dpll:
@@ -99,36 +149,23 @@
ldr r12, [r11]
ldr r10, core_m2_mask_val @ modify m2 for core dpll
and r12, r12, r10
- orr r12, r12, r3, lsl #0x1B @ r3 contains the M2 val
+ orr r12, r12, r3, lsl #CORE_DPLL_CLKOUT_DIV_SHIFT
str r12, [r11]
ldr r12, [r11] @ posted-write barrier for CM
- mov r12, #0x800 @ wait for the clock to stabilise
- cmp r3, #2
- bne wait_clk_stable
bx lr
wait_clk_stable:
subs r12, r12, #1
bne wait_clk_stable
- nop
- nop
- nop
- nop
- nop
- nop
- nop
- nop
- nop
- nop
bx lr
enable_sdrc:
ldr r11, omap3_cm_iclken1_core
ldr r12, [r11]
- orr r12, r12, #0x2 @ enable iclk bit for SDRC
+ orr r12, r12, #EN_SDRC_MASK @ enable iclk bit for SDRC
str r12, [r11]
wait_sdrc_idle1:
ldr r11, omap3_cm_idlest1_core
ldr r12, [r11]
- and r12, r12, #0x2
+ and r12, r12, #ST_SDRC_MASK
cmp r12, #0
bne wait_sdrc_idle1
restore_sdrc_power_val:
@@ -138,14 +175,14 @@
wait_dll_lock:
ldr r11, omap3_sdrc_dlla_status
ldr r12, [r11]
- and r12, r12, #0x4
- cmp r12, #0x4
+ and r12, r12, #LOCKSTATUS_MASK
+ cmp r12, #LOCKSTATUS_MASK
bne wait_dll_lock
bx lr
wait_dll_unlock:
ldr r11, omap3_sdrc_dlla_status
ldr r12, [r11]
- and r12, r12, #0x4
+ and r12, r12, #LOCKSTATUS_MASK
cmp r12, #0x0
bne wait_dll_unlock
bx lr
@@ -156,7 +193,9 @@
str r1, [r11]
ldr r11, omap3_sdrc_actim_ctrlb
str r2, [r11]
- ldr r2, [r11] @ posted-write barrier for SDRC
+ ldr r11, omap3_sdrc_mr_0
+ str r6, [r11]
+ ldr r6, [r11] @ posted-write barrier for SDRC
bx lr
omap3_sdrc_power:
@@ -173,6 +212,8 @@
.word OMAP34XX_SDRC_REGADDR(SDRC_ACTIM_CTRL_A_0)
omap3_sdrc_actim_ctrlb:
.word OMAP34XX_SDRC_REGADDR(SDRC_ACTIM_CTRL_B_0)
+omap3_sdrc_mr_0:
+ .word OMAP34XX_SDRC_REGADDR(SDRC_MR_0)
omap3_sdrc_dlla_status:
.word OMAP34XX_SDRC_REGADDR(SDRC_DLLA_STATUS)
omap3_sdrc_dlla_ctrl:
diff --git a/arch/arm/mach-orion5x/addr-map.c b/arch/arm/mach-orion5x/addr-map.c
index 6f3f77d..d78731e 100644
--- a/arch/arm/mach-orion5x/addr-map.c
+++ b/arch/arm/mach-orion5x/addr-map.c
@@ -200,6 +200,6 @@
int __init orion5x_setup_sram_win(void)
{
- return setup_cpu_win(win_alloc_count, ORION5X_SRAM_PHYS_BASE,
+ return setup_cpu_win(win_alloc_count++, ORION5X_SRAM_PHYS_BASE,
ORION5X_SRAM_SIZE, TARGET_SRAM, ATTR_SRAM, -1);
}
diff --git a/arch/arm/mach-orion5x/common.c b/arch/arm/mach-orion5x/common.c
index eafcc49..f87fa12 100644
--- a/arch/arm/mach-orion5x/common.c
+++ b/arch/arm/mach-orion5x/common.c
@@ -562,7 +562,7 @@
.resource = orion5x_crypto_res,
};
-int __init orion5x_crypto_init(void)
+static int __init orion5x_crypto_init(void)
{
int ret;
@@ -697,6 +697,14 @@
}
/*
+ * The 5082/5181l/5182/6082/6082l/6183 have crypto
+ * while 5180n/5181/5281 don't have crypto.
+ */
+ if ((dev == MV88F5181_DEV_ID && rev >= MV88F5181L_REV_A0) ||
+ dev == MV88F5182_DEV_ID || dev == MV88F6183_DEV_ID)
+ orion5x_crypto_init();
+
+ /*
* Register watchdog driver
*/
orion5x_wdt_init();
diff --git a/arch/arm/mach-orion5x/common.h b/arch/arm/mach-orion5x/common.h
index de483e8..8f00450 100644
--- a/arch/arm/mach-orion5x/common.h
+++ b/arch/arm/mach-orion5x/common.h
@@ -38,7 +38,6 @@
void orion5x_uart0_init(void);
void orion5x_uart1_init(void);
void orion5x_xor_init(void);
-int orion5x_crypto_init(void);
/*
* PCIe/PCI functions.
diff --git a/arch/arm/mach-pxa/Kconfig b/arch/arm/mach-pxa/Kconfig
index f4533f8..89c992b 100644
--- a/arch/arm/mach-pxa/Kconfig
+++ b/arch/arm/mach-pxa/Kconfig
@@ -401,6 +401,16 @@
Say Y here if you intend to run this kernel on Palm Zire 72
handheld computer.
+config MACH_TREO680
+ bool "Palm Treo 680"
+ default y
+ depends on ARCH_PXA_PALM
+ select PXA27x
+ select IWMMXT
+ help
+ Say Y here if you intend to run this kernel on Palm Treo 680
+ smartphone.
+
config MACH_PALMLD
bool "Palm LifeDrive"
default y
diff --git a/arch/arm/mach-pxa/Makefile b/arch/arm/mach-pxa/Makefile
index d18ffef..d4c6122 100644
--- a/arch/arm/mach-pxa/Makefile
+++ b/arch/arm/mach-pxa/Makefile
@@ -62,6 +62,7 @@
obj-$(CONFIG_MACH_PALMTX) += palmtx.o
obj-$(CONFIG_MACH_PALMLD) += palmld.o
obj-$(CONFIG_MACH_PALMZ72) += palmz72.o
+obj-$(CONFIG_MACH_TREO680) += treo680.o
obj-$(CONFIG_ARCH_VIPER) += viper.o
ifeq ($(CONFIG_MACH_ZYLONITE),y)
diff --git a/arch/arm/mach-pxa/corgi.c b/arch/arm/mach-pxa/corgi.c
index 962dda2..5363e1a 100644
--- a/arch/arm/mach-pxa/corgi.c
+++ b/arch/arm/mach-pxa/corgi.c
@@ -23,6 +23,7 @@
#include <linux/pm.h>
#include <linux/gpio.h>
#include <linux/backlight.h>
+#include <linux/i2c.h>
#include <linux/io.h>
#include <linux/spi/spi.h>
#include <linux/spi/ads7846.h>
@@ -600,6 +601,10 @@
&sharpsl_rom_device,
};
+static struct i2c_board_info __initdata corgi_i2c_devices[] = {
+ { I2C_BOARD_INFO("wm8731", 0x1b) },
+};
+
static void corgi_poweroff(void)
{
if (!machine_is_corgi())
@@ -634,6 +639,7 @@
pxa_set_mci_info(&corgi_mci_platform_data);
pxa_set_ficp_info(&corgi_ficp_platform_data);
pxa_set_i2c_info(NULL);
+ i2c_register_board_info(0, ARRAY_AND_SIZE(corgi_i2c_devices));
platform_scoop_config = &corgi_pcmcia_config;
diff --git a/arch/arm/mach-pxa/em-x270.c b/arch/arm/mach-pxa/em-x270.c
index 243e080..63b10d9 100644
--- a/arch/arm/mach-pxa/em-x270.c
+++ b/arch/arm/mach-pxa/em-x270.c
@@ -30,6 +30,7 @@
#include <linux/apm-emulation.h>
#include <linux/i2c.h>
#include <linux/i2c/pca953x.h>
+#include <linux/regulator/userspace-consumer.h>
#include <media/soc_camera.h>
@@ -735,6 +736,7 @@
.rx_threshold = 1,
.tx_threshold = 1,
.timeout = 1000,
+ .gpio_cs = 14,
};
static unsigned long em_x270_libertas_pin_config[] = {
@@ -803,7 +805,6 @@
struct libertas_spi_platform_data em_x270_libertas_pdata = {
.use_dummy_writes = 1,
- .gpio_cs = 14,
.setup = em_x270_libertas_setup,
.teardown = em_x270_libertas_teardown,
};
@@ -838,10 +839,14 @@
static inline void em_x270_init_spi(void) {}
#endif
-#if defined(CONFIG_SND_PXA2XX_AC97) || defined(CONFIG_SND_PXA2XX_AC97_MODULE)
+#if defined(CONFIG_SND_PXA2XX_LIB_AC97)
+static pxa2xx_audio_ops_t em_x270_ac97_info = {
+ .reset_gpio = 113,
+};
+
static void __init em_x270_init_ac97(void)
{
- pxa_set_ac97_info(NULL);
+ pxa_set_ac97_info(&em_x270_ac97_info);
}
#else
static inline void em_x270_init_ac97(void) {}
@@ -1038,6 +1043,52 @@
static inline void em_x270_init_camera(void) {}
#endif
+static struct regulator_bulk_data em_x270_gps_consumer_supply = {
+ .supply = "vcc gps",
+};
+
+static struct regulator_userspace_consumer_data em_x270_gps_consumer_data = {
+ .name = "vcc gps",
+ .num_supplies = 1,
+ .supplies = &em_x270_gps_consumer_supply,
+};
+
+static struct platform_device em_x270_gps_userspace_consumer = {
+ .name = "reg-userspace-consumer",
+ .id = 0,
+ .dev = {
+ .platform_data = &em_x270_gps_consumer_data,
+ },
+};
+
+static struct regulator_bulk_data em_x270_gprs_consumer_supply = {
+ .supply = "vcc gprs",
+};
+
+static struct regulator_userspace_consumer_data em_x270_gprs_consumer_data = {
+ .name = "vcc gprs",
+ .num_supplies = 1,
+ .supplies = &em_x270_gprs_consumer_supply
+};
+
+static struct platform_device em_x270_gprs_userspace_consumer = {
+ .name = "reg-userspace-consumer",
+ .id = 1,
+ .dev = {
+ .platform_data = &em_x270_gprs_consumer_data,
+ }
+};
+
+static struct platform_device *em_x270_userspace_consumers[] = {
+ &em_x270_gps_userspace_consumer,
+ &em_x270_gprs_userspace_consumer,
+};
+
+static void __init em_x270_userspace_consumers_init(void)
+{
+ platform_add_devices(ARRAY_AND_SIZE(em_x270_userspace_consumers));
+}
+
/* DA9030 related initializations */
#define REGULATOR_CONSUMER(_name, _dev, _supply) \
static struct regulator_consumer_supply _name##_consumers[] = { \
@@ -1047,11 +1098,11 @@
}, \
}
-REGULATOR_CONSUMER(ldo3, NULL, "vcc gps");
+REGULATOR_CONSUMER(ldo3, &em_x270_gps_userspace_consumer.dev, "vcc gps");
REGULATOR_CONSUMER(ldo5, NULL, "vcc cam");
REGULATOR_CONSUMER(ldo10, &pxa_device_mci.dev, "vcc sdio");
REGULATOR_CONSUMER(ldo12, NULL, "vcc usb");
-REGULATOR_CONSUMER(ldo19, NULL, "vcc gprs");
+REGULATOR_CONSUMER(ldo19, &em_x270_gprs_userspace_consumer.dev, "vcc gprs");
#define REGULATOR_INIT(_ldo, _min_uV, _max_uV, _ops_mask) \
static struct regulator_init_data _ldo##_data = { \
@@ -1062,6 +1113,7 @@
.enabled = 0, \
}, \
.valid_ops_mask = _ops_mask, \
+ .apply_uV = 1, \
}, \
.num_consumer_supplies = ARRAY_SIZE(_ldo##_consumers), \
.consumer_supplies = _ldo##_consumers, \
@@ -1240,6 +1292,7 @@
em_x270_init_spi();
em_x270_init_i2c();
em_x270_init_camera();
+ em_x270_userspace_consumers_init();
}
MACHINE_START(EM_X270, "Compulab EM-X270")
diff --git a/arch/arm/mach-pxa/hx4700.c b/arch/arm/mach-pxa/hx4700.c
index 7fff467..81359d5 100644
--- a/arch/arm/mach-pxa/hx4700.c
+++ b/arch/arm/mach-pxa/hx4700.c
@@ -30,6 +30,7 @@
#include <linux/pwm_backlight.h>
#include <linux/regulator/bq24022.h>
#include <linux/regulator/machine.h>
+#include <linux/regulator/max1586.h>
#include <linux/spi/ads7846.h>
#include <linux/spi/spi.h>
#include <linux/usb/gpio_vbus.h>
@@ -775,6 +776,45 @@
};
/*
+ * Maxim MAX1587A on PI2C
+ */
+
+static struct regulator_consumer_supply max1587a_consumer = {
+ .supply = "vcc_core",
+};
+
+static struct regulator_init_data max1587a_v3_info = {
+ .constraints = {
+ .name = "vcc_core range",
+ .min_uV = 900000,
+ .max_uV = 1705000,
+ .always_on = 1,
+ .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE,
+ },
+ .num_consumer_supplies = 1,
+ .consumer_supplies = &max1587a_consumer,
+};
+
+static struct max1586_subdev_data max1587a_subdev = {
+ .name = "vcc_core",
+ .id = MAX1586_V3,
+ .platform_data = &max1587a_v3_info,
+};
+
+static struct max1586_platform_data max1587a_info = {
+ .num_subdevs = 1,
+ .subdevs = &max1587a_subdev,
+ .v3_gain = MAX1586_GAIN_R24_3k32, /* 730..1550 mV */
+};
+
+static struct i2c_board_info __initdata pi2c_board_info[] = {
+ {
+ I2C_BOARD_INFO("max1586", 0x14),
+ .platform_data = &max1587a_info,
+ },
+};
+
+/*
* PCMCIA
*/
@@ -828,6 +868,7 @@
pxa_set_ficp_info(&ficp_info);
pxa27x_set_i2c_power_info(NULL);
pxa_set_i2c_info(NULL);
+ i2c_register_board_info(1, ARRAY_AND_SIZE(pi2c_board_info));
pxa2xx_set_spi_info(2, &pxa_ssp2_master_info);
spi_register_board_info(ARRAY_AND_SIZE(tsc2046_board_info));
diff --git a/arch/arm/mach-pxa/include/mach/palmz72.h b/arch/arm/mach-pxa/include/mach/palmz72.h
index 5032307..2806ef6 100644
--- a/arch/arm/mach-pxa/include/mach/palmz72.h
+++ b/arch/arm/mach-pxa/include/mach/palmz72.h
@@ -21,7 +21,7 @@
/* SD/MMC */
#define GPIO_NR_PALMZ72_SD_DETECT_N 14
#define GPIO_NR_PALMZ72_SD_POWER_N 98
-#define GPIO_NR_PALMZ72_SD_RO 115
+#define GPIO_NR_PALMZ72_SD_RO 115
/* Touchscreen */
#define GPIO_NR_PALMZ72_WM9712_IRQ 27
@@ -31,8 +31,7 @@
/* USB */
#define GPIO_NR_PALMZ72_USB_DETECT_N 15
-#define GPIO_NR_PALMZ72_USB_POWER 95
-#define GPIO_NR_PALMZ72_USB_PULLUP 12
+#define GPIO_NR_PALMZ72_USB_PULLUP 95
/* LCD/Backlight */
#define GPIO_NR_PALMZ72_BL_POWER 20
diff --git a/arch/arm/mach-pxa/include/mach/treo680.h b/arch/arm/mach-pxa/include/mach/treo680.h
new file mode 100644
index 0000000..af443b2
--- /dev/null
+++ b/arch/arm/mach-pxa/include/mach/treo680.h
@@ -0,0 +1,49 @@
+/*
+ * GPIOs and interrupts for Palm Treo 680 smartphone
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#ifndef _INCLUDE_TREO680_H_
+#define _INCLUDE_TREO680_H_
+
+/* GPIOs */
+#define GPIO_NR_TREO680_POWER_DETECT 0
+#define GPIO_NR_TREO680_AMP_EN 27
+#define GPIO_NR_TREO680_KEYB_BL 24
+#define GPIO_NR_TREO680_VIBRATE_EN 44
+#define GPIO_NR_TREO680_GREEN_LED 20
+#define GPIO_NR_TREO680_RED_LED 79
+#define GPIO_NR_TREO680_SD_DETECT_N 113
+#define GPIO_NR_TREO680_SD_READONLY 33
+#define GPIO_NR_TREO680_EP_DETECT_N 116
+#define GPIO_NR_TREO680_SD_POWER 42
+#define GPIO_NR_TREO680_USB_DETECT 1
+#define GPIO_NR_TREO680_USB_PULLUP 114
+#define GPIO_NR_TREO680_GSM_POWER 40
+#define GPIO_NR_TREO680_GSM_RESET 87
+#define GPIO_NR_TREO680_GSM_WAKE 57
+#define GPIO_NR_TREO680_GSM_HOST_WAKE 14
+#define GPIO_NR_TREO680_GSM_TRIGGER 10
+#define GPIO_NR_TREO680_BT_EN 43
+#define GPIO_NR_TREO680_IR_EN 115
+#define GPIO_NR_TREO680_IR_TXD 47
+#define GPIO_NR_TREO680_BL_POWER 38
+#define GPIO_NR_TREO680_LCD_POWER 25
+
+/* Various addresses */
+#define TREO680_PHYS_RAM_START 0xa0000000
+#define TREO680_PHYS_IO_START 0x40000000
+#define TREO680_STR_BASE 0xa2000000
+
+/* BACKLIGHT */
+#define TREO680_MAX_INTENSITY 254
+#define TREO680_DEFAULT_INTENSITY 160
+#define TREO680_LIMIT_MASK 0x7F
+#define TREO680_PRESCALER 63
+#define TREO680_PERIOD_NS 3500
+
+#endif
diff --git a/arch/arm/mach-pxa/mioa701.c b/arch/arm/mach-pxa/mioa701.c
index 4dc8c2e..2d28132 100644
--- a/arch/arm/mach-pxa/mioa701.c
+++ b/arch/arm/mach-pxa/mioa701.c
@@ -37,6 +37,7 @@
#include <linux/wm97xx_batt.h>
#include <linux/mtd/physmap.h>
#include <linux/usb/gpio_vbus.h>
+#include <linux/regulator/max1586.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
@@ -717,6 +718,38 @@
};
/*
+ * Voltage regulation
+ */
+static struct regulator_consumer_supply max1586_consumers[] = {
+ {
+ .supply = "vcc_core",
+ }
+};
+
+static struct regulator_init_data max1586_v3_info = {
+ .constraints = {
+ .name = "vcc_core range",
+ .min_uV = 1000000,
+ .max_uV = 1705000,
+ .always_on = 1,
+ .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE,
+ },
+ .num_consumer_supplies = ARRAY_SIZE(max1586_consumers),
+ .consumer_supplies = max1586_consumers,
+};
+
+static struct max1586_subdev_data max1586_subdevs[] = {
+ { .name = "vcc_core", .id = MAX1586_V3,
+ .platform_data = &max1586_v3_info },
+};
+
+static struct max1586_platform_data max1586_info = {
+ .subdevs = max1586_subdevs,
+ .num_subdevs = ARRAY_SIZE(max1586_subdevs),
+ .v3_gain = MAX1586_GAIN_NO_R24, /* 700..1475 mV */
+};
+
+/*
* Camera interface
*/
struct pxacamera_platform_data mioa701_pxacamera_platform_data = {
@@ -725,6 +758,13 @@
.mclk_10khz = 5000,
};
+static struct i2c_board_info __initdata mioa701_pi2c_devices[] = {
+ {
+ I2C_BOARD_INFO("max1586", 0x14),
+ .platform_data = &max1586_info,
+ },
+};
+
static struct soc_camera_link iclink = {
.bus_id = 0, /* Must match id in pxa27x_device_camera in device.c */
};
@@ -825,7 +865,9 @@
platform_add_devices(devices, ARRAY_SIZE(devices));
gsm_init();
+ i2c_register_board_info(1, ARRAY_AND_SIZE(mioa701_pi2c_devices));
pxa_set_i2c_info(&i2c_pdata);
+ pxa27x_set_i2c_power_info(NULL);
pxa_set_camera_info(&mioa701_pxacamera_platform_data);
i2c_register_board_info(0, ARRAY_AND_SIZE(mioa701_i2c_devices));
}
diff --git a/arch/arm/mach-pxa/palmz72.c b/arch/arm/mach-pxa/palmz72.c
index b88eb4d..c3645aa 100644
--- a/arch/arm/mach-pxa/palmz72.c
+++ b/arch/arm/mach-pxa/palmz72.c
@@ -27,7 +27,9 @@
#include <linux/pda_power.h>
#include <linux/pwm_backlight.h>
#include <linux/gpio.h>
+#include <linux/wm97xx_batt.h>
#include <linux/power_supply.h>
+#include <linux/usb/gpio_vbus.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
@@ -41,6 +43,8 @@
#include <mach/irda.h>
#include <mach/pxa27x_keypad.h>
#include <mach/udc.h>
+#include <mach/palmasoc.h>
+
#include <mach/pm.h>
#include "generic.h"
@@ -66,6 +70,8 @@
GPIO29_AC97_SDATA_IN_0,
GPIO30_AC97_SDATA_OUT,
GPIO31_AC97_SYNC,
+ GPIO89_AC97_SYSCLK,
+ GPIO113_AC97_nRESET,
/* IrDA */
GPIO49_GPIO, /* ir disable */
@@ -77,8 +83,7 @@
/* USB */
GPIO15_GPIO, /* usb detect */
- GPIO12_GPIO, /* usb pullup */
- GPIO95_GPIO, /* usb power */
+ GPIO95_GPIO, /* usb pullup */
/* Matrix keypad */
GPIO100_KP_MKIN_0 | WAKEUP_ON_LEVEL_HIGH,
@@ -355,6 +360,22 @@
};
/******************************************************************************
+ * UDC
+ ******************************************************************************/
+static struct gpio_vbus_mach_info palmz72_udc_info = {
+ .gpio_vbus = GPIO_NR_PALMZ72_USB_DETECT_N,
+ .gpio_pullup = GPIO_NR_PALMZ72_USB_PULLUP,
+};
+
+static struct platform_device palmz72_gpio_vbus = {
+ .name = "gpio-vbus",
+ .id = -1,
+ .dev = {
+ .platform_data = &palmz72_udc_info,
+ },
+};
+
+/******************************************************************************
* Power supply
******************************************************************************/
static int power_supply_init(struct device *dev)
@@ -422,6 +443,31 @@
};
/******************************************************************************
+ * WM97xx battery
+ ******************************************************************************/
+static struct wm97xx_batt_info wm97xx_batt_pdata = {
+ .batt_aux = WM97XX_AUX_ID3,
+ .temp_aux = WM97XX_AUX_ID2,
+ .charge_gpio = -1,
+ .max_voltage = PALMZ72_BAT_MAX_VOLTAGE,
+ .min_voltage = PALMZ72_BAT_MIN_VOLTAGE,
+ .batt_mult = 1000,
+ .batt_div = 414,
+ .temp_mult = 1,
+ .temp_div = 1,
+ .batt_tech = POWER_SUPPLY_TECHNOLOGY_LIPO,
+ .batt_name = "main-batt",
+};
+
+/******************************************************************************
+ * aSoC audio
+ ******************************************************************************/
+static struct platform_device palmz72_asoc = {
+ .name = "palm27x-asoc",
+ .id = -1,
+};
+
+/******************************************************************************
* Framebuffer
******************************************************************************/
static struct pxafb_mode_info palmz72_lcd_modes[] = {
@@ -527,17 +573,32 @@
static struct platform_device *devices[] __initdata = {
&palmz72_backlight,
&palmz72_leds,
+ &palmz72_asoc,
&power_supply,
+ &palmz72_gpio_vbus,
};
+/* setup udc GPIOs initial state */
+static void __init palmz72_udc_init(void)
+{
+ if (!gpio_request(GPIO_NR_PALMZ72_USB_PULLUP, "USB Pullup")) {
+ gpio_direction_output(GPIO_NR_PALMZ72_USB_PULLUP, 0);
+ gpio_free(GPIO_NR_PALMZ72_USB_PULLUP);
+ }
+}
+
static void __init palmz72_init(void)
{
pxa2xx_mfp_config(ARRAY_AND_SIZE(palmz72_pin_config));
+
set_pxa_fb_info(&palmz72_lcd_screen);
pxa_set_mci_info(&palmz72_mci_platform_data);
+ palmz72_udc_init();
pxa_set_ac97_info(NULL);
pxa_set_ficp_info(&palmz72_ficp_platform_data);
pxa_set_keypad_info(&palmz72_keypad_platform_data);
+ wm97xx_bat_set_pdata(&wm97xx_batt_pdata);
+
platform_add_devices(devices, ARRAY_SIZE(devices));
}
diff --git a/arch/arm/mach-pxa/poodle.c b/arch/arm/mach-pxa/poodle.c
index ac431ed..9352d4a 100644
--- a/arch/arm/mach-pxa/poodle.c
+++ b/arch/arm/mach-pxa/poodle.c
@@ -22,6 +22,7 @@
#include <linux/delay.h>
#include <linux/mtd/physmap.h>
#include <linux/gpio.h>
+#include <linux/i2c.h>
#include <linux/spi/spi.h>
#include <linux/spi/ads7846.h>
#include <linux/mtd/sharpsl.h>
@@ -486,6 +487,10 @@
&sharpsl_rom_device,
};
+static struct i2c_board_info __initdata poodle_i2c_devices[] = {
+ { I2C_BOARD_INFO("wm8731", 0x1b) },
+};
+
static void poodle_poweroff(void)
{
arm_machine_restart('h', NULL);
@@ -519,6 +524,7 @@
pxa_set_mci_info(&poodle_mci_platform_data);
pxa_set_ficp_info(&poodle_ficp_platform_data);
pxa_set_i2c_info(NULL);
+ i2c_register_board_info(0, ARRAY_AND_SIZE(poodle_i2c_devices));
poodle_init_spi();
}
diff --git a/arch/arm/mach-pxa/treo680.c b/arch/arm/mach-pxa/treo680.c
new file mode 100644
index 0000000..a06f19e
--- /dev/null
+++ b/arch/arm/mach-pxa/treo680.c
@@ -0,0 +1,612 @@
+/*
+ * Hardware definitions for Palm Treo 680
+ *
+ * Author: Tomas Cech <sleep_walker@suse.cz>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * (find more info at www.hackndev.com)
+ *
+ */
+
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+#include <linux/irq.h>
+#include <linux/gpio_keys.h>
+#include <linux/input.h>
+#include <linux/pda_power.h>
+#include <linux/pwm_backlight.h>
+#include <linux/gpio.h>
+#include <linux/wm97xx_batt.h>
+#include <linux/power_supply.h>
+#include <linux/sysdev.h>
+#include <linux/w1-gpio.h>
+
+#include <asm/mach-types.h>
+#include <asm/mach/arch.h>
+#include <asm/mach/map.h>
+
+#include <mach/pxa27x.h>
+#include <mach/pxa27x-udc.h>
+#include <mach/audio.h>
+#include <mach/treo680.h>
+#include <mach/mmc.h>
+#include <mach/pxafb.h>
+#include <mach/irda.h>
+#include <mach/pxa27x_keypad.h>
+#include <mach/udc.h>
+#include <mach/ohci.h>
+#include <mach/pxa2xx-regs.h>
+#include <mach/palmasoc.h>
+#include <mach/camera.h>
+
+#include <sound/pxa2xx-lib.h>
+
+#include "generic.h"
+#include "devices.h"
+
+/******************************************************************************
+ * Pin configuration
+ ******************************************************************************/
+static unsigned long treo680_pin_config[] __initdata = {
+ /* MMC */
+ GPIO32_MMC_CLK,
+ GPIO92_MMC_DAT_0,
+ GPIO109_MMC_DAT_1,
+ GPIO110_MMC_DAT_2,
+ GPIO111_MMC_DAT_3,
+ GPIO112_MMC_CMD,
+ GPIO33_GPIO, /* SD read only */
+ GPIO113_GPIO, /* SD detect */
+
+ /* AC97 */
+ GPIO28_AC97_BITCLK,
+ GPIO29_AC97_SDATA_IN_0,
+ GPIO30_AC97_SDATA_OUT,
+ GPIO31_AC97_SYNC,
+ GPIO89_AC97_SYSCLK,
+ GPIO95_AC97_nRESET,
+
+ /* IrDA */
+ GPIO46_FICP_RXD,
+ GPIO47_FICP_TXD,
+
+ /* PWM */
+ GPIO16_PWM0_OUT,
+
+ /* USB */
+ GPIO1_GPIO | WAKEUP_ON_EDGE_BOTH, /* usb detect */
+
+ /* MATRIX KEYPAD */
+ GPIO100_KP_MKIN_0 | WAKEUP_ON_LEVEL_HIGH,
+ GPIO101_KP_MKIN_1,
+ GPIO102_KP_MKIN_2,
+ GPIO97_KP_MKIN_3,
+ GPIO98_KP_MKIN_4,
+ GPIO99_KP_MKIN_5,
+ GPIO91_KP_MKIN_6,
+ GPIO13_KP_MKIN_7,
+ GPIO103_KP_MKOUT_0 | MFP_LPM_DRIVE_HIGH,
+ GPIO104_KP_MKOUT_1,
+ GPIO105_KP_MKOUT_2,
+ GPIO106_KP_MKOUT_3,
+ GPIO107_KP_MKOUT_4,
+ GPIO108_KP_MKOUT_5,
+ GPIO96_KP_MKOUT_6,
+ GPIO93_KP_DKIN_0 | WAKEUP_ON_LEVEL_HIGH, /* Hotsync button */
+
+ /* LCD */
+ GPIO58_LCD_LDD_0,
+ GPIO59_LCD_LDD_1,
+ GPIO60_LCD_LDD_2,
+ GPIO61_LCD_LDD_3,
+ GPIO62_LCD_LDD_4,
+ GPIO63_LCD_LDD_5,
+ GPIO64_LCD_LDD_6,
+ GPIO65_LCD_LDD_7,
+ GPIO66_LCD_LDD_8,
+ GPIO67_LCD_LDD_9,
+ GPIO68_LCD_LDD_10,
+ GPIO69_LCD_LDD_11,
+ GPIO70_LCD_LDD_12,
+ GPIO71_LCD_LDD_13,
+ GPIO72_LCD_LDD_14,
+ GPIO73_LCD_LDD_15,
+ GPIO74_LCD_FCLK,
+ GPIO75_LCD_LCLK,
+ GPIO76_LCD_PCLK,
+
+ /* Quick Capture Interface */
+ GPIO84_CIF_FV,
+ GPIO85_CIF_LV,
+ GPIO53_CIF_MCLK,
+ GPIO54_CIF_PCLK,
+ GPIO81_CIF_DD_0,
+ GPIO55_CIF_DD_1,
+ GPIO51_CIF_DD_2,
+ GPIO50_CIF_DD_3,
+ GPIO52_CIF_DD_4,
+ GPIO48_CIF_DD_5,
+ GPIO17_CIF_DD_6,
+ GPIO12_CIF_DD_7,
+
+ /* I2C */
+ GPIO117_I2C_SCL,
+ GPIO118_I2C_SDA,
+
+ /* GSM */
+ GPIO14_GPIO | WAKEUP_ON_EDGE_BOTH, /* GSM host wake up */
+ GPIO34_FFUART_RXD,
+ GPIO35_FFUART_CTS,
+ GPIO39_FFUART_TXD,
+ GPIO41_FFUART_RTS,
+
+ /* MISC. */
+ GPIO0_GPIO | WAKEUP_ON_EDGE_BOTH, /* external power detect */
+ GPIO15_GPIO | WAKEUP_ON_EDGE_BOTH, /* silent switch */
+ GPIO116_GPIO, /* headphone detect */
+ GPIO11_GPIO | WAKEUP_ON_EDGE_BOTH, /* bluetooth host wake up */
+};
+
+/******************************************************************************
+ * SD/MMC card controller
+ ******************************************************************************/
+static int treo680_mci_init(struct device *dev,
+ irq_handler_t treo680_detect_int, void *data)
+{
+ int err = 0;
+
+ /* Setup an interrupt for detecting card insert/remove events */
+ err = gpio_request(GPIO_NR_TREO680_SD_DETECT_N, "SD IRQ");
+
+ if (err)
+ goto err;
+
+ err = gpio_direction_input(GPIO_NR_TREO680_SD_DETECT_N);
+ if (err)
+ goto err2;
+
+ err = request_irq(gpio_to_irq(GPIO_NR_TREO680_SD_DETECT_N),
+ treo680_detect_int, IRQF_DISABLED | IRQF_SAMPLE_RANDOM |
+ IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING,
+ "SD/MMC card detect", data);
+
+ if (err) {
+ dev_err(dev, "%s: cannot request SD/MMC card detect IRQ\n",
+ __func__);
+ goto err2;
+ }
+
+ err = gpio_request(GPIO_NR_TREO680_SD_POWER, "SD_POWER");
+ if (err)
+ goto err3;
+
+ err = gpio_direction_output(GPIO_NR_TREO680_SD_POWER, 1);
+ if (err)
+ goto err4;
+
+ err = gpio_request(GPIO_NR_TREO680_SD_READONLY, "SD_READONLY");
+ if (err)
+ goto err4;
+
+ err = gpio_direction_input(GPIO_NR_TREO680_SD_READONLY);
+ if (err)
+ goto err5;
+
+ return 0;
+
+err5:
+ gpio_free(GPIO_NR_TREO680_SD_READONLY);
+err4:
+ gpio_free(GPIO_NR_TREO680_SD_POWER);
+err3:
+ free_irq(gpio_to_irq(GPIO_NR_TREO680_SD_DETECT_N), data);
+err2:
+ gpio_free(GPIO_NR_TREO680_SD_DETECT_N);
+err:
+ return err;
+}
+
+static void treo680_mci_exit(struct device *dev, void *data)
+{
+ gpio_free(GPIO_NR_TREO680_SD_READONLY);
+ gpio_free(GPIO_NR_TREO680_SD_POWER);
+ free_irq(gpio_to_irq(GPIO_NR_TREO680_SD_DETECT_N), data);
+ gpio_free(GPIO_NR_TREO680_SD_DETECT_N);
+}
+
+static void treo680_mci_power(struct device *dev, unsigned int vdd)
+{
+ struct pxamci_platform_data *p_d = dev->platform_data;
+ gpio_set_value(GPIO_NR_TREO680_SD_POWER, p_d->ocr_mask & (1 << vdd));
+}
+
+static int treo680_mci_get_ro(struct device *dev)
+{
+ return gpio_get_value(GPIO_NR_TREO680_SD_READONLY);
+}
+
+static struct pxamci_platform_data treo680_mci_platform_data = {
+ .ocr_mask = MMC_VDD_32_33 | MMC_VDD_33_34,
+ .setpower = treo680_mci_power,
+ .get_ro = treo680_mci_get_ro,
+ .init = treo680_mci_init,
+ .exit = treo680_mci_exit,
+};
+
+/******************************************************************************
+ * GPIO keyboard
+ ******************************************************************************/
+static unsigned int treo680_matrix_keys[] = {
+ KEY(0, 0, KEY_F8), /* Red/Off/Power */
+ KEY(0, 1, KEY_LEFT),
+ KEY(0, 2, KEY_LEFTCTRL), /* Alternate */
+ KEY(0, 3, KEY_L),
+ KEY(0, 4, KEY_A),
+ KEY(0, 5, KEY_Q),
+ KEY(0, 6, KEY_P),
+
+ KEY(1, 0, KEY_RIGHTCTRL), /* Menu */
+ KEY(1, 1, KEY_RIGHT),
+ KEY(1, 2, KEY_LEFTSHIFT), /* Left shift */
+ KEY(1, 3, KEY_Z),
+ KEY(1, 4, KEY_S),
+ KEY(1, 5, KEY_W),
+
+ KEY(2, 0, KEY_F1), /* Phone */
+ KEY(2, 1, KEY_UP),
+ KEY(2, 2, KEY_0),
+ KEY(2, 3, KEY_X),
+ KEY(2, 4, KEY_D),
+ KEY(2, 5, KEY_E),
+
+ KEY(3, 0, KEY_F10), /* Calendar */
+ KEY(3, 1, KEY_DOWN),
+ KEY(3, 2, KEY_SPACE),
+ KEY(3, 3, KEY_C),
+ KEY(3, 4, KEY_F),
+ KEY(3, 5, KEY_R),
+
+ KEY(4, 0, KEY_F12), /* Mail */
+ KEY(4, 1, KEY_KPENTER),
+ KEY(4, 2, KEY_RIGHTALT), /* Alt */
+ KEY(4, 3, KEY_V),
+ KEY(4, 4, KEY_G),
+ KEY(4, 5, KEY_T),
+
+ KEY(5, 0, KEY_F9), /* Home */
+ KEY(5, 1, KEY_PAGEUP), /* Side up */
+ KEY(5, 2, KEY_DOT),
+ KEY(5, 3, KEY_B),
+ KEY(5, 4, KEY_H),
+ KEY(5, 5, KEY_Y),
+
+ KEY(6, 0, KEY_TAB), /* Side Activate */
+ KEY(6, 1, KEY_PAGEDOWN), /* Side down */
+ KEY(6, 2, KEY_ENTER),
+ KEY(6, 3, KEY_N),
+ KEY(6, 4, KEY_J),
+ KEY(6, 5, KEY_U),
+
+ KEY(7, 0, KEY_F6), /* Green/Call */
+ KEY(7, 1, KEY_O),
+ KEY(7, 2, KEY_BACKSPACE),
+ KEY(7, 3, KEY_M),
+ KEY(7, 4, KEY_K),
+ KEY(7, 5, KEY_I),
+};
+
+static struct pxa27x_keypad_platform_data treo680_keypad_platform_data = {
+ .matrix_key_rows = 8,
+ .matrix_key_cols = 7,
+ .matrix_key_map = treo680_matrix_keys,
+ .matrix_key_map_size = ARRAY_SIZE(treo680_matrix_keys),
+ .direct_key_map = { KEY_CONNECT },
+ .direct_key_num = 1,
+
+ .debounce_interval = 30,
+};
+
+/******************************************************************************
+ * aSoC audio
+ ******************************************************************************/
+
+static pxa2xx_audio_ops_t treo680_ac97_pdata = {
+ .reset_gpio = 95,
+};
+
+/******************************************************************************
+ * Backlight
+ ******************************************************************************/
+static int treo680_backlight_init(struct device *dev)
+{
+ int ret;
+
+ ret = gpio_request(GPIO_NR_TREO680_BL_POWER, "BL POWER");
+ if (ret)
+ goto err;
+ ret = gpio_direction_output(GPIO_NR_TREO680_BL_POWER, 0);
+ if (ret)
+ goto err2;
+ ret = gpio_request(GPIO_NR_TREO680_LCD_POWER, "LCD POWER");
+ if (ret)
+ goto err2;
+ ret = gpio_direction_output(GPIO_NR_TREO680_LCD_POWER, 0);
+ if (ret)
+ goto err3;
+
+ return 0;
+err3:
+ gpio_free(GPIO_NR_TREO680_LCD_POWER);
+err2:
+ gpio_free(GPIO_NR_TREO680_BL_POWER);
+err:
+ return ret;
+}
+
+static int treo680_backlight_notify(int brightness)
+{
+ gpio_set_value(GPIO_NR_TREO680_BL_POWER, brightness);
+ return TREO680_MAX_INTENSITY - brightness;
+}
+
+static void treo680_backlight_exit(struct device *dev)
+{
+ gpio_free(GPIO_NR_TREO680_BL_POWER);
+ gpio_free(GPIO_NR_TREO680_LCD_POWER);
+}
+
+static struct platform_pwm_backlight_data treo680_backlight_data = {
+ .pwm_id = 0,
+ .max_brightness = TREO680_MAX_INTENSITY,
+ .dft_brightness = TREO680_DEFAULT_INTENSITY,
+ .pwm_period_ns = TREO680_PERIOD_NS,
+ .init = treo680_backlight_init,
+ .notify = treo680_backlight_notify,
+ .exit = treo680_backlight_exit,
+};
+
+static struct platform_device treo680_backlight = {
+ .name = "pwm-backlight",
+ .dev = {
+ .parent = &pxa27x_device_pwm0.dev,
+ .platform_data = &treo680_backlight_data,
+ },
+};
+
+/******************************************************************************
+ * IrDA
+ ******************************************************************************/
+static void treo680_transceiver_mode(struct device *dev, int mode)
+{
+ gpio_set_value(GPIO_NR_TREO680_IR_EN, mode & IR_OFF);
+ pxa2xx_transceiver_mode(dev, mode);
+}
+
+static int treo680_irda_startup(struct device *dev)
+{
+ int err;
+
+ err = gpio_request(GPIO_NR_TREO680_IR_EN, "Ir port disable");
+ if (err)
+ goto err1;
+
+ err = gpio_direction_output(GPIO_NR_TREO680_IR_EN, 1);
+ if (err)
+ goto err2;
+
+ return 0;
+
+err2:
+ dev_err(dev, "treo680_irda: cannot change IR gpio direction\n");
+ gpio_free(GPIO_NR_TREO680_IR_EN);
+err1:
+ dev_err(dev, "treo680_irda: cannot allocate IR gpio\n");
+ return err;
+}
+
+static void treo680_irda_shutdown(struct device *dev)
+{
+ gpio_free(GPIO_NR_TREO680_AMP_EN);
+}
+
+static struct pxaficp_platform_data treo680_ficp_info = {
+ .transceiver_cap = IR_FIRMODE | IR_SIRMODE | IR_OFF,
+ .startup = treo680_irda_startup,
+ .shutdown = treo680_irda_shutdown,
+ .transceiver_mode = treo680_transceiver_mode,
+};
+
+/******************************************************************************
+ * UDC
+ ******************************************************************************/
+static struct pxa2xx_udc_mach_info treo680_udc_info __initdata = {
+ .gpio_vbus = GPIO_NR_TREO680_USB_DETECT,
+ .gpio_vbus_inverted = 1,
+ .gpio_pullup = GPIO_NR_TREO680_USB_PULLUP,
+};
+
+
+/******************************************************************************
+ * USB host
+ ******************************************************************************/
+static struct pxaohci_platform_data treo680_ohci_info = {
+ .port_mode = PMM_PERPORT_MODE,
+ .flags = ENABLE_PORT1 | ENABLE_PORT3,
+ .power_budget = 0,
+};
+
+/******************************************************************************
+ * Power supply
+ ******************************************************************************/
+static int power_supply_init(struct device *dev)
+{
+ int ret;
+
+ ret = gpio_request(GPIO_NR_TREO680_POWER_DETECT, "CABLE_STATE_AC");
+ if (ret)
+ goto err1;
+ ret = gpio_direction_input(GPIO_NR_TREO680_POWER_DETECT);
+ if (ret)
+ goto err2;
+
+ return 0;
+
+err2:
+ gpio_free(GPIO_NR_TREO680_POWER_DETECT);
+err1:
+ return ret;
+}
+
+static int treo680_is_ac_online(void)
+{
+ return gpio_get_value(GPIO_NR_TREO680_POWER_DETECT);
+}
+
+static void power_supply_exit(struct device *dev)
+{
+ gpio_free(GPIO_NR_TREO680_POWER_DETECT);
+}
+
+static char *treo680_supplicants[] = {
+ "main-battery",
+};
+
+static struct pda_power_pdata power_supply_info = {
+ .init = power_supply_init,
+ .is_ac_online = treo680_is_ac_online,
+ .exit = power_supply_exit,
+ .supplied_to = treo680_supplicants,
+ .num_supplicants = ARRAY_SIZE(treo680_supplicants),
+};
+
+static struct platform_device power_supply = {
+ .name = "pda-power",
+ .id = -1,
+ .dev = {
+ .platform_data = &power_supply_info,
+ },
+};
+
+/******************************************************************************
+ * Vibra and LEDs
+ ******************************************************************************/
+static struct gpio_led gpio_leds[] = {
+ {
+ .name = "treo680:vibra:vibra",
+ .default_trigger = "none",
+ .gpio = GPIO_NR_TREO680_VIBRATE_EN,
+ },
+ {
+ .name = "treo680:green:led",
+ .default_trigger = "mmc0",
+ .gpio = GPIO_NR_TREO680_GREEN_LED,
+ },
+ {
+ .name = "treo680:keybbl:keybbl",
+ .default_trigger = "none",
+ .gpio = GPIO_NR_TREO680_KEYB_BL,
+ },
+};
+
+static struct gpio_led_platform_data gpio_led_info = {
+ .leds = gpio_leds,
+ .num_leds = ARRAY_SIZE(gpio_leds),
+};
+
+static struct platform_device treo680_leds = {
+ .name = "leds-gpio",
+ .id = -1,
+ .dev = {
+ .platform_data = &gpio_led_info,
+ }
+};
+
+
+/******************************************************************************
+ * Framebuffer
+ ******************************************************************************/
+/* TODO: add support for 324x324 */
+static struct pxafb_mode_info treo680_lcd_modes[] = {
+{
+ .pixclock = 86538,
+ .xres = 320,
+ .yres = 320,
+ .bpp = 16,
+
+ .left_margin = 20,
+ .right_margin = 8,
+ .upper_margin = 8,
+ .lower_margin = 5,
+
+ .hsync_len = 4,
+ .vsync_len = 1,
+},
+};
+
+static struct pxafb_mach_info treo680_lcd_screen = {
+ .modes = treo680_lcd_modes,
+ .num_modes = ARRAY_SIZE(treo680_lcd_modes),
+ .lcd_conn = LCD_COLOR_TFT_16BPP | LCD_PCLK_EDGE_FALL,
+};
+
+/******************************************************************************
+ * Power management - standby
+ ******************************************************************************/
+static void __init treo680_pm_init(void)
+{
+ static u32 resume[] = {
+ 0xe3a00101, /* mov r0, #0x40000000 */
+ 0xe380060f, /* orr r0, r0, #0x00f00000 */
+ 0xe590f008, /* ldr pc, [r0, #0x08] */
+ };
+
+ /* this is where the bootloader jumps */
+ memcpy(phys_to_virt(TREO680_STR_BASE), resume, sizeof(resume));
+}
+
+/******************************************************************************
+ * Machine init
+ ******************************************************************************/
+static struct platform_device *devices[] __initdata = {
+ &treo680_backlight,
+ &treo680_leds,
+ &power_supply,
+};
+
+/* setup udc GPIOs initial state */
+static void __init treo680_udc_init(void)
+{
+ if (!gpio_request(GPIO_NR_TREO680_USB_PULLUP, "UDC Vbus")) {
+ gpio_direction_output(GPIO_NR_TREO680_USB_PULLUP, 1);
+ gpio_free(GPIO_NR_TREO680_USB_PULLUP);
+ }
+}
+
+static void __init treo680_init(void)
+{
+ treo680_pm_init();
+ pxa2xx_mfp_config(ARRAY_AND_SIZE(treo680_pin_config));
+ pxa_set_keypad_info(&treo680_keypad_platform_data);
+ set_pxa_fb_info(&treo680_lcd_screen);
+ pxa_set_mci_info(&treo680_mci_platform_data);
+ treo680_udc_init();
+ pxa_set_udc_info(&treo680_udc_info);
+ pxa_set_ac97_info(&treo680_ac97_pdata);
+ pxa_set_ficp_info(&treo680_ficp_info);
+ pxa_set_ohci_info(&treo680_ohci_info);
+
+ platform_add_devices(devices, ARRAY_SIZE(devices));
+}
+
+MACHINE_START(TREO680, "Palm Treo 680")
+ .phys_io = TREO680_PHYS_IO_START,
+ .io_pg_offst = io_p2v(0x40000000),
+ .boot_params = 0xa0000100,
+ .map_io = pxa_map_io,
+ .init_irq = pxa27x_init_irq,
+ .timer = &pxa_timer,
+ .init_machine = treo680_init,
+MACHINE_END
diff --git a/arch/arm/mach-realview/realview_pbx.c b/arch/arm/mach-realview/realview_pbx.c
index 1fe294d..ede2a57 100644
--- a/arch/arm/mach-realview/realview_pbx.c
+++ b/arch/arm/mach-realview/realview_pbx.c
@@ -27,6 +27,7 @@
#include <asm/irq.h>
#include <asm/leds.h>
#include <asm/mach-types.h>
+#include <asm/smp_twd.h>
#include <asm/hardware/gic.h>
#include <asm/hardware/cache-l2x0.h>
diff --git a/arch/arm/mach-s3c2410/usb-simtec.c b/arch/arm/mach-s3c2410/usb-simtec.c
index 6cd9377..50e25fc 100644
--- a/arch/arm/mach-s3c2410/usb-simtec.c
+++ b/arch/arm/mach-s3c2410/usb-simtec.c
@@ -22,7 +22,6 @@
#include <linux/timer.h>
#include <linux/init.h>
#include <linux/device.h>
-#include <linux/gpio.h>
#include <linux/io.h>
#include <asm/mach/arch.h>
diff --git a/arch/arm/mach-s3c2440/Kconfig b/arch/arm/mach-s3c2440/Kconfig
index 5df73cb..8cfeaec 100644
--- a/arch/arm/mach-s3c2440/Kconfig
+++ b/arch/arm/mach-s3c2440/Kconfig
@@ -84,5 +84,15 @@
help
Say Y here if you are using the AT2440EVB development board
+config MACH_MINI2440
+ bool "MINI2440 development board"
+ select CPU_S3C2440
+ select EEPROM_AT24
+ select LEDS_TRIGGER_BACKLIGHT
+ select SND_S3C24XX_SOC_S3C24XX_UDA134X
+ help
+ Say Y here to select support for the MINI2440, a 10cm x 10cm board
+ available from various sources. It can come with a 3.5" or 7" touch LCD.
+
endmenu
diff --git a/arch/arm/mach-s3c2440/Makefile b/arch/arm/mach-s3c2440/Makefile
index 0b4440e..bfadcf6 100644
--- a/arch/arm/mach-s3c2440/Makefile
+++ b/arch/arm/mach-s3c2440/Makefile
@@ -22,3 +22,4 @@
obj-$(CONFIG_ARCH_S3C2440) += mach-smdk2440.o
obj-$(CONFIG_MACH_NEXCODER_2440) += mach-nexcoder.o
obj-$(CONFIG_MACH_AT2440EVB) += mach-at2440evb.o
+obj-$(CONFIG_MACH_MINI2440) += mach-mini2440.o
diff --git a/arch/arm/mach-s3c2440/mach-mini2440.c b/arch/arm/mach-s3c2440/mach-mini2440.c
new file mode 100644
index 0000000..6a5bc30
--- /dev/null
+++ b/arch/arm/mach-s3c2440/mach-mini2440.c
@@ -0,0 +1,703 @@
+/* linux/arch/arm/mach-s3c2440/mach-mini2440.c
+ *
+ * Copyright (c) 2008 Ramax Lo <ramaxlo@gmail.com>
+ * Based on mach-anubis.c by Ben Dooks <ben@simtec.co.uk>
+ * and modifications by SBZ <sbz@spgui.org> and
+ * Weibing <http://weibing.blogbus.com> and
+ * Michel Pollet <buserror@gmail.com>
+ *
+ * For product information, visit http://code.google.com/p/mini2440/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/interrupt.h>
+#include <linux/list.h>
+#include <linux/timer.h>
+#include <linux/init.h>
+#include <linux/gpio.h>
+#include <linux/input.h>
+#include <linux/io.h>
+#include <linux/serial_core.h>
+#include <linux/dm9000.h>
+#include <linux/i2c/at24.h>
+#include <linux/platform_device.h>
+#include <linux/gpio_keys.h>
+#include <linux/i2c.h>
+#include <linux/mmc/host.h>
+
+#include <asm/mach/arch.h>
+#include <asm/mach/map.h>
+
+#include <mach/hardware.h>
+#include <mach/fb.h>
+#include <asm/mach-types.h>
+
+#include <plat/regs-serial.h>
+#include <mach/regs-gpio.h>
+#include <mach/leds-gpio.h>
+#include <mach/regs-mem.h>
+#include <mach/regs-lcd.h>
+#include <mach/irqs.h>
+#include <plat/nand.h>
+#include <plat/iic.h>
+#include <plat/mci.h>
+#include <plat/udc.h>
+
+#include <plat/regs-serial.h>
+
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/nand.h>
+#include <linux/mtd/nand_ecc.h>
+#include <linux/mtd/partitions.h>
+
+#include <plat/clock.h>
+#include <plat/devs.h>
+#include <plat/cpu.h>
+
+#include <sound/s3c24xx_uda134x.h>
+
+#define MACH_MINI2440_DM9K_BASE (S3C2410_CS4 + 0x300)
+
+static struct map_desc mini2440_iodesc[] __initdata = {
+ /* nothing to declare, move along */
+};
+
+#define UCON S3C2410_UCON_DEFAULT
+#define ULCON S3C2410_LCON_CS8 | S3C2410_LCON_PNONE | S3C2410_LCON_STOPB
+#define UFCON S3C2410_UFCON_RXTRIG8 | S3C2410_UFCON_FIFOMODE
+
+
+static struct s3c2410_uartcfg mini2440_uartcfgs[] __initdata = {
+ [0] = {
+ .hwport = 0,
+ .flags = 0,
+ .ucon = UCON,
+ .ulcon = ULCON,
+ .ufcon = UFCON,
+ },
+ [1] = {
+ .hwport = 1,
+ .flags = 0,
+ .ucon = UCON,
+ .ulcon = ULCON,
+ .ufcon = UFCON,
+ },
+ [2] = {
+ .hwport = 2,
+ .flags = 0,
+ .ucon = UCON,
+ .ulcon = ULCON,
+ .ufcon = UFCON,
+ },
+};
+
+/* USB device UDC support */
+
+static void mini2440_udc_pullup(enum s3c2410_udc_cmd_e cmd)
+{
+ pr_debug("udc: pullup(%d)\n", cmd);
+
+ switch (cmd) {
+ case S3C2410_UDC_P_ENABLE :
+ s3c2410_gpio_setpin(S3C2410_GPC(5), 1);
+ break;
+ case S3C2410_UDC_P_DISABLE :
+ s3c2410_gpio_setpin(S3C2410_GPC(5), 0);
+ break;
+ case S3C2410_UDC_P_RESET :
+ break;
+ default:
+ break;
+ }
+}
+
+static struct s3c2410_udc_mach_info mini2440_udc_cfg __initdata = {
+ .udc_command = mini2440_udc_pullup,
+};
+
+
+/* LCD timing and setup */
+
+/*
+ * This macro simplifies the table below
+ */
+#define _LCD_DECLARE(_clock,_xres,margin_left,margin_right,hsync, \
+ _yres,margin_top,margin_bottom,vsync, refresh) \
+ .width = _xres, \
+ .xres = _xres, \
+ .height = _yres, \
+ .yres = _yres, \
+ .left_margin = margin_left, \
+ .right_margin = margin_right, \
+ .upper_margin = margin_top, \
+ .lower_margin = margin_bottom, \
+ .hsync_len = hsync, \
+ .vsync_len = vsync, \
+ .pixclock = ((_clock*100000000000LL) / \
+ ((refresh) * \
+ (hsync + margin_left + _xres + margin_right) * \
+ (vsync + margin_top + _yres + margin_bottom))), \
+ .bpp = 16,\
+ .type = (S3C2410_LCDCON1_TFT16BPP |\
+ S3C2410_LCDCON1_TFT)
+
+struct s3c2410fb_display mini2440_lcd_cfg[] __initdata = {
+ [0] = { /* mini2440 + 3.5" TFT + touchscreen */
+ _LCD_DECLARE(
+ 7, /* The 3.5 is quite fast */
+ 240, 21, 38, 6, /* x timing */
+ 320, 4, 4, 2, /* y timing */
+ 60), /* refresh rate */
+ .lcdcon5 = (S3C2410_LCDCON5_FRM565 |
+ S3C2410_LCDCON5_INVVLINE |
+ S3C2410_LCDCON5_INVVFRAME |
+ S3C2410_LCDCON5_INVVDEN |
+ S3C2410_LCDCON5_PWREN),
+ },
+ [1] = { /* mini2440 + 7" TFT + touchscreen */
+ _LCD_DECLARE(
+ 10, /* the 7" runs slower */
+ 800, 40, 40, 48, /* x timing */
+ 480, 29, 3, 3, /* y timing */
+ 50), /* refresh rate */
+ .lcdcon5 = (S3C2410_LCDCON5_FRM565 |
+ S3C2410_LCDCON5_INVVLINE |
+ S3C2410_LCDCON5_INVVFRAME |
+ S3C2410_LCDCON5_PWREN),
+ },
+ /* The VGA shield can output at several resolutions. All share
+ * the same timings, however, anything smaller than 1024x768
+ * will only be displayed in the top left corner of a 1024x768
+ * XGA output unless you add optional dip switches to the shield.
+ * Therefore timings for other resolutions have been omitted here.
+ */
+ [2] = {
+ _LCD_DECLARE(
+ 10,
+ 1024, 1, 2, 2, /* x timing */
+ 768, 200, 16, 16, /* y timing */
+ 24), /* refresh rate, maximum stable,
+ tested with the FPGA shield */
+ .lcdcon5 = (S3C2410_LCDCON5_FRM565 |
+ S3C2410_LCDCON5_HWSWP),
+ },
+};
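As a rough worked example of the _LCD_DECLARE arithmetic above (assuming the usual framebuffer convention that pixclock is in picoseconds): for the 3.5" entry the horizontal total is 6 + 21 + 240 + 38 = 305 and the vertical total is 2 + 4 + 320 + 4 = 330, so pixclock = 7 * 100000000000 / (60 * 305 * 330), roughly 115900, i.e. a dot clock of about 8.6MHz. The _clock argument is effectively a scaling factor; with _clock = 10 the formula would yield exactly the nominal pixel clock for the requested refresh rate.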
+
+/* todo - put into gpio header */
+
+#define S3C2410_GPCCON_MASK(x) (3 << ((x) * 2))
+#define S3C2410_GPDCON_MASK(x) (3 << ((x) * 2))
+
+struct s3c2410fb_mach_info mini2440_fb_info __initdata = {
+ .displays = &mini2440_lcd_cfg[0], /* not constant! see init */
+ .num_displays = 1,
+ .default_display = 0,
+
+ /* Enable VD[2..7], VD[10..15], VD[18..23] and VCLK, syncs, VDEN
+ * and disable the pull down resistors on pins we are using for LCD
+ * data. */
+
+ .gpcup = (0xf << 1) | (0x3f << 10),
+
+ .gpccon = (S3C2410_GPC1_VCLK | S3C2410_GPC2_VLINE |
+ S3C2410_GPC3_VFRAME | S3C2410_GPC4_VM |
+ S3C2410_GPC10_VD2 | S3C2410_GPC11_VD3 |
+ S3C2410_GPC12_VD4 | S3C2410_GPC13_VD5 |
+ S3C2410_GPC14_VD6 | S3C2410_GPC15_VD7),
+
+ .gpccon_mask = (S3C2410_GPCCON_MASK(1) | S3C2410_GPCCON_MASK(2) |
+ S3C2410_GPCCON_MASK(3) | S3C2410_GPCCON_MASK(4) |
+ S3C2410_GPCCON_MASK(10) | S3C2410_GPCCON_MASK(11) |
+ S3C2410_GPCCON_MASK(12) | S3C2410_GPCCON_MASK(13) |
+ S3C2410_GPCCON_MASK(14) | S3C2410_GPCCON_MASK(15)),
+
+ .gpdup = (0x3f << 2) | (0x3f << 10),
+
+ .gpdcon = (S3C2410_GPD2_VD10 | S3C2410_GPD3_VD11 |
+ S3C2410_GPD4_VD12 | S3C2410_GPD5_VD13 |
+ S3C2410_GPD6_VD14 | S3C2410_GPD7_VD15 |
+ S3C2410_GPD10_VD18 | S3C2410_GPD11_VD19 |
+ S3C2410_GPD12_VD20 | S3C2410_GPD13_VD21 |
+ S3C2410_GPD14_VD22 | S3C2410_GPD15_VD23),
+
+ .gpdcon_mask = (S3C2410_GPDCON_MASK(2) | S3C2410_GPDCON_MASK(3) |
+ S3C2410_GPDCON_MASK(4) | S3C2410_GPDCON_MASK(5) |
+ S3C2410_GPDCON_MASK(6) | S3C2410_GPDCON_MASK(7) |
+ S3C2410_GPDCON_MASK(10) | S3C2410_GPDCON_MASK(11)|
+ S3C2410_GPDCON_MASK(12) | S3C2410_GPDCON_MASK(13)|
+ S3C2410_GPDCON_MASK(14) | S3C2410_GPDCON_MASK(15)),
+};
+
+/* MMC/SD */
+
+static struct s3c24xx_mci_pdata mini2440_mmc_cfg __initdata = {
+ .gpio_detect = S3C2410_GPG(8),
+ .gpio_wprotect = S3C2410_GPH(8),
+ .set_power = NULL,
+ .ocr_avail = MMC_VDD_32_33|MMC_VDD_33_34,
+};
+
+/* NAND Flash on MINI2440 board */
+
+static struct mtd_partition mini2440_default_nand_part[] __initdata = {
+ [0] = {
+ .name = "u-boot",
+ .size = SZ_256K,
+ .offset = 0,
+ },
+ [1] = {
+ .name = "u-boot-env",
+ .size = SZ_128K,
+ .offset = SZ_256K,
+ },
+ [2] = {
+ .name = "kernel",
+ /* 5 megabytes, for a kernel with no modules
+ * or a uImage with a ramdisk attached */
+ .size = 0x00500000,
+ .offset = SZ_256K + SZ_128K,
+ },
+ [3] = {
+ .name = "root",
+ .offset = SZ_256K + SZ_128K + 0x00500000,
+ .size = MTDPART_SIZ_FULL,
+ },
+};
+
+static struct s3c2410_nand_set mini2440_nand_sets[] __initdata = {
+ [0] = {
+ .name = "nand",
+ .nr_chips = 1,
+ .nr_partitions = ARRAY_SIZE(mini2440_default_nand_part),
+ .partitions = mini2440_default_nand_part,
+ },
+};
+
+static struct s3c2410_platform_nand mini2440_nand_info __initdata = {
+ .tacls = 0,
+ .twrph0 = 25,
+ .twrph1 = 15,
+ .nr_sets = ARRAY_SIZE(mini2440_nand_sets),
+ .sets = mini2440_nand_sets,
+ .ignore_unset_ecc = 1,
+};
+
+/* DM9000AEP 10/100 ethernet controller */
+
+static struct resource mini2440_dm9k_resource[] __initdata = {
+ [0] = {
+ .start = MACH_MINI2440_DM9K_BASE,
+ .end = MACH_MINI2440_DM9K_BASE + 3,
+ .flags = IORESOURCE_MEM
+ },
+ [1] = {
+ .start = MACH_MINI2440_DM9K_BASE + 4,
+ .end = MACH_MINI2440_DM9K_BASE + 7,
+ .flags = IORESOURCE_MEM
+ },
+ [2] = {
+ .start = IRQ_EINT7,
+ .end = IRQ_EINT7,
+ .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHEDGE,
+ }
+};
+
+/*
+ * The DM9000 has no eeprom, and its MAC address is set by
+ * the bootloader before starting the kernel.
+ */
+static struct dm9000_plat_data mini2440_dm9k_pdata __initdata = {
+ .flags = (DM9000_PLATF_16BITONLY | DM9000_PLATF_NO_EEPROM),
+};
+
+static struct platform_device mini2440_device_eth __initdata = {
+ .name = "dm9000",
+ .id = -1,
+ .num_resources = ARRAY_SIZE(mini2440_dm9k_resource),
+ .resource = mini2440_dm9k_resource,
+ .dev = {
+ .platform_data = &mini2440_dm9k_pdata,
+ },
+};
+
+/* CON5
+ * +--+ /-----\
+ * | | | |
+ * | | | BAT |
+ * | | \_____/
+ * | |
+ * | | +----+ +----+
+ * | | | K5 | | K1 |
+ * | | +----+ +----+
+ * | | +----+ +----+
+ * | | | K4 | | K2 |
+ * | | +----+ +----+
+ * | | +----+ +----+
+ * | | | K6 | | K3 |
+ * | | +----+ +----+
+ * .....
+ */
+static struct gpio_keys_button mini2440_buttons[] __initdata = {
+ {
+ .gpio = S3C2410_GPG(0), /* K1 */
+ .code = KEY_F1,
+ .desc = "Button 1",
+ .active_low = 1,
+ },
+ {
+ .gpio = S3C2410_GPG(3), /* K2 */
+ .code = KEY_F2,
+ .desc = "Button 2",
+ .active_low = 1,
+ },
+ {
+ .gpio = S3C2410_GPG(5), /* K3 */
+ .code = KEY_F3,
+ .desc = "Button 3",
+ .active_low = 1,
+ },
+ {
+ .gpio = S3C2410_GPG(6), /* K4 */
+ .code = KEY_POWER,
+ .desc = "Power",
+ .active_low = 1,
+ },
+ {
+ .gpio = S3C2410_GPG(7), /* K5 */
+ .code = KEY_F5,
+ .desc = "Button 5",
+ .active_low = 1,
+ },
+#if 0
+ /* this pin is also known as TCLK1 and seems to already be
+ * marked as "in use" somehow in the kernel -- possibly wrongly */
+ {
+ .gpio = S3C2410_GPG(11), /* K6 */
+ .code = KEY_F6,
+ .desc = "Button 6",
+ .active_low = 1,
+ },
+#endif
+};
+
+static struct gpio_keys_platform_data mini2440_button_data __initdata = {
+ .buttons = mini2440_buttons,
+ .nbuttons = ARRAY_SIZE(mini2440_buttons),
+};
+
+static struct platform_device mini2440_button_device __initdata = {
+ .name = "gpio-keys",
+ .id = -1,
+ .dev = {
+ .platform_data = &mini2440_button_data,
+ }
+};
+
+/* LEDS */
+
+static struct s3c24xx_led_platdata mini2440_led1_pdata __initdata = {
+ .name = "led1",
+ .gpio = S3C2410_GPB(5),
+ .flags = S3C24XX_LEDF_ACTLOW | S3C24XX_LEDF_TRISTATE,
+ .def_trigger = "heartbeat",
+};
+
+static struct s3c24xx_led_platdata mini2440_led2_pdata __initdata = {
+ .name = "led2",
+ .gpio = S3C2410_GPB(6),
+ .flags = S3C24XX_LEDF_ACTLOW | S3C24XX_LEDF_TRISTATE,
+ .def_trigger = "nand-disk",
+};
+
+static struct s3c24xx_led_platdata mini2440_led3_pdata __initdata = {
+ .name = "led3",
+ .gpio = S3C2410_GPB(7),
+ .flags = S3C24XX_LEDF_ACTLOW | S3C24XX_LEDF_TRISTATE,
+ .def_trigger = "mmc0",
+};
+
+static struct s3c24xx_led_platdata mini2440_led4_pdata __initdata = {
+ .name = "led4",
+ .gpio = S3C2410_GPB(8),
+ .flags = S3C24XX_LEDF_ACTLOW | S3C24XX_LEDF_TRISTATE,
+ .def_trigger = "",
+};
+
+static struct s3c24xx_led_platdata mini2440_led_backlight_pdata __initdata = {
+ .name = "backlight",
+ .gpio = S3C2410_GPG(4),
+ .def_trigger = "backlight",
+};
+
+static struct platform_device mini2440_led1 __initdata = {
+ .name = "s3c24xx_led",
+ .id = 1,
+ .dev = {
+ .platform_data = &mini2440_led1_pdata,
+ },
+};
+
+static struct platform_device mini2440_led2 __initdata = {
+ .name = "s3c24xx_led",
+ .id = 2,
+ .dev = {
+ .platform_data = &mini2440_led2_pdata,
+ },
+};
+
+static struct platform_device mini2440_led3 __initdata = {
+ .name = "s3c24xx_led",
+ .id = 3,
+ .dev = {
+ .platform_data = &mini2440_led3_pdata,
+ },
+};
+
+static struct platform_device mini2440_led4 __initdata = {
+ .name = "s3c24xx_led",
+ .id = 4,
+ .dev = {
+ .platform_data = &mini2440_led4_pdata,
+ },
+};
+
+static struct platform_device mini2440_led_backlight __initdata = {
+ .name = "s3c24xx_led",
+ .id = 5,
+ .dev = {
+ .platform_data = &mini2440_led_backlight_pdata,
+ },
+};
+
+/* AUDIO */
+
+static struct s3c24xx_uda134x_platform_data mini2440_audio_pins __initdata = {
+ .l3_clk = S3C2410_GPB(4),
+ .l3_mode = S3C2410_GPB(2),
+ .l3_data = S3C2410_GPB(3),
+ .model = UDA134X_UDA1341
+};
+
+static struct platform_device mini2440_audio __initdata = {
+ .name = "s3c24xx_uda134x",
+ .id = 0,
+ .dev = {
+ .platform_data = &mini2440_audio_pins,
+ },
+};
+
+/*
+ * I2C devices
+ */
+static struct at24_platform_data at24c08 = {
+ .byte_len = SZ_8K / 8,
+ .page_size = 16,
+};
+
+static struct i2c_board_info mini2440_i2c_devs[] __initdata = {
+ {
+ I2C_BOARD_INFO("24c08", 0x50),
+ .platform_data = &at24c08,
+ },
+};
+
+static struct platform_device *mini2440_devices[] __initdata = {
+ &s3c_device_usb,
+ &s3c_device_wdt,
+/* &s3c_device_adc,*/ /* ADC doesn't like living with touchscreen ! */
+ &s3c_device_i2c0,
+ &s3c_device_rtc,
+ &s3c_device_usbgadget,
+ &mini2440_device_eth,
+ &mini2440_led1,
+ &mini2440_led2,
+ &mini2440_led3,
+ &mini2440_led4,
+ &mini2440_button_device,
+ &s3c_device_nand,
+ &s3c_device_sdi,
+ &s3c_device_iis,
+ &mini2440_audio,
+/* &s3c_device_timer[0],*/ /* buzzer pwm, no API for it */
+ /* remaining devices are optional */
+};
+
+static void __init mini2440_map_io(void)
+{
+ s3c24xx_init_io(mini2440_iodesc, ARRAY_SIZE(mini2440_iodesc));
+ s3c24xx_init_clocks(12000000);
+ s3c24xx_init_uarts(mini2440_uartcfgs, ARRAY_SIZE(mini2440_uartcfgs));
+
+ s3c_device_nand.dev.platform_data = &mini2440_nand_info;
+ s3c_device_sdi.dev.platform_data = &mini2440_mmc_cfg;
+}
+
+/*
+ * mini2440_features string
+ *
+ * t = Touchscreen present
+ * b = backlight control
+ * c = camera [TODO]
+ * 0-9 LCD configuration
+ *
+ */
+static char mini2440_features_str[12] __initdata = "0tb";
+
+static int __init mini2440_features_setup(char *str)
+{
+ if (str)
+ strlcpy(mini2440_features_str, str, sizeof(mini2440_features_str));
+ return 1;
+}
+
+__setup("mini2440=", mini2440_features_setup);
+
+#define FEATURE_SCREEN (1 << 0)
+#define FEATURE_BACKLIGHT (1 << 1)
+#define FEATURE_TOUCH (1 << 2)
+#define FEATURE_CAMERA (1 << 3)
+
+struct mini2440_features_t {
+ int count;
+ int done;
+ int lcd_index;
+ struct platform_device *optional[8];
+};
+
+static void mini2440_parse_features(
+ struct mini2440_features_t * features,
+ const char * features_str )
+{
+ const char * fp = features_str;
+
+ features->count = 0;
+ features->done = 0;
+ features->lcd_index = -1;
+
+ while (*fp) {
+ char f = *fp++;
+
+ switch (f) {
+ case '0'...'9': /* tft screen */
+ if (features->done & FEATURE_SCREEN) {
+ printk(KERN_INFO "MINI2440: '%c' ignored, "
+ "screen type already set\n", f);
+ } else {
+ int li = f - '0';
+ if (li >= ARRAY_SIZE(mini2440_lcd_cfg))
+ printk(KERN_INFO "MINI2440: "
+ "'%c' out of range LCD mode\n", f);
+ else {
+ features->optional[features->count++] =
+ &s3c_device_lcd;
+ features->lcd_index = li;
+ }
+ }
+ features->done |= FEATURE_SCREEN;
+ break;
+ case 'b':
+ if (features->done & FEATURE_BACKLIGHT)
+ printk(KERN_INFO "MINI2440: '%c' ignored, "
+ "backlight already set\n", f);
+ else {
+ features->optional[features->count++] =
+ &mini2440_led_backlight;
+ }
+ features->done |= FEATURE_BACKLIGHT;
+ break;
+ case 't':
+ printk(KERN_INFO "MINI2440: '%c' ignored, "
+ "touchscreen not compiled in\n", f);
+ break;
+ case 'c':
+ if (features->done & FEATURE_CAMERA)
+ printk(KERN_INFO "MINI2440: '%c' ignored, "
+ "camera already registered\n", f);
+ else
+ features->optional[features->count++] =
+ &s3c_device_camif;
+ features->done |= FEATURE_CAMERA;
+ break;
+ }
+ }
+}
+
+static void __init mini2440_init(void)
+{
+ struct mini2440_features_t features = { 0 };
+ int i;
+
+ printk(KERN_INFO "MINI2440: Option string mini2440=%s\n",
+ mini2440_features_str);
+
+ /* Parse the feature string */
+ mini2440_parse_features(&features, mini2440_features_str);
+
+ /* turn LCD on */
+ s3c2410_gpio_cfgpin(S3C2410_GPC(0), S3C2410_GPC0_LEND);
+
+ /* Turn on the backlight early */
+ s3c2410_gpio_setpin(S3C2410_GPG(4), 1);
+ s3c2410_gpio_cfgpin(S3C2410_GPG(4), S3C2410_GPIO_OUTPUT);
+
+ /* remove pullup on optional PWM backlight -- unused on 3.5 and 7"s */
+ s3c2410_gpio_pullup(S3C2410_GPB(1), 0);
+ s3c2410_gpio_setpin(S3C2410_GPB(1), 0);
+ s3c2410_gpio_cfgpin(S3C2410_GPB(1), S3C2410_GPIO_INPUT);
+
+ /* Make sure the D+ pullup pin is output */
+ s3c2410_gpio_cfgpin(S3C2410_GPC(5), S3C2410_GPIO_OUTPUT);
+
+ /* mark the keys as inputs, without pullups (there is one on the board) */
+ for (i = 0; i < ARRAY_SIZE(mini2440_buttons); i++) {
+ s3c2410_gpio_pullup(mini2440_buttons[i].gpio, 0);
+ s3c2410_gpio_cfgpin(mini2440_buttons[i].gpio,
+ S3C2410_GPIO_INPUT);
+ }
+ if (features.lcd_index != -1) {
+ int li;
+
+ mini2440_fb_info.displays =
+ &mini2440_lcd_cfg[features.lcd_index];
+
+ printk(KERN_INFO "MINI2440: LCD");
+ for (li = 0; li < ARRAY_SIZE(mini2440_lcd_cfg); li++)
+ if (li == features.lcd_index)
+ printk(" [%d:%dx%d]", li,
+ mini2440_lcd_cfg[li].width,
+ mini2440_lcd_cfg[li].height);
+ else
+ printk(" %d:%dx%d", li,
+ mini2440_lcd_cfg[li].width,
+ mini2440_lcd_cfg[li].height);
+ printk("\n");
+ s3c24xx_fb_set_platdata(&mini2440_fb_info);
+ }
+ s3c24xx_udc_set_platdata(&mini2440_udc_cfg);
+ s3c_i2c0_set_platdata(NULL);
+ i2c_register_board_info(0, mini2440_i2c_devs,
+ ARRAY_SIZE(mini2440_i2c_devs));
+
+ platform_add_devices(mini2440_devices, ARRAY_SIZE(mini2440_devices));
+
+ if (features.count) /* the optional features */
+ platform_add_devices(features.optional, features.count);
+
+}
+
+
+MACHINE_START(MINI2440, "MINI2440")
+ /* Maintainer: Michel Pollet <buserror@gmail.com> */
+ .phys_io = S3C2410_PA_UART,
+ .io_pg_offst = (((u32)S3C24XX_VA_UART) >> 18) & 0xfffc,
+ .boot_params = S3C2410_SDRAM_PA + 0x100,
+ .map_io = mini2440_map_io,
+ .init_machine = mini2440_init,
+ .init_irq = s3c24xx_init_irq,
+ .timer = &s3c24xx_timer,
+MACHINE_END
diff --git a/arch/arm/mach-s3c2442/Kconfig b/arch/arm/mach-s3c2442/Kconfig
index b289d19..103e913 100644
--- a/arch/arm/mach-s3c2442/Kconfig
+++ b/arch/arm/mach-s3c2442/Kconfig
@@ -24,6 +24,18 @@
depends on ARCH_S3C2440
select CPU_S3C2442
+config MACH_NEO1973_GTA02
+ bool "Openmoko GTA02 / Freerunner phone"
+ select CPU_S3C2442
+ select MFD_PCF50633
+ select PCF50633_GPIO
+ select I2C
+ select POWER_SUPPLY
+ select MACH_NEO1973
+ select S3C2410_PWM
+ help
+ Say Y here if you are using the Openmoko GTA02 / Freerunner GSM Phone
+
endmenu
diff --git a/arch/arm/mach-s3c2442/Makefile b/arch/arm/mach-s3c2442/Makefile
index 2a909c6..2a19113 100644
--- a/arch/arm/mach-s3c2442/Makefile
+++ b/arch/arm/mach-s3c2442/Makefile
@@ -12,5 +12,7 @@
obj-$(CONFIG_CPU_S3C2442) += s3c2442.o
obj-$(CONFIG_CPU_S3C2442) += clock.o
+obj-$(CONFIG_MACH_NEO1973_GTA02) += mach-gta02.o
+
# Machine support
diff --git a/arch/arm/mach-s3c2442/include/mach/gta02.h b/arch/arm/mach-s3c2442/include/mach/gta02.h
new file mode 100644
index 0000000..953331d
--- /dev/null
+++ b/arch/arm/mach-s3c2442/include/mach/gta02.h
@@ -0,0 +1,84 @@
+#ifndef _GTA02_H
+#define _GTA02_H
+
+#include <mach/regs-gpio.h>
+
+/* Different hardware revisions, passed in ATAG_REVISION by u-boot */
+#define GTA02v1_SYSTEM_REV 0x00000310
+#define GTA02v2_SYSTEM_REV 0x00000320
+#define GTA02v3_SYSTEM_REV 0x00000330
+#define GTA02v4_SYSTEM_REV 0x00000340
+#define GTA02v5_SYSTEM_REV 0x00000350
+/* since A7 is basically same as A6, we use A6 PCB ID */
+#define GTA02v6_SYSTEM_REV 0x00000360
+
+#define GTA02_GPIO_n3DL_GSM S3C2410_GPA(13) /* v1 + v2 + v3 only */
+
+#define GTA02_GPIO_PWR_LED1 S3C2410_GPB(0)
+#define GTA02_GPIO_PWR_LED2 S3C2410_GPB(1)
+#define GTA02_GPIO_AUX_LED S3C2410_GPB(2)
+#define GTA02_GPIO_VIBRATOR_ON S3C2410_GPB(3)
+#define GTA02_GPIO_MODEM_RST S3C2410_GPB(5)
+#define GTA02_GPIO_BT_EN S3C2410_GPB(6)
+#define GTA02_GPIO_MODEM_ON S3C2410_GPB(7)
+#define GTA02_GPIO_EXTINT8 S3C2410_GPB(8)
+#define GTA02_GPIO_USB_PULLUP S3C2410_GPB(9)
+
+#define GTA02_GPIO_PIO5 S3C2410_GPC(5) /* v3 + v4 only */
+
+#define GTA02v3_GPIO_nG1_CS S3C2410_GPD(12) /* v3 + v4 only */
+#define GTA02v3_GPIO_nG2_CS S3C2410_GPD(13) /* v3 + v4 only */
+#define GTA02v5_GPIO_HDQ S3C2410_GPD(14) /* v5 + */
+
+#define GTA02_GPIO_nG1_INT S3C2410_GPF(0)
+#define GTA02_GPIO_IO1 S3C2410_GPF(1)
+#define GTA02_GPIO_PIO_2 S3C2410_GPF(2) /* v2 + v3 + v4 only */
+#define GTA02_GPIO_JACK_INSERT S3C2410_GPF(4)
+#define GTA02_GPIO_WLAN_GPIO1 S3C2410_GPF(5) /* v2 + v3 + v4 only */
+#define GTA02_GPIO_AUX_KEY S3C2410_GPF(6)
+#define GTA02_GPIO_HOLD_KEY S3C2410_GPF(7)
+
+#define GTA02_GPIO_3D_IRQ S3C2410_GPG(4)
+#define GTA02v2_GPIO_nG2_INT S3C2410_GPG(8) /* v2 + v3 + v4 only */
+#define GTA02v3_GPIO_nUSB_OC S3C2410_GPG(9) /* v3 + v4 only */
+#define GTA02v3_GPIO_nUSB_FLT S3C2410_GPG(10) /* v3 + v4 only */
+#define GTA02v3_GPIO_nGSM_OC S3C2410_GPG(11) /* v3 + v4 only */
+
+#define GTA02_GPIO_AMP_SHUT S3C2440_GPJ1 /* v2 + v3 + v4 only */
+#define GTA02v1_GPIO_WLAN_GPIO10 S3C2440_GPJ2
+#define GTA02_GPIO_HP_IN S3C2440_GPJ2 /* v2 + v3 + v4 only */
+#define GTA02_GPIO_INT0 S3C2440_GPJ3 /* v2 + v3 + v4 only */
+#define GTA02_GPIO_nGSM_EN S3C2440_GPJ4
+#define GTA02_GPIO_3D_RESET S3C2440_GPJ5
+#define GTA02_GPIO_nDL_GSM S3C2440_GPJ6 /* v4 + v5 only */
+#define GTA02_GPIO_WLAN_GPIO0 S3C2440_GPJ7
+#define GTA02v1_GPIO_BAT_ID S3C2440_GPJ8
+#define GTA02_GPIO_KEEPACT S3C2440_GPJ8
+#define GTA02v1_GPIO_HP_IN S3C2440_GPJ10
+#define GTA02_CHIP_PWD S3C2440_GPJ11 /* v2 + v3 + v4 only */
+#define GTA02_GPIO_nWLAN_RESET S3C2440_GPJ12 /* v2 + v3 + v4 only */
+
+#define GTA02_IRQ_GSENSOR_1 IRQ_EINT0
+#define GTA02_IRQ_MODEM IRQ_EINT1
+#define GTA02_IRQ_PIO_2 IRQ_EINT2 /* v2 + v3 + v4 only */
+#define GTA02_IRQ_nJACK_INSERT IRQ_EINT4
+#define GTA02_IRQ_WLAN_GPIO1 IRQ_EINT5
+#define GTA02_IRQ_AUX IRQ_EINT6
+#define GTA02_IRQ_nHOLD IRQ_EINT7
+#define GTA02_IRQ_PCF50633 IRQ_EINT9
+#define GTA02_IRQ_3D IRQ_EINT12
+#define GTA02_IRQ_GSENSOR_2 IRQ_EINT16 /* v2 + v3 + v4 only */
+#define GTA02v3_IRQ_nUSB_OC IRQ_EINT17 /* v3 + v4 only */
+#define GTA02v3_IRQ_nUSB_FLT IRQ_EINT18 /* v3 + v4 only */
+#define GTA02v3_IRQ_nGSM_OC IRQ_EINT19 /* v3 + v4 only */
+
+/* returns 00 000 on GTA02 A5 and earlier, A6 returns 01 001 */
+#define GTA02_PCB_ID1_0 S3C2410_GPC(13)
+#define GTA02_PCB_ID1_1 S3C2410_GPC(15)
+#define GTA02_PCB_ID1_2 S3C2410_GPD(0)
+#define GTA02_PCB_ID2_0 S3C2410_GPD(3)
+#define GTA02_PCB_ID2_1 S3C2410_GPD(4)
+
+int gta02_get_pcb_revision(void);
+
+#endif /* _GTA02_H */
diff --git a/arch/arm/mach-s3c2442/mach-gta02.c b/arch/arm/mach-s3c2442/mach-gta02.c
new file mode 100644
index 0000000..e23b581
--- /dev/null
+++ b/arch/arm/mach-s3c2442/mach-gta02.c
@@ -0,0 +1,646 @@
+/*
+ * linux/arch/arm/mach-s3c2442/mach-gta02.c
+ *
+ * S3C2442 Machine Support for Openmoko GTA02 / FreeRunner.
+ *
+ * Copyright (C) 2006-2009 by Openmoko, Inc.
+ * Authors: Harald Welte <laforge@openmoko.org>
+ * Andy Green <andy@openmoko.org>
+ * Werner Almesberger <werner@openmoko.org>
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/interrupt.h>
+#include <linux/list.h>
+#include <linux/delay.h>
+#include <linux/timer.h>
+#include <linux/init.h>
+#include <linux/gpio.h>
+#include <linux/workqueue.h>
+#include <linux/platform_device.h>
+#include <linux/serial_core.h>
+#include <linux/spi/spi.h>
+
+#include <linux/mmc/host.h>
+
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/nand.h>
+#include <linux/mtd/nand_ecc.h>
+#include <linux/mtd/partitions.h>
+#include <linux/mtd/physmap.h>
+#include <linux/io.h>
+
+#include <linux/i2c.h>
+#include <linux/backlight.h>
+#include <linux/regulator/machine.h>
+
+#include <linux/mfd/pcf50633/core.h>
+#include <linux/mfd/pcf50633/mbc.h>
+#include <linux/mfd/pcf50633/adc.h>
+#include <linux/mfd/pcf50633/gpio.h>
+#include <linux/mfd/pcf50633/pmic.h>
+
+#include <asm/mach/arch.h>
+#include <asm/mach/map.h>
+#include <asm/mach/irq.h>
+
+#include <asm/irq.h>
+#include <asm/mach-types.h>
+
+#include <mach/regs-irq.h>
+#include <mach/regs-gpio.h>
+#include <mach/regs-gpioj.h>
+#include <mach/fb.h>
+
+#include <mach/spi.h>
+#include <mach/spi-gpio.h>
+#include <plat/usb-control.h>
+#include <mach/regs-mem.h>
+#include <mach/hardware.h>
+
+#include <mach/gta02.h>
+
+#include <plat/regs-serial.h>
+#include <plat/nand.h>
+#include <plat/devs.h>
+#include <plat/cpu.h>
+#include <plat/pm.h>
+#include <plat/udc.h>
+#include <plat/gpio-cfg.h>
+#include <plat/iic.h>
+
+static struct pcf50633 *gta02_pcf;
+
+/*
+ * This gets called every 1ms once the kernel has panicked.
+ */
+
+static long gta02_panic_blink(long count)
+{
+ long delay = 0;
+ static long last_blink;
+ static char led;
+
+ /* Fast blink: 200ms period. */
+ if (count - last_blink < 100)
+ return 0;
+
+ led ^= 1;
+ gpio_direction_output(GTA02_GPIO_AUX_LED, led);
+
+ last_blink = count;
+
+ return delay;
+}
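A quick sanity check on the numbers above: the comment says this runs every 1ms, so toggling only when the count has advanced by 100 flips the LED about every 100ms, i.e. the ~200ms period (about 5Hz) that the comment in gta02_machine_init below refers to.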
+
+
+static struct map_desc gta02_iodesc[] __initdata = {
+ {
+ .virtual = 0xe0000000,
+ .pfn = __phys_to_pfn(S3C2410_CS3 + 0x01000000),
+ .length = SZ_1M,
+ .type = MT_DEVICE
+ },
+};
+
+#define UCON (S3C2410_UCON_DEFAULT | S3C2443_UCON_RXERR_IRQEN)
+#define ULCON (S3C2410_LCON_CS8 | S3C2410_LCON_PNONE | S3C2410_LCON_STOPB)
+#define UFCON (S3C2410_UFCON_RXTRIG8 | S3C2410_UFCON_FIFOMODE)
+
+static struct s3c2410_uartcfg gta02_uartcfgs[] = {
+ [0] = {
+ .hwport = 0,
+ .flags = 0,
+ .ucon = UCON,
+ .ulcon = ULCON,
+ .ufcon = UFCON,
+ },
+ [1] = {
+ .hwport = 1,
+ .flags = 0,
+ .ucon = UCON,
+ .ulcon = ULCON,
+ .ufcon = UFCON,
+ },
+ [2] = {
+ .hwport = 2,
+ .flags = 0,
+ .ucon = UCON,
+ .ulcon = ULCON,
+ .ufcon = UFCON,
+ },
+};
+
+#ifdef CONFIG_CHARGER_PCF50633
+/*
+ * On GTA02 the 1A charger features a 48K resistor to 0V on the ID pin.
+ * We use this to recognize that we can pull 1A from the USB socket.
+ *
+ * These constants are the measured pcf50633 ADC levels with the 1A
+ * charger / 48K resistor, and with no pulldown resistor.
+ */
+
+#define ADC_NOM_CHG_DETECT_1A 6
+#define ADC_NOM_CHG_DETECT_USB 43
+
+static void
+gta02_configure_pmu_for_charger(struct pcf50633 *pcf, void *unused, int res)
+{
+ int ma;
+
+ /* Interpret charger type */
+ if (res < ((ADC_NOM_CHG_DETECT_USB + ADC_NOM_CHG_DETECT_1A) / 2)) {
+
+ /*
+ * Sanity - stop GPO driving out now that we have a 1A charger
+ * GPO controls USB Host power generation on GTA02
+ */
+ pcf50633_gpio_set(pcf, PCF50633_GPO, 0);
+
+ ma = 1000;
+ } else
+ ma = 100;
+
+ pcf50633_mbc_usb_curlim_set(pcf, ma);
+}
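Restating the arithmetic of the callback above: the threshold is (43 + 6) / 2 = 24 in integer division, so an ADCIN1 reading below 24 is taken to mean the 48K ID resistor of the 1A charger is present, GPO (USB host power) is forced off and the current limit is raised to 1000mA; otherwise the limit is set to 100mA.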
+
+static struct delayed_work gta02_charger_work;
+static int gta02_usb_vbus_draw;
+
+static void gta02_charger_worker(struct work_struct *work)
+{
+ if (gta02_usb_vbus_draw) {
+ pcf50633_mbc_usb_curlim_set(gta02_pcf, gta02_usb_vbus_draw);
+ return;
+ }
+
+#ifdef CONFIG_PCF50633_ADC
+ pcf50633_adc_async_read(gta02_pcf,
+ PCF50633_ADCC1_MUX_ADCIN1,
+ PCF50633_ADCC1_AVERAGE_16,
+ gta02_configure_pmu_for_charger,
+ NULL);
+#else
+ /*
+ * If the PCF50633 ADC is disabled we fall back to a
+ * 100mA limit for safety.
+ */
+ pcf50633_mbc_usb_curlim_set(gta02_pcf, 100);
+#endif
+}
+
+#define GTA02_CHARGER_CONFIGURE_TIMEOUT ((3000 * HZ) / 1000)
+
+static void gta02_pmu_event_callback(struct pcf50633 *pcf, int irq)
+{
+ if (irq == PCF50633_IRQ_USBINS) {
+ schedule_delayed_work(&gta02_charger_work,
+ GTA02_CHARGER_CONFIGURE_TIMEOUT);
+
+ return;
+ }
+
+ if (irq == PCF50633_IRQ_USBREM) {
+ cancel_delayed_work_sync(&gta02_charger_work);
+ gta02_usb_vbus_draw = 0;
+ }
+}
+
+static void gta02_udc_vbus_draw(unsigned int ma)
+{
+ if (!gta02_pcf)
+ return;
+
+ gta02_usb_vbus_draw = ma;
+
+ schedule_delayed_work(&gta02_charger_work,
+ GTA02_CHARGER_CONFIGURE_TIMEOUT);
+}
+#else /* !CONFIG_CHARGER_PCF50633 */
+#define gta02_pmu_event_callback NULL
+#define gta02_udc_vbus_draw NULL
+#endif
+
+/*
+ * This is called when the pcf50633 is probed, unfortunately quite late in the
+ * day since it is an I2C bus device. Here we can belatedly define some
+ * platform devices with the advantage that we can mark the pcf50633 as the
+ * parent. This makes them get suspended and resumed with their parent,
+ * the pcf50633, still around.
+ */
+
+static void gta02_pmu_attach_child_devices(struct pcf50633 *pcf);
+
+
+static char *gta02_batteries[] = {
+ "battery",
+};
+
+struct pcf50633_platform_data gta02_pcf_pdata = {
+ .resumers = {
+ [0] = PCF50633_INT1_USBINS |
+ PCF50633_INT1_USBREM |
+ PCF50633_INT1_ALARM,
+ [1] = PCF50633_INT2_ONKEYF,
+ [2] = PCF50633_INT3_ONKEY1S,
+ [3] = PCF50633_INT4_LOWSYS |
+ PCF50633_INT4_LOWBAT |
+ PCF50633_INT4_HIGHTMP,
+ },
+
+ .batteries = gta02_batteries,
+ .num_batteries = ARRAY_SIZE(gta02_batteries),
+ .reg_init_data = {
+ [PCF50633_REGULATOR_AUTO] = {
+ .constraints = {
+ .min_uV = 3300000,
+ .max_uV = 3300000,
+ .valid_modes_mask = REGULATOR_MODE_NORMAL,
+ .always_on = 1,
+ .apply_uV = 1,
+ .state_mem = {
+ .enabled = 1,
+ },
+ },
+ },
+ [PCF50633_REGULATOR_DOWN1] = {
+ .constraints = {
+ .min_uV = 1300000,
+ .max_uV = 1600000,
+ .valid_modes_mask = REGULATOR_MODE_NORMAL,
+ .always_on = 1,
+ .apply_uV = 1,
+ },
+ },
+ [PCF50633_REGULATOR_DOWN2] = {
+ .constraints = {
+ .min_uV = 1800000,
+ .max_uV = 1800000,
+ .valid_modes_mask = REGULATOR_MODE_NORMAL,
+ .apply_uV = 1,
+ .always_on = 1,
+ .state_mem = {
+ .enabled = 1,
+ },
+ },
+ },
+ [PCF50633_REGULATOR_HCLDO] = {
+ .constraints = {
+ .min_uV = 2000000,
+ .max_uV = 3300000,
+ .valid_modes_mask = REGULATOR_MODE_NORMAL,
+ .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE,
+ .always_on = 1,
+ },
+ },
+ [PCF50633_REGULATOR_LDO1] = {
+ .constraints = {
+ .min_uV = 3300000,
+ .max_uV = 3300000,
+ .valid_modes_mask = REGULATOR_MODE_NORMAL,
+ .apply_uV = 1,
+ .state_mem = {
+ .enabled = 0,
+ },
+ },
+ },
+ [PCF50633_REGULATOR_LDO2] = {
+ .constraints = {
+ .min_uV = 3300000,
+ .max_uV = 3300000,
+ .valid_modes_mask = REGULATOR_MODE_NORMAL,
+ .apply_uV = 1,
+ },
+ },
+ [PCF50633_REGULATOR_LDO3] = {
+ .constraints = {
+ .min_uV = 3000000,
+ .max_uV = 3000000,
+ .valid_modes_mask = REGULATOR_MODE_NORMAL,
+ .apply_uV = 1,
+ },
+ },
+ [PCF50633_REGULATOR_LDO4] = {
+ .constraints = {
+ .min_uV = 3200000,
+ .max_uV = 3200000,
+ .valid_modes_mask = REGULATOR_MODE_NORMAL,
+ .apply_uV = 1,
+ },
+ },
+ [PCF50633_REGULATOR_LDO5] = {
+ .constraints = {
+ .min_uV = 3000000,
+ .max_uV = 3000000,
+ .valid_modes_mask = REGULATOR_MODE_NORMAL,
+ .apply_uV = 1,
+ .state_mem = {
+ .enabled = 1,
+ },
+ },
+ },
+ [PCF50633_REGULATOR_LDO6] = {
+ .constraints = {
+ .min_uV = 3000000,
+ .max_uV = 3000000,
+ .valid_modes_mask = REGULATOR_MODE_NORMAL,
+ },
+ },
+ [PCF50633_REGULATOR_MEMLDO] = {
+ .constraints = {
+ .min_uV = 1800000,
+ .max_uV = 1800000,
+ .valid_modes_mask = REGULATOR_MODE_NORMAL,
+ .state_mem = {
+ .enabled = 1,
+ },
+ },
+ },
+
+ },
+ .probe_done = gta02_pmu_attach_child_devices,
+ .mbc_event_callback = gta02_pmu_event_callback,
+};
+
+
+/* NOR Flash. */
+
+#define GTA02_FLASH_BASE 0x18000000 /* GCS3 */
+#define GTA02_FLASH_SIZE 0x200000 /* 2MBytes */
+
+static struct physmap_flash_data gta02_nor_flash_data = {
+ .width = 2,
+};
+
+static struct resource gta02_nor_flash_resource = {
+ .start = GTA02_FLASH_BASE,
+ .end = GTA02_FLASH_BASE + GTA02_FLASH_SIZE - 1,
+ .flags = IORESOURCE_MEM,
+};
+
+static struct platform_device gta02_nor_flash = {
+ .name = "physmap-flash",
+ .id = 0,
+ .dev = {
+ .platform_data = &gta02_nor_flash_data,
+ },
+ .resource = &gta02_nor_flash_resource,
+ .num_resources = 1,
+};
+
+
+struct platform_device s3c24xx_pwm_device = {
+ .name = "s3c24xx_pwm",
+ .num_resources = 0,
+};
+
+static struct i2c_board_info gta02_i2c_devs[] __initdata = {
+ {
+ I2C_BOARD_INFO("pcf50633", 0x73),
+ .irq = GTA02_IRQ_PCF50633,
+ .platform_data = &gta02_pcf_pdata,
+ },
+ {
+ I2C_BOARD_INFO("wm8753", 0x1a),
+ },
+};
+
+static struct s3c2410_nand_set gta02_nand_sets[] = {
+ [0] = {
+ /*
+ * This name is also hard-coded in the boot loaders, so
+ * changing it would require all users to upgrade
+ * their boot loaders, some of which are stored in a NOR
+ * that is considered to be immutable.
+ */
+ .name = "neo1973-nand",
+ .nr_chips = 1,
+ .use_bbt = 1,
+ .force_soft_ecc = 1,
+ },
+};
+
+/*
+ * Choose a set of timings derived from the S3C2442B MCP54
+ * data sheet (K5D2G13ACM-D075 MCP Memory).
+ */
+
+static struct s3c2410_platform_nand gta02_nand_info = {
+ .tacls = 0,
+ .twrph0 = 25,
+ .twrph1 = 15,
+ .nr_sets = ARRAY_SIZE(gta02_nand_sets),
+ .sets = gta02_nand_sets,
+};
+
+
+static void gta02_udc_command(enum s3c2410_udc_cmd_e cmd)
+{
+ switch (cmd) {
+ case S3C2410_UDC_P_ENABLE:
+ pr_debug("%s S3C2410_UDC_P_ENABLE\n", __func__);
+ gpio_direction_output(GTA02_GPIO_USB_PULLUP, 1);
+ break;
+ case S3C2410_UDC_P_DISABLE:
+ pr_debug("%s S3C2410_UDC_P_DISABLE\n", __func__);
+ gpio_direction_output(GTA02_GPIO_USB_PULLUP, 0);
+ break;
+ case S3C2410_UDC_P_RESET:
+ pr_debug("%s S3C2410_UDC_P_RESET\n", __func__);
+ /* FIXME: Do something here. */
+ }
+}
+
+/* Get PMU to set USB current limit accordingly. */
+static struct s3c2410_udc_mach_info gta02_udc_cfg = {
+ .vbus_draw = gta02_udc_vbus_draw,
+ .udc_command = gta02_udc_command,
+
+};
+
+
+
+static void gta02_bl_set_intensity(int intensity)
+{
+ struct pcf50633 *pcf = gta02_pcf;
+ int old_intensity = pcf50633_reg_read(pcf, PCF50633_REG_LEDOUT);
+
+ /* We map 8-bit intensity to 6-bit intensity in hardware. */
+ intensity >>= 2;
+
+ /*
+ * This can happen during, e.g., a panic print on a blanked console,
+ * but we can't service i2c without interrupts active, so abort.
+ */
+ if (in_atomic()) {
+ printk(KERN_ERR "gta02_bl_set_intensity called while atomic\n");
+ return;
+ }
+
+ old_intensity = pcf50633_reg_read(pcf, PCF50633_REG_LEDOUT);
+ if (intensity == old_intensity)
+ return;
+
+ /* We can't do this anywhere else. */
+ pcf50633_reg_write(pcf, PCF50633_REG_LEDDIM, 5);
+
+ if (!(pcf50633_reg_read(pcf, PCF50633_REG_LEDENA) & 3))
+ old_intensity = 0;
+
+ /*
+ * The PCF50633 cannot handle LEDOUT = 0 (datasheet p60);
+ * if seen, you have to re-enable the LED unit.
+ */
+ if (!intensity || !old_intensity)
+ pcf50633_reg_write(pcf, PCF50633_REG_LEDENA, 0);
+
+ /* Illegal to set LEDOUT to 0. */
+ if (!intensity)
+ pcf50633_reg_set_bit_mask(pcf, PCF50633_REG_LEDOUT, 0x3f, 2);
+ else
+ pcf50633_reg_set_bit_mask(pcf, PCF50633_REG_LEDOUT, 0x3f,
+ intensity);
+
+ if (intensity)
+ pcf50633_reg_write(pcf, PCF50633_REG_LEDENA, 2);
+
+}
+
+static struct generic_bl_info gta02_bl_info = {
+ .name = "gta02-bl",
+ .max_intensity = 0xff,
+ .default_intensity = 0xff,
+ .set_bl_intensity = gta02_bl_set_intensity,
+};
+
+static struct platform_device gta02_bl_dev = {
+ .name = "generic-bl",
+ .id = 1,
+ .dev = {
+ .platform_data = &gta02_bl_info,
+ },
+};
+
+
+
+/* USB */
+static struct s3c2410_hcd_info gta02_usb_info = {
+ .port[0] = {
+ .flags = S3C_HCDFLG_USED,
+ },
+ .port[1] = {
+ .flags = 0,
+ },
+};
+
+
+static void __init gta02_map_io(void)
+{
+ s3c24xx_init_io(gta02_iodesc, ARRAY_SIZE(gta02_iodesc));
+ s3c24xx_init_clocks(12000000);
+ s3c24xx_init_uarts(gta02_uartcfgs, ARRAY_SIZE(gta02_uartcfgs));
+}
+
+
+/* These are the guys that don't need to be children of PMU. */
+
+static struct platform_device *gta02_devices[] __initdata = {
+ &s3c_device_usb,
+ &s3c_device_wdt,
+ &s3c_device_sdi,
+ &s3c_device_usbgadget,
+ &s3c_device_nand,
+ &gta02_nor_flash,
+ &s3c24xx_pwm_device,
+ &s3c_device_iis,
+ &s3c_device_i2c0,
+};
+
+/* These guys DO need to be children of PMU. */
+
+static struct platform_device *gta02_devices_pmu_children[] = {
+ &gta02_bl_dev,
+};
+
+
+/*
+ * This is called when the pcf50633 is probed, quite late in the day since it is
+ * an I2C bus device. Here we can define platform devices with the advantage that
+ * we can mark the pcf50633 as the parent. This makes them get suspended and
+ * resumed with their parent the pcf50633 still around. All devices whose
+ * operation depends on something from pcf50633 must have this relationship
+ * made explicit like this, or suspend and resume will become an unreliable
+ * hellworld.
+ */
+
+static void gta02_pmu_attach_child_devices(struct pcf50633 *pcf)
+{
+ int n;
+
+ /* Grab a copy of the now probed PMU pointer. */
+ gta02_pcf = pcf;
+
+ for (n = 0; n < ARRAY_SIZE(gta02_devices_pmu_children); n++)
+ gta02_devices_pmu_children[n]->dev.parent = pcf->dev;
+
+ platform_add_devices(gta02_devices_pmu_children,
+ ARRAY_SIZE(gta02_devices_pmu_children));
+}
+
+static void gta02_poweroff(void)
+{
+ pcf50633_reg_set_bit_mask(gta02_pcf, PCF50633_REG_OOCSHDWN, 1, 1);
+}
+
+static void __init gta02_machine_init(void)
+{
+ /* Set the panic callback to make AUX LED blink at ~5Hz. */
+ panic_blink = gta02_panic_blink;
+
+ s3c_pm_init();
+
+#ifdef CONFIG_CHARGER_PCF50633
+ INIT_DELAYED_WORK(&gta02_charger_work, gta02_charger_worker);
+#endif
+
+ s3c_device_usb.dev.platform_data = &gta02_usb_info;
+ s3c_device_nand.dev.platform_data = &gta02_nand_info;
+
+ s3c24xx_udc_set_platdata(&gta02_udc_cfg);
+ s3c_i2c0_set_platdata(NULL);
+
+ i2c_register_board_info(0, gta02_i2c_devs, ARRAY_SIZE(gta02_i2c_devs));
+
+ platform_add_devices(gta02_devices, ARRAY_SIZE(gta02_devices));
+ pm_power_off = gta02_poweroff;
+}
+
+
+MACHINE_START(NEO1973_GTA02, "GTA02")
+ /* Maintainer: Nelson Castillo <arhuaco@freaks-unidos.net> */
+ .phys_io = S3C2410_PA_UART,
+ .io_pg_offst = (((u32)S3C24XX_VA_UART) >> 18) & 0xfffc,
+ .boot_params = S3C2410_SDRAM_PA + 0x100,
+ .map_io = gta02_map_io,
+ .init_irq = s3c24xx_init_irq,
+ .init_machine = gta02_machine_init,
+ .timer = &s3c24xx_timer,
+MACHINE_END
diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c
index 3a398be..03cd27d 100644
--- a/arch/arm/mm/alignment.c
+++ b/arch/arm/mm/alignment.c
@@ -62,6 +62,12 @@
#define SHIFT_ASR 0x40
#define SHIFT_RORRRX 0x60
+#define BAD_INSTR 0xdeadc0de
+
+/* Thumb-2 32 bit format per ARMv7 DDI0406A A6.3, either e800h, f000h or f800h */
+#define IS_T32(hi16) \
+ (((hi16) & 0xe000) == 0xe000 && ((hi16) & 0x1800))
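As a quick illustration of the IS_T32() test: for the first halfword 0xe92d (the start of a 32-bit STMDB), 0xe92d & 0xe000 == 0xe000 and 0xe92d & 0x1800 == 0x0800, so it is treated as the high half of a 32-bit encoding; a 16-bit unconditional branch in the 0xe000-0xe7ff range passes the first test but fails the second, so it is still handled as a 16-bit instruction.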
+
static unsigned long ai_user;
static unsigned long ai_sys;
static unsigned long ai_skipped;
@@ -332,38 +338,48 @@
struct pt_regs *regs)
{
unsigned int rd = RD_BITS(instr);
+ unsigned int rd2;
+ int load;
- if (((rd & 1) == 1) || (rd == 14))
+ if ((instr & 0xfe000000) == 0xe8000000) {
+ /* ARMv7 Thumb-2 32-bit LDRD/STRD */
+ rd2 = (instr >> 8) & 0xf;
+ load = !!(LDST_L_BIT(instr));
+ } else if (((rd & 1) == 1) || (rd == 14))
goto bad;
+ else {
+ load = ((instr & 0xf0) == 0xd0);
+ rd2 = rd + 1;
+ }
ai_dword += 1;
if (user_mode(regs))
goto user;
- if ((instr & 0xf0) == 0xd0) {
+ if (load) {
unsigned long val;
get32_unaligned_check(val, addr);
regs->uregs[rd] = val;
get32_unaligned_check(val, addr + 4);
- regs->uregs[rd + 1] = val;
+ regs->uregs[rd2] = val;
} else {
put32_unaligned_check(regs->uregs[rd], addr);
- put32_unaligned_check(regs->uregs[rd + 1], addr + 4);
+ put32_unaligned_check(regs->uregs[rd2], addr + 4);
}
return TYPE_LDST;
user:
- if ((instr & 0xf0) == 0xd0) {
+ if (load) {
unsigned long val;
get32t_unaligned_check(val, addr);
regs->uregs[rd] = val;
get32t_unaligned_check(val, addr + 4);
- regs->uregs[rd + 1] = val;
+ regs->uregs[rd2] = val;
} else {
put32t_unaligned_check(regs->uregs[rd], addr);
- put32t_unaligned_check(regs->uregs[rd + 1], addr + 4);
+ put32t_unaligned_check(regs->uregs[rd2], addr + 4);
}
return TYPE_LDST;
@@ -616,10 +632,74 @@
/* Else fall through for illegal instruction case */
default:
- return 0xdeadc0de;
+ return BAD_INSTR;
}
}
+/*
+ * Convert Thumb-2 32 bit LDM, STM, LDRD, STRD into equivalent instructions
+ * that the ARM alignment handler can process, and also find the corresponding
+ * handler, so that we can reuse ARM userland alignment fault fixups for Thumb.
+ *
+ * @pinstr: original Thumb-2 instruction; returns the converted instruction
+ * @regs: register context.
+ * @poffset: return offset from faulted addr for later writeback
+ *
+ * NOTES:
+ * 1. Comments below refer to ARMv7 DDI0406A Thumb Instruction sections.
+ * 2. Register name Rt from ARMv7 is same as Rd from ARMv6 (Rd is Rt)
+ */
+static void *
+do_alignment_t32_to_handler(unsigned long *pinstr, struct pt_regs *regs,
+ union offset_union *poffset)
+{
+ unsigned long instr = *pinstr;
+ u16 tinst1 = (instr >> 16) & 0xffff;
+ u16 tinst2 = instr & 0xffff;
+ poffset->un = 0;
+
+ switch (tinst1 & 0xffe0) {
+ /* A6.3.5 Load/Store multiple */
+ case 0xe880: /* STM/STMIA/STMEA,LDM/LDMIA, PUSH/POP T2 */
+ case 0xe8a0: /* ...above writeback version */
+ case 0xe900: /* STMDB/STMFD, LDMDB/LDMEA */
+ case 0xe920: /* ...above writeback version */
+ /* no need for an offset decision since the handler calculates it */
+ return do_alignment_ldmstm;
+
+ case 0xf840: /* POP/PUSH T3 (single register) */
+ if (RN_BITS(instr) == 13 && (tinst2 & 0x09ff) == 0x0904) {
+ u32 L = !!(LDST_L_BIT(instr));
+ const u32 subset[2] = {
+ 0xe92d0000, /* STMDB sp!,{registers} */
+ 0xe8bd0000, /* LDMIA sp!,{registers} */
+ };
+ *pinstr = subset[L] | (1<<RD_BITS(instr));
+ return do_alignment_ldmstm;
+ }
+ /* Else fall through for illegal instruction case */
+ break;
+
+ /* A6.3.6 Load/store double, STRD/LDRD(immed, lit, reg) */
+ case 0xe860:
+ case 0xe960:
+ case 0xe8e0:
+ case 0xe9e0:
+ poffset->un = (tinst2 & 0xff) << 2;
+ case 0xe940:
+ case 0xe9c0:
+ return do_alignment_ldrdstrd;
+
+ /*
+ * No need to handle load/store instructions up to word size
+ * since ARMv6 and later CPUs can perform unaligned accesses.
+ */
+ default:
+ break;
+ }
+ return NULL;
+}
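To make the 0xf840 case above concrete, a rough worked example (a sketch of the arithmetic, not part of the patch): a Thumb-2 PUSH {r3} encodes as the halfwords f84d 3d04, so instr = 0xf84d3d04. Then RN_BITS(instr) is 13, tinst2 & 0x09ff is 0x0904 and LDST_L_BIT(instr) is clear (a store), so the code rewrites *pinstr to 0xe92d0000 | (1 << 3) = 0xe92d0008, i.e. the ARM STMDB sp!, {r3} that do_alignment_ldmstm already knows how to fix up.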
+
static int
do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
{
@@ -630,6 +710,8 @@
mm_segment_t fs;
unsigned int fault;
u16 tinstr = 0;
+ int isize = 4;
+ int thumb2_32b = 0;
instrptr = instruction_pointer(regs);
@@ -637,8 +719,19 @@
set_fs(KERNEL_DS);
if (thumb_mode(regs)) {
fault = __get_user(tinstr, (u16 *)(instrptr & ~1));
- if (!(fault))
- instr = thumb2arm(tinstr);
+ if (!fault) {
+ if (cpu_architecture() >= CPU_ARCH_ARMv7 &&
+ IS_T32(tinstr)) {
+ /* Thumb-2 32-bit */
+ u16 tinst2 = 0;
+ fault = __get_user(tinst2, (u16 *)(instrptr+2));
+ instr = (tinstr << 16) | tinst2;
+ thumb2_32b = 1;
+ } else {
+ isize = 2;
+ instr = thumb2arm(tinstr);
+ }
+ }
} else
fault = __get_user(instr, (u32 *)instrptr);
set_fs(fs);
@@ -655,7 +748,7 @@
fixup:
- regs->ARM_pc += thumb_mode(regs) ? 2 : 4;
+ regs->ARM_pc += isize;
switch (CODING_BITS(instr)) {
case 0x00000000: /* 3.13.4 load/store instruction extensions */
@@ -714,18 +807,25 @@
handler = do_alignment_ldrstr;
break;
- case 0x08000000: /* ldm or stm */
- handler = do_alignment_ldmstm;
+ case 0x08000000: /* ldm or stm, or thumb-2 32bit instruction */
+ if (thumb2_32b)
+ handler = do_alignment_t32_to_handler(&instr, regs, &offset);
+ else
+ handler = do_alignment_ldmstm;
break;
default:
goto bad;
}
+ if (!handler)
+ goto bad;
type = handler(addr, instr, regs);
- if (type == TYPE_ERROR || type == TYPE_FAULT)
+ if (type == TYPE_ERROR || type == TYPE_FAULT) {
+ regs->ARM_pc -= isize;
goto bad_or_fault;
+ }
if (type == TYPE_LDST)
do_alignment_finish_ldst(addr, instr, regs, offset);
@@ -735,7 +835,6 @@
bad_or_fault:
if (type == TYPE_ERROR)
goto bad;
- regs->ARM_pc -= thumb_mode(regs) ? 2 : 4;
/*
* We got a fault - fix it up, or die.
*/
@@ -751,8 +850,8 @@
*/
printk(KERN_ERR "Alignment trap: not handling instruction "
"%0*lx at [<%08lx>]\n",
- thumb_mode(regs) ? 4 : 8,
- thumb_mode(regs) ? tinstr : instr, instrptr);
+ isize << 1,
+ isize == 2 ? tinstr : instr, instrptr);
ai_skipped += 1;
return 1;
@@ -763,8 +862,8 @@
printk("Alignment trap: %s (%d) PC=0x%08lx Instr=0x%0*lx "
"Address=0x%08lx FSR 0x%03x\n", current->comm,
task_pid_nr(current), instrptr,
- thumb_mode(regs) ? 4 : 8,
- thumb_mode(regs) ? tinstr : instr,
+ isize << 1,
+ isize == 2 ? tinstr : instr,
addr, fsr);
if (ai_usermode & UM_FIXUP)
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index fdaa9bb..4722582 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -836,6 +836,13 @@
BOOTMEM_EXCLUSIVE);
}
+ if (machine_is_treo680()) {
+ reserve_bootmem_node(pgdat, 0xa0000000, 0x1000,
+ BOOTMEM_EXCLUSIVE);
+ reserve_bootmem_node(pgdat, 0xa2000000, 0x1000,
+ BOOTMEM_EXCLUSIVE);
+ }
+
if (machine_is_palmt5())
reserve_bootmem_node(pgdat, 0xa0200000, 0x1000,
BOOTMEM_EXCLUSIVE);
diff --git a/arch/arm/plat-omap/include/mach/sram.h b/arch/arm/plat-omap/include/mach/sram.h
index dca7c16..4d53cc5 100644
--- a/arch/arm/plat-omap/include/mach/sram.h
+++ b/arch/arm/plat-omap/include/mach/sram.h
@@ -24,7 +24,8 @@
extern u32 omap3_configure_core_dpll(u32 sdrc_rfr_ctrl,
u32 sdrc_actim_ctrla,
u32 sdrc_actim_ctrlb, u32 m2,
- u32 unlock_dll);
+ u32 unlock_dll, u32 f, u32 sdrc_mr,
+ u32 inc);
/* Do not use these */
extern void omap1_sram_reprogram_clock(u32 ckctl, u32 dpllctl);
@@ -62,7 +63,8 @@
extern u32 omap3_sram_configure_core_dpll(u32 sdrc_rfr_ctrl,
u32 sdrc_actim_ctrla,
u32 sdrc_actim_ctrlb, u32 m2,
- u32 unlock_dll);
+ u32 unlock_dll, u32 f, u32 sdrc_mr,
+ u32 inc);
extern unsigned long omap3_sram_configure_core_dpll_sz;
#endif
diff --git a/arch/arm/plat-omap/sram.c b/arch/arm/plat-omap/sram.c
index a5b9bcd..65006df 100644
--- a/arch/arm/plat-omap/sram.c
+++ b/arch/arm/plat-omap/sram.c
@@ -371,15 +371,17 @@
static u32 (*_omap3_sram_configure_core_dpll)(u32 sdrc_rfr_ctrl,
u32 sdrc_actim_ctrla,
u32 sdrc_actim_ctrlb,
- u32 m2, u32 unlock_dll);
+ u32 m2, u32 unlock_dll,
+ u32 f, u32 sdrc_mr, u32 inc);
u32 omap3_configure_core_dpll(u32 sdrc_rfr_ctrl, u32 sdrc_actim_ctrla,
- u32 sdrc_actim_ctrlb, u32 m2, u32 unlock_dll)
+ u32 sdrc_actim_ctrlb, u32 m2, u32 unlock_dll,
+ u32 f, u32 sdrc_mr, u32 inc)
{
BUG_ON(!_omap3_sram_configure_core_dpll);
return _omap3_sram_configure_core_dpll(sdrc_rfr_ctrl,
sdrc_actim_ctrla,
sdrc_actim_ctrlb, m2,
- unlock_dll);
+ unlock_dll, f, sdrc_mr, inc);
}
/* REVISIT: Should this be same as omap34xx_sram_init() after off-idle? */
diff --git a/arch/arm/plat-s3c/Makefile b/arch/arm/plat-s3c/Makefile
index 6106514..74bb7cb 100644
--- a/arch/arm/plat-s3c/Makefile
+++ b/arch/arm/plat-s3c/Makefile
@@ -34,6 +34,7 @@
obj-$(CONFIG_S3C_DEV_HSMMC1) += dev-hsmmc1.o
obj-y += dev-i2c0.o
obj-$(CONFIG_S3C_DEV_I2C1) += dev-i2c1.o
+obj-$(CONFIG_SND_S3C24XX_SOC) += dev-audio.o
obj-$(CONFIG_S3C_DEV_FB) += dev-fb.o
obj-$(CONFIG_S3C_DEV_USB_HOST) += dev-usb.o
obj-$(CONFIG_S3C_DEV_USB_HSOTG) += dev-usb-hsotg.o
diff --git a/arch/arm/plat-s3c/dev-audio.c b/arch/arm/plat-s3c/dev-audio.c
new file mode 100644
index 0000000..1322beb
--- /dev/null
+++ b/arch/arm/plat-s3c/dev-audio.c
@@ -0,0 +1,68 @@
+/* linux/arch/arm/plat-s3c/dev-audio.c
+ *
+ * Copyright 2009 Wolfson Microelectronics
+ * Mark Brown <broonie@opensource.wolfsonmicro.com>
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/platform_device.h>
+
+#include <mach/irqs.h>
+#include <mach/map.h>
+
+#include <plat/devs.h>
+
+
+static struct resource s3c64xx_iis0_resource[] = {
+ [0] = {
+ .start = S3C64XX_PA_IIS0,
+ .end = S3C64XX_PA_IIS0 + 0x100 - 1,
+ .flags = IORESOURCE_MEM,
+ },
+};
+
+struct platform_device s3c64xx_device_iis0 = {
+ .name = "s3c64xx-iis",
+ .id = 0,
+ .num_resources = ARRAY_SIZE(s3c64xx_iis0_resource),
+ .resource = s3c64xx_iis0_resource,
+};
+EXPORT_SYMBOL(s3c64xx_device_iis0);
+
+static struct resource s3c64xx_iis1_resource[] = {
+ [0] = {
+ .start = S3C64XX_PA_IIS1,
+ .end = S3C64XX_PA_IIS1 + 0x100 - 1,
+ .flags = IORESOURCE_MEM,
+ },
+};
+
+struct platform_device s3c64xx_device_iis1 = {
+ .name = "s3c64xx-iis",
+ .id = 1,
+ .num_resources = ARRAY_SIZE(s3c64xx_iis1_resource),
+ .resource = s3c64xx_iis1_resource,
+};
+EXPORT_SYMBOL(s3c64xx_device_iis1);
+
+static struct resource s3c64xx_iisv4_resource[] = {
+ [0] = {
+ .start = S3C64XX_PA_IISV4,
+ .end = S3C64XX_PA_IISV4 + 0x100 - 1,
+ .flags = IORESOURCE_MEM,
+ },
+};
+
+struct platform_device s3c64xx_device_iisv4 = {
+ .name = "s3c64xx-iis-v4",
+ .id = -1,
+ .num_resources = ARRAY_SIZE(s3c64xx_iisv4_resource),
+ .resource = s3c64xx_iisv4_resource,
+};
+EXPORT_SYMBOL(s3c64xx_device_iisv4);
diff --git a/arch/arm/plat-s3c/gpio-config.c b/arch/arm/plat-s3c/gpio-config.c
index 08044de..456969b 100644
--- a/arch/arm/plat-s3c/gpio-config.c
+++ b/arch/arm/plat-s3c/gpio-config.c
@@ -119,7 +119,7 @@
unsigned int shift = (off & 7) * 4;
u32 con;
- if (off < 8 && chip->chip.ngpio >= 8)
+ if (off < 8 && chip->chip.ngpio > 8)
reg -= 4;
if (s3c_gpio_is_cfg_special(cfg)) {
diff --git a/arch/arm/plat-s3c/include/plat/devs.h b/arch/arm/plat-s3c/include/plat/devs.h
index a0b6768..b5b9c4d 100644
--- a/arch/arm/plat-s3c/include/plat/devs.h
+++ b/arch/arm/plat-s3c/include/plat/devs.h
@@ -24,13 +24,16 @@
extern struct platform_device s3c_device_timer[];
+extern struct platform_device s3c64xx_device_iis0;
+extern struct platform_device s3c64xx_device_iis1;
+extern struct platform_device s3c64xx_device_iisv4;
+
extern struct platform_device s3c_device_fb;
extern struct platform_device s3c_device_usb;
extern struct platform_device s3c_device_lcd;
extern struct platform_device s3c_device_wdt;
extern struct platform_device s3c_device_i2c0;
extern struct platform_device s3c_device_i2c1;
-extern struct platform_device s3c_device_iis;
extern struct platform_device s3c_device_rtc;
extern struct platform_device s3c_device_adc;
extern struct platform_device s3c_device_sdi;
diff --git a/arch/arm/plat-s3c/include/plat/nand.h b/arch/arm/plat-s3c/include/plat/nand.h
index f4dcd14..18f9588 100644
--- a/arch/arm/plat-s3c/include/plat/nand.h
+++ b/arch/arm/plat-s3c/include/plat/nand.h
@@ -10,19 +10,26 @@
* published by the Free Software Foundation.
*/
-/* struct s3c2410_nand_set
+/**
+ * struct s3c2410_nand_set - define a set of one or more nand chips
+ * @disable_ecc: Entirely disable ECC - Dangerous
+ * @flash_bbt: Openmoko u-boot can create a Bad Block Table
+ * Setting this flag will allow the kernel to
+ * look for it at boot time and also skip the NAND
+ * scan.
+ * @nr_chips: Number of chips in this set
+ * @nr_partitions: Number of partitions pointed to by @partitions
+ * @name: Name of set (optional)
+ * @nr_map: Map for low-layer logical to physical chip numbers (optional)
+ * @partitions: The mtd partition list
*
- * define an set of one or more nand chips registered with an unique mtd
- *
- * nr_chips = number of chips in this set
- * nr_partitions = number of partitions pointed to be partitoons (or zero)
- * name = name of set (optional)
- * nr_map = map for low-layer logical to physical chip numbers (option)
- * partitions = mtd partition list
-*/
-
+ * Define a set of one or more nand chips registered with a unique mtd. This
+ * also allows flags to be passed to the underlying NAND layer. 'disable_ecc'
+ * will trigger a warning at boot time.
+ */
struct s3c2410_nand_set {
- unsigned int disable_ecc : 1;
+ unsigned int disable_ecc:1;
+ unsigned int flash_bbt:1;
int nr_chips;
int nr_partitions;
@@ -39,7 +46,7 @@
int twrph0; /* active time for nWE/nOE */
int twrph1; /* time for release CLE/ALE from nWE/nOE inactive */
- unsigned int ignore_unset_ecc : 1;
+ unsigned int ignore_unset_ecc:1;
int nr_sets;
struct s3c2410_nand_set *sets;
diff --git a/arch/arm/plat-s3c64xx/Makefile b/arch/arm/plat-s3c64xx/Makefile
index 2ed5df3..3c8882c 100644
--- a/arch/arm/plat-s3c64xx/Makefile
+++ b/arch/arm/plat-s3c64xx/Makefile
@@ -23,6 +23,7 @@
obj-$(CONFIG_CPU_S3C6400_INIT) += s3c6400-init.o
obj-$(CONFIG_CPU_S3C6400_CLOCK) += s3c6400-clock.o
+obj-$(CONFIG_CPU_FREQ_S3C64XX) += cpufreq.o
# PM support
diff --git a/arch/arm/plat-s3c64xx/clock.c b/arch/arm/plat-s3c64xx/clock.c
index 0bc2fa1..7a36e89 100644
--- a/arch/arm/plat-s3c64xx/clock.c
+++ b/arch/arm/plat-s3c64xx/clock.c
@@ -191,7 +191,7 @@
.id = -1,
.parent = &clk_h,
.enable = s3c64xx_hclk_ctrl,
- .ctrlbit = S3C_CLKCON_SCLK_UHOST,
+ .ctrlbit = S3C_CLKCON_HCLK_UHOST,
}, {
.name = "hsmmc",
.id = 0,
diff --git a/arch/arm/plat-s3c64xx/cpufreq.c b/arch/arm/plat-s3c64xx/cpufreq.c
new file mode 100644
index 0000000..e6e0843
--- /dev/null
+++ b/arch/arm/plat-s3c64xx/cpufreq.c
@@ -0,0 +1,262 @@
+/* linux/arch/arm/plat-s3c64xx/cpufreq.c
+ *
+ * Copyright 2009 Wolfson Microelectronics plc
+ *
+ * S3C64xx CPUfreq Support
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/cpufreq.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/regulator/consumer.h>
+
+static struct clk *armclk;
+static struct regulator *vddarm;
+
+#ifdef CONFIG_CPU_S3C6410
+struct s3c64xx_dvfs {
+ unsigned int vddarm_min;
+ unsigned int vddarm_max;
+};
+
+static struct s3c64xx_dvfs s3c64xx_dvfs_table[] = {
+ [0] = { 1000000, 1000000 },
+ [1] = { 1000000, 1050000 },
+ [2] = { 1050000, 1100000 },
+ [3] = { 1050000, 1150000 },
+ [4] = { 1250000, 1350000 },
+};
+
+static struct cpufreq_frequency_table s3c64xx_freq_table[] = {
+ { 0, 66000 },
+ { 0, 133000 },
+ { 1, 222000 },
+ { 1, 266000 },
+ { 2, 333000 },
+ { 2, 400000 },
+ { 3, 532000 },
+ { 3, 533000 },
+ { 4, 667000 },
+ { 0, CPUFREQ_TABLE_END },
+};
+#endif
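For orientation (restating the tables above): the index field of each cpufreq_frequency_table entry selects a row of s3c64xx_dvfs_table, so for example the 333000kHz and 400000kHz operating points share index 2 and, when a VDDARM regulator is available, constrain the supply to the 1050000-1100000uV window; set_target below raises the voltage before a frequency increase and lowers it only after a successful decrease.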
+
+static int s3c64xx_cpufreq_verify_speed(struct cpufreq_policy *policy)
+{
+ if (policy->cpu != 0)
+ return -EINVAL;
+
+ return cpufreq_frequency_table_verify(policy, s3c64xx_freq_table);
+}
+
+static unsigned int s3c64xx_cpufreq_get_speed(unsigned int cpu)
+{
+ if (cpu != 0)
+ return 0;
+
+ return clk_get_rate(armclk) / 1000;
+}
+
+static int s3c64xx_cpufreq_set_target(struct cpufreq_policy *policy,
+ unsigned int target_freq,
+ unsigned int relation)
+{
+ int ret;
+ unsigned int i;
+ struct cpufreq_freqs freqs;
+ struct s3c64xx_dvfs *dvfs;
+
+ ret = cpufreq_frequency_table_target(policy, s3c64xx_freq_table,
+ target_freq, relation, &i);
+ if (ret != 0)
+ return ret;
+
+ freqs.cpu = 0;
+ freqs.old = clk_get_rate(armclk) / 1000;
+ freqs.new = s3c64xx_freq_table[i].frequency;
+ freqs.flags = 0;
+ dvfs = &s3c64xx_dvfs_table[s3c64xx_freq_table[i].index];
+
+ if (freqs.old == freqs.new)
+ return 0;
+
+ pr_debug("cpufreq: Transition %d-%dkHz\n", freqs.old, freqs.new);
+
+ cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
+
+#ifdef CONFIG_REGULATOR
+ if (vddarm && freqs.new > freqs.old) {
+ ret = regulator_set_voltage(vddarm,
+ dvfs->vddarm_min,
+ dvfs->vddarm_max);
+ if (ret != 0) {
+ pr_err("cpufreq: Failed to set VDDARM for %dkHz: %d\n",
+ freqs.new, ret);
+ goto err;
+ }
+ }
+#endif
+
+ ret = clk_set_rate(armclk, freqs.new * 1000);
+ if (ret < 0) {
+ pr_err("cpufreq: Failed to set rate %dkHz: %d\n",
+ freqs.new, ret);
+ goto err;
+ }
+
+#ifdef CONFIG_REGULATOR
+ if (vddarm && freqs.new < freqs.old) {
+ ret = regulator_set_voltage(vddarm,
+ dvfs->vddarm_min,
+ dvfs->vddarm_max);
+ if (ret != 0) {
+ pr_err("cpufreq: Failed to set VDDARM for %dkHz: %d\n",
+ freqs.new, ret);
+ goto err_clk;
+ }
+ }
+#endif
+
+ cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
+
+ pr_debug("cpufreq: Set actual frequency %lukHz\n",
+ clk_get_rate(armclk) / 1000);
+
+ return 0;
+
+err_clk:
+ if (clk_set_rate(armclk, freqs.old * 1000) < 0)
+ pr_err("Failed to restore original clock rate\n");
+err:
+ cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
+
+ return ret;
+}
+
+#ifdef CONFIG_REGULATOR
+static void __init s3c64xx_cpufreq_constrain_voltages(void)
+{
+ int count, v, i, found;
+ struct cpufreq_frequency_table *freq;
+ struct s3c64xx_dvfs *dvfs;
+
+ count = regulator_count_voltages(vddarm);
+ if (count < 0) {
+ pr_err("cpufreq: Unable to check supported voltages\n");
+ return;
+ }
+
+ freq = s3c64xx_freq_table;
+ while (freq->frequency != CPUFREQ_TABLE_END) {
+ if (freq->frequency == CPUFREQ_ENTRY_INVALID)
+ continue;
+
+ dvfs = &s3c64xx_dvfs_table[freq->index];
+ found = 0;
+
+ for (i = 0; i < count; i++) {
+ v = regulator_list_voltage(vddarm, i);
+ if (v >= dvfs->vddarm_min && v <= dvfs->vddarm_max)
+ found = 1;
+ }
+
+ if (!found) {
+ pr_debug("cpufreq: %dkHz unsupported by regulator\n",
+ freq->frequency);
+ freq->frequency = CPUFREQ_ENTRY_INVALID;
+ }
+
+ freq++;
+ }
+}
+#endif
+
+static int __init s3c64xx_cpufreq_driver_init(struct cpufreq_policy *policy)
+{
+ int ret;
+ struct cpufreq_frequency_table *freq;
+
+ if (policy->cpu != 0)
+ return -EINVAL;
+
+ if (s3c64xx_freq_table == NULL) {
+ pr_err("cpufreq: No frequency information for this CPU\n");
+ return -ENODEV;
+ }
+
+ armclk = clk_get(NULL, "armclk");
+ if (IS_ERR(armclk)) {
+ pr_err("cpufreq: Unable to obtain ARMCLK: %ld\n",
+ PTR_ERR(armclk));
+ return PTR_ERR(armclk);
+ }
+
+#ifdef CONFIG_REGULATOR
+ vddarm = regulator_get(NULL, "vddarm");
+ if (IS_ERR(vddarm)) {
+ ret = PTR_ERR(vddarm);
+ pr_err("cpufreq: Failed to obtain VDDARM: %d\n", ret);
+ pr_err("cpufreq: Only frequency scaling available\n");
+ vddarm = NULL;
+ } else {
+ s3c64xx_cpufreq_constrain_voltages();
+ }
+#endif
+
+ freq = s3c64xx_freq_table;
+ while (freq->frequency != CPUFREQ_TABLE_END) {
+ unsigned long r;
+
+ /* Check for frequencies we can generate */
+ r = clk_round_rate(armclk, freq->frequency * 1000);
+ r /= 1000;
+ if (r != freq->frequency)
+ freq->frequency = CPUFREQ_ENTRY_INVALID;
+
+ /* If we have no regulator then assume startup
+ * frequency is the maximum we can support. */
+ if (!vddarm && freq->frequency > s3c64xx_cpufreq_get_speed(0))
+ freq->frequency = CPUFREQ_ENTRY_INVALID;
+
+ freq++;
+ }
+
+ policy->cur = clk_get_rate(armclk) / 1000;
+
+ /* Pick a conservative guess in ns: we'll need ~1 I2C/SPI
+ * write plus clock reprogramming. */
+ policy->cpuinfo.transition_latency = 2 * 1000 * 1000;
+
+ ret = cpufreq_frequency_table_cpuinfo(policy, s3c64xx_freq_table);
+ if (ret != 0) {
+ pr_err("cpufreq: Failed to configure frequency table: %d\n",
+ ret);
+ regulator_put(vddarm);
+ clk_put(armclk);
+ }
+
+ return ret;
+}
+
+static struct cpufreq_driver s3c64xx_cpufreq_driver = {
+ .owner = THIS_MODULE,
+ .flags = 0,
+ .verify = s3c64xx_cpufreq_verify_speed,
+ .target = s3c64xx_cpufreq_set_target,
+ .get = s3c64xx_cpufreq_get_speed,
+ .init = s3c64xx_cpufreq_driver_init,
+ .name = "s3c",
+};
+
+static int __init s3c64xx_cpufreq_init(void)
+{
+ return cpufreq_register_driver(&s3c64xx_cpufreq_driver);
+}
+module_init(s3c64xx_cpufreq_init);
diff --git a/arch/arm/plat-s3c64xx/gpiolib.c b/arch/arm/plat-s3c64xx/gpiolib.c
index da7b60e..9285929 100644
--- a/arch/arm/plat-s3c64xx/gpiolib.c
+++ b/arch/arm/plat-s3c64xx/gpiolib.c
@@ -321,6 +321,11 @@
.get_pull = s3c_gpio_getpull_updown,
};
+int s3c64xx_gpio2int_gpn(struct gpio_chip *chip, unsigned pin)
+{
+ return IRQ_EINT(0) + pin;
+}
+
static struct s3c_gpio_chip gpio_2bit[] = {
{
.base = S3C64XX_GPF_BASE,
@@ -353,6 +358,7 @@
.base = S3C64XX_GPN(0),
.ngpio = S3C64XX_GPIO_N_NR,
.label = "GPN",
+ .to_irq = s3c64xx_gpio2int_gpn,
},
}, {
.base = S3C64XX_GPO_BASE,
diff --git a/arch/arm/plat-s3c64xx/include/plat/regs-clock.h b/arch/arm/plat-s3c64xx/include/plat/regs-clock.h
index 52836d4..a8777a7 100644
--- a/arch/arm/plat-s3c64xx/include/plat/regs-clock.h
+++ b/arch/arm/plat-s3c64xx/include/plat/regs-clock.h
@@ -88,11 +88,11 @@
#define S3C6400_CLKDIV2_SPI0_SHIFT (0)
/* HCLK GATE Registers */
-#define S3C_CLKCON_HCLK_BUS (1<<30)
-#define S3C_CLKCON_HCLK_SECUR (1<<29)
-#define S3C_CLKCON_HCLK_SDMA1 (1<<28)
-#define S3C_CLKCON_HCLK_SDMA2 (1<<27)
-#define S3C_CLKCON_HCLK_UHOST (1<<26)
+#define S3C_CLKCON_HCLK_3DSE (1<<31)
+#define S3C_CLKCON_HCLK_UHOST (1<<29)
+#define S3C_CLKCON_HCLK_SECUR (1<<28)
+#define S3C_CLKCON_HCLK_SDMA1 (1<<27)
+#define S3C_CLKCON_HCLK_SDMA0 (1<<26)
#define S3C_CLKCON_HCLK_IROM (1<<25)
#define S3C_CLKCON_HCLK_DDR1 (1<<24)
#define S3C_CLKCON_HCLK_DDR0 (1<<23)
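The corrected definitions above line the HCLK gate bits up with the S3C6400/6410 HCLK_GATE register layout (UHOST moves to bit 29, and the 3D engine gate at bit 31 is added). Code that pokes the gate register directly does an ordinary read-modify-write; a sketch, assuming the S3C_HCLK_GATE register definition that lives in this same header:

    /* Sketch only: enable the USB host HCLK gate with the corrected bit.
     * Real drivers should go through clk_get()/clk_enable() instead. */
    #include <linux/io.h>
    #include <plat/regs-clock.h>

    static void example_enable_uhost_hclk(void)
    {
            u32 gate = __raw_readl(S3C_HCLK_GATE);

            gate |= S3C_CLKCON_HCLK_UHOST;      /* now (1 << 29) */
            __raw_writel(gate, S3C_HCLK_GATE);
    }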
diff --git a/arch/arm/tools/mach-types b/arch/arm/tools/mach-types
index fec6467..33026ef 100644
--- a/arch/arm/tools/mach-types
+++ b/arch/arm/tools/mach-types
@@ -12,7 +12,7 @@
#
# http://www.arm.linux.org.uk/developer/machines/?action=new
#
-# Last update: Fri May 29 10:14:20 2009
+# Last update: Sat Jun 20 22:28:39 2009
#
# machine_is_xxx CONFIG_xxxx MACH_TYPE_xxx number
#
@@ -1455,7 +1455,7 @@
h6044 MACH_H6044 H6044 1458
app MACH_APP APP 1459
tct_hammer MACH_TCT_HAMMER TCT_HAMMER 1460
-herald MACH_HERMES HERMES 1461
+herald MACH_HERALD HERALD 1461
artemis MACH_ARTEMIS ARTEMIS 1462
htctitan MACH_HTCTITAN HTCTITAN 1463
qranium MACH_QRANIUM QRANIUM 1464
@@ -2245,3 +2245,38 @@
omap3_wl_ff MACH_OMAP3_WL_FF OMAP3_WL_FF 2258
simcom MACH_SIMCOM SIMCOM 2259
mcwebio MACH_MCWEBIO MCWEBIO 2260
+omap3_phrazer MACH_OMAP3_PHRAZER OMAP3_PHRAZER 2261
+darwin MACH_DARWIN DARWIN 2262
+oratiscomu MACH_ORATISCOMU ORATISCOMU 2263
+rtsbc20 MACH_RTSBC20 RTSBC20 2264
+i780 MACH_I780 I780 2265
+gemini324 MACH_GEMINI324 GEMINI324 2266
+oratislan MACH_ORATISLAN ORATISLAN 2267
+oratisalog MACH_ORATISALOG ORATISALOG 2268
+oratismadi MACH_ORATISMADI ORATISMADI 2269
+oratisot16 MACH_ORATISOT16 ORATISOT16 2270
+oratisdesk MACH_ORATISDESK ORATISDESK 2271
+v2p_ca9 MACH_V2P_CA9 V2P_CA9 2272
+sintexo MACH_SINTEXO SINTEXO 2273
+cm3389 MACH_CM3389 CM3389 2274
+omap3_cio MACH_OMAP3_CIO OMAP3_CIO 2275
+sgh_i900 MACH_SGH_I900 SGH_I900 2276
+bst100 MACH_BST100 BST100 2277
+passion MACH_PASSION PASSION 2278
+indesign_at91sam MACH_INDESIGN_AT91SAM INDESIGN_AT91SAM 2279
+c4_badger MACH_C4_BADGER C4_BADGER 2280
+c4_viper MACH_C4_VIPER C4_VIPER 2281
+d2net MACH_D2NET D2NET 2282
+bigdisk MACH_BIGDISK BIGDISK 2283
+notalvision MACH_NOTALVISION NOTALVISION 2284
+omap3_kboc MACH_OMAP3_KBOC OMAP3_KBOC 2285
+cyclone MACH_CYCLONE CYCLONE 2286
+ninja MACH_NINJA NINJA 2287
+at91sam9g20ek_2mmc MACH_AT91SAM9G20EK_2MMC AT91SAM9G20EK_2MMC 2288
+bcmring MACH_BCMRING BCMRING 2289
+resol_dl2 MACH_RESOL_DL2 RESOL_DL2 2290
+ifosw MACH_IFOSW IFOSW 2291
+htcrhodium MACH_HTCRHODIUM HTCRHODIUM 2292
+htctopaz MACH_HTCTOPAZ HTCTOPAZ 2293
+matrix504 MACH_MATRIX504 MATRIX504 2294
+mrfsa MACH_MRFSA MRFSA 2295
diff --git a/arch/blackfin/Kconfig b/arch/blackfin/Kconfig
index 8ea0d94..7faa2f5 100644
--- a/arch/blackfin/Kconfig
+++ b/arch/blackfin/Kconfig
@@ -274,7 +274,7 @@
config BF_REV_0_1
bool "0.1"
- depends on (BF52x || (BF54x && !BF54xM))
+ depends on (BF51x || BF52x || (BF54x && !BF54xM))
config BF_REV_0_2
bool "0.2"
@@ -358,7 +358,7 @@
config MEM_MT48LC32M16A2TG_75
bool
- depends on (BFIN527_EZKIT || BFIN532_IP0X || BLACKSTAMP || BFIN526_EZBRD)
+ depends on (BFIN527_EZKIT || BFIN532_IP0X || BLACKSTAMP)
default y
config MEM_MT48LC32M8A2_75
@@ -366,6 +366,11 @@
depends on (BFIN518F_EZBRD)
default y
+config MEM_MT48H32M16LFCJ_75
+ bool
+ depends on (BFIN526_EZBRD)
+ default y
+
source "arch/blackfin/mach-bf518/Kconfig"
source "arch/blackfin/mach-bf527/Kconfig"
source "arch/blackfin/mach-bf533/Kconfig"
@@ -623,7 +628,6 @@
config TICKSOURCE_GPTMR0
bool "Gptimer0 (SCLK domain)"
select BFIN_GPTIMERS
- depends on !IPIPE
config TICKSOURCE_CORETMR
bool "Core timer (CCLK domain)"
@@ -644,6 +648,7 @@
config GPTMR0_CLOCKSOURCE
bool "Use GPTimer0 as a clocksource (higher rating)"
+ select BFIN_GPTIMERS
depends on GENERIC_CLOCKEVENTS
depends on !TICKSOURCE_GPTMR0
@@ -908,76 +913,97 @@
comment "Cache Support"
+
config BFIN_ICACHE
bool "Enable ICACHE"
+ default y
+config BFIN_ICACHE_LOCK
+ bool "Enable Instruction Cache Locking"
+ depends on BFIN_ICACHE
+ default n
+config BFIN_EXTMEM_ICACHEABLE
+ bool "Enable ICACHE for external memory"
+ depends on BFIN_ICACHE
+ default y
+config BFIN_L2_ICACHEABLE
+ bool "Enable ICACHE for L2 SRAM"
+ depends on BFIN_ICACHE
+ depends on BF54x || BF561
+ default n
+
config BFIN_DCACHE
bool "Enable DCACHE"
+ default y
config BFIN_DCACHE_BANKA
bool "Enable only 16k BankA DCACHE - BankB is SRAM"
depends on BFIN_DCACHE && !BF531
default n
-config BFIN_ICACHE_LOCK
- bool "Enable Instruction Cache Locking"
-
-choice
- prompt "External memory cache policy"
+config BFIN_EXTMEM_DCACHEABLE
+ bool "Enable DCACHE for external memory"
depends on BFIN_DCACHE
- default BFIN_WB if !SMP
- default BFIN_WT if SMP
-config BFIN_WB
- bool "Write back"
- depends on !SMP
- help
- Write Back Policy:
- Cached data will be written back to SDRAM only when needed.
- This can give a nice increase in performance, but beware of
- broken drivers that do not properly invalidate/flush their
- cache.
-
- Write Through Policy:
- Cached data will always be written back to SDRAM when the
- cache is updated. This is a completely safe setting, but
- performance is worse than Write Back.
-
- If you are unsure of the options and you want to be safe,
- then go with Write Through.
-
-config BFIN_WT
- bool "Write through"
- help
- Write Back Policy:
- Cached data will be written back to SDRAM only when needed.
- This can give a nice increase in performance, but beware of
- broken drivers that do not properly invalidate/flush their
- cache.
-
- Write Through Policy:
- Cached data will always be written back to SDRAM when the
- cache is updated. This is a completely safe setting, but
- performance is worse than Write Back.
-
- If you are unsure of the options and you want to be safe,
- then go with Write Through.
-
-endchoice
-
+ default y
choice
- prompt "L2 SRAM cache policy"
- depends on (BF54x || BF561)
- default BFIN_L2_WT
-config BFIN_L2_WB
+ prompt "External memory DCACHE policy"
+ depends on BFIN_EXTMEM_DCACHEABLE
+ default BFIN_EXTMEM_WRITEBACK if !SMP
+ default BFIN_EXTMEM_WRITETHROUGH if SMP
+config BFIN_EXTMEM_WRITEBACK
bool "Write back"
depends on !SMP
+ help
+ Write Back Policy:
+ Cached data will be written back to SDRAM only when needed.
+ This can give a nice increase in performance, but beware of
+ broken drivers that do not properly invalidate/flush their
+ cache.
-config BFIN_L2_WT
+ Write Through Policy:
+ Cached data will always be written back to SDRAM when the
+ cache is updated. This is a completely safe setting, but
+ performance is worse than Write Back.
+
+ If you are unsure of the options and you want to be safe,
+ then go with Write Through.
+
+config BFIN_EXTMEM_WRITETHROUGH
bool "Write through"
- depends on !SMP
+ help
+ Write Back Policy:
+ Cached data will be written back to SDRAM only when needed.
+ This can give a nice increase in performance, but beware of
+ broken drivers that do not properly invalidate/flush their
+ cache.
-config BFIN_L2_NOT_CACHED
- bool "Not cached"
+ Write Through Policy:
+ Cached data will always be written back to SDRAM when the
+ cache is updated. This is a completely safe setting, but
+ performance is worse than Write Back.
+
+ If you are unsure of the options and you want to be safe,
+ then go with Write Through.
endchoice
+config BFIN_L2_DCACHEABLE
+ bool "Enable DCACHE for L2 SRAM"
+ depends on BFIN_DCACHE
+ depends on BF54x || BF561
+ default n
+choice
+ prompt "L2 SRAM DCACHE policy"
+ depends on BFIN_L2_DCACHEABLE
+ default BFIN_L2_WRITEBACK
+config BFIN_L2_WRITEBACK
+ bool "Write back"
+ depends on !SMP
+
+config BFIN_L2_WRITETHROUGH
+ bool "Write through"
+ depends on !SMP
+endchoice
+
+
+comment "Memory Protection Unit"
config MPU
bool "Enable the memory protection unit (EXPERIMENTAL)"
default n
diff --git a/arch/blackfin/boot/Makefile b/arch/blackfin/boot/Makefile
index 3ab6f23..fd9ccc5 100644
--- a/arch/blackfin/boot/Makefile
+++ b/arch/blackfin/boot/Makefile
@@ -13,7 +13,7 @@
quiet_cmd_uimage = UIMAGE $@
cmd_uimage = $(CONFIG_SHELL) $(MKIMAGE) -A $(ARCH) -O linux -T kernel \
- -C $(2) -n 'Linux-$(KERNELRELEASE)' -a $(CONFIG_BOOT_LOAD) \
+ -C $(2) -n '$(MACHINE)-$(KERNELRELEASE)' -a $(CONFIG_BOOT_LOAD) \
-e $(shell $(NM) vmlinux | awk '$$NF == "__start" {print $$1}') \
-d $< $@
diff --git a/arch/blackfin/configs/BF518F-EZBRD_defconfig b/arch/blackfin/configs/BF518F-EZBRD_defconfig
index baec133..dcfb488 100644
--- a/arch/blackfin/configs/BF518F-EZBRD_defconfig
+++ b/arch/blackfin/configs/BF518F-EZBRD_defconfig
@@ -326,11 +326,17 @@
# Cache Support
#
CONFIG_BFIN_ICACHE=y
+# CONFIG_BFIN_ICACHE_LOCK is not set
CONFIG_BFIN_DCACHE=y
# CONFIG_BFIN_DCACHE_BANKA is not set
-# CONFIG_BFIN_ICACHE_LOCK is not set
-CONFIG_BFIN_WB=y
-# CONFIG_BFIN_WT is not set
+CONFIG_BFIN_EXTMEM_ICACHEABLE=y
+CONFIG_BFIN_EXTMEM_DCACHEABLE=y
+CONFIG_BFIN_EXTMEM_WRITEBACK=y
+# CONFIG_BFIN_EXTMEM_WRITETHROUGH is not set
+
+#
+# Memory Protection Unit
+#
# CONFIG_MPU is not set
#
@@ -413,11 +419,11 @@
# CONFIG_INET_IPCOMP is not set
# CONFIG_INET_XFRM_TUNNEL is not set
# CONFIG_INET_TUNNEL is not set
-CONFIG_INET_XFRM_MODE_TRANSPORT=y
-CONFIG_INET_XFRM_MODE_TUNNEL=y
-CONFIG_INET_XFRM_MODE_BEET=y
+# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
+# CONFIG_INET_XFRM_MODE_TUNNEL is not set
+# CONFIG_INET_XFRM_MODE_BEET is not set
# CONFIG_INET_LRO is not set
-CONFIG_INET_DIAG=y
+# CONFIG_INET_DIAG is not set
CONFIG_INET_TCP_DIAG=y
# CONFIG_TCP_CONG_ADVANCED is not set
CONFIG_TCP_CONG_CUBIC=y
@@ -916,7 +922,7 @@
# CONFIG_MMC_SDHCI is not set
CONFIG_SDH_BFIN=m
CONFIG_SDH_BFIN_MISSING_CMD_PULLUP_WORKAROUND=y
-CONFIG_SDH_BFIN_ENABLE_SDIO_IRQ=y
+# CONFIG_SDH_BFIN_ENABLE_SDIO_IRQ is not set
# CONFIG_MMC_SPI is not set
# CONFIG_MEMSTICK is not set
# CONFIG_NEW_LEDS is not set
@@ -1147,7 +1153,7 @@
# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
# CONFIG_DEBUG_KOBJECT is not set
-# CONFIG_DEBUG_BUGVERBOSE is not set
+CONFIG_DEBUG_BUGVERBOSE=y
CONFIG_DEBUG_INFO=y
# CONFIG_DEBUG_VM is not set
# CONFIG_DEBUG_WRITECOUNT is not set
diff --git a/arch/blackfin/configs/BF526-EZBRD_defconfig b/arch/blackfin/configs/BF526-EZBRD_defconfig
index c06262e..48a3a7a 100644
--- a/arch/blackfin/configs/BF526-EZBRD_defconfig
+++ b/arch/blackfin/configs/BF526-EZBRD_defconfig
@@ -331,16 +331,18 @@
# Cache Support
#
CONFIG_BFIN_ICACHE=y
+# CONFIG_BFIN_ICACHE_LOCK is not set
CONFIG_BFIN_DCACHE=y
# CONFIG_BFIN_DCACHE_BANKA is not set
-# CONFIG_BFIN_ICACHE_LOCK is not set
-CONFIG_BFIN_WB=y
-# CONFIG_BFIN_WT is not set
-# CONFIG_MPU is not set
+CONFIG_BFIN_EXTMEM_ICACHEABLE=y
+CONFIG_BFIN_EXTMEM_DCACHEABLE=y
+CONFIG_BFIN_EXTMEM_WRITEBACK=y
+# CONFIG_BFIN_EXTMEM_WRITETHROUGH is not set
#
-# Asynchonous Memory Configuration
+# Memory Protection Unit
#
+# CONFIG_MPU is not set
#
# EBIU_AMGCTL Global Control
@@ -418,11 +420,11 @@
# CONFIG_INET_IPCOMP is not set
# CONFIG_INET_XFRM_TUNNEL is not set
# CONFIG_INET_TUNNEL is not set
-CONFIG_INET_XFRM_MODE_TRANSPORT=y
-CONFIG_INET_XFRM_MODE_TUNNEL=y
-CONFIG_INET_XFRM_MODE_BEET=y
+# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
+# CONFIG_INET_XFRM_MODE_TUNNEL is not set
+# CONFIG_INET_XFRM_MODE_BEET is not set
# CONFIG_INET_LRO is not set
-CONFIG_INET_DIAG=y
+# CONFIG_INET_DIAG is not set
CONFIG_INET_TCP_DIAG=y
# CONFIG_TCP_CONG_ADVANCED is not set
CONFIG_TCP_CONG_CUBIC=y
@@ -1424,7 +1426,7 @@
# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
# CONFIG_DEBUG_KOBJECT is not set
-# CONFIG_DEBUG_BUGVERBOSE is not set
+CONFIG_DEBUG_BUGVERBOSE=y
CONFIG_DEBUG_INFO=y
# CONFIG_DEBUG_VM is not set
# CONFIG_DEBUG_WRITECOUNT is not set
diff --git a/arch/blackfin/configs/BF527-EZKIT_defconfig b/arch/blackfin/configs/BF527-EZKIT_defconfig
index e9175c6..dd83527 100644
--- a/arch/blackfin/configs/BF527-EZKIT_defconfig
+++ b/arch/blackfin/configs/BF527-EZKIT_defconfig
@@ -331,11 +331,17 @@
# Cache Support
#
CONFIG_BFIN_ICACHE=y
+# CONFIG_BFIN_ICACHE_LOCK is not set
CONFIG_BFIN_DCACHE=y
# CONFIG_BFIN_DCACHE_BANKA is not set
-# CONFIG_BFIN_ICACHE_LOCK is not set
-CONFIG_BFIN_WB=y
-# CONFIG_BFIN_WT is not set
+CONFIG_BFIN_EXTMEM_ICACHEABLE=y
+CONFIG_BFIN_EXTMEM_DCACHEABLE=y
+CONFIG_BFIN_EXTMEM_WRITEBACK=y
+# CONFIG_BFIN_EXTMEM_WRITETHROUGH is not set
+
+#
+# Memory Protection Unit
+#
# CONFIG_MPU is not set
#
@@ -418,11 +424,11 @@
# CONFIG_INET_IPCOMP is not set
# CONFIG_INET_XFRM_TUNNEL is not set
# CONFIG_INET_TUNNEL is not set
-CONFIG_INET_XFRM_MODE_TRANSPORT=y
-CONFIG_INET_XFRM_MODE_TUNNEL=y
-CONFIG_INET_XFRM_MODE_BEET=y
+# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
+# CONFIG_INET_XFRM_MODE_TUNNEL is not set
+# CONFIG_INET_XFRM_MODE_BEET is not set
# CONFIG_INET_LRO is not set
-CONFIG_INET_DIAG=y
+# CONFIG_INET_DIAG is not set
CONFIG_INET_TCP_DIAG=y
# CONFIG_TCP_CONG_ADVANCED is not set
CONFIG_TCP_CONG_CUBIC=y
@@ -1505,7 +1511,7 @@
# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
# CONFIG_DEBUG_KOBJECT is not set
-# CONFIG_DEBUG_BUGVERBOSE is not set
+CONFIG_DEBUG_BUGVERBOSE=y
CONFIG_DEBUG_INFO=y
# CONFIG_DEBUG_VM is not set
# CONFIG_DEBUG_WRITECOUNT is not set
diff --git a/arch/blackfin/configs/BF533-EZKIT_defconfig b/arch/blackfin/configs/BF533-EZKIT_defconfig
index 5aa63ba..4c04480 100644
--- a/arch/blackfin/configs/BF533-EZKIT_defconfig
+++ b/arch/blackfin/configs/BF533-EZKIT_defconfig
@@ -292,12 +292,21 @@
#
# Cache Support
#
+#
+# Cache Support
+#
CONFIG_BFIN_ICACHE=y
+# CONFIG_BFIN_ICACHE_LOCK is not set
CONFIG_BFIN_DCACHE=y
# CONFIG_BFIN_DCACHE_BANKA is not set
-# CONFIG_BFIN_ICACHE_LOCK is not set
-CONFIG_BFIN_WB=y
-# CONFIG_BFIN_WT is not set
+CONFIG_BFIN_EXTMEM_ICACHEABLE=y
+CONFIG_BFIN_EXTMEM_DCACHEABLE=y
+CONFIG_BFIN_EXTMEM_WRITEBACK=y
+# CONFIG_BFIN_EXTMEM_WRITETHROUGH is not set
+
+#
+# Memory Protection Unit
+#
# CONFIG_MPU is not set
#
@@ -391,11 +400,11 @@
# CONFIG_INET_IPCOMP is not set
# CONFIG_INET_XFRM_TUNNEL is not set
# CONFIG_INET_TUNNEL is not set
-CONFIG_INET_XFRM_MODE_TRANSPORT=y
-CONFIG_INET_XFRM_MODE_TUNNEL=y
-CONFIG_INET_XFRM_MODE_BEET=y
+# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
+# CONFIG_INET_XFRM_MODE_TUNNEL is not set
+# CONFIG_INET_XFRM_MODE_BEET is not set
# CONFIG_INET_LRO is not set
-CONFIG_INET_DIAG=y
+# CONFIG_INET_DIAG is not set
CONFIG_INET_TCP_DIAG=y
# CONFIG_TCP_CONG_ADVANCED is not set
CONFIG_TCP_CONG_CUBIC=y
@@ -1052,7 +1061,7 @@
# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
# CONFIG_DEBUG_KOBJECT is not set
-# CONFIG_DEBUG_BUGVERBOSE is not set
+CONFIG_DEBUG_BUGVERBOSE=y
CONFIG_DEBUG_INFO=y
# CONFIG_DEBUG_VM is not set
# CONFIG_DEBUG_WRITECOUNT is not set
diff --git a/arch/blackfin/configs/BF533-STAMP_defconfig b/arch/blackfin/configs/BF533-STAMP_defconfig
index fed2532..c99bbcd 100644
--- a/arch/blackfin/configs/BF533-STAMP_defconfig
+++ b/arch/blackfin/configs/BF533-STAMP_defconfig
@@ -293,11 +293,17 @@
# Cache Support
#
CONFIG_BFIN_ICACHE=y
+# CONFIG_BFIN_ICACHE_LOCK is not set
CONFIG_BFIN_DCACHE=y
# CONFIG_BFIN_DCACHE_BANKA is not set
-# CONFIG_BFIN_ICACHE_LOCK is not set
-CONFIG_BFIN_WB=y
-# CONFIG_BFIN_WT is not set
+CONFIG_BFIN_EXTMEM_ICACHEABLE=y
+CONFIG_BFIN_EXTMEM_DCACHEABLE=y
+CONFIG_BFIN_EXTMEM_WRITEBACK=y
+# CONFIG_BFIN_EXTMEM_WRITETHROUGH is not set
+
+#
+# Memory Protection Unit
+#
# CONFIG_MPU is not set
#
@@ -391,11 +397,11 @@
# CONFIG_INET_IPCOMP is not set
# CONFIG_INET_XFRM_TUNNEL is not set
# CONFIG_INET_TUNNEL is not set
-CONFIG_INET_XFRM_MODE_TRANSPORT=y
-CONFIG_INET_XFRM_MODE_TUNNEL=y
-CONFIG_INET_XFRM_MODE_BEET=y
+# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
+# CONFIG_INET_XFRM_MODE_TUNNEL is not set
+# CONFIG_INET_XFRM_MODE_BEET is not set
# CONFIG_INET_LRO is not set
-CONFIG_INET_DIAG=y
+# CONFIG_INET_DIAG is not set
CONFIG_INET_TCP_DIAG=y
# CONFIG_TCP_CONG_ADVANCED is not set
CONFIG_TCP_CONG_CUBIC=y
@@ -1216,7 +1222,7 @@
# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
# CONFIG_DEBUG_KOBJECT is not set
-# CONFIG_DEBUG_BUGVERBOSE is not set
+CONFIG_DEBUG_BUGVERBOSE=y
CONFIG_DEBUG_INFO=y
# CONFIG_DEBUG_VM is not set
# CONFIG_DEBUG_WRITECOUNT is not set
diff --git a/arch/blackfin/configs/BF537-STAMP_defconfig b/arch/blackfin/configs/BF537-STAMP_defconfig
index f9ac20d..092ffda 100644
--- a/arch/blackfin/configs/BF537-STAMP_defconfig
+++ b/arch/blackfin/configs/BF537-STAMP_defconfig
@@ -300,11 +300,17 @@
# Cache Support
#
CONFIG_BFIN_ICACHE=y
+# CONFIG_BFIN_ICACHE_LOCK is not set
CONFIG_BFIN_DCACHE=y
# CONFIG_BFIN_DCACHE_BANKA is not set
-# CONFIG_BFIN_ICACHE_LOCK is not set
-CONFIG_BFIN_WB=y
-# CONFIG_BFIN_WT is not set
+CONFIG_BFIN_EXTMEM_ICACHEABLE=y
+CONFIG_BFIN_EXTMEM_DCACHEABLE=y
+CONFIG_BFIN_EXTMEM_WRITEBACK=y
+# CONFIG_BFIN_EXTMEM_WRITETHROUGH is not set
+
+#
+# Memory Protection Unit
+#
# CONFIG_MPU is not set
#
@@ -399,11 +405,11 @@
# CONFIG_INET_IPCOMP is not set
# CONFIG_INET_XFRM_TUNNEL is not set
# CONFIG_INET_TUNNEL is not set
-CONFIG_INET_XFRM_MODE_TRANSPORT=y
-CONFIG_INET_XFRM_MODE_TUNNEL=y
-CONFIG_INET_XFRM_MODE_BEET=y
+# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
+# CONFIG_INET_XFRM_MODE_TUNNEL is not set
+# CONFIG_INET_XFRM_MODE_BEET is not set
# CONFIG_INET_LRO is not set
-CONFIG_INET_DIAG=y
+# CONFIG_INET_DIAG is not set
CONFIG_INET_TCP_DIAG=y
# CONFIG_TCP_CONG_ADVANCED is not set
CONFIG_TCP_CONG_CUBIC=y
@@ -1269,7 +1275,7 @@
# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
# CONFIG_DEBUG_KOBJECT is not set
-# CONFIG_DEBUG_BUGVERBOSE is not set
+CONFIG_DEBUG_BUGVERBOSE=y
CONFIG_DEBUG_INFO=y
# CONFIG_DEBUG_VM is not set
# CONFIG_DEBUG_WRITECOUNT is not set
diff --git a/arch/blackfin/configs/BF538-EZKIT_defconfig b/arch/blackfin/configs/BF538-EZKIT_defconfig
index ee98e22..fa698a8 100644
--- a/arch/blackfin/configs/BF538-EZKIT_defconfig
+++ b/arch/blackfin/configs/BF538-EZKIT_defconfig
@@ -311,11 +311,17 @@
# Cache Support
#
CONFIG_BFIN_ICACHE=y
+# CONFIG_BFIN_ICACHE_LOCK is not set
CONFIG_BFIN_DCACHE=y
# CONFIG_BFIN_DCACHE_BANKA is not set
-# CONFIG_BFIN_ICACHE_LOCK is not set
-CONFIG_BFIN_WB=y
-# CONFIG_BFIN_WT is not set
+CONFIG_BFIN_EXTMEM_ICACHEABLE=y
+CONFIG_BFIN_EXTMEM_DCACHEABLE=y
+CONFIG_BFIN_EXTMEM_WRITEBACK=y
+# CONFIG_BFIN_EXTMEM_WRITETHROUGH is not set
+
+#
+# Memory Protection Unit
+#
# CONFIG_MPU is not set
#
@@ -398,11 +404,11 @@
# CONFIG_INET_IPCOMP is not set
# CONFIG_INET_XFRM_TUNNEL is not set
# CONFIG_INET_TUNNEL is not set
-CONFIG_INET_XFRM_MODE_TRANSPORT=y
-CONFIG_INET_XFRM_MODE_TUNNEL=y
-CONFIG_INET_XFRM_MODE_BEET=y
+# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
+# CONFIG_INET_XFRM_MODE_TUNNEL is not set
+# CONFIG_INET_XFRM_MODE_BEET is not set
# CONFIG_INET_LRO is not set
-CONFIG_INET_DIAG=y
+# CONFIG_INET_DIAG is not set
CONFIG_INET_TCP_DIAG=y
# CONFIG_TCP_CONG_ADVANCED is not set
CONFIG_TCP_CONG_CUBIC=y
@@ -1203,7 +1209,7 @@
# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
# CONFIG_DEBUG_KOBJECT is not set
-# CONFIG_DEBUG_BUGVERBOSE is not set
+CONFIG_DEBUG_BUGVERBOSE=y
CONFIG_DEBUG_INFO=y
# CONFIG_DEBUG_VM is not set
# CONFIG_DEBUG_WRITECOUNT is not set
diff --git a/arch/blackfin/configs/BF548-EZKIT_defconfig b/arch/blackfin/configs/BF548-EZKIT_defconfig
index deeabef..b3d3cab 100644
--- a/arch/blackfin/configs/BF548-EZKIT_defconfig
+++ b/arch/blackfin/configs/BF548-EZKIT_defconfig
@@ -366,14 +366,19 @@
# Cache Support
#
CONFIG_BFIN_ICACHE=y
+# CONFIG_BFIN_ICACHE_LOCK is not set
CONFIG_BFIN_DCACHE=y
# CONFIG_BFIN_DCACHE_BANKA is not set
-# CONFIG_BFIN_ICACHE_LOCK is not set
-CONFIG_BFIN_WB=y
-# CONFIG_BFIN_WT is not set
-# CONFIG_BFIN_L2_WB is not set
-CONFIG_BFIN_L2_WT=y
-# CONFIG_BFIN_L2_NOT_CACHED is not set
+CONFIG_BFIN_EXTMEM_ICACHEABLE=y
+CONFIG_BFIN_EXTMEM_DCACHEABLE=y
+CONFIG_BFIN_EXTMEM_WRITEBACK=y
+# CONFIG_BFIN_EXTMEM_WRITETHROUGH is not set
+# CONFIG_BFIN_L2_ICACHEABLE is not set
+# CONFIG_BFIN_L2_DCACHEABLE is not set
+
+#
+# Memory Protection Unit
+#
# CONFIG_MPU is not set
#
@@ -459,11 +464,11 @@
# CONFIG_INET_IPCOMP is not set
# CONFIG_INET_XFRM_TUNNEL is not set
# CONFIG_INET_TUNNEL is not set
-CONFIG_INET_XFRM_MODE_TRANSPORT=y
-CONFIG_INET_XFRM_MODE_TUNNEL=y
-CONFIG_INET_XFRM_MODE_BEET=y
+# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
+# CONFIG_INET_XFRM_MODE_TUNNEL is not set
+# CONFIG_INET_XFRM_MODE_BEET is not set
# CONFIG_INET_LRO is not set
-CONFIG_INET_DIAG=y
+# CONFIG_INET_DIAG is not set
CONFIG_INET_TCP_DIAG=y
# CONFIG_TCP_CONG_ADVANCED is not set
CONFIG_TCP_CONG_CUBIC=y
@@ -1606,7 +1611,7 @@
# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
# CONFIG_DEBUG_KOBJECT is not set
-# CONFIG_DEBUG_BUGVERBOSE is not set
+CONFIG_DEBUG_BUGVERBOSE=y
CONFIG_DEBUG_INFO=y
# CONFIG_DEBUG_VM is not set
# CONFIG_DEBUG_WRITECOUNT is not set
diff --git a/arch/blackfin/configs/BF561-EZKIT_defconfig b/arch/blackfin/configs/BF561-EZKIT_defconfig
index dcfbe2e..0313cd1 100644
--- a/arch/blackfin/configs/BF561-EZKIT_defconfig
+++ b/arch/blackfin/configs/BF561-EZKIT_defconfig
@@ -331,14 +331,19 @@
# Cache Support
#
CONFIG_BFIN_ICACHE=y
+# CONFIG_BFIN_ICACHE_LOCK is not set
CONFIG_BFIN_DCACHE=y
# CONFIG_BFIN_DCACHE_BANKA is not set
-# CONFIG_BFIN_ICACHE_LOCK is not set
-CONFIG_BFIN_WB=y
-# CONFIG_BFIN_WT is not set
-# CONFIG_BFIN_L2_WB is not set
-CONFIG_BFIN_L2_WT=y
-# CONFIG_BFIN_L2_NOT_CACHED is not set
+CONFIG_BFIN_EXTMEM_ICACHEABLE=y
+CONFIG_BFIN_EXTMEM_DCACHEABLE=y
+CONFIG_BFIN_EXTMEM_WRITEBACK=y
+# CONFIG_BFIN_EXTMEM_WRITETHROUGH is not set
+# CONFIG_BFIN_L2_ICACHEABLE is not set
+# CONFIG_BFIN_L2_DCACHEABLE is not set
+
+#
+# Memory Protection Unit
+#
# CONFIG_MPU is not set
#
@@ -425,11 +430,11 @@
# CONFIG_INET_IPCOMP is not set
# CONFIG_INET_XFRM_TUNNEL is not set
# CONFIG_INET_TUNNEL is not set
-CONFIG_INET_XFRM_MODE_TRANSPORT=y
-CONFIG_INET_XFRM_MODE_TUNNEL=y
-CONFIG_INET_XFRM_MODE_BEET=y
+# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
+# CONFIG_INET_XFRM_MODE_TUNNEL is not set
+# CONFIG_INET_XFRM_MODE_BEET is not set
# CONFIG_INET_LRO is not set
-CONFIG_INET_DIAG=y
+# CONFIG_INET_DIAG is not set
CONFIG_INET_TCP_DIAG=y
# CONFIG_TCP_CONG_ADVANCED is not set
CONFIG_TCP_CONG_CUBIC=y
@@ -1044,7 +1049,7 @@
# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
# CONFIG_DEBUG_KOBJECT is not set
-# CONFIG_DEBUG_BUGVERBOSE is not set
+CONFIG_DEBUG_BUGVERBOSE=y
CONFIG_DEBUG_INFO=y
# CONFIG_DEBUG_VM is not set
# CONFIG_DEBUG_WRITECOUNT is not set
diff --git a/arch/blackfin/configs/BlackStamp_defconfig b/arch/blackfin/configs/BlackStamp_defconfig
index 174c578..5d944ff 100644
--- a/arch/blackfin/configs/BlackStamp_defconfig
+++ b/arch/blackfin/configs/BlackStamp_defconfig
@@ -285,11 +285,17 @@
# Cache Support
#
CONFIG_BFIN_ICACHE=y
+# CONFIG_BFIN_ICACHE_LOCK is not set
CONFIG_BFIN_DCACHE=y
# CONFIG_BFIN_DCACHE_BANKA is not set
-# CONFIG_BFIN_ICACHE_LOCK is not set
-CONFIG_BFIN_WB=y
-# CONFIG_BFIN_WT is not set
+CONFIG_BFIN_EXTMEM_ICACHEABLE=y
+CONFIG_BFIN_EXTMEM_DCACHEABLE=y
+CONFIG_BFIN_EXTMEM_WRITEBACK=y
+# CONFIG_BFIN_EXTMEM_WRITETHROUGH is not set
+
+#
+# Memory Protection Unit
+#
# CONFIG_MPU is not set
#
diff --git a/arch/blackfin/configs/CM-BF527_defconfig b/arch/blackfin/configs/CM-BF527_defconfig
index e17875e..648a31d 100644
--- a/arch/blackfin/configs/CM-BF527_defconfig
+++ b/arch/blackfin/configs/CM-BF527_defconfig
@@ -329,11 +329,17 @@
# Cache Support
#
CONFIG_BFIN_ICACHE=y
+# CONFIG_BFIN_ICACHE_LOCK is not set
CONFIG_BFIN_DCACHE=y
# CONFIG_BFIN_DCACHE_BANKA is not set
-# CONFIG_BFIN_ICACHE_LOCK is not set
-CONFIG_BFIN_WB=y
-# CONFIG_BFIN_WT is not set
+CONFIG_BFIN_EXTMEM_ICACHEABLE=y
+CONFIG_BFIN_EXTMEM_DCACHEABLE=y
+CONFIG_BFIN_EXTMEM_WRITEBACK=y
+# CONFIG_BFIN_EXTMEM_WRITETHROUGH is not set
+
+#
+# Memory Protection Unit
+#
# CONFIG_MPU is not set
#
@@ -417,11 +423,11 @@
# CONFIG_INET_IPCOMP is not set
# CONFIG_INET_XFRM_TUNNEL is not set
# CONFIG_INET_TUNNEL is not set
-CONFIG_INET_XFRM_MODE_TRANSPORT=y
-CONFIG_INET_XFRM_MODE_TUNNEL=y
-CONFIG_INET_XFRM_MODE_BEET=y
+# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
+# CONFIG_INET_XFRM_MODE_TUNNEL is not set
+# CONFIG_INET_XFRM_MODE_BEET is not set
# CONFIG_INET_LRO is not set
-CONFIG_INET_DIAG=y
+# CONFIG_INET_DIAG is not set
CONFIG_INET_TCP_DIAG=y
# CONFIG_TCP_CONG_ADVANCED is not set
CONFIG_TCP_CONG_CUBIC=y
@@ -1246,7 +1252,7 @@
# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
# CONFIG_DEBUG_KOBJECT is not set
-# CONFIG_DEBUG_BUGVERBOSE is not set
+CONFIG_DEBUG_BUGVERBOSE=y
# CONFIG_DEBUG_INFO is not set
# CONFIG_DEBUG_VM is not set
# CONFIG_DEBUG_WRITECOUNT is not set
diff --git a/arch/blackfin/configs/CM-BF533_defconfig b/arch/blackfin/configs/CM-BF533_defconfig
index fafd95e..ae665b9 100644
--- a/arch/blackfin/configs/CM-BF533_defconfig
+++ b/arch/blackfin/configs/CM-BF533_defconfig
@@ -262,12 +262,17 @@
# Cache Support
#
CONFIG_BFIN_ICACHE=y
+# CONFIG_BFIN_ICACHE_LOCK is not set
CONFIG_BFIN_DCACHE=y
# CONFIG_BFIN_DCACHE_BANKA is not set
-# CONFIG_BFIN_ICACHE_LOCK is not set
-CONFIG_BFIN_WB=y
-# CONFIG_BFIN_WT is not set
-CONFIG_L1_MAX_PIECE=16
+CONFIG_BFIN_EXTMEM_ICACHEABLE=y
+CONFIG_BFIN_EXTMEM_DCACHEABLE=y
+CONFIG_BFIN_EXTMEM_WRITEBACK=y
+# CONFIG_BFIN_EXTMEM_WRITETHROUGH is not set
+
+#
+# Memory Protection Unit
+#
# CONFIG_MPU is not set
#
@@ -353,10 +358,10 @@
# CONFIG_INET_IPCOMP is not set
# CONFIG_INET_XFRM_TUNNEL is not set
# CONFIG_INET_TUNNEL is not set
-CONFIG_INET_XFRM_MODE_TRANSPORT=y
-CONFIG_INET_XFRM_MODE_TUNNEL=y
-CONFIG_INET_XFRM_MODE_BEET=y
-CONFIG_INET_DIAG=y
+# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
+# CONFIG_INET_XFRM_MODE_TUNNEL is not set
+# CONFIG_INET_XFRM_MODE_BEET is not set
+# CONFIG_INET_DIAG is not set
CONFIG_INET_TCP_DIAG=y
# CONFIG_TCP_CONG_ADVANCED is not set
CONFIG_TCP_CONG_CUBIC=y
@@ -873,7 +878,7 @@
CONFIG_DEBUG_FS=y
# CONFIG_HEADERS_CHECK is not set
# CONFIG_DEBUG_KERNEL is not set
-# CONFIG_DEBUG_BUGVERBOSE is not set
+CONFIG_DEBUG_BUGVERBOSE=y
CONFIG_DEBUG_MMRS=y
CONFIG_DEBUG_HUNT_FOR_ZERO=y
CONFIG_DEBUG_BFIN_HWTRACE_ON=y
diff --git a/arch/blackfin/configs/CM-BF537E_defconfig b/arch/blackfin/configs/CM-BF537E_defconfig
index e73aa5a..d74b6f4 100644
--- a/arch/blackfin/configs/CM-BF537E_defconfig
+++ b/arch/blackfin/configs/CM-BF537E_defconfig
@@ -297,11 +297,17 @@
# Cache Support
#
CONFIG_BFIN_ICACHE=y
+# CONFIG_BFIN_ICACHE_LOCK is not set
CONFIG_BFIN_DCACHE=y
# CONFIG_BFIN_DCACHE_BANKA is not set
-# CONFIG_BFIN_ICACHE_LOCK is not set
-CONFIG_BFIN_WB=y
-# CONFIG_BFIN_WT is not set
+CONFIG_BFIN_EXTMEM_ICACHEABLE=y
+CONFIG_BFIN_EXTMEM_DCACHEABLE=y
+CONFIG_BFIN_EXTMEM_WRITEBACK=y
+# CONFIG_BFIN_EXTMEM_WRITETHROUGH is not set
+
+#
+# Memory Protection Unit
+#
# CONFIG_MPU is not set
#
@@ -383,11 +389,11 @@
# CONFIG_INET_IPCOMP is not set
# CONFIG_INET_XFRM_TUNNEL is not set
# CONFIG_INET_TUNNEL is not set
-CONFIG_INET_XFRM_MODE_TRANSPORT=y
-CONFIG_INET_XFRM_MODE_TUNNEL=y
-CONFIG_INET_XFRM_MODE_BEET=y
+# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
+# CONFIG_INET_XFRM_MODE_TUNNEL is not set
+# CONFIG_INET_XFRM_MODE_BEET is not set
# CONFIG_INET_LRO is not set
-CONFIG_INET_DIAG=y
+# CONFIG_INET_DIAG is not set
CONFIG_INET_TCP_DIAG=y
# CONFIG_TCP_CONG_ADVANCED is not set
CONFIG_TCP_CONG_CUBIC=y
@@ -861,7 +867,7 @@
# CONFIG_HEADERS_CHECK is not set
CONFIG_DEBUG_SECTION_MISMATCH=y
# CONFIG_DEBUG_KERNEL is not set
-# CONFIG_DEBUG_BUGVERBOSE is not set
+CONFIG_DEBUG_BUGVERBOSE=y
# CONFIG_DEBUG_MEMORY_INIT is not set
# CONFIG_RCU_CPU_STALL_DETECTOR is not set
diff --git a/arch/blackfin/configs/CM-BF537U_defconfig b/arch/blackfin/configs/CM-BF537U_defconfig
index 8021130..7fc8dfa 100644
--- a/arch/blackfin/configs/CM-BF537U_defconfig
+++ b/arch/blackfin/configs/CM-BF537U_defconfig
@@ -270,12 +270,17 @@
# Cache Support
#
CONFIG_BFIN_ICACHE=y
+# CONFIG_BFIN_ICACHE_LOCK is not set
CONFIG_BFIN_DCACHE=y
# CONFIG_BFIN_DCACHE_BANKA is not set
-# CONFIG_BFIN_ICACHE_LOCK is not set
-CONFIG_BFIN_WB=y
-# CONFIG_BFIN_WT is not set
-CONFIG_L1_MAX_PIECE=16
+CONFIG_BFIN_EXTMEM_ICACHEABLE=y
+CONFIG_BFIN_EXTMEM_DCACHEABLE=y
+CONFIG_BFIN_EXTMEM_WRITEBACK=y
+# CONFIG_BFIN_EXTMEM_WRITETHROUGH is not set
+
+#
+# Memory Protection Unit
+#
# CONFIG_MPU is not set
#
@@ -361,10 +366,10 @@
# CONFIG_INET_IPCOMP is not set
# CONFIG_INET_XFRM_TUNNEL is not set
# CONFIG_INET_TUNNEL is not set
-CONFIG_INET_XFRM_MODE_TRANSPORT=y
-CONFIG_INET_XFRM_MODE_TUNNEL=y
-CONFIG_INET_XFRM_MODE_BEET=y
-CONFIG_INET_DIAG=y
+# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
+# CONFIG_INET_XFRM_MODE_TUNNEL is not set
+# CONFIG_INET_XFRM_MODE_BEET is not set
+# CONFIG_INET_DIAG is not set
CONFIG_INET_TCP_DIAG=y
# CONFIG_TCP_CONG_ADVANCED is not set
CONFIG_TCP_CONG_CUBIC=y
@@ -901,7 +906,7 @@
CONFIG_DEBUG_FS=y
# CONFIG_HEADERS_CHECK is not set
# CONFIG_DEBUG_KERNEL is not set
-# CONFIG_DEBUG_BUGVERBOSE is not set
+CONFIG_DEBUG_BUGVERBOSE=y
CONFIG_DEBUG_MMRS=y
CONFIG_DEBUG_HUNT_FOR_ZERO=y
CONFIG_DEBUG_BFIN_HWTRACE_ON=y
diff --git a/arch/blackfin/configs/CM-BF548_defconfig b/arch/blackfin/configs/CM-BF548_defconfig
index dd815f0..acca4e5 100644
--- a/arch/blackfin/configs/CM-BF548_defconfig
+++ b/arch/blackfin/configs/CM-BF548_defconfig
@@ -333,12 +333,19 @@
# Cache Support
#
CONFIG_BFIN_ICACHE=y
+# CONFIG_BFIN_ICACHE_LOCK is not set
CONFIG_BFIN_DCACHE=y
# CONFIG_BFIN_DCACHE_BANKA is not set
-# CONFIG_BFIN_ICACHE_LOCK is not set
-CONFIG_BFIN_WB=y
-# CONFIG_BFIN_WT is not set
-CONFIG_L1_MAX_PIECE=16
+CONFIG_BFIN_EXTMEM_ICACHEABLE=y
+CONFIG_BFIN_EXTMEM_DCACHEABLE=y
+CONFIG_BFIN_EXTMEM_WRITEBACK=y
+# CONFIG_BFIN_EXTMEM_WRITETHROUGH is not set
+# CONFIG_BFIN_L2_ICACHEABLE is not set
+# CONFIG_BFIN_L2_DCACHEABLE is not set
+
+#
+# Memory Protection Unit
+#
# CONFIG_MPU is not set
#
@@ -428,11 +435,11 @@
# CONFIG_INET_IPCOMP is not set
# CONFIG_INET_XFRM_TUNNEL is not set
# CONFIG_INET_TUNNEL is not set
-CONFIG_INET_XFRM_MODE_TRANSPORT=y
-CONFIG_INET_XFRM_MODE_TUNNEL=y
-CONFIG_INET_XFRM_MODE_BEET=y
+# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
+# CONFIG_INET_XFRM_MODE_TUNNEL is not set
+# CONFIG_INET_XFRM_MODE_BEET is not set
# CONFIG_INET_LRO is not set
-CONFIG_INET_DIAG=y
+# CONFIG_INET_DIAG is not set
CONFIG_INET_TCP_DIAG=y
# CONFIG_TCP_CONG_ADVANCED is not set
CONFIG_TCP_CONG_CUBIC=y
@@ -1334,7 +1341,7 @@
CONFIG_DEBUG_FS=y
# CONFIG_HEADERS_CHECK is not set
# CONFIG_DEBUG_KERNEL is not set
-# CONFIG_DEBUG_BUGVERBOSE is not set
+CONFIG_DEBUG_BUGVERBOSE=y
# CONFIG_SAMPLES is not set
CONFIG_DEBUG_MMRS=y
CONFIG_DEBUG_HUNT_FOR_ZERO=y
diff --git a/arch/blackfin/configs/CM-BF561_defconfig b/arch/blackfin/configs/CM-BF561_defconfig
index 16c198b..bae4ee6 100644
--- a/arch/blackfin/configs/CM-BF561_defconfig
+++ b/arch/blackfin/configs/CM-BF561_defconfig
@@ -308,12 +308,19 @@
# Cache Support
#
CONFIG_BFIN_ICACHE=y
+# CONFIG_BFIN_ICACHE_LOCK is not set
CONFIG_BFIN_DCACHE=y
# CONFIG_BFIN_DCACHE_BANKA is not set
-# CONFIG_BFIN_ICACHE_LOCK is not set
-CONFIG_BFIN_WB=y
-# CONFIG_BFIN_WT is not set
-CONFIG_L1_MAX_PIECE=16
+CONFIG_BFIN_EXTMEM_ICACHEABLE=y
+CONFIG_BFIN_EXTMEM_DCACHEABLE=y
+CONFIG_BFIN_EXTMEM_WRITEBACK=y
+# CONFIG_BFIN_EXTMEM_WRITETHROUGH is not set
+# CONFIG_BFIN_L2_ICACHEABLE is not set
+# CONFIG_BFIN_L2_DCACHEABLE is not set
+
+#
+# Memory Protection Unit
+#
# CONFIG_MPU is not set
#
@@ -395,11 +402,11 @@
# CONFIG_INET_IPCOMP is not set
# CONFIG_INET_XFRM_TUNNEL is not set
# CONFIG_INET_TUNNEL is not set
-CONFIG_INET_XFRM_MODE_TRANSPORT=y
-CONFIG_INET_XFRM_MODE_TUNNEL=y
-CONFIG_INET_XFRM_MODE_BEET=y
+# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
+# CONFIG_INET_XFRM_MODE_TUNNEL is not set
+# CONFIG_INET_XFRM_MODE_BEET is not set
# CONFIG_INET_LRO is not set
-CONFIG_INET_DIAG=y
+# CONFIG_INET_DIAG is not set
CONFIG_INET_TCP_DIAG=y
# CONFIG_TCP_CONG_ADVANCED is not set
CONFIG_TCP_CONG_CUBIC=y
@@ -837,7 +844,7 @@
CONFIG_DEBUG_FS=y
# CONFIG_HEADERS_CHECK is not set
# CONFIG_DEBUG_KERNEL is not set
-# CONFIG_DEBUG_BUGVERBOSE is not set
+CONFIG_DEBUG_BUGVERBOSE=y
# CONFIG_SAMPLES is not set
CONFIG_DEBUG_MMRS=y
CONFIG_DEBUG_HUNT_FOR_ZERO=y
diff --git a/arch/blackfin/configs/H8606_defconfig b/arch/blackfin/configs/H8606_defconfig
index 6b4c1a9..a6a7c8e 100644
--- a/arch/blackfin/configs/H8606_defconfig
+++ b/arch/blackfin/configs/H8606_defconfig
@@ -258,12 +258,18 @@
# Cache Support
#
CONFIG_BFIN_ICACHE=y
+# CONFIG_BFIN_ICACHE_LOCK is not set
CONFIG_BFIN_DCACHE=y
# CONFIG_BFIN_DCACHE_BANKA is not set
-CONFIG_BFIN_ICACHE_LOCK=y
-CONFIG_BFIN_WB=y
-# CONFIG_BFIN_WT is not set
-CONFIG_L1_MAX_PIECE=16
+CONFIG_BFIN_EXTMEM_ICACHEABLE=y
+CONFIG_BFIN_EXTMEM_DCACHEABLE=y
+CONFIG_BFIN_EXTMEM_WRITEBACK=y
+# CONFIG_BFIN_EXTMEM_WRITETHROUGH is not set
+
+#
+# Memory Protection Unit
+#
+# CONFIG_MPU is not set
#
# Asynchonous Memory Configuration
diff --git a/arch/blackfin/configs/PNAV-10_defconfig b/arch/blackfin/configs/PNAV-10_defconfig
index 09701f9..ff377fa 100644
--- a/arch/blackfin/configs/PNAV-10_defconfig
+++ b/arch/blackfin/configs/PNAV-10_defconfig
@@ -295,11 +295,17 @@
# Cache Support
#
CONFIG_BFIN_ICACHE=y
+# CONFIG_BFIN_ICACHE_LOCK is not set
CONFIG_BFIN_DCACHE=y
# CONFIG_BFIN_DCACHE_BANKA is not set
-# CONFIG_BFIN_ICACHE_LOCK is not set
-CONFIG_BFIN_WB=y
-# CONFIG_BFIN_WT is not set
+CONFIG_BFIN_EXTMEM_ICACHEABLE=y
+CONFIG_BFIN_EXTMEM_DCACHEABLE=y
+CONFIG_BFIN_EXTMEM_WRITEBACK=y
+# CONFIG_BFIN_EXTMEM_WRITETHROUGH is not set
+
+#
+# Memory Protection Unit
+#
# CONFIG_MPU is not set
#
@@ -382,11 +388,11 @@
# CONFIG_INET_IPCOMP is not set
# CONFIG_INET_XFRM_TUNNEL is not set
# CONFIG_INET_TUNNEL is not set
-CONFIG_INET_XFRM_MODE_TRANSPORT=y
-CONFIG_INET_XFRM_MODE_TUNNEL=y
-CONFIG_INET_XFRM_MODE_BEET=y
+# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
+# CONFIG_INET_XFRM_MODE_TUNNEL is not set
+# CONFIG_INET_XFRM_MODE_BEET is not set
# CONFIG_INET_LRO is not set
-CONFIG_INET_DIAG=y
+# CONFIG_INET_DIAG is not set
CONFIG_INET_TCP_DIAG=y
# CONFIG_TCP_CONG_ADVANCED is not set
CONFIG_TCP_CONG_CUBIC=y
diff --git a/arch/blackfin/configs/SRV1_defconfig b/arch/blackfin/configs/SRV1_defconfig
index ec84a53..814f9ca 100644
--- a/arch/blackfin/configs/SRV1_defconfig
+++ b/arch/blackfin/configs/SRV1_defconfig
@@ -279,12 +279,18 @@
# Cache Support
#
CONFIG_BFIN_ICACHE=y
+# CONFIG_BFIN_ICACHE_LOCK is not set
CONFIG_BFIN_DCACHE=y
# CONFIG_BFIN_DCACHE_BANKA is not set
-# CONFIG_BFIN_ICACHE_LOCK is not set
-CONFIG_BFIN_WB=y
-# CONFIG_BFIN_WT is not set
-CONFIG_L1_MAX_PIECE=16
+CONFIG_BFIN_EXTMEM_ICACHEABLE=y
+CONFIG_BFIN_EXTMEM_DCACHEABLE=y
+CONFIG_BFIN_EXTMEM_WRITEBACK=y
+# CONFIG_BFIN_EXTMEM_WRITETHROUGH is not set
+
+#
+# Memory Protection Unit
+#
+# CONFIG_MPU is not set
#
# Asynchonous Memory Configuration
diff --git a/arch/blackfin/configs/TCM-BF537_defconfig b/arch/blackfin/configs/TCM-BF537_defconfig
index 6e27962..375e75a 100644
--- a/arch/blackfin/configs/TCM-BF537_defconfig
+++ b/arch/blackfin/configs/TCM-BF537_defconfig
@@ -287,11 +287,17 @@
# Cache Support
#
CONFIG_BFIN_ICACHE=y
+# CONFIG_BFIN_ICACHE_LOCK is not set
CONFIG_BFIN_DCACHE=y
# CONFIG_BFIN_DCACHE_BANKA is not set
-# CONFIG_BFIN_ICACHE_LOCK is not set
-CONFIG_BFIN_WB=y
-# CONFIG_BFIN_WT is not set
+CONFIG_BFIN_EXTMEM_ICACHEABLE=y
+CONFIG_BFIN_EXTMEM_DCACHEABLE=y
+CONFIG_BFIN_EXTMEM_WRITEBACK=y
+# CONFIG_BFIN_EXTMEM_WRITETHROUGH is not set
+
+#
+# Memory Protection Unit
+#
# CONFIG_MPU is not set
#
@@ -709,7 +715,7 @@
CONFIG_DEBUG_FS=y
# CONFIG_HEADERS_CHECK is not set
# CONFIG_DEBUG_KERNEL is not set
-# CONFIG_DEBUG_BUGVERBOSE is not set
+CONFIG_DEBUG_BUGVERBOSE=y
# CONFIG_DEBUG_MEMORY_INIT is not set
# CONFIG_RCU_CPU_STALL_DETECTOR is not set
# CONFIG_SYSCTL_SYSCALL_CHECK is not set
diff --git a/arch/blackfin/include/asm/blackfin.h b/arch/blackfin/include/asm/blackfin.h
index 8bb2cb1..4d44395 100644
--- a/arch/blackfin/include/asm/blackfin.h
+++ b/arch/blackfin/include/asm/blackfin.h
@@ -86,6 +86,7 @@
#endif /* __ASSEMBLY__ */
+#include <asm/mem_map.h>
#include <mach/blackfin.h>
#include <asm/bfin-global.h>
diff --git a/arch/blackfin/include/asm/cache.h b/arch/blackfin/include/asm/cache.h
index 2ef669e..477050a 100644
--- a/arch/blackfin/include/asm/cache.h
+++ b/arch/blackfin/include/asm/cache.h
@@ -35,10 +35,10 @@
#if defined(CONFIG_SMP) && \
!defined(CONFIG_BFIN_CACHE_COHERENT)
-# if defined(CONFIG_BFIN_ICACHE)
+# if defined(CONFIG_BFIN_EXTMEM_ICACHEABLE) || defined(CONFIG_BFIN_L2_ICACHEABLE)
# define __ARCH_SYNC_CORE_ICACHE
# endif
-# if defined(CONFIG_BFIN_DCACHE)
+# if defined(CONFIG_BFIN_EXTMEM_DCACHEABLE) || defined(CONFIG_BFIN_L2_DCACHEABLE)
# define __ARCH_SYNC_CORE_DCACHE
# endif
#ifndef __ASSEMBLY__
diff --git a/arch/blackfin/include/asm/cacheflush.h b/arch/blackfin/include/asm/cacheflush.h
index 5c17dee..7e55549e 100644
--- a/arch/blackfin/include/asm/cacheflush.h
+++ b/arch/blackfin/include/asm/cacheflush.h
@@ -56,7 +56,7 @@
static inline void flush_icache_range(unsigned start, unsigned end)
{
-#if defined(CONFIG_BFIN_WB)
+#if defined(CONFIG_BFIN_EXTMEM_WRITEBACK) || defined(CONFIG_BFIN_L2_WRITEBACK)
blackfin_dcache_flush_range(start, end);
#endif
@@ -87,9 +87,9 @@
#else
# define invalidate_dcache_range(start,end) do { } while (0)
#endif
-#if defined(CONFIG_BFIN_DCACHE) && defined(CONFIG_BFIN_WB)
+#if defined(CONFIG_BFIN_EXTMEM_WRITEBACK) || defined(CONFIG_BFIN_L2_WRITEBACK)
# define flush_dcache_range(start,end) blackfin_dcache_flush_range((start), (end))
-# define flush_dcache_page(page) blackfin_dflush_page(page_address(page))
+# define flush_dcache_page(page) blackfin_dflush_page(page_address(page))
#else
# define flush_dcache_range(start,end) do { } while (0)
# define flush_dcache_page(page) do { } while (0)
@@ -100,7 +100,7 @@
static inline int bfin_addr_dcacheable(unsigned long addr)
{
-#ifdef CONFIG_BFIN_DCACHE
+#ifdef CONFIG_BFIN_EXTMEM_DCACHEABLE
if (addr < (_ramend - DMA_UNCACHED_REGION))
return 1;
#endif
@@ -109,7 +109,7 @@
addr >= _ramend && addr < physical_mem_end)
return 1;
-#ifndef CONFIG_BFIN_L2_NOT_CACHED
+#ifdef CONFIG_BFIN_L2_DCACHEABLE
if (addr >= L2_START && addr < L2_START + L2_LENGTH)
return 1;
#endif
diff --git a/arch/blackfin/include/asm/cplb.h b/arch/blackfin/include/asm/cplb.h
index a75a6a9..c5dacf8 100644
--- a/arch/blackfin/include/asm/cplb.h
+++ b/arch/blackfin/include/asm/cplb.h
@@ -37,8 +37,6 @@
#define L1_IMEMORY ( CPLB_USER_RD | CPLB_VALID | CPLB_LOCK)
#define SDRAM_INON_CHBL ( CPLB_USER_RD | CPLB_VALID)
-/*Use the menuconfig cache policy here - CONFIG_BFIN_WT/CONFIG_BFIN_WB*/
-
#if ANOMALY_05000158
#define ANOMALY_05000158_WORKAROUND 0x200
#else
@@ -47,10 +45,12 @@
#define CPLB_COMMON (CPLB_DIRTY | CPLB_SUPV_WR | CPLB_USER_WR | CPLB_USER_RD | CPLB_VALID | ANOMALY_05000158_WORKAROUND)
-#ifdef CONFIG_BFIN_WB /*Write Back Policy */
+#ifdef CONFIG_BFIN_EXTMEM_WRITEBACK
#define SDRAM_DGENERIC (CPLB_L1_CHBL | CPLB_COMMON)
-#else /*Write Through */
+#elif defined(CONFIG_BFIN_EXTMEM_WRITETHROUGH)
#define SDRAM_DGENERIC (CPLB_L1_CHBL | CPLB_WT | CPLB_L1_AOW | CPLB_COMMON)
+#else
+#define SDRAM_DGENERIC (CPLB_COMMON)
#endif
#define SDRAM_DNON_CHBL (CPLB_COMMON)
@@ -61,21 +61,23 @@
#ifdef CONFIG_SMP
#define L2_ATTR (INITIAL_T | I_CPLB | D_CPLB)
-#define L2_IMEMORY (CPLB_COMMON)
-#define L2_DMEMORY (CPLB_LOCK | CPLB_COMMON)
+#define L2_IMEMORY (CPLB_COMMON | PAGE_SIZE_1MB)
+#define L2_DMEMORY (CPLB_LOCK | CPLB_COMMON | PAGE_SIZE_1MB)
#else
#define L2_ATTR (INITIAL_T | SWITCH_T | I_CPLB | D_CPLB)
-#define L2_IMEMORY (SDRAM_IGENERIC)
-
-# if defined(CONFIG_BFIN_L2_WB)
-# define L2_DMEMORY (CPLB_L1_CHBL | CPLB_COMMON)
-# elif defined(CONFIG_BFIN_L2_WT)
-# define L2_DMEMORY (CPLB_L1_CHBL | CPLB_WT | CPLB_L1_AOW | CPLB_COMMON)
-# elif defined(CONFIG_BFIN_L2_NOT_CACHED)
-# define L2_DMEMORY (CPLB_COMMON)
+# if defined(CONFIG_BFIN_L2_ICACHEABLE)
+# define L2_IMEMORY (CPLB_L1_CHBL | CPLB_USER_RD | CPLB_VALID | PAGE_SIZE_1MB)
# else
-# define L2_DMEMORY (0)
+# define L2_IMEMORY ( CPLB_USER_RD | CPLB_VALID | PAGE_SIZE_1MB)
+# endif
+
+# if defined(CONFIG_BFIN_L2_WRITEBACK)
+# define L2_DMEMORY (CPLB_L1_CHBL | CPLB_COMMON | PAGE_SIZE_1MB)
+# elif defined(CONFIG_BFIN_L2_WRITETHROUGH)
+# define L2_DMEMORY (CPLB_L1_CHBL | CPLB_WT | CPLB_L1_AOW | CPLB_COMMON | PAGE_SIZE_1MB)
+# else
+# define L2_DMEMORY (CPLB_COMMON | PAGE_SIZE_1MB)
# endif
#endif /* CONFIG_SMP */
diff --git a/arch/blackfin/include/asm/dma-mapping.h b/arch/blackfin/include/asm/dma-mapping.h
index d7d9148..ed6b1f3 100644
--- a/arch/blackfin/include/asm/dma-mapping.h
+++ b/arch/blackfin/include/asm/dma-mapping.h
@@ -95,4 +95,17 @@
enum dma_data_direction dir)
{
}
+
+static inline void dma_sync_sg_for_cpu(struct device *dev,
+ struct scatterlist *sg,
+ int nents, enum dma_data_direction dir)
+{
+}
+
+static inline void dma_sync_sg_for_device(struct device *dev,
+ struct scatterlist *sg,
+ int nents, enum dma_data_direction dir)
+{
+}
+
#endif /* _BLACKFIN_DMA_MAPPING_H */
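The two stubs added here round out Blackfin's streaming-DMA scatterlist API, mirroring the empty single-buffer sync helpers just above, so portable drivers that bracket CPU access to a mapped scatterlist compile unchanged. The calling pattern they satisfy, sketched:

    /* Sketch: dev, sg and nents would come from an earlier dma_map_sg(). */
    #include <linux/dma-mapping.h>
    #include <linux/scatterlist.h>

    static void example_cpu_peek(struct device *dev, struct scatterlist *sg,
                                 int nents)
    {
            /* hand the buffers to the CPU before reading them ... */
            dma_sync_sg_for_cpu(dev, sg, nents, DMA_FROM_DEVICE);

            /* ... inspect the received data here ... */

            /* ... and give them back before the device writes again */
            dma_sync_sg_for_device(dev, sg, nents, DMA_FROM_DEVICE);
    }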
diff --git a/arch/blackfin/include/asm/ipipe.h b/arch/blackfin/include/asm/ipipe.h
index bbe1c37..87ba9ad 100644
--- a/arch/blackfin/include/asm/ipipe.h
+++ b/arch/blackfin/include/asm/ipipe.h
@@ -35,9 +35,9 @@
#include <asm/atomic.h>
#include <asm/traps.h>
-#define IPIPE_ARCH_STRING "1.10-00"
+#define IPIPE_ARCH_STRING "1.11-00"
#define IPIPE_MAJOR_NUMBER 1
-#define IPIPE_MINOR_NUMBER 10
+#define IPIPE_MINOR_NUMBER 11
#define IPIPE_PATCH_NUMBER 0
#ifdef CONFIG_SMP
@@ -207,7 +207,7 @@
int ipipe_start_irq_thread(unsigned irq, struct irq_desc *desc);
-#ifdef CONFIG_GENERIC_CLOCKEVENTS
+#ifdef CONFIG_TICKSOURCE_CORETMR
#define IRQ_SYSTMR IRQ_CORETMR
#define IRQ_PRIOTMR IRQ_CORETMR
#else
@@ -240,8 +240,13 @@
#define ipipe_init_irq_threads() do { } while (0)
#define ipipe_start_irq_thread(irq, desc) 0
+#ifndef CONFIG_TICKSOURCE_GPTMR0
#define IRQ_SYSTMR IRQ_CORETMR
#define IRQ_PRIOTMR IRQ_CORETMR
+#else
+#define IRQ_SYSTMR IRQ_TIMER0
+#define IRQ_PRIOTMR CONFIG_IRQ_TIMER0
+#endif
#define __ipipe_root_tick_p(regs) 1
diff --git a/arch/blackfin/include/asm/ipipe_base.h b/arch/blackfin/include/asm/ipipe_base.h
index 3e8acbd..490098f 100644
--- a/arch/blackfin/include/asm/ipipe_base.h
+++ b/arch/blackfin/include/asm/ipipe_base.h
@@ -51,23 +51,23 @@
extern unsigned long __ipipe_root_status; /* Alias to ipipe_root_cpudom_var(status) */
-static inline void __ipipe_stall_root(void)
-{
- volatile unsigned long *p = &__ipipe_root_status;
- set_bit(0, p);
-}
+#define __ipipe_stall_root() \
+ do { \
+ volatile unsigned long *p = &__ipipe_root_status; \
+ set_bit(0, p); \
+ } while (0)
-static inline unsigned long __ipipe_test_and_stall_root(void)
-{
- volatile unsigned long *p = &__ipipe_root_status;
- return test_and_set_bit(0, p);
-}
+#define __ipipe_test_and_stall_root() \
+ ({ \
+ volatile unsigned long *p = &__ipipe_root_status; \
+ test_and_set_bit(0, p); \
+ })
-static inline unsigned long __ipipe_test_root(void)
-{
- const unsigned long *p = &__ipipe_root_status;
- return test_bit(0, p);
-}
+#define __ipipe_test_root() \
+ ({ \
+ const unsigned long *p = &__ipipe_root_status; \
+ test_bit(0, p); \
+ })
#endif /* !__ASSEMBLY__ */
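The hunk above converts the root-domain stall helpers from inline functions to macros with identical behaviour; the test variants are GCC statement expressions, so they still yield a value and existing callers need no change. A small hedged illustration of the usual pattern:

    /* Sketch: stall the root domain around a short critical section and
     * restore its previous state afterwards. */
    static inline void example_root_critical(void)
    {
            unsigned long oldstall = __ipipe_test_and_stall_root();

            /* ... work that must not be preempted by root-domain IRQs ... */

            if (!oldstall)
                    __ipipe_unstall_root();
    }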
diff --git a/arch/blackfin/include/asm/irq.h b/arch/blackfin/include/asm/irq.h
index 9a7f63a..42a15f5 100644
--- a/arch/blackfin/include/asm/irq.h
+++ b/arch/blackfin/include/asm/irq.h
@@ -22,13 +22,6 @@
/* SYS_IRQS and NR_IRQS are defined in <mach-bf5xx/irq.h> */
#include <mach/irq.h>
-/* Xenomai IPIPE helpers */
-#define local_irq_restore_hw(x) local_irq_restore(x)
-#define local_irq_save_hw(x) local_irq_save(x)
-#define local_irq_enable_hw(x) local_irq_enable(x)
-#define local_irq_disable_hw(x) local_irq_disable(x)
-#define irqs_disabled_hw(x) irqs_disabled(x)
-
#if ANOMALY_05000244 && defined(CONFIG_BFIN_ICACHE)
# define NOP_PAD_ANOMALY_05000244 "nop; nop;"
#else
diff --git a/arch/blackfin/include/asm/irqflags.h b/arch/blackfin/include/asm/irqflags.h
index 139cba4..9b19a19 100644
--- a/arch/blackfin/include/asm/irqflags.h
+++ b/arch/blackfin/include/asm/irqflags.h
@@ -31,6 +31,150 @@
return flags;
}
+#ifdef CONFIG_IPIPE
+
+#include <linux/ipipe_base.h>
+#include <linux/ipipe_trace.h>
+
+#ifdef CONFIG_DEBUG_HWERR
+# define bfin_no_irqs 0x3f
+#else
+# define bfin_no_irqs 0x1f
+#endif
+
+#define raw_local_irq_disable() \
+ do { \
+ ipipe_check_context(ipipe_root_domain); \
+ __ipipe_stall_root(); \
+ barrier(); \
+ } while (0)
+
+static inline void raw_local_irq_enable(void)
+{
+ barrier();
+ ipipe_check_context(ipipe_root_domain);
+ __ipipe_unstall_root();
+}
+
+#define raw_local_save_flags_ptr(x) \
+ do { \
+ *(x) = __ipipe_test_root() ? bfin_no_irqs : bfin_irq_flags; \
+ } while (0)
+
+#define raw_local_save_flags(x) raw_local_save_flags_ptr(&(x))
+
+#define raw_irqs_disabled_flags(x) ((x) == bfin_no_irqs)
+
+#define raw_local_irq_save_ptr(x) \
+ do { \
+ *(x) = __ipipe_test_and_stall_root() ? bfin_no_irqs : bfin_irq_flags; \
+ barrier(); \
+ } while (0)
+
+#define raw_local_irq_save(x) \
+ do { \
+ ipipe_check_context(ipipe_root_domain); \
+ raw_local_irq_save_ptr(&(x)); \
+ } while (0)
+
+static inline unsigned long raw_mangle_irq_bits(int virt, unsigned long real)
+{
+ /*
+ * Merge virtual and real interrupt mask bits into a single
+ * 32bit word.
+ */
+ return (real & ~(1 << 31)) | ((virt != 0) << 31);
+}
+
+static inline int raw_demangle_irq_bits(unsigned long *x)
+{
+ int virt = (*x & (1 << 31)) != 0;
+ *x &= ~(1L << 31);
+ return virt;
+}
+
+static inline void local_irq_disable_hw_notrace(void)
+{
+ bfin_cli();
+}
+
+static inline void local_irq_enable_hw_notrace(void)
+{
+ bfin_sti(bfin_irq_flags);
+}
+
+#define local_save_flags_hw(flags) \
+ do { \
+ (flags) = bfin_read_IMASK(); \
+ } while (0)
+
+#define irqs_disabled_flags_hw(flags) (((flags) & ~0x3f) == 0)
+
+#define irqs_disabled_hw() \
+ ({ \
+ unsigned long flags; \
+ local_save_flags_hw(flags); \
+ irqs_disabled_flags_hw(flags); \
+ })
+
+static inline void local_irq_save_ptr_hw(unsigned long *flags)
+{
+ *flags = bfin_cli();
+#ifdef CONFIG_DEBUG_HWERR
+ bfin_sti(0x3f);
+#endif
+}
+
+#define local_irq_save_hw_notrace(flags) \
+ do { \
+ local_irq_save_ptr_hw(&(flags)); \
+ } while (0)
+
+static inline void local_irq_restore_hw_notrace(unsigned long flags)
+{
+ if (!irqs_disabled_flags_hw(flags))
+ local_irq_enable_hw_notrace();
+}
+
+#ifdef CONFIG_IPIPE_TRACE_IRQSOFF
+# define local_irq_disable_hw() \
+ do { \
+ if (!irqs_disabled_hw()) { \
+ local_irq_disable_hw_notrace(); \
+ ipipe_trace_begin(0x80000000); \
+ } \
+ } while (0)
+# define local_irq_enable_hw() \
+ do { \
+ if (irqs_disabled_hw()) { \
+ ipipe_trace_end(0x80000000); \
+ local_irq_enable_hw_notrace(); \
+ } \
+ } while (0)
+# define local_irq_save_hw(flags) \
+ do { \
+ local_save_flags_hw(flags); \
+ if (!irqs_disabled_flags_hw(flags)) { \
+ local_irq_disable_hw_notrace(); \
+ ipipe_trace_begin(0x80000001); \
+ } \
+ } while (0)
+# define local_irq_restore_hw(flags) \
+ do { \
+ if (!irqs_disabled_flags_hw(flags)) { \
+ ipipe_trace_end(0x80000001); \
+ local_irq_enable_hw_notrace(); \
+ } \
+ } while (0)
+#else /* !CONFIG_IPIPE_TRACE_IRQSOFF */
+# define local_irq_disable_hw() local_irq_disable_hw_notrace()
+# define local_irq_enable_hw() local_irq_enable_hw_notrace()
+# define local_irq_save_hw(flags) local_irq_save_hw_notrace(flags)
+# define local_irq_restore_hw(flags) local_irq_restore_hw_notrace(flags)
+#endif /* !CONFIG_IPIPE_TRACE_IRQSOFF */
+
+#else /* CONFIG_IPIPE */
+
static inline void raw_local_irq_disable(void)
{
bfin_cli();
@@ -44,12 +188,6 @@
#define raw_irqs_disabled_flags(flags) (((flags) & ~0x3f) == 0)
-static inline void raw_local_irq_restore(unsigned long flags)
-{
- if (!raw_irqs_disabled_flags(flags))
- raw_local_irq_enable();
-}
-
static inline unsigned long __raw_local_irq_save(void)
{
unsigned long flags = bfin_cli();
@@ -60,4 +198,18 @@
}
#define raw_local_irq_save(flags) do { (flags) = __raw_local_irq_save(); } while (0)
+#define local_irq_save_hw(flags) raw_local_irq_save(flags)
+#define local_irq_restore_hw(flags) raw_local_irq_restore(flags)
+#define local_irq_enable_hw() raw_local_irq_enable()
+#define local_irq_disable_hw() raw_local_irq_disable()
+#define irqs_disabled_hw() irqs_disabled()
+
+#endif /* !CONFIG_IPIPE */
+
+static inline void raw_local_irq_restore(unsigned long flags)
+{
+ if (!raw_irqs_disabled_flags(flags))
+ raw_local_irq_enable();
+}
+
#endif
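The local_irq_*_hw() family defined above gives code that must genuinely mask the hardware (not merely stall the I-pipe root domain) one spelling that works with and without CONFIG_IPIPE; in the !CONFIG_IPIPE case it simply aliases the raw_local_irq_*() helpers. The usual save/restore pattern, sketched:

    /* Sketch: a short section with hardware interrupts really disabled. */
    static inline unsigned int example_snapshot(volatile unsigned int *a,
                                                volatile unsigned int *b)
    {
            unsigned long flags;
            unsigned int sum;

            local_irq_save_hw(flags);
            sum = *a + *b;          /* consistent pair, no IRQ in between */
            local_irq_restore_hw(flags);

            return sum;
    }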
diff --git a/arch/blackfin/include/asm/mem_init.h b/arch/blackfin/include/asm/mem_init.h
index 61f7487..4179e32 100644
--- a/arch/blackfin/include/asm/mem_init.h
+++ b/arch/blackfin/include/asm/mem_init.h
@@ -59,7 +59,7 @@
#define SDRAM_tRP TRP_1
#define SDRAM_tRP_num 1
#define SDRAM_tRAS TRAS_4
-#define SDRAM_tRAS_num 3
+#define SDRAM_tRAS_num 4
#define SDRAM_tRCD TRCD_1
#define SDRAM_tWR TWR_2
#endif
@@ -89,6 +89,85 @@
#endif
#endif
+/*
+ * The BF526-EZ-Board changed SDRAM chips between revisions,
+ * so we use the timings below to accommodate both.
+ */
+#if defined(CONFIG_MEM_MT48H32M16LFCJ_75)
+#if (CONFIG_SCLK_HZ > 119402985)
+#define SDRAM_tRP TRP_2
+#define SDRAM_tRP_num 2
+#define SDRAM_tRAS TRAS_8
+#define SDRAM_tRAS_num 8
+#define SDRAM_tRCD TRCD_2
+#define SDRAM_tWR TWR_2
+#endif
+#if (CONFIG_SCLK_HZ > 104477612) && (CONFIG_SCLK_HZ <= 119402985)
+#define SDRAM_tRP TRP_2
+#define SDRAM_tRP_num 2
+#define SDRAM_tRAS TRAS_7
+#define SDRAM_tRAS_num 7
+#define SDRAM_tRCD TRCD_2
+#define SDRAM_tWR TWR_2
+#endif
+#if (CONFIG_SCLK_HZ > 89552239) && (CONFIG_SCLK_HZ <= 104477612)
+#define SDRAM_tRP TRP_2
+#define SDRAM_tRP_num 2
+#define SDRAM_tRAS TRAS_6
+#define SDRAM_tRAS_num 6
+#define SDRAM_tRCD TRCD_2
+#define SDRAM_tWR TWR_2
+#endif
+#if (CONFIG_SCLK_HZ > 74626866) && (CONFIG_SCLK_HZ <= 89552239)
+#define SDRAM_tRP TRP_2
+#define SDRAM_tRP_num 2
+#define SDRAM_tRAS TRAS_5
+#define SDRAM_tRAS_num 5
+#define SDRAM_tRCD TRCD_2
+#define SDRAM_tWR TWR_2
+#endif
+#if (CONFIG_SCLK_HZ > 66666667) && (CONFIG_SCLK_HZ <= 74626866)
+#define SDRAM_tRP TRP_2
+#define SDRAM_tRP_num 2
+#define SDRAM_tRAS TRAS_4
+#define SDRAM_tRAS_num 4
+#define SDRAM_tRCD TRCD_2
+#define SDRAM_tWR TWR_2
+#endif
+#if (CONFIG_SCLK_HZ > 59701493) && (CONFIG_SCLK_HZ <= 66666667)
+#define SDRAM_tRP TRP_2
+#define SDRAM_tRP_num 2
+#define SDRAM_tRAS TRAS_4
+#define SDRAM_tRAS_num 4
+#define SDRAM_tRCD TRCD_1
+#define SDRAM_tWR TWR_2
+#endif
+#if (CONFIG_SCLK_HZ > 44776119) && (CONFIG_SCLK_HZ <= 59701493)
+#define SDRAM_tRP TRP_2
+#define SDRAM_tRP_num 2
+#define SDRAM_tRAS TRAS_3
+#define SDRAM_tRAS_num 3
+#define SDRAM_tRCD TRCD_1
+#define SDRAM_tWR TWR_2
+#endif
+#if (CONFIG_SCLK_HZ > 29850746) && (CONFIG_SCLK_HZ <= 44776119)
+#define SDRAM_tRP TRP_1
+#define SDRAM_tRP_num 1
+#define SDRAM_tRAS TRAS_3
+#define SDRAM_tRAS_num 3
+#define SDRAM_tRCD TRCD_1
+#define SDRAM_tWR TWR_2
+#endif
+#if (CONFIG_SCLK_HZ <= 29850746)
+#define SDRAM_tRP TRP_1
+#define SDRAM_tRP_num 1
+#define SDRAM_tRAS TRAS_2
+#define SDRAM_tRAS_num 2
+#define SDRAM_tRCD TRCD_1
+#define SDRAM_tWR TWR_2
+#endif
+#endif
+
#if defined(CONFIG_MEM_MT48LC16M8A2TG_75) || \
defined(CONFIG_MEM_MT48LC8M32B2B5_7)
/*SDRAM INFORMATION: */
@@ -109,6 +188,13 @@
#define SDRAM_CL CL_3
#endif
+#if defined(CONFIG_MEM_MT48H32M16LFCJ_75)
+ /*SDRAM INFORMATION: */
+#define SDRAM_Tref 64 /* Refresh period in milliseconds */
+#define SDRAM_NRA 8192 /* Number of row addresses in SDRAM */
+#define SDRAM_CL CL_2
+#endif
+
#ifdef CONFIG_BFIN_KERNEL_CLOCK_MEMINIT_CALC
/* Equation from section 17 (p17-46) of BF533 HRM */
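Each SCLK band in the new MT48H32M16LFCJ-75 block is simply the smallest cycle count that still covers the chip's nanosecond timing at that clock rate; the band edges sit exactly where one more cycle becomes necessary. A sketch of that ns-to-cycles rounding, using a placeholder timing value rather than a datasheet figure:

    /* Sketch: cycles = ceil(ns * sclk_hz / 1e9).  The 45 ns figure below
     * is a placeholder, not a value taken from this patch. */
    static inline unsigned int ns_to_sclk_cycles(unsigned int ns,
                                                 unsigned long sclk_hz)
    {
            unsigned long long ticks = (unsigned long long)ns * sclk_hz;

            return (unsigned int)((ticks + 999999999ULL) / 1000000000ULL);
    }

    /* e.g. a hypothetical 45 ns tRAS at SCLK = 100 MHz needs
     * ns_to_sclk_cycles(45, 100000000) == 5 cycles. */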
diff --git a/arch/blackfin/include/asm/mem_map.h b/arch/blackfin/include/asm/mem_map.h
index e92b310..5e21627 100644
--- a/arch/blackfin/include/asm/mem_map.h
+++ b/arch/blackfin/include/asm/mem_map.h
@@ -1,87 +1,84 @@
/*
- * mem_map.h
- * Common header file for blackfin family of processors.
+ * Common Blackfin memory map
*
+ * Copyright 2004-2009 Analog Devices Inc.
+ * Licensed under the GPL-2 or later.
*/
-#ifndef _MEM_MAP_H_
-#define _MEM_MAP_H_
+#ifndef __BFIN_MEM_MAP_H__
+#define __BFIN_MEM_MAP_H__
#include <mach/mem_map.h>
-#ifndef __ASSEMBLY__
+/* Every Blackfin so far has MMRs like this */
+#ifndef COREMMR_BASE
+# define COREMMR_BASE 0xFFE00000
+#endif
+#ifndef SYSMMR_BASE
+# define SYSMMR_BASE 0xFFC00000
+#endif
-#ifdef CONFIG_SMP
-static inline ulong get_l1_scratch_start_cpu(int cpu)
-{
- return (cpu) ? COREB_L1_SCRATCH_START : COREA_L1_SCRATCH_START;
-}
-static inline ulong get_l1_code_start_cpu(int cpu)
-{
- return (cpu) ? COREB_L1_CODE_START : COREA_L1_CODE_START;
-}
-static inline ulong get_l1_data_a_start_cpu(int cpu)
-{
- return (cpu) ? COREB_L1_DATA_A_START : COREA_L1_DATA_A_START;
-}
-static inline ulong get_l1_data_b_start_cpu(int cpu)
-{
- return (cpu) ? COREB_L1_DATA_B_START : COREA_L1_DATA_B_START;
-}
+/* Every Blackfin so far has on-chip Scratch Pad SRAM like this */
+#ifndef L1_SCRATCH_START
+# define L1_SCRATCH_START 0xFFB00000
+# define L1_SCRATCH_LENGTH 0x1000
+#endif
-static inline ulong get_l1_scratch_start(void)
-{
- return get_l1_scratch_start_cpu(blackfin_core_id());
-}
-static inline ulong get_l1_code_start(void)
-{
- return get_l1_code_start_cpu(blackfin_core_id());
-}
-static inline ulong get_l1_data_a_start(void)
-{
- return get_l1_data_a_start_cpu(blackfin_core_id());
-}
-static inline ulong get_l1_data_b_start(void)
-{
- return get_l1_data_b_start_cpu(blackfin_core_id());
-}
+/* Most parts lack on-chip L2 SRAM */
+#ifndef L2_START
+# define L2_START 0
+# define L2_LENGTH 0
+#endif
-#else /* !CONFIG_SMP */
+/* Most parts lack on-chip L1 ROM */
+#ifndef L1_ROM_START
+# define L1_ROM_START 0
+# define L1_ROM_LENGTH 0
+#endif
-static inline ulong get_l1_scratch_start_cpu(int cpu)
+/* Allow wonky SMP ports to override this */
+#ifndef GET_PDA_SAFE
+# define GET_PDA_SAFE(preg) \
+ preg.l = _cpu_pda; \
+ preg.h = _cpu_pda;
+# define GET_PDA(preg, dreg) GET_PDA_SAFE(preg)
+
+# ifndef __ASSEMBLY__
+
+static inline unsigned long get_l1_scratch_start_cpu(int cpu)
{
return L1_SCRATCH_START;
}
-static inline ulong get_l1_code_start_cpu(int cpu)
+static inline unsigned long get_l1_code_start_cpu(int cpu)
{
return L1_CODE_START;
}
-static inline ulong get_l1_data_a_start_cpu(int cpu)
+static inline unsigned long get_l1_data_a_start_cpu(int cpu)
{
return L1_DATA_A_START;
}
-static inline ulong get_l1_data_b_start_cpu(int cpu)
+static inline unsigned long get_l1_data_b_start_cpu(int cpu)
{
return L1_DATA_B_START;
}
-static inline ulong get_l1_scratch_start(void)
+static inline unsigned long get_l1_scratch_start(void)
{
return get_l1_scratch_start_cpu(0);
}
-static inline ulong get_l1_code_start(void)
+static inline unsigned long get_l1_code_start(void)
{
return get_l1_code_start_cpu(0);
}
-static inline ulong get_l1_data_a_start(void)
+static inline unsigned long get_l1_data_a_start(void)
{
return get_l1_data_a_start_cpu(0);
}
-static inline ulong get_l1_data_b_start(void)
+static inline unsigned long get_l1_data_b_start(void)
{
return get_l1_data_b_start_cpu(0);
}
-#endif /* CONFIG_SMP */
-#endif /* __ASSEMBLY__ */
+# endif /* __ASSEMBLY__ */
+#endif /* !GET_PDA_SAFE */
-#endif /* _MEM_MAP_H_ */
+#endif
diff --git a/arch/blackfin/include/asm/system.h b/arch/blackfin/include/asm/system.h
index 294dbda..85e8f16 100644
--- a/arch/blackfin/include/asm/system.h
+++ b/arch/blackfin/include/asm/system.h
@@ -135,11 +135,13 @@
};
#define __xg(x) ((volatile struct __xchg_dummy *)(x))
+#include <mach/blackfin.h>
+
static inline unsigned long __xchg(unsigned long x, volatile void *ptr,
int size)
{
unsigned long tmp = 0;
- unsigned long flags = 0;
+ unsigned long flags;
local_irq_save_hw(flags);
diff --git a/arch/blackfin/include/asm/traps.h b/arch/blackfin/include/asm/traps.h
index 34f7295..3cdc454 100644
--- a/arch/blackfin/include/asm/traps.h
+++ b/arch/blackfin/include/asm/traps.h
@@ -111,9 +111,7 @@
level " bits in the Watchpoint Instruction Address Control register (WPIACTL) is set.\n"
#define EXC_0x2A(level) \
"Instruction fetch misaligned address violation\n" \
- level " - Attempted misaligned instruction cache fetch. On a misaligned instruction fetch\n" \
- level " exception, the return address provided in RETX is the destination address which is\n" \
- level " misaligned, rather than the address of the offending instruction.\n"
+ level " - Attempted misaligned instruction cache fetch.\n"
#define EXC_0x2B(level) \
"CPLB protection violation\n" \
level " - Illegal instruction fetch access (memory protection violation).\n"
diff --git a/arch/blackfin/include/asm/uaccess.h b/arch/blackfin/include/asm/uaccess.h
index 8894e9f..2f469a1 100644
--- a/arch/blackfin/include/asm/uaccess.h
+++ b/arch/blackfin/include/asm/uaccess.h
@@ -265,4 +265,26 @@
#define clear_user(to, n) __clear_user(to, n)
+/* How to interpret these return values:
+ * CORE: can be accessed by core load or dma memcpy
+ * CORE_ONLY: can only be accessed by core load
+ * DMA: can only be accessed by dma memcpy
+ * IDMA: can only be accessed by interprocessor dma memcpy (BF561)
+ * ITEST: can be accessed by isram memcpy or dma memcpy
+ */
+enum {
+ BFIN_MEM_ACCESS_CORE = 0,
+ BFIN_MEM_ACCESS_CORE_ONLY,
+ BFIN_MEM_ACCESS_DMA,
+ BFIN_MEM_ACCESS_IDMA,
+ BFIN_MEM_ACCESS_ITEST,
+};
+/**
+ * bfin_mem_access_type() - what kind of memory access is required
+ * @addr: the address to check
+ * @size: number of bytes needed
+ * @return: <0 is error, >=0 is BFIN_MEM_ACCESS_xxx enum (see above)
+ */
+int bfin_mem_access_type(unsigned long addr, unsigned long size);
+
#endif /* _BLACKFIN_UACCESS_H */
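The dispatch on bfin_mem_access_type()'s return value follows one pattern throughout this series; the following minimal sketch of a caller is modeled on bfin_probe_kernel_read() in the kgdb.c changes below (the helper name and error handling here are illustrative only, not part of the patch):

/* Illustrative sketch: choose a copy routine based on what kind of access
 * the source region supports.  probe_kernel_read(), dma_memcpy() and
 * isram_memcpy() are the same routines used by the kgdb changes below;
 * dma_memcpy()/isram_memcpy() return non-zero (the destination) on success.
 */
static int example_safe_read(void *dst, void *src, size_t size)
{
	switch (bfin_mem_access_type((unsigned long)src, size)) {
	case BFIN_MEM_ACCESS_CORE:
	case BFIN_MEM_ACCESS_CORE_ONLY:
		return probe_kernel_read(dst, src, size);
	case BFIN_MEM_ACCESS_DMA:
		return dma_memcpy(dst, src, size) ? 0 : -EFAULT;
	case BFIN_MEM_ACCESS_ITEST:
		return isram_memcpy(dst, src, size) ? 0 : -EFAULT;
	default:
		/* negative error from the lookup, or IDMA (not handled here) */
		return -EFAULT;
	}
}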
diff --git a/arch/blackfin/include/asm/unistd.h b/arch/blackfin/include/asm/unistd.h
index da35133..c8e7ee4 100644
--- a/arch/blackfin/include/asm/unistd.h
+++ b/arch/blackfin/include/asm/unistd.h
@@ -381,8 +381,9 @@
#define __NR_preadv 366
#define __NR_pwritev 367
#define __NR_rt_tgsigqueueinfo 368
+#define __NR_perf_counter_open 369
-#define __NR_syscall 369
+#define __NR_syscall 370
#define NR_syscalls __NR_syscall
/* Old optional stuff no one actually uses */
diff --git a/arch/blackfin/kernel/Makefile b/arch/blackfin/kernel/Makefile
index 3731088..141d928 100644
--- a/arch/blackfin/kernel/Makefile
+++ b/arch/blackfin/kernel/Makefile
@@ -20,7 +20,6 @@
CFLAGS_REMOVE_ftrace.o = -pg
obj-$(CONFIG_IPIPE) += ipipe.o
-obj-$(CONFIG_IPIPE_TRACE_MCOUNT) += mcount.o
obj-$(CONFIG_BFIN_GPTIMERS) += gptimers.o
obj-$(CONFIG_CPLB_INFO) += cplbinfo.o
obj-$(CONFIG_MODULES) += module.o
diff --git a/arch/blackfin/kernel/cplb-mpu/cplbinit.c b/arch/blackfin/kernel/cplb-mpu/cplbinit.c
index c006a44..36193ee 100644
--- a/arch/blackfin/kernel/cplb-mpu/cplbinit.c
+++ b/arch/blackfin/kernel/cplb-mpu/cplbinit.c
@@ -46,13 +46,13 @@
printk(KERN_INFO "MPU: setting up cplb tables with memory protection\n");
-#ifdef CONFIG_BFIN_ICACHE
+#ifdef CONFIG_BFIN_EXTMEM_ICACHEABLE
i_cache = CPLB_L1_CHBL | ANOMALY_05000158_WORKAROUND;
#endif
-#ifdef CONFIG_BFIN_DCACHE
+#ifdef CONFIG_BFIN_EXTMEM_DCACHEABLE
d_cache = CPLB_L1_CHBL;
-#ifdef CONFIG_BFIN_WT
+#ifdef CONFIG_BFIN_EXTMEM_WRITETHROUGH
d_cache |= CPLB_L1_AOW | CPLB_WT;
#endif
#endif
@@ -91,9 +91,9 @@
/* Cover L2 memory */
#if L2_LENGTH > 0
dcplb_tbl[cpu][i_d].addr = L2_START;
- dcplb_tbl[cpu][i_d++].data = L2_DMEMORY | PAGE_SIZE_1MB;
+ dcplb_tbl[cpu][i_d++].data = L2_DMEMORY;
icplb_tbl[cpu][i_i].addr = L2_START;
- icplb_tbl[cpu][i_i++].data = L2_IMEMORY | PAGE_SIZE_1MB;
+ icplb_tbl[cpu][i_i++].data = L2_IMEMORY;
#endif
first_mask_dcplb = i_d;
diff --git a/arch/blackfin/kernel/cplb-mpu/cplbmgr.c b/arch/blackfin/kernel/cplb-mpu/cplbmgr.c
index 784923e..bcdfe9b 100644
--- a/arch/blackfin/kernel/cplb-mpu/cplbmgr.c
+++ b/arch/blackfin/kernel/cplb-mpu/cplbmgr.c
@@ -150,15 +150,19 @@
nr_dcplb_miss[cpu]++;
d_data = CPLB_SUPV_WR | CPLB_VALID | CPLB_DIRTY | PAGE_SIZE_4KB;
-#ifdef CONFIG_BFIN_DCACHE
+#ifdef CONFIG_BFIN_EXTMEM_DCACHEABLE
if (bfin_addr_dcacheable(addr)) {
d_data |= CPLB_L1_CHBL | ANOMALY_05000158_WORKAROUND;
-#ifdef CONFIG_BFIN_WT
+# ifdef CONFIG_BFIN_EXTMEM_WRITETHROUGH
d_data |= CPLB_L1_AOW | CPLB_WT;
-#endif
+# endif
}
#endif
- if (addr >= physical_mem_end) {
+
+ if (L2_LENGTH && addr >= L2_START && addr < L2_START + L2_LENGTH) {
+ addr = L2_START;
+ d_data = L2_DMEMORY;
+ } else if (addr >= physical_mem_end) {
if (addr >= ASYNC_BANK0_BASE && addr < ASYNC_BANK3_BASE + ASYNC_BANK3_SIZE
&& (status & FAULT_USERSUPV)) {
addr &= ~0x3fffff;
@@ -235,7 +239,7 @@
i_data = CPLB_VALID | CPLB_PORTPRIO | PAGE_SIZE_4KB;
-#ifdef CONFIG_BFIN_ICACHE
+#ifdef CONFIG_BFIN_EXTMEM_ICACHEABLE
/*
* Normal RAM, and possibly the reserved memory area, are
* cacheable.
@@ -245,7 +249,10 @@
i_data |= CPLB_L1_CHBL | ANOMALY_05000158_WORKAROUND;
#endif
- if (addr >= physical_mem_end) {
+ if (L2_LENGTH && addr >= L2_START && addr < L2_START + L2_LENGTH) {
+ addr = L2_START;
+ i_data = L2_IMEMORY;
+ } else if (addr >= physical_mem_end) {
if (addr >= BOOT_ROM_START && addr < BOOT_ROM_START + BOOT_ROM_LENGTH
&& (status & FAULT_USERSUPV)) {
addr &= ~(1 * 1024 * 1024 - 1);
@@ -365,13 +372,18 @@
local_irq_save_hw(flags);
current_rwx_mask[cpu] = masks;
- d_data = CPLB_SUPV_WR | CPLB_VALID | CPLB_DIRTY | PAGE_SIZE_4KB;
-#ifdef CONFIG_BFIN_DCACHE
- d_data |= CPLB_L1_CHBL;
-#ifdef CONFIG_BFIN_WT
- d_data |= CPLB_L1_AOW | CPLB_WT;
+ if (L2_LENGTH && addr >= L2_START && addr < L2_START + L2_LENGTH) {
+ addr = L2_START;
+ d_data = L2_DMEMORY;
+ } else {
+ d_data = CPLB_SUPV_WR | CPLB_VALID | CPLB_DIRTY | PAGE_SIZE_4KB;
+#ifdef CONFIG_BFIN_EXTMEM_DCACHEABLE
+ d_data |= CPLB_L1_CHBL;
+# ifdef CONFIG_BFIN_EXTMEM_WRITETHROUGH
+ d_data |= CPLB_L1_AOW | CPLB_WT;
+# endif
#endif
-#endif
+ }
disable_dcplb();
for (i = first_mask_dcplb; i < first_switched_dcplb; i++) {
diff --git a/arch/blackfin/kernel/ipipe.c b/arch/blackfin/kernel/ipipe.c
index d8cde1f..b8d2203 100644
--- a/arch/blackfin/kernel/ipipe.c
+++ b/arch/blackfin/kernel/ipipe.c
@@ -52,7 +52,7 @@
atomic_t __ipipe_irq_lvdepth[IVG15 + 1];
-unsigned long __ipipe_irq_lvmask = __all_masked_irq_flags;
+unsigned long __ipipe_irq_lvmask = bfin_no_irqs;
EXPORT_SYMBOL(__ipipe_irq_lvmask);
static void __ipipe_ack_irq(unsigned irq, struct irq_desc *desc)
@@ -342,8 +342,3 @@
}
EXPORT_SYMBOL(show_stack);
-
-#ifdef CONFIG_IPIPE_TRACE_MCOUNT
-void notrace _mcount(void);
-EXPORT_SYMBOL(_mcount);
-#endif /* CONFIG_IPIPE_TRACE_MCOUNT */
diff --git a/arch/blackfin/kernel/irqchip.c b/arch/blackfin/kernel/irqchip.c
index 6e31e93..4b5fd36 100644
--- a/arch/blackfin/kernel/irqchip.c
+++ b/arch/blackfin/kernel/irqchip.c
@@ -38,38 +38,15 @@
#include <asm/pda.h>
static atomic_t irq_err_count;
-static spinlock_t irq_controller_lock;
-
-/*
- * Dummy mask/unmask handler
- */
-void dummy_mask_unmask_irq(unsigned int irq)
-{
-}
-
void ack_bad_irq(unsigned int irq)
{
atomic_inc(&irq_err_count);
printk(KERN_ERR "IRQ: spurious interrupt %d\n", irq);
}
-static struct irq_chip bad_chip = {
- .ack = dummy_mask_unmask_irq,
- .mask = dummy_mask_unmask_irq,
- .unmask = dummy_mask_unmask_irq,
-};
-
-static int bad_stats;
static struct irq_desc bad_irq_desc = {
- .status = IRQ_DISABLED,
- .chip = &bad_chip,
.handle_irq = handle_bad_irq,
- .depth = 1,
.lock = __SPIN_LOCK_UNLOCKED(irq_desc->lock),
- .kstat_irqs = &bad_stats,
-#ifdef CONFIG_SMP
- .affinity = CPU_MASK_ALL
-#endif
};
#ifdef CONFIG_CPUMASK_OFFSTACK
@@ -77,6 +54,7 @@
#error "Blackfin architecture does not support CONFIG_CPUMASK_OFFSTACK."
#endif
+#ifdef CONFIG_PROC_FS
int show_interrupts(struct seq_file *p, void *v)
{
int i = *(loff_t *) v, j;
@@ -108,50 +86,29 @@
}
return 0;
}
-
-/*
- * do_IRQ handles all hardware IRQs. Decoded IRQs should not
- * come via this function. Instead, they should provide their
- * own 'handler'
- */
-#ifdef CONFIG_DO_IRQ_L1
-__attribute__((l1_text))
#endif
-asmlinkage void asm_do_IRQ(unsigned int irq, struct pt_regs *regs)
-{
- struct pt_regs *old_regs;
- struct irq_desc *desc = irq_desc + irq;
-#ifndef CONFIG_IPIPE
- unsigned short pending, other_ints;
-#endif
- old_regs = set_irq_regs(regs);
- /*
- * Some hardware gives randomly wrong interrupts. Rather
- * than crashing, do something sensible.
- */
- if (irq >= NR_IRQS)
- desc = &bad_irq_desc;
-
- irq_enter();
#ifdef CONFIG_DEBUG_STACKOVERFLOW
+static void check_stack_overflow(int irq)
+{
/* Debugging check for stack overflow: is there less than STACK_WARN free? */
- {
- long sp;
+ long sp = __get_SP() & (THREAD_SIZE - 1);
- sp = __get_SP() & (THREAD_SIZE-1);
-
- if (unlikely(sp < (sizeof(struct thread_info) + STACK_WARN))) {
- dump_stack();
- printk(KERN_EMERG "%s: possible stack overflow while handling irq %i "
- " only %ld bytes free\n",
- __func__, irq, sp - sizeof(struct thread_info));
- }
+ if (unlikely(sp < (sizeof(struct thread_info) + STACK_WARN))) {
+ dump_stack();
+ pr_emerg("irq%i: possible stack overflow only %ld bytes free\n",
+ irq, sp - sizeof(struct thread_info));
}
+}
+#else
+static inline void check_stack_overflow(int irq) { }
#endif
- generic_handle_irq(irq);
#ifndef CONFIG_IPIPE
+static void maybe_lower_to_irq14(void)
+{
+ unsigned short pending, other_ints;
+
/*
* If we're the only interrupt running (ignoring IRQ15 which
* is for syscalls), lower our priority to IRQ14 so that
@@ -165,7 +122,38 @@
other_ints = pending & (pending - 1);
if (other_ints == 0)
lower_to_irq14();
-#endif /* !CONFIG_IPIPE */
+}
+#else
+static inline void maybe_lower_to_irq14(void) { }
+#endif
+
+/*
+ * do_IRQ handles all hardware IRQs. Decoded IRQs should not
+ * come via this function. Instead, they should provide their
+ * own 'handler'
+ */
+#ifdef CONFIG_DO_IRQ_L1
+__attribute__((l1_text))
+#endif
+asmlinkage void asm_do_IRQ(unsigned int irq, struct pt_regs *regs)
+{
+ struct pt_regs *old_regs = set_irq_regs(regs);
+
+ irq_enter();
+
+ check_stack_overflow(irq);
+
+ /*
+ * Some hardware gives randomly wrong interrupts. Rather
+ * than crashing, do something sensible.
+ */
+ if (irq >= NR_IRQS)
+ handle_bad_irq(irq, &bad_irq_desc);
+ else
+ generic_handle_irq(irq);
+
+ maybe_lower_to_irq14();
+
irq_exit();
set_irq_regs(old_regs);
@@ -173,14 +161,6 @@
void __init init_IRQ(void)
{
- struct irq_desc *desc;
- int irq;
-
- spin_lock_init(&irq_controller_lock);
- for (irq = 0, desc = irq_desc; irq < NR_IRQS; irq++, desc++) {
- *desc = bad_irq_desc;
- }
-
init_arch_irq();
#ifdef CONFIG_DEBUG_BFIN_HWTRACE_EXPAND
diff --git a/arch/blackfin/kernel/kgdb.c b/arch/blackfin/kernel/kgdb.c
index da28f79..cce79d0 100644
--- a/arch/blackfin/kernel/kgdb.c
+++ b/arch/blackfin/kernel/kgdb.c
@@ -34,15 +34,6 @@
#error change the definition of slavecpulocks
#endif
-#define IN_MEM(addr, size, l1_addr, l1_size) \
-({ \
- unsigned long __addr = (unsigned long)(addr); \
- (l1_size && __addr >= l1_addr && __addr + (size) <= l1_addr + l1_size); \
-})
-#define ASYNC_BANK_SIZE \
- (ASYNC_BANK0_SIZE + ASYNC_BANK1_SIZE + \
- ASYNC_BANK2_SIZE + ASYNC_BANK3_SIZE)
-
void pt_regs_to_gdb_regs(unsigned long *gdb_regs, struct pt_regs *regs)
{
gdb_regs[BFIN_R0] = regs->r0;
@@ -463,40 +454,87 @@
static int validate_memory_access_address(unsigned long addr, int size)
{
- int cpu = raw_smp_processor_id();
-
- if (size < 0)
+ if (size < 0 || addr == 0)
return -EFAULT;
- if (addr >= 0x1000 && (addr + size) <= physical_mem_end)
- return 0;
- if (addr >= SYSMMR_BASE)
- return 0;
- if (IN_MEM(addr, size, ASYNC_BANK0_BASE, ASYNC_BANK_SIZE))
- return 0;
- if (cpu == 0) {
- if (IN_MEM(addr, size, L1_SCRATCH_START, L1_SCRATCH_LENGTH))
+ return bfin_mem_access_type(addr, size);
+}
+
+static int bfin_probe_kernel_read(char *dst, char *src, int size)
+{
+ unsigned long lsrc = (unsigned long)src;
+ int mem_type;
+
+ mem_type = validate_memory_access_address(lsrc, size);
+ if (mem_type < 0)
+ return mem_type;
+
+ if (lsrc >= SYSMMR_BASE) {
+ if (size == 2 && lsrc % 2 == 0) {
+ u16 mmr = bfin_read16(src);
+ memcpy(dst, &mmr, sizeof(mmr));
return 0;
- if (IN_MEM(addr, size, L1_CODE_START, L1_CODE_LENGTH))
+ } else if (size == 4 && lsrc % 4 == 0) {
+ u32 mmr = bfin_read32(src);
+ memcpy(dst, &mmr, sizeof(mmr));
return 0;
- if (IN_MEM(addr, size, L1_DATA_A_START, L1_DATA_A_LENGTH))
- return 0;
- if (IN_MEM(addr, size, L1_DATA_B_START, L1_DATA_B_LENGTH))
- return 0;
-#ifdef CONFIG_SMP
- } else if (cpu == 1) {
- if (IN_MEM(addr, size, COREB_L1_SCRATCH_START, L1_SCRATCH_LENGTH))
- return 0;
- if (IN_MEM(addr, size, COREB_L1_CODE_START, L1_CODE_LENGTH))
- return 0;
- if (IN_MEM(addr, size, COREB_L1_DATA_A_START, L1_DATA_A_LENGTH))
- return 0;
- if (IN_MEM(addr, size, COREB_L1_DATA_B_START, L1_DATA_B_LENGTH))
- return 0;
-#endif
+ }
+ } else {
+ switch (mem_type) {
+ case BFIN_MEM_ACCESS_CORE:
+ case BFIN_MEM_ACCESS_CORE_ONLY:
+ return probe_kernel_read(dst, src, size);
+ /* XXX: should support IDMA here with SMP */
+ case BFIN_MEM_ACCESS_DMA:
+ if (dma_memcpy(dst, src, size))
+ return 0;
+ break;
+ case BFIN_MEM_ACCESS_ITEST:
+ if (isram_memcpy(dst, src, size))
+ return 0;
+ break;
+ }
}
- if (IN_MEM(addr, size, L2_START, L2_LENGTH))
- return 0;
+ return -EFAULT;
+}
+
+static int bfin_probe_kernel_write(char *dst, char *src, int size)
+{
+ unsigned long ldst = (unsigned long)dst;
+ int mem_type;
+
+ mem_type = validate_memory_access_address(ldst, size);
+ if (mem_type < 0)
+ return mem_type;
+
+ if (ldst >= SYSMMR_BASE) {
+ if (size == 2 && ldst % 2 == 0) {
+ u16 mmr;
+ memcpy(&mmr, src, sizeof(mmr));
+ bfin_write16(dst, mmr);
+ return 0;
+ } else if (size == 4 && ldst % 4 == 0) {
+ u32 mmr;
+ memcpy(&mmr, src, sizeof(mmr));
+ bfin_write32(dst, mmr);
+ return 0;
+ }
+ } else {
+ switch (mem_type) {
+ case BFIN_MEM_ACCESS_CORE:
+ case BFIN_MEM_ACCESS_CORE_ONLY:
+ return probe_kernel_write(dst, src, size);
+ /* XXX: should support IDMA here with SMP */
+ case BFIN_MEM_ACCESS_DMA:
+ if (dma_memcpy(dst, src, size))
+ return 0;
+ break;
+ case BFIN_MEM_ACCESS_ITEST:
+ if (isram_memcpy(dst, src, size))
+ return 0;
+ break;
+ }
+ }
return -EFAULT;
}
@@ -509,14 +547,6 @@
{
char *tmp;
int err;
- unsigned char *pch;
- unsigned short mmr16;
- unsigned long mmr32;
- int cpu = raw_smp_processor_id();
-
- err = validate_memory_access_address((unsigned long)mem, count);
- if (err)
- return err;
/*
* We use the upper half of buf as an intermediate buffer for the
@@ -524,44 +554,7 @@
*/
tmp = buf + count;
- if ((unsigned int)mem >= SYSMMR_BASE) { /*access MMR registers*/
- switch (count) {
- case 2:
- if ((unsigned int)mem % 2 == 0) {
- mmr16 = *(unsigned short *)mem;
- pch = (unsigned char *)&mmr16;
- *tmp++ = *pch++;
- *tmp++ = *pch++;
- tmp -= 2;
- } else
- err = -EFAULT;
- break;
- case 4:
- if ((unsigned int)mem % 4 == 0) {
- mmr32 = *(unsigned long *)mem;
- pch = (unsigned char *)&mmr32;
- *tmp++ = *pch++;
- *tmp++ = *pch++;
- *tmp++ = *pch++;
- *tmp++ = *pch++;
- tmp -= 4;
- } else
- err = -EFAULT;
- break;
- default:
- err = -EFAULT;
- }
- } else if ((cpu == 0 && IN_MEM(mem, count, L1_CODE_START, L1_CODE_LENGTH))
-#ifdef CONFIG_SMP
- || (cpu == 1 && IN_MEM(mem, count, COREB_L1_CODE_START, L1_CODE_LENGTH))
-#endif
- ) {
- /* access L1 instruction SRAM*/
- if (dma_memcpy(tmp, mem, count) == NULL)
- err = -EFAULT;
- } else
- err = probe_kernel_read(tmp, mem, count);
-
+ err = bfin_probe_kernel_read(tmp, mem, count);
if (!err) {
while (count > 0) {
buf = pack_hex_byte(buf, *tmp);
@@ -582,13 +575,8 @@
*/
int kgdb_ebin2mem(char *buf, char *mem, int count)
{
- char *tmp_old;
- char *tmp_new;
- unsigned short *mmr16;
- unsigned long *mmr32;
- int err;
+ char *tmp_old, *tmp_new;
int size;
- int cpu = raw_smp_processor_id();
tmp_old = tmp_new = buf;
@@ -601,41 +589,7 @@
tmp_old++;
}
- err = validate_memory_access_address((unsigned long)mem, size);
- if (err)
- return err;
-
- if ((unsigned int)mem >= SYSMMR_BASE) { /*access MMR registers*/
- switch (size) {
- case 2:
- if ((unsigned int)mem % 2 == 0) {
- mmr16 = (unsigned short *)buf;
- *(unsigned short *)mem = *mmr16;
- } else
- err = -EFAULT;
- break;
- case 4:
- if ((unsigned int)mem % 4 == 0) {
- mmr32 = (unsigned long *)buf;
- *(unsigned long *)mem = *mmr32;
- } else
- err = -EFAULT;
- break;
- default:
- err = -EFAULT;
- }
- } else if ((cpu == 0 && IN_MEM(mem, count, L1_CODE_START, L1_CODE_LENGTH))
-#ifdef CONFIG_SMP
- || (cpu == 1 && IN_MEM(mem, count, COREB_L1_CODE_START, L1_CODE_LENGTH))
-#endif
- ) {
- /* access L1 instruction SRAM */
- if (dma_memcpy(mem, buf, size) == NULL)
- err = -EFAULT;
- } else
- err = probe_kernel_write(mem, buf, size);
-
- return err;
+ return bfin_probe_kernel_write(mem, buf, count);
}
/*
@@ -645,16 +599,7 @@
*/
int kgdb_hex2mem(char *buf, char *mem, int count)
{
- char *tmp_raw;
- char *tmp_hex;
- unsigned short *mmr16;
- unsigned long *mmr32;
- int err;
- int cpu = raw_smp_processor_id();
-
- err = validate_memory_access_address((unsigned long)mem, count);
- if (err)
- return err;
+ char *tmp_raw, *tmp_hex;
/*
* We use the upper half of buf as an intermediate buffer for the
@@ -669,39 +614,18 @@
*tmp_raw |= hex(*tmp_hex--) << 4;
}
- if ((unsigned int)mem >= SYSMMR_BASE) { /*access MMR registers*/
- switch (count) {
- case 2:
- if ((unsigned int)mem % 2 == 0) {
- mmr16 = (unsigned short *)tmp_raw;
- *(unsigned short *)mem = *mmr16;
- } else
- err = -EFAULT;
- break;
- case 4:
- if ((unsigned int)mem % 4 == 0) {
- mmr32 = (unsigned long *)tmp_raw;
- *(unsigned long *)mem = *mmr32;
- } else
- err = -EFAULT;
- break;
- default:
- err = -EFAULT;
- }
- } else if ((cpu == 0 && IN_MEM(mem, count, L1_CODE_START, L1_CODE_LENGTH))
-#ifdef CONFIG_SMP
- || (cpu == 1 && IN_MEM(mem, count, COREB_L1_CODE_START, L1_CODE_LENGTH))
-#endif
- ) {
- /* access L1 instruction SRAM */
- if (dma_memcpy(mem, tmp_raw, count) == NULL)
- err = -EFAULT;
- } else
- err = probe_kernel_write(mem, tmp_raw, count);
-
- return err;
+ return bfin_probe_kernel_write(mem, tmp_raw, count);
}
+#define IN_MEM(addr, size, l1_addr, l1_size) \
+({ \
+ unsigned long __addr = (unsigned long)(addr); \
+ (l1_size && __addr >= l1_addr && __addr + (size) <= l1_addr + l1_size); \
+})
+#define ASYNC_BANK_SIZE \
+ (ASYNC_BANK0_SIZE + ASYNC_BANK1_SIZE + \
+ ASYNC_BANK2_SIZE + ASYNC_BANK3_SIZE)
+
int kgdb_validate_break_address(unsigned long addr)
{
int cpu = raw_smp_processor_id();
@@ -724,46 +648,17 @@
int kgdb_arch_set_breakpoint(unsigned long addr, char *saved_instr)
{
- int err;
- int cpu = raw_smp_processor_id();
-
- if ((cpu == 0 && IN_MEM(addr, BREAK_INSTR_SIZE, L1_CODE_START, L1_CODE_LENGTH))
-#ifdef CONFIG_SMP
- || (cpu == 1 && IN_MEM(addr, BREAK_INSTR_SIZE, COREB_L1_CODE_START, L1_CODE_LENGTH))
-#endif
- ) {
- /* access L1 instruction SRAM */
- if (dma_memcpy(saved_instr, (void *)addr, BREAK_INSTR_SIZE)
- == NULL)
- return -EFAULT;
-
- if (dma_memcpy((void *)addr, arch_kgdb_ops.gdb_bpt_instr,
- BREAK_INSTR_SIZE) == NULL)
- return -EFAULT;
-
- return 0;
- } else {
- err = probe_kernel_read(saved_instr, (char *)addr,
- BREAK_INSTR_SIZE);
- if (err)
- return err;
-
- return probe_kernel_write((char *)addr,
- arch_kgdb_ops.gdb_bpt_instr, BREAK_INSTR_SIZE);
- }
+ int err = bfin_probe_kernel_read(saved_instr, (char *)addr,
+ BREAK_INSTR_SIZE);
+ if (err)
+ return err;
+ return bfin_probe_kernel_write((char *)addr, arch_kgdb_ops.gdb_bpt_instr,
+ BREAK_INSTR_SIZE);
}
int kgdb_arch_remove_breakpoint(unsigned long addr, char *bundle)
{
- if (IN_MEM(addr, BREAK_INSTR_SIZE, L1_CODE_START, L1_CODE_LENGTH)) {
- /* access L1 instruction SRAM */
- if (dma_memcpy((void *)addr, bundle, BREAK_INSTR_SIZE) == NULL)
- return -EFAULT;
-
- return 0;
- } else
- return probe_kernel_write((char *)addr,
- (char *)bundle, BREAK_INSTR_SIZE);
+ return bfin_probe_kernel_write((char *)addr, bundle, BREAK_INSTR_SIZE);
}
int kgdb_arch_init(void)
diff --git a/arch/blackfin/kernel/mcount.S b/arch/blackfin/kernel/mcount.S
deleted file mode 100644
index edcfb38..0000000
--- a/arch/blackfin/kernel/mcount.S
+++ /dev/null
@@ -1,70 +0,0 @@
-/*
- * linux/arch/blackfin/mcount.S
- *
- * Copyright (C) 2006 Analog Devices Inc.
- *
- * 2007/04/12 Save index, length, modify and base registers. --rpm
- */
-
-#include <linux/linkage.h>
-#include <asm/blackfin.h>
-
-.text
-
-.align 4 /* just in case */
-
-ENTRY(__mcount)
- [--sp] = i0;
- [--sp] = i1;
- [--sp] = i2;
- [--sp] = i3;
- [--sp] = l0;
- [--sp] = l1;
- [--sp] = l2;
- [--sp] = l3;
- [--sp] = m0;
- [--sp] = m1;
- [--sp] = m2;
- [--sp] = m3;
- [--sp] = b0;
- [--sp] = b1;
- [--sp] = b2;
- [--sp] = b3;
- [--sp] = ( r7:0, p5:0 );
- [--sp] = ASTAT;
-
- p1.L = _ipipe_trace_enable;
- p1.H = _ipipe_trace_enable;
- r7 = [p1];
- CC = r7 == 0;
- if CC jump out;
- link 0x10;
- r0 = 0x0;
- [sp + 0xc] = r0; /* v */
- r0 = 0x0; /* type: IPIPE_TRACE_FN */
- r1 = rets;
- p0 = [fp]; /* p0: Prior FP */
- r2 = [p0 + 4]; /* r2: Prior RETS */
- call ___ipipe_trace;
- unlink;
-out:
- ASTAT = [sp++];
- ( r7:0, p5:0 ) = [sp++];
- b3 = [sp++];
- b2 = [sp++];
- b1 = [sp++];
- b0 = [sp++];
- m3 = [sp++];
- m2 = [sp++];
- m1 = [sp++];
- m0 = [sp++];
- l3 = [sp++];
- l2 = [sp++];
- l1 = [sp++];
- l0 = [sp++];
- i3 = [sp++];
- i2 = [sp++];
- i1 = [sp++];
- i0 = [sp++];
- rts;
-ENDPROC(__mcount)
diff --git a/arch/blackfin/kernel/process.c b/arch/blackfin/kernel/process.c
index 3e1d86e..79cad0a 100644
--- a/arch/blackfin/kernel/process.c
+++ b/arch/blackfin/kernel/process.c
@@ -344,6 +344,87 @@
}
}
+static inline
+int in_mem(unsigned long addr, unsigned long size,
+ unsigned long start, unsigned long end)
+{
+ return addr >= start && addr + size <= end;
+}
+static inline
+int in_mem_const_off(unsigned long addr, unsigned long size, unsigned long off,
+ unsigned long const_addr, unsigned long const_size)
+{
+ return const_size &&
+ in_mem(addr, size, const_addr + off, const_addr + const_size);
+}
+static inline
+int in_mem_const(unsigned long addr, unsigned long size,
+ unsigned long const_addr, unsigned long const_size)
+{
+ return in_mem_const_off(addr, size, 0, const_addr, const_size);
+}
+#define IN_ASYNC(bnum, bctlnum) \
+({ \
+ (bfin_read_EBIU_AMGCTL() & 0xe) < ((bnum + 1) << 1) ? -EFAULT : \
+ bfin_read_EBIU_AMBCTL##bctlnum() & B##bnum##RDYEN ? -EFAULT : \
+ BFIN_MEM_ACCESS_CORE; \
+})
+
+int bfin_mem_access_type(unsigned long addr, unsigned long size)
+{
+ int cpu = raw_smp_processor_id();
+
+ /* Check that things do not wrap around */
+ if (addr > ULONG_MAX - size)
+ return -EFAULT;
+
+ if (in_mem(addr, size, FIXED_CODE_START, physical_mem_end))
+ return BFIN_MEM_ACCESS_CORE;
+
+ if (in_mem_const(addr, size, L1_CODE_START, L1_CODE_LENGTH))
+ return cpu == 0 ? BFIN_MEM_ACCESS_ITEST : BFIN_MEM_ACCESS_IDMA;
+ if (in_mem_const(addr, size, L1_SCRATCH_START, L1_SCRATCH_LENGTH))
+ return cpu == 0 ? BFIN_MEM_ACCESS_CORE_ONLY : -EFAULT;
+ if (in_mem_const(addr, size, L1_DATA_A_START, L1_DATA_A_LENGTH))
+ return cpu == 0 ? BFIN_MEM_ACCESS_CORE : BFIN_MEM_ACCESS_IDMA;
+ if (in_mem_const(addr, size, L1_DATA_B_START, L1_DATA_B_LENGTH))
+ return cpu == 0 ? BFIN_MEM_ACCESS_CORE : BFIN_MEM_ACCESS_IDMA;
+#ifdef COREB_L1_CODE_START
+ if (in_mem_const(addr, size, COREB_L1_CODE_START, L1_CODE_LENGTH))
+ return cpu == 1 ? BFIN_MEM_ACCESS_ITEST : BFIN_MEM_ACCESS_IDMA;
+ if (in_mem_const(addr, size, COREB_L1_SCRATCH_START, L1_SCRATCH_LENGTH))
+ return cpu == 1 ? BFIN_MEM_ACCESS_CORE_ONLY : -EFAULT;
+ if (in_mem_const(addr, size, COREB_L1_DATA_A_START, L1_DATA_A_LENGTH))
+ return cpu == 1 ? BFIN_MEM_ACCESS_CORE : BFIN_MEM_ACCESS_IDMA;
+ if (in_mem_const(addr, size, COREB_L1_DATA_B_START, L1_DATA_B_LENGTH))
+ return cpu == 1 ? BFIN_MEM_ACCESS_CORE : BFIN_MEM_ACCESS_IDMA;
+#endif
+ if (in_mem_const(addr, size, L2_START, L2_LENGTH))
+ return BFIN_MEM_ACCESS_CORE;
+
+ if (addr >= SYSMMR_BASE)
+ return BFIN_MEM_ACCESS_CORE_ONLY;
+
+ /* We can't read EBIU banks that aren't enabled or we end up hanging
+ * on the access to the async space.
+ */
+ if (in_mem_const(addr, size, ASYNC_BANK0_BASE, ASYNC_BANK0_SIZE))
+ return IN_ASYNC(0, 0);
+ if (in_mem_const(addr, size, ASYNC_BANK1_BASE, ASYNC_BANK1_SIZE))
+ return IN_ASYNC(1, 0);
+ if (in_mem_const(addr, size, ASYNC_BANK2_BASE, ASYNC_BANK2_SIZE))
+ return IN_ASYNC(2, 1);
+ if (in_mem_const(addr, size, ASYNC_BANK3_BASE, ASYNC_BANK3_SIZE))
+ return IN_ASYNC(3, 1);
+
+ if (in_mem_const(addr, size, BOOT_ROM_START, BOOT_ROM_LENGTH))
+ return BFIN_MEM_ACCESS_CORE;
+ if (in_mem_const(addr, size, L1_ROM_START, L1_ROM_LENGTH))
+ return BFIN_MEM_ACCESS_DMA;
+
+ return -EFAULT;
+}
+
#if defined(CONFIG_ACCESS_CHECK)
#ifdef CONFIG_ACCESS_OK_L1
__attribute__((l1_text))
@@ -353,51 +434,61 @@
{
if (size == 0)
return 1;
- if (addr > (addr + size))
+ /* Check that things do not wrap around */
+ if (addr > ULONG_MAX - size)
return 0;
if (segment_eq(get_fs(), KERNEL_DS))
return 1;
#ifdef CONFIG_MTD_UCLINUX
- if (addr >= memory_start && (addr + size) <= memory_end)
- return 1;
- if (addr >= memory_mtd_end && (addr + size) <= physical_mem_end)
+ if (1)
+#else
+ if (0)
+#endif
+ {
+ if (in_mem(addr, size, memory_start, memory_end))
+ return 1;
+ if (in_mem(addr, size, memory_mtd_end, physical_mem_end))
+ return 1;
+# ifndef CONFIG_ROMFS_ON_MTD
+ if (0)
+# endif
+ /* For XIP, allow user space to use pointers within the ROMFS. */
+ if (in_mem(addr, size, memory_mtd_start, memory_mtd_end))
+ return 1;
+ } else {
+ if (in_mem(addr, size, memory_start, physical_mem_end))
+ return 1;
+ }
+
+ if (in_mem(addr, size, (unsigned long)__init_begin, (unsigned long)__init_end))
return 1;
-#ifdef CONFIG_ROMFS_ON_MTD
- /* For XIP, allow user space to use pointers within the ROMFS. */
- if (addr >= memory_mtd_start && (addr + size) <= memory_mtd_end)
+ if (in_mem_const(addr, size, L1_CODE_START, L1_CODE_LENGTH))
+ return 1;
+ if (in_mem_const_off(addr, size, _etext_l1 - _stext_l1, L1_CODE_START, L1_CODE_LENGTH))
+ return 1;
+ if (in_mem_const_off(addr, size, _ebss_l1 - _sdata_l1, L1_DATA_A_START, L1_DATA_A_LENGTH))
+ return 1;
+ if (in_mem_const_off(addr, size, _ebss_b_l1 - _sdata_b_l1, L1_DATA_B_START, L1_DATA_B_LENGTH))
+ return 1;
+#ifdef COREB_L1_CODE_START
+ if (in_mem_const(addr, size, COREB_L1_CODE_START, L1_CODE_LENGTH))
+ return 1;
+ if (in_mem_const(addr, size, COREB_L1_SCRATCH_START, L1_SCRATCH_LENGTH))
+ return 1;
+ if (in_mem_const(addr, size, COREB_L1_DATA_A_START, L1_DATA_A_LENGTH))
+ return 1;
+ if (in_mem_const(addr, size, COREB_L1_DATA_B_START, L1_DATA_B_LENGTH))
return 1;
#endif
-#else
- if (addr >= memory_start && (addr + size) <= physical_mem_end)
+ if (in_mem_const_off(addr, size, _ebss_l2 - _stext_l2, L2_START, L2_LENGTH))
return 1;
-#endif
- if (addr >= (unsigned long)__init_begin &&
- addr + size <= (unsigned long)__init_end)
+
+ if (in_mem_const(addr, size, BOOT_ROM_START, BOOT_ROM_LENGTH))
return 1;
- if (addr >= get_l1_scratch_start()
- && addr + size <= get_l1_scratch_start() + L1_SCRATCH_LENGTH)
+ if (in_mem_const(addr, size, L1_ROM_START, L1_ROM_LENGTH))
return 1;
-#if L1_CODE_LENGTH != 0
- if (addr >= get_l1_code_start() + (_etext_l1 - _stext_l1)
- && addr + size <= get_l1_code_start() + L1_CODE_LENGTH)
- return 1;
-#endif
-#if L1_DATA_A_LENGTH != 0
- if (addr >= get_l1_data_a_start() + (_ebss_l1 - _sdata_l1)
- && addr + size <= get_l1_data_a_start() + L1_DATA_A_LENGTH)
- return 1;
-#endif
-#if L1_DATA_B_LENGTH != 0
- if (addr >= get_l1_data_b_start() + (_ebss_b_l1 - _sdata_b_l1)
- && addr + size <= get_l1_data_b_start() + L1_DATA_B_LENGTH)
- return 1;
-#endif
-#if L2_LENGTH != 0
- if (addr >= L2_START + (_ebss_l2 - _stext_l2)
- && addr + size <= L2_START + L2_LENGTH)
- return 1;
-#endif
+
return 0;
}
EXPORT_SYMBOL(_access_ok);
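The helpers above reduce every validity test to plain interval arithmetic plus an overflow guard. A stand-alone sketch of their behaviour (the bank base/length values are hypothetical and this compiles outside the kernel, so it only illustrates the checks, it is not kernel code):

#include <assert.h>
#include <limits.h>

/* Same helpers as added to process.c above. */
static inline int in_mem(unsigned long addr, unsigned long size,
			 unsigned long start, unsigned long end)
{
	return addr >= start && addr + size <= end;
}

static inline int in_mem_const_off(unsigned long addr, unsigned long size,
				   unsigned long off, unsigned long const_addr,
				   unsigned long const_size)
{
	return const_size &&
	       in_mem(addr, size, const_addr + off, const_addr + const_size);
}

int main(void)
{
	/* Hypothetical L1 data bank: 16 KiB at 0xff800000, with the first
	 * 0x100 bytes reserved for kernel data (the "off" part). */
	const unsigned long base = 0xff800000UL, len = 0x4000UL, off = 0x100UL;

	assert( in_mem_const_off(base + off, 8, off, base, len)); /* inside */
	assert(!in_mem_const_off(base,       8, off, base, len)); /* reserved head */
	assert(!in_mem_const_off(base + len, 8, off, base, len)); /* past the end */
	assert(!in_mem_const_off(base, 8, 0, base, 0));            /* bank not present */

	/* Wrap-around guard used by bfin_mem_access_type()/_access_ok(). */
	unsigned long addr = ULONG_MAX - 4, size = 16;
	assert(addr > ULONG_MAX - size);	/* rejected before any range test */
	return 0;
}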
diff --git a/arch/blackfin/kernel/setup.c b/arch/blackfin/kernel/setup.c
index 6454bab..298f023 100644
--- a/arch/blackfin/kernel/setup.c
+++ b/arch/blackfin/kernel/setup.c
@@ -117,15 +117,49 @@
*/
#ifdef CONFIG_BFIN_ICACHE
printk(KERN_INFO "Instruction Cache Enabled for CPU%u\n", cpu);
-#endif
-#ifdef CONFIG_BFIN_DCACHE
- printk(KERN_INFO "Data Cache Enabled for CPU%u"
-# if defined CONFIG_BFIN_WB
- " (write-back)"
-# elif defined CONFIG_BFIN_WT
- " (write-through)"
+ printk(KERN_INFO " External memory:"
+# ifdef CONFIG_BFIN_EXTMEM_ICACHEABLE
+ " cacheable"
+# else
+ " uncacheable"
# endif
- "\n", cpu);
+ " in instruction cache\n");
+ if (L2_LENGTH)
+ printk(KERN_INFO " L2 SRAM :"
+# ifdef CONFIG_BFIN_L2_ICACHEABLE
+ " cacheable"
+# else
+ " uncacheable"
+# endif
+ " in instruction cache\n");
+
+#else
+ printk(KERN_INFO "Instruction Cache Disabled for CPU%u\n", cpu);
+#endif
+
+#ifdef CONFIG_BFIN_DCACHE
+ printk(KERN_INFO "Data Cache Enabled for CPU%u\n", cpu);
+ printk(KERN_INFO " External memory:"
+# if defined CONFIG_BFIN_EXTMEM_WRITEBACK
+ " cacheable (write-back)"
+# elif defined CONFIG_BFIN_EXTMEM_WRITETHROUGH
+ " cacheable (write-through)"
+# else
+ " uncacheable"
+# endif
+ " in data cache\n");
+ if (L2_LENGTH)
+ printk(KERN_INFO " L2 SRAM :"
+# if defined CONFIG_BFIN_L2_WRITEBACK
+ " cacheable (write-back)"
+# elif defined CONFIG_BFIN_L2_WRITETHROUGH
+ " cacheable (write-through)"
+# else
+ " uncacheable"
+# endif
+ " in data cache\n");
+#else
+ printk(KERN_INFO "Data Cache Disabled for CPU%u\n", cpu);
#endif
}
@@ -443,9 +477,11 @@
} else if (!memcmp(to, "clkin_hz=", 9)) {
to += 9;
early_init_clkin_hz(to);
+#ifdef CONFIG_EARLY_PRINTK
} else if (!memcmp(to, "earlyprintk=", 12)) {
to += 12;
setup_early_printk(to);
+#endif
} else if (!memcmp(to, "memmap=", 7)) {
to += 7;
parse_memmap(to);
@@ -516,7 +552,7 @@
&& ((unsigned long *)mtd_phys)[1] == ROMSB_WORD1)
mtd_size =
PAGE_ALIGN(be32_to_cpu(((unsigned long *)mtd_phys)[2]));
-# if (defined(CONFIG_BFIN_ICACHE) && ANOMALY_05000263)
+# if (defined(CONFIG_BFIN_EXTMEM_ICACHEABLE) && ANOMALY_05000263)
/* Due to a Hardware Anomaly we need to limit the size of usable
* instruction memory to max 60MB, 56 if HUNT_FOR_ZERO is on
* 05000263 - Hardware loop corrupted when taking an ICPLB exception
@@ -544,7 +580,7 @@
dma_memcpy((void *)uclinux_ram_map.phys, _end, uclinux_ram_map.size);
#endif /* CONFIG_MTD_UCLINUX */
-#if (defined(CONFIG_BFIN_ICACHE) && ANOMALY_05000263)
+#if (defined(CONFIG_BFIN_EXTMEM_ICACHEABLE) && ANOMALY_05000263)
/* Due to a Hardware Anomaly we need to limit the size of usable
* instruction memory to max 60MB, 56 if HUNT_FOR_ZERO is on
* 05000263 - Hardware loop corrupted when taking an ICPLB exception
@@ -764,6 +800,11 @@
{
unsigned long sclk, cclk;
+ /* Check to make sure we are running on the right processor */
+ if (unlikely(CPUID != bfin_cpuid()))
+ printk(KERN_ERR "ERROR: Not running on ADSP-%s: unknown CPUID 0x%04x Rev 0.%d\n",
+ CPU, bfin_cpuid(), bfin_revid());
+
#ifdef CONFIG_DUMMY_CONSOLE
conswitchp = &dummy_con;
#endif
@@ -778,14 +819,17 @@
memcpy(boot_command_line, command_line, COMMAND_LINE_SIZE);
boot_command_line[COMMAND_LINE_SIZE - 1] = '\0';
- /* setup memory defaults from the user config */
- physical_mem_end = 0;
- _ramend = get_mem_size() * 1024 * 1024;
-
memset(&bfin_memmap, 0, sizeof(bfin_memmap));
+ /* If the user does not specify things on the command line, use
+ * what the bootloader set things up as
+ */
+ physical_mem_end = 0;
parse_cmdline_early(&command_line[0]);
+ if (_ramend == 0)
+ _ramend = get_mem_size() * 1024 * 1024;
+
if (physical_mem_end == 0)
physical_mem_end = _ramend;
@@ -837,7 +881,8 @@
defined(CONFIG_BF538) || defined(CONFIG_BF539)
_bfin_swrst = bfin_read_SWRST();
#else
- _bfin_swrst = bfin_read_SYSCR();
+ /* Clear boot mode field */
+ _bfin_swrst = bfin_read_SYSCR() & ~0xf;
#endif
#ifdef CONFIG_DEBUG_DOUBLEFAULT_PRINT
@@ -875,10 +920,7 @@
else
printk(KERN_INFO "Compiled for ADSP-%s Rev 0.%d\n", CPU, bfin_compiled_revid());
- if (unlikely(CPUID != bfin_cpuid()))
- printk(KERN_ERR "ERROR: Not running on ADSP-%s: unknown CPUID 0x%04x Rev 0.%d\n",
- CPU, bfin_cpuid(), bfin_revid());
- else {
+ if (likely(CPUID == bfin_cpuid())) {
if (bfin_revid() != bfin_compiled_revid()) {
if (bfin_compiled_revid() == -1)
printk(KERN_ERR "Warning: Compiled for Rev none, but running on Rev %d\n",
@@ -1157,16 +1199,25 @@
icache_size = 0;
seq_printf(m, "cache size\t: %d KB(L1 icache) "
- "%d KB(L1 dcache%s) %d KB(L2 cache)\n",
- icache_size, dcache_size,
-#if defined CONFIG_BFIN_WB
- "-wb"
-#elif defined CONFIG_BFIN_WT
- "-wt"
-#endif
- "", 0);
-
+ "%d KB(L1 dcache) %d KB(L2 cache)\n",
+ icache_size, dcache_size, 0);
seq_printf(m, "%s\n", cache);
+ seq_printf(m, "external memory\t: "
+#if defined(CONFIG_BFIN_EXTMEM_ICACHEABLE)
+ "cacheable"
+#else
+ "uncacheable"
+#endif
+ " in instruction cache\n");
+ seq_printf(m, "external memory\t: "
+#if defined(CONFIG_BFIN_EXTMEM_WRITEBACK)
+ "cacheable (write-back)"
+#elif defined(CONFIG_BFIN_EXTMEM_WRITETHROUGH)
+ "cacheable (write-through)"
+#else
+ "uncacheable"
+#endif
+ " in data cache\n");
if (icache_size)
seq_printf(m, "icache setup\t: %d Sub-banks/%d Ways, %d Lines/Way\n",
@@ -1239,8 +1290,25 @@
if (cpu_num != num_possible_cpus() - 1)
return 0;
- if (L2_LENGTH)
+ if (L2_LENGTH) {
seq_printf(m, "L2 SRAM\t\t: %dKB\n", L2_LENGTH/0x400);
+ seq_printf(m, "L2 SRAM\t\t: "
+#if defined(CONFIG_BFIN_L2_ICACHEABLE)
+ "cacheable"
+#else
+ "uncacheable"
+#endif
+ " in instruction cache\n");
+ seq_printf(m, "L2 SRAM\t\t: "
+#if defined(CONFIG_BFIN_L2_WRITEBACK)
+ "cacheable (write-back)"
+#elif defined(CONFIG_BFIN_L2_WRITETHROUGH)
+ "cacheable (write-through)"
+#else
+ "uncacheable"
+#endif
+ " in data cache\n");
+ }
seq_printf(m, "board name\t: %s\n", bfin_board_name);
seq_printf(m, "board memory\t: %ld kB (0x%p -> 0x%p)\n",
physical_mem_end >> 10, (void *)0, (void *)physical_mem_end);
diff --git a/arch/blackfin/kernel/traps.c b/arch/blackfin/kernel/traps.c
index d279552..8eeb457 100644
--- a/arch/blackfin/kernel/traps.c
+++ b/arch/blackfin/kernel/traps.c
@@ -37,6 +37,7 @@
#include <asm/traps.h>
#include <asm/cacheflush.h>
#include <asm/cplb.h>
+#include <asm/dma.h>
#include <asm/blackfin.h>
#include <asm/irq_handler.h>
#include <linux/irq.h>
@@ -636,57 +637,30 @@
*/
static bool get_instruction(unsigned short *val, unsigned short *address)
{
-
- unsigned long addr;
-
- addr = (unsigned long)address;
+ unsigned long addr = (unsigned long)address;
/* Check for odd addresses */
if (addr & 0x1)
return false;
- /* Check that things do not wrap around */
- if (addr > (addr + 2))
+ /* MMR region will never have instructions */
+ if (addr >= SYSMMR_BASE)
return false;
- /*
- * Since we are in exception context, we need to do a little address checking
- * We need to make sure we are only accessing valid memory, and
- * we don't read something in the async space that can hang forever
- */
- if ((addr >= FIXED_CODE_START && (addr + 2) <= physical_mem_end) ||
-#if L2_LENGTH != 0
- (addr >= L2_START && (addr + 2) <= (L2_START + L2_LENGTH)) ||
-#endif
- (addr >= BOOT_ROM_START && (addr + 2) <= (BOOT_ROM_START + BOOT_ROM_LENGTH)) ||
-#if L1_DATA_A_LENGTH != 0
- (addr >= L1_DATA_A_START && (addr + 2) <= (L1_DATA_A_START + L1_DATA_A_LENGTH)) ||
-#endif
-#if L1_DATA_B_LENGTH != 0
- (addr >= L1_DATA_B_START && (addr + 2) <= (L1_DATA_B_START + L1_DATA_B_LENGTH)) ||
-#endif
- (addr >= L1_SCRATCH_START && (addr + 2) <= (L1_SCRATCH_START + L1_SCRATCH_LENGTH)) ||
- (!(bfin_read_EBIU_AMBCTL0() & B0RDYEN) &&
- addr >= ASYNC_BANK0_BASE && (addr + 2) <= (ASYNC_BANK0_BASE + ASYNC_BANK0_SIZE)) ||
- (!(bfin_read_EBIU_AMBCTL0() & B1RDYEN) &&
- addr >= ASYNC_BANK1_BASE && (addr + 2) <= (ASYNC_BANK1_BASE + ASYNC_BANK1_SIZE)) ||
- (!(bfin_read_EBIU_AMBCTL1() & B2RDYEN) &&
- addr >= ASYNC_BANK2_BASE && (addr + 2) <= (ASYNC_BANK2_BASE + ASYNC_BANK1_SIZE)) ||
- (!(bfin_read_EBIU_AMBCTL1() & B3RDYEN) &&
- addr >= ASYNC_BANK3_BASE && (addr + 2) <= (ASYNC_BANK3_BASE + ASYNC_BANK1_SIZE))) {
- *val = *address;
- return true;
+ switch (bfin_mem_access_type(addr, 2)) {
+ case BFIN_MEM_ACCESS_CORE:
+ case BFIN_MEM_ACCESS_CORE_ONLY:
+ *val = *address;
+ return true;
+ case BFIN_MEM_ACCESS_DMA:
+ dma_memcpy(val, address, 2);
+ return true;
+ case BFIN_MEM_ACCESS_ITEST:
+ isram_memcpy(val, address, 2);
+ return true;
+ default: /* invalid access */
+ return false;
}
-
-#if L1_CODE_LENGTH != 0
- if (addr >= L1_CODE_START && (addr + 2) <= (L1_CODE_START + L1_CODE_LENGTH)) {
- isram_memcpy(val, address, 2);
- return true;
- }
-#endif
-
-
- return false;
}
/*
diff --git a/arch/blackfin/mach-bf518/boards/ezbrd.c b/arch/blackfin/mach-bf518/boards/ezbrd.c
index 1382f03..d979110 100644
--- a/arch/blackfin/mach-bf518/boards/ezbrd.c
+++ b/arch/blackfin/mach-bf518/boards/ezbrd.c
@@ -119,13 +119,19 @@
};
#if defined(CONFIG_NET_DSA_KSZ8893M) || defined(CONFIG_NET_DSA_KSZ8893M_MODULE)
-static struct dsa_platform_data ksz8893m_switch_data = {
+static struct dsa_chip_data ksz8893m_switch_chip_data = {
.mii_bus = &bfin_mii_bus.dev,
+ .port_names = {
+ NULL,
+ "eth%d",
+ "eth%d",
+ "cpu",
+ },
+};
+static struct dsa_platform_data ksz8893m_switch_data = {
+ .nr_chips = 1,
.netdev = &bfin_mac_device.dev,
- .port_names[0] = NULL,
- .port_names[1] = "eth%d",
- .port_names[2] = "eth%d",
- .port_names[3] = "cpu",
+ .chip = &ksz8893m_switch_chip_data,
};
static struct platform_device ksz8893m_switch_device = {
diff --git a/arch/blackfin/mach-bf518/include/mach/anomaly.h b/arch/blackfin/mach-bf518/include/mach/anomaly.h
index b69bd9a..426e064 100644
--- a/arch/blackfin/mach-bf518/include/mach/anomaly.h
+++ b/arch/blackfin/mach-bf518/include/mach/anomaly.h
@@ -7,7 +7,7 @@
*/
/* This file should be up to date with:
- * - Revision B, 02/03/2009; ADSP-BF512/BF514/BF516/BF518 Blackfin Processor Anomaly List
+ * - Revision C, 06/12/2009; ADSP-BF512/BF514/BF516/BF518 Blackfin Processor Anomaly List
*/
/* We plan on not supporting 0.0 silicon, but 0.1 isn't out yet - sorry */
@@ -18,7 +18,7 @@
#ifndef _MACH_ANOMALY_H_
#define _MACH_ANOMALY_H_
-/* Multi-issue instruction with dsp32shiftimm in slot1 and P-reg store in slot 2 not supported */
+/* Multi-Issue Instruction with dsp32shiftimm in slot1 and P-reg Store in slot2 Not Supported */
#define ANOMALY_05000074 (1)
/* Rx.H Cannot Be Used to Access 16-bit System MMR Registers */
#define ANOMALY_05000122 (1)
@@ -45,29 +45,31 @@
/* Speculative Fetches of Indirect-Pointer Instructions Can Cause False Hardware Errors */
#define ANOMALY_05000426 (1)
/* Software System Reset Corrupts PLL_LOCKCNT Register */
-#define ANOMALY_05000430 (1)
+#define ANOMALY_05000430 (__SILICON_REVISION__ < 1)
/* Incorrect Use of Stack in Lockbox Firmware During Authentication */
#define ANOMALY_05000431 (1)
/* Certain SIC Registers are not Reset After Soft or Core Double Fault Reset */
-#define ANOMALY_05000435 (1)
+#define ANOMALY_05000435 (__SILICON_REVISION__ < 1)
/* PORTx_DRIVE and PORTx_HYSTERESIS Registers Read Back Incorrect Values */
-#define ANOMALY_05000438 (1)
+#define ANOMALY_05000438 (__SILICON_REVISION__ < 1)
/* Preboot Cannot be Used to Alter the PLL_DIV Register */
-#define ANOMALY_05000439 (1)
+#define ANOMALY_05000439 (__SILICON_REVISION__ < 1)
/* bfrom_SysControl() Cannot be Used to Write the PLL_DIV Register */
-#define ANOMALY_05000440 (1)
+#define ANOMALY_05000440 (__SILICON_REVISION__ < 1)
/* IFLUSH Instruction at End of Hardware Loop Causes Infinite Stall */
#define ANOMALY_05000443 (1)
/* Incorrect L1 Instruction Bank B Memory Map Location */
-#define ANOMALY_05000444 (1)
+#define ANOMALY_05000444 (__SILICON_REVISION__ < 1)
/* Incorrect Default Hysteresis Setting for RESET, NMI, and BMODE Signals */
-#define ANOMALY_05000452 (1)
+#define ANOMALY_05000452 (__SILICON_REVISION__ < 1)
/* PWM_TRIPB Signal Not Available on PG10 */
-#define ANOMALY_05000453 (1)
+#define ANOMALY_05000453 (__SILICON_REVISION__ < 1)
/* PPI_FS3 is Driven One Half Cycle Later Than PPI Data */
-#define ANOMALY_05000455 (1)
-/* False Hardware Error when RETI points to invalid memory */
+#define ANOMALY_05000455 (__SILICON_REVISION__ < 1)
+/* False Hardware Error when RETI Points to Invalid Memory */
#define ANOMALY_05000461 (1)
+/* Synchronization Problem at Startup May Cause SPORT Transmit Channels to Misalign */
+#define ANOMALY_05000462 (1)
/* Anomalies that don't exist on this proc */
#define ANOMALY_05000099 (0)
@@ -78,24 +80,30 @@
#define ANOMALY_05000158 (0)
#define ANOMALY_05000171 (0)
#define ANOMALY_05000179 (0)
+#define ANOMALY_05000182 (0)
#define ANOMALY_05000183 (0)
#define ANOMALY_05000198 (0)
+#define ANOMALY_05000202 (0)
#define ANOMALY_05000215 (0)
#define ANOMALY_05000220 (0)
#define ANOMALY_05000227 (0)
#define ANOMALY_05000230 (0)
#define ANOMALY_05000231 (0)
#define ANOMALY_05000233 (0)
+#define ANOMALY_05000234 (0)
#define ANOMALY_05000242 (0)
#define ANOMALY_05000244 (0)
#define ANOMALY_05000248 (0)
#define ANOMALY_05000250 (0)
+#define ANOMALY_05000257 (0)
#define ANOMALY_05000261 (0)
#define ANOMALY_05000263 (0)
#define ANOMALY_05000266 (0)
#define ANOMALY_05000273 (0)
#define ANOMALY_05000274 (0)
#define ANOMALY_05000278 (0)
+#define ANOMALY_05000281 (0)
+#define ANOMALY_05000283 (0)
#define ANOMALY_05000285 (0)
#define ANOMALY_05000287 (0)
#define ANOMALY_05000301 (0)
@@ -103,10 +111,13 @@
#define ANOMALY_05000307 (0)
#define ANOMALY_05000311 (0)
#define ANOMALY_05000312 (0)
+#define ANOMALY_05000315 (0)
#define ANOMALY_05000323 (0)
#define ANOMALY_05000353 (0)
+#define ANOMALY_05000357 (0)
#define ANOMALY_05000362 (1)
#define ANOMALY_05000363 (0)
+#define ANOMALY_05000371 (0)
#define ANOMALY_05000380 (0)
#define ANOMALY_05000386 (0)
#define ANOMALY_05000389 (0)
@@ -117,5 +128,7 @@
#define ANOMALY_05000448 (0)
#define ANOMALY_05000456 (0)
#define ANOMALY_05000450 (0)
+#define ANOMALY_05000465 (0)
+#define ANOMALY_05000467 (0)
#endif
diff --git a/arch/blackfin/mach-bf518/include/mach/blackfin.h b/arch/blackfin/mach-bf518/include/mach/blackfin.h
index 267bb7c..e8e14c2 100644
--- a/arch/blackfin/mach-bf518/include/mach/blackfin.h
+++ b/arch/blackfin/mach-bf518/include/mach/blackfin.h
@@ -33,7 +33,6 @@
#define _MACH_BLACKFIN_H_
#include "bf518.h"
-#include "mem_map.h"
#include "defBF512.h"
#include "anomaly.h"
diff --git a/arch/blackfin/mach-bf518/include/mach/mem_map.h b/arch/blackfin/mach-bf518/include/mach/mem_map.h
index 62bcc78..3c6777c 100644
--- a/arch/blackfin/mach-bf518/include/mach/mem_map.h
+++ b/arch/blackfin/mach-bf518/include/mach/mem_map.h
@@ -1,38 +1,16 @@
/*
- * file: include/asm-blackfin/mach-bf518/mem_map.h
- * based on: include/asm-blackfin/mach-bf527/mem_map.h
- * author: Bryan Wu <cooloney@kernel.org>
+ * BF51x memory map
*
- * created:
- * description:
- * Memory MAP Common header file for blackfin BF518/6/4/2 of processors.
- * rev:
- *
- * modified:
- *
- * bugs: enter bugs at http://blackfin.uclinux.org/
- *
- * this program is free software; you can redistribute it and/or modify
- * it under the terms of the gnu general public license as published by
- * the free software foundation; either version 2, or (at your option)
- * any later version.
- *
- * this program is distributed in the hope that it will be useful,
- * but without any warranty; without even the implied warranty of
- * merchantability or fitness for a particular purpose. see the
- * gnu general public license for more details.
- *
- * you should have received a copy of the gnu general public license
- * along with this program; see the file copying.
- * if not, write to the free software foundation,
- * 59 temple place - suite 330, boston, ma 02111-1307, usa.
+ * Copyright 2004-2009 Analog Devices Inc.
+ * Licensed under the GPL-2 or later.
*/
-#ifndef _MEM_MAP_518_H_
-#define _MEM_MAP_518_H_
+#ifndef __BFIN_MACH_MEM_MAP_H__
+#define __BFIN_MACH_MEM_MAP_H__
-#define COREMMR_BASE 0xFFE00000 /* Core MMRs */
-#define SYSMMR_BASE 0xFFC00000 /* System MMRs */
+#ifndef __BFIN_MEM_MAP_H__
+# error "do not include mach/mem_map.h directly -- use asm/mem_map.h"
+#endif
/* Async Memory Banks */
#define ASYNC_BANK3_BASE 0x20300000 /* Async Bank 3 */
@@ -89,20 +67,4 @@
#define BFIN_DSUPBANKS 0
#endif /*CONFIG_BFIN_DCACHE */
-/* Level 2 Memory - none */
-
-#define L2_START 0
-#define L2_LENGTH 0
-
-/* Scratch Pad Memory */
-
-#define L1_SCRATCH_START 0xFFB00000
-#define L1_SCRATCH_LENGTH 0x1000
-
-#define GET_PDA_SAFE(preg) \
- preg.l = _cpu_pda; \
- preg.h = _cpu_pda;
-
-#define GET_PDA(preg, dreg) GET_PDA_SAFE(preg)
-
-#endif /* _MEM_MAP_518_H_ */
+#endif
diff --git a/arch/blackfin/mach-bf527/boards/cm_bf527.c b/arch/blackfin/mach-bf527/boards/cm_bf527.c
index 1eaf27f..f4867ce 100644
--- a/arch/blackfin/mach-bf527/boards/cm_bf527.c
+++ b/arch/blackfin/mach-bf527/boards/cm_bf527.c
@@ -78,7 +78,6 @@
static struct isp1760_platform_data isp1760_priv = {
.is_isp1761 = 0,
- .port1_disable = 0,
.bus_width_16 = 1,
.port1_otg = 0,
.analog_oc = 0,
diff --git a/arch/blackfin/mach-bf527/boards/ezbrd.c b/arch/blackfin/mach-bf527/boards/ezbrd.c
index 9f9c000..b2f30f0 100644
--- a/arch/blackfin/mach-bf527/boards/ezbrd.c
+++ b/arch/blackfin/mach-bf527/boards/ezbrd.c
@@ -237,10 +237,10 @@
.name = "m25p80",
.parts = bfin_spi_flash_partitions,
.nr_parts = ARRAY_SIZE(bfin_spi_flash_partitions),
- .type = "m25p16",
+ .type = "sst25wf040",
};
-/* SPI flash chip (m25p64) */
+/* SPI flash chip (sst25wf040) */
static struct bfin5xx_spi_chip spi_flash_chip_info = {
.enable_dma = 0, /* use dma transfer with this chip*/
.bits_per_word = 8,
diff --git a/arch/blackfin/mach-bf527/boards/ezkit.c b/arch/blackfin/mach-bf527/boards/ezkit.c
index 3e5b7db..799a1d1 100644
--- a/arch/blackfin/mach-bf527/boards/ezkit.c
+++ b/arch/blackfin/mach-bf527/boards/ezkit.c
@@ -77,7 +77,6 @@
static struct isp1760_platform_data isp1760_priv = {
.is_isp1761 = 0,
- .port1_disable = 0,
.bus_width_16 = 1,
.port1_otg = 0,
.analog_oc = 0,
diff --git a/arch/blackfin/mach-bf527/include/mach/anomaly.h b/arch/blackfin/mach-bf527/include/mach/anomaly.h
index c84ddea..0d63f74 100644
--- a/arch/blackfin/mach-bf527/include/mach/anomaly.h
+++ b/arch/blackfin/mach-bf527/include/mach/anomaly.h
@@ -34,7 +34,7 @@
#define _ANOMALY_BF527(rev527) (ANOMALY_BF527 && __SILICON_REVISION__ rev527)
#define _ANOMALY_BF526_BF527(rev526, rev527) (_ANOMALY_BF526(rev526) || _ANOMALY_BF527(rev527))
-/* Multi-issue instruction with dsp32shiftimm in slot1 and P-reg store in slot 2 not supported */
+/* Multi-Issue Instruction with dsp32shiftimm in slot1 and P-reg Store in slot2 Not Supported */
#define ANOMALY_05000074 (1)
/* DMA_RUN Bit Is Not Valid after a Peripheral Receive Channel DMA Stops */
#define ANOMALY_05000119 (1) /* note: brokenness is noted in documentation, not anomaly sheet */
@@ -184,8 +184,12 @@
#define ANOMALY_05000456 (1)
/* Host DMA Port Responds to Certain Bus Activity Without HOST_CE Assertion */
#define ANOMALY_05000457 (1)
-/* False Hardware Error when RETI points to invalid memory */
+/* False Hardware Error when RETI Points to Invalid Memory */
#define ANOMALY_05000461 (1)
+/* USB Rx DMA hang */
+#define ANOMALY_05000465 (1)
+/* Possible RX data corruption when control & data EP FIFOs are accessed via the core */
+#define ANOMALY_05000467 (1)
/* Anomalies that don't exist on this proc */
#define ANOMALY_05000099 (0)
@@ -195,24 +199,30 @@
#define ANOMALY_05000158 (0)
#define ANOMALY_05000171 (0)
#define ANOMALY_05000179 (0)
+#define ANOMALY_05000182 (0)
#define ANOMALY_05000183 (0)
#define ANOMALY_05000198 (0)
+#define ANOMALY_05000202 (0)
#define ANOMALY_05000215 (0)
#define ANOMALY_05000220 (0)
#define ANOMALY_05000227 (0)
#define ANOMALY_05000230 (0)
#define ANOMALY_05000231 (0)
#define ANOMALY_05000233 (0)
+#define ANOMALY_05000234 (0)
#define ANOMALY_05000242 (0)
#define ANOMALY_05000244 (0)
#define ANOMALY_05000248 (0)
#define ANOMALY_05000250 (0)
+#define ANOMALY_05000257 (0)
#define ANOMALY_05000261 (0)
#define ANOMALY_05000263 (0)
#define ANOMALY_05000266 (0)
#define ANOMALY_05000273 (0)
#define ANOMALY_05000274 (0)
#define ANOMALY_05000278 (0)
+#define ANOMALY_05000281 (0)
+#define ANOMALY_05000283 (0)
#define ANOMALY_05000285 (0)
#define ANOMALY_05000287 (0)
#define ANOMALY_05000301 (0)
@@ -220,6 +230,7 @@
#define ANOMALY_05000307 (0)
#define ANOMALY_05000311 (0)
#define ANOMALY_05000312 (0)
+#define ANOMALY_05000315 (0)
#define ANOMALY_05000323 (0)
#define ANOMALY_05000362 (1)
#define ANOMALY_05000363 (0)
diff --git a/arch/blackfin/mach-bf527/include/mach/blackfin.h b/arch/blackfin/mach-bf527/include/mach/blackfin.h
index 417abcd..03665a8 100644
--- a/arch/blackfin/mach-bf527/include/mach/blackfin.h
+++ b/arch/blackfin/mach-bf527/include/mach/blackfin.h
@@ -33,7 +33,6 @@
#define _MACH_BLACKFIN_H_
#include "bf527.h"
-#include "mem_map.h"
#include "defBF522.h"
#include "anomaly.h"
diff --git a/arch/blackfin/mach-bf527/include/mach/mem_map.h b/arch/blackfin/mach-bf527/include/mach/mem_map.h
index 019e001..d96e894 100644
--- a/arch/blackfin/mach-bf527/include/mach/mem_map.h
+++ b/arch/blackfin/mach-bf527/include/mach/mem_map.h
@@ -1,38 +1,16 @@
/*
- * file: include/asm-blackfin/mach-bf527/mem_map.h
- * based on: include/asm-blackfin/mach-bf537/mem_map.h
- * author: Michael Hennerich (michael.hennerich@analog.com)
+ * BF52x memory map
*
- * created:
- * description:
- * Memory MAP Common header file for blackfin BF527/5/2 of processors.
- * rev:
- *
- * modified:
- *
- * bugs: enter bugs at http://blackfin.uclinux.org/
- *
- * this program is free software; you can redistribute it and/or modify
- * it under the terms of the gnu general public license as published by
- * the free software foundation; either version 2, or (at your option)
- * any later version.
- *
- * this program is distributed in the hope that it will be useful,
- * but without any warranty; without even the implied warranty of
- * merchantability or fitness for a particular purpose. see the
- * gnu general public license for more details.
- *
- * you should have received a copy of the gnu general public license
- * along with this program; see the file copying.
- * if not, write to the free software foundation,
- * 59 temple place - suite 330, boston, ma 02111-1307, usa.
+ * Copyright 2004-2009 Analog Devices Inc.
+ * Licensed under the GPL-2 or later.
*/
-#ifndef _MEM_MAP_527_H_
-#define _MEM_MAP_527_H_
+#ifndef __BFIN_MACH_MEM_MAP_H__
+#define __BFIN_MACH_MEM_MAP_H__
-#define COREMMR_BASE 0xFFE00000 /* Core MMRs */
-#define SYSMMR_BASE 0xFFC00000 /* System MMRs */
+#ifndef __BFIN_MEM_MAP_H__
+# error "do not include mach/mem_map.h directly -- use asm/mem_map.h"
+#endif
/* Async Memory Banks */
#define ASYNC_BANK3_BASE 0x20300000 /* Async Bank 3 */
@@ -89,20 +67,4 @@
#define BFIN_DSUPBANKS 0
#endif /*CONFIG_BFIN_DCACHE */
-/* Level 2 Memory - none */
-
-#define L2_START 0
-#define L2_LENGTH 0
-
-/* Scratch Pad Memory */
-
-#define L1_SCRATCH_START 0xFFB00000
-#define L1_SCRATCH_LENGTH 0x1000
-
-#define GET_PDA_SAFE(preg) \
- preg.l = _cpu_pda; \
- preg.h = _cpu_pda;
-
-#define GET_PDA(preg, dreg) GET_PDA_SAFE(preg)
-
-#endif /* _MEM_MAP_527_H_ */
+#endif
diff --git a/arch/blackfin/mach-bf533/boards/ezkit.c b/arch/blackfin/mach-bf533/boards/ezkit.c
index 89a5ec4..4e3e511 100644
--- a/arch/blackfin/mach-bf533/boards/ezkit.c
+++ b/arch/blackfin/mach-bf533/boards/ezkit.c
@@ -32,6 +32,7 @@
#include <linux/platform_device.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
+#include <linux/mtd/plat-ram.h>
#include <linux/spi/spi.h>
#include <linux/spi/flash.h>
#if defined(CONFIG_USB_ISP1362_HCD) || defined(CONFIG_USB_ISP1362_HCD_MODULE)
@@ -86,6 +87,101 @@
};
#endif
+#if defined(CONFIG_MTD_PSD4256G) || defined(CONFIG_MTD_PSD4256G_MODULE)
+static const char *map_probes[] = {
+ "stm_flash",
+ NULL,
+};
+
+static struct platdata_mtd_ram stm_pri_data_a = {
+ .mapname = "Flash A Primary",
+ .map_probes = map_probes,
+ .bankwidth = 2,
+};
+
+static struct resource stm_pri_resource_a = {
+ .start = 0x20000000,
+ .end = 0x200fffff,
+ .flags = IORESOURCE_MEM,
+};
+
+static struct platform_device stm_pri_device_a = {
+ .name = "mtd-ram",
+ .id = 0,
+ .dev = {
+ .platform_data = &stm_pri_data_a,
+ },
+ .num_resources = 1,
+ .resource = &stm_pri_resource_a,
+};
+
+static struct platdata_mtd_ram stm_pri_data_b = {
+ .mapname = "Flash B Primary",
+ .map_probes = map_probes,
+ .bankwidth = 2,
+};
+
+static struct resource stm_pri_resource_b = {
+ .start = 0x20100000,
+ .end = 0x201fffff,
+ .flags = IORESOURCE_MEM,
+};
+
+static struct platform_device stm_pri_device_b = {
+ .name = "mtd-ram",
+ .id = 4,
+ .dev = {
+ .platform_data = &stm_pri_data_b,
+ },
+ .num_resources = 1,
+ .resource = &stm_pri_resource_b,
+};
+#endif
+
+#if defined(CONFIG_MTD_PLATRAM) || defined(CONFIG_MTD_PLATRAM_MODULE)
+static struct platdata_mtd_ram sram_data_a = {
+ .mapname = "Flash A SRAM",
+ .bankwidth = 2,
+};
+
+static struct resource sram_resource_a = {
+ .start = 0x20240000,
+ .end = 0x2024ffff,
+ .flags = IORESOURCE_MEM,
+};
+
+static struct platform_device sram_device_a = {
+ .name = "mtd-ram",
+ .id = 8,
+ .dev = {
+ .platform_data = &sram_data_a,
+ },
+ .num_resources = 1,
+ .resource = &sram_resource_a,
+};
+
+static struct platdata_mtd_ram sram_data_b = {
+ .mapname = "Flash B SRAM",
+ .bankwidth = 2,
+};
+
+static struct resource sram_resource_b = {
+ .start = 0x202c0000,
+ .end = 0x202cffff,
+ .flags = IORESOURCE_MEM,
+};
+
+static struct platform_device sram_device_b = {
+ .name = "mtd-ram",
+ .id = 9,
+ .dev = {
+ .platform_data = &sram_data_b,
+ },
+ .num_resources = 1,
+ .resource = &sram_resource_b,
+};
+#endif
+
#if defined(CONFIG_MTD_M25P80) || defined(CONFIG_MTD_M25P80_MODULE)
static struct mtd_partition bfin_spi_flash_partitions[] = {
{
@@ -357,6 +453,16 @@
&bfin_dpmc,
+#if defined(CONFIG_MTD_PSD4256G) || defined(CONFIG_MTD_PSD4256G_MODULE)
+ &stm_pri_device_a,
+ &stm_pri_device_b,
+#endif
+
+#if defined(CONFIG_MTD_PLATRAM) || defined(CONFIG_MTD_PLATRAM_MODULE)
+ &sram_device_a,
+ &sram_device_b,
+#endif
+
#if defined(CONFIG_SMC91X) || defined(CONFIG_SMC91X_MODULE)
&smc91x_device,
#endif
diff --git a/arch/blackfin/mach-bf533/include/mach/anomaly.h b/arch/blackfin/mach-bf533/include/mach/anomaly.h
index 31145b5..70a0ad6 100644
--- a/arch/blackfin/mach-bf533/include/mach/anomaly.h
+++ b/arch/blackfin/mach-bf533/include/mach/anomaly.h
@@ -34,7 +34,7 @@
# define ANOMALY_BF533 0
#endif
-/* Multi-issue instruction with dsp32shiftimm in slot1 and P-reg store in slot 2 not supported */
+/* Multi-Issue Instruction with dsp32shiftimm in slot1 and P-reg Store in slot2 Not Supported */
#define ANOMALY_05000074 (1)
/* UART Line Status Register (UART_LSR) Bits Are Not Updated at the Same Time */
#define ANOMALY_05000099 (__SILICON_REVISION__ < 5)
@@ -46,7 +46,7 @@
#define ANOMALY_05000122 (1)
/* Instruction DMA Can Cause Data Cache Fills to Fail (Boot Implications) */
#define ANOMALY_05000158 (__SILICON_REVISION__ < 5)
-/* PPI Data Lengths Between 8 and 16 Do Not Zero Out Upper Bits */
+/* PPI Data Lengths between 8 and 16 Do Not Zero Out Upper Bits */
#define ANOMALY_05000166 (1)
/* Turning SPORTs on while External Frame Sync Is Active May Corrupt Data */
#define ANOMALY_05000167 (1)
@@ -56,13 +56,13 @@
#define ANOMALY_05000180 (1)
/* Timer Pin Limitations for PPI TX Modes with External Frame Syncs */
#define ANOMALY_05000183 (__SILICON_REVISION__ < 4)
-/* False Protection Exceptions */
+/* False Protection Exceptions when Speculative Fetch Is Cancelled */
#define ANOMALY_05000189 (__SILICON_REVISION__ < 4)
/* False I/O Pin Interrupts on Edge-Sensitive Inputs When Polarity Setting Is Changed */
#define ANOMALY_05000193 (__SILICON_REVISION__ < 4)
/* Restarting SPORT in Specific Modes May Cause Data Corruption */
#define ANOMALY_05000194 (__SILICON_REVISION__ < 4)
-/* Failing MMR Accesses When Stalled by Preceding Memory Read */
+/* Failing MMR Accesses when Preceding Memory Read Stalls */
#define ANOMALY_05000198 (__SILICON_REVISION__ < 5)
/* Current DMA Address Shows Wrong Value During Carry Fix */
#define ANOMALY_05000199 (__SILICON_REVISION__ < 4)
@@ -74,7 +74,7 @@
#define ANOMALY_05000202 (__SILICON_REVISION__ < 5)
/* Specific Sequence That Can Cause DMA Error or DMA Stopping */
#define ANOMALY_05000203 (__SILICON_REVISION__ < 4)
-/* Incorrect data read with write-through cache and allocate cache lines on reads only mode */
+/* Incorrect Data Read with Writethrough "Allocate Cache Lines on Reads Only" Cache Mode */
#define ANOMALY_05000204 (__SILICON_REVISION__ < 4 && ANOMALY_BF533)
/* Recovery from "Brown-Out" Condition */
#define ANOMALY_05000207 (__SILICON_REVISION__ < 4)
@@ -106,7 +106,7 @@
#define ANOMALY_05000244 (__SILICON_REVISION__ < 5)
/* False Hardware Error from an Access in the Shadow of a Conditional Branch */
#define ANOMALY_05000245 (1)
-/* Data CPLBs Should Prevent Spurious Hardware Errors */
+/* Data CPLBs Should Prevent False Hardware Errors */
#define ANOMALY_05000246 (__SILICON_REVISION__ < 5)
/* Incorrect Bit Shift of Data Word in Multichannel (TDM) Mode in Certain Conditions */
#define ANOMALY_05000250 (__SILICON_REVISION__ == 4)
@@ -148,21 +148,21 @@
#define ANOMALY_05000277 (__SILICON_REVISION__ < 6)
/* Disabling Peripherals with DMA Running May Cause DMA System Instability */
#define ANOMALY_05000278 (__SILICON_REVISION__ < 6)
-/* False Hardware Error Exception When ISR Context Is Not Restored */
+/* False Hardware Error Exception when ISR Context Is Not Restored */
#define ANOMALY_05000281 (__SILICON_REVISION__ < 6)
/* Memory DMA Corruption with 32-Bit Data and Traffic Control */
#define ANOMALY_05000282 (__SILICON_REVISION__ < 6)
-/* System MMR Write Is Stalled Indefinitely When Killed in a Particular Stage */
+/* System MMR Write Is Stalled Indefinitely when Killed in a Particular Stage */
#define ANOMALY_05000283 (__SILICON_REVISION__ < 6)
/* SPORTs May Receive Bad Data If FIFOs Fill Up */
#define ANOMALY_05000288 (__SILICON_REVISION__ < 6)
/* Memory-To-Memory DMA Source/Destination Descriptors Must Be in Same Memory Space */
#define ANOMALY_05000301 (__SILICON_REVISION__ < 6)
-/* SSYNCs After Writes To DMA MMR Registers May Not Be Handled Correctly */
+/* SSYNCs after Writes to DMA MMR Registers May Not Be Handled Correctly */
#define ANOMALY_05000302 (__SILICON_REVISION__ < 5)
/* SPORT_HYS Bit in PLL_CTL Register Is Not Functional */
#define ANOMALY_05000305 (__SILICON_REVISION__ < 5)
-/* New Feature: Additional PPI Frame Sync Sampling Options (Not Available On Older Silicon) */
+/* ALT_TIMING Bit in PPI_CONTROL Register Is Not Functional */
#define ANOMALY_05000306 (__SILICON_REVISION__ < 5)
/* SCKELOW Bit Does Not Maintain State Through Hibernate */
#define ANOMALY_05000307 (1) /* note: brokenness is noted in documentation, not anomaly sheet */
@@ -170,11 +170,11 @@
#define ANOMALY_05000310 (1)
/* Erroneous Flag (GPIO) Pin Operations under Specific Sequences */
#define ANOMALY_05000311 (__SILICON_REVISION__ < 6)
-/* Errors When SSYNC, CSYNC, or Loads to LT, LB and LC Registers Are Interrupted */
+/* Errors when SSYNC, CSYNC, or Loads to LT, LB and LC Registers Are Interrupted */
#define ANOMALY_05000312 (__SILICON_REVISION__ < 6)
/* PPI Is Level-Sensitive on First Transfer In Single Frame Sync Modes */
#define ANOMALY_05000313 (__SILICON_REVISION__ < 6)
-/* Killed System MMR Write Completes Erroneously On Next System MMR Access */
+/* Killed System MMR Write Completes Erroneously on Next System MMR Access */
#define ANOMALY_05000315 (__SILICON_REVISION__ < 6)
/* Internal Voltage Regulator Values of 1.05V, 1.10V and 1.15V Not Allowed for LQFP Packages */
#define ANOMALY_05000319 ((ANOMALY_BF531 || ANOMALY_BF532) && __SILICON_REVISION__ < 6)
@@ -200,7 +200,7 @@
#define ANOMALY_05000426 (1)
/* IFLUSH Instruction at End of Hardware Loop Causes Infinite Stall */
#define ANOMALY_05000443 (1)
-/* False Hardware Error when RETI points to invalid memory */
+/* False Hardware Error when RETI Points to Invalid Memory */
#define ANOMALY_05000461 (1)
/* These anomalies have been "phased" out of analog.com anomaly sheets and are
@@ -215,17 +215,17 @@
#define ANOMALY_05000070 (__SILICON_REVISION__ < 2)
/* Writing FIO_DIR can corrupt a programmable flag's data */
#define ANOMALY_05000079 (__SILICON_REVISION__ < 2)
-/* Timer Auto-Baud Mode requires the UART clock to be enabled */
+/* Timer Auto-Baud Mode requires the UART clock to be enabled. */
#define ANOMALY_05000086 (__SILICON_REVISION__ < 2)
/* Internal Clocking Modes on SPORT0 not supported */
#define ANOMALY_05000088 (__SILICON_REVISION__ < 2)
/* Internal voltage regulator does not wake up from an RTC wakeup */
#define ANOMALY_05000092 (__SILICON_REVISION__ < 2)
-/* The IFLUSH instruction must be preceded by a CSYNC instruction */
+/* The IFLUSH Instruction Must Be Preceded by a CSYNC Instruction */
#define ANOMALY_05000093 (__SILICON_REVISION__ < 2)
-/* Vectoring to an instruction that is presently being filled into the instruction cache may cause erroneous behavior */
+/* Vectoring to instruction that is being filled into the i-cache may cause erroneous behavior */
#define ANOMALY_05000095 (__SILICON_REVISION__ < 2)
-/* PREFETCH, FLUSH, and FLUSHINV must be followed by a CSYNC */
+/* PREFETCH, FLUSH, and FLUSHINV Instructions Must Be Followed by a CSYNC Instruction */
#define ANOMALY_05000096 (__SILICON_REVISION__ < 2)
/* Performance Monitor 0 and 1 are swapped when monitoring memory events */
#define ANOMALY_05000097 (__SILICON_REVISION__ < 2)
@@ -235,45 +235,45 @@
#define ANOMALY_05000100 (__SILICON_REVISION__ < 2)
/* Reading X_MODIFY or Y_MODIFY while DMA channel is active */
#define ANOMALY_05000101 (__SILICON_REVISION__ < 2)
-/* Descriptor-based MemDMA may lock up with 32-bit transfers or if transfers span 64KB buffers */
+/* Descriptor MemDMA may lock up with 32-bit transfers or if transfers span 64KB buffers */
#define ANOMALY_05000102 (__SILICON_REVISION__ < 2)
-/* Incorrect value written to the cycle counters */
+/* Incorrect Value Written to the Cycle Counters */
#define ANOMALY_05000103 (__SILICON_REVISION__ < 2)
-/* Stores to L1 Data memory incorrect when a specific sequence is followed */
+/* Stores to L1 Data Memory Incorrect when a Specific Sequence Is Followed */
#define ANOMALY_05000104 (__SILICON_REVISION__ < 2)
/* Programmable Flag (PF3) functionality not supported in all PPI modes */
#define ANOMALY_05000106 (__SILICON_REVISION__ < 2)
/* Data store can be lost when targeting a cache line fill */
#define ANOMALY_05000107 (__SILICON_REVISION__ < 2)
-/* Reserved bits in SYSCFG register not set at power on */
+/* Reserved Bits in SYSCFG Register Not Set at Power-On */
#define ANOMALY_05000109 (__SILICON_REVISION__ < 3)
/* Infinite Core Stall */
#define ANOMALY_05000114 (__SILICON_REVISION__ < 2)
-/* PPI_FSx may glitch when generated by the on chip Timers */
+/* PPI_FSx may glitch when generated by the on chip Timers. */
#define ANOMALY_05000115 (__SILICON_REVISION__ < 2)
-/* Trace Buffers may record discontinuities into emulation mode and/or exception, NMI, reset handlers */
+/* Trace Buffers May Contain Errors in Emulation Mode and/or Exception, NMI, Reset Handlers */
#define ANOMALY_05000116 (__SILICON_REVISION__ < 3)
/* DTEST registers allow access to Data Cache when DTEST_COMMAND< 14 >= 0 */
#define ANOMALY_05000117 (__SILICON_REVISION__ < 2)
/* Booting from an 8-bit or 24-bit Addressable SPI device is not supported */
#define ANOMALY_05000118 (__SILICON_REVISION__ < 2)
-/* DTEST_COMMAND initiated memory access may be incorrect if data cache or DMA is active */
+/* DTEST_COMMAND Initiated Memory Access May Be Incorrect If Data Cache or DMA Is Active */
#define ANOMALY_05000123 (__SILICON_REVISION__ < 3)
/* DMA Lock-up at CCLK to SCLK ratios of 4:1, 2:1, or 1:1 */
#define ANOMALY_05000124 (__SILICON_REVISION__ < 3)
-/* Erroneous exception when enabling cache */
+/* Erroneous Exception when Enabling Cache */
#define ANOMALY_05000125 (__SILICON_REVISION__ < 3)
/* SPI clock polarity and phase bits incorrect during booting */
#define ANOMALY_05000126 (__SILICON_REVISION__ < 3)
-/* DMEM_CONTROL is not set on Reset */
+/* DMEM_CONTROL<12> Is Not Set on Reset */
#define ANOMALY_05000137 (__SILICON_REVISION__ < 3)
/* SPI boot will not complete if there is a zero fill block in the loader file */
#define ANOMALY_05000138 (__SILICON_REVISION__ == 2)
-/* Timerx_Config must be set for using the PPI in GP output mode with internal Frame Syncs */
+/* TIMERx_CONFIG[5] must be set for PPI in GP output mode with internal Frame Syncs */
#define ANOMALY_05000139 (__SILICON_REVISION__ < 2)
/* Allowing the SPORT RX FIFO to fill will cause an overflow */
#define ANOMALY_05000140 (__SILICON_REVISION__ < 3)
-/* An Infinite Stall occurs with a particular sequence of consecutive dual dag events */
+/* Infinite Stall may occur with a particular sequence of consecutive dual dag events */
#define ANOMALY_05000141 (__SILICON_REVISION__ < 3)
/* Interrupts may be lost when a programmable input flag is configured to be edge sensitive */
#define ANOMALY_05000142 (__SILICON_REVISION__ < 3)
@@ -287,7 +287,7 @@
#define ANOMALY_05000146 (__SILICON_REVISION__ < 3)
/* Source MDMA descriptor may stop with a DMA Error near beginning of descriptor fetch */
#define ANOMALY_05000147 (__SILICON_REVISION__ < 3)
-/* When booting from a 16-bit asynchronous memory device, the upper 8-bits of each word must be 0x00 */
+/* When booting from 16-bit asynchronous memory, the upper 8 bits of each word must be 0x00 */
#define ANOMALY_05000148 (__SILICON_REVISION__ < 3)
/* Frame Delay in SPORT Multichannel Mode */
#define ANOMALY_05000153 (__SILICON_REVISION__ < 3)
@@ -295,13 +295,13 @@
#define ANOMALY_05000154 (__SILICON_REVISION__ < 3)
/* Timer1 can not be used for PWMOUT mode when a certain PPI mode is in use */
#define ANOMALY_05000155 (__SILICON_REVISION__ < 3)
-/* Killed 32-bit MMR write leads to next system MMR access thinking it should be 32-bit */
+/* Killed 32-Bit MMR Write Leads to Next System MMR Access Thinking It Should Be 32-Bit */
#define ANOMALY_05000157 (__SILICON_REVISION__ < 3)
-/* SPORT transmit data is not gated by external frame sync in certain conditions */
+/* SPORT Transmit Data Is Not Gated by External Frame Sync in Certain Conditions */
#define ANOMALY_05000163 (__SILICON_REVISION__ < 3)
-/* SDRAM auto-refresh and subsequent Power Ups */
+/* Undefined Behavior when Power-Up Sequence Is Issued to SDRAM during Auto-Refresh */
#define ANOMALY_05000168 (__SILICON_REVISION__ < 3)
-/* DATA CPLB page miss can result in lost write-through cache data writes */
+/* DATA CPLB Page Miss Can Result in Lost Write-Through Data Cache Writes */
#define ANOMALY_05000169 (__SILICON_REVISION__ < 3)
/* DMA vs Core accesses to external memory */
#define ANOMALY_05000173 (__SILICON_REVISION__ < 3)
@@ -309,15 +309,15 @@
#define ANOMALY_05000174 (__SILICON_REVISION__ < 3)
/* Overlapping Sequencer and Memory Stalls */
#define ANOMALY_05000175 (__SILICON_REVISION__ < 3)
-/* Multiplication of (-1) by (-1) followed by an accumulator saturation */
+/* Overflow Bit Asserted when Multiplication of -1 by -1 Followed by Accumulator Saturation */
#define ANOMALY_05000176 (__SILICON_REVISION__ < 3)
-/* Disabling the PPI resets the PPI configuration registers */
+/* Disabling the PPI Resets the PPI Configuration Registers */
#define ANOMALY_05000181 (__SILICON_REVISION__ < 3)
-/* PPI TX Mode with 2 External Frame Syncs */
+/* Early PPI Transmit when FS1 Asserts before FS2 in TX Mode with 2 External Frame Syncs */
#define ANOMALY_05000185 (__SILICON_REVISION__ < 3)
/* PPI does not invert the Driving PPICLK edge in Transmit Modes */
#define ANOMALY_05000191 (__SILICON_REVISION__ < 3)
-/* In PPI Transmit Modes with External Frame Syncs POLC */
+/* In PPI Transmit Modes with External Frame Syncs POLC bit must be set to 1 */
#define ANOMALY_05000192 (__SILICON_REVISION__ < 3)
/* Internal Voltage Regulator may not start up */
#define ANOMALY_05000206 (__SILICON_REVISION__ < 3)
@@ -326,6 +326,7 @@
#define ANOMALY_05000120 (0)
#define ANOMALY_05000149 (0)
#define ANOMALY_05000171 (0)
+#define ANOMALY_05000182 (0)
#define ANOMALY_05000220 (0)
#define ANOMALY_05000248 (0)
#define ANOMALY_05000266 (0)
@@ -345,5 +346,7 @@
#define ANOMALY_05000448 (0)
#define ANOMALY_05000456 (0)
#define ANOMALY_05000450 (0)
+#define ANOMALY_05000465 (0)
+#define ANOMALY_05000467 (0)
#endif
diff --git a/arch/blackfin/mach-bf533/include/mach/blackfin.h b/arch/blackfin/mach-bf533/include/mach/blackfin.h
index 045184f..39aa175 100644
--- a/arch/blackfin/mach-bf533/include/mach/blackfin.h
+++ b/arch/blackfin/mach-bf533/include/mach/blackfin.h
@@ -34,7 +34,6 @@
#define BF533_FAMILY
#include "bf533.h"
-#include "mem_map.h"
#include "defBF532.h"
#include "anomaly.h"
diff --git a/arch/blackfin/mach-bf533/include/mach/mem_map.h b/arch/blackfin/mach-bf533/include/mach/mem_map.h
index fc33b7c..197af1a 100644
--- a/arch/blackfin/mach-bf533/include/mach/mem_map.h
+++ b/arch/blackfin/mach-bf533/include/mach/mem_map.h
@@ -1,38 +1,16 @@
/*
- * File: include/asm-blackfin/mach-bf533/mem_map.h
- * Based on:
- * Author:
+ * BF533 memory map
*
- * Created:
- * Description:
- *
- * Rev:
- *
- * Modified:
- *
- * Bugs: Enter bugs at http://blackfin.uclinux.org/
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; see the file COPYING.
- * If not, write to the Free Software Foundation,
- * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ * Copyright 2004-2009 Analog Devices Inc.
+ * Licensed under the GPL-2 or later.
*/
-#ifndef _MEM_MAP_533_H_
-#define _MEM_MAP_533_H_
+#ifndef __BFIN_MACH_MEM_MAP_H__
+#define __BFIN_MACH_MEM_MAP_H__
-#define COREMMR_BASE 0xFFE00000 /* Core MMRs */
-#define SYSMMR_BASE 0xFFC00000 /* System MMRs */
+#ifndef __BFIN_MEM_MAP_H__
+# error "do not include mach/mem_map.h directly -- use asm/mem_map.h"
+#endif
/* Async Memory Banks */
#define ASYNC_BANK3_BASE 0x20300000 /* Async Bank 3 */
@@ -158,20 +136,4 @@
#endif
-/* Level 2 Memory - none */
-
-#define L2_START 0
-#define L2_LENGTH 0
-
-/* Scratch Pad Memory */
-
-#define L1_SCRATCH_START 0xFFB00000
-#define L1_SCRATCH_LENGTH 0x1000
-
-#define GET_PDA_SAFE(preg) \
- preg.l = _cpu_pda; \
- preg.h = _cpu_pda;
-
-#define GET_PDA(preg, dreg) GET_PDA_SAFE(preg)
-
-#endif /* _MEM_MAP_533_H_ */
+#endif
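
The guard added above means mach/mem_map.h can no longer be included directly; it must be reached through a common asm/mem_map.h that defines __BFIN_MEM_MAP_H__ first. That wrapper is not part of this hunk, so the sketch below is only an assumed shape, not the real header:

/* Assumed shape of the asm/mem_map.h wrapper the new guard expects. */
#ifndef __BFIN_MEM_MAP_H__
#define __BFIN_MEM_MAP_H__

#include <mach/mem_map.h>       /* allowed now that the guard macro is set */

/* machine-independent defaults shared by all Blackfin parts would go here */

#endif /* __BFIN_MEM_MAP_H__ */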
diff --git a/arch/blackfin/mach-bf537/boards/stamp.c b/arch/blackfin/mach-bf537/boards/stamp.c
index ff7228c..c1f76dd 100644
--- a/arch/blackfin/mach-bf537/boards/stamp.c
+++ b/arch/blackfin/mach-bf537/boards/stamp.c
@@ -79,7 +79,6 @@
static struct isp1760_platform_data isp1760_priv = {
.is_isp1761 = 0,
- .port1_disable = 0,
.bus_width_16 = 1,
.port1_otg = 0,
.analog_oc = 0,
diff --git a/arch/blackfin/mach-bf537/include/mach/anomaly.h b/arch/blackfin/mach-bf537/include/mach/anomaly.h
index fc96634..57c128c 100644
--- a/arch/blackfin/mach-bf537/include/mach/anomaly.h
+++ b/arch/blackfin/mach-bf537/include/mach/anomaly.h
@@ -34,13 +34,13 @@
# define ANOMALY_BF537 0
#endif
-/* Multi-issue instruction with dsp32shiftimm in slot1 and P-reg store in slot 2 not supported */
+/* Multi-Issue Instruction with dsp32shiftimm in slot1 and P-reg Store in slot2 Not Supported */
#define ANOMALY_05000074 (1)
/* DMA_RUN Bit Is Not Valid after a Peripheral Receive Channel DMA Stops */
#define ANOMALY_05000119 (1)
/* Rx.H Cannot Be Used to Access 16-bit System MMR Registers */
#define ANOMALY_05000122 (1)
-/* Killed 32-bit MMR write leads to next system MMR access thinking it should be 32-bit */
+/* Killed 32-Bit MMR Write Leads to Next System MMR Access Thinking It Should Be 32-Bit */
#define ANOMALY_05000157 (__SILICON_REVISION__ < 2)
/* PPI_DELAY Not Functional in PPI Modes with 0 Frame Syncs */
#define ANOMALY_05000180 (1)
@@ -50,11 +50,11 @@
#define ANOMALY_05000244 (__SILICON_REVISION__ < 3)
/* False Hardware Error from an Access in the Shadow of a Conditional Branch */
#define ANOMALY_05000245 (1)
-/* CLKIN Buffer Output Enable Reset Behavior Is Changed */
+/* Buffered CLKIN Output Is Disabled by Default */
#define ANOMALY_05000247 (1)
/* Incorrect Bit Shift of Data Word in Multichannel (TDM) Mode in Certain Conditions */
#define ANOMALY_05000250 (__SILICON_REVISION__ < 3)
-/* EMAC Tx DMA error after an early frame abort */
+/* EMAC TX DMA Error After an Early Frame Abort */
#define ANOMALY_05000252 (__SILICON_REVISION__ < 3)
/* Maximum External Clock Speed for Timers */
#define ANOMALY_05000253 (__SILICON_REVISION__ < 3)
@@ -62,7 +62,7 @@
#define ANOMALY_05000254 (__SILICON_REVISION__ > 2)
/* Entering Hibernate State with RTC Seconds Interrupt Not Functional */
#define ANOMALY_05000255 (__SILICON_REVISION__ < 3)
-/* EMAC MDIO input latched on wrong MDC edge */
+/* EMAC MDIO Input Latched on Wrong MDC Edge */
#define ANOMALY_05000256 (__SILICON_REVISION__ < 3)
/* Interrupt/Exception During Short Hardware Loop May Cause Bad Instruction Fetches */
#define ANOMALY_05000257 (__SILICON_REVISION__ < 3)
@@ -80,7 +80,7 @@
#define ANOMALY_05000264 (__SILICON_REVISION__ < 3)
/* Sensitivity To Noise with Slow Input Edge Rates on External SPORT TX and RX Clocks */
#define ANOMALY_05000265 (1)
-/* Memory DMA error when peripheral DMA is running with non-zero DEB_TRAFFIC_PERIOD */
+/* Memory DMA Error when Peripheral DMA Is Running with Non-Zero DEB_TRAFFIC_PERIOD */
#define ANOMALY_05000268 (__SILICON_REVISION__ < 3)
/* High I/O Activity Causes Output Voltage of Internal Voltage Regulator (Vddint) to Decrease */
#define ANOMALY_05000270 (__SILICON_REVISION__ < 3)
@@ -92,15 +92,15 @@
#define ANOMALY_05000277 (__SILICON_REVISION__ < 3)
/* Disabling Peripherals with DMA Running May Cause DMA System Instability */
#define ANOMALY_05000278 (((ANOMALY_BF536 || ANOMALY_BF537) && __SILICON_REVISION__ < 3) || (ANOMALY_BF534 && __SILICON_REVISION__ < 2))
-/* SPI Master boot mode does not work well with Atmel Data flash devices */
+/* SPI Master Boot Mode Does Not Work Well with Atmel Data Flash Devices */
#define ANOMALY_05000280 (1)
-/* False Hardware Error Exception When ISR Context Is Not Restored */
+/* False Hardware Error Exception when ISR Context Is Not Restored */
#define ANOMALY_05000281 (__SILICON_REVISION__ < 3)
/* Memory DMA Corruption with 32-Bit Data and Traffic Control */
#define ANOMALY_05000282 (__SILICON_REVISION__ < 3)
-/* System MMR Write Is Stalled Indefinitely When Killed in a Particular Stage */
+/* System MMR Write Is Stalled Indefinitely when Killed in a Particular Stage */
#define ANOMALY_05000283 (__SILICON_REVISION__ < 3)
-/* New Feature: EMAC TX DMA Word Alignment (Not Available On Older Silicon) */
+/* TXDWA Bit in EMAC_SYSCTL Register Is Not Functional */
#define ANOMALY_05000285 (__SILICON_REVISION__ < 3)
/* SPORTs May Receive Bad Data If FIFOs Fill Up */
#define ANOMALY_05000288 (__SILICON_REVISION__ < 3)
@@ -112,25 +112,25 @@
#define ANOMALY_05000305 (__SILICON_REVISION__ < 3)
/* SCKELOW Bit Does Not Maintain State Through Hibernate */
#define ANOMALY_05000307 (__SILICON_REVISION__ < 3)
-/* Writing UART_THR while UART clock is disabled sends erroneous start bit */
+/* Writing UART_THR While UART Clock Is Disabled Sends Erroneous Start Bit */
#define ANOMALY_05000309 (__SILICON_REVISION__ < 3)
/* False Hardware Errors Caused by Fetches at the Boundary of Reserved Memory */
#define ANOMALY_05000310 (1)
-/* Errors When SSYNC, CSYNC, or Loads to LT, LB and LC Registers Are Interrupted */
+/* Errors when SSYNC, CSYNC, or Loads to LT, LB and LC Registers Are Interrupted */
#define ANOMALY_05000312 (1)
/* PPI Is Level-Sensitive on First Transfer In Single Frame Sync Modes */
#define ANOMALY_05000313 (1)
-/* Killed System MMR Write Completes Erroneously On Next System MMR Access */
+/* Killed System MMR Write Completes Erroneously on Next System MMR Access */
#define ANOMALY_05000315 (__SILICON_REVISION__ < 3)
-/* EMAC RMII mode: collisions occur in Full Duplex mode */
+/* EMAC RMII Mode: Collisions Occur in Full Duplex Mode */
#define ANOMALY_05000316 (__SILICON_REVISION__ < 3)
-/* EMAC RMII mode: TX frames in half duplex fail with status No Carrier */
+/* EMAC RMII Mode: TX Frames in Half Duplex Fail with Status "No Carrier" */
#define ANOMALY_05000321 (__SILICON_REVISION__ < 3)
-/* EMAC RMII mode at 10-Base-T speed: RX frames not received properly */
+/* EMAC RMII Mode at 10-Base-T Speed: RX Frames Not Received Properly */
#define ANOMALY_05000322 (1)
/* Ethernet MAC MDIO Reads Do Not Meet IEEE Specification */
#define ANOMALY_05000341 (__SILICON_REVISION__ >= 3)
-/* New Feature: UART Remains Enabled after UART Boot */
+/* UART Gets Disabled after UART Boot */
#define ANOMALY_05000350 (__SILICON_REVISION__ >= 3)
/* Regulator Programming Blocked when Hibernate Wakeup Source Remains Active */
#define ANOMALY_05000355 (1)
@@ -154,7 +154,7 @@
#define ANOMALY_05000426 (1)
/* IFLUSH Instruction at End of Hardware Loop Causes Infinite Stall */
#define ANOMALY_05000443 (1)
-/* False Hardware Error when RETI points to invalid memory */
+/* False Hardware Error when RETI Points to Invalid Memory */
#define ANOMALY_05000461 (1)
/* Anomalies that don't exist on this proc */
@@ -165,14 +165,17 @@
#define ANOMALY_05000158 (0)
#define ANOMALY_05000171 (0)
#define ANOMALY_05000179 (0)
+#define ANOMALY_05000182 (0)
#define ANOMALY_05000183 (0)
#define ANOMALY_05000198 (0)
+#define ANOMALY_05000202 (0)
#define ANOMALY_05000215 (0)
#define ANOMALY_05000220 (0)
#define ANOMALY_05000227 (0)
#define ANOMALY_05000230 (0)
#define ANOMALY_05000231 (0)
#define ANOMALY_05000233 (0)
+#define ANOMALY_05000234 (0)
#define ANOMALY_05000242 (0)
#define ANOMALY_05000248 (0)
#define ANOMALY_05000266 (0)
@@ -195,5 +198,7 @@
#define ANOMALY_05000448 (0)
#define ANOMALY_05000456 (0)
#define ANOMALY_05000450 (0)
+#define ANOMALY_05000465 (0)
+#define ANOMALY_05000467 (0)
#endif
diff --git a/arch/blackfin/mach-bf537/include/mach/blackfin.h b/arch/blackfin/mach-bf537/include/mach/blackfin.h
index 7d6069c..f5e5015 100644
--- a/arch/blackfin/mach-bf537/include/mach/blackfin.h
+++ b/arch/blackfin/mach-bf537/include/mach/blackfin.h
@@ -35,7 +35,6 @@
#define BF537_FAMILY
#include "bf537.h"
-#include "mem_map.h"
#include "defBF534.h"
#include "anomaly.h"
diff --git a/arch/blackfin/mach-bf537/include/mach/mem_map.h b/arch/blackfin/mach-bf537/include/mach/mem_map.h
index f9010c4..942f08d 100644
--- a/arch/blackfin/mach-bf537/include/mach/mem_map.h
+++ b/arch/blackfin/mach-bf537/include/mach/mem_map.h
@@ -1,38 +1,16 @@
/*
- * file: include/asm-blackfin/mach-bf537/mem_map.h
- * based on:
- * author:
+ * BF537 memory map
*
- * created:
- * description:
- * Memory MAP Common header file for blackfin BF537/6/4 of processors.
- * rev:
- *
- * modified:
- *
- * bugs: enter bugs at http://blackfin.uclinux.org/
- *
- * this program is free software; you can redistribute it and/or modify
- * it under the terms of the gnu general public license as published by
- * the free software foundation; either version 2, or (at your option)
- * any later version.
- *
- * this program is distributed in the hope that it will be useful,
- * but without any warranty; without even the implied warranty of
- * merchantability or fitness for a particular purpose. see the
- * gnu general public license for more details.
- *
- * you should have received a copy of the gnu general public license
- * along with this program; see the file copying.
- * if not, write to the free software foundation,
- * 59 temple place - suite 330, boston, ma 02111-1307, usa.
+ * Copyright 2004-2009 Analog Devices Inc.
+ * Licensed under the GPL-2 or later.
*/
-#ifndef _MEM_MAP_537_H_
-#define _MEM_MAP_537_H_
+#ifndef __BFIN_MACH_MEM_MAP_H__
+#define __BFIN_MACH_MEM_MAP_H__
-#define COREMMR_BASE 0xFFE00000 /* Core MMRs */
-#define SYSMMR_BASE 0xFFC00000 /* System MMRs */
+#ifndef __BFIN_MEM_MAP_H__
+# error "do not include mach/mem_map.h directly -- use asm/mem_map.h"
+#endif
/* Async Memory Banks */
#define ASYNC_BANK3_BASE 0x20300000 /* Async Bank 3 */
@@ -166,20 +144,4 @@
#endif
-/* Level 2 Memory - none */
-
-#define L2_START 0
-#define L2_LENGTH 0
-
-/* Scratch Pad Memory */
-
-#define L1_SCRATCH_START 0xFFB00000
-#define L1_SCRATCH_LENGTH 0x1000
-
-#define GET_PDA_SAFE(preg) \
- preg.l = _cpu_pda; \
- preg.h = _cpu_pda;
-
-#define GET_PDA(preg, dreg) GET_PDA_SAFE(preg)
-
-#endif /* _MEM_MAP_537_H_ */
+#endif
diff --git a/arch/blackfin/mach-bf538/include/mach/anomaly.h b/arch/blackfin/mach-bf538/include/mach/anomaly.h
index 175ca9e..c97acdf 100644
--- a/arch/blackfin/mach-bf538/include/mach/anomaly.h
+++ b/arch/blackfin/mach-bf538/include/mach/anomaly.h
@@ -30,13 +30,13 @@
# define ANOMALY_BF539 0
#endif
-/* Multi-issue instruction with dsp32shiftimm in slot1 and P-reg store in slot 2 not supported */
+/* Multi-Issue Instruction with dsp32shiftimm in slot1 and P-reg Store in slot2 Not Supported */
#define ANOMALY_05000074 (1)
/* DMA_RUN Bit Is Not Valid after a Peripheral Receive Channel DMA Stops */
#define ANOMALY_05000119 (1)
/* Rx.H Cannot Be Used to Access 16-bit System MMR Registers */
#define ANOMALY_05000122 (1)
-/* PPI Data Lengths Between 8 and 16 Do Not Zero Out Upper Bits */
+/* PPI Data Lengths between 8 and 16 Do Not Zero Out Upper Bits */
#define ANOMALY_05000166 (1)
/* PPI_COUNT Cannot Be Programmed to 0 in General Purpose TX or RX Modes */
#define ANOMALY_05000179 (1)
@@ -70,11 +70,11 @@
#define ANOMALY_05000277 (__SILICON_REVISION__ < 4)
/* Disabling Peripherals with DMA Running May Cause DMA System Instability */
#define ANOMALY_05000278 (__SILICON_REVISION__ < 4)
-/* False Hardware Error Exception When ISR Context Is Not Restored */
+/* False Hardware Error Exception when ISR Context Is Not Restored */
#define ANOMALY_05000281 (__SILICON_REVISION__ < 4)
/* Memory DMA Corruption with 32-Bit Data and Traffic Control */
#define ANOMALY_05000282 (__SILICON_REVISION__ < 4)
-/* System MMR Write Is Stalled Indefinitely When Killed in a Particular Stage */
+/* System MMR Write Is Stalled Indefinitely when Killed in a Particular Stage */
#define ANOMALY_05000283 (__SILICON_REVISION__ < 4)
/* SPORTs May Receive Bad Data If FIFOs Fill Up */
#define ANOMALY_05000288 (__SILICON_REVISION__ < 4)
@@ -92,11 +92,11 @@
#define ANOMALY_05000307 (__SILICON_REVISION__ < 4)
/* False Hardware Errors Caused by Fetches at the Boundary of Reserved Memory */
#define ANOMALY_05000310 (1)
-/* Errors When SSYNC, CSYNC, or Loads to LT, LB and LC Registers Are Interrupted */
+/* Errors when SSYNC, CSYNC, or Loads to LT, LB and LC Registers Are Interrupted */
#define ANOMALY_05000312 (__SILICON_REVISION__ < 5)
/* PPI Is Level-Sensitive on First Transfer In Single Frame Sync Modes */
#define ANOMALY_05000313 (__SILICON_REVISION__ < 4)
-/* Killed System MMR Write Completes Erroneously On Next System MMR Access */
+/* Killed System MMR Write Completes Erroneously on Next System MMR Access */
#define ANOMALY_05000315 (__SILICON_REVISION__ < 4)
/* PFx Glitch on Write to FIO_FLAG_D or FIO_FLAG_T */
#define ANOMALY_05000318 (ANOMALY_BF539 && __SILICON_REVISION__ < 4)
@@ -110,7 +110,7 @@
#define ANOMALY_05000371 (__SILICON_REVISION__ < 5)
/* Entering Hibernate State with Peripheral Wakeups Enabled Draws Excess Current */
#define ANOMALY_05000374 (__SILICON_REVISION__ == 4)
-/* New Feature: Open-Drain GPIO Outputs on PC1 and PC4 (Not Available on Older Silicon) */
+/* GPIO Pins PC1 and PC4 Can Function as Normal Outputs */
#define ANOMALY_05000375 (__SILICON_REVISION__ < 4)
/* SSYNC Stalls Processor when Executed from Non-Cacheable Memory */
#define ANOMALY_05000402 (__SILICON_REVISION__ < 4)
@@ -126,26 +126,32 @@
#define ANOMALY_05000436 (__SILICON_REVISION__ > 3)
/* IFLUSH Instruction at End of Hardware Loop Causes Infinite Stall */
#define ANOMALY_05000443 (1)
-/* False Hardware Error when RETI points to invalid memory */
+/* False Hardware Error when RETI Points to Invalid Memory */
#define ANOMALY_05000461 (1)
/* Anomalies that don't exist on this proc */
#define ANOMALY_05000099 (0)
#define ANOMALY_05000120 (0)
+#define ANOMALY_05000125 (0)
#define ANOMALY_05000149 (0)
#define ANOMALY_05000158 (0)
#define ANOMALY_05000171 (0)
+#define ANOMALY_05000182 (0)
#define ANOMALY_05000198 (0)
+#define ANOMALY_05000202 (0)
#define ANOMALY_05000215 (0)
#define ANOMALY_05000220 (0)
#define ANOMALY_05000227 (0)
#define ANOMALY_05000230 (0)
#define ANOMALY_05000231 (0)
+#define ANOMALY_05000234 (0)
#define ANOMALY_05000242 (0)
#define ANOMALY_05000248 (0)
#define ANOMALY_05000250 (0)
#define ANOMALY_05000254 (0)
+#define ANOMALY_05000257 (0)
#define ANOMALY_05000263 (0)
+#define ANOMALY_05000266 (0)
#define ANOMALY_05000274 (0)
#define ANOMALY_05000287 (0)
#define ANOMALY_05000305 (0)
@@ -166,5 +172,7 @@
#define ANOMALY_05000448 (0)
#define ANOMALY_05000456 (0)
#define ANOMALY_05000450 (0)
+#define ANOMALY_05000465 (0)
+#define ANOMALY_05000467 (0)
#endif
diff --git a/arch/blackfin/mach-bf538/include/mach/blackfin.h b/arch/blackfin/mach-bf538/include/mach/blackfin.h
index 6f62835..9496196 100644
--- a/arch/blackfin/mach-bf538/include/mach/blackfin.h
+++ b/arch/blackfin/mach-bf538/include/mach/blackfin.h
@@ -35,7 +35,6 @@
#define BF538_FAMILY
#include "bf538.h"
-#include "mem_map.h"
#include "defBF539.h"
#include "anomaly.h"
diff --git a/arch/blackfin/mach-bf538/include/mach/mem_map.h b/arch/blackfin/mach-bf538/include/mach/mem_map.h
index 7681196..aff00f4 100644
--- a/arch/blackfin/mach-bf538/include/mach/mem_map.h
+++ b/arch/blackfin/mach-bf538/include/mach/mem_map.h
@@ -1,38 +1,16 @@
/*
- * File: include/asm-blackfin/mach-bf538/mem_map.h
- * Based on:
- * Author:
+ * BF538 memory map
*
- * Created:
- * Description:
- *
- * Rev:
- *
- * Modified:
- *
- * Bugs: Enter bugs at http://blackfin.uclinux.org/
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; see the file COPYING.
- * If not, write to the Free Software Foundation,
- * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ * Copyright 2004-2009 Analog Devices Inc.
+ * Licensed under the GPL-2 or later.
*/
-#ifndef _MEM_MAP_538_H_
-#define _MEM_MAP_538_H_
+#ifndef __BFIN_MACH_MEM_MAP_H__
+#define __BFIN_MACH_MEM_MAP_H__
-#define COREMMR_BASE 0xFFE00000 /* Core MMRs */
-#define SYSMMR_BASE 0xFFC00000 /* System MMRs */
+#ifndef __BFIN_MEM_MAP_H__
+# error "do not include mach/mem_map.h directly -- use asm/mem_map.h"
+#endif
/* Async Memory Banks */
#define ASYNC_BANK3_BASE 0x20300000 /* Async Bank 3 */
@@ -93,21 +71,4 @@
#define BFIN_DSUPBANKS 0
#endif /*CONFIG_BFIN_DCACHE*/
-
-/* Level 2 Memory - none */
-
-#define L2_START 0
-#define L2_LENGTH 0
-
-/* Scratch Pad Memory */
-
-#define L1_SCRATCH_START 0xFFB00000
-#define L1_SCRATCH_LENGTH 0x1000
-
-#define GET_PDA_SAFE(preg) \
- preg.l = _cpu_pda; \
- preg.h = _cpu_pda;
-
-#define GET_PDA(preg, dreg) GET_PDA_SAFE(preg)
-
-#endif /* _MEM_MAP_538_H_ */
+#endif
diff --git a/arch/blackfin/mach-bf548/boards/ezkit.c b/arch/blackfin/mach-bf548/boards/ezkit.c
index 805a57b..81f5b95 100644
--- a/arch/blackfin/mach-bf548/boards/ezkit.c
+++ b/arch/blackfin/mach-bf548/boards/ezkit.c
@@ -76,7 +76,6 @@
static struct isp1760_platform_data isp1760_priv = {
.is_isp1761 = 0,
- .port1_disable = 0,
.bus_width_16 = 1,
.port1_otg = 0,
.analog_oc = 0,
diff --git a/arch/blackfin/mach-bf548/include/mach/anomaly.h b/arch/blackfin/mach-bf548/include/mach/anomaly.h
index c510ae6..18a4cd24 100644
--- a/arch/blackfin/mach-bf548/include/mach/anomaly.h
+++ b/arch/blackfin/mach-bf548/include/mach/anomaly.h
@@ -18,7 +18,7 @@
# error will not work on BF548 silicon version 0.0, or 0.1
#endif
-/* Multi-issue instruction with dsp32shiftimm in slot1 and P-reg store in slot 2 not supported */
+/* Multi-Issue Instruction with dsp32shiftimm in slot1 and P-reg Store in slot2 Not Supported */
#define ANOMALY_05000074 (1)
/* DMA_RUN Bit Is Not Valid after a Peripheral Receive Channel DMA Stops */
#define ANOMALY_05000119 (1)
@@ -30,17 +30,17 @@
#define ANOMALY_05000265 (1)
/* Certain Data Cache Writethrough Modes Fail for Vddint <= 0.9V */
#define ANOMALY_05000272 (1)
-/* False Hardware Error Exception When ISR Context Is Not Restored */
+/* False Hardware Error Exception when ISR Context Is Not Restored */
#define ANOMALY_05000281 (__SILICON_REVISION__ < 1)
/* SSYNCs After Writes To CAN/DMA MMR Registers Are Not Always Handled Correctly */
#define ANOMALY_05000304 (__SILICON_REVISION__ < 1)
/* False Hardware Errors Caused by Fetches at the Boundary of Reserved Memory */
#define ANOMALY_05000310 (1)
-/* Errors When SSYNC, CSYNC, or Loads to LT, LB and LC Registers Are Interrupted */
+/* Errors when SSYNC, CSYNC, or Loads to LT, LB and LC Registers Are Interrupted */
#define ANOMALY_05000312 (__SILICON_REVISION__ < 1)
/* TWI Slave Boot Mode Is Not Functional */
#define ANOMALY_05000324 (__SILICON_REVISION__ < 1)
-/* External FIFO Boot Mode Is Not Functional */
+/* FIFO Boot Mode Not Functional */
#define ANOMALY_05000325 (__SILICON_REVISION__ < 2)
/* Data Lost When Core and DMA Accesses Are Made to the USB FIFO Simultaneously */
#define ANOMALY_05000327 (__SILICON_REVISION__ < 1)
@@ -178,8 +178,12 @@
#define ANOMALY_05000450 (1)
/* USB Receive Interrupt Is Not Generated in DMA Mode 1 */
#define ANOMALY_05000456 (__SILICON_REVISION__ < 3)
-/* False Hardware Error when RETI points to invalid memory */
+/* False Hardware Error when RETI Points to Invalid Memory */
#define ANOMALY_05000461 (1)
+/* USB Rx DMA hang */
+#define ANOMALY_05000465 (1)
+/* Possible RX data corruption when control & data EP FIFOs are accessed via the core */
+#define ANOMALY_05000467 (1)
/* Anomalies that don't exist on this proc */
#define ANOMALY_05000099 (0)
@@ -189,30 +193,36 @@
#define ANOMALY_05000158 (0)
#define ANOMALY_05000171 (0)
#define ANOMALY_05000179 (0)
+#define ANOMALY_05000182 (0)
#define ANOMALY_05000183 (0)
#define ANOMALY_05000198 (0)
+#define ANOMALY_05000202 (0)
#define ANOMALY_05000215 (0)
#define ANOMALY_05000220 (0)
#define ANOMALY_05000227 (0)
#define ANOMALY_05000230 (0)
#define ANOMALY_05000231 (0)
#define ANOMALY_05000233 (0)
+#define ANOMALY_05000234 (0)
#define ANOMALY_05000242 (0)
#define ANOMALY_05000244 (0)
#define ANOMALY_05000248 (0)
#define ANOMALY_05000250 (0)
#define ANOMALY_05000254 (0)
+#define ANOMALY_05000257 (0)
#define ANOMALY_05000261 (0)
#define ANOMALY_05000263 (0)
#define ANOMALY_05000266 (0)
#define ANOMALY_05000273 (0)
#define ANOMALY_05000274 (0)
#define ANOMALY_05000278 (0)
+#define ANOMALY_05000283 (0)
#define ANOMALY_05000287 (0)
#define ANOMALY_05000301 (0)
#define ANOMALY_05000305 (0)
#define ANOMALY_05000307 (0)
#define ANOMALY_05000311 (0)
+#define ANOMALY_05000315 (0)
#define ANOMALY_05000323 (0)
#define ANOMALY_05000362 (1)
#define ANOMALY_05000363 (0)
diff --git a/arch/blackfin/mach-bf548/include/mach/blackfin.h b/arch/blackfin/mach-bf548/include/mach/blackfin.h
index cf6c150..6b97396 100644
--- a/arch/blackfin/mach-bf548/include/mach/blackfin.h
+++ b/arch/blackfin/mach-bf548/include/mach/blackfin.h
@@ -33,7 +33,6 @@
#define _MACH_BLACKFIN_H_
#include "bf548.h"
-#include "mem_map.h"
#include "anomaly.h"
#ifdef CONFIG_BF542
diff --git a/arch/blackfin/mach-bf548/include/mach/mem_map.h b/arch/blackfin/mach-bf548/include/mach/mem_map.h
index 70b9c11..caac2df 100644
--- a/arch/blackfin/mach-bf548/include/mach/mem_map.h
+++ b/arch/blackfin/mach-bf548/include/mach/mem_map.h
@@ -1,38 +1,16 @@
/*
- * file: include/asm-blackfin/mach-bf548/mem_map.h
- * based on:
- * author:
+ * BF548 memory map
*
- * created:
- * description:
- * Memory MAP Common header file for blackfin BF537/6/4 of processors.
- * rev:
- *
- * modified:
- *
- * bugs: enter bugs at http://blackfin.uclinux.org/
- *
- * this program is free software; you can redistribute it and/or modify
- * it under the terms of the gnu general public license as published by
- * the free software foundation; either version 2, or (at your option)
- * any later version.
- *
- * this program is distributed in the hope that it will be useful,
- * but without any warranty; without even the implied warranty of
- * merchantability or fitness for a particular purpose. see the
- * gnu general public license for more details.
- *
- * you should have received a copy of the gnu general public license
- * along with this program; see the file copying.
- * if not, write to the free software foundation,
- * 59 temple place - suite 330, boston, ma 02111-1307, usa.
+ * Copyright 2004-2009 Analog Devices Inc.
+ * Licensed under the GPL-2 or later.
*/
-#ifndef _MEM_MAP_548_H_
-#define _MEM_MAP_548_H_
+#ifndef __BFIN_MACH_MEM_MAP_H__
+#define __BFIN_MACH_MEM_MAP_H__
-#define COREMMR_BASE 0xFFE00000 /* Core MMRs */
-#define SYSMMR_BASE 0xFFC00000 /* System MMRs */
+#ifndef __BFIN_MEM_MAP_H__
+# error "do not include mach/mem_map.h directly -- use asm/mem_map.h"
+#endif
/* Async Memory Banks */
#define ASYNC_BANK3_BASE 0x2C000000 /* Async Bank 3 */
@@ -103,15 +81,4 @@
# define L2_LENGTH 0x20000
#endif
-/* Scratch Pad Memory */
-
-#define L1_SCRATCH_START 0xFFB00000
-#define L1_SCRATCH_LENGTH 0x1000
-
-#define GET_PDA_SAFE(preg) \
- preg.l = _cpu_pda; \
- preg.h = _cpu_pda;
-
-#define GET_PDA(preg, dreg) GET_PDA_SAFE(preg)
-
-#endif/* _MEM_MAP_548_H_ */
+#endif
diff --git a/arch/blackfin/mach-bf561/boards/ezkit.c b/arch/blackfin/mach-bf561/boards/ezkit.c
index b5ef7ff..4df904f 100644
--- a/arch/blackfin/mach-bf561/boards/ezkit.c
+++ b/arch/blackfin/mach-bf561/boards/ezkit.c
@@ -62,7 +62,6 @@
static struct isp1760_platform_data isp1760_priv = {
.is_isp1761 = 0,
- .port1_disable = 0,
.bus_width_16 = 1,
.port1_otg = 0,
.analog_oc = 0,
diff --git a/arch/blackfin/mach-bf561/include/mach/anomaly.h b/arch/blackfin/mach-bf561/include/mach/anomaly.h
index dccd396..94b8e27 100644
--- a/arch/blackfin/mach-bf561/include/mach/anomaly.h
+++ b/arch/blackfin/mach-bf561/include/mach/anomaly.h
@@ -18,19 +18,19 @@
# error will not work on BF561 silicon version 0.0, 0.1, 0.2, or 0.4
#endif
-/* Multi-issue instruction with dsp32shiftimm in slot1 and P-reg store in slot 2 not supported */
+/* Multi-Issue Instruction with dsp32shiftimm in slot1 and P-reg Store in slot2 Not Supported */
#define ANOMALY_05000074 (1)
/* UART Line Status Register (UART_LSR) Bits Are Not Updated at the Same Time */
#define ANOMALY_05000099 (__SILICON_REVISION__ < 5)
-/* Trace Buffers may record discontinuities into emulation mode and/or exception, NMI, reset handlers */
+/* Trace Buffers May Contain Errors in Emulation Mode and/or Exception, NMI, Reset Handlers */
#define ANOMALY_05000116 (__SILICON_REVISION__ < 3)
-/* Testset instructions restricted to 32-bit aligned memory locations */
+/* TESTSET Instructions Restricted to 32-Bit Aligned Memory Locations */
#define ANOMALY_05000120 (1)
/* Rx.H Cannot Be Used to Access 16-bit System MMR Registers */
#define ANOMALY_05000122 (1)
-/* Erroneous exception when enabling cache */
+/* Erroneous Exception when Enabling Cache */
#define ANOMALY_05000125 (__SILICON_REVISION__ < 3)
-/* Signbits instruction not functional under certain conditions */
+/* SIGNBITS Instruction Not Functional under Certain Conditions */
#define ANOMALY_05000127 (1)
/* Two bits in the Watchpoint Status Register (WPSTAT) are swapped */
#define ANOMALY_05000134 (__SILICON_REVISION__ < 3)
@@ -40,7 +40,7 @@
#define ANOMALY_05000136 (__SILICON_REVISION__ < 3)
/* Allowing the SPORT RX FIFO to fill will cause an overflow */
#define ANOMALY_05000140 (__SILICON_REVISION__ < 3)
-/* An Infinite Stall occurs with a particular sequence of consecutive dual dag events */
+/* Infinite Stall may occur with a particular sequence of consecutive dual dag events */
#define ANOMALY_05000141 (__SILICON_REVISION__ < 3)
/* Interrupts may be lost when a programmable input flag is configured to be edge sensitive */
#define ANOMALY_05000142 (__SILICON_REVISION__ < 3)
@@ -52,7 +52,7 @@
#define ANOMALY_05000146 (__SILICON_REVISION__ < 3)
/* Source MDMA descriptor may stop with a DMA Error near beginning of descriptor fetch */
#define ANOMALY_05000147 (__SILICON_REVISION__ < 3)
-/* IMDMA S1/D1 channel may stall */
+/* IMDMA S1/D1 Channel May Stall */
#define ANOMALY_05000149 (1)
/* DMA engine may lose data due to incorrect handshaking */
#define ANOMALY_05000150 (__SILICON_REVISION__ < 3)
@@ -66,7 +66,7 @@
#define ANOMALY_05000154 (__SILICON_REVISION__ < 3)
/* Timers in PWM-Out Mode with PPI GP Receive (Input) Mode with 0 Frame Syncs */
#define ANOMALY_05000156 (__SILICON_REVISION__ < 4)
-/* Killed 32-bit MMR write leads to next system MMR access thinking it should be 32-bit */
+/* Killed 32-Bit MMR Write Leads to Next System MMR Access Thinking It Should Be 32-Bit */
#define ANOMALY_05000157 (__SILICON_REVISION__ < 3)
/* DMA Lock-up at CCLK to SCLK ratios of 4:1, 2:1, or 1:1 */
#define ANOMALY_05000159 (__SILICON_REVISION__ < 3)
@@ -76,17 +76,17 @@
#define ANOMALY_05000161 (__SILICON_REVISION__ < 3)
/* DMEM_CONTROL<12> is not set on Reset */
#define ANOMALY_05000162 (__SILICON_REVISION__ < 3)
-/* SPORT transmit data is not gated by external frame sync in certain conditions */
+/* SPORT Transmit Data Is Not Gated by External Frame Sync in Certain Conditions */
#define ANOMALY_05000163 (__SILICON_REVISION__ < 3)
-/* PPI Data Lengths Between 8 and 16 Do Not Zero Out Upper Bits */
+/* PPI Data Lengths between 8 and 16 Do Not Zero Out Upper Bits */
#define ANOMALY_05000166 (1)
/* Turning SPORTs on while External Frame Sync Is Active May Corrupt Data */
#define ANOMALY_05000167 (1)
-/* SDRAM auto-refresh and subsequent Power Ups */
+/* Undefined Behavior when Power-Up Sequence Is Issued to SDRAM during Auto-Refresh */
#define ANOMALY_05000168 (__SILICON_REVISION__ < 5)
-/* DATA CPLB page miss can result in lost write-through cache data writes */
+/* DATA CPLB Page Miss Can Result in Lost Write-Through Data Cache Writes */
#define ANOMALY_05000169 (__SILICON_REVISION__ < 5)
-/* Boot-ROM code modifies SICA_IWRx wakeup registers */
+/* Boot-ROM Modifies SICA_IWRx Wakeup Registers */
#define ANOMALY_05000171 (__SILICON_REVISION__ < 5)
/* DSPID register values incorrect */
#define ANOMALY_05000172 (__SILICON_REVISION__ < 3)
@@ -96,29 +96,29 @@
#define ANOMALY_05000174 (__SILICON_REVISION__ < 5)
/* Overlapping Sequencer and Memory Stalls */
#define ANOMALY_05000175 (__SILICON_REVISION__ < 5)
-/* Multiplication of (-1) by (-1) followed by an accumulator saturation */
+/* Overflow Bit Asserted when Multiplication of -1 by -1 Followed by Accumulator Saturation */
#define ANOMALY_05000176 (__SILICON_REVISION__ < 5)
/* PPI_COUNT Cannot Be Programmed to 0 in General Purpose TX or RX Modes */
#define ANOMALY_05000179 (__SILICON_REVISION__ < 5)
/* PPI_DELAY Not Functional in PPI Modes with 0 Frame Syncs */
#define ANOMALY_05000180 (1)
-/* Disabling the PPI resets the PPI configuration registers */
+/* Disabling the PPI Resets the PPI Configuration Registers */
#define ANOMALY_05000181 (__SILICON_REVISION__ < 5)
-/* IMDMA does not operate to full speed for 600MHz and higher devices */
+/* Internal Memory DMA Does Not Operate at Full Speed */
#define ANOMALY_05000182 (1)
-/* Timer Pin limitations for PPI TX Modes with External Frame Syncs */
+/* Timer Pin Limitations for PPI TX Modes with External Frame Syncs */
#define ANOMALY_05000184 (__SILICON_REVISION__ < 5)
-/* PPI TX Mode with 2 External Frame Syncs */
+/* Early PPI Transmit when FS1 Asserts before FS2 in TX Mode with 2 External Frame Syncs */
#define ANOMALY_05000185 (__SILICON_REVISION__ < 5)
-/* PPI packing with Data Length greater than 8 bits (not a meaningful mode) */
+/* Upper PPI Pins Driven when PPI Packing Enabled and Data Length >8 Bits */
#define ANOMALY_05000186 (__SILICON_REVISION__ < 5)
/* IMDMA Corrupted Data after a Halt */
#define ANOMALY_05000187 (1)
/* IMDMA Restrictions on Descriptor and Buffer Placement in Memory */
#define ANOMALY_05000188 (__SILICON_REVISION__ < 5)
-/* False Protection Exceptions */
+/* False Protection Exceptions when Speculative Fetch Is Cancelled */
#define ANOMALY_05000189 (__SILICON_REVISION__ < 5)
-/* PPI not functional at core voltage < 1Volt */
+/* PPI Not Functional at Core Voltage < 1Volt */
#define ANOMALY_05000190 (1)
/* PPI does not invert the Driving PPICLK edge in Transmit Modes */
#define ANOMALY_05000191 (__SILICON_REVISION__ < 3)
@@ -126,7 +126,7 @@
#define ANOMALY_05000193 (__SILICON_REVISION__ < 5)
/* Restarting SPORT in Specific Modes May Cause Data Corruption */
#define ANOMALY_05000194 (__SILICON_REVISION__ < 5)
-/* Failing MMR Accesses When Stalled by Preceding Memory Read */
+/* Failing MMR Accesses when Preceding Memory Read Stalls */
#define ANOMALY_05000198 (__SILICON_REVISION__ < 5)
/* Current DMA Address Shows Wrong Value During Carry Fix */
#define ANOMALY_05000199 (__SILICON_REVISION__ < 5)
@@ -134,9 +134,9 @@
#define ANOMALY_05000200 (__SILICON_REVISION__ < 5)
/* Possible Infinite Stall with Specific Dual-DAG Situation */
#define ANOMALY_05000202 (__SILICON_REVISION__ < 5)
-/* Incorrect data read with write-through cache and allocate cache lines on reads only mode */
+/* Incorrect Data Read with Writethrough "Allocate Cache Lines on Reads Only" Cache Mode */
#define ANOMALY_05000204 (__SILICON_REVISION__ < 5)
-/* Specific sequence that can cause DMA error or DMA stopping */
+/* Specific Sequence that Can Cause DMA Error or DMA Stopping */
#define ANOMALY_05000205 (__SILICON_REVISION__ < 5)
/* Recovery from "Brown-Out" Condition */
#define ANOMALY_05000207 (__SILICON_REVISION__ < 5)
@@ -158,7 +158,7 @@
#define ANOMALY_05000230 (__SILICON_REVISION__ < 5)
/* UART STB Bit Incorrectly Affects Receiver Setting */
#define ANOMALY_05000231 (__SILICON_REVISION__ < 5)
-/* SPORT data transmit lines are incorrectly driven in multichannel mode */
+/* SPORT Data Transmit Lines Are Incorrectly Driven in Multichannel Mode */
#define ANOMALY_05000232 (__SILICON_REVISION__ < 5)
/* DF Bit in PLL_CTL Register Does Not Respond to Hardware Reset */
#define ANOMALY_05000242 (__SILICON_REVISION__ < 5)
@@ -166,7 +166,7 @@
#define ANOMALY_05000244 (__SILICON_REVISION__ < 5)
/* False Hardware Error from an Access in the Shadow of a Conditional Branch */
#define ANOMALY_05000245 (__SILICON_REVISION__ < 5)
-/* TESTSET operation forces stall on the other core */
+/* TESTSET Operation Forces Stall on the Other Core */
#define ANOMALY_05000248 (__SILICON_REVISION__ < 5)
/* Incorrect Bit Shift of Data Word in Multichannel (TDM) Mode in Certain Conditions */
#define ANOMALY_05000250 (__SILICON_REVISION__ > 2 && __SILICON_REVISION__ < 5)
@@ -192,9 +192,9 @@
#define ANOMALY_05000264 (__SILICON_REVISION__ < 5)
/* Sensitivity To Noise with Slow Input Edge Rates on External SPORT TX and RX Clocks */
#define ANOMALY_05000265 (__SILICON_REVISION__ < 5)
-/* IMDMA destination IRQ status must be read prior to using IMDMA */
+/* IMDMA Destination IRQ Status Must Be Read Prior to Using IMDMA */
#define ANOMALY_05000266 (__SILICON_REVISION__ > 3)
-/* IMDMA may corrupt data under certain conditions */
+/* IMDMA May Corrupt Data under Certain Conditions */
#define ANOMALY_05000267 (1)
/* High I/O Activity Causes Output Voltage of Internal Voltage Regulator (Vddint) to Increase */
#define ANOMALY_05000269 (1)
@@ -202,7 +202,7 @@
#define ANOMALY_05000270 (1)
/* Certain Data Cache Writethrough Modes Fail for Vddint <= 0.9V */
#define ANOMALY_05000272 (1)
-/* Data cache write back to external synchronous memory may be lost */
+/* Data Cache Write Back to External Synchronous Memory May Be Lost */
#define ANOMALY_05000274 (1)
/* PPI Timing and Sampling Information Updates */
#define ANOMALY_05000275 (__SILICON_REVISION__ > 2)
@@ -212,17 +212,17 @@
#define ANOMALY_05000277 (__SILICON_REVISION__ < 3)
/* Disabling Peripherals with DMA Running May Cause DMA System Instability */
#define ANOMALY_05000278 (__SILICON_REVISION__ < 5)
-/* False Hardware Error Exception When ISR Context Is Not Restored */
+/* False Hardware Error Exception when ISR Context Is Not Restored */
#define ANOMALY_05000281 (__SILICON_REVISION__ < 5)
-/* System MMR Write Is Stalled Indefinitely When Killed in a Particular Stage */
+/* System MMR Write Is Stalled Indefinitely when Killed in a Particular Stage */
#define ANOMALY_05000283 (1)
-/* A read will receive incorrect data under certain conditions */
+/* Reads Will Receive Incorrect Data under Certain Conditions */
#define ANOMALY_05000287 (__SILICON_REVISION__ < 5)
/* SPORTs May Receive Bad Data If FIFOs Fill Up */
#define ANOMALY_05000288 (__SILICON_REVISION__ < 5)
/* Memory-To-Memory DMA Source/Destination Descriptors Must Be in Same Memory Space */
#define ANOMALY_05000301 (1)
-/* SSYNCs After Writes To DMA MMR Registers May Not Be Handled Correctly */
+/* SSYNCs after Writes to DMA MMR Registers May Not Be Handled Correctly */
#define ANOMALY_05000302 (1)
/* SPORT_HYS Bit in PLL_CTL Register Is Not Functional */
#define ANOMALY_05000305 (__SILICON_REVISION__ < 5)
@@ -230,25 +230,25 @@
#define ANOMALY_05000307 (__SILICON_REVISION__ < 5)
/* False Hardware Errors Caused by Fetches at the Boundary of Reserved Memory */
#define ANOMALY_05000310 (1)
-/* Errors When SSYNC, CSYNC, or Loads to LT, LB and LC Registers Are Interrupted */
+/* Errors when SSYNC, CSYNC, or Loads to LT, LB and LC Registers Are Interrupted */
#define ANOMALY_05000312 (1)
/* PPI Is Level-Sensitive on First Transfer In Single Frame Sync Modes */
#define ANOMALY_05000313 (1)
-/* Killed System MMR Write Completes Erroneously On Next System MMR Access */
+/* Killed System MMR Write Completes Erroneously on Next System MMR Access */
#define ANOMALY_05000315 (1)
-/* PF2 Output Remains Asserted After SPI Master Boot */
+/* PF2 Output Remains Asserted after SPI Master Boot */
#define ANOMALY_05000320 (__SILICON_REVISION__ > 3)
-/* Erroneous GPIO Flag Pin Operations Under Specific Sequences */
+/* Erroneous GPIO Flag Pin Operations under Specific Sequences */
#define ANOMALY_05000323 (1)
-/* SPORT Secondary Receive Channel Not Functional When Word Length Exceeds 16 Bits */
+/* SPORT Secondary Receive Channel Not Functional when Word Length >16 Bits */
#define ANOMALY_05000326 (__SILICON_REVISION__ > 3)
-/* New Feature: 24-Bit SPI Boot Mode Support (Not Available On Older Silicon) */
+/* 24-Bit SPI Boot Mode Is Not Functional */
#define ANOMALY_05000331 (__SILICON_REVISION__ < 5)
-/* New Feature: Slave SPI Boot Mode Supported (Not Available On Older Silicon) */
+/* Slave SPI Boot Mode Is Not Functional */
#define ANOMALY_05000332 (__SILICON_REVISION__ < 5)
-/* Flag Data Register Writes One SCLK Cycle After Edge Is Detected May Clear Interrupt Status */
+/* Flag Data Register Writes One SCLK Cycle after Edge Is Detected May Clear Interrupt Status */
#define ANOMALY_05000333 (__SILICON_REVISION__ < 5)
-/* New Feature: Additional PPI Frame Sync Sampling Options (Not Available on Older Silicon) */
+/* ALT_TIMING Bit in PLL_CTL Register Is Not Functional */
#define ANOMALY_05000339 (__SILICON_REVISION__ < 5)
/* Memory DMA FIFO Causes Throughput Degradation on Writes to External Memory */
#define ANOMALY_05000343 (__SILICON_REVISION__ < 5)
@@ -276,7 +276,7 @@
#define ANOMALY_05000428 (__SILICON_REVISION__ > 3)
/* IFLUSH Instruction at End of Hardware Loop Causes Infinite Stall */
#define ANOMALY_05000443 (1)
-/* False Hardware Error when RETI points to invalid memory */
+/* False Hardware Error when RETI Points to Invalid Memory */
#define ANOMALY_05000461 (1)
/* Anomalies that don't exist on this proc */
@@ -284,6 +284,7 @@
#define ANOMALY_05000158 (0)
#define ANOMALY_05000183 (0)
#define ANOMALY_05000233 (0)
+#define ANOMALY_05000234 (0)
#define ANOMALY_05000273 (0)
#define ANOMALY_05000311 (0)
#define ANOMALY_05000353 (1)
@@ -298,5 +299,7 @@
#define ANOMALY_05000448 (0)
#define ANOMALY_05000456 (0)
#define ANOMALY_05000450 (0)
+#define ANOMALY_05000465 (0)
+#define ANOMALY_05000467 (0)
#endif
diff --git a/arch/blackfin/mach-bf561/include/mach/blackfin.h b/arch/blackfin/mach-bf561/include/mach/blackfin.h
index f79f6626..8be3135 100644
--- a/arch/blackfin/mach-bf561/include/mach/blackfin.h
+++ b/arch/blackfin/mach-bf561/include/mach/blackfin.h
@@ -34,7 +34,6 @@
#define BF561_FAMILY
#include "bf561.h"
-#include "mem_map.h"
#include "defBF561.h"
#include "anomaly.h"
diff --git a/arch/blackfin/mach-bf561/include/mach/mem_map.h b/arch/blackfin/mach-bf561/include/mach/mem_map.h
index 419dffd..a63e15c 100644
--- a/arch/blackfin/mach-bf561/include/mach/mem_map.h
+++ b/arch/blackfin/mach-bf561/include/mach/mem_map.h
@@ -1,13 +1,16 @@
/*
- * Memory MAP
- * Common header file for blackfin BF561 of processors.
+ * BF561 memory map
+ *
+ * Copyright 2004-2009 Analog Devices Inc.
+ * Licensed under the GPL-2 or later.
*/
-#ifndef _MEM_MAP_561_H_
-#define _MEM_MAP_561_H_
+#ifndef __BFIN_MACH_MEM_MAP_H__
+#define __BFIN_MACH_MEM_MAP_H__
-#define COREMMR_BASE 0xFFE00000 /* Core MMRs */
-#define SYSMMR_BASE 0xFFC00000 /* System MMRs */
+#ifndef __BFIN_MEM_MAP_H__
+# error "do not include mach/mem_map.h directly -- use asm/mem_map.h"
+#endif
/* Async Memory Banks */
#define ASYNC_BANK3_BASE 0x2C000000 /* Async Bank 3 */
@@ -82,9 +85,6 @@
#define COREA_L1_SCRATCH_START 0xFFB00000
#define COREB_L1_SCRATCH_START 0xFF700000
-#define L1_SCRATCH_START COREA_L1_SCRATCH_START
-#define L1_SCRATCH_LENGTH 0x1000
-
#ifdef __ASSEMBLY__
/*
@@ -155,14 +155,42 @@
dreg = ROT dreg BY -1; \
dreg = CC;
-#else
-#define GET_PDA_SAFE(preg) \
- preg.l = _cpu_pda; \
- preg.h = _cpu_pda;
+static inline unsigned long get_l1_scratch_start_cpu(int cpu)
+{
+ return cpu ? COREB_L1_SCRATCH_START : COREA_L1_SCRATCH_START;
+}
+static inline unsigned long get_l1_code_start_cpu(int cpu)
+{
+ return cpu ? COREB_L1_CODE_START : COREA_L1_CODE_START;
+}
+static inline unsigned long get_l1_data_a_start_cpu(int cpu)
+{
+ return cpu ? COREB_L1_DATA_A_START : COREA_L1_DATA_A_START;
+}
+static inline unsigned long get_l1_data_b_start_cpu(int cpu)
+{
+ return cpu ? COREB_L1_DATA_B_START : COREA_L1_DATA_B_START;
+}
-#define GET_PDA(preg, dreg) GET_PDA_SAFE(preg)
+static inline unsigned long get_l1_scratch_start(void)
+{
+ return get_l1_scratch_start_cpu(blackfin_core_id());
+}
+static inline unsigned long get_l1_code_start(void)
+{
+ return get_l1_code_start_cpu(blackfin_core_id());
+}
+static inline unsigned long get_l1_data_a_start(void)
+{
+ return get_l1_data_a_start_cpu(blackfin_core_id());
+}
+static inline unsigned long get_l1_data_b_start(void)
+{
+ return get_l1_data_b_start_cpu(blackfin_core_id());
+}
+
#endif /* CONFIG_SMP */
#endif /* __ASSEMBLY__ */
-#endif /* _MEM_MAP_533_H_ */
+#endif
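
The BF561 map above replaces the GET_PDA assembly macros with C inline accessors that pick Core A or Core B addresses by core id. A short usage sketch, relying only on the helpers defined in this hunk (the wrapper function name is illustrative):

/* Usage sketch: resolve the L1 scratchpad base for a given core. */
static inline void *l1_scratch_base_for(int cpu)
{
        /* cpu 1 selects COREB_L1_SCRATCH_START, anything else Core A */
        return (void *)get_l1_scratch_start_cpu(cpu);
}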
diff --git a/arch/blackfin/mach-common/arch_checks.c b/arch/blackfin/mach-common/arch_checks.c
index da93d92..5998d86 100644
--- a/arch/blackfin/mach-common/arch_checks.c
+++ b/arch/blackfin/mach-common/arch_checks.c
@@ -74,7 +74,7 @@
/* if 220 exists, can not set External Memory WB and L2 not_cached, either External Memory not_cached and L2 WB */
#if ANOMALY_05000220 && \
- ((defined(CONFIG_BFIN_WB) && defined(CONFIG_BFIN_L2_NOT_CACHED)) || \
- (!defined(CONFIG_BFIN_DCACHE) && defined(CONFIG_BFIN_L2_WB)))
+ ((defined(CONFIG_BFIN_EXTMEM_WRITEBACK) && !defined(CONFIG_BFIN_L2_DCACHEABLE)) || \
+ (!defined(CONFIG_BFIN_EXTMEM_DCACHEABLE) && defined(CONFIG_BFIN_L2_WRITEBACK)))
# error You are exposing Anomaly 220 in this config, either config L2 as Write Through, or make External Memory WB.
#endif
diff --git a/arch/blackfin/mach-common/cpufreq.c b/arch/blackfin/mach-common/cpufreq.c
index 70e3411..85c6580 100644
--- a/arch/blackfin/mach-common/cpufreq.c
+++ b/arch/blackfin/mach-common/cpufreq.c
@@ -141,7 +141,7 @@
sclk = get_sclk() / 1000;
#if ANOMALY_05000273 || ANOMALY_05000274 || \
- (!defined(CONFIG_BF54x) && defined(CONFIG_BFIN_DCACHE))
+ (!defined(CONFIG_BF54x) && defined(CONFIG_BFIN_EXTMEM_DCACHEABLE))
min_cclk = sclk * 2;
#else
min_cclk = sclk;
diff --git a/arch/blackfin/mach-common/entry.S b/arch/blackfin/mach-common/entry.S
index 31fa313..5a4e7c7 100644
--- a/arch/blackfin/mach-common/entry.S
+++ b/arch/blackfin/mach-common/entry.S
@@ -1609,6 +1609,7 @@
.long _sys_preadv
.long _sys_pwritev
.long _sys_rt_tgsigqueueinfo
+ .long _sys_perf_counter_open
.rept NR_syscalls-(.-_sys_call_table)/4
.long _sys_ni_syscall
diff --git a/arch/blackfin/mach-common/ints-priority.c b/arch/blackfin/mach-common/ints-priority.c
index af70f09..b421501 100644
--- a/arch/blackfin/mach-common/ints-priority.c
+++ b/arch/blackfin/mach-common/ints-priority.c
@@ -1052,35 +1052,34 @@
set_irq_chained_handler(irq, bfin_demux_error_irq);
break;
#endif
-#if defined(CONFIG_TICKSOURCE_GPTMR0)
- case IRQ_TIMER0:
- set_irq_handler(irq, handle_percpu_irq);
- break;
-#endif
#ifdef CONFIG_SMP
case IRQ_SUPPLE_0:
case IRQ_SUPPLE_1:
set_irq_handler(irq, handle_percpu_irq);
break;
#endif
- default:
#ifdef CONFIG_IPIPE
- /*
- * We want internal interrupt sources to be
- * masked, because ISRs may trigger interrupts
- * recursively (e.g. DMA), but interrupts are
- * _not_ masked at CPU level. So let's handle
- * most of them as level interrupts, except
- * the timer interrupt which is special.
- */
- if (irq == IRQ_SYSTMR || irq == IRQ_CORETMR)
- set_irq_handler(irq, handle_simple_irq);
- else
- set_irq_handler(irq, handle_level_irq);
-#else /* !CONFIG_IPIPE */
+#ifndef CONFIG_TICKSOURCE_CORETMR
+ case IRQ_TIMER0:
set_irq_handler(irq, handle_simple_irq);
-#endif /* !CONFIG_IPIPE */
break;
+#endif /* !CONFIG_TICKSOURCE_CORETMR */
+ case IRQ_CORETMR:
+ set_irq_handler(irq, handle_simple_irq);
+ break;
+ default:
+ set_irq_handler(irq, handle_level_irq);
+ break;
+#else /* !CONFIG_IPIPE */
+#ifdef CONFIG_TICKSOURCE_GPTMR0
+ case IRQ_TIMER0:
+ set_irq_handler(irq, handle_percpu_irq);
+ break;
+#endif /* CONFIG_TICKSOURCE_GPTMR0 */
+ default:
+ set_irq_handler(irq, handle_simple_irq);
+ break;
+#endif /* !CONFIG_IPIPE */
}
}
@@ -1224,15 +1223,14 @@
asmlinkage int __ipipe_grab_irq(int vec, struct pt_regs *regs)
{
struct ipipe_percpu_domain_data *p = ipipe_root_cpudom_ptr();
- struct ipipe_domain *this_domain = ipipe_current_domain;
+ struct ipipe_domain *this_domain = __ipipe_current_domain;
struct ivgx *ivg_stop = ivg7_13[vec-IVG7].istop;
struct ivgx *ivg = ivg7_13[vec-IVG7].ifirst;
int irq, s;
- if (likely(vec == EVT_IVTMR_P)) {
+ if (likely(vec == EVT_IVTMR_P))
irq = IRQ_CORETMR;
-
- } else {
+ else {
#if defined(SIC_ISR0) || defined(SICA_ISR0)
unsigned long sic_status[3];
@@ -1262,12 +1260,11 @@
break;
}
#endif
-
irq = ivg->irqno;
}
if (irq == IRQ_SYSTMR) {
-#ifndef CONFIG_GENERIC_CLOCKEVENTS
+#if !defined(CONFIG_GENERIC_CLOCKEVENTS) || defined(CONFIG_TICKSOURCE_GPTMR0)
bfin_write_TIMER_STATUS(1); /* Latch TIMIL0 */
#endif
/* This is basically what we need from the register frame. */
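
The reworked switch above gates the gptimer case on the configured tick source and keeps a separate default per CONFIG_IPIPE branch, so every IRQ still lands on exactly one flow handler. A compact sketch of that gated-case shape, using the same kernel flow handlers the hunk installs (IRQ_GPTMR_TICK is a placeholder name, not a real Blackfin symbol):

/* Gated-case sketch; assumes kernel context with <linux/irq.h>. */
#include <linux/irq.h>

static void assign_flow_handler(unsigned int irq)
{
        switch (irq) {
#ifdef CONFIG_TICKSOURCE_GPTMR0
        case IRQ_GPTMR_TICK:    /* this label only exists in gptimer configs */
                set_irq_handler(irq, handle_percpu_irq);
                break;
#endif
        default:
                set_irq_handler(irq, handle_simple_irq);
                break;
        }
}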
diff --git a/arch/blackfin/mach-common/pm.c b/arch/blackfin/mach-common/pm.c
index bce5a84..9e7e27b 100644
--- a/arch/blackfin/mach-common/pm.c
+++ b/arch/blackfin/mach-common/pm.c
@@ -132,7 +132,7 @@
return 0;
}
-#ifdef CONFIG_BFIN_WB
+#if defined(CONFIG_BFIN_EXTMEM_WRITEBACK) || defined(CONFIG_BFIN_L2_WRITEBACK)
static void flushinv_all_dcache(void)
{
u32 way, bank, subbank, set;
@@ -175,7 +175,7 @@
#ifdef CONFIG_BFIN_DCACHE
unsigned long ctrl;
-#ifdef CONFIG_BFIN_WB
+#if defined(CONFIG_BFIN_EXTMEM_WRITEBACK) || defined(CONFIG_BFIN_L2_WRITEBACK)
flushinv_all_dcache();
#endif
SSYNC();
diff --git a/arch/blackfin/mm/init.c b/arch/blackfin/mm/init.c
index 014a55a..68bd0bd6 100644
--- a/arch/blackfin/mm/init.c
+++ b/arch/blackfin/mm/init.c
@@ -160,7 +160,7 @@
/* do not count in kernel image between _rambase and _ramstart */
reservedpages -= (_ramstart - _rambase) >> PAGE_SHIFT;
-#if (defined(CONFIG_BFIN_ICACHE) && ANOMALY_05000263)
+#if (defined(CONFIG_BFIN_EXTMEM_ICACHEABLE) && ANOMALY_05000263)
reservedpages += (_ramend - memory_end - DMA_UNCACHED_REGION) >> PAGE_SHIFT;
#endif
diff --git a/arch/cris/arch-v10/kernel/dma.c b/arch/cris/arch-v10/kernel/dma.c
index 929e686..d31504b 100644
--- a/arch/cris/arch-v10/kernel/dma.c
+++ b/arch/cris/arch-v10/kernel/dma.c
@@ -24,7 +24,7 @@
unsigned long int gens;
int fail = -EINVAL;
- if ((dmanr < 0) || (dmanr >= MAX_DMA_CHANNELS)) {
+ if (dmanr >= MAX_DMA_CHANNELS) {
printk(KERN_CRIT "cris_request_dma: invalid DMA channel %u\n", dmanr);
return -EINVAL;
}
@@ -213,7 +213,7 @@
void cris_free_dma(unsigned int dmanr, const char * device_id)
{
unsigned long flags;
- if ((dmanr < 0) || (dmanr >= MAX_DMA_CHANNELS)) {
+ if (dmanr >= MAX_DMA_CHANNELS) {
printk(KERN_CRIT "cris_free_dma: invalid DMA channel %u\n", dmanr);
return;
}
diff --git a/arch/cris/arch-v32/drivers/cryptocop.c b/arch/cris/arch-v32/drivers/cryptocop.c
index 67c61ea..fd529a0 100644
--- a/arch/cris/arch-v32/drivers/cryptocop.c
+++ b/arch/cris/arch-v32/drivers/cryptocop.c
@@ -1395,7 +1395,7 @@
if (padlen < MD5_MIN_PAD_LENGTH) padlen += MD5_BLOCK_LENGTH;
p = kmalloc(padlen, alloc_flag);
- if (!pad) return -ENOMEM;
+ if (!p) return -ENOMEM;
*p = 0x80;
memset(p+1, 0, padlen - 1);
@@ -1427,7 +1427,7 @@
if (padlen < SHA1_MIN_PAD_LENGTH) padlen += SHA1_BLOCK_LENGTH;
p = kmalloc(padlen, alloc_flag);
- if (!pad) return -ENOMEM;
+ if (!p) return -ENOMEM;
*p = 0x80;
memset(p+1, 0, padlen - 1);
diff --git a/arch/cris/arch-v32/kernel/irq.c b/arch/cris/arch-v32/kernel/irq.c
index d70b445..57668db 100644
--- a/arch/cris/arch-v32/kernel/irq.c
+++ b/arch/cris/arch-v32/kernel/irq.c
@@ -430,8 +430,8 @@
masked[i] &= ~TIMER_MASK;
do_IRQ(TIMER0_INTR_VECT, regs);
}
- }
#endif
+ }
#ifdef IGNORE_MASK
/* Remove IRQs that can't be handled as multiple. */
diff --git a/arch/cris/arch-v32/lib/Makefile b/arch/cris/arch-v32/lib/Makefile
index eb4aad1..dd296b9 100644
--- a/arch/cris/arch-v32/lib/Makefile
+++ b/arch/cris/arch-v32/lib/Makefile
@@ -3,5 +3,5 @@
#
lib-y = checksum.o checksumcopy.o string.o usercopy.o memset.o \
- csumcpfruser.o spinlock.o delay.o
+ csumcpfruser.o spinlock.o delay.o strcmp.o
diff --git a/arch/cris/arch-v32/lib/strcmp.S b/arch/cris/arch-v32/lib/strcmp.S
new file mode 100644
index 0000000..8f7a1ee
--- /dev/null
+++ b/arch/cris/arch-v32/lib/strcmp.S
@@ -0,0 +1,21 @@
+; strcmp.S -- CRISv32 version.
+; Copyright (C) 2008 AXIS Communications AB
+; Written by Edgar E. Iglesias
+;
+; This source code is licensed under the GNU General Public License,
+; Version 2. See the file COPYING for more details.
+
+ .global strcmp
+ .type strcmp,@function
+strcmp:
+1:
+ move.b [$r10+], $r12
+ seq $r13
+ sub.b [$r11+], $r12
+ or.b $r12, $r13
+ beq 1b
+ nop
+
+ ret
+ movs.b $r12, $r10
+ .size strcmp, . - strcmp
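
For readers less familiar with CRIS assembly, the routine above implements the standard strcmp() contract: advance both pointers until the bytes differ or a NUL is hit, then return the difference of the first differing characters. A reference-only C sketch of the same behaviour (illustrative, not part of the patch):

	/* Reference-only sketch of the strcmp() semantics implemented by the
	 * CRISv32 assembly above; not part of the patch. */
	static int strcmp_ref(const char *s1, const char *s2)
	{
		while (*s1 && *s1 == *s2) {
			s1++;
			s2++;
		}
		return (unsigned char)*s1 - (unsigned char)*s2;
	}
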
diff --git a/arch/cris/include/arch-v32/arch/spinlock.h b/arch/cris/include/arch-v32/arch/spinlock.h
index 129756b..367a53e 100644
--- a/arch/cris/include/arch-v32/arch/spinlock.h
+++ b/arch/cris/include/arch-v32/arch/spinlock.h
@@ -78,7 +78,7 @@
{
__raw_spin_lock(&rw->slock);
while (rw->lock != RW_LOCK_BIAS);
- rw->lock == 0;
+ rw->lock = 0;
__raw_spin_unlock(&rw->slock);
}
@@ -93,7 +93,7 @@
{
__raw_spin_lock(&rw->slock);
while (rw->lock != RW_LOCK_BIAS);
- rw->lock == RW_LOCK_BIAS;
+ rw->lock = RW_LOCK_BIAS;
__raw_spin_unlock(&rw->slock);
}
@@ -114,7 +114,7 @@
int ret = 0;
__raw_spin_lock(&rw->slock);
if (rw->lock == RW_LOCK_BIAS) {
- rw->lock == 0;
+ rw->lock = 0;
ret = 1;
}
__raw_spin_unlock(&rw->slock);
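
All three hunks above fix the same slip: '==' compares and throws the result away, so the original lines never actually stored the new value into rw->lock. Building with -Wall typically flags such lines as statements with no effect. A minimal illustration using demo types (not kernel code):

	struct demo_rwlock { volatile unsigned int lock; };

	static void demo_write_lock_buggy(struct demo_rwlock *rw)
	{
		rw->lock == 0;	/* comparison only: the lock word is never written */
	}

	static void demo_write_lock_fixed(struct demo_rwlock *rw)
	{
		rw->lock = 0;	/* the intended assignment */
	}
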
diff --git a/arch/cris/include/asm/string.h b/arch/cris/include/asm/string.h
index 691190e..d5db39f 100644
--- a/arch/cris/include/asm/string.h
+++ b/arch/cris/include/asm/string.h
@@ -11,4 +11,10 @@
#define __HAVE_ARCH_MEMSET
extern void *memset(void *, int, size_t);
+#ifdef CONFIG_ETRAX_ARCH_V32
+/* For v32 we provide strcmp. */
+#define __HAVE_ARCH_STRCMP
+extern int strcmp(const char *s1, const char *s2);
+#endif
+
#endif
diff --git a/arch/h8300/Kconfig.cpu b/arch/h8300/Kconfig.cpu
index b65dcfe..6e2ecff 100644
--- a/arch/h8300/Kconfig.cpu
+++ b/arch/h8300/Kconfig.cpu
@@ -13,7 +13,7 @@
config H8300H_AKI3068NET
bool "AE-3068/69"
- select CONFIG_H83068
+ select H83068
help
AKI-H8/3068F / AKI-H8/3069F Flashmicom LAN Board Support
More Information. (Japanese Only)
@@ -24,7 +24,7 @@
config H8300H_H8MAX
bool "H8MAX"
- select CONFIG_H83068
+ select H83068
help
H8MAX Evaluation Board Support
More Information. (Japanese Only)
@@ -32,7 +32,7 @@
config H8300H_SIM
bool "H8/300H Simulator"
- select CONFIG_H83007
+ select H83007
help
GDB Simulator Support
More Information.
@@ -45,7 +45,7 @@
config H8S_EDOSK2674
bool "EDOSK-2674"
- select CONFIG_H8S2768
+ select H8S2678
help
Renesas EDOSK-2674 Evaluation Board Support
More Information.
diff --git a/arch/ia64/include/asm/iommu.h b/arch/ia64/include/asm/iommu.h
index 0490794..745e095 100644
--- a/arch/ia64/include/asm/iommu.h
+++ b/arch/ia64/include/asm/iommu.h
@@ -9,6 +9,11 @@
extern void no_iommu_init(void);
extern int force_iommu, no_iommu;
extern int iommu_detected;
+#ifdef CONFIG_DMAR
+extern int iommu_pass_through;
+#else
+#define iommu_pass_through (0)
+#endif
extern void iommu_dma_init(void);
extern void machvec_init(const char *name);
diff --git a/arch/ia64/kernel/acpi-processor.c b/arch/ia64/kernel/acpi-processor.c
index cbe6cee..dbda7bd 100644
--- a/arch/ia64/kernel/acpi-processor.c
+++ b/arch/ia64/kernel/acpi-processor.c
@@ -71,3 +71,15 @@
}
EXPORT_SYMBOL(arch_acpi_processor_init_pdc);
+
+void arch_acpi_processor_cleanup_pdc(struct acpi_processor *pr)
+{
+ if (pr->pdc) {
+ kfree(pr->pdc->pointer->buffer.pointer);
+ kfree(pr->pdc->pointer);
+ kfree(pr->pdc);
+ pr->pdc = NULL;
+ }
+}
+
+EXPORT_SYMBOL(arch_acpi_processor_cleanup_pdc);
diff --git a/arch/ia64/kernel/pci-dma.c b/arch/ia64/kernel/pci-dma.c
index 1376da4..0569596 100644
--- a/arch/ia64/kernel/pci-dma.c
+++ b/arch/ia64/kernel/pci-dma.c
@@ -32,6 +32,8 @@
int force_iommu __read_mostly;
#endif
+int iommu_pass_through;
+
/* Dummy device used for NULL arguments (normally ISA). Better would
be probably a smaller DMA mask, but this is bug-to-bug compatible
to i386. */
diff --git a/arch/ia64/kernel/pci-swiotlb.c b/arch/ia64/kernel/pci-swiotlb.c
index 285aae8..223abb1 100644
--- a/arch/ia64/kernel/pci-swiotlb.c
+++ b/arch/ia64/kernel/pci-swiotlb.c
@@ -46,7 +46,7 @@
void __init pci_swiotlb_init(void)
{
- if (!iommu_detected) {
+ if (!iommu_detected || iommu_pass_through) {
#ifdef CONFIG_IA64_GENERIC
swiotlb = 1;
printk(KERN_INFO "PCI-DMA: Re-initialize machine vector.\n");
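
The ia64 side of the iommu_pass_through change uses a common header idiom: when CONFIG_DMAR is set the flag is a real variable (defined in pci-dma.c), otherwise it collapses to a compile-time 0, so callers such as pci_swiotlb_init() can test it without #ifdefs and the dead branch is optimized away. A sketch of the idiom with made-up names (CONFIG_DEMO_FEATURE, demo_enabled):

	#ifdef CONFIG_DEMO_FEATURE
	extern int demo_enabled;		/* defined once in a .c file */
	#else
	#define demo_enabled (0)		/* constant-folds away when disabled */
	#endif

	static inline int demo_pick_path(void)
	{
		if (demo_enabled)		/* no #ifdef needed at the call site */
			return 1;
		return 0;
	}
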
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index b29f028..8c4be1f 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -601,6 +601,7 @@
select SYS_SUPPORTS_64BIT_KERNEL
select SYS_SUPPORTS_BIG_ENDIAN
select SYS_SUPPORTS_HIGHMEM
+ select SYS_SUPPORTS_HOTPLUG_CPU
select SYS_HAS_CPU_CAVIUM_OCTEON
help
The Octeon simulator is software performance model of the Cavium
@@ -615,6 +616,7 @@
select SYS_SUPPORTS_64BIT_KERNEL
select SYS_SUPPORTS_BIG_ENDIAN
select SYS_SUPPORTS_HIGHMEM
+ select SYS_SUPPORTS_HOTPLUG_CPU
select SYS_HAS_EARLY_PRINTK
select SYS_HAS_CPU_CAVIUM_OCTEON
select SWAP_IO_SPACE
@@ -784,8 +786,17 @@
bool
config HOTPLUG_CPU
+ bool "Support for hot-pluggable CPUs"
+ depends on SMP && HOTPLUG && SYS_SUPPORTS_HOTPLUG_CPU
+ help
+ Say Y here to allow turning CPUs off and on. CPUs can be
+ controlled through /sys/devices/system/cpu.
+	  (Note: power management support will enable this option
+	  automatically on SMP systems.)
+ Say N if you want to disable CPU hotplug.
+
+config SYS_SUPPORTS_HOTPLUG_CPU
bool
- default n
config I8259
bool
@@ -2136,11 +2147,11 @@
config ARCH_HIBERNATION_POSSIBLE
def_bool y
- depends on !SMP
+ depends on SYS_SUPPORTS_HOTPLUG_CPU
config ARCH_SUSPEND_POSSIBLE
def_bool y
- depends on !SMP
+ depends on SYS_SUPPORTS_HOTPLUG_CPU
source "kernel/power/Kconfig"
diff --git a/arch/mips/cavium-octeon/octeon-irq.c b/arch/mips/cavium-octeon/octeon-irq.c
index 8dfa009..384f184 100644
--- a/arch/mips/cavium-octeon/octeon-irq.c
+++ b/arch/mips/cavium-octeon/octeon-irq.c
@@ -7,7 +7,7 @@
*/
#include <linux/irq.h>
#include <linux/interrupt.h>
-#include <linux/hardirq.h>
+#include <linux/smp.h>
#include <asm/octeon/octeon.h>
#include <asm/octeon/cvmx-pexp-defs.h>
@@ -501,3 +501,62 @@
}
}
}
+
+#ifdef CONFIG_HOTPLUG_CPU
+static int is_irq_enabled_on_cpu(unsigned int irq, unsigned int cpu)
+{
+ unsigned int isset;
+#ifdef CONFIG_SMP
+ int coreid = cpu_logical_map(cpu);
+#else
+ int coreid = cvmx_get_core_num();
+#endif
+ int bit = (irq < OCTEON_IRQ_WDOG0) ?
+ irq - OCTEON_IRQ_WORKQ0 : irq - OCTEON_IRQ_WDOG0;
+ if (irq < 64) {
+ isset = (cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2)) &
+ (1ull << bit)) >> bit;
+ } else {
+ isset = (cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1)) &
+ (1ull << bit)) >> bit;
+ }
+ return isset;
+}
+
+void fixup_irqs(void)
+{
+ int irq;
+
+ for (irq = OCTEON_IRQ_SW0; irq <= OCTEON_IRQ_TIMER; irq++)
+ octeon_irq_core_disable_local(irq);
+
+ for (irq = OCTEON_IRQ_WORKQ0; irq <= OCTEON_IRQ_GPIO15; irq++) {
+ if (is_irq_enabled_on_cpu(irq, smp_processor_id())) {
+ /* ciu irq migrates to next cpu */
+ octeon_irq_chip_ciu0.disable(irq);
+ octeon_irq_ciu0_set_affinity(irq, &cpu_online_map);
+ }
+ }
+
+#if 0
+ for (irq = OCTEON_IRQ_MBOX0; irq <= OCTEON_IRQ_MBOX1; irq++)
+ octeon_irq_mailbox_mask(irq);
+#endif
+ for (irq = OCTEON_IRQ_UART0; irq <= OCTEON_IRQ_BOOTDMA; irq++) {
+ if (is_irq_enabled_on_cpu(irq, smp_processor_id())) {
+ /* ciu irq migrates to next cpu */
+ octeon_irq_chip_ciu0.disable(irq);
+ octeon_irq_ciu0_set_affinity(irq, &cpu_online_map);
+ }
+ }
+
+ for (irq = OCTEON_IRQ_UART2; irq <= OCTEON_IRQ_RESERVED135; irq++) {
+ if (is_irq_enabled_on_cpu(irq, smp_processor_id())) {
+ /* ciu irq migrates to next cpu */
+ octeon_irq_chip_ciu1.disable(irq);
+ octeon_irq_ciu1_set_affinity(irq, &cpu_online_map);
+ }
+ }
+}
+
+#endif /* CONFIG_HOTPLUG_CPU */
diff --git a/arch/mips/cavium-octeon/octeon_boot.h b/arch/mips/cavium-octeon/octeon_boot.h
new file mode 100644
index 0000000..0f7f84a
--- /dev/null
+++ b/arch/mips/cavium-octeon/octeon_boot.h
@@ -0,0 +1,70 @@
+/*
+ * (C) Copyright 2004, 2005 Cavium Networks
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+
+#ifndef __OCTEON_BOOT_H__
+#define __OCTEON_BOOT_H__
+
+#include <linux/types.h>
+
+struct boot_init_vector {
+ uint32_t stack_addr;
+ uint32_t code_addr;
+ uint32_t app_start_func_addr;
+ uint32_t k0_val;
+ uint32_t flags;
+ uint32_t boot_info_addr;
+ uint32_t pad;
+ uint32_t pad2;
+};
+
+/* similar to bootloader's linux_app_boot_info but without global data */
+struct linux_app_boot_info {
+ uint32_t labi_signature;
+ uint32_t start_core0_addr;
+ uint32_t avail_coremask;
+ uint32_t pci_console_active;
+ uint32_t icache_prefetch_disable;
+ uint32_t InitTLBStart_addr;
+ uint32_t start_app_addr;
+ uint32_t cur_exception_base;
+ uint32_t no_mark_private_data;
+ uint32_t compact_flash_common_base_addr;
+ uint32_t compact_flash_attribute_base_addr;
+ uint32_t led_display_base_addr;
+};
+
+/* To avoid copying a lot of the bootloader's structures,
+   only the offset of the requested member is kept here */
+#define AVAIL_COREMASK_OFFSET_IN_LINUX_APP_BOOT_BLOCK 0x765c
+
+/* hardcoded in bootloader */
+#define LABI_ADDR_IN_BOOTLOADER 0x700
+
+#define LINUX_APP_BOOT_BLOCK_NAME "linux-app-boot"
+
+#define LABI_SIGNATURE 0xAABBCCDD
+
+/* from uboot-headers/octeon_mem_map.h */
+#define EXCEPTION_BASE_INCR (4 * 1024)
+ /* Increment size for exception base addresses (4k minimum) */
+#define EXCEPTION_BASE_BASE 0
+#define BOOTLOADER_PRIV_DATA_BASE (EXCEPTION_BASE_BASE + 0x800)
+#define BOOTLOADER_BOOT_VECTOR (BOOTLOADER_PRIV_DATA_BASE)
+
+#endif /* __OCTEON_BOOT_H__ */
diff --git a/arch/mips/cavium-octeon/setup.c b/arch/mips/cavium-octeon/setup.c
index 5f4e49b..da55924 100644
--- a/arch/mips/cavium-octeon/setup.c
+++ b/arch/mips/cavium-octeon/setup.c
@@ -13,6 +13,7 @@
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/serial.h>
+#include <linux/smp.h>
#include <linux/types.h>
#include <linux/string.h> /* for memset */
#include <linux/tty.h>
diff --git a/arch/mips/cavium-octeon/smp.c b/arch/mips/cavium-octeon/smp.c
index 24e0ad6..0b891a9 100644
--- a/arch/mips/cavium-octeon/smp.c
+++ b/arch/mips/cavium-octeon/smp.c
@@ -5,6 +5,7 @@
*
* Copyright (C) 2004-2008 Cavium Networks
*/
+#include <linux/cpu.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/smp.h>
@@ -19,10 +20,16 @@
#include <asm/octeon/octeon.h>
+#include "octeon_boot.h"
+
volatile unsigned long octeon_processor_boot = 0xff;
volatile unsigned long octeon_processor_sp;
volatile unsigned long octeon_processor_gp;
+#ifdef CONFIG_HOTPLUG_CPU
+static unsigned int InitTLBStart_addr;
+#endif
+
static irqreturn_t mailbox_interrupt(int irq, void *dev_id)
{
const int coreid = cvmx_get_core_num();
@@ -67,8 +74,28 @@
}
/**
- * Detect available CPUs, populate phys_cpu_present_map
+ * Detect available CPUs, populate cpu_possible_map
*/
+static void octeon_smp_hotplug_setup(void)
+{
+#ifdef CONFIG_HOTPLUG_CPU
+ uint32_t labi_signature;
+
+ labi_signature =
+ cvmx_read64_uint32(CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS,
+ LABI_ADDR_IN_BOOTLOADER +
+ offsetof(struct linux_app_boot_info,
+ labi_signature)));
+ if (labi_signature != LABI_SIGNATURE)
+ pr_err("The bootloader version on this board is incorrect\n");
+ InitTLBStart_addr =
+ cvmx_read64_uint32(CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS,
+ LABI_ADDR_IN_BOOTLOADER +
+ offsetof(struct linux_app_boot_info,
+ InitTLBStart_addr)));
+#endif
+}
+
static void octeon_smp_setup(void)
{
const int coreid = cvmx_get_core_num();
@@ -91,6 +118,9 @@
cpus++;
}
}
+ cpu_present_map = cpu_possible_map;
+
+ octeon_smp_hotplug_setup();
}
/**
@@ -128,6 +158,17 @@
const int coreid = cvmx_get_core_num();
union cvmx_ciu_intx_sum0 interrupt_enable;
+#ifdef CONFIG_HOTPLUG_CPU
+ unsigned int cur_exception_base;
+
+ cur_exception_base = cvmx_read64_uint32(
+ CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS,
+ LABI_ADDR_IN_BOOTLOADER +
+ offsetof(struct linux_app_boot_info,
+ cur_exception_base)));
+ /* cur_exception_base is incremented in bootloader after setting */
+ write_c0_ebase((unsigned int)(cur_exception_base - EXCEPTION_BASE_INCR));
+#endif
octeon_check_cpu_bist();
octeon_init_cvmcount();
/*
@@ -199,6 +240,193 @@
#endif
}
+#ifdef CONFIG_HOTPLUG_CPU
+
+/* State of each CPU. */
+DEFINE_PER_CPU(int, cpu_state);
+
+extern void fixup_irqs(void);
+
+static DEFINE_SPINLOCK(smp_reserve_lock);
+
+static int octeon_cpu_disable(void)
+{
+ unsigned int cpu = smp_processor_id();
+
+ if (cpu == 0)
+ return -EBUSY;
+
+ spin_lock(&smp_reserve_lock);
+
+ cpu_clear(cpu, cpu_online_map);
+ cpu_clear(cpu, cpu_callin_map);
+ local_irq_disable();
+ fixup_irqs();
+ local_irq_enable();
+
+ flush_cache_all();
+ local_flush_tlb_all();
+
+ spin_unlock(&smp_reserve_lock);
+
+ return 0;
+}
+
+static void octeon_cpu_die(unsigned int cpu)
+{
+ int coreid = cpu_logical_map(cpu);
+ uint32_t avail_coremask;
+ struct cvmx_bootmem_named_block_desc *block_desc;
+
+#ifdef CONFIG_CAVIUM_OCTEON_WATCHDOG
+ /* Disable the watchdog */
+ cvmx_ciu_wdogx_t ciu_wdog;
+ ciu_wdog.u64 = cvmx_read_csr(CVMX_CIU_WDOGX(cpu));
+ ciu_wdog.s.mode = 0;
+ cvmx_write_csr(CVMX_CIU_WDOGX(cpu), ciu_wdog.u64);
+#endif
+
+ while (per_cpu(cpu_state, cpu) != CPU_DEAD)
+ cpu_relax();
+
+ /*
+	 * This is a somewhat complicated strategy for getting/setting the
+	 * available cores mask, copied from the bootloader
+ */
+ /* LINUX_APP_BOOT_BLOCK is initialized in bootoct binary */
+ block_desc = cvmx_bootmem_find_named_block(LINUX_APP_BOOT_BLOCK_NAME);
+
+ if (!block_desc) {
+ avail_coremask =
+ cvmx_read64_uint32(CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS,
+ LABI_ADDR_IN_BOOTLOADER +
+ offsetof
+ (struct linux_app_boot_info,
+ avail_coremask)));
+ } else { /* alternative, already initialized */
+ avail_coremask =
+ cvmx_read64_uint32(CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS,
+ block_desc->base_addr +
+ AVAIL_COREMASK_OFFSET_IN_LINUX_APP_BOOT_BLOCK));
+ }
+
+ avail_coremask |= 1 << coreid;
+
+ /* Setting avail_coremask for bootoct binary */
+ if (!block_desc) {
+ cvmx_write64_uint32(CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS,
+ LABI_ADDR_IN_BOOTLOADER +
+ offsetof(struct linux_app_boot_info,
+ avail_coremask)),
+ avail_coremask);
+ } else {
+ cvmx_write64_uint32(CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS,
+ block_desc->base_addr +
+ AVAIL_COREMASK_OFFSET_IN_LINUX_APP_BOOT_BLOCK),
+ avail_coremask);
+ }
+
+ pr_info("Reset core %d. Available Coremask = %x \n", coreid,
+ avail_coremask);
+ cvmx_write_csr(CVMX_CIU_PP_RST, 1 << coreid);
+ cvmx_write_csr(CVMX_CIU_PP_RST, 0);
+}
+
+void play_dead(void)
+{
+ int coreid = cvmx_get_core_num();
+
+ idle_task_exit();
+ octeon_processor_boot = 0xff;
+ per_cpu(cpu_state, coreid) = CPU_DEAD;
+
+ while (1) /* core will be reset here */
+ ;
+}
+
+extern void kernel_entry(unsigned long arg1, ...);
+
+static void start_after_reset(void)
+{
+ kernel_entry(0, 0, 0); /* set a2 = 0 for secondary core */
+}
+
+int octeon_update_boot_vector(unsigned int cpu)
+{
+
+ int coreid = cpu_logical_map(cpu);
+ unsigned int avail_coremask;
+ struct cvmx_bootmem_named_block_desc *block_desc;
+ struct boot_init_vector *boot_vect =
+ (struct boot_init_vector *) cvmx_phys_to_ptr(0x0 +
+ BOOTLOADER_BOOT_VECTOR);
+
+ block_desc = cvmx_bootmem_find_named_block(LINUX_APP_BOOT_BLOCK_NAME);
+
+ if (!block_desc) {
+ avail_coremask =
+ cvmx_read64_uint32(CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS,
+ LABI_ADDR_IN_BOOTLOADER +
+ offsetof(struct linux_app_boot_info,
+ avail_coremask)));
+ } else { /* alternative, already initialized */
+ avail_coremask =
+ cvmx_read64_uint32(CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS,
+ block_desc->base_addr +
+ AVAIL_COREMASK_OFFSET_IN_LINUX_APP_BOOT_BLOCK));
+ }
+
+ if (!(avail_coremask & (1 << coreid))) {
+		/* core not available; assume it was caught by simple-executive */
+ cvmx_write_csr(CVMX_CIU_PP_RST, 1 << coreid);
+ cvmx_write_csr(CVMX_CIU_PP_RST, 0);
+ }
+
+ boot_vect[coreid].app_start_func_addr =
+ (uint32_t) (unsigned long) start_after_reset;
+ boot_vect[coreid].code_addr = InitTLBStart_addr;
+
+ CVMX_SYNC;
+
+ cvmx_write_csr(CVMX_CIU_NMI, (1 << coreid) & avail_coremask);
+
+ return 0;
+}
+
+static int __cpuinit octeon_cpu_callback(struct notifier_block *nfb,
+ unsigned long action, void *hcpu)
+{
+ unsigned int cpu = (unsigned long)hcpu;
+
+ switch (action) {
+ case CPU_UP_PREPARE:
+ octeon_update_boot_vector(cpu);
+ break;
+ case CPU_ONLINE:
+ pr_info("Cpu %d online\n", cpu);
+ break;
+ case CPU_DEAD:
+ break;
+ }
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block __cpuinitdata octeon_cpu_notifier = {
+ .notifier_call = octeon_cpu_callback,
+};
+
+static int __cpuinit register_cavium_notifier(void)
+{
+ register_hotcpu_notifier(&octeon_cpu_notifier);
+
+ return 0;
+}
+
+late_initcall(register_cavium_notifier);
+
+#endif /* CONFIG_HOTPLUG_CPU */
+
struct plat_smp_ops octeon_smp_ops = {
.send_ipi_single = octeon_send_ipi_single,
.send_ipi_mask = octeon_send_ipi_mask,
@@ -208,4 +436,8 @@
.boot_secondary = octeon_boot_secondary,
.smp_setup = octeon_smp_setup,
.prepare_cpus = octeon_prepare_cpus,
+#ifdef CONFIG_HOTPLUG_CPU
+ .cpu_disable = octeon_cpu_disable,
+ .cpu_die = octeon_cpu_die,
+#endif
};
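
The hot-unplug path above is a two-CPU handshake through per_cpu(cpu_state, cpu): the dying CPU announces CPU_DEAD from play_dead(), while octeon_cpu_die() on a surviving CPU spins until it sees that announcement and only then pulls the core into reset. A stripped-down sketch of that handshake (demo variables, not the kernel's per-cpu machinery):

	#define NR_DEMO_CPUS 4

	static volatile int demo_cpu_dead[NR_DEMO_CPUS];

	static void demo_play_dead(int cpu)	/* runs on the CPU going offline */
	{
		demo_cpu_dead[cpu] = 1;		/* announce "I am done" */
		for (;;)
			;			/* spin until reset by the survivor */
	}

	static void demo_cpu_die(int cpu)	/* runs on a surviving CPU */
	{
		while (!demo_cpu_dead[cpu])
			;			/* wait for the announcement */
		/* now it is safe to reset the core and mark it available again */
	}
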
diff --git a/arch/mips/include/asm/bug.h b/arch/mips/include/asm/bug.h
index 08ea468..6cf29c2 100644
--- a/arch/mips/include/asm/bug.h
+++ b/arch/mips/include/asm/bug.h
@@ -1,6 +1,7 @@
#ifndef __ASM_BUG_H
#define __ASM_BUG_H
+#include <linux/compiler.h>
#include <asm/sgidefs.h>
#ifdef CONFIG_BUG
diff --git a/arch/mips/include/asm/bugs.h b/arch/mips/include/asm/bugs.h
index 9dc10df..b160a70 100644
--- a/arch/mips/include/asm/bugs.h
+++ b/arch/mips/include/asm/bugs.h
@@ -11,6 +11,7 @@
#include <linux/bug.h>
#include <linux/delay.h>
+#include <linux/smp.h>
#include <asm/cpu.h>
#include <asm/cpu-info.h>
diff --git a/arch/mips/include/asm/irq.h b/arch/mips/include/asm/irq.h
index 4f1eed1..09b08d0 100644
--- a/arch/mips/include/asm/irq.h
+++ b/arch/mips/include/asm/irq.h
@@ -10,6 +10,7 @@
#define _ASM_IRQ_H
#include <linux/linkage.h>
+#include <linux/smp.h>
#include <asm/mipsmtregs.h>
diff --git a/arch/mips/include/asm/mmu_context.h b/arch/mips/include/asm/mmu_context.h
index d7f3eb0..d3bea88 100644
--- a/arch/mips/include/asm/mmu_context.h
+++ b/arch/mips/include/asm/mmu_context.h
@@ -13,6 +13,7 @@
#include <linux/errno.h>
#include <linux/sched.h>
+#include <linux/smp.h>
#include <linux/slab.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
diff --git a/arch/mips/include/asm/smp-ops.h b/arch/mips/include/asm/smp-ops.h
index 64ffc02..fd54554 100644
--- a/arch/mips/include/asm/smp-ops.h
+++ b/arch/mips/include/asm/smp-ops.h
@@ -26,6 +26,10 @@
void (*boot_secondary)(int cpu, struct task_struct *idle);
void (*smp_setup)(void);
void (*prepare_cpus)(unsigned int max_cpus);
+#ifdef CONFIG_HOTPLUG_CPU
+ int (*cpu_disable)(void);
+ void (*cpu_die)(unsigned int cpu);
+#endif
};
extern void register_smp_ops(struct plat_smp_ops *ops);
diff --git a/arch/mips/include/asm/smp.h b/arch/mips/include/asm/smp.h
index 40e5ef1..aaa2d4a 100644
--- a/arch/mips/include/asm/smp.h
+++ b/arch/mips/include/asm/smp.h
@@ -13,6 +13,7 @@
#include <linux/bitops.h>
#include <linux/linkage.h>
+#include <linux/smp.h>
#include <linux/threads.h>
#include <linux/cpumask.h>
@@ -40,6 +41,7 @@
/* Octeon - Tell another core to flush its icache */
#define SMP_ICACHE_FLUSH 0x4
+extern volatile cpumask_t cpu_callin_map;
extern void asmlinkage smp_bootstrap(void);
@@ -55,6 +57,24 @@
mp_ops->send_ipi_single(cpu, SMP_RESCHEDULE_YOURSELF);
}
+#ifdef CONFIG_HOTPLUG_CPU
+static inline int __cpu_disable(void)
+{
+ extern struct plat_smp_ops *mp_ops; /* private */
+
+ return mp_ops->cpu_disable();
+}
+
+static inline void __cpu_die(unsigned int cpu)
+{
+ extern struct plat_smp_ops *mp_ops; /* private */
+
+ mp_ops->cpu_die(cpu);
+}
+
+extern void play_dead(void);
+#endif
+
extern asmlinkage void smp_call_function_interrupt(void);
extern void arch_send_call_function_single_ipi(int cpu);
diff --git a/arch/mips/include/asm/sn/addrs.h b/arch/mips/include/asm/sn/addrs.h
index 3a56d90..2367b56 100644
--- a/arch/mips/include/asm/sn/addrs.h
+++ b/arch/mips/include/asm/sn/addrs.h
@@ -11,6 +11,7 @@
#ifndef __ASSEMBLY__
+#include <linux/smp.h>
#include <linux/types.h>
#endif /* !__ASSEMBLY__ */
diff --git a/arch/mips/jazz/irq.c b/arch/mips/jazz/irq.c
index d9b6a5b..7fd170d 100644
--- a/arch/mips/jazz/irq.c
+++ b/arch/mips/jazz/irq.c
@@ -10,6 +10,7 @@
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
+#include <linux/smp.h>
#include <linux/spinlock.h>
#include <asm/irq_cpu.h>
diff --git a/arch/mips/kernel/cevt-bcm1480.c b/arch/mips/kernel/cevt-bcm1480.c
index a5182a2..e02f79b 100644
--- a/arch/mips/kernel/cevt-bcm1480.c
+++ b/arch/mips/kernel/cevt-bcm1480.c
@@ -18,6 +18,7 @@
#include <linux/clockchips.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
+#include <linux/smp.h>
#include <asm/addrspace.h>
#include <asm/io.h>
diff --git a/arch/mips/kernel/cevt-r4k.c b/arch/mips/kernel/cevt-r4k.c
index 0015e44..2652362 100644
--- a/arch/mips/kernel/cevt-r4k.c
+++ b/arch/mips/kernel/cevt-r4k.c
@@ -9,6 +9,7 @@
#include <linux/clockchips.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
+#include <linux/smp.h>
#include <asm/smtc_ipi.h>
#include <asm/time.h>
diff --git a/arch/mips/kernel/cevt-sb1250.c b/arch/mips/kernel/cevt-sb1250.c
index 340f53e..ac5903d 100644
--- a/arch/mips/kernel/cevt-sb1250.c
+++ b/arch/mips/kernel/cevt-sb1250.c
@@ -18,6 +18,7 @@
#include <linux/clockchips.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
+#include <linux/smp.h>
#include <asm/addrspace.h>
#include <asm/io.h>
diff --git a/arch/mips/kernel/cevt-smtc.c b/arch/mips/kernel/cevt-smtc.c
index df6f5bc..98bd7de 100644
--- a/arch/mips/kernel/cevt-smtc.c
+++ b/arch/mips/kernel/cevt-smtc.c
@@ -10,6 +10,7 @@
#include <linux/clockchips.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
+#include <linux/smp.h>
#include <asm/smtc_ipi.h>
#include <asm/time.h>
diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c
index b13b8eb..1abe990 100644
--- a/arch/mips/kernel/cpu-probe.c
+++ b/arch/mips/kernel/cpu-probe.c
@@ -14,6 +14,7 @@
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/ptrace.h>
+#include <linux/smp.h>
#include <linux/stddef.h>
#include <asm/bugs.h>
diff --git a/arch/mips/kernel/i8253.c b/arch/mips/kernel/i8253.c
index ed20e7f..f7d8d5d 100644
--- a/arch/mips/kernel/i8253.c
+++ b/arch/mips/kernel/i8253.c
@@ -7,6 +7,7 @@
#include <linux/interrupt.h>
#include <linux/jiffies.h>
#include <linux/module.h>
+#include <linux/smp.h>
#include <linux/spinlock.h>
#include <asm/delay.h>
diff --git a/arch/mips/kernel/irq-gic.c b/arch/mips/kernel/irq-gic.c
index 3f43c2e..39000f1 100644
--- a/arch/mips/kernel/irq-gic.c
+++ b/arch/mips/kernel/irq-gic.c
@@ -2,6 +2,7 @@
#include <linux/bitmap.h>
#include <linux/init.h>
+#include <linux/smp.h>
#include <asm/io.h>
#include <asm/gic.h>
diff --git a/arch/mips/kernel/kgdb.c b/arch/mips/kernel/kgdb.c
index 6e152c8..50c9bb8 100644
--- a/arch/mips/kernel/kgdb.c
+++ b/arch/mips/kernel/kgdb.c
@@ -26,6 +26,7 @@
#include <linux/kgdb.h>
#include <linux/kdebug.h>
#include <linux/sched.h>
+#include <linux/smp.h>
#include <asm/inst.h>
#include <asm/fpu.h>
#include <asm/cacheflush.h>
diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
index 1eaaa45..c09d681 100644
--- a/arch/mips/kernel/process.c
+++ b/arch/mips/kernel/process.c
@@ -50,10 +50,15 @@
*/
void __noreturn cpu_idle(void)
{
+ int cpu;
+
+ /* CPU is going idle. */
+ cpu = smp_processor_id();
+
/* endless idle loop with no priority at all */
while (1) {
tick_nohz_stop_sched_tick(1);
- while (!need_resched()) {
+ while (!need_resched() && cpu_online(cpu)) {
#ifdef CONFIG_MIPS_MT_SMTC
extern void smtc_idle_loop_hook(void);
@@ -62,6 +67,12 @@
if (cpu_wait)
(*cpu_wait)();
}
+#ifdef CONFIG_HOTPLUG_CPU
+ if (!cpu_online(cpu) && !cpu_isset(cpu, cpu_callin_map) &&
+ (system_state == SYSTEM_RUNNING ||
+ system_state == SYSTEM_BOOTING))
+ play_dead();
+#endif
tick_nohz_restart_sched_tick();
preempt_enable_no_resched();
schedule();
diff --git a/arch/mips/kernel/smp-cmp.c b/arch/mips/kernel/smp-cmp.c
index f27beca..653be06 100644
--- a/arch/mips/kernel/smp-cmp.c
+++ b/arch/mips/kernel/smp-cmp.c
@@ -20,6 +20,7 @@
#include <linux/kernel.h>
#include <linux/sched.h>
+#include <linux/smp.h>
#include <linux/cpumask.h>
#include <linux/interrupt.h>
#include <linux/compiler.h>
diff --git a/arch/mips/kernel/smp-up.c b/arch/mips/kernel/smp-up.c
index 878e373..2508d55 100644
--- a/arch/mips/kernel/smp-up.c
+++ b/arch/mips/kernel/smp-up.c
@@ -55,6 +55,18 @@
{
}
+#ifdef CONFIG_HOTPLUG_CPU
+static int up_cpu_disable(void)
+{
+ return -ENOSYS;
+}
+
+static void up_cpu_die(unsigned int cpu)
+{
+ BUG();
+}
+#endif
+
struct plat_smp_ops up_smp_ops = {
.send_ipi_single = up_send_ipi_single,
.send_ipi_mask = up_send_ipi_mask,
@@ -64,4 +76,8 @@
.boot_secondary = up_boot_secondary,
.smp_setup = up_smp_setup,
.prepare_cpus = up_prepare_cpus,
+#ifdef CONFIG_HOTPLUG_CPU
+ .cpu_disable = up_cpu_disable,
+ .cpu_die = up_cpu_die,
+#endif
};
diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c
index c937506..bc7d9b0 100644
--- a/arch/mips/kernel/smp.c
+++ b/arch/mips/kernel/smp.c
@@ -22,6 +22,7 @@
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/interrupt.h>
+#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/threads.h>
#include <linux/module.h>
@@ -44,7 +45,7 @@
#include <asm/mipsmtregs.h>
#endif /* CONFIG_MIPS_MT_SMTC */
-static volatile cpumask_t cpu_callin_map; /* Bitmask of started secondaries */
+volatile cpumask_t cpu_callin_map; /* Bitmask of started secondaries */
int __cpu_number_map[NR_CPUS]; /* Map physical to logical */
int __cpu_logical_map[NR_CPUS]; /* Map logical to physical */
@@ -200,6 +201,8 @@
* and keep control until "cpu_online(cpu)" is set. Note: cpu is
* physical, not logical.
*/
+static struct task_struct *cpu_idle_thread[NR_CPUS];
+
int __cpuinit __cpu_up(unsigned int cpu)
{
struct task_struct *idle;
@@ -209,9 +212,16 @@
* The following code is purely to make sure
* Linux can schedule processes on this slave.
*/
- idle = fork_idle(cpu);
- if (IS_ERR(idle))
- panic(KERN_ERR "Fork failed for CPU %d", cpu);
+ if (!cpu_idle_thread[cpu]) {
+ idle = fork_idle(cpu);
+ cpu_idle_thread[cpu] = idle;
+
+ if (IS_ERR(idle))
+ panic(KERN_ERR "Fork failed for CPU %d", cpu);
+ } else {
+ idle = cpu_idle_thread[cpu];
+ init_idle(idle, cpu);
+ }
mp_ops->boot_secondary(cpu, idle);
diff --git a/arch/mips/kernel/smtc.c b/arch/mips/kernel/smtc.c
index 37d51cd..8a0626c 100644
--- a/arch/mips/kernel/smtc.c
+++ b/arch/mips/kernel/smtc.c
@@ -20,6 +20,7 @@
#include <linux/clockchips.h>
#include <linux/kernel.h>
#include <linux/sched.h>
+#include <linux/smp.h>
#include <linux/cpumask.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
diff --git a/arch/mips/kernel/topology.c b/arch/mips/kernel/topology.c
index 660e44e..cf3eb61 100644
--- a/arch/mips/kernel/topology.c
+++ b/arch/mips/kernel/topology.c
@@ -17,7 +17,10 @@
#endif /* CONFIG_NUMA */
for_each_present_cpu(i) {
- ret = register_cpu(&per_cpu(cpu_devices, i), i);
+ struct cpu *c = &per_cpu(cpu_devices, i);
+
+ c->hotpluggable = 1;
+ ret = register_cpu(c, i);
if (ret)
printk(KERN_WARNING "topology_init: register_cpu %d "
"failed (%d)\n", i, ret);
diff --git a/arch/mips/mipssim/sim_time.c b/arch/mips/mipssim/sim_time.c
index 881ecbc..0cea932 100644
--- a/arch/mips/mipssim/sim_time.c
+++ b/arch/mips/mipssim/sim_time.c
@@ -91,6 +91,7 @@
mips_cpu_timer_irq = MSC01E_INT_BASE + MSC01E_INT_CPUCTR;
} else {
#endif
+ {
if (cpu_has_vint)
set_vi_handler(cp0_compare_irq, mips_timer_dispatch);
mips_cpu_timer_irq = MIPS_CPU_IRQ_BASE + cp0_compare_irq;
diff --git a/arch/mips/mm/c-octeon.c b/arch/mips/mm/c-octeon.c
index 44d01a0..b165cdc 100644
--- a/arch/mips/mm/c-octeon.c
+++ b/arch/mips/mm/c-octeon.c
@@ -8,6 +8,7 @@
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/sched.h>
+#include <linux/smp.h>
#include <linux/mm.h>
#include <linux/bitops.h>
#include <linux/cpu.h>
diff --git a/arch/mips/mm/c-r3k.c b/arch/mips/mm/c-r3k.c
index 5500c20..54e5f7b 100644
--- a/arch/mips/mm/c-r3k.c
+++ b/arch/mips/mm/c-r3k.c
@@ -12,6 +12,7 @@
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/sched.h>
+#include <linux/smp.h>
#include <linux/mm.h>
#include <asm/page.h>
diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c
index 71fe4cb..6721ee2 100644
--- a/arch/mips/mm/c-r4k.c
+++ b/arch/mips/mm/c-r4k.c
@@ -13,6 +13,7 @@
#include <linux/kernel.h>
#include <linux/linkage.h>
#include <linux/sched.h>
+#include <linux/smp.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/bitops.h>
diff --git a/arch/mips/mm/c-tx39.c b/arch/mips/mm/c-tx39.c
index f7c8f9c..6515b44 100644
--- a/arch/mips/mm/c-tx39.c
+++ b/arch/mips/mm/c-tx39.c
@@ -11,6 +11,7 @@
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/sched.h>
+#include <linux/smp.h>
#include <linux/mm.h>
#include <asm/cacheops.h>
diff --git a/arch/mips/mm/highmem.c b/arch/mips/mm/highmem.c
index 2b1309b..e274fda 100644
--- a/arch/mips/mm/highmem.c
+++ b/arch/mips/mm/highmem.c
@@ -1,5 +1,6 @@
#include <linux/module.h>
#include <linux/highmem.h>
+#include <linux/smp.h>
#include <asm/fixmap.h>
#include <asm/tlbflush.h>
diff --git a/arch/mips/mm/init.c b/arch/mips/mm/init.c
index c551129..0e82050 100644
--- a/arch/mips/mm/init.c
+++ b/arch/mips/mm/init.c
@@ -13,6 +13,7 @@
#include <linux/module.h>
#include <linux/signal.h>
#include <linux/sched.h>
+#include <linux/smp.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
diff --git a/arch/mips/mm/page.c b/arch/mips/mm/page.c
index 48060c6..f5c7375 100644
--- a/arch/mips/mm/page.c
+++ b/arch/mips/mm/page.c
@@ -10,6 +10,7 @@
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/sched.h>
+#include <linux/smp.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/proc_fs.h>
diff --git a/arch/mips/mm/tlb-r3k.c b/arch/mips/mm/tlb-r3k.c
index 1c0048a..0f5ab23 100644
--- a/arch/mips/mm/tlb-r3k.c
+++ b/arch/mips/mm/tlb-r3k.c
@@ -13,6 +13,7 @@
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/sched.h>
+#include <linux/smp.h>
#include <linux/mm.h>
#include <asm/page.h>
diff --git a/arch/mips/mm/tlb-r4k.c b/arch/mips/mm/tlb-r4k.c
index f60fe51..cee502c 100644
--- a/arch/mips/mm/tlb-r4k.c
+++ b/arch/mips/mm/tlb-r4k.c
@@ -10,6 +10,7 @@
*/
#include <linux/init.h>
#include <linux/sched.h>
+#include <linux/smp.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
diff --git a/arch/mips/mm/tlb-r8k.c b/arch/mips/mm/tlb-r8k.c
index 4ec95cc..2b82f23 100644
--- a/arch/mips/mm/tlb-r8k.c
+++ b/arch/mips/mm/tlb-r8k.c
@@ -10,6 +10,7 @@
*/
#include <linux/init.h>
#include <linux/sched.h>
+#include <linux/smp.h>
#include <linux/mm.h>
#include <asm/cpu.h>
diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
index 8f606ea..9a17bf8 100644
--- a/arch/mips/mm/tlbex.c
+++ b/arch/mips/mm/tlbex.c
@@ -23,6 +23,7 @@
#include <linux/bug.h>
#include <linux/kernel.h>
#include <linux/types.h>
+#include <linux/smp.h>
#include <linux/string.h>
#include <linux/init.h>
diff --git a/arch/mips/mti-malta/malta-int.c b/arch/mips/mti-malta/malta-int.c
index ea17611..b4eaf13 100644
--- a/arch/mips/mti-malta/malta-int.c
+++ b/arch/mips/mti-malta/malta-int.c
@@ -24,6 +24,7 @@
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/sched.h>
+#include <linux/smp.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/io.h>
diff --git a/arch/mips/pci/pci-ip27.c b/arch/mips/pci/pci-ip27.c
index dda6f20..a0e726e 100644
--- a/arch/mips/pci/pci-ip27.c
+++ b/arch/mips/pci/pci-ip27.c
@@ -10,6 +10,7 @@
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/pci.h>
+#include <linux/smp.h>
#include <asm/sn/arch.h>
#include <asm/pci/bridge.h>
#include <asm/paccess.h>
diff --git a/arch/mips/pmc-sierra/yosemite/smp.c b/arch/mips/pmc-sierra/yosemite/smp.c
index f78c29b..8ace277 100644
--- a/arch/mips/pmc-sierra/yosemite/smp.c
+++ b/arch/mips/pmc-sierra/yosemite/smp.c
@@ -1,5 +1,6 @@
#include <linux/linkage.h>
#include <linux/sched.h>
+#include <linux/smp.h>
#include <asm/pmon.h>
#include <asm/titan_dep.h>
diff --git a/arch/mips/power/hibernate.S b/arch/mips/power/hibernate.S
index 486bd3f..4b8174b 100644
--- a/arch/mips/power/hibernate.S
+++ b/arch/mips/power/hibernate.S
@@ -43,15 +43,6 @@
bne t1, t3, 1b
PTR_L t0, PBE_NEXT(t0)
bnez t0, 0b
- /* flush caches to make sure context is in memory */
- PTR_L t0, __flush_cache_all
- jalr t0
- /* flush tlb entries */
-#ifdef CONFIG_SMP
- jal flush_tlb_all
-#else
- jal local_flush_tlb_all
-#endif
PTR_LA t0, saved_regs
PTR_L ra, PT_R31(t0)
PTR_L sp, PT_R29(t0)
diff --git a/arch/mips/sgi-ip27/ip27-init.c b/arch/mips/sgi-ip27/ip27-init.c
index 4a500e8..51d3a4f 100644
--- a/arch/mips/sgi-ip27/ip27-init.c
+++ b/arch/mips/sgi-ip27/ip27-init.c
@@ -9,6 +9,7 @@
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/sched.h>
+#include <linux/smp.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/cpumask.h>
diff --git a/arch/mips/sgi-ip27/ip27-irq.c b/arch/mips/sgi-ip27/ip27-irq.c
index 1bb692a..c1c8e40 100644
--- a/arch/mips/sgi-ip27/ip27-irq.c
+++ b/arch/mips/sgi-ip27/ip27-irq.c
@@ -18,6 +18,7 @@
#include <linux/ioport.h>
#include <linux/timex.h>
#include <linux/slab.h>
+#include <linux/smp.h>
#include <linux/random.h>
#include <linux/kernel.h>
#include <linux/kernel_stat.h>
diff --git a/arch/mips/sgi-ip27/ip27-timer.c b/arch/mips/sgi-ip27/ip27-timer.c
index f10a7cd..6d0e59f 100644
--- a/arch/mips/sgi-ip27/ip27-timer.c
+++ b/arch/mips/sgi-ip27/ip27-timer.c
@@ -10,6 +10,7 @@
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/param.h>
+#include <linux/smp.h>
#include <linux/time.h>
#include <linux/timex.h>
#include <linux/mm.h>
diff --git a/arch/mips/sgi-ip27/ip27-xtalk.c b/arch/mips/sgi-ip27/ip27-xtalk.c
index 6ae64e8..5e871e7 100644
--- a/arch/mips/sgi-ip27/ip27-xtalk.c
+++ b/arch/mips/sgi-ip27/ip27-xtalk.c
@@ -9,6 +9,7 @@
#include <linux/init.h>
#include <linux/kernel.h>
+#include <linux/smp.h>
#include <asm/sn/types.h>
#include <asm/sn/klconfig.h>
#include <asm/sn/hub.h>
diff --git a/arch/mips/sibyte/bcm1480/irq.c b/arch/mips/sibyte/bcm1480/irq.c
index 690de06..ba59839 100644
--- a/arch/mips/sibyte/bcm1480/irq.c
+++ b/arch/mips/sibyte/bcm1480/irq.c
@@ -19,6 +19,7 @@
#include <linux/init.h>
#include <linux/linkage.h>
#include <linux/interrupt.h>
+#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/slab.h>
diff --git a/arch/mips/sibyte/common/cfe_console.c b/arch/mips/sibyte/common/cfe_console.c
index 81e3d54..1ad2da1 100644
--- a/arch/mips/sibyte/common/cfe_console.c
+++ b/arch/mips/sibyte/common/cfe_console.c
@@ -51,12 +51,13 @@
setleds("u0cn");
} else if (!strcmp(consdev, "uart1")) {
setleds("u1cn");
+ } else
#endif
#ifdef CONFIG_VGA_CONSOLE
- } else if (!strcmp(consdev, "pcconsole0")) {
- setleds("pccn");
-#endif
+ if (!strcmp(consdev, "pcconsole0")) {
+ setleds("pccn");
} else
+#endif
return -ENODEV;
}
return 0;
diff --git a/arch/mips/sni/time.c b/arch/mips/sni/time.c
index 69f5f88..0d9ec1a 100644
--- a/arch/mips/sni/time.c
+++ b/arch/mips/sni/time.c
@@ -1,5 +1,6 @@
#include <linux/types.h>
#include <linux/interrupt.h>
+#include <linux/smp.h>
#include <linux/time.h>
#include <linux/clockchips.h>
diff --git a/arch/mn10300/kernel/vmlinux.lds.S b/arch/mn10300/kernel/vmlinux.lds.S
index 24de6b9..bcebcef 100644
--- a/arch/mn10300/kernel/vmlinux.lds.S
+++ b/arch/mn10300/kernel/vmlinux.lds.S
@@ -38,14 +38,10 @@
_etext = .; /* End of text section */
- . = ALIGN(16); /* Exception table */
- __start___ex_table = .;
- __ex_table : { *(__ex_table) }
- __stop___ex_table = .;
-
+ EXCEPTION_TABLE(16)
BUG_TABLE
- RODATA
+ RO_DATA(PAGE_SIZE)
/* writeable */
.data : { /* Data */
@@ -53,27 +49,19 @@
CONSTRUCTORS
}
- . = ALIGN(PAGE_SIZE);
- __nosave_begin = .;
- .data_nosave : { *(.data.nosave) }
- . = ALIGN(PAGE_SIZE);
- __nosave_end = .;
+ .data_nosave : { NOSAVE_DATA; }
- . = ALIGN(PAGE_SIZE);
- .data.page_aligned : { *(.data.idt) }
-
- . = ALIGN(32);
- .data.cacheline_aligned : { *(.data.cacheline_aligned) }
+ .data.page_aligned : { PAGE_ALIGNED_DATA(PAGE_SIZE); }
+ .data.cacheline_aligned : { CACHELINE_ALIGNED_DATA(32); }
/* rarely changed data like cpu maps */
. = ALIGN(32);
.data.read_mostly : AT(ADDR(.data.read_mostly)) {
- *(.data.read_mostly)
+ READ_MOSTLY_DATA(32);
_edata = .; /* End of data section */
}
- . = ALIGN(THREAD_SIZE); /* init_task */
- .data.init_task : { *(.data.init_task) }
+ .data.init_task : { INIT_TASK(THREAD_SIZE); }
/* might get freed after init */
. = ALIGN(PAGE_SIZE);
@@ -88,23 +76,18 @@
__init_begin = .;
.init.text : {
_sinittext = .;
- *(.init.text)
+ INIT_TEXT;
_einittext = .;
}
- .init.data : { *(.init.data) }
- . = ALIGN(16);
- __setup_start = .;
- .setup.init : { KEEP(*(.init.setup)) }
- __setup_end = .;
+ .init.data : { INIT_DATA; }
+ .setup.init : { INIT_SETUP(16); }
__initcall_start = .;
.initcall.init : {
INITCALLS
}
__initcall_end = .;
- __con_initcall_start = .;
- .con_initcall.init : { *(.con_initcall.init) }
- __con_initcall_end = .;
+ .con_initcall.init : { CON_INITCALL; }
SECURITY_INIT
. = ALIGN(4);
@@ -114,28 +97,17 @@
.altinstr_replacement : { *(.altinstr_replacement) }
/* .exit.text is discarded at runtime, not link time, to deal with references
   from .altinstructions and .eh_frame */
- .exit.text : { *(.exit.text) }
- .exit.data : { *(.exit.data) }
+ .exit.text : { EXIT_TEXT; }
+ .exit.data : { EXIT_DATA; }
-#ifdef CONFIG_BLK_DEV_INITRD
- . = ALIGN(PAGE_SIZE);
- __initramfs_start = .;
- .init.ramfs : { *(.init.ramfs) }
- __initramfs_end = .;
-#endif
+ .init.ramfs : { INIT_RAM_FS; }
PERCPU(32)
. = ALIGN(PAGE_SIZE);
__init_end = .;
/* freed after init ends here */
- __bss_start = .; /* BSS */
- .bss : {
- *(.bss.page_aligned)
- *(.bss)
- }
- . = ALIGN(4);
- __bss_stop = .;
+ BSS(4)
_end = . ;
@@ -145,7 +117,7 @@
/* Sections to be discarded */
/DISCARD/ : {
- *(.exitcall.exit)
+ EXIT_CALL
}
STABS_DEBUG
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index bf6cedf..d00131c 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -62,7 +62,6 @@
config TRACE_IRQFLAGS_SUPPORT
bool
- depends on PPC64
default y
config LOCKDEP_SUPPORT
diff --git a/arch/powerpc/boot/.gitignore b/arch/powerpc/boot/.gitignore
index 2f50acd..3d80c3e 100644
--- a/arch/powerpc/boot/.gitignore
+++ b/arch/powerpc/boot/.gitignore
@@ -36,3 +36,13 @@
zconf.h
zlib.h
zutil.h
+fdt.c
+fdt.h
+fdt_ro.c
+fdt_rw.c
+fdt_strerror.c
+fdt_sw.c
+fdt_wip.c
+libfdt.h
+libfdt_internal.h
+
diff --git a/arch/powerpc/boot/dts/amigaone.dts b/arch/powerpc/boot/dts/amigaone.dts
index 26549fc..49ac36b 100644
--- a/arch/powerpc/boot/dts/amigaone.dts
+++ b/arch/powerpc/boot/dts/amigaone.dts
@@ -70,8 +70,8 @@
devsel-speed = <0x00000001>;
min-grant = <0>;
max-latency = <0>;
- /* First 64k for I/O at 0x0 on PCI mapped to 0x0 on ISA. */
- ranges = <0x00000001 0 0x01000000 0 0x00000000 0x00010000>;
+ /* First 4k for I/O at 0x0 on PCI mapped to 0x0 on ISA. */
+ ranges = <0x00000001 0 0x01000000 0 0x00000000 0x00001000>;
interrupt-parent = <&i8259>;
#interrupt-cells = <2>;
#address-cells = <2>;
diff --git a/arch/powerpc/boot/dts/mpc8569mds.dts b/arch/powerpc/boot/dts/mpc8569mds.dts
index a8dcb01..a680165 100644
--- a/arch/powerpc/boot/dts/mpc8569mds.dts
+++ b/arch/powerpc/boot/dts/mpc8569mds.dts
@@ -253,6 +253,7 @@
/* Filled in by U-Boot */
clock-frequency = <0>;
status = "disabled";
+ sdhci,1-bit-only;
};
crypto@30000 {
diff --git a/arch/powerpc/include/asm/cpm1.h b/arch/powerpc/include/asm/cpm1.h
index 2ff7987..7685ffd 100644
--- a/arch/powerpc/include/asm/cpm1.h
+++ b/arch/powerpc/include/asm/cpm1.h
@@ -598,8 +598,6 @@
#define CICR_IEN ((uint)0x00000080) /* Int. enable */
#define CICR_SPS ((uint)0x00000001) /* SCC Spread */
-#define IMAP_ADDR (get_immrbase())
-
#define CPM_PIN_INPUT 0
#define CPM_PIN_OUTPUT 1
#define CPM_PIN_PRIMARY 0
diff --git a/arch/powerpc/include/asm/dma-mapping.h b/arch/powerpc/include/asm/dma-mapping.h
index 3d9e887..b44aaab 100644
--- a/arch/powerpc/include/asm/dma-mapping.h
+++ b/arch/powerpc/include/asm/dma-mapping.h
@@ -309,7 +309,9 @@
struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
BUG_ON(!dma_ops);
- dma_ops->sync_single_range_for_cpu(dev, dma_handle, 0,
+
+ if (dma_ops->sync_single_range_for_cpu)
+ dma_ops->sync_single_range_for_cpu(dev, dma_handle, 0,
size, direction);
}
@@ -320,7 +322,9 @@
struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
BUG_ON(!dma_ops);
- dma_ops->sync_single_range_for_device(dev, dma_handle,
+
+ if (dma_ops->sync_single_range_for_device)
+ dma_ops->sync_single_range_for_device(dev, dma_handle,
0, size, direction);
}
@@ -331,7 +335,9 @@
struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
BUG_ON(!dma_ops);
- dma_ops->sync_sg_for_cpu(dev, sgl, nents, direction);
+
+ if (dma_ops->sync_sg_for_cpu)
+ dma_ops->sync_sg_for_cpu(dev, sgl, nents, direction);
}
static inline void dma_sync_sg_for_device(struct device *dev,
@@ -341,7 +347,9 @@
struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
BUG_ON(!dma_ops);
- dma_ops->sync_sg_for_device(dev, sgl, nents, direction);
+
+ if (dma_ops->sync_sg_for_device)
+ dma_ops->sync_sg_for_device(dev, sgl, nents, direction);
}
static inline void dma_sync_single_range_for_cpu(struct device *dev,
@@ -351,7 +359,9 @@
struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
BUG_ON(!dma_ops);
- dma_ops->sync_single_range_for_cpu(dev, dma_handle,
+
+ if (dma_ops->sync_single_range_for_cpu)
+ dma_ops->sync_single_range_for_cpu(dev, dma_handle,
offset, size, direction);
}
@@ -362,7 +372,9 @@
struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
BUG_ON(!dma_ops);
- dma_ops->sync_single_range_for_device(dev, dma_handle, offset,
+
+ if (dma_ops->sync_single_range_for_device)
+ dma_ops->sync_single_range_for_device(dev, dma_handle, offset,
size, direction);
}
#else /* CONFIG_PPC_NEED_DMA_SYNC_OPS */
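
Every hunk in this header applies the same defensive rule: the per-platform sync hooks in struct dma_mapping_ops are optional, so only call through a hook the backend actually provides; coherent platforms simply leave them NULL. A small sketch of the pattern with illustrative names (demo_dma_ops, demo_sync_for_cpu):

	struct demo_dma_ops {
		void (*sync_for_cpu)(void *buf, unsigned long size);
	};

	static void demo_sync_for_cpu(struct demo_dma_ops *ops,
				      void *buf, unsigned long size)
	{
		if (ops->sync_for_cpu)		/* hook may legitimately be absent */
			ops->sync_for_cpu(buf, size);
		/* nothing to do for coherent backends that leave the hook NULL */
	}
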
diff --git a/arch/powerpc/include/asm/highmem.h b/arch/powerpc/include/asm/highmem.h
index 684a73f..a74c4ee 100644
--- a/arch/powerpc/include/asm/highmem.h
+++ b/arch/powerpc/include/asm/highmem.h
@@ -22,9 +22,7 @@
#ifdef __KERNEL__
-#include <linux/init.h>
#include <linux/interrupt.h>
-#include <linux/highmem.h>
#include <asm/kmap_types.h>
#include <asm/tlbflush.h>
#include <asm/page.h>
@@ -62,6 +60,9 @@
extern void *kmap_high(struct page *page);
extern void kunmap_high(struct page *page);
+extern void *kmap_atomic_prot(struct page *page, enum km_type type,
+ pgprot_t prot);
+extern void kunmap_atomic(void *kvaddr, enum km_type type);
static inline void *kmap(struct page *page)
{
@@ -79,62 +80,11 @@
kunmap_high(page);
}
-/*
- * The use of kmap_atomic/kunmap_atomic is discouraged - kmap/kunmap
- * gives a more generic (and caching) interface. But kmap_atomic can
- * be used in IRQ contexts, so in some (very limited) cases we need
- * it.
- */
-static inline void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot)
-{
- unsigned int idx;
- unsigned long vaddr;
-
- /* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
- pagefault_disable();
- if (!PageHighMem(page))
- return page_address(page);
-
- debug_kmap_atomic(type);
- idx = type + KM_TYPE_NR*smp_processor_id();
- vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
-#ifdef CONFIG_DEBUG_HIGHMEM
- BUG_ON(!pte_none(*(kmap_pte-idx)));
-#endif
- __set_pte_at(&init_mm, vaddr, kmap_pte-idx, mk_pte(page, prot), 1);
- local_flush_tlb_page(NULL, vaddr);
-
- return (void*) vaddr;
-}
-
static inline void *kmap_atomic(struct page *page, enum km_type type)
{
return kmap_atomic_prot(page, type, kmap_prot);
}
-static inline void kunmap_atomic(void *kvaddr, enum km_type type)
-{
-#ifdef CONFIG_DEBUG_HIGHMEM
- unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
- enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();
-
- if (vaddr < __fix_to_virt(FIX_KMAP_END)) {
- pagefault_enable();
- return;
- }
-
- BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
-
- /*
- * force other mappings to Oops if they'll try to access
- * this pte without first remap it
- */
- pte_clear(&init_mm, vaddr, kmap_pte-idx);
- local_flush_tlb_page(NULL, vaddr);
-#endif
- pagefault_enable();
-}
-
static inline struct page *kmap_atomic_to_page(void *ptr)
{
unsigned long idx, vaddr = (unsigned long) ptr;
@@ -148,6 +98,7 @@
return pte_page(*pte);
}
+
#define flush_cache_kmaps() flush_cache_all()
#endif /* __KERNEL__ */
diff --git a/arch/powerpc/include/asm/hw_irq.h b/arch/powerpc/include/asm/hw_irq.h
index 867ab8e..8b505ea 100644
--- a/arch/powerpc/include/asm/hw_irq.h
+++ b/arch/powerpc/include/asm/hw_irq.h
@@ -68,13 +68,13 @@
#if defined(CONFIG_BOOKE)
#define SET_MSR_EE(x) mtmsr(x)
-#define local_irq_restore(flags) __asm__ __volatile__("wrtee %0" : : "r" (flags) : "memory")
+#define raw_local_irq_restore(flags) __asm__ __volatile__("wrtee %0" : : "r" (flags) : "memory")
#else
#define SET_MSR_EE(x) mtmsr(x)
-#define local_irq_restore(flags) mtmsr(flags)
+#define raw_local_irq_restore(flags) mtmsr(flags)
#endif
-static inline void local_irq_disable(void)
+static inline void raw_local_irq_disable(void)
{
#ifdef CONFIG_BOOKE
__asm__ __volatile__("wrteei 0": : :"memory");
@@ -86,7 +86,7 @@
#endif
}
-static inline void local_irq_enable(void)
+static inline void raw_local_irq_enable(void)
{
#ifdef CONFIG_BOOKE
__asm__ __volatile__("wrteei 1": : :"memory");
@@ -98,7 +98,7 @@
#endif
}
-static inline void local_irq_save_ptr(unsigned long *flags)
+static inline void raw_local_irq_save_ptr(unsigned long *flags)
{
unsigned long msr;
msr = mfmsr();
@@ -110,12 +110,12 @@
#endif
}
-#define local_save_flags(flags) ((flags) = mfmsr())
-#define local_irq_save(flags) local_irq_save_ptr(&flags)
-#define irqs_disabled() ((mfmsr() & MSR_EE) == 0)
+#define raw_local_save_flags(flags) ((flags) = mfmsr())
+#define raw_local_irq_save(flags) raw_local_irq_save_ptr(&flags)
+#define raw_irqs_disabled() ((mfmsr() & MSR_EE) == 0)
+#define raw_irqs_disabled_flags(flags) (((flags) & MSR_EE) == 0)
-#define hard_irq_enable() local_irq_enable()
-#define hard_irq_disable() local_irq_disable()
+#define hard_irq_disable() raw_local_irq_disable()
static inline int irqs_disabled_flags(unsigned long flags)
{
diff --git a/arch/powerpc/include/asm/pte-hash64-64k.h b/arch/powerpc/include/asm/pte-hash64-64k.h
index e05d26f..82b72207 100644
--- a/arch/powerpc/include/asm/pte-hash64-64k.h
+++ b/arch/powerpc/include/asm/pte-hash64-64k.h
@@ -47,7 +47,8 @@
* generic accessors and iterators here
*/
#define __real_pte(e,p) ((real_pte_t) { \
- (e), pte_val(*((p) + PTRS_PER_PTE)) })
+ (e), ((e) & _PAGE_COMBO) ? \
+ (pte_val(*((p) + PTRS_PER_PTE))) : 0 })
#define __rpte_to_hidx(r,index) ((pte_val((r).pte) & _PAGE_COMBO) ? \
(((r).hidx >> ((index)<<2)) & 0xf) : ((pte_val((r).pte) >> 12) & 0xf))
#define __rpte_to_pte(r) ((r).pte)
diff --git a/arch/powerpc/include/asm/rtas.h b/arch/powerpc/include/asm/rtas.h
index 01c1233..168fce7 100644
--- a/arch/powerpc/include/asm/rtas.h
+++ b/arch/powerpc/include/asm/rtas.h
@@ -58,7 +58,7 @@
unsigned long entry; /* physical address pointer */
unsigned long base; /* physical address pointer */
unsigned long size;
- spinlock_t lock;
+ raw_spinlock_t lock;
struct rtas_args args;
struct device_node *dev; /* virtual address pointer */
};
@@ -245,5 +245,8 @@
(devfn << 8) | (reg & 0xff);
}
+extern void __cpuinit rtas_give_timebase(void);
+extern void __cpuinit rtas_take_timebase(void);
+
#endif /* __KERNEL__ */
#endif /* _POWERPC_RTAS_H */
diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S
index 4dd38f1..3cadba6 100644
--- a/arch/powerpc/kernel/entry_32.S
+++ b/arch/powerpc/kernel/entry_32.S
@@ -191,11 +191,49 @@
mflr r9
lwz r11,0(r9) /* virtual address of handler */
lwz r9,4(r9) /* where to go when done */
+#ifdef CONFIG_TRACE_IRQFLAGS
+ lis r12,reenable_mmu@h
+ ori r12,r12,reenable_mmu@l
+ mtspr SPRN_SRR0,r12
+ mtspr SPRN_SRR1,r10
+ SYNC
+ RFI
+reenable_mmu: /* re-enable mmu so we can */
+ mfmsr r10
+ lwz r12,_MSR(r1)
+ xor r10,r10,r12
+ andi. r10,r10,MSR_EE /* Did EE change? */
+ beq 1f
+
+ /* Save handler and return address into the 2 unused words
+ * of the STACK_FRAME_OVERHEAD (sneak sneak sneak). Everything
+ * else can be recovered from the pt_regs except r3 which for
+ * normal interrupts has been set to pt_regs and for syscalls
+ * is an argument, so we temporarily use ORIG_GPR3 to save it
+ */
+ stw r9,8(r1)
+ stw r11,12(r1)
+ stw r3,ORIG_GPR3(r1)
+ bl trace_hardirqs_off
+ lwz r0,GPR0(r1)
+ lwz r3,ORIG_GPR3(r1)
+ lwz r4,GPR4(r1)
+ lwz r5,GPR5(r1)
+ lwz r6,GPR6(r1)
+ lwz r7,GPR7(r1)
+ lwz r8,GPR8(r1)
+ lwz r9,8(r1)
+ lwz r11,12(r1)
+1: mtctr r11
+ mtlr r9
+ bctr /* jump to handler */
+#else /* CONFIG_TRACE_IRQFLAGS */
mtspr SPRN_SRR0,r11
mtspr SPRN_SRR1,r10
mtlr r9
SYNC
RFI /* jump to handler, enable MMU */
+#endif /* CONFIG_TRACE_IRQFLAGS */
#if defined (CONFIG_6xx) || defined(CONFIG_E500)
4: rlwinm r12,r12,0,~_TLF_NAPPING
@@ -251,6 +289,31 @@
#ifdef SHOW_SYSCALLS
bl do_show_syscall
#endif /* SHOW_SYSCALLS */
+#ifdef CONFIG_TRACE_IRQFLAGS
+ /* Return from syscalls can (and generally will) hard enable
+ * interrupts. You aren't supposed to call a syscall with
+ * interrupts disabled in the first place. However, to ensure
+ * that we get it right vs. lockdep if it happens, we force
+ * that hard enable here with appropriate tracing if we see
+ * that we have been called with interrupts off
+ */
+ mfmsr r11
+ andi. r12,r11,MSR_EE
+ bne+ 1f
+ /* We came in with interrupts disabled, we enable them now */
+ bl trace_hardirqs_on
+ mfmsr r11
+ lwz r0,GPR0(r1)
+ lwz r3,GPR3(r1)
+ lwz r4,GPR4(r1)
+ ori r11,r11,MSR_EE
+ lwz r5,GPR5(r1)
+ lwz r6,GPR6(r1)
+ lwz r7,GPR7(r1)
+ lwz r8,GPR8(r1)
+ mtmsr r11
+1:
+#endif /* CONFIG_TRACE_IRQFLAGS */
rlwinm r10,r1,0,0,(31-THREAD_SHIFT) /* current_thread_info() */
lwz r11,TI_FLAGS(r10)
andi. r11,r11,_TIF_SYSCALL_T_OR_A
@@ -275,6 +338,7 @@
rlwinm r12,r1,0,0,(31-THREAD_SHIFT) /* current_thread_info() */
/* disable interrupts so current_thread_info()->flags can't change */
LOAD_MSR_KERNEL(r10,MSR_KERNEL) /* doesn't include MSR_EE */
+ /* Note: We don't bother telling lockdep about it */
SYNC
MTMSRD(r10)
lwz r9,TI_FLAGS(r12)
@@ -288,6 +352,19 @@
oris r11,r11,0x1000 /* Set SO bit in CR */
stw r11,_CCR(r1)
syscall_exit_cont:
+ lwz r8,_MSR(r1)
+#ifdef CONFIG_TRACE_IRQFLAGS
+ /* If we are going to return from the syscall with interrupts
+ * off, we trace that here. It shouldn't happen though but we
+	 * off, we trace that here. It shouldn't happen, but we
+	 * want to catch the bugger if it does, right?
+ andi. r10,r8,MSR_EE
+ bne+ 1f
+ stw r3,GPR3(r1)
+ bl trace_hardirqs_off
+ lwz r3,GPR3(r1)
+1:
+#endif /* CONFIG_TRACE_IRQFLAGS */
#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
/* If the process has its own DBCR0 value, load it up. The internal
debug mode bit tells us that dbcr0 should be loaded. */
@@ -311,7 +388,6 @@
mtlr r4
mtcr r5
lwz r7,_NIP(r1)
- lwz r8,_MSR(r1)
FIX_SRR1(r8, r0)
lwz r2,GPR2(r1)
lwz r1,GPR1(r1)
@@ -394,7 +470,9 @@
andi. r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP)
beq ret_from_except
- /* Re-enable interrupts */
+ /* Re-enable interrupts. There is no need to trace that with
+ * lockdep as we are supposed to have IRQs on at this point
+ */
ori r10,r10,MSR_EE
SYNC
MTMSRD(r10)
@@ -705,6 +783,7 @@
/* Hard-disable interrupts so that current_thread_info()->flags
* can't change between when we test it and when we return
* from the interrupt. */
+ /* Note: We don't bother telling lockdep about it */
LOAD_MSR_KERNEL(r10,MSR_KERNEL)
SYNC /* Some chip revs have problems here... */
MTMSRD(r10) /* disable interrupts */
@@ -744,11 +823,24 @@
beq+ restore
andi. r0,r3,MSR_EE /* interrupts off? */
beq restore /* don't schedule if so */
+#ifdef CONFIG_TRACE_IRQFLAGS
+ /* Lockdep thinks irqs are enabled, we need to call
+ * preempt_schedule_irq with IRQs off, so we inform lockdep
+ * now that we -did- turn them off already
+ */
+ bl trace_hardirqs_off
+#endif
1: bl preempt_schedule_irq
rlwinm r9,r1,0,0,(31-THREAD_SHIFT)
lwz r3,TI_FLAGS(r9)
andi. r0,r3,_TIF_NEED_RESCHED
bne- 1b
+#ifdef CONFIG_TRACE_IRQFLAGS
+ /* And now, to properly rebalance the above, we tell lockdep they
+ * are being turned back on, which will happen when we return
+ */
+ bl trace_hardirqs_on
+#endif
#else
resume_kernel:
#endif /* CONFIG_PREEMPT */
@@ -765,6 +857,28 @@
stw r6,icache_44x_need_flush@l(r4)
1:
#endif /* CONFIG_44x */
+
+ lwz r9,_MSR(r1)
+#ifdef CONFIG_TRACE_IRQFLAGS
+ /* Lockdep doesn't know about the fact that IRQs are temporarily turned
+ * off in this assembly code while peeking at TI_FLAGS() and such. However
+ * we need to inform it if the exception turned interrupts off, and we
+	 * are about to turn them back on.
+ *
+	 * The problem here sadly is that we don't know whether the exception was
+ * one that turned interrupts off or not. So we always tell lockdep about
+ * turning them on here when we go back to wherever we came from with EE
+	 * on, even if that may mean some redundant calls being tracked. Maybe later
+ * we could encode what the exception did somewhere or test the exception
+ * type in the pt_regs but that sounds overkill
+ */
+ andi. r10,r9,MSR_EE
+ beq 1f
+ bl trace_hardirqs_on
+ lwz r9,_MSR(r1)
+1:
+#endif /* CONFIG_TRACE_IRQFLAGS */
+
lwz r0,GPR0(r1)
lwz r2,GPR2(r1)
REST_4GPRS(3, r1)
@@ -782,7 +896,6 @@
stwcx. r0,0,r1 /* to clear the reservation */
#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
- lwz r9,_MSR(r1)
andi. r10,r9,MSR_RI /* check if this exception occurred */
beql nonrecoverable /* at a bad place (MSR:RI = 0) */
@@ -805,7 +918,6 @@
MTMSRD(r10) /* clear the RI bit */
.globl exc_exit_restart
exc_exit_restart:
- lwz r9,_MSR(r1)
lwz r12,_NIP(r1)
FIX_SRR1(r9,r10)
mtspr SPRN_SRR0,r12
@@ -1035,11 +1147,18 @@
beq do_user_signal
do_resched: /* r10 contains MSR_KERNEL here */
+ /* Note: We don't need to inform lockdep that we are enabling
+ * interrupts here. As far as it knows, they are already enabled
+ */
ori r10,r10,MSR_EE
SYNC
MTMSRD(r10) /* hard-enable interrupts */
bl schedule
recheck:
+	/* Note: And we don't tell it we are disabling them again
+	 * either. Those disable/enable cycles used to peek at
+ * TI_FLAGS aren't advertised.
+ */
LOAD_MSR_KERNEL(r10,MSR_KERNEL)
SYNC
MTMSRD(r10) /* disable interrupts */
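
The assembly comments above all lean on the same lockdep hard-IRQ tracing contract. As a rough C-level illustration only (not part of this patch; the function name is made up): whenever interrupts are hard-disabled without going through local_irq_save(), lockdep has to be told explicitly, and the off/on notifications must be balanced before returning to a context that expects interrupts enabled.

#include <linux/irqflags.h>

static void example_hard_disable_with_tracing(void)
{
	unsigned long flags;

	raw_local_irq_save(flags);	/* hard-disable; lockdep is not told */
	trace_hardirqs_off();		/* so tell it explicitly, as the asm does */

	/* ... peek at TI_FLAGS and friends with interrupts hard-off ... */

	trace_hardirqs_on();		/* rebalance before turning EE back on */
	raw_local_irq_restore(flags);
}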
diff --git a/arch/powerpc/kernel/head_32.S b/arch/powerpc/kernel/head_32.S
index 4846946..fc21329 100644
--- a/arch/powerpc/kernel/head_32.S
+++ b/arch/powerpc/kernel/head_32.S
@@ -1124,9 +1124,8 @@
RFI
/*
- * Use the first pair of BAT registers to map the 1st 16MB
- * of RAM to PAGE_OFFSET. From this point on we can't safely
- * call OF any more.
+ * On 601, we use 3 BATs to map up to 24M of RAM at _PAGE_OFFSET
+ * (we keep one for debugging) and on others, we use one 256M BAT.
*/
initial_bats:
lis r11,PAGE_OFFSET@h
@@ -1136,12 +1135,16 @@
bne 4f
ori r11,r11,4 /* set up BAT registers for 601 */
li r8,0x7f /* valid, block length = 8MB */
- oris r9,r11,0x800000@h /* set up BAT reg for 2nd 8M */
- oris r10,r8,0x800000@h /* set up BAT reg for 2nd 8M */
mtspr SPRN_IBAT0U,r11 /* N.B. 601 has valid bit in */
mtspr SPRN_IBAT0L,r8 /* lower BAT register */
- mtspr SPRN_IBAT1U,r9
- mtspr SPRN_IBAT1L,r10
+ addis r11,r11,0x800000@h
+ addis r8,r8,0x800000@h
+ mtspr SPRN_IBAT1U,r11
+ mtspr SPRN_IBAT1L,r8
+ addis r11,r11,0x800000@h
+ addis r8,r8,0x800000@h
+ mtspr SPRN_IBAT2U,r11
+ mtspr SPRN_IBAT2L,r8
isync
blr
diff --git a/arch/powerpc/kernel/of_device.c b/arch/powerpc/kernel/of_device.c
index fa983a5..a359cb0 100644
--- a/arch/powerpc/kernel/of_device.c
+++ b/arch/powerpc/kernel/of_device.c
@@ -76,7 +76,7 @@
dev->dev.archdata.of_node = np;
if (bus_id)
- dev_set_name(&dev->dev, bus_id);
+ dev_set_name(&dev->dev, "%s", bus_id);
else
of_device_make_bus_id(dev);
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 3e7135b..892a9f2 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -528,7 +528,7 @@
for (i = 0; i < 32; i++) {
if ((i % REGS_PER_LINE) == 0)
- printk("\n" KERN_INFO "GPR%02d: ", i);
+ printk("\nGPR%02d: ", i);
printk(REG " ", regs->gpr[i]);
if (i == LAST_VOLATILE && !FULL_REGS(regs))
break;
diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c
index ee4c760..c434823 100644
--- a/arch/powerpc/kernel/rtas.c
+++ b/arch/powerpc/kernel/rtas.c
@@ -38,9 +38,10 @@
#include <asm/syscalls.h>
#include <asm/smp.h>
#include <asm/atomic.h>
+#include <asm/time.h>
struct rtas_t rtas = {
- .lock = SPIN_LOCK_UNLOCKED
+ .lock = __RAW_SPIN_LOCK_UNLOCKED
};
EXPORT_SYMBOL(rtas);
@@ -67,6 +68,28 @@
void (*rtas_flash_term_hook)(int);
EXPORT_SYMBOL(rtas_flash_term_hook);
+/* RTAS uses home-made raw locking instead of spin_lock_irqsave
+ * because it can be called from within really nasty contexts,
+ * such as having the timebase stopped, which would lock up with
+ * normal locks and spinlock debugging enabled
+ */
+static unsigned long lock_rtas(void)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+ preempt_disable();
+ __raw_spin_lock_flags(&rtas.lock, flags);
+ return flags;
+}
+
+static void unlock_rtas(unsigned long flags)
+{
+ __raw_spin_unlock(&rtas.lock);
+ local_irq_restore(flags);
+ preempt_enable();
+}
+
/*
* call_rtas_display_status and call_rtas_display_status_delay
* are designed only for very early low-level debugging, which
@@ -79,7 +102,7 @@
if (!rtas.base)
return;
- spin_lock_irqsave(&rtas.lock, s);
+ s = lock_rtas();
args->token = 10;
args->nargs = 1;
@@ -89,7 +112,7 @@
enter_rtas(__pa(args));
- spin_unlock_irqrestore(&rtas.lock, s);
+ unlock_rtas(s);
}
static void call_rtas_display_status_delay(char c)
@@ -411,8 +434,7 @@
if (!rtas.entry || token == RTAS_UNKNOWN_SERVICE)
return -1;
- /* Gotta do something different here, use global lock for now... */
- spin_lock_irqsave(&rtas.lock, s);
+ s = lock_rtas();
rtas_args = &rtas.args;
rtas_args->token = token;
@@ -439,8 +461,7 @@
outputs[i] = rtas_args->rets[i+1];
ret = (nret > 0)? rtas_args->rets[0]: 0;
- /* Gotta do something different here, use global lock for now... */
- spin_unlock_irqrestore(&rtas.lock, s);
+ unlock_rtas(s);
if (buff_copy) {
log_error(buff_copy, ERR_TYPE_RTAS_LOG, 0);
@@ -837,7 +858,7 @@
buff_copy = get_errorlog_buffer();
- spin_lock_irqsave(&rtas.lock, flags);
+ flags = lock_rtas();
rtas.args = args;
enter_rtas(__pa(&rtas.args));
@@ -848,7 +869,7 @@
if (args.rets[0] == -1)
errbuf = __fetch_rtas_last_error(buff_copy);
- spin_unlock_irqrestore(&rtas.lock, flags);
+ unlock_rtas(flags);
if (buff_copy) {
if (errbuf)
@@ -951,3 +972,33 @@
/* break now */
return 1;
}
+
+static raw_spinlock_t timebase_lock;
+static u64 timebase = 0;
+
+void __cpuinit rtas_give_timebase(void)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+ hard_irq_disable();
+ __raw_spin_lock(&timebase_lock);
+ rtas_call(rtas_token("freeze-time-base"), 0, 1, NULL);
+ timebase = get_tb();
+ __raw_spin_unlock(&timebase_lock);
+
+ while (timebase)
+ barrier();
+ rtas_call(rtas_token("thaw-time-base"), 0, 1, NULL);
+ local_irq_restore(flags);
+}
+
+void __cpuinit rtas_take_timebase(void)
+{
+ while (!timebase)
+ barrier();
+ __raw_spin_lock(&timebase_lock);
+ set_tb(timebase >> 32, timebase & 0xffffffff);
+ timebase = 0;
+ __raw_spin_unlock(&timebase_lock);
+}
diff --git a/arch/powerpc/kernel/setup_32.c b/arch/powerpc/kernel/setup_32.c
index 1d15424..e1e3059 100644
--- a/arch/powerpc/kernel/setup_32.c
+++ b/arch/powerpc/kernel/setup_32.c
@@ -119,6 +119,8 @@
*/
notrace void __init machine_init(unsigned long dt_ptr)
{
+ lockdep_init();
+
/* Enable early debugging if any specified (see udbg.h) */
udbg_early_init();
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index 65484b2..0b47de0 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -68,7 +68,8 @@
/* SMP operations for this machine */
struct smp_ops_t *smp_ops;
-static volatile unsigned int cpu_callin_map[NR_CPUS];
+/* Can't be static due to PowerMac hackery */
+volatile unsigned int cpu_callin_map[NR_CPUS];
int smt_enabled_at_boot = 1;
diff --git a/arch/powerpc/kernel/udbg_16550.c b/arch/powerpc/kernel/udbg_16550.c
index 0362a89..acb74a1 100644
--- a/arch/powerpc/kernel/udbg_16550.c
+++ b/arch/powerpc/kernel/udbg_16550.c
@@ -219,7 +219,7 @@
#ifdef CONFIG_PPC_EARLY_DEBUG_44x
#include <platforms/44x/44x.h>
-static int udbg_44x_as1_flush(void)
+static void udbg_44x_as1_flush(void)
{
if (udbg_comport) {
while ((as1_readb(&udbg_comport->lsr) & LSR_THRE) == 0)
diff --git a/arch/powerpc/mm/Makefile b/arch/powerpc/mm/Makefile
index 2d2192e..3e68363 100644
--- a/arch/powerpc/mm/Makefile
+++ b/arch/powerpc/mm/Makefile
@@ -30,3 +30,4 @@
obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
obj-$(CONFIG_PPC_SUBPAGE_PROT) += subpage-prot.o
obj-$(CONFIG_NOT_COHERENT_CACHE) += dma-noncoherent.o
+obj-$(CONFIG_HIGHMEM) += highmem.o
diff --git a/arch/powerpc/mm/highmem.c b/arch/powerpc/mm/highmem.c
new file mode 100644
index 0000000..c2186c7
--- /dev/null
+++ b/arch/powerpc/mm/highmem.c
@@ -0,0 +1,77 @@
+/*
+ * highmem.c: virtual kernel memory mappings for high memory
+ *
+ * PowerPC version, stolen from the i386 version.
+ *
+ * Used in CONFIG_HIGHMEM systems for memory pages which
+ * are not addressable by direct kernel virtual addresses.
+ *
+ * Copyright (C) 1999 Gerhard Wichert, Siemens AG
+ * Gerhard.Wichert@pdb.siemens.de
+ *
+ *
+ * Redesigned the x86 32-bit VM architecture to deal with
+ * up to 16 Terabytes of physical memory. With current x86 CPUs
+ * we now support up to 64 Gigabytes physical RAM.
+ *
+ * Copyright (C) 1999 Ingo Molnar <mingo@redhat.com>
+ *
+ * Reworked for PowerPC by various contributors. Moved from
+ * highmem.h by Benjamin Herrenschmidt (c) 2009 IBM Corp.
+ */
+
+#include <linux/highmem.h>
+#include <linux/module.h>
+
+/*
+ * The use of kmap_atomic/kunmap_atomic is discouraged - kmap/kunmap
+ * gives a more generic (and caching) interface. But kmap_atomic can
+ * be used in IRQ contexts, so in some (very limited) cases we need
+ * it.
+ */
+void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot)
+{
+ unsigned int idx;
+ unsigned long vaddr;
+
+ /* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
+ pagefault_disable();
+ if (!PageHighMem(page))
+ return page_address(page);
+
+ debug_kmap_atomic(type);
+ idx = type + KM_TYPE_NR*smp_processor_id();
+ vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
+#ifdef CONFIG_DEBUG_HIGHMEM
+ BUG_ON(!pte_none(*(kmap_pte-idx)));
+#endif
+ __set_pte_at(&init_mm, vaddr, kmap_pte-idx, mk_pte(page, prot), 1);
+ local_flush_tlb_page(NULL, vaddr);
+
+ return (void*) vaddr;
+}
+EXPORT_SYMBOL(kmap_atomic_prot);
+
+void kunmap_atomic(void *kvaddr, enum km_type type)
+{
+#ifdef CONFIG_DEBUG_HIGHMEM
+ unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
+ enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();
+
+ if (vaddr < __fix_to_virt(FIX_KMAP_END)) {
+ pagefault_enable();
+ return;
+ }
+
+ BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
+
+ /*
+	 * force other mappings to Oops if they try to access
+	 * this pte without first remapping it
+ */
+ pte_clear(&init_mm, vaddr, kmap_pte-idx);
+ local_flush_tlb_page(NULL, vaddr);
+#endif
+ pagefault_enable();
+}
+EXPORT_SYMBOL(kunmap_atomic);
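
For context, a minimal hypothetical caller of the interface added above might look like the sketch below; the function name, the KM_USER0 slot and the zeroing are only illustrative and not taken from the patch. The km_type slot must match between the map and unmap calls.

#include <linux/highmem.h>
#include <linux/string.h>

/* Hypothetical example: clear a possibly-highmem page from atomic context. */
static void zero_any_page(struct page *page)
{
	void *vaddr = kmap_atomic_prot(page, KM_USER0, PAGE_KERNEL);

	memset(vaddr, 0, PAGE_SIZE);
	kunmap_atomic(vaddr, KM_USER0);
}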
diff --git a/arch/powerpc/platforms/44x/warp.c b/arch/powerpc/platforms/44x/warp.c
index 42e09a9..0362c88 100644
--- a/arch/powerpc/platforms/44x/warp.c
+++ b/arch/powerpc/platforms/44x/warp.c
@@ -16,6 +16,7 @@
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/of_gpio.h>
+#include <linux/of_i2c.h>
#include <asm/machdep.h>
#include <asm/prom.h>
@@ -65,7 +66,6 @@
static u32 post_info;
-/* I am not sure this is the best place for this... */
static int __init warp_post_info(void)
{
struct device_node *np;
@@ -194,9 +194,9 @@
return 0;
}
-static void pika_setup_critical_temp(struct i2c_client *client)
+static void pika_setup_critical_temp(struct device_node *np,
+ struct i2c_client *client)
{
- struct device_node *np;
int irq, rc;
/* Do this before enabling critical temp interrupt since we
@@ -208,14 +208,7 @@
i2c_smbus_write_byte_data(client, 2, 65); /* Thigh */
i2c_smbus_write_byte_data(client, 3, 0); /* Tlow */
- np = of_find_compatible_node(NULL, NULL, "adi,ad7414");
- if (np == NULL) {
- printk(KERN_ERR __FILE__ ": Unable to find ad7414\n");
- return;
- }
-
irq = irq_of_parse_and_map(np, 0);
- of_node_put(np);
if (irq == NO_IRQ) {
printk(KERN_ERR __FILE__ ": Unable to get ad7414 irq\n");
return;
@@ -244,32 +237,24 @@
static int pika_dtm_thread(void __iomem *fpga)
{
- struct i2c_adapter *adap;
+ struct device_node *np;
struct i2c_client *client;
- /* We loop in case either driver was compiled as a module and
- * has not been insmoded yet.
- */
- while (!(adap = i2c_get_adapter(0))) {
- set_current_state(TASK_INTERRUPTIBLE);
- schedule_timeout(HZ);
+ np = of_find_compatible_node(NULL, NULL, "adi,ad7414");
+ if (np == NULL)
+ return -ENOENT;
+
+ client = of_find_i2c_device_by_node(np);
+ if (client == NULL) {
+ of_node_put(np);
+ return -ENOENT;
}
- while (1) {
- list_for_each_entry(client, &adap->clients, list)
- if (client->addr == 0x4a)
- goto found_it;
+ pika_setup_critical_temp(np, client);
- set_current_state(TASK_INTERRUPTIBLE);
- schedule_timeout(HZ);
- }
+ of_node_put(np);
-found_it:
- pika_setup_critical_temp(client);
-
- i2c_put_adapter(adap);
-
- printk(KERN_INFO "PIKA DTM thread running.\n");
+ printk(KERN_INFO "Warp DTM thread running.\n");
while (!kthread_should_stop()) {
int val;
@@ -291,7 +276,6 @@
return 0;
}
-
static int __init pika_dtm_start(void)
{
struct task_struct *dtm_thread;
diff --git a/arch/powerpc/platforms/85xx/mpc85xx_mds.c b/arch/powerpc/platforms/85xx/mpc85xx_mds.c
index 77f90b3..60ed9c0 100644
--- a/arch/powerpc/platforms/85xx/mpc85xx_mds.c
+++ b/arch/powerpc/platforms/85xx/mpc85xx_mds.c
@@ -285,6 +285,7 @@
{ .type = "qe", },
{ .compatible = "fsl,qe", },
{ .compatible = "gianfar", },
+ { .compatible = "fsl,rapidio-delta", },
{},
};
diff --git a/arch/powerpc/platforms/85xx/smp.c b/arch/powerpc/platforms/85xx/smp.c
index cc0b0db..62c592ede6 100644
--- a/arch/powerpc/platforms/85xx/smp.c
+++ b/arch/powerpc/platforms/85xx/smp.c
@@ -52,20 +52,19 @@
pr_debug("smp_85xx_kick_cpu: kick CPU #%d\n", nr);
- local_irq_save(flags);
-
np = of_get_cpu_node(nr, NULL);
cpu_rel_addr = of_get_property(np, "cpu-release-addr", NULL);
if (cpu_rel_addr == NULL) {
printk(KERN_ERR "No cpu-release-addr for cpu %d\n", nr);
- local_irq_restore(flags);
return;
}
/* Map the spin table */
bptr_vaddr = ioremap(*cpu_rel_addr, SIZE_BOOT_ENTRY);
+ local_irq_save(flags);
+
out_be32(bptr_vaddr + BOOT_ENTRY_PIR, nr);
out_be32(bptr_vaddr + BOOT_ENTRY_ADDR_LOWER, __pa(__early_start));
@@ -73,10 +72,10 @@
while ((__secondary_hold_acknowledge != nr) && (++n < 1000))
mdelay(1);
- iounmap(bptr_vaddr);
-
local_irq_restore(flags);
+ iounmap(bptr_vaddr);
+
pr_debug("waited %d msecs for CPU #%d.\n", n, nr);
}
diff --git a/arch/powerpc/platforms/85xx/socrates.c b/arch/powerpc/platforms/85xx/socrates.c
index d0e8443..747d8fb 100644
--- a/arch/powerpc/platforms/85xx/socrates.c
+++ b/arch/powerpc/platforms/85xx/socrates.c
@@ -102,10 +102,11 @@
{},
};
-static void __init socrates_init(void)
+static int __init socrates_publish_devices(void)
{
- of_platform_bus_probe(NULL, socrates_of_bus_ids, NULL);
+ return of_platform_bus_probe(NULL, socrates_of_bus_ids, NULL);
}
+machine_device_initcall(socrates, socrates_publish_devices);
/*
* Called very early, device-tree isn't unflattened
@@ -124,7 +125,6 @@
.name = "Socrates",
.probe = socrates_probe,
.setup_arch = socrates_setup_arch,
- .init = socrates_init,
.init_IRQ = socrates_pic_init,
.get_irq = mpic_get_irq,
.restart = fsl_rstcr_restart,
diff --git a/arch/powerpc/platforms/85xx/xes_mpc85xx.c b/arch/powerpc/platforms/85xx/xes_mpc85xx.c
index ee01532..1b42605 100644
--- a/arch/powerpc/platforms/85xx/xes_mpc85xx.c
+++ b/arch/powerpc/platforms/85xx/xes_mpc85xx.c
@@ -32,7 +32,6 @@
#include <sysdev/fsl_soc.h>
#include <sysdev/fsl_pci.h>
-#include <linux/of_platform.h>
/* A few bit definitions needed for fixups on some boards */
#define MPC85xx_L2CTL_L2E 0x80000000 /* L2 enable */
diff --git a/arch/powerpc/platforms/cell/smp.c b/arch/powerpc/platforms/cell/smp.c
index 9046803..bc97fad 100644
--- a/arch/powerpc/platforms/cell/smp.c
+++ b/arch/powerpc/platforms/cell/smp.c
@@ -36,7 +36,6 @@
#include <asm/prom.h>
#include <asm/smp.h>
#include <asm/paca.h>
-#include <asm/time.h>
#include <asm/machdep.h>
#include <asm/cputable.h>
#include <asm/firmware.h>
@@ -140,31 +139,6 @@
mtspr(SPRN_DABRX, DABRX_KERNEL | DABRX_USER);
}
-static DEFINE_SPINLOCK(timebase_lock);
-static unsigned long timebase = 0;
-
-static void __devinit cell_give_timebase(void)
-{
- spin_lock(&timebase_lock);
- rtas_call(rtas_token("freeze-time-base"), 0, 1, NULL);
- timebase = get_tb();
- spin_unlock(&timebase_lock);
-
- while (timebase)
- barrier();
- rtas_call(rtas_token("thaw-time-base"), 0, 1, NULL);
-}
-
-static void __devinit cell_take_timebase(void)
-{
- while (!timebase)
- barrier();
- spin_lock(&timebase_lock);
- set_tb(timebase >> 32, timebase & 0xffffffff);
- timebase = 0;
- spin_unlock(&timebase_lock);
-}
-
static void __devinit smp_cell_kick_cpu(int nr)
{
BUG_ON(nr < 0 || nr >= NR_CPUS);
@@ -224,8 +198,8 @@
/* Non-lpar has additional take/give timebase */
if (rtas_token("freeze-time-base") != RTAS_UNKNOWN_SERVICE) {
- smp_ops->give_timebase = cell_give_timebase;
- smp_ops->take_timebase = cell_take_timebase;
+ smp_ops->give_timebase = rtas_give_timebase;
+ smp_ops->take_timebase = rtas_take_timebase;
}
DBG(" <- smp_init_cell()\n");
diff --git a/arch/powerpc/platforms/chrp/smp.c b/arch/powerpc/platforms/chrp/smp.c
index 10a4a4d..02cafec 100644
--- a/arch/powerpc/platforms/chrp/smp.c
+++ b/arch/powerpc/platforms/chrp/smp.c
@@ -26,7 +26,6 @@
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/smp.h>
-#include <asm/time.h>
#include <asm/machdep.h>
#include <asm/mpic.h>
#include <asm/rtas.h>
@@ -42,40 +41,12 @@
mpic_setup_this_cpu();
}
-static DEFINE_SPINLOCK(timebase_lock);
-static unsigned int timebase_upper = 0, timebase_lower = 0;
-
-void __devinit smp_chrp_give_timebase(void)
-{
- spin_lock(&timebase_lock);
- rtas_call(rtas_token("freeze-time-base"), 0, 1, NULL);
- timebase_upper = get_tbu();
- timebase_lower = get_tbl();
- spin_unlock(&timebase_lock);
-
- while (timebase_upper || timebase_lower)
- barrier();
- rtas_call(rtas_token("thaw-time-base"), 0, 1, NULL);
-}
-
-void __devinit smp_chrp_take_timebase(void)
-{
- while (!(timebase_upper || timebase_lower))
- barrier();
- spin_lock(&timebase_lock);
- set_tb(timebase_upper, timebase_lower);
- timebase_upper = 0;
- timebase_lower = 0;
- spin_unlock(&timebase_lock);
- printk("CPU %i taken timebase\n", smp_processor_id());
-}
-
/* CHRP with openpic */
struct smp_ops_t chrp_smp_ops = {
.message_pass = smp_mpic_message_pass,
.probe = smp_mpic_probe,
.kick_cpu = smp_chrp_kick_cpu,
.setup_cpu = smp_chrp_setup_cpu,
- .give_timebase = smp_chrp_give_timebase,
- .take_timebase = smp_chrp_take_timebase,
+ .give_timebase = rtas_give_timebase,
+ .take_timebase = rtas_take_timebase,
};
diff --git a/arch/powerpc/platforms/pasemi/setup.c b/arch/powerpc/platforms/pasemi/setup.c
index 153051e..a461934 100644
--- a/arch/powerpc/platforms/pasemi/setup.c
+++ b/arch/powerpc/platforms/pasemi/setup.c
@@ -71,20 +71,25 @@
}
#ifdef CONFIG_SMP
-static DEFINE_SPINLOCK(timebase_lock);
+static raw_spinlock_t timebase_lock;
static unsigned long timebase;
static void __devinit pas_give_timebase(void)
{
- spin_lock(&timebase_lock);
+ unsigned long flags;
+
+ local_irq_save(flags);
+ hard_irq_disable();
+ __raw_spin_lock(&timebase_lock);
mtspr(SPRN_TBCTL, TBCTL_FREEZE);
isync();
timebase = get_tb();
- spin_unlock(&timebase_lock);
+ __raw_spin_unlock(&timebase_lock);
while (timebase)
barrier();
mtspr(SPRN_TBCTL, TBCTL_RESTART);
+ local_irq_restore(flags);
}
static void __devinit pas_take_timebase(void)
@@ -92,10 +97,10 @@
while (!timebase)
smp_rmb();
- spin_lock(&timebase_lock);
+ __raw_spin_lock(&timebase_lock);
set_tb(timebase >> 32, timebase & 0xffffffff);
timebase = 0;
- spin_unlock(&timebase_lock);
+ __raw_spin_unlock(&timebase_lock);
}
struct smp_ops_t pas_smp_ops = {
diff --git a/arch/powerpc/platforms/powermac/setup.c b/arch/powerpc/platforms/powermac/setup.c
index 86f69a4..c205226 100644
--- a/arch/powerpc/platforms/powermac/setup.c
+++ b/arch/powerpc/platforms/powermac/setup.c
@@ -103,11 +103,6 @@
EXPORT_SYMBOL(smu_cmdbuf_abs);
#endif
-#ifdef CONFIG_SMP
-extern struct smp_ops_t psurge_smp_ops;
-extern struct smp_ops_t core99_smp_ops;
-#endif /* CONFIG_SMP */
-
static void pmac_show_cpuinfo(struct seq_file *m)
{
struct device_node *np;
@@ -341,34 +336,6 @@
ROOT_DEV = DEFAULT_ROOT_DEVICE;
#endif
-#ifdef CONFIG_SMP
- /* Check for Core99 */
- ic = of_find_node_by_name(NULL, "uni-n");
- if (!ic)
- ic = of_find_node_by_name(NULL, "u3");
- if (!ic)
- ic = of_find_node_by_name(NULL, "u4");
- if (ic) {
- of_node_put(ic);
- smp_ops = &core99_smp_ops;
- }
-#ifdef CONFIG_PPC32
- else {
- /*
- * We have to set bits in cpu_possible_map here since the
- * secondary CPU(s) aren't in the device tree, and
- * setup_per_cpu_areas only allocates per-cpu data for
- * CPUs in the cpu_possible_map.
- */
- int cpu;
-
- for (cpu = 1; cpu < 4 && cpu < NR_CPUS; ++cpu)
- cpu_set(cpu, cpu_possible_map);
- smp_ops = &psurge_smp_ops;
- }
-#endif
-#endif /* CONFIG_SMP */
-
#ifdef CONFIG_ADB
if (strstr(cmd_line, "adb_sync")) {
extern int __adb_probe_sync;
@@ -512,6 +479,14 @@
#ifdef CONFIG_PPC64
iommu_init_early_dart();
#endif
+
+ /* SMP Init has to be done early as we need to patch up
+ * cpu_possible_map before interrupt stacks are allocated
+ * or kaboom...
+ */
+#ifdef CONFIG_SMP
+ pmac_setup_smp();
+#endif
}
static int __init pmac_declare_of_platform_devices(void)
diff --git a/arch/powerpc/platforms/powermac/smp.c b/arch/powerpc/platforms/powermac/smp.c
index cf1dbe7..6d4da7b 100644
--- a/arch/powerpc/platforms/powermac/smp.c
+++ b/arch/powerpc/platforms/powermac/smp.c
@@ -64,10 +64,11 @@
extern void __secondary_start_pmac_0(void);
extern int pmac_pfunc_base_install(void);
-#ifdef CONFIG_PPC32
+static void (*pmac_tb_freeze)(int freeze);
+static u64 timebase;
+static int tb_req;
-/* Sync flag for HW tb sync */
-static volatile int sec_tb_reset = 0;
+#ifdef CONFIG_PPC32
/*
* Powersurge (old powermac SMP) support.
@@ -294,6 +295,9 @@
psurge_quad_init();
/* All released cards using this HW design have 4 CPUs */
ncpus = 4;
+		/* Not sure how timebase sync works on those, let's use SW */
+ smp_ops->give_timebase = smp_generic_give_timebase;
+ smp_ops->take_timebase = smp_generic_take_timebase;
} else {
iounmap(quad_base);
if ((in_8(hhead_base + HHEAD_CONFIG) & 0x02) == 0) {
@@ -308,18 +312,15 @@
psurge_start = ioremap(PSURGE_START, 4);
psurge_pri_intr = ioremap(PSURGE_PRI_INTR, 4);
- /*
- * This is necessary because OF doesn't know about the
+ /* This is necessary because OF doesn't know about the
* secondary cpu(s), and thus there aren't nodes in the
* device tree for them, and smp_setup_cpu_maps hasn't
- * set their bits in cpu_possible_map and cpu_present_map.
+ * set their bits in cpu_present_map.
*/
if (ncpus > NR_CPUS)
ncpus = NR_CPUS;
- for (i = 1; i < ncpus ; ++i) {
+ for (i = 1; i < ncpus ; ++i)
cpu_set(i, cpu_present_map);
- set_hard_smp_processor_id(i, i);
- }
if (ppc_md.progress) ppc_md.progress("smp_psurge_probe - done", 0x352);
@@ -329,8 +330,14 @@
static void __init smp_psurge_kick_cpu(int nr)
{
unsigned long start = __pa(__secondary_start_pmac_0) + nr * 8;
- unsigned long a;
- int i;
+ unsigned long a, flags;
+ int i, j;
+
+ /* Defining this here is evil ... but I prefer hiding that
+ * crap to avoid giving people ideas that they can do the
+ * same.
+ */
+ extern volatile unsigned int cpu_callin_map[NR_CPUS];
/* may need to flush here if secondary bats aren't setup */
for (a = KERNELBASE; a < KERNELBASE + 0x800000; a += 32)
@@ -339,47 +346,52 @@
if (ppc_md.progress) ppc_md.progress("smp_psurge_kick_cpu", 0x353);
+	/* This is going to freeze the timebase, so we disable interrupts */
+ local_irq_save(flags);
+
out_be32(psurge_start, start);
mb();
psurge_set_ipi(nr);
+
/*
* We can't use udelay here because the timebase is now frozen.
*/
for (i = 0; i < 2000; ++i)
- barrier();
+ asm volatile("nop" : : : "memory");
psurge_clr_ipi(nr);
- if (ppc_md.progress) ppc_md.progress("smp_psurge_kick_cpu - done", 0x354);
-}
-
-/*
- * With the dual-cpu powersurge board, the decrementers and timebases
- * of both cpus are frozen after the secondary cpu is started up,
- * until we give the secondary cpu another interrupt. This routine
- * uses this to get the timebases synchronized.
- * -- paulus.
- */
-static void __init psurge_dual_sync_tb(int cpu_nr)
-{
- int t;
-
- set_dec(tb_ticks_per_jiffy);
- /* XXX fixme */
- set_tb(0, 0);
-
- if (cpu_nr > 0) {
- mb();
- sec_tb_reset = 1;
- return;
+ /*
+ * Also, because the timebase is frozen, we must not return to the
+	 * caller, which will try to do udelays etc... Instead, we wait -here-
+ * for the CPU to callin.
+ */
+ for (i = 0; i < 100000 && !cpu_callin_map[nr]; ++i) {
+ for (j = 1; j < 10000; j++)
+ asm volatile("nop" : : : "memory");
+ asm volatile("sync" : : : "memory");
}
+ if (!cpu_callin_map[nr])
+ goto stuck;
- /* wait for the secondary to have reset its TB before proceeding */
- for (t = 10000000; t > 0 && !sec_tb_reset; --t)
- ;
+ /* And we do the TB sync here too for standard dual CPU cards */
+ if (psurge_type == PSURGE_DUAL) {
+ while(!tb_req)
+ barrier();
+ tb_req = 0;
+ mb();
+ timebase = get_tb();
+ mb();
+ while (timebase)
+ barrier();
+ mb();
+ }
+ stuck:
+ /* now interrupt the secondary, restarting both TBs */
+ if (psurge_type == PSURGE_DUAL)
+ psurge_set_ipi(1);
- /* now interrupt the secondary, starting both TBs */
- psurge_set_ipi(1);
+ if (ppc_md.progress) ppc_md.progress("smp_psurge_kick_cpu - done", 0x354);
}
static struct irqaction psurge_irqaction = {
@@ -390,36 +402,35 @@
static void __init smp_psurge_setup_cpu(int cpu_nr)
{
+ if (cpu_nr != 0)
+ return;
- if (cpu_nr == 0) {
- /* If we failed to start the second CPU, we should still
- * send it an IPI to start the timebase & DEC or we might
- * have them stuck.
- */
- if (num_online_cpus() < 2) {
- if (psurge_type == PSURGE_DUAL)
- psurge_set_ipi(1);
- return;
- }
- /* reset the entry point so if we get another intr we won't
- * try to startup again */
- out_be32(psurge_start, 0x100);
- if (setup_irq(30, &psurge_irqaction))
- printk(KERN_ERR "Couldn't get primary IPI interrupt");
- }
-
- if (psurge_type == PSURGE_DUAL)
- psurge_dual_sync_tb(cpu_nr);
+ /* reset the entry point so if we get another intr we won't
+ * try to startup again */
+ out_be32(psurge_start, 0x100);
+ if (setup_irq(30, &psurge_irqaction))
+ printk(KERN_ERR "Couldn't get primary IPI interrupt");
}
void __init smp_psurge_take_timebase(void)
{
- /* Dummy implementation */
+ if (psurge_type != PSURGE_DUAL)
+ return;
+
+ tb_req = 1;
+ mb();
+ while (!timebase)
+ barrier();
+ mb();
+ set_tb(timebase >> 32, timebase & 0xffffffff);
+ timebase = 0;
+ mb();
+ set_dec(tb_ticks_per_jiffy/2);
}
void __init smp_psurge_give_timebase(void)
{
- /* Dummy implementation */
+ /* Nothing to do here */
}
/* PowerSurge-style Macs */
@@ -437,9 +448,6 @@
* Core 99 and later support
*/
-static void (*pmac_tb_freeze)(int freeze);
-static u64 timebase;
-static int tb_req;
static void smp_core99_give_timebase(void)
{
@@ -478,7 +486,6 @@
set_tb(timebase >> 32, timebase & 0xffffffff);
timebase = 0;
mb();
- set_dec(tb_ticks_per_jiffy/2);
local_irq_restore(flags);
}
@@ -920,3 +927,34 @@
# endif
#endif
};
+
+void __init pmac_setup_smp(void)
+{
+ struct device_node *np;
+
+ /* Check for Core99 */
+ np = of_find_node_by_name(NULL, "uni-n");
+ if (!np)
+ np = of_find_node_by_name(NULL, "u3");
+ if (!np)
+ np = of_find_node_by_name(NULL, "u4");
+ if (np) {
+ of_node_put(np);
+ smp_ops = &core99_smp_ops;
+ }
+#ifdef CONFIG_PPC32
+ else {
+ /* We have to set bits in cpu_possible_map here since the
+ * secondary CPU(s) aren't in the device tree. Various
+ * things won't be initialized for CPUs not in the possible
+ * map, so we really need to fix it up here.
+ */
+ int cpu;
+
+ for (cpu = 1; cpu < 4 && cpu < NR_CPUS; ++cpu)
+ cpu_set(cpu, cpu_possible_map);
+ smp_ops = &psurge_smp_ops;
+ }
+#endif /* CONFIG_PPC32 */
+}
+
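
The give/take dance above, where tb_req and timebase act as both flag and payload, is easier to follow in plain C. The sketch below is illustrative only; the _example names are not from the patch, and it elides the PSURGE_DUAL checks and IPI plumbing.

#include <linux/compiler.h>
#include <linux/types.h>
#include <asm/system.h>
#include <asm/time.h>

static volatile int tb_req_example;
static volatile u64 timebase_example;

static void example_give_timebase(void)	/* boot CPU, timebase frozen */
{
	while (!tb_req_example)			/* wait for the taker to ask */
		barrier();
	tb_req_example = 0;
	mb();
	timebase_example = get_tb();		/* publish the frozen value */
	mb();
	while (timebase_example)		/* wait until it was consumed */
		barrier();
}

static void example_take_timebase(void)	/* secondary CPU */
{
	tb_req_example = 1;
	mb();
	while (!timebase_example)
		barrier();
	set_tb(timebase_example >> 32, timebase_example & 0xffffffff);
	timebase_example = 0;			/* release the giver */
	mb();
}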
diff --git a/arch/powerpc/platforms/pseries/eeh_driver.c b/arch/powerpc/platforms/pseries/eeh_driver.c
index 9a2a6e3..0e8db67 100644
--- a/arch/powerpc/platforms/pseries/eeh_driver.c
+++ b/arch/powerpc/platforms/pseries/eeh_driver.c
@@ -122,7 +122,7 @@
* passed back in "userdata".
*/
-static void eeh_report_error(struct pci_dev *dev, void *userdata)
+static int eeh_report_error(struct pci_dev *dev, void *userdata)
{
enum pci_ers_result rc, *res = userdata;
struct pci_driver *driver = dev->driver;
@@ -130,19 +130,21 @@
dev->error_state = pci_channel_io_frozen;
if (!driver)
- return;
+ return 0;
eeh_disable_irq(dev);
if (!driver->err_handler ||
!driver->err_handler->error_detected)
- return;
+ return 0;
rc = driver->err_handler->error_detected (dev, pci_channel_io_frozen);
/* A driver that needs a reset trumps all others */
if (rc == PCI_ERS_RESULT_NEED_RESET) *res = rc;
if (*res == PCI_ERS_RESULT_NONE) *res = rc;
+
+ return 0;
}
/**
@@ -153,7 +155,7 @@
* Cumulative response passed back in "userdata".
*/
-static void eeh_report_mmio_enabled(struct pci_dev *dev, void *userdata)
+static int eeh_report_mmio_enabled(struct pci_dev *dev, void *userdata)
{
enum pci_ers_result rc, *res = userdata;
struct pci_driver *driver = dev->driver;
@@ -161,26 +163,28 @@
if (!driver ||
!driver->err_handler ||
!driver->err_handler->mmio_enabled)
- return;
+ return 0;
rc = driver->err_handler->mmio_enabled (dev);
/* A driver that needs a reset trumps all others */
if (rc == PCI_ERS_RESULT_NEED_RESET) *res = rc;
if (*res == PCI_ERS_RESULT_NONE) *res = rc;
+
+ return 0;
}
/**
* eeh_report_reset - tell device that slot has been reset
*/
-static void eeh_report_reset(struct pci_dev *dev, void *userdata)
+static int eeh_report_reset(struct pci_dev *dev, void *userdata)
{
enum pci_ers_result rc, *res = userdata;
struct pci_driver *driver = dev->driver;
if (!driver)
- return;
+ return 0;
dev->error_state = pci_channel_io_normal;
@@ -188,35 +192,39 @@
if (!driver->err_handler ||
!driver->err_handler->slot_reset)
- return;
+ return 0;
rc = driver->err_handler->slot_reset(dev);
if ((*res == PCI_ERS_RESULT_NONE) ||
(*res == PCI_ERS_RESULT_RECOVERED)) *res = rc;
if (*res == PCI_ERS_RESULT_DISCONNECT &&
rc == PCI_ERS_RESULT_NEED_RESET) *res = rc;
+
+ return 0;
}
/**
* eeh_report_resume - tell device to resume normal operations
*/
-static void eeh_report_resume(struct pci_dev *dev, void *userdata)
+static int eeh_report_resume(struct pci_dev *dev, void *userdata)
{
struct pci_driver *driver = dev->driver;
dev->error_state = pci_channel_io_normal;
if (!driver)
- return;
+ return 0;
eeh_enable_irq(dev);
if (!driver->err_handler ||
!driver->err_handler->resume)
- return;
+ return 0;
driver->err_handler->resume(dev);
+
+ return 0;
}
/**
@@ -226,22 +234,24 @@
* dead, and that no further recovery attempts will be made on it.
*/
-static void eeh_report_failure(struct pci_dev *dev, void *userdata)
+static int eeh_report_failure(struct pci_dev *dev, void *userdata)
{
struct pci_driver *driver = dev->driver;
dev->error_state = pci_channel_io_perm_failure;
if (!driver)
- return;
+ return 0;
eeh_disable_irq(dev);
if (!driver->err_handler ||
!driver->err_handler->error_detected)
- return;
+ return 0;
driver->err_handler->error_detected(dev, pci_channel_io_perm_failure);
+
+ return 0;
}
/* ------------------------------------------------------- */
diff --git a/arch/powerpc/platforms/pseries/smp.c b/arch/powerpc/platforms/pseries/smp.c
index 1a231c3..1f8f6cf 100644
--- a/arch/powerpc/platforms/pseries/smp.c
+++ b/arch/powerpc/platforms/pseries/smp.c
@@ -35,7 +35,6 @@
#include <asm/prom.h>
#include <asm/smp.h>
#include <asm/paca.h>
-#include <asm/time.h>
#include <asm/machdep.h>
#include <asm/cputable.h>
#include <asm/firmware.h>
@@ -118,31 +117,6 @@
}
#endif /* CONFIG_XICS */
-static DEFINE_SPINLOCK(timebase_lock);
-static unsigned long timebase = 0;
-
-static void __devinit pSeries_give_timebase(void)
-{
- spin_lock(&timebase_lock);
- rtas_call(rtas_token("freeze-time-base"), 0, 1, NULL);
- timebase = get_tb();
- spin_unlock(&timebase_lock);
-
- while (timebase)
- barrier();
- rtas_call(rtas_token("thaw-time-base"), 0, 1, NULL);
-}
-
-static void __devinit pSeries_take_timebase(void)
-{
- while (!timebase)
- barrier();
- spin_lock(&timebase_lock);
- set_tb(timebase >> 32, timebase & 0xffffffff);
- timebase = 0;
- spin_unlock(&timebase_lock);
-}
-
static void __devinit smp_pSeries_kick_cpu(int nr)
{
BUG_ON(nr < 0 || nr >= NR_CPUS);
@@ -209,8 +183,8 @@
/* Non-lpar has additional take/give timebase */
if (rtas_token("freeze-time-base") != RTAS_UNKNOWN_SERVICE) {
- smp_ops->give_timebase = pSeries_give_timebase;
- smp_ops->take_timebase = pSeries_take_timebase;
+ smp_ops->give_timebase = rtas_give_timebase;
+ smp_ops->take_timebase = rtas_take_timebase;
}
pr_debug(" <- smp_init_pSeries()\n");
diff --git a/arch/powerpc/sysdev/mpic.c b/arch/powerpc/sysdev/mpic.c
index 9c3af50..d46de1f 100644
--- a/arch/powerpc/sysdev/mpic.c
+++ b/arch/powerpc/sysdev/mpic.c
@@ -279,28 +279,29 @@
}
#ifdef CONFIG_PPC_DCR
-static void _mpic_map_dcr(struct mpic *mpic, struct mpic_reg_bank *rb,
+static void _mpic_map_dcr(struct mpic *mpic, struct device_node *node,
+ struct mpic_reg_bank *rb,
unsigned int offset, unsigned int size)
{
const u32 *dbasep;
- dbasep = of_get_property(mpic->irqhost->of_node, "dcr-reg", NULL);
+ dbasep = of_get_property(node, "dcr-reg", NULL);
- rb->dhost = dcr_map(mpic->irqhost->of_node, *dbasep + offset, size);
+ rb->dhost = dcr_map(node, *dbasep + offset, size);
BUG_ON(!DCR_MAP_OK(rb->dhost));
}
-static inline void mpic_map(struct mpic *mpic, phys_addr_t phys_addr,
- struct mpic_reg_bank *rb, unsigned int offset,
- unsigned int size)
+static inline void mpic_map(struct mpic *mpic, struct device_node *node,
+ phys_addr_t phys_addr, struct mpic_reg_bank *rb,
+ unsigned int offset, unsigned int size)
{
if (mpic->flags & MPIC_USES_DCR)
- _mpic_map_dcr(mpic, rb, offset, size);
+ _mpic_map_dcr(mpic, node, rb, offset, size);
else
_mpic_map_mmio(mpic, phys_addr, rb, offset, size);
}
#else /* CONFIG_PPC_DCR */
-#define mpic_map(m,p,b,o,s) _mpic_map_mmio(m,p,b,o,s)
+#define mpic_map(m,n,p,b,o,s) _mpic_map_mmio(m,p,b,o,s)
#endif /* !CONFIG_PPC_DCR */
@@ -1052,11 +1053,10 @@
int intvec_top;
u64 paddr = phys_addr;
- mpic = alloc_bootmem(sizeof(struct mpic));
+ mpic = kzalloc(sizeof(struct mpic), GFP_KERNEL);
if (mpic == NULL)
return NULL;
-
- memset(mpic, 0, sizeof(struct mpic));
+
mpic->name = name;
mpic->hc_irq = mpic_irq_chip;
@@ -1152,8 +1152,8 @@
}
/* Map the global registers */
- mpic_map(mpic, paddr, &mpic->gregs, MPIC_INFO(GREG_BASE), 0x1000);
- mpic_map(mpic, paddr, &mpic->tmregs, MPIC_INFO(TIMER_BASE), 0x1000);
+ mpic_map(mpic, node, paddr, &mpic->gregs, MPIC_INFO(GREG_BASE), 0x1000);
+ mpic_map(mpic, node, paddr, &mpic->tmregs, MPIC_INFO(TIMER_BASE), 0x1000);
/* Reset */
if (flags & MPIC_WANTS_RESET) {
@@ -1194,7 +1194,7 @@
/* Map the per-CPU registers */
for (i = 0; i < mpic->num_cpus; i++) {
- mpic_map(mpic, paddr, &mpic->cpuregs[i],
+ mpic_map(mpic, node, paddr, &mpic->cpuregs[i],
MPIC_INFO(CPU_BASE) + i * MPIC_INFO(CPU_STRIDE),
0x1000);
}
@@ -1202,7 +1202,7 @@
/* Initialize main ISU if none provided */
if (mpic->isu_size == 0) {
mpic->isu_size = mpic->num_sources;
- mpic_map(mpic, paddr, &mpic->isus[0],
+ mpic_map(mpic, node, paddr, &mpic->isus[0],
MPIC_INFO(IRQ_BASE), MPIC_INFO(IRQ_STRIDE) * mpic->isu_size);
}
mpic->isu_shift = 1 + __ilog2(mpic->isu_size - 1);
@@ -1256,8 +1256,10 @@
BUG_ON(isu_num >= MPIC_MAX_ISU);
- mpic_map(mpic, paddr, &mpic->isus[isu_num], 0,
+ mpic_map(mpic, mpic->irqhost->of_node,
+ paddr, &mpic->isus[isu_num], 0,
MPIC_INFO(IRQ_STRIDE) * mpic->isu_size);
+
if ((isu_first + mpic->isu_size) > mpic->num_sources)
mpic->num_sources = isu_first + mpic->isu_size;
}
diff --git a/arch/powerpc/sysdev/qe_lib/qe.c b/arch/powerpc/sysdev/qe_lib/qe.c
index b28b0e5..237e365 100644
--- a/arch/powerpc/sysdev/qe_lib/qe.c
+++ b/arch/powerpc/sysdev/qe_lib/qe.c
@@ -112,6 +112,7 @@
{
unsigned long flags;
u8 mcn_shift = 0, dev_shift = 0;
+ u32 ret;
spin_lock_irqsave(&qe_lock, flags);
if (cmd == QE_RESET) {
@@ -139,11 +140,13 @@
}
/* wait for the QE_CR_FLG to clear */
- while(in_be32(&qe_immr->cp.cecr) & QE_CR_FLG)
- cpu_relax();
+ ret = spin_event_timeout((in_be32(&qe_immr->cp.cecr) & QE_CR_FLG) == 0,
+ 100, 0);
+ /* On timeout (e.g. failure), the expression will be false (ret == 0),
+ otherwise it will be true (ret == 1). */
spin_unlock_irqrestore(&qe_lock, flags);
- return 0;
+ return ret == 1;
}
EXPORT_SYMBOL(qe_issue_cmd);
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index a14dba0..e577839 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -94,6 +94,7 @@
select HAVE_KVM if 64BIT
select HAVE_ARCH_TRACEHOOK
select INIT_ALL_POSSIBLE
+ select HAVE_PERF_COUNTERS
source "init/Kconfig"
diff --git a/arch/s390/defconfig b/arch/s390/defconfig
index d401d56..fcba206 100644
--- a/arch/s390/defconfig
+++ b/arch/s390/defconfig
@@ -1,7 +1,7 @@
#
# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.30-rc3
-# Thu Apr 23 09:29:52 2009
+# Linux kernel version: 2.6.30
+# Mon Jun 22 11:08:16 2009
#
CONFIG_SCHED_MC=y
CONFIG_MMU=y
@@ -25,6 +25,7 @@
CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC=y
CONFIG_S390=y
CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
+CONFIG_CONSTRUCTORS=y
#
# General setup
@@ -90,7 +91,6 @@
CONFIG_KALLSYMS=y
# CONFIG_KALLSYMS_ALL is not set
# CONFIG_KALLSYMS_EXTRA_PASS is not set
-# CONFIG_STRIP_ASM_SYMS is not set
CONFIG_HOTPLUG=y
CONFIG_PRINTK=y
CONFIG_BUG=y
@@ -103,7 +103,14 @@
CONFIG_EVENTFD=y
CONFIG_SHMEM=y
CONFIG_AIO=y
+CONFIG_HAVE_PERF_COUNTERS=y
+
+#
+# Performance Counters
+#
+# CONFIG_PERF_COUNTERS is not set
CONFIG_VM_EVENT_COUNTERS=y
+# CONFIG_STRIP_ASM_SYMS is not set
# CONFIG_COMPAT_BRK is not set
CONFIG_SLAB=y
# CONFIG_SLUB is not set
@@ -119,6 +126,11 @@
CONFIG_HAVE_ARCH_TRACEHOOK=y
CONFIG_USE_GENERIC_SMP_HELPERS=y
CONFIG_HAVE_DEFAULT_NO_SPIN_MUTEXES=y
+
+#
+# GCOV-based kernel profiling
+#
+# CONFIG_GCOV_KERNEL is not set
# CONFIG_SLOW_WORK is not set
# CONFIG_HAVE_GENERIC_DMA_COHERENT is not set
CONFIG_SLABINFO=y
@@ -150,7 +162,7 @@
# CONFIG_DEFAULT_NOOP is not set
CONFIG_DEFAULT_IOSCHED="deadline"
CONFIG_PREEMPT_NOTIFIERS=y
-# CONFIG_FREEZER is not set
+CONFIG_FREEZER=y
#
# Base setup
@@ -199,6 +211,7 @@
CONFIG_ARCH_SELECT_MEMORY_MODEL=y
CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y
CONFIG_ARCH_ENABLE_MEMORY_HOTREMOVE=y
+CONFIG_ARCH_HIBERNATION_POSSIBLE=y
CONFIG_SELECT_MEMORY_MODEL=y
# CONFIG_FLATMEM_MANUAL is not set
# CONFIG_DISCONTIGMEM_MANUAL is not set
@@ -218,9 +231,9 @@
CONFIG_ZONE_DMA_FLAG=1
CONFIG_BOUNCE=y
CONFIG_VIRT_TO_BUS=y
-CONFIG_UNEVICTABLE_LRU=y
CONFIG_HAVE_MLOCK=y
CONFIG_HAVE_MLOCKED_PAGE_BIT=y
+CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
#
# I/O subsystem configuration
@@ -257,6 +270,16 @@
# CONFIG_ZFCPDUMP is not set
CONFIG_S390_GUEST=y
CONFIG_SECCOMP=y
+
+#
+# Power Management
+#
+CONFIG_PM=y
+# CONFIG_PM_DEBUG is not set
+CONFIG_PM_SLEEP_SMP=y
+CONFIG_PM_SLEEP=y
+CONFIG_HIBERNATION=y
+CONFIG_PM_STD_PARTITION=""
CONFIG_NET=y
#
@@ -384,6 +407,7 @@
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
# CONFIG_PHONET is not set
+# CONFIG_IEEE802154 is not set
CONFIG_NET_SCHED=y
#
@@ -446,6 +470,7 @@
# CAN Device Drivers
#
CONFIG_CAN_VCAN=m
+# CONFIG_CAN_DEV is not set
# CONFIG_CAN_DEBUG_DEVICES is not set
# CONFIG_AF_RXRPC is not set
# CONFIG_WIMAX is not set
@@ -524,10 +549,6 @@
CONFIG_BLK_DEV_SR_VENDOR=y
CONFIG_CHR_DEV_SG=y
# CONFIG_CHR_DEV_SCH is not set
-
-#
-# Some SCSI devices (e.g. CD jukebox) support multiple LUNs
-#
CONFIG_SCSI_MULTI_LUN=y
CONFIG_SCSI_CONSTANTS=y
CONFIG_SCSI_LOGGING=y
@@ -578,7 +599,6 @@
# CONFIG_DM_DELAY is not set
# CONFIG_DM_UEVENT is not set
CONFIG_NETDEVICES=y
-CONFIG_COMPAT_NET_DEV_OPS=y
# CONFIG_IFB is not set
CONFIG_DUMMY=m
CONFIG_BONDING=m
@@ -595,6 +615,7 @@
# CONFIG_IBM_NEW_EMAC_NO_FLOW_CTRL is not set
# CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set
# CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set
+# CONFIG_KS8842 is not set
CONFIG_NETDEV_1000=y
CONFIG_NETDEV_10000=y
# CONFIG_TR is not set
@@ -674,6 +695,11 @@
# CONFIG_MONREADER is not set
CONFIG_MONWRITER=m
CONFIG_S390_VMUR=m
+
+#
+# PPS support
+#
+# CONFIG_PPS is not set
# CONFIG_POWER_SUPPLY is not set
# CONFIG_THERMAL is not set
# CONFIG_THERMAL_HWMON is not set
@@ -683,6 +709,10 @@
# CONFIG_NEW_LEDS is not set
CONFIG_ACCESSIBILITY=y
# CONFIG_AUXDISPLAY is not set
+
+#
+# TI VLYNQ
+#
# CONFIG_STAGING is not set
#
@@ -703,11 +733,12 @@
# CONFIG_REISERFS_FS is not set
# CONFIG_JFS_FS is not set
CONFIG_FS_POSIX_ACL=y
-CONFIG_FILE_LOCKING=y
# CONFIG_XFS_FS is not set
# CONFIG_GFS2_FS is not set
# CONFIG_OCFS2_FS is not set
# CONFIG_BTRFS_FS is not set
+CONFIG_FILE_LOCKING=y
+CONFIG_FSNOTIFY=y
CONFIG_DNOTIFY=y
CONFIG_INOTIFY=y
CONFIG_INOTIFY_USER=y
@@ -865,19 +896,23 @@
CONFIG_SYSCTL_SYSCALL_CHECK=y
# CONFIG_DEBUG_PAGEALLOC is not set
CONFIG_HAVE_FUNCTION_TRACER=y
+CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y
+CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST=y
+CONFIG_HAVE_DYNAMIC_FTRACE=y
+CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y
+CONFIG_HAVE_FTRACE_SYSCALLS=y
CONFIG_TRACING_SUPPORT=y
-
-#
-# Tracers
-#
+CONFIG_FTRACE=y
# CONFIG_FUNCTION_TRACER is not set
# CONFIG_IRQSOFF_TRACER is not set
# CONFIG_PREEMPT_TRACER is not set
# CONFIG_SCHED_TRACER is not set
-# CONFIG_CONTEXT_SWITCH_TRACER is not set
-# CONFIG_EVENT_TRACER is not set
+# CONFIG_ENABLE_DEFAULT_TRACERS is not set
+# CONFIG_FTRACE_SYSCALLS is not set
# CONFIG_BOOT_TRACER is not set
-# CONFIG_TRACE_BRANCH_PROFILING is not set
+CONFIG_BRANCH_PROFILE_NONE=y
+# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set
+# CONFIG_PROFILE_ALL_BRANCHES is not set
# CONFIG_STACK_TRACER is not set
# CONFIG_KMEMTRACE is not set
# CONFIG_WORKQUEUE_TRACER is not set
@@ -886,6 +921,7 @@
CONFIG_SAMPLES=y
# CONFIG_SAMPLE_KOBJECT is not set
# CONFIG_SAMPLE_KPROBES is not set
+# CONFIG_KMEMCHECK is not set
#
# Security options
diff --git a/arch/s390/include/asm/cputime.h b/arch/s390/include/asm/cputime.h
index ec917d4..7a3817a 100644
--- a/arch/s390/include/asm/cputime.h
+++ b/arch/s390/include/asm/cputime.h
@@ -178,7 +178,7 @@
}
struct s390_idle_data {
- spinlock_t lock;
+ unsigned int sequence;
unsigned long long idle_count;
unsigned long long idle_enter;
unsigned long long idle_time;
diff --git a/arch/s390/include/asm/debug.h b/arch/s390/include/asm/debug.h
index 9450ce6..31ed568 100644
--- a/arch/s390/include/asm/debug.h
+++ b/arch/s390/include/asm/debug.h
@@ -248,14 +248,5 @@
#define PRINT_FATAL(x...) printk ( KERN_DEBUG PRINTK_HEADER x )
#endif /* DASD_DEBUG */
-#undef DEBUG_MALLOC
-#ifdef DEBUG_MALLOC
-void *b;
-#define kmalloc(x...) (PRINT_INFO(" kmalloc %p\n",b=kmalloc(x)),b)
-#define kfree(x) PRINT_INFO(" kfree %p\n",x);kfree(x)
-#define get_zeroed_page(x...) (PRINT_INFO(" gfp %p\n",b=get_zeroed_page(x)),b)
-#define __get_free_pages(x...) (PRINT_INFO(" gfps %p\n",b=__get_free_pages(x)),b)
-#endif /* DEBUG_MALLOC */
-
#endif /* __KERNEL__ */
#endif /* DEBUG_H */
diff --git a/arch/s390/include/asm/perf_counter.h b/arch/s390/include/asm/perf_counter.h
new file mode 100644
index 0000000..a7205a3
--- /dev/null
+++ b/arch/s390/include/asm/perf_counter.h
@@ -0,0 +1,8 @@
+/*
+ * Performance counter support - s390 specific definitions.
+ *
+ * Copyright 2009 Martin Schwidefsky, IBM Corporation.
+ */
+
+static inline void set_perf_counter_pending(void) {}
+static inline void clear_perf_counter_pending(void) {}
diff --git a/arch/s390/include/asm/qdio.h b/arch/s390/include/asm/qdio.h
index 402d6dc..79d849f 100644
--- a/arch/s390/include/asm/qdio.h
+++ b/arch/s390/include/asm/qdio.h
@@ -380,7 +380,7 @@
extern int qdio_activate(struct ccw_device *);
extern int do_QDIO(struct ccw_device *cdev, unsigned int callflags,
- int q_nr, int bufnr, int count);
+ int q_nr, unsigned int bufnr, unsigned int count);
extern int qdio_cleanup(struct ccw_device*, int);
extern int qdio_shutdown(struct ccw_device*, int);
extern int qdio_free(struct ccw_device *);
diff --git a/arch/s390/kernel/kprobes.c b/arch/s390/kernel/kprobes.c
index 9bb2f62..86783ef 100644
--- a/arch/s390/kernel/kprobes.c
+++ b/arch/s390/kernel/kprobes.c
@@ -154,39 +154,35 @@
static int __kprobes swap_instruction(void *aref)
{
+ struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+ unsigned long status = kcb->kprobe_status;
struct ins_replace_args *args = aref;
+ int rc;
- return probe_kernel_write(args->ptr, &args->new, sizeof(args->new));
+ kcb->kprobe_status = KPROBE_SWAP_INST;
+ rc = probe_kernel_write(args->ptr, &args->new, sizeof(args->new));
+ kcb->kprobe_status = status;
+ return rc;
}
void __kprobes arch_arm_kprobe(struct kprobe *p)
{
- struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
- unsigned long status = kcb->kprobe_status;
struct ins_replace_args args;
args.ptr = p->addr;
args.old = p->opcode;
args.new = BREAKPOINT_INSTRUCTION;
-
- kcb->kprobe_status = KPROBE_SWAP_INST;
stop_machine(swap_instruction, &args, NULL);
- kcb->kprobe_status = status;
}
void __kprobes arch_disarm_kprobe(struct kprobe *p)
{
- struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
- unsigned long status = kcb->kprobe_status;
struct ins_replace_args args;
args.ptr = p->addr;
args.old = BREAKPOINT_INSTRUCTION;
args.new = p->opcode;
-
- kcb->kprobe_status = KPROBE_SWAP_INST;
stop_machine(swap_instruction, &args, NULL);
- kcb->kprobe_status = status;
}
void __kprobes arch_remove_kprobe(struct kprobe *p)
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index fd8e311..2270730 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -856,13 +856,20 @@
{
struct s390_idle_data *idle;
unsigned long long idle_count;
+ unsigned int sequence;
idle = &per_cpu(s390_idle, dev->id);
- spin_lock(&idle->lock);
+repeat:
+ sequence = idle->sequence;
+ smp_rmb();
+ if (sequence & 1)
+ goto repeat;
idle_count = idle->idle_count;
if (idle->idle_enter)
idle_count++;
- spin_unlock(&idle->lock);
+ smp_rmb();
+ if (idle->sequence != sequence)
+ goto repeat;
return sprintf(buf, "%llu\n", idle_count);
}
static SYSDEV_ATTR(idle_count, 0444, show_idle_count, NULL);
@@ -872,15 +879,22 @@
{
struct s390_idle_data *idle;
unsigned long long now, idle_time, idle_enter;
+ unsigned int sequence;
idle = &per_cpu(s390_idle, dev->id);
- spin_lock(&idle->lock);
now = get_clock();
+repeat:
+ sequence = idle->sequence;
+ smp_rmb();
+ if (sequence & 1)
+ goto repeat;
idle_time = idle->idle_time;
idle_enter = idle->idle_enter;
if (idle_enter != 0ULL && idle_enter < now)
idle_time += now - idle_enter;
- spin_unlock(&idle->lock);
+ smp_rmb();
+ if (idle->sequence != sequence)
+ goto repeat;
return sprintf(buf, "%llu\n", idle_time >> 12);
}
static SYSDEV_ATTR(idle_time_us, 0444, show_idle_time, NULL);
@@ -908,11 +922,7 @@
case CPU_ONLINE:
case CPU_ONLINE_FROZEN:
idle = &per_cpu(s390_idle, cpu);
- spin_lock_irq(&idle->lock);
- idle->idle_enter = 0;
- idle->idle_time = 0;
- idle->idle_count = 0;
- spin_unlock_irq(&idle->lock);
+ memset(idle, 0, sizeof(struct s390_idle_data));
if (sysfs_create_group(&s->kobj, &cpu_online_attr_group))
return NOTIFY_BAD;
break;
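
The open-coded sequence counter introduced above (the sequence field plus the odd/even retry loops) follows the same protocol as the generic seqcount readers and writers. A rough equivalent, purely for illustration and not taken from the patch:

#include <linux/seqlock.h>

static seqcount_t idle_seq = SEQCNT_ZERO;
static unsigned long long idle_count_demo;

static void demo_writer(void)			/* what the idle->sequence++ pairs do */
{
	write_seqcount_begin(&idle_seq);	/* count becomes odd */
	idle_count_demo++;
	write_seqcount_end(&idle_seq);		/* count becomes even again */
}

static unsigned long long demo_reader(void)
{
	unsigned long long val;
	unsigned int seq;

	do {
		seq = read_seqcount_begin(&idle_seq);	/* spins while odd */
		val = idle_count_demo;
	} while (read_seqcount_retry(&idle_seq, seq));	/* retry if it moved */

	return val;
}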
diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c
index 215330a..d4c8e9c 100644
--- a/arch/s390/kernel/time.c
+++ b/arch/s390/kernel/time.c
@@ -36,7 +36,6 @@
#include <linux/notifier.h>
#include <linux/clocksource.h>
#include <linux/clockchips.h>
-#include <linux/bootmem.h>
#include <asm/uaccess.h>
#include <asm/delay.h>
#include <asm/s390_ext.h>
@@ -62,9 +61,6 @@
u64 sched_clock_base_cc = -1; /* Force to data section. */
-static ext_int_info_t ext_int_info_cc;
-static ext_int_info_t ext_int_etr_cc;
-
static DEFINE_PER_CPU(struct clock_event_device, comparators);
/*
@@ -255,15 +251,11 @@
stp_reset();
/* request the clock comparator external interrupt */
- if (register_early_external_interrupt(0x1004,
- clock_comparator_interrupt,
- &ext_int_info_cc) != 0)
+ if (register_external_interrupt(0x1004, clock_comparator_interrupt))
panic("Couldn't request external interrupt 0x1004");
/* request the timing alert external interrupt */
- if (register_early_external_interrupt(0x1406,
- timing_alert_interrupt,
- &ext_int_etr_cc) != 0)
+ if (register_external_interrupt(0x1406, timing_alert_interrupt))
panic("Couldn't request external interrupt 0x1406");
if (clocksource_register(&clocksource_tod) != 0)
@@ -1445,14 +1437,14 @@
{
int rc;
- stp_page = alloc_bootmem_pages(PAGE_SIZE);
+ stp_page = (void *) get_zeroed_page(GFP_ATOMIC);
rc = chsc_sstpc(stp_page, STP_OP_CTRL, 0x0000);
if (rc == 0)
set_bit(CLOCK_SYNC_HAS_STP, &clock_sync_flags);
else if (stp_online) {
pr_warning("The real or virtual hardware system does "
"not provide an STP interface\n");
- free_bootmem((unsigned long) stp_page, PAGE_SIZE);
+ free_page((unsigned long) stp_page);
stp_page = NULL;
stp_online = 0;
}
diff --git a/arch/s390/kernel/vtime.c b/arch/s390/kernel/vtime.c
index c8eb725..c41bb0d 100644
--- a/arch/s390/kernel/vtime.c
+++ b/arch/s390/kernel/vtime.c
@@ -25,13 +25,9 @@
#include <asm/irq_regs.h>
#include <asm/cputime.h>
-static ext_int_info_t ext_int_info_timer;
-
static DEFINE_PER_CPU(struct vtimer_queue, virt_cpu_timer);
-DEFINE_PER_CPU(struct s390_idle_data, s390_idle) = {
- .lock = __SPIN_LOCK_UNLOCKED(s390_idle.lock)
-};
+DEFINE_PER_CPU(struct s390_idle_data, s390_idle);
static inline __u64 get_vtimer(void)
{
@@ -153,11 +149,13 @@
vq->elapsed -= vq->idle - S390_lowcore.async_enter_timer;
}
- spin_lock(&idle->lock);
+ idle->sequence++;
+ smp_wmb();
idle->idle_time += idle_time;
idle->idle_enter = 0ULL;
idle->idle_count++;
- spin_unlock(&idle->lock);
+ smp_wmb();
+ idle->sequence++;
}
void vtime_stop_cpu(void)
@@ -244,15 +242,23 @@
{
struct s390_idle_data *idle;
unsigned long long now, idle_time, idle_enter;
+ unsigned int sequence;
idle = &per_cpu(s390_idle, cpu);
- spin_lock(&idle->lock);
+
now = get_clock();
+repeat:
+ sequence = idle->sequence;
+ smp_rmb();
+ if (sequence & 1)
+ goto repeat;
idle_time = 0;
idle_enter = idle->idle_enter;
if (idle_enter != 0ULL && idle_enter < now)
idle_time = now - idle_enter;
- spin_unlock(&idle->lock);
+ smp_rmb();
+ if (idle->sequence != sequence)
+ goto repeat;
return idle_time;
}
@@ -557,8 +563,7 @@
void __init vtime_init(void)
{
/* request the cpu timer external interrupt */
- if (register_early_external_interrupt(0x1005, do_cpu_timer_interrupt,
- &ext_int_info_timer) != 0)
+ if (register_external_interrupt(0x1005, do_cpu_timer_interrupt))
panic("Couldn't request external interrupt 0x1005");
/* Enable cpu timer interrupts on the boot cpu. */
diff --git a/arch/s390/power/swsusp_asm64.S b/arch/s390/power/swsusp_asm64.S
index 3c74e7d..76d688d 100644
--- a/arch/s390/power/swsusp_asm64.S
+++ b/arch/s390/power/swsusp_asm64.S
@@ -109,10 +109,11 @@
aghi %r15,-STACK_FRAME_OVERHEAD
stg %r1,__SF_BACKCHAIN(%r15)
+#ifdef CONFIG_SMP
/* Save boot cpu number */
brasl %r14,smp_get_phys_cpu_id
lgr %r10,%r2
-
+#endif
/* Deactivate DAT */
stnsm __SF_EMPTY(%r15),0xfb
@@ -177,11 +178,12 @@
/* Pointer to save arae */
lghi %r13,0x1000
+#ifdef CONFIG_SMP
/* Switch CPUs */
lgr %r2,%r10 /* get cpu id */
llgf %r3,0x318(%r13)
brasl %r14,smp_switch_boot_cpu_in_resume
-
+#endif
/* Restore prefix register */
spx 0x318(%r13)
diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig
index ac1c620..e2bdd7b 100644
--- a/arch/sh/Kconfig
+++ b/arch/sh/Kconfig
@@ -15,7 +15,7 @@
select HAVE_IOREMAP_PROT if MMU
select HAVE_ARCH_TRACEHOOK
select HAVE_DMA_API_DEBUG
- select HAVE_PERF_COUNTER
+ select HAVE_PERF_COUNTERS
select RTC_LIB
select GENERIC_ATOMIC64
help
@@ -71,6 +71,9 @@
config GENERIC_IRQ_PROBE
def_bool y
+config IRQ_PER_CPU
+ def_bool y
+
config GENERIC_GPIO
def_bool n
@@ -151,6 +154,9 @@
config ARCH_HAS_DEFAULT_IDLE
def_bool y
+config ARCH_HAS_CPU_IDLE_WAIT
+ def_bool y
+
config IO_TRAPPED
bool
@@ -411,6 +417,8 @@
select CPU_HAS_PTEAEX
select ARCH_SPARSEMEM_ENABLE
select SYS_SUPPORTS_NUMA
+ select SYS_SUPPORTS_SMP
+ select GENERIC_CLOCKEVENTS_BROADCAST if SMP
config CPU_SUBTYPE_SHX3
bool "Support SH-X3 processor"
@@ -648,7 +656,7 @@
int "Maximum number of CPUs (2-32)"
range 2 32
depends on SMP
- default "4" if CPU_SHX3
+ default "4" if CPU_SUBTYPE_SHX3
default "2"
help
This allows you to specify the maximum number of CPUs which this
diff --git a/arch/sh/include/asm/dma-mapping.h b/arch/sh/include/asm/dma-mapping.h
index ea9d4f4..69d56dd 100644
--- a/arch/sh/include/asm/dma-mapping.h
+++ b/arch/sh/include/asm/dma-mapping.h
@@ -97,7 +97,7 @@
dma_unmap_single(dev, dma_address, size, dir);
}
-static inline void dma_sync_single(struct device *dev, dma_addr_t dma_handle,
+static inline void __dma_sync_single(struct device *dev, dma_addr_t dma_handle,
size_t size, enum dma_data_direction dir)
{
#if defined(CONFIG_PCI) && !defined(CONFIG_SH_PCIDMA_NONCOHERENT)
@@ -119,7 +119,7 @@
dma_cache_sync(dev, phys_to_virt(dma_handle) + offset, size, dir);
}
-static inline void dma_sync_sg(struct device *dev, struct scatterlist *sg,
+static inline void __dma_sync_sg(struct device *dev, struct scatterlist *sg,
int nelems, enum dma_data_direction dir)
{
int i;
@@ -137,7 +137,7 @@
dma_addr_t dma_handle, size_t size,
enum dma_data_direction dir)
{
- dma_sync_single(dev, dma_handle, size, dir);
+ __dma_sync_single(dev, dma_handle, size, dir);
debug_dma_sync_single_for_cpu(dev, dma_handle, size, dir);
}
@@ -146,7 +146,7 @@
size_t size,
enum dma_data_direction dir)
{
- dma_sync_single(dev, dma_handle, size, dir);
+ __dma_sync_single(dev, dma_handle, size, dir);
debug_dma_sync_single_for_device(dev, dma_handle, size, dir);
}
@@ -177,7 +177,7 @@
struct scatterlist *sg, int nelems,
enum dma_data_direction dir)
{
- dma_sync_sg(dev, sg, nelems, dir);
+ __dma_sync_sg(dev, sg, nelems, dir);
debug_dma_sync_sg_for_cpu(dev, sg, nelems, dir);
}
@@ -185,7 +185,7 @@
struct scatterlist *sg, int nelems,
enum dma_data_direction dir)
{
- dma_sync_sg(dev, sg, nelems, dir);
+ __dma_sync_sg(dev, sg, nelems, dir);
debug_dma_sync_sg_for_device(dev, sg, nelems, dir);
}
diff --git a/arch/sh/include/asm/system.h b/arch/sh/include/asm/system.h
index a88895e..ab79e1f 100644
--- a/arch/sh/include/asm/system.h
+++ b/arch/sh/include/asm/system.h
@@ -154,6 +154,7 @@
void per_cpu_trap_init(void);
void default_idle(void);
+void cpu_idle_wait(void);
asmlinkage void break_point_trap(void);
diff --git a/arch/sh/kernel/cpu/sh4a/Makefile b/arch/sh/kernel/cpu/sh4a/Makefile
index 96ea09c..ebdd391 100644
--- a/arch/sh/kernel/cpu/sh4a/Makefile
+++ b/arch/sh/kernel/cpu/sh4a/Makefile
@@ -16,7 +16,7 @@
obj-$(CONFIG_CPU_SUBTYPE_SHX3) += setup-shx3.o
# SMP setup
-smp-$(CONFIG_CPU_SUBTYPE_SHX3) := smp-shx3.o
+smp-$(CONFIG_CPU_SHX3) := smp-shx3.o
# Primary on-chip clocks (common)
clock-$(CONFIG_CPU_SUBTYPE_SH7763) := clock-sh7763.o
@@ -38,6 +38,6 @@
pinmux-$(CONFIG_CPU_SUBTYPE_SH7785) := pinmux-sh7785.o
pinmux-$(CONFIG_CPU_SUBTYPE_SH7786) := pinmux-sh7786.o
-obj-y += $(clock-y)
-obj-$(CONFIG_SMP) += $(smp-y)
+obj-y += $(clock-y)
+obj-$(CONFIG_SMP) += $(smp-y)
obj-$(CONFIG_GENERIC_GPIO) += $(pinmux-y)
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7786.c b/arch/sh/kernel/cpu/sh4a/setup-sh7786.c
index 93e0d2c..b700494 100644
--- a/arch/sh/kernel/cpu/sh4a/setup-sh7786.c
+++ b/arch/sh/kernel/cpu/sh4a/setup-sh7786.c
@@ -595,9 +595,8 @@
HSPI,
GPIO0, GPIO1,
Thermal,
- INTC0, INTC1, INTC2, INTC3, INTC4, INTC5, INTC6, INTC7,
-
- /* interrupt groups */
+ INTICI0, INTICI1, INTICI2, INTICI3,
+ INTICI4, INTICI5, INTICI6, INTICI7,
};
static struct intc_vect vectors[] __initdata = {
@@ -638,10 +637,12 @@
INTC_VECT(HSPI, 0xe80),
INTC_VECT(GPIO0, 0xea0), INTC_VECT(GPIO1, 0xec0),
INTC_VECT(Thermal, 0xee0),
+ INTC_VECT(INTICI0, 0xf00), INTC_VECT(INTICI1, 0xf20),
+ INTC_VECT(INTICI2, 0xf40), INTC_VECT(INTICI3, 0xf60),
+ INTC_VECT(INTICI4, 0xf80), INTC_VECT(INTICI5, 0xfa0),
+ INTC_VECT(INTICI6, 0xfc0), INTC_VECT(INTICI7, 0xfe0),
};
-/* FIXME: Main CPU support only now */
-#if 1 /* Main CPU */
#define CnINTMSK0 0xfe410030
#define CnINTMSK1 0xfe410040
#define CnINTMSKCLR0 0xfe410050
@@ -654,21 +655,6 @@
#define CnINT2MSKCR1 0xfe410a34
#define CnINT2MSKCR2 0xfe410a38
#define CnINT2MSKCR3 0xfe410a3c
-#else /* Sub CPU */
-#define CnINTMSK0 0xfe410034
-#define CnINTMSK1 0xfe410044
-#define CnINTMSKCLR0 0xfe410054
-#define CnINTMSKCLR1 0xfe410064
-#define CnINT2MSKR0 0xfe410b20
-#define CnINT2MSKR1 0xfe410b24
-#define CnINT2MSKR2 0xfe410b28
-#define CnINT2MSKR3 0xfe410b2c
-#define CnINT2MSKCR0 0xfe410b30
-#define CnINT2MSKCR1 0xfe410b34
-#define CnINT2MSKCR2 0xfe410b38
-#define CnINT2MSKCR3 0xfe410b3c
-#endif
-
#define INTMSK2 0xfe410068
#define INTMSKCLR2 0xfe41006c
@@ -753,6 +739,9 @@
GPIO1, Thermal } },
{ 0xfe41085c, 0, 32, 8, /* INT2PRI23 */ { 0, 0, 0, 0 } },
{ 0xfe410860, 0, 32, 8, /* INT2PRI24 */ { 0, 0, 0, 0 } },
+ { 0xfe410090, 0xfe4100a0, 32, 4, /* CnICIPRI / CnICIPRICLR */
+ { INTICI7, INTICI6, INTICI5, INTICI4,
+ INTICI3, INTICI2, INTICI1, INTICI0 }, INTC_SMP(4, 2) },
};
static DECLARE_INTC_DESC(intc_desc, "sh7786", vectors, NULL,
diff --git a/arch/sh/kernel/idle.c b/arch/sh/kernel/idle.c
index f35ed03..27ff2dc 100644
--- a/arch/sh/kernel/idle.c
+++ b/arch/sh/kernel/idle.c
@@ -1,7 +1,7 @@
/*
* The idle loop for all SuperH platforms.
*
- * Copyright (C) 2002 - 2008 Paul Mundt
+ * Copyright (C) 2002 - 2009 Paul Mundt
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
@@ -15,6 +15,7 @@
#include <linux/preempt.h>
#include <linux/thread_info.h>
#include <linux/irqflags.h>
+#include <linux/smp.h>
#include <asm/pgalloc.h>
#include <asm/system.h>
#include <asm/atomic.h>
@@ -79,3 +80,23 @@
check_pgt_cache();
}
}
+
+static void do_nothing(void *unused)
+{
+}
+
+/*
+ * cpu_idle_wait - Used to ensure that all the CPUs discard old value of
+ * pm_idle and update to new pm_idle value. Required while changing pm_idle
+ * handler on SMP systems.
+ *
+ * Caller must have changed pm_idle to the new value before the call. Old
+ * pm_idle value will not be used by any CPU after the return of this function.
+ */
+void cpu_idle_wait(void)
+{
+ smp_mb();
+ /* kick all the CPUs so that they exit out of pm_idle */
+ smp_call_function(do_nothing, NULL, 1);
+}
+EXPORT_SYMBOL_GPL(cpu_idle_wait);
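
The cpu_idle_wait() helper added above only guarantees that every CPU has left the old idle routine once; the caller still has to publish the new handler first. A minimal sketch of the intended calling sequence, assuming the pm_idle hook is visible to the caller (the handler name below is hypothetical):

/* Illustrative sketch; my_new_idle and the pm_idle assignment are
 * assumptions for the example, not part of this patch. */
static void my_new_idle(void)
{
	while (!need_resched())
		cpu_relax();
}

static void install_new_idle_handler(void)
{
	pm_idle = my_new_idle;	/* publish the new handler first */
	cpu_idle_wait();	/* after this, no CPU still runs the old one */
}
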
diff --git a/arch/sh/mm/init.c b/arch/sh/mm/init.c
index ee8e6bb..fe532ae 100644
--- a/arch/sh/mm/init.c
+++ b/arch/sh/mm/init.c
@@ -70,7 +70,7 @@
}
set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, prot));
- flush_tlb_one(get_asid(), addr);
+ local_flush_tlb_one(get_asid(), addr);
}
/*
@@ -177,10 +177,8 @@
free_area_init_nodes(max_zone_pfns);
-#ifdef CONFIG_SUPERH32
/* Set up the uncached fixmap */
set_fixmap_nocache(FIX_UNCACHED, __pa(&__uncached_start));
-#endif
}
static struct kcore_list kcore_mem, kcore_vmalloc;
diff --git a/arch/sparc/boot/Makefile b/arch/sparc/boot/Makefile
index 96041a8..1ff0fd9 100644
--- a/arch/sparc/boot/Makefile
+++ b/arch/sparc/boot/Makefile
@@ -15,7 +15,7 @@
ifeq ($(CONFIG_SPARC32),y)
quiet_cmd_piggy = PIGGY $@
- cmd_piggy = $(obj)/piggyback_32 $@ $(obj)/System.map $(ROOT_IMG)
+ cmd_piggy = $(obj)/piggyback_32 $@ System.map $(ROOT_IMG)
quiet_cmd_btfix = BTFIX $@
cmd_btfix = $(OBJDUMP) -x vmlinux | $(obj)/btfixupprep > $@
quiet_cmd_sysmap = SYSMAP $(obj)/System.map
@@ -58,7 +58,7 @@
$(obj)/zImage: $(obj)/image
$(call if_changed,strip)
-$(obj)/tftpboot.img: $(obj)/piggyback $(obj)/System.map $(obj)/image FORCE
+$(obj)/tftpboot.img: $(obj)/image $(obj)/piggyback_32 System.map $(ROOT_IMG) FORCE
$(call if_changed,elftoaout)
$(call if_changed,piggy)
@@ -79,7 +79,7 @@
$(call if_changed,strip)
@echo ' kernel: $@ is ready'
-$(obj)/tftpboot.img: vmlinux $(obj)/piggyback_64 System.map $(ROOT_IMG) FORCE
+$(obj)/tftpboot.img: $(obj)/image $(obj)/piggyback_64 System.map $(ROOT_IMG) FORCE
$(call if_changed,elftoaout)
$(call if_changed,piggy)
@echo ' kernel: $@ is ready'
diff --git a/arch/sparc/boot/piggyback_32.c b/arch/sparc/boot/piggyback_32.c
index c9f500c..e8dc9ad 100644
--- a/arch/sparc/boot/piggyback_32.c
+++ b/arch/sparc/boot/piggyback_32.c
@@ -70,7 +70,7 @@
int main(int argc,char **argv)
{
static char aout_magic[] = { 0x01, 0x03, 0x01, 0x07 };
- unsigned char buffer[1024], *q, *r;
+ char buffer[1024], *q, *r;
unsigned int i, j, k, start, end, offset;
FILE *map;
struct stat s;
@@ -84,7 +84,7 @@
while (fgets (buffer, 1024, map)) {
if (!strcmp (buffer + 8, " T start\n") || !strcmp (buffer + 16, " T start\n"))
start = strtoul (buffer, NULL, 16);
- else if (!strcmp (buffer + 8, " A end\n") || !strcmp (buffer + 16, " A end\n"))
+ else if (!strcmp (buffer + 8, " A _end\n") || !strcmp (buffer + 16, " A _end\n"))
end = strtoul (buffer, NULL, 16);
}
fclose (map);
diff --git a/arch/sparc/boot/piggyback_64.c b/arch/sparc/boot/piggyback_64.c
index de364bf..c63fd1b 100644
--- a/arch/sparc/boot/piggyback_64.c
+++ b/arch/sparc/boot/piggyback_64.c
@@ -46,6 +46,7 @@
struct stat s;
int image, tail;
+ start = end = 0;
if (stat (argv[3], &s) < 0) die (argv[3]);
map = fopen (argv[2], "r");
if (!map) die(argv[2]);
diff --git a/arch/sparc/kernel/irq_64.c b/arch/sparc/kernel/irq_64.c
index bd07505..f0ee790 100644
--- a/arch/sparc/kernel/irq_64.c
+++ b/arch/sparc/kernel/irq_64.c
@@ -20,7 +20,6 @@
#include <linux/delay.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
-#include <linux/bootmem.h>
#include <linux/irq.h>
#include <asm/ptrace.h>
@@ -914,25 +913,19 @@
tb->nonresum_qmask);
}
-static void __init alloc_one_mondo(unsigned long *pa_ptr, unsigned long qmask)
+/* Each queue region must be a power of 2 multiple of 64 bytes in
+ * size. The base real address must be aligned to the size of the
+ * region. Thus, an 8KB queue must be 8KB aligned, for example.
+ */
+static void __init alloc_one_queue(unsigned long *pa_ptr, unsigned long qmask)
{
unsigned long size = PAGE_ALIGN(qmask + 1);
- void *p = __alloc_bootmem(size, size, 0);
+ unsigned long order = get_order(size);
+ unsigned long p;
+
+ p = __get_free_pages(GFP_KERNEL, order);
if (!p) {
- prom_printf("SUN4V: Error, cannot allocate mondo queue.\n");
- prom_halt();
- }
-
- *pa_ptr = __pa(p);
-}
-
-static void __init alloc_one_kbuf(unsigned long *pa_ptr, unsigned long qmask)
-{
- unsigned long size = PAGE_ALIGN(qmask + 1);
- void *p = __alloc_bootmem(size, size, 0);
-
- if (!p) {
- prom_printf("SUN4V: Error, cannot allocate kbuf page.\n");
+ prom_printf("SUN4V: Error, cannot allocate queue.\n");
prom_halt();
}
@@ -942,11 +935,11 @@
static void __init init_cpu_send_mondo_info(struct trap_per_cpu *tb)
{
#ifdef CONFIG_SMP
- void *page;
+ unsigned long page;
BUILD_BUG_ON((NR_CPUS * sizeof(u16)) > (PAGE_SIZE - 64));
- page = alloc_bootmem_pages(PAGE_SIZE);
+ page = get_zeroed_page(GFP_KERNEL);
if (!page) {
prom_printf("SUN4V: Error, cannot allocate cpu mondo page.\n");
prom_halt();
@@ -965,13 +958,13 @@
for_each_possible_cpu(cpu) {
struct trap_per_cpu *tb = &trap_block[cpu];
- alloc_one_mondo(&tb->cpu_mondo_pa, tb->cpu_mondo_qmask);
- alloc_one_mondo(&tb->dev_mondo_pa, tb->dev_mondo_qmask);
- alloc_one_mondo(&tb->resum_mondo_pa, tb->resum_qmask);
- alloc_one_kbuf(&tb->resum_kernel_buf_pa, tb->resum_qmask);
- alloc_one_mondo(&tb->nonresum_mondo_pa, tb->nonresum_qmask);
- alloc_one_kbuf(&tb->nonresum_kernel_buf_pa,
- tb->nonresum_qmask);
+ alloc_one_queue(&tb->cpu_mondo_pa, tb->cpu_mondo_qmask);
+ alloc_one_queue(&tb->dev_mondo_pa, tb->dev_mondo_qmask);
+ alloc_one_queue(&tb->resum_mondo_pa, tb->resum_qmask);
+ alloc_one_queue(&tb->resum_kernel_buf_pa, tb->resum_qmask);
+ alloc_one_queue(&tb->nonresum_mondo_pa, tb->nonresum_qmask);
+ alloc_one_queue(&tb->nonresum_kernel_buf_pa,
+ tb->nonresum_qmask);
}
}
@@ -999,7 +992,7 @@
kill_prom_timer();
size = sizeof(struct ino_bucket) * NUM_IVECS;
- ivector_table = alloc_bootmem(size);
+ ivector_table = kzalloc(size, GFP_KERNEL);
if (!ivector_table) {
prom_printf("Fatal error, cannot allocate ivector_table\n");
prom_halt();
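
The alignment rule documented above for alloc_one_queue() is met implicitly: __get_free_pages() hands back a block of 2^order pages, and the buddy allocator aligns such a block to its own size. A sketch of the arithmetic for an 8KB queue (the qmask value is illustrative):

/* Sketch only; the qmask value is made up to show the size/order math. */
static unsigned long alloc_8k_queue_example(void)
{
	unsigned long qmask = 8192 - 1;			/* 8KB queue */
	unsigned long size  = PAGE_ALIGN(qmask + 1);	/* 8192 */
	unsigned long order = get_order(size);		/* 0 with 8K pages, 1 with 4K */

	/* A 2^order-page buddy allocation is aligned to its own size, so the
	 * queue comes back 8KB-aligned as the hypervisor requires. */
	return __get_free_pages(GFP_KERNEL, order);
}
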
diff --git a/arch/um/drivers/slip_kern.c b/arch/um/drivers/slip_kern.c
index 5ec1756..dd2aadc 100644
--- a/arch/um/drivers/slip_kern.c
+++ b/arch/um/drivers/slip_kern.c
@@ -30,7 +30,6 @@
slip_proto_init(&spri->slip);
- dev->init = NULL;
dev->hard_header_len = 0;
dev->header_ops = NULL;
dev->addr_len = 0;
diff --git a/arch/um/drivers/slirp_kern.c b/arch/um/drivers/slirp_kern.c
index f15a6e7..e376284 100644
--- a/arch/um/drivers/slirp_kern.c
+++ b/arch/um/drivers/slirp_kern.c
@@ -32,7 +32,6 @@
slip_proto_init(&spri->slip);
- dev->init = NULL;
dev->hard_header_len = 0;
dev->header_ops = NULL;
dev->addr_len = 0;
diff --git a/arch/um/include/asm/dma-mapping.h b/arch/um/include/asm/dma-mapping.h
index 90fc708..378de4b 100644
--- a/arch/um/include/asm/dma-mapping.h
+++ b/arch/um/include/asm/dma-mapping.h
@@ -79,14 +79,14 @@
}
static inline void
-dma_sync_single(struct device *dev, dma_addr_t dma_handle, size_t size,
+dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size,
enum dma_data_direction direction)
{
BUG();
}
static inline void
-dma_sync_sg(struct device *dev, struct scatterlist *sg, int nelems,
+dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
enum dma_data_direction direction)
{
BUG();
diff --git a/arch/x86/include/asm/acpi.h b/arch/x86/include/asm/acpi.h
index 4518dc5..20d1465 100644
--- a/arch/x86/include/asm/acpi.h
+++ b/arch/x86/include/asm/acpi.h
@@ -144,6 +144,7 @@
#else /* !CONFIG_ACPI */
+#define acpi_disabled 1
#define acpi_lapic 0
#define acpi_ioapic 0
static inline void acpi_noirq_set(void) { }
diff --git a/arch/x86/include/asm/iommu.h b/arch/x86/include/asm/iommu.h
index af326a2..fd6d21b 100644
--- a/arch/x86/include/asm/iommu.h
+++ b/arch/x86/include/asm/iommu.h
@@ -6,6 +6,7 @@
extern struct dma_map_ops nommu_dma_ops;
extern int force_iommu, no_iommu;
extern int iommu_detected;
+extern int iommu_pass_through;
/* 10 seconds */
#define DMAR_OPERATION_TIMEOUT ((cycles_t) tsc_khz*10*1000)
diff --git a/arch/x86/include/asm/pci.h b/arch/x86/include/asm/pci.h
index b51a1e8..927958d 100644
--- a/arch/x86/include/asm/pci.h
+++ b/arch/x86/include/asm/pci.h
@@ -130,6 +130,7 @@
/* generic pci stuff */
#include <asm-generic/pci.h>
+#define PCIBIOS_MAX_MEM_32 0xffffffff
#ifdef CONFIG_NUMA
/* Returns the node based on pci bus */
diff --git a/arch/x86/include/asm/pci_x86.h b/arch/x86/include/asm/pci_x86.h
index e60fd3e..b399988 100644
--- a/arch/x86/include/asm/pci_x86.h
+++ b/arch/x86/include/asm/pci_x86.h
@@ -121,6 +121,9 @@
extern int __init pci_mmcfg_arch_init(void);
extern void __init pci_mmcfg_arch_free(void);
+extern struct acpi_mcfg_allocation *pci_mmcfg_config;
+extern int pci_mmcfg_config_num;
+
/*
 * AMD Fam10h CPUs are buggy, and cannot access MMIO config space
 * on their northbridge except through the %eax register. As such, you MUST
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
index 6310861..6b8ca3a 100644
--- a/arch/x86/kernel/acpi/boot.c
+++ b/arch/x86/kernel/acpi/boot.c
@@ -44,11 +44,7 @@
static int __initdata acpi_force = 0;
u32 acpi_rsdt_forced;
-#ifdef CONFIG_ACPI
-int acpi_disabled = 0;
-#else
-int acpi_disabled = 1;
-#endif
+int acpi_disabled;
EXPORT_SYMBOL(acpi_disabled);
#ifdef CONFIG_X86_64
@@ -122,72 +118,6 @@
early_iounmap(map, size);
}
-#ifdef CONFIG_PCI_MMCONFIG
-
-static int acpi_mcfg_64bit_base_addr __initdata = FALSE;
-
-/* The physical address of the MMCONFIG aperture. Set from ACPI tables. */
-struct acpi_mcfg_allocation *pci_mmcfg_config;
-int pci_mmcfg_config_num;
-
-static int __init acpi_mcfg_oem_check(struct acpi_table_mcfg *mcfg)
-{
- if (!strcmp(mcfg->header.oem_id, "SGI"))
- acpi_mcfg_64bit_base_addr = TRUE;
-
- return 0;
-}
-
-int __init acpi_parse_mcfg(struct acpi_table_header *header)
-{
- struct acpi_table_mcfg *mcfg;
- unsigned long i;
- int config_size;
-
- if (!header)
- return -EINVAL;
-
- mcfg = (struct acpi_table_mcfg *)header;
-
- /* how many config structures do we have */
- pci_mmcfg_config_num = 0;
- i = header->length - sizeof(struct acpi_table_mcfg);
- while (i >= sizeof(struct acpi_mcfg_allocation)) {
- ++pci_mmcfg_config_num;
- i -= sizeof(struct acpi_mcfg_allocation);
- };
- if (pci_mmcfg_config_num == 0) {
- printk(KERN_ERR PREFIX "MMCONFIG has no entries\n");
- return -ENODEV;
- }
-
- config_size = pci_mmcfg_config_num * sizeof(*pci_mmcfg_config);
- pci_mmcfg_config = kmalloc(config_size, GFP_KERNEL);
- if (!pci_mmcfg_config) {
- printk(KERN_WARNING PREFIX
- "No memory for MCFG config tables\n");
- return -ENOMEM;
- }
-
- memcpy(pci_mmcfg_config, &mcfg[1], config_size);
-
- acpi_mcfg_oem_check(mcfg);
-
- for (i = 0; i < pci_mmcfg_config_num; ++i) {
- if ((pci_mmcfg_config[i].address > 0xFFFFFFFF) &&
- !acpi_mcfg_64bit_base_addr) {
- printk(KERN_ERR PREFIX
- "MMCONFIG not in low 4GB of memory\n");
- kfree(pci_mmcfg_config);
- pci_mmcfg_config_num = 0;
- return -ENODEV;
- }
- }
-
- return 0;
-}
-#endif /* CONFIG_PCI_MMCONFIG */
-
#ifdef CONFIG_X86_LOCAL_APIC
static int __init acpi_parse_madt(struct acpi_table_header *table)
{
@@ -1519,14 +1449,6 @@
},
{
.callback = force_acpi_ht,
- .ident = "ASUS P4B266",
- .matches = {
- DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
- DMI_MATCH(DMI_BOARD_NAME, "P4B266"),
- },
- },
- {
- .callback = force_acpi_ht,
.ident = "ASUS P2B-DS",
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
diff --git a/arch/x86/kernel/acpi/cstate.c b/arch/x86/kernel/acpi/cstate.c
index bbbe4bb..8c44c23 100644
--- a/arch/x86/kernel/acpi/cstate.c
+++ b/arch/x86/kernel/acpi/cstate.c
@@ -34,12 +34,22 @@
flags->bm_check = 1;
else if (c->x86_vendor == X86_VENDOR_INTEL) {
/*
- * Today all CPUs that support C3 share cache.
- * TBD: This needs to look at cache shared map, once
- * multi-core detection patch makes to the base.
+ * Today all MP CPUs that support C3 share cache.
+ * And caches should not be flushed by software while
+ * entering C3 type state.
*/
flags->bm_check = 1;
}
+
+ /*
+ * On all recent Intel platforms, ARB_DISABLE is a nop.
+ * So, set bm_control to zero to indicate that ARB_DISABLE
+ * is not required while entering C3 type state on
+ * P4, Core and beyond CPUs
+ */
+ if (c->x86_vendor == X86_VENDOR_INTEL &&
+ (c->x86 > 0x6 || (c->x86 == 6 && c->x86_model >= 14)))
+ flags->bm_control = 0;
}
EXPORT_SYMBOL(acpi_processor_power_init_bm_check);
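
The bm_control override added above is keyed purely on CPUID family/model. Restated as a predicate (the example CPUs in the comment are for orientation only):

/* Mirrors the condition added in acpi_processor_power_init_bm_check(). */
static int intel_arb_disable_is_nop(unsigned int family, unsigned int model)
{
	return family > 0x6 || (family == 6 && model >= 14);
}
/* e.g. Pentium M Dothan (6, 13) -> 0; Core/Core 2 (6, 14/15) -> 1; P4 (15, x) -> 1 */
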
diff --git a/arch/x86/kernel/acpi/processor.c b/arch/x86/kernel/acpi/processor.c
index 7c074ee..d296f4a 100644
--- a/arch/x86/kernel/acpi/processor.c
+++ b/arch/x86/kernel/acpi/processor.c
@@ -72,6 +72,7 @@
return;
}
+
/* Initialize _PDC data based on the CPU vendor */
void arch_acpi_processor_init_pdc(struct acpi_processor *pr)
{
@@ -85,3 +86,15 @@
}
EXPORT_SYMBOL(arch_acpi_processor_init_pdc);
+
+void arch_acpi_processor_cleanup_pdc(struct acpi_processor *pr)
+{
+ if (pr->pdc) {
+ kfree(pr->pdc->pointer->buffer.pointer);
+ kfree(pr->pdc->pointer);
+ kfree(pr->pdc);
+ pr->pdc = NULL;
+ }
+}
+
+EXPORT_SYMBOL(arch_acpi_processor_cleanup_pdc);
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index b7a7920..4d0216f 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -1414,6 +1414,9 @@
irte.vector = vector;
irte.dest_id = IRTE_DEST(destination);
+ /* Set source-id of interrupt request */
+ set_ioapic_sid(&irte, apic_id);
+
modify_irte(irq, &irte);
ir_entry->index2 = (index >> 15) & 0x1;
@@ -3290,6 +3293,9 @@
irte.vector = cfg->vector;
irte.dest_id = IRTE_DEST(dest);
+ /* Set source-id of interrupt request */
+ set_msi_sid(&irte, pdev);
+
modify_irte(irq, &irte);
msg->address_hi = MSI_ADDR_BASE_HI;
diff --git a/arch/x86/kernel/pci-dma.c b/arch/x86/kernel/pci-dma.c
index 328592f..4763047 100644
--- a/arch/x86/kernel/pci-dma.c
+++ b/arch/x86/kernel/pci-dma.c
@@ -32,6 +32,8 @@
/* Set this to 1 if there is a HW IOMMU in the system */
int iommu_detected __read_mostly = 0;
+int iommu_pass_through;
+
dma_addr_t bad_dma_address __read_mostly = 0;
EXPORT_SYMBOL(bad_dma_address);
@@ -209,6 +211,10 @@
#ifdef CONFIG_SWIOTLB
if (!strncmp(p, "soft", 4))
swiotlb = 1;
+ if (!strncmp(p, "pt", 2)) {
+ iommu_pass_through = 1;
+ return 1;
+ }
#endif
gart_parse_options(p);
diff --git a/arch/x86/kernel/pci-swiotlb.c b/arch/x86/kernel/pci-swiotlb.c
index a1712f2..6af96ee 100644
--- a/arch/x86/kernel/pci-swiotlb.c
+++ b/arch/x86/kernel/pci-swiotlb.c
@@ -71,7 +71,8 @@
{
/* don't initialize swiotlb if iommu=off (no_iommu=1) */
#ifdef CONFIG_X86_64
- if (!iommu_detected && !no_iommu && max_pfn > MAX_DMA32_PFN)
+ if ((!iommu_detected && !no_iommu && max_pfn > MAX_DMA32_PFN) ||
+ iommu_pass_through)
swiotlb = 1;
#endif
if (swiotlb_force)
diff --git a/arch/x86/pci/acpi.c b/arch/x86/pci/acpi.c
index c0ecf25..b26626d 100644
--- a/arch/x86/pci/acpi.c
+++ b/arch/x86/pci/acpi.c
@@ -38,15 +38,26 @@
struct acpi_resource_address64 addr;
acpi_status status;
- if (info->res_num >= PCI_BUS_NUM_RESOURCES)
- return AE_OK;
-
status = resource_to_addr(acpi_res, &addr);
if (ACPI_SUCCESS(status))
info->res_num++;
return AE_OK;
}
+static int
+bus_has_transparent_bridge(struct pci_bus *bus)
+{
+ struct pci_dev *dev;
+
+ list_for_each_entry(dev, &bus->devices, bus_list) {
+ u16 class = dev->class >> 8;
+
+ if (class == PCI_CLASS_BRIDGE_PCI && dev->transparent)
+ return true;
+ }
+ return false;
+}
+
static acpi_status
setup_resource(struct acpi_resource *acpi_res, void *data)
{
@@ -56,9 +67,7 @@
acpi_status status;
unsigned long flags;
struct resource *root;
-
- if (info->res_num >= PCI_BUS_NUM_RESOURCES)
- return AE_OK;
+ int max_root_bus_resources = PCI_BUS_NUM_RESOURCES;
status = resource_to_addr(acpi_res, &addr);
if (!ACPI_SUCCESS(status))
@@ -82,6 +91,18 @@
res->end = res->start + addr.address_length - 1;
res->child = NULL;
+ if (bus_has_transparent_bridge(info->bus))
+ max_root_bus_resources -= 3;
+ if (info->res_num >= max_root_bus_resources) {
+ printk(KERN_WARNING "PCI: Failed to allocate 0x%lx-0x%lx "
+ "from %s for %s due to _CRS returning more than "
+ "%d resource descriptors\n", (unsigned long) res->start,
+ (unsigned long) res->end, root->name, info->name,
+ max_root_bus_resources);
+ info->res_num++;
+ return AE_OK;
+ }
+
if (insert_resource(root, res)) {
printk(KERN_ERR "PCI: Failed to allocate 0x%lx-0x%lx "
"from %s for %s\n", (unsigned long) res->start,
diff --git a/arch/x86/pci/mmconfig-shared.c b/arch/x86/pci/mmconfig-shared.c
index 8766b0e..712443e 100644
--- a/arch/x86/pci/mmconfig-shared.c
+++ b/arch/x86/pci/mmconfig-shared.c
@@ -523,6 +523,69 @@
static int __initdata known_bridge;
+static int acpi_mcfg_64bit_base_addr __initdata = FALSE;
+
+/* The physical address of the MMCONFIG aperture. Set from ACPI tables. */
+struct acpi_mcfg_allocation *pci_mmcfg_config;
+int pci_mmcfg_config_num;
+
+static int __init acpi_mcfg_oem_check(struct acpi_table_mcfg *mcfg)
+{
+ if (!strcmp(mcfg->header.oem_id, "SGI"))
+ acpi_mcfg_64bit_base_addr = TRUE;
+
+ return 0;
+}
+
+static int __init pci_parse_mcfg(struct acpi_table_header *header)
+{
+ struct acpi_table_mcfg *mcfg;
+ unsigned long i;
+ int config_size;
+
+ if (!header)
+ return -EINVAL;
+
+ mcfg = (struct acpi_table_mcfg *)header;
+
+ /* how many config structures do we have */
+ pci_mmcfg_config_num = 0;
+ i = header->length - sizeof(struct acpi_table_mcfg);
+ while (i >= sizeof(struct acpi_mcfg_allocation)) {
+ ++pci_mmcfg_config_num;
+ i -= sizeof(struct acpi_mcfg_allocation);
+ };
+ if (pci_mmcfg_config_num == 0) {
+ printk(KERN_ERR PREFIX "MMCONFIG has no entries\n");
+ return -ENODEV;
+ }
+
+ config_size = pci_mmcfg_config_num * sizeof(*pci_mmcfg_config);
+ pci_mmcfg_config = kmalloc(config_size, GFP_KERNEL);
+ if (!pci_mmcfg_config) {
+ printk(KERN_WARNING PREFIX
+ "No memory for MCFG config tables\n");
+ return -ENOMEM;
+ }
+
+ memcpy(pci_mmcfg_config, &mcfg[1], config_size);
+
+ acpi_mcfg_oem_check(mcfg);
+
+ for (i = 0; i < pci_mmcfg_config_num; ++i) {
+ if ((pci_mmcfg_config[i].address > 0xFFFFFFFF) &&
+ !acpi_mcfg_64bit_base_addr) {
+ printk(KERN_ERR PREFIX
+ "MMCONFIG not in low 4GB of memory\n");
+ kfree(pci_mmcfg_config);
+ pci_mmcfg_config_num = 0;
+ return -ENODEV;
+ }
+ }
+
+ return 0;
+}
+
static void __init __pci_mmcfg_init(int early)
{
/* MMCONFIG disabled */
@@ -543,7 +606,7 @@
}
if (!known_bridge)
- acpi_table_parse(ACPI_SIG_MCFG, acpi_parse_mcfg);
+ acpi_table_parse(ACPI_SIG_MCFG, pci_parse_mcfg);
pci_mmcfg_reject_broken(early);
diff --git a/arch/xtensa/configs/s6105_defconfig b/arch/xtensa/configs/s6105_defconfig
index 768bee0..bb84fbc 100644
--- a/arch/xtensa/configs/s6105_defconfig
+++ b/arch/xtensa/configs/s6105_defconfig
@@ -263,7 +263,54 @@
# CONFIG_SCSI_NETLINK is not set
# CONFIG_ATA is not set
# CONFIG_MD is not set
-# CONFIG_NETDEVICES is not set
+CONFIG_NETDEVICES=y
+# CONFIG_DUMMY is not set
+# CONFIG_BONDING is not set
+# CONFIG_MACVLAN is not set
+# CONFIG_EQUALIZER is not set
+# CONFIG_TUN is not set
+# CONFIG_VETH is not set
+CONFIG_PHYLIB=y
+
+#
+# MII PHY device drivers
+#
+# CONFIG_MARVELL_PHY is not set
+# CONFIG_DAVICOM_PHY is not set
+# CONFIG_QSEMI_PHY is not set
+# CONFIG_LXT_PHY is not set
+# CONFIG_CICADA_PHY is not set
+# CONFIG_VITESSE_PHY is not set
+CONFIG_SMSC_PHY=y
+# CONFIG_BROADCOM_PHY is not set
+# CONFIG_ICPLUS_PHY is not set
+# CONFIG_REALTEK_PHY is not set
+# CONFIG_NATIONAL_PHY is not set
+# CONFIG_STE10XP is not set
+# CONFIG_LSI_ET1011C_PHY is not set
+# CONFIG_FIXED_PHY is not set
+# CONFIG_MDIO_BITBANG is not set
+# CONFIG_NET_ETHERNET is not set
+CONFIG_NETDEV_1000=y
+CONFIG_S6GMAC=y
+# CONFIG_NETDEV_10000 is not set
+
+#
+# Wireless LAN
+#
+# CONFIG_WLAN_PRE80211 is not set
+# CONFIG_WLAN_80211 is not set
+# CONFIG_IWLWIFI_LEDS is not set
+
+#
+# Enable WiMAX (Networking options) to see the WiMAX drivers
+#
+# CONFIG_WAN is not set
+# CONFIG_PPP is not set
+# CONFIG_SLIP is not set
+# CONFIG_NETCONSOLE is not set
+# CONFIG_NETPOLL is not set
+# CONFIG_NET_POLL_CONTROLLER is not set
# CONFIG_ISDN is not set
# CONFIG_PHONE is not set
@@ -304,8 +351,6 @@
# CONFIG_LEGACY_PTYS is not set
# CONFIG_IPMI_HANDLER is not set
# CONFIG_HW_RANDOM is not set
-# CONFIG_RTC is not set
-# CONFIG_GEN_RTC is not set
# CONFIG_R3964 is not set
# CONFIG_RAW_DRIVER is not set
# CONFIG_TCG_TPM is not set
@@ -387,7 +432,59 @@
# CONFIG_MEMSTICK is not set
# CONFIG_NEW_LEDS is not set
# CONFIG_ACCESSIBILITY is not set
-# CONFIG_RTC_CLASS is not set
+CONFIG_RTC_LIB=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_HCTOSYS=y
+CONFIG_RTC_HCTOSYS_DEVICE="rtc0"
+# CONFIG_RTC_DEBUG is not set
+
+#
+# RTC interfaces
+#
+# CONFIG_RTC_INTF_SYSFS is not set
+# CONFIG_RTC_INTF_PROC is not set
+# CONFIG_RTC_INTF_DEV is not set
+# CONFIG_RTC_DRV_TEST is not set
+
+#
+# I2C RTC drivers
+#
+# CONFIG_RTC_DRV_DS1307 is not set
+# CONFIG_RTC_DRV_DS1374 is not set
+# CONFIG_RTC_DRV_DS1672 is not set
+# CONFIG_RTC_DRV_MAX6900 is not set
+# CONFIG_RTC_DRV_RS5C372 is not set
+# CONFIG_RTC_DRV_ISL1208 is not set
+# CONFIG_RTC_DRV_X1205 is not set
+# CONFIG_RTC_DRV_PCF8563 is not set
+# CONFIG_RTC_DRV_PCF8583 is not set
+CONFIG_RTC_DRV_M41T80=y
+# CONFIG_RTC_DRV_M41T80_WDT is not set
+# CONFIG_RTC_DRV_S35390A is not set
+# CONFIG_RTC_DRV_FM3130 is not set
+# CONFIG_RTC_DRV_RX8581 is not set
+
+#
+# SPI RTC drivers
+#
+
+#
+# Platform RTC drivers
+#
+# CONFIG_RTC_DRV_DS1286 is not set
+# CONFIG_RTC_DRV_DS1511 is not set
+# CONFIG_RTC_DRV_DS1553 is not set
+# CONFIG_RTC_DRV_DS1742 is not set
+# CONFIG_RTC_DRV_STK17TA8 is not set
+# CONFIG_RTC_DRV_M48T86 is not set
+# CONFIG_RTC_DRV_M48T35 is not set
+# CONFIG_RTC_DRV_M48T59 is not set
+# CONFIG_RTC_DRV_BQ4802 is not set
+# CONFIG_RTC_DRV_V3020 is not set
+
+#
+# on-CPU RTC drivers
+#
# CONFIG_DMADEVICES is not set
# CONFIG_UIO is not set
# CONFIG_STAGING is not set
diff --git a/arch/xtensa/include/asm/cacheflush.h b/arch/xtensa/include/asm/cacheflush.h
index 8fc1c0c..b7b8fbe 100644
--- a/arch/xtensa/include/asm/cacheflush.h
+++ b/arch/xtensa/include/asm/cacheflush.h
@@ -155,5 +155,100 @@
#endif
+#define XTENSA_CACHEBLK_LOG2 29
+#define XTENSA_CACHEBLK_SIZE (1 << XTENSA_CACHEBLK_LOG2)
+#define XTENSA_CACHEBLK_MASK (7 << XTENSA_CACHEBLK_LOG2)
+
+#if XCHAL_HAVE_CACHEATTR
+static inline u32 xtensa_get_cacheattr(void)
+{
+ u32 r;
+ asm volatile(" rsr %0, CACHEATTR" : "=a"(r));
+ return r;
+}
+
+static inline u32 xtensa_get_dtlb1(u32 addr)
+{
+ u32 r = addr & XTENSA_CACHEBLK_MASK;
+ return r | ((xtensa_get_cacheattr() >> (r >> (XTENSA_CACHEBLK_LOG2-2)))
+ & 0xF);
+}
+#else
+static inline u32 xtensa_get_dtlb1(u32 addr)
+{
+ u32 r;
+ asm volatile(" rdtlb1 %0, %1" : "=a"(r) : "a"(addr));
+ asm volatile(" dsync");
+ return r;
+}
+
+static inline u32 xtensa_get_cacheattr(void)
+{
+ u32 r = 0;
+ u32 a = 0;
+ do {
+ a -= XTENSA_CACHEBLK_SIZE;
+ r = (r << 4) | (xtensa_get_dtlb1(a) & 0xF);
+ } while (a);
+ return r;
+}
+#endif
+
+static inline int xtensa_need_flush_dma_source(u32 addr)
+{
+ return (xtensa_get_dtlb1(addr) & ((1 << XCHAL_CA_BITS) - 1)) >= 4;
+}
+
+static inline int xtensa_need_invalidate_dma_destination(u32 addr)
+{
+ return (xtensa_get_dtlb1(addr) & ((1 << XCHAL_CA_BITS) - 1)) != 2;
+}
+
+static inline void flush_dcache_unaligned(u32 addr, u32 size)
+{
+ u32 cnt;
+ if (size) {
+ cnt = (size + ((XCHAL_DCACHE_LINESIZE - 1) & addr)
+ + XCHAL_DCACHE_LINESIZE - 1) / XCHAL_DCACHE_LINESIZE;
+ while (cnt--) {
+ asm volatile(" dhwb %0, 0" : : "a"(addr));
+ addr += XCHAL_DCACHE_LINESIZE;
+ }
+ asm volatile(" dsync");
+ }
+}
+
+static inline void invalidate_dcache_unaligned(u32 addr, u32 size)
+{
+ int cnt;
+ if (size) {
+ asm volatile(" dhwbi %0, 0 ;" : : "a"(addr));
+ cnt = (size + ((XCHAL_DCACHE_LINESIZE - 1) & addr)
+ - XCHAL_DCACHE_LINESIZE - 1) / XCHAL_DCACHE_LINESIZE;
+ while (cnt-- > 0) {
+ asm volatile(" dhi %0, %1" : : "a"(addr),
+ "n"(XCHAL_DCACHE_LINESIZE));
+ addr += XCHAL_DCACHE_LINESIZE;
+ }
+ asm volatile(" dhwbi %0, %1" : : "a"(addr),
+ "n"(XCHAL_DCACHE_LINESIZE));
+ asm volatile(" dsync");
+ }
+}
+
+static inline void flush_invalidate_dcache_unaligned(u32 addr, u32 size)
+{
+ u32 cnt;
+ if (size) {
+ cnt = (size + ((XCHAL_DCACHE_LINESIZE - 1) & addr)
+ + XCHAL_DCACHE_LINESIZE - 1) / XCHAL_DCACHE_LINESIZE;
+ while (cnt--) {
+ asm volatile(" dhwbi %0, 0" : : "a"(addr));
+ addr += XCHAL_DCACHE_LINESIZE;
+ }
+ asm volatile(" dsync");
+ }
+}
+
#endif /* __KERNEL__ */
#endif /* _XTENSA_CACHEFLUSH_H */
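
For orientation, the CACHEATTR layout used by the helpers above divides the 4GB address space into eight 512MB blocks (XTENSA_CACHEBLK_LOG2 = 29) and keeps one 4-bit attribute per block; xtensa_get_dtlb1() ORs that nibble into the block base. A host-side sketch of the same arithmetic with a made-up CACHEATTR value:

#include <stdint.h>
#include <stdio.h>

/* Standalone restatement of the nibble lookup; the register value is made up. */
static uint32_t attr_for(uint32_t cacheattr, uint32_t addr)
{
	uint32_t base = addr & (7u << 29);		/* 512MB block base */
	return (cacheattr >> (base >> 27)) & 0xF;	/* 4 bits per block */
}

int main(void)
{
	/* 0xD0001234 lies in block 6 (base 0xC0000000), so bits 27:24 are used. */
	printf("attr = %u\n", attr_for(0x22FFFFFF, 0xD0001234));	/* prints 2 */
	return 0;
}
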
diff --git a/arch/xtensa/include/asm/gpio.h b/arch/xtensa/include/asm/gpio.h
index 0763b07..a8c9fc4 100644
--- a/arch/xtensa/include/asm/gpio.h
+++ b/arch/xtensa/include/asm/gpio.h
@@ -38,14 +38,14 @@
return __gpio_cansleep(gpio);
}
+static inline int gpio_to_irq(unsigned int gpio)
+{
+ return __gpio_to_irq(gpio);
+}
+
/*
* Not implemented, yet.
*/
-static inline int gpio_to_irq(unsigned int gpio)
-{
- return -ENOSYS;
-}
-
static inline int irq_to_gpio(unsigned int irq)
{
return -EINVAL;
diff --git a/arch/xtensa/include/asm/irq.h b/arch/xtensa/include/asm/irq.h
index dfac82d..4c0ccc9 100644
--- a/arch/xtensa/include/asm/irq.h
+++ b/arch/xtensa/include/asm/irq.h
@@ -11,6 +11,7 @@
#ifndef _XTENSA_IRQ_H
#define _XTENSA_IRQ_H
+#include <linux/init.h>
#include <platform/hardware.h>
#include <variant/core.h>
@@ -21,11 +22,20 @@
static inline void variant_irq_disable(unsigned int irq) { }
#endif
+#ifndef VARIANT_NR_IRQS
+# define VARIANT_NR_IRQS 0
+#endif
#ifndef PLATFORM_NR_IRQS
# define PLATFORM_NR_IRQS 0
#endif
#define XTENSA_NR_IRQS XCHAL_NUM_INTERRUPTS
-#define NR_IRQS (XTENSA_NR_IRQS + PLATFORM_NR_IRQS)
+#define NR_IRQS (XTENSA_NR_IRQS + VARIANT_NR_IRQS + PLATFORM_NR_IRQS)
+
+#if VARIANT_NR_IRQS == 0
+static inline void variant_init_irq(void) { }
+#else
+void variant_init_irq(void) __init;
+#endif
static __inline__ int irq_canonicalize(int irq)
{
diff --git a/arch/xtensa/kernel/irq.c b/arch/xtensa/kernel/irq.c
index a36c85e..a1badb3 100644
--- a/arch/xtensa/kernel/irq.c
+++ b/arch/xtensa/kernel/irq.c
@@ -197,4 +197,6 @@
}
cached_irq_mask = 0;
+
+ variant_init_irq();
}
diff --git a/arch/xtensa/platforms/s6105/device.c b/arch/xtensa/platforms/s6105/device.c
index 78b08be..65333ff 100644
--- a/arch/xtensa/platforms/s6105/device.c
+++ b/arch/xtensa/platforms/s6105/device.c
@@ -5,14 +5,27 @@
*/
#include <linux/kernel.h>
+#include <linux/gpio.h>
#include <linux/init.h>
+#include <linux/irq.h>
+#include <linux/phy.h>
#include <linux/platform_device.h>
#include <linux/serial.h>
#include <linux/serial_8250.h>
#include <variant/hardware.h>
+#include <variant/dmac.h>
+#include <platform/gpio.h>
+
+#define GPIO3_INTNUM 3
#define UART_INTNUM 4
+#define GMAC_INTNUM 5
+
+static const signed char gpio3_irq_mappings[] = {
+ S6_INTC_GPIO(3),
+ -1
+};
static const signed char uart_irq_mappings[] = {
S6_INTC_UART(0),
@@ -20,8 +33,18 @@
-1,
};
+static const signed char gmac_irq_mappings[] = {
+ S6_INTC_GMAC_STAT,
+ S6_INTC_GMAC_ERR,
+ S6_INTC_DMA_HOSTTERMCNT(0),
+ S6_INTC_DMA_HOSTTERMCNT(1),
+ -1
+};
+
const signed char *platform_irq_mappings[NR_IRQS] = {
+ [GPIO3_INTNUM] = gpio3_irq_mappings,
[UART_INTNUM] = uart_irq_mappings,
+ [GMAC_INTNUM] = gmac_irq_mappings,
};
static struct plat_serial8250_port serial_platform_data[] = {
@@ -46,6 +69,66 @@
{ },
};
+static struct resource s6_gmac_resource[] = {
+ {
+ .name = "mem",
+ .start = (resource_size_t)S6_REG_GMAC,
+ .end = (resource_size_t)S6_REG_GMAC + 0x10000 - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .name = "dma",
+ .start = (resource_size_t)
+ DMA_CHNL(S6_REG_HIFDMA, S6_HIFDMA_GMACTX),
+ .end = (resource_size_t)
+ DMA_CHNL(S6_REG_HIFDMA, S6_HIFDMA_GMACTX) + 0x100 - 1,
+ .flags = IORESOURCE_DMA,
+ },
+ {
+ .name = "dma",
+ .start = (resource_size_t)
+ DMA_CHNL(S6_REG_HIFDMA, S6_HIFDMA_GMACRX),
+ .end = (resource_size_t)
+ DMA_CHNL(S6_REG_HIFDMA, S6_HIFDMA_GMACRX) + 0x100 - 1,
+ .flags = IORESOURCE_DMA,
+ },
+ {
+ .name = "io",
+ .start = (resource_size_t)S6_MEM_GMAC,
+ .end = (resource_size_t)S6_MEM_GMAC + 0x2000000 - 1,
+ .flags = IORESOURCE_IO,
+ },
+ {
+ .name = "irq",
+ .start = (resource_size_t)GMAC_INTNUM,
+ .flags = IORESOURCE_IRQ,
+ },
+ {
+ .name = "irq",
+ .start = (resource_size_t)PHY_POLL,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static int __init prepare_phy_irq(int pin)
+{
+ int irq;
+ if (gpio_request(pin, "s6gmac_phy") < 0)
+ goto fail;
+ if (gpio_direction_input(pin) < 0)
+ goto free;
+ irq = gpio_to_irq(pin);
+ if (irq < 0)
+ goto free;
+ if (set_irq_type(irq, IRQ_TYPE_LEVEL_LOW) < 0)
+ goto free;
+ return irq;
+free:
+ gpio_free(pin);
+fail:
+ return PHY_POLL;
+}
+
static struct platform_device platform_devices[] = {
{
.name = "serial8250",
@@ -54,12 +137,23 @@
.platform_data = serial_platform_data,
},
},
+ {
+ .name = "s6gmac",
+ .id = 0,
+ .resource = s6_gmac_resource,
+ .num_resources = ARRAY_SIZE(s6_gmac_resource),
+ },
+ {
+ I2C_BOARD_INFO("m41t62", S6I2C_ADDR_M41T62),
+ },
};
static int __init device_init(void)
{
int i;
+ s6_gmac_resource[5].start = prepare_phy_irq(GPIO_PHY_IRQ);
+
for (i = 0; i < ARRAY_SIZE(platform_devices); i++)
platform_device_register(&platform_devices[i]);
return 0;
diff --git a/arch/xtensa/platforms/s6105/setup.c b/arch/xtensa/platforms/s6105/setup.c
index 855ddea..86ce730 100644
--- a/arch/xtensa/platforms/s6105/setup.c
+++ b/arch/xtensa/platforms/s6105/setup.c
@@ -35,12 +35,21 @@
{
unsigned long reg;
+ reg = readl(S6_REG_GREG1 + S6_GREG1_PLLSEL);
+ reg &= ~(S6_GREG1_PLLSEL_GMAC_MASK << S6_GREG1_PLLSEL_GMAC |
+ S6_GREG1_PLLSEL_GMII_MASK << S6_GREG1_PLLSEL_GMII);
+ reg |= S6_GREG1_PLLSEL_GMAC_125MHZ << S6_GREG1_PLLSEL_GMAC |
+ S6_GREG1_PLLSEL_GMII_125MHZ << S6_GREG1_PLLSEL_GMII;
+ writel(reg, S6_REG_GREG1 + S6_GREG1_PLLSEL);
+
reg = readl(S6_REG_GREG1 + S6_GREG1_CLKGATE);
reg &= ~(1 << S6_GREG1_BLOCK_SB);
+ reg &= ~(1 << S6_GREG1_BLOCK_GMAC);
writel(reg, S6_REG_GREG1 + S6_GREG1_CLKGATE);
reg = readl(S6_REG_GREG1 + S6_GREG1_BLOCKENA);
reg |= 1 << S6_GREG1_BLOCK_SB;
+ reg |= 1 << S6_GREG1_BLOCK_GMAC;
writel(reg, S6_REG_GREG1 + S6_GREG1_BLOCKENA);
printk(KERN_NOTICE "S6105 on Stretch S6000 - "
@@ -49,7 +58,7 @@
void __init platform_init(bp_tag_t *first)
{
- s6_gpio_init();
+ s6_gpio_init(0);
gpio_request(GPIO_LED1_NGREEN, "led1_green");
gpio_request(GPIO_LED1_RED, "led1_red");
gpio_direction_output(GPIO_LED1_NGREEN, 1);
diff --git a/arch/xtensa/variants/s6000/Makefile b/arch/xtensa/variants/s6000/Makefile
index d83f380..3e7ef0a 100644
--- a/arch/xtensa/variants/s6000/Makefile
+++ b/arch/xtensa/variants/s6000/Makefile
@@ -1,4 +1,4 @@
# s6000 Makefile
-obj-y += irq.o gpio.o
+obj-y += irq.o gpio.o dmac.o
obj-$(CONFIG_XTENSA_CALIBRATE_CCOUNT) += delay.o
diff --git a/arch/xtensa/variants/s6000/dmac.c b/arch/xtensa/variants/s6000/dmac.c
new file mode 100644
index 0000000..dc7f7c5
--- /dev/null
+++ b/arch/xtensa/variants/s6000/dmac.c
@@ -0,0 +1,173 @@
+/*
+ * Authors: Oskar Schirmer <os@emlix.com>
+ * Daniel Gloeckner <dg@emlix.com>
+ * (c) 2008 emlix GmbH http://www.emlix.com
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/io.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/spinlock.h>
+#include <asm/cacheflush.h>
+#include <variant/dmac.h>
+
+/* DMA engine lookup */
+
+struct s6dmac_ctrl s6dmac_ctrl[S6_DMAC_NB];
+
+
+/* DMA control, per engine */
+
+void s6dmac_put_fifo_cache(u32 dmac, int chan, u32 src, u32 dst, u32 size)
+{
+ if (xtensa_need_flush_dma_source(src)) {
+ u32 base = src;
+ u32 span = size;
+ u32 chunk = readl(DMA_CHNL(dmac, chan) + S6_DMA_CMONCHUNK);
+ if (chunk && (size > chunk)) {
+ s32 skip =
+ readl(DMA_CHNL(dmac, chan) + S6_DMA_SRCSKIP);
+ u32 gaps = (size+chunk-1)/chunk - 1;
+ if (skip >= 0) {
+ span += gaps * skip;
+ } else if (-skip > chunk) {
+ s32 decr = gaps * (chunk + skip);
+ base += decr;
+ span = chunk - decr;
+ } else {
+ span = max(span + gaps * skip,
+ (chunk + skip) * gaps - skip);
+ }
+ }
+ flush_dcache_unaligned(base, span);
+ }
+ if (xtensa_need_invalidate_dma_destination(dst)) {
+ u32 base = dst;
+ u32 span = size;
+ u32 chunk = readl(DMA_CHNL(dmac, chan) + S6_DMA_CMONCHUNK);
+ if (chunk && (size > chunk)) {
+ s32 skip =
+ readl(DMA_CHNL(dmac, chan) + S6_DMA_DSTSKIP);
+ u32 gaps = (size+chunk-1)/chunk - 1;
+ if (skip >= 0) {
+ span += gaps * skip;
+ } else if (-skip > chunk) {
+ s32 decr = gaps * (chunk + skip);
+ base += decr;
+ span = chunk - decr;
+ } else {
+ span = max(span + gaps * skip,
+ (chunk + skip) * gaps - skip);
+ }
+ }
+ invalidate_dcache_unaligned(base, span);
+ }
+ s6dmac_put_fifo(dmac, chan, src, dst, size);
+}
+
+void s6dmac_disable_error_irqs(u32 dmac, u32 mask)
+{
+ unsigned long flags;
+ spinlock_t *spinl = &s6dmac_ctrl[_dmac_addr_index(dmac)].lock;
+ spin_lock_irqsave(spinl, flags);
+ _s6dmac_disable_error_irqs(dmac, mask);
+ spin_unlock_irqrestore(spinl, flags);
+}
+
+u32 s6dmac_int_sources(u32 dmac, u32 channel)
+{
+ u32 mask, ret, tmp;
+ mask = 1 << channel;
+
+ tmp = readl(dmac + S6_DMA_TERMCNTIRQSTAT);
+ tmp &= mask;
+ writel(tmp, dmac + S6_DMA_TERMCNTIRQCLR);
+ ret = tmp >> channel;
+
+ tmp = readl(dmac + S6_DMA_PENDCNTIRQSTAT);
+ tmp &= mask;
+ writel(tmp, dmac + S6_DMA_PENDCNTIRQCLR);
+ ret |= (tmp >> channel) << 1;
+
+ tmp = readl(dmac + S6_DMA_LOWWMRKIRQSTAT);
+ tmp &= mask;
+ writel(tmp, dmac + S6_DMA_LOWWMRKIRQCLR);
+ ret |= (tmp >> channel) << 2;
+
+ tmp = readl(dmac + S6_DMA_INTRAW0);
+ tmp &= (mask << S6_DMA_INT0_OVER) | (mask << S6_DMA_INT0_UNDER);
+ writel(tmp, dmac + S6_DMA_INTCLEAR0);
+
+ if (tmp & (mask << S6_DMA_INT0_UNDER))
+ ret |= 1 << 3;
+ if (tmp & (mask << S6_DMA_INT0_OVER))
+ ret |= 1 << 4;
+
+ tmp = readl(dmac + S6_DMA_MASTERERRINFO);
+ mask <<= S6_DMA_INT1_CHANNEL;
+ if (((tmp >> S6_DMA_MASTERERR_CHAN(0)) & S6_DMA_MASTERERR_CHAN_MASK)
+ == channel)
+ mask |= 1 << S6_DMA_INT1_MASTER;
+ if (((tmp >> S6_DMA_MASTERERR_CHAN(1)) & S6_DMA_MASTERERR_CHAN_MASK)
+ == channel)
+ mask |= 1 << (S6_DMA_INT1_MASTER + 1);
+ if (((tmp >> S6_DMA_MASTERERR_CHAN(2)) & S6_DMA_MASTERERR_CHAN_MASK)
+ == channel)
+ mask |= 1 << (S6_DMA_INT1_MASTER + 2);
+
+ tmp = readl(dmac + S6_DMA_INTRAW1) & mask;
+ writel(tmp, dmac + S6_DMA_INTCLEAR1);
+ ret |= ((tmp >> channel) & 1) << 5;
+ ret |= ((tmp >> S6_DMA_INT1_MASTER) & S6_DMA_INT1_MASTER_MASK) << 6;
+
+ return ret;
+}
+
+void s6dmac_release_chan(u32 dmac, int chan)
+{
+ if (chan >= 0)
+ s6dmac_disable_chan(dmac, chan);
+}
+
+
+/* global init */
+
+static inline void __init dmac_init(u32 dmac, u8 chan_nb)
+{
+ s6dmac_ctrl[S6_DMAC_INDEX(dmac)].dmac = dmac;
+ spin_lock_init(&s6dmac_ctrl[S6_DMAC_INDEX(dmac)].lock);
+ s6dmac_ctrl[S6_DMAC_INDEX(dmac)].chan_nb = chan_nb;
+ writel(S6_DMA_INT1_MASTER_MASK << S6_DMA_INT1_MASTER,
+ dmac + S6_DMA_INTCLEAR1);
+}
+
+static inline void __init dmac_master(u32 dmac,
+ u32 m0start, u32 m0end, u32 m1start, u32 m1end)
+{
+ writel(m0start, dmac + S6_DMA_MASTER0START);
+ writel(m0end - 1, dmac + S6_DMA_MASTER0END);
+ writel(m1start, dmac + S6_DMA_MASTER1START);
+ writel(m1end - 1, dmac + S6_DMA_MASTER1END);
+}
+
+static void __init s6_dmac_init(void)
+{
+ dmac_init(S6_REG_LMSDMA, S6_LMSDMA_NB);
+ dmac_master(S6_REG_LMSDMA,
+ S6_MEM_DDR, S6_MEM_PCIE_APER, S6_MEM_EFI, S6_MEM_GMAC);
+ dmac_init(S6_REG_NIDMA, S6_NIDMA_NB);
+ dmac_init(S6_REG_DPDMA, S6_DPDMA_NB);
+ dmac_master(S6_REG_DPDMA,
+ S6_MEM_DDR, S6_MEM_PCIE_APER, S6_REG_DP, S6_REG_DPDMA);
+ dmac_init(S6_REG_HIFDMA, S6_HIFDMA_NB);
+ dmac_master(S6_REG_HIFDMA,
+ S6_MEM_GMAC, S6_MEM_PCIE_CFG, S6_MEM_PCIE_APER, S6_MEM_AUX);
+}
+
+arch_initcall(s6_dmac_init);
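
The span computation in s6dmac_put_fifo_cache() above is the non-obvious part: for a scatter/gather transfer the cache maintenance has to cover the chunks plus the gaps skipped between them. A worked sketch with made-up numbers for the simple positive-skip case:

/* Illustrative numbers only: size = 4096, chunk = 1024, skip = 64. */
static u32 sg_flush_span_example(void)
{
	u32 size  = 4096;
	u32 chunk = 1024;
	s32 skip  = 64;					/* gap between chunks */
	u32 gaps  = (size + chunk - 1) / chunk - 1;	/* 3 gaps */

	/* 4 chunks of 1KB plus 3 gaps of 64 bytes: 4288 bytes are flushed
	 * starting at the source address, matching the skip >= 0 branch. */
	return size + gaps * skip;			/* 4288 */
}
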
diff --git a/arch/xtensa/variants/s6000/gpio.c b/arch/xtensa/variants/s6000/gpio.c
index 79317fd..380a70f 100644
--- a/arch/xtensa/variants/s6000/gpio.c
+++ b/arch/xtensa/variants/s6000/gpio.c
@@ -4,15 +4,20 @@
* Copyright (c) 2009 emlix GmbH
* Authors: Oskar Schirmer <os@emlix.com>
* Johannes Weiner <jw@emlix.com>
+ * Daniel Gloeckner <dg@emlix.com>
*/
+#include <linux/bitops.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/io.h>
+#include <linux/irq.h>
#include <linux/gpio.h>
#include <variant/hardware.h>
+#define IRQ_BASE XTENSA_NR_IRQS
+
#define S6_GPIO_DATA 0x000
#define S6_GPIO_IS 0x404
#define S6_GPIO_IBE 0x408
@@ -52,19 +57,175 @@
writeb(val ? ~0 : 0, S6_REG_GPIO + S6_GPIO_DATA + S6_GPIO_OFFSET(off));
}
+static int to_irq(struct gpio_chip *chip, unsigned offset)
+{
+ if (offset < 8)
+ return offset + IRQ_BASE;
+ return -EINVAL;
+}
+
static struct gpio_chip gpiochip = {
.owner = THIS_MODULE,
.direction_input = direction_input,
.get = get,
.direction_output = direction_output,
.set = set,
+ .to_irq = to_irq,
.base = 0,
.ngpio = 24,
.can_sleep = 0, /* no blocking io needed */
.exported = 0, /* no exporting to userspace */
};
-int s6_gpio_init(void)
+int s6_gpio_init(u32 afsel)
{
+ writeb(afsel, S6_REG_GPIO + S6_GPIO_BANK(0) + S6_GPIO_AFSEL);
+ writeb(afsel >> 8, S6_REG_GPIO + S6_GPIO_BANK(1) + S6_GPIO_AFSEL);
+ writeb(afsel >> 16, S6_REG_GPIO + S6_GPIO_BANK(2) + S6_GPIO_AFSEL);
return gpiochip_add(&gpiochip);
}
+
+static void ack(unsigned int irq)
+{
+ writeb(1 << (irq - IRQ_BASE), S6_REG_GPIO + S6_GPIO_IC);
+}
+
+static void mask(unsigned int irq)
+{
+ u8 r = readb(S6_REG_GPIO + S6_GPIO_IE);
+ r &= ~(1 << (irq - IRQ_BASE));
+ writeb(r, S6_REG_GPIO + S6_GPIO_IE);
+}
+
+static void unmask(unsigned int irq)
+{
+ u8 m = readb(S6_REG_GPIO + S6_GPIO_IE);
+ m |= 1 << (irq - IRQ_BASE);
+ writeb(m, S6_REG_GPIO + S6_GPIO_IE);
+}
+
+static int set_type(unsigned int irq, unsigned int type)
+{
+ const u8 m = 1 << (irq - IRQ_BASE);
+ irq_flow_handler_t handler;
+ struct irq_desc *desc;
+ u8 reg;
+
+ if (type == IRQ_TYPE_PROBE) {
+ if ((readb(S6_REG_GPIO + S6_GPIO_BANK(0) + S6_GPIO_AFSEL) & m)
+ || (readb(S6_REG_GPIO + S6_GPIO_BANK(0) + S6_GPIO_IE) & m)
+ || readb(S6_REG_GPIO + S6_GPIO_BANK(0) + S6_GPIO_DIR
+ + S6_GPIO_MASK(irq - IRQ_BASE)))
+ return 0;
+ type = IRQ_TYPE_EDGE_BOTH;
+ }
+
+ reg = readb(S6_REG_GPIO + S6_GPIO_BANK(0) + S6_GPIO_IS);
+ if (type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH)) {
+ reg |= m;
+ handler = handle_level_irq;
+ } else {
+ reg &= ~m;
+ handler = handle_edge_irq;
+ }
+ writeb(reg, S6_REG_GPIO + S6_GPIO_BANK(0) + S6_GPIO_IS);
+ desc = irq_to_desc(irq);
+ desc->handle_irq = handler;
+
+ reg = readb(S6_REG_GPIO + S6_GPIO_BANK(0) + S6_GPIO_IEV);
+ if (type & (IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_EDGE_RISING))
+ reg |= m;
+ else
+ reg &= ~m;
+ writeb(reg, S6_REG_GPIO + S6_GPIO_BANK(0) + S6_GPIO_IEV);
+
+ reg = readb(S6_REG_GPIO + S6_GPIO_BANK(0) + S6_GPIO_IBE);
+ if ((type & IRQ_TYPE_EDGE_BOTH) == IRQ_TYPE_EDGE_BOTH)
+ reg |= m;
+ else
+ reg &= ~m;
+ writeb(reg, S6_REG_GPIO + S6_GPIO_BANK(0) + S6_GPIO_IBE);
+ return 0;
+}
+
+static struct irq_chip gpioirqs = {
+ .name = "GPIO",
+ .ack = ack,
+ .mask = mask,
+ .unmask = unmask,
+ .set_type = set_type,
+};
+
+static u8 demux_masks[4];
+
+static void demux_irqs(unsigned int irq, struct irq_desc *desc)
+{
+ u8 *mask = get_irq_desc_data(desc);
+ u8 pending;
+ int cirq;
+
+ desc->chip->mask(irq);
+ desc->chip->ack(irq);
+ pending = readb(S6_REG_GPIO + S6_GPIO_BANK(0) + S6_GPIO_MIS) & *mask;
+ cirq = IRQ_BASE - 1;
+ while (pending) {
+ int n = ffs(pending);
+ cirq += n;
+ pending >>= n;
+ generic_handle_irq(cirq);
+ }
+ desc->chip->unmask(irq);
+}
+
+extern const signed char *platform_irq_mappings[XTENSA_NR_IRQS];
+
+void __init variant_init_irq(void)
+{
+ int irq, n;
+ writeb(0, S6_REG_GPIO + S6_GPIO_BANK(0) + S6_GPIO_IE);
+ for (irq = n = 0; irq < XTENSA_NR_IRQS; irq++) {
+ const signed char *mapping = platform_irq_mappings[irq];
+ int alone = 1;
+ u8 mask;
+ if (!mapping)
+ continue;
+		for (mask = 0; *mapping != -1; mapping++)
+ switch (*mapping) {
+ case S6_INTC_GPIO(0):
+ mask |= 1 << 0;
+ break;
+ case S6_INTC_GPIO(1):
+ mask |= 1 << 1;
+ break;
+ case S6_INTC_GPIO(2):
+ mask |= 1 << 2;
+ break;
+ case S6_INTC_GPIO(3):
+ mask |= 0x1f << 3;
+ break;
+ default:
+ alone = 0;
+ }
+ if (mask) {
+ int cirq, i;
+ if (!alone) {
+ printk(KERN_ERR "chained irq chips can't share"
+ " parent irq %i\n", irq);
+ continue;
+ }
+ demux_masks[n] = mask;
+ cirq = IRQ_BASE - 1;
+ do {
+ i = ffs(mask);
+ cirq += i;
+ mask >>= i;
+ set_irq_chip(cirq, &gpioirqs);
+ set_irq_type(irq, IRQ_TYPE_LEVEL_LOW);
+ } while (mask);
+ set_irq_data(irq, demux_masks + n);
+ set_irq_chained_handler(irq, demux_irqs);
+ if (++n == ARRAY_SIZE(demux_masks))
+ break;
+ }
+ }
+}
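
The set_type() handler above drives three per-pin register bits. The encoding it implements can be restated as a small helper (the struct and function here are illustrative, not part of the patch):

struct s6_gpio_trigger { u8 is, iev, ibe; };

/* is:  1 = level sensitive, 0 = edge triggered
 * iev: 1 = high level / rising edge, 0 = low level / falling edge
 * ibe: 1 = trigger on both edges
 */
static struct s6_gpio_trigger encode_irq_type(unsigned int type)
{
	struct s6_gpio_trigger t = {
		.is  = !!(type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH)),
		.iev = !!(type & (IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_EDGE_RISING)),
		.ibe = (type & IRQ_TYPE_EDGE_BOTH) == IRQ_TYPE_EDGE_BOTH,
	};
	return t;	/* e.g. IRQ_TYPE_LEVEL_LOW -> is=1, iev=0, ibe=0 */
}
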
diff --git a/arch/xtensa/variants/s6000/include/variant/dmac.h b/arch/xtensa/variants/s6000/include/variant/dmac.h
new file mode 100644
index 0000000..89ab948
--- /dev/null
+++ b/arch/xtensa/variants/s6000/include/variant/dmac.h
@@ -0,0 +1,387 @@
+/*
+ * include/asm-xtensa/variant-s6000/dmac.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2006 Tensilica Inc.
+ * Copyright (C) 2008 Emlix GmbH <info@emlix.com>
+ * Authors: Fabian Godehardt <fg@emlix.com>
+ * Oskar Schirmer <os@emlix.com>
+ * Daniel Gloeckner <dg@emlix.com>
+ */
+
+#ifndef __ASM_XTENSA_S6000_DMAC_H
+#define __ASM_XTENSA_S6000_DMAC_H
+#include <linux/io.h>
+#include <variant/hardware.h>
+
+/* DMA global */
+
+#define S6_DMA_INTSTAT0 0x000
+#define S6_DMA_INTSTAT1 0x004
+#define S6_DMA_INTENABLE0 0x008
+#define S6_DMA_INTENABLE1 0x00C
+#define S6_DMA_INTRAW0 0x010
+#define S6_DMA_INTRAW1 0x014
+#define S6_DMA_INTCLEAR0 0x018
+#define S6_DMA_INTCLEAR1 0x01C
+#define S6_DMA_INTSET0 0x020
+#define S6_DMA_INTSET1 0x024
+#define S6_DMA_INT0_UNDER 0
+#define S6_DMA_INT0_OVER 16
+#define S6_DMA_INT1_CHANNEL 0
+#define S6_DMA_INT1_MASTER 16
+#define S6_DMA_INT1_MASTER_MASK 7
+#define S6_DMA_TERMCNTIRQSTAT 0x028
+#define S6_DMA_TERMCNTIRQCLR 0x02C
+#define S6_DMA_TERMCNTIRQSET 0x030
+#define S6_DMA_PENDCNTIRQSTAT 0x034
+#define S6_DMA_PENDCNTIRQCLR 0x038
+#define S6_DMA_PENDCNTIRQSET 0x03C
+#define S6_DMA_LOWWMRKIRQSTAT 0x040
+#define S6_DMA_LOWWMRKIRQCLR 0x044
+#define S6_DMA_LOWWMRKIRQSET 0x048
+#define S6_DMA_MASTERERRINFO 0x04C
+#define S6_DMA_MASTERERR_CHAN(n) (4*(n))
+#define S6_DMA_MASTERERR_CHAN_MASK 0xF
+#define S6_DMA_DESCRFIFO0 0x050
+#define S6_DMA_DESCRFIFO1 0x054
+#define S6_DMA_DESCRFIFO2 0x058
+#define S6_DMA_DESCRFIFO2_AUTODISABLE 24
+#define S6_DMA_DESCRFIFO3 0x05C
+#define S6_DMA_MASTER0START 0x060
+#define S6_DMA_MASTER0END 0x064
+#define S6_DMA_MASTER1START 0x068
+#define S6_DMA_MASTER1END 0x06C
+#define S6_DMA_NEXTFREE 0x070
+#define S6_DMA_NEXTFREE_CHAN 0
+#define S6_DMA_NEXTFREE_CHAN_MASK 0x1F
+#define S6_DMA_NEXTFREE_ENA 16
+#define S6_DMA_NEXTFREE_ENA_MASK ((1 << 16) - 1)
+#define S6_DMA_DPORTCTRLGRP(p) ((p) * 4 + 0x074)
+#define S6_DMA_DPORTCTRLGRP_FRAMEREP 0
+#define S6_DMA_DPORTCTRLGRP_NRCHANS 1
+#define S6_DMA_DPORTCTRLGRP_NRCHANS_1 0
+#define S6_DMA_DPORTCTRLGRP_NRCHANS_3 1
+#define S6_DMA_DPORTCTRLGRP_NRCHANS_4 2
+#define S6_DMA_DPORTCTRLGRP_NRCHANS_2 3
+#define S6_DMA_DPORTCTRLGRP_ENA 31
+
+
+/* DMA per channel */
+
+#define DMA_CHNL(dmac, n) ((dmac) + 0x1000 + (n) * 0x100)
+#define DMA_INDEX_CHNL(addr) (((addr) >> 8) & 0xF)
+#define DMA_MASK_DMAC(addr) ((addr) & 0xFFFF0000)
+#define S6_DMA_CHNCTRL 0x000
+#define S6_DMA_CHNCTRL_ENABLE 0
+#define S6_DMA_CHNCTRL_PAUSE 1
+#define S6_DMA_CHNCTRL_PRIO 2
+#define S6_DMA_CHNCTRL_PRIO_MASK 3
+#define S6_DMA_CHNCTRL_PERIPHXFER 4
+#define S6_DMA_CHNCTRL_PERIPHENA 5
+#define S6_DMA_CHNCTRL_SRCINC 6
+#define S6_DMA_CHNCTRL_DSTINC 7
+#define S6_DMA_CHNCTRL_BURSTLOG 8
+#define S6_DMA_CHNCTRL_BURSTLOG_MASK 7
+#define S6_DMA_CHNCTRL_DESCFIFODEPTH 12
+#define S6_DMA_CHNCTRL_DESCFIFODEPTH_MASK 0x1F
+#define S6_DMA_CHNCTRL_DESCFIFOFULL 17
+#define S6_DMA_CHNCTRL_BWCONSEL 18
+#define S6_DMA_CHNCTRL_BWCONENA 19
+#define S6_DMA_CHNCTRL_PENDGCNTSTAT 20
+#define S6_DMA_CHNCTRL_PENDGCNTSTAT_MASK 0x3F
+#define S6_DMA_CHNCTRL_LOWWMARK 26
+#define S6_DMA_CHNCTRL_LOWWMARK_MASK 0xF
+#define S6_DMA_CHNCTRL_TSTAMP 30
+#define S6_DMA_TERMCNTNB 0x004
+#define S6_DMA_TERMCNTNB_MASK 0xFFFF
+#define S6_DMA_TERMCNTTMO 0x008
+#define S6_DMA_TERMCNTSTAT 0x00C
+#define S6_DMA_TERMCNTSTAT_MASK 0xFF
+#define S6_DMA_CMONCHUNK 0x010
+#define S6_DMA_SRCSKIP 0x014
+#define S6_DMA_DSTSKIP 0x018
+#define S6_DMA_CUR_SRC 0x024
+#define S6_DMA_CUR_DST 0x028
+#define S6_DMA_TIMESTAMP 0x030
+
+/* DMA channel lists */
+
+#define S6_DPDMA_CHAN(stream, channel) (4 * (stream) + (channel))
+#define S6_DPDMA_NB 16
+
+#define S6_HIFDMA_GMACTX 0
+#define S6_HIFDMA_GMACRX 1
+#define S6_HIFDMA_I2S0 2
+#define S6_HIFDMA_I2S1 3
+#define S6_HIFDMA_EGIB 4
+#define S6_HIFDMA_PCITX 5
+#define S6_HIFDMA_PCIRX 6
+#define S6_HIFDMA_NB 7
+
+#define S6_NIDMA_NB 4
+
+#define S6_LMSDMA_NB 12
+
+/* controller access */
+
+#define S6_DMAC_NB 4
+#define S6_DMAC_INDEX(dmac) (((unsigned)(dmac) >> 18) % S6_DMAC_NB)
+
+struct s6dmac_ctrl {
+ u32 dmac;
+ spinlock_t lock;
+ u8 chan_nb;
+};
+
+extern struct s6dmac_ctrl s6dmac_ctrl[S6_DMAC_NB];
+
+
+/* DMA control, per channel */
+
+static inline int s6dmac_fifo_full(u32 dmac, int chan)
+{
+ return (readl(DMA_CHNL(dmac, chan) + S6_DMA_CHNCTRL)
+ & (1 << S6_DMA_CHNCTRL_DESCFIFOFULL)) && 1;
+}
+
+static inline int s6dmac_termcnt_irq(u32 dmac, int chan)
+{
+ u32 m = 1 << chan;
+ int r = (readl(dmac + S6_DMA_TERMCNTIRQSTAT) & m) && 1;
+ if (r)
+ writel(m, dmac + S6_DMA_TERMCNTIRQCLR);
+ return r;
+}
+
+static inline int s6dmac_pendcnt_irq(u32 dmac, int chan)
+{
+ u32 m = 1 << chan;
+ int r = (readl(dmac + S6_DMA_PENDCNTIRQSTAT) & m) && 1;
+ if (r)
+ writel(m, dmac + S6_DMA_PENDCNTIRQCLR);
+ return r;
+}
+
+static inline int s6dmac_lowwmark_irq(u32 dmac, int chan)
+{
+ int r = (readl(dmac + S6_DMA_LOWWMRKIRQSTAT) & (1 << chan)) ? 1 : 0;
+ if (r)
+ writel(1 << chan, dmac + S6_DMA_LOWWMRKIRQCLR);
+ return r;
+}
+
+static inline u32 s6dmac_pending_count(u32 dmac, int chan)
+{
+ return (readl(DMA_CHNL(dmac, chan) + S6_DMA_CHNCTRL)
+ >> S6_DMA_CHNCTRL_PENDGCNTSTAT)
+ & S6_DMA_CHNCTRL_PENDGCNTSTAT_MASK;
+}
+
+static inline void s6dmac_set_terminal_count(u32 dmac, int chan, u32 n)
+{
+ n &= S6_DMA_TERMCNTNB_MASK;
+ n |= readl(DMA_CHNL(dmac, chan) + S6_DMA_TERMCNTNB)
+ & ~S6_DMA_TERMCNTNB_MASK;
+ writel(n, DMA_CHNL(dmac, chan) + S6_DMA_TERMCNTNB);
+}
+
+static inline u32 s6dmac_get_terminal_count(u32 dmac, int chan)
+{
+ return (readl(DMA_CHNL(dmac, chan) + S6_DMA_TERMCNTNB))
+ & S6_DMA_TERMCNTNB_MASK;
+}
+
+static inline u32 s6dmac_timestamp(u32 dmac, int chan)
+{
+ return readl(DMA_CHNL(dmac, chan) + S6_DMA_TIMESTAMP);
+}
+
+static inline u32 s6dmac_cur_src(u32 dmac, int chan)
+{
+ return readl(DMA_CHNL(dmac, chan) + S6_DMA_CUR_SRC);
+}
+
+static inline u32 s6dmac_cur_dst(u32 dmac, int chan)
+{
+ return readl(DMA_CHNL(dmac, chan) + S6_DMA_CUR_DST);
+}
+
+static inline void s6dmac_disable_chan(u32 dmac, int chan)
+{
+ u32 ctrl;
+ writel(readl(DMA_CHNL(dmac, chan) + S6_DMA_CHNCTRL)
+ & ~(1 << S6_DMA_CHNCTRL_ENABLE),
+ DMA_CHNL(dmac, chan) + S6_DMA_CHNCTRL);
+ do
+ ctrl = readl(DMA_CHNL(dmac, chan) + S6_DMA_CHNCTRL);
+ while (ctrl & (1 << S6_DMA_CHNCTRL_ENABLE));
+}
+
+static inline void s6dmac_set_stride_skip(u32 dmac, int chan,
+ int comchunk, /* 0: disable scatter/gather */
+ int srcskip, int dstskip)
+{
+ writel(comchunk, DMA_CHNL(dmac, chan) + S6_DMA_CMONCHUNK);
+ writel(srcskip, DMA_CHNL(dmac, chan) + S6_DMA_SRCSKIP);
+ writel(dstskip, DMA_CHNL(dmac, chan) + S6_DMA_DSTSKIP);
+}
+
+static inline void s6dmac_enable_chan(u32 dmac, int chan,
+ int prio, /* 0 (highest) .. 3 (lowest) */
+ int periphxfer, /* <0: disable p.req.line, 0..1: mode */
+	int srcinc, int dstinc,	/* 0: don't increment src/dst address */
+ int comchunk, /* 0: disable scatter/gather */
+ int srcskip, int dstskip,
+ int burstsize, /* 4 for I2S, 7 for everything else */
+ int bandwidthconserve, /* <0: disable, 0..1: select */
+ int lowwmark, /* 0..15 */
+ int timestamp, /* 0: disable timestamp */
+ int enable) /* 0: disable for now */
+{
+ writel(1, DMA_CHNL(dmac, chan) + S6_DMA_TERMCNTNB);
+ writel(0, DMA_CHNL(dmac, chan) + S6_DMA_TERMCNTTMO);
+ writel(lowwmark << S6_DMA_CHNCTRL_LOWWMARK,
+ DMA_CHNL(dmac, chan) + S6_DMA_CHNCTRL);
+ s6dmac_set_stride_skip(dmac, chan, comchunk, srcskip, dstskip);
+ writel(((enable ? 1 : 0) << S6_DMA_CHNCTRL_ENABLE) |
+ (prio << S6_DMA_CHNCTRL_PRIO) |
+ (((periphxfer > 0) ? 1 : 0) << S6_DMA_CHNCTRL_PERIPHXFER) |
+ (((periphxfer < 0) ? 0 : 1) << S6_DMA_CHNCTRL_PERIPHENA) |
+ ((srcinc ? 1 : 0) << S6_DMA_CHNCTRL_SRCINC) |
+ ((dstinc ? 1 : 0) << S6_DMA_CHNCTRL_DSTINC) |
+ (burstsize << S6_DMA_CHNCTRL_BURSTLOG) |
+ (((bandwidthconserve > 0) ? 1 : 0) << S6_DMA_CHNCTRL_BWCONSEL) |
+ (((bandwidthconserve < 0) ? 0 : 1) << S6_DMA_CHNCTRL_BWCONENA) |
+ (lowwmark << S6_DMA_CHNCTRL_LOWWMARK) |
+ ((timestamp ? 1 : 0) << S6_DMA_CHNCTRL_TSTAMP),
+ DMA_CHNL(dmac, chan) + S6_DMA_CHNCTRL);
+}
+
+
+/* DMA control, per engine */
+
+static inline unsigned _dmac_addr_index(u32 dmac)
+{
+ unsigned i = S6_DMAC_INDEX(dmac);
+ if (s6dmac_ctrl[i].dmac != dmac)
+ BUG();
+ return i;
+}
+
+static inline void _s6dmac_disable_error_irqs(u32 dmac, u32 mask)
+{
+ writel(mask, dmac + S6_DMA_TERMCNTIRQCLR);
+ writel(mask, dmac + S6_DMA_PENDCNTIRQCLR);
+ writel(mask, dmac + S6_DMA_LOWWMRKIRQCLR);
+ writel(readl(dmac + S6_DMA_INTENABLE0)
+ & ~((mask << S6_DMA_INT0_UNDER) | (mask << S6_DMA_INT0_OVER)),
+ dmac + S6_DMA_INTENABLE0);
+ writel(readl(dmac + S6_DMA_INTENABLE1) & ~(mask << S6_DMA_INT1_CHANNEL),
+ dmac + S6_DMA_INTENABLE1);
+ writel((mask << S6_DMA_INT0_UNDER) | (mask << S6_DMA_INT0_OVER),
+ dmac + S6_DMA_INTCLEAR0);
+ writel(mask << S6_DMA_INT1_CHANNEL, dmac + S6_DMA_INTCLEAR1);
+}
+
+/*
+ * request channel from specified engine
+ * with chan<0, accept any channel
+ * for the remaining parameters see s6dmac_enable_chan
+ * returns < 0 upon error, channel nb otherwise
+ */
+static inline int s6dmac_request_chan(u32 dmac, int chan,
+ int prio,
+ int periphxfer,
+ int srcinc, int dstinc,
+ int comchunk,
+ int srcskip, int dstskip,
+ int burstsize,
+ int bandwidthconserve,
+ int lowwmark,
+ int timestamp,
+ int enable)
+{
+ int r = chan;
+ unsigned long flags;
+ spinlock_t *spinl = &s6dmac_ctrl[_dmac_addr_index(dmac)].lock;
+ spin_lock_irqsave(spinl, flags);
+ if (r < 0) {
+ r = (readl(dmac + S6_DMA_NEXTFREE) >> S6_DMA_NEXTFREE_CHAN)
+ & S6_DMA_NEXTFREE_CHAN_MASK;
+ }
+ if (r >= s6dmac_ctrl[_dmac_addr_index(dmac)].chan_nb) {
+ if (chan < 0)
+ r = -EBUSY;
+ else
+ r = -ENXIO;
+ } else if (((readl(dmac + S6_DMA_NEXTFREE) >> S6_DMA_NEXTFREE_ENA)
+ >> r) & 1) {
+ r = -EBUSY;
+ } else {
+ s6dmac_enable_chan(dmac, r, prio, periphxfer,
+ srcinc, dstinc, comchunk, srcskip, dstskip, burstsize,
+ bandwidthconserve, lowwmark, timestamp, enable);
+ }
+ spin_unlock_irqrestore(spinl, flags);
+ return r;
+}
+
+static inline void s6dmac_put_fifo(u32 dmac, int chan,
+ u32 src, u32 dst, u32 size)
+{
+ unsigned long flags;
+ spinlock_t *spinl = &s6dmac_ctrl[_dmac_addr_index(dmac)].lock;
+ spin_lock_irqsave(spinl, flags);
+ writel(src, dmac + S6_DMA_DESCRFIFO0);
+ writel(dst, dmac + S6_DMA_DESCRFIFO1);
+ writel(size, dmac + S6_DMA_DESCRFIFO2);
+ writel(chan, dmac + S6_DMA_DESCRFIFO3);
+ spin_unlock_irqrestore(spinl, flags);
+}
+
+static inline u32 s6dmac_channel_enabled(u32 dmac, int chan)
+{
+ return readl(DMA_CHNL(dmac, chan) + S6_DMA_CHNCTRL) &
+ (1 << S6_DMA_CHNCTRL_ENABLE);
+}
+
+/*
+ * group 1-4 data port channels
+ * with port=0..3, nrch=1-4 channels,
+ * frrep=0/1 (dis- or enable frame repeat)
+ */
+static inline void s6dmac_dp_setup_group(u32 dmac, int port,
+ int nrch, int frrep)
+{
+	static const u8 mask[4] = {0, 3, 1, 2};
+ BUG_ON(dmac != S6_REG_DPDMA);
+ if ((port < 0) || (port > 3) || (nrch < 1) || (nrch > 4))
+ return;
+ writel((mask[nrch - 1] << S6_DMA_DPORTCTRLGRP_NRCHANS)
+ | ((frrep ? 1 : 0) << S6_DMA_DPORTCTRLGRP_FRAMEREP),
+ dmac + S6_DMA_DPORTCTRLGRP(port));
+}
+
+static inline void s6dmac_dp_switch_group(u32 dmac, int port, int enable)
+{
+ u32 tmp;
+ BUG_ON(dmac != S6_REG_DPDMA);
+ tmp = readl(dmac + S6_DMA_DPORTCTRLGRP(port));
+ if (enable)
+ tmp |= (1 << S6_DMA_DPORTCTRLGRP_ENA);
+ else
+ tmp &= ~(1 << S6_DMA_DPORTCTRLGRP_ENA);
+ writel(tmp, dmac + S6_DMA_DPORTCTRLGRP(port));
+}
+
+extern void s6dmac_put_fifo_cache(u32 dmac, int chan,
+ u32 src, u32 dst, u32 size);
+extern void s6dmac_disable_error_irqs(u32 dmac, u32 mask);
+extern u32 s6dmac_int_sources(u32 dmac, u32 channel);
+extern void s6dmac_release_chan(u32 dmac, int chan);
+
+#endif /* __ASM_XTENSA_S6000_DMAC_H */
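
s6dmac_request_chan() above takes a long positional parameter list whose meaning is only spelled out in the comments next to s6dmac_enable_chan(). A hedged usage sketch for claiming the GMAC TX channel on the HIF DMA controller; the parameter values are assumptions derived from those comments, not taken from a driver in this merge:

static int request_gmac_tx_channel_example(void)
{
	return s6dmac_request_chan(S6_REG_HIFDMA, S6_HIFDMA_GMACTX,
			0,	/* prio: highest */
			-1,	/* periphxfer: no peripheral request line */
			1, 1,	/* increment src and dst addresses */
			0,	/* comchunk: scatter/gather disabled */
			0, 0,	/* srcskip, dstskip unused */
			7,	/* burstsize: "7 for everything else" */
			-1,	/* bandwidthconserve: disabled */
			0,	/* lowwmark */
			0,	/* no timestamp */
			1);	/* enable immediately */
}
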
diff --git a/arch/xtensa/variants/s6000/include/variant/gpio.h b/arch/xtensa/variants/s6000/include/variant/gpio.h
index 8327f62..8484ab0 100644
--- a/arch/xtensa/variants/s6000/include/variant/gpio.h
+++ b/arch/xtensa/variants/s6000/include/variant/gpio.h
@@ -1,6 +1,6 @@
#ifndef _XTENSA_VARIANT_S6000_GPIO_H
#define _XTENSA_VARIANT_S6000_GPIO_H
-extern int s6_gpio_init(void);
+extern int s6_gpio_init(u32 afsel);
#endif /* _XTENSA_VARIANT_S6000_GPIO_H */
diff --git a/arch/xtensa/variants/s6000/include/variant/irq.h b/arch/xtensa/variants/s6000/include/variant/irq.h
index fa031cb..97d6fc4 100644
--- a/arch/xtensa/variants/s6000/include/variant/irq.h
+++ b/arch/xtensa/variants/s6000/include/variant/irq.h
@@ -1,9 +1,9 @@
-#ifndef __XTENSA_S6000_IRQ_H
-#define __XTENSA_S6000_IRQ_H
+#ifndef _XTENSA_S6000_IRQ_H
+#define _XTENSA_S6000_IRQ_H
#define NO_IRQ (-1)
+#define VARIANT_NR_IRQS 8 /* GPIO interrupts */
extern void variant_irq_enable(unsigned int irq);
-extern void variant_irq_disable(unsigned int irq);
#endif /* __XTENSA_S6000_IRQ_H */
diff --git a/block/bsg.c b/block/bsg.c
index 54106f0..e7d4752 100644
--- a/block/bsg.c
+++ b/block/bsg.c
@@ -315,7 +315,6 @@
blk_put_request(rq);
if (next_rq) {
blk_rq_unmap_user(next_rq->bio);
- next_rq->bio = NULL;
blk_put_request(next_rq);
}
return ERR_PTR(ret);
@@ -449,7 +448,6 @@
hdr->dout_resid = rq->resid_len;
hdr->din_resid = rq->next_rq->resid_len;
blk_rq_unmap_user(bidi_bio);
- rq->next_rq->bio = NULL;
blk_put_request(rq->next_rq);
} else if (rq_data_dir(rq) == READ)
hdr->din_resid = rq->resid_len;
@@ -468,7 +466,6 @@
blk_rq_unmap_user(bio);
if (rq->cmd != rq->__cmd)
kfree(rq->cmd);
- rq->bio = NULL;
blk_put_request(rq);
return ret;
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index 431f8b4..7ec7d88 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -266,6 +266,7 @@
config ACPI_PCI_SLOT
tristate "PCI slot detection driver"
+ depends on SYSFS
default n
help
This driver creates entries in /sys/bus/pci/slots/ for all PCI
diff --git a/drivers/acpi/ac.c b/drivers/acpi/ac.c
index 88e42ab..0df8fcb 100644
--- a/drivers/acpi/ac.c
+++ b/drivers/acpi/ac.c
@@ -61,6 +61,7 @@
static int acpi_ac_add(struct acpi_device *device);
static int acpi_ac_remove(struct acpi_device *device, int type);
static int acpi_ac_resume(struct acpi_device *device);
+static void acpi_ac_notify(struct acpi_device *device, u32 event);
static const struct acpi_device_id ac_device_ids[] = {
{"ACPI0003", 0},
@@ -72,10 +73,12 @@
.name = "ac",
.class = ACPI_AC_CLASS,
.ids = ac_device_ids,
+ .flags = ACPI_DRIVER_ALL_NOTIFY_EVENTS,
.ops = {
.add = acpi_ac_add,
.remove = acpi_ac_remove,
.resume = acpi_ac_resume,
+ .notify = acpi_ac_notify,
},
};
@@ -220,16 +223,14 @@
Driver Model
-------------------------------------------------------------------------- */
-static void acpi_ac_notify(acpi_handle handle, u32 event, void *data)
+static void acpi_ac_notify(struct acpi_device *device, u32 event)
{
- struct acpi_ac *ac = data;
- struct acpi_device *device = NULL;
+ struct acpi_ac *ac = acpi_driver_data(device);
if (!ac)
return;
- device = ac->device;
switch (event) {
default:
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
@@ -253,7 +254,6 @@
static int acpi_ac_add(struct acpi_device *device)
{
int result = 0;
- acpi_status status = AE_OK;
struct acpi_ac *ac = NULL;
@@ -286,13 +286,6 @@
ac->charger.get_property = get_ac_property;
power_supply_register(&ac->device->dev, &ac->charger);
#endif
- status = acpi_install_notify_handler(device->handle,
- ACPI_ALL_NOTIFY, acpi_ac_notify,
- ac);
- if (ACPI_FAILURE(status)) {
- result = -ENODEV;
- goto end;
- }
printk(KERN_INFO PREFIX "%s [%s] (%s)\n",
acpi_device_name(device), acpi_device_bid(device),
@@ -328,7 +321,6 @@
static int acpi_ac_remove(struct acpi_device *device, int type)
{
- acpi_status status = AE_OK;
struct acpi_ac *ac = NULL;
@@ -337,8 +329,6 @@
ac = acpi_driver_data(device);
- status = acpi_remove_notify_handler(device->handle,
- ACPI_ALL_NOTIFY, acpi_ac_notify);
#ifdef CONFIG_ACPI_SYSFS_POWER
if (ac->charger.dev)
power_supply_unregister(&ac->charger);
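
The ac.c change above (and the battery.c change that follows) are the same conversion: instead of installing and removing an ACPI notify handler by hand, a driver now sets ACPI_DRIVER_ALL_NOTIFY_EVENTS and provides a .notify callback, and the ACPI core delivers events with the struct acpi_device already resolved. A minimal skeleton of the pattern, with hypothetical "foo" names:

/* Skeleton only; struct acpi_foo, foo_device_ids and the add/remove
 * callbacks are hypothetical. */
static void acpi_foo_notify(struct acpi_device *device, u32 event)
{
	struct acpi_foo *foo = acpi_driver_data(device);

	if (!foo)
		return;
	/* handle the event, then optionally forward it to user space */
	acpi_bus_generate_proc_event(device, event, 0);
}

static struct acpi_driver acpi_foo_driver = {
	.name  = "foo",
	.class = "foo_class",
	.ids   = foo_device_ids,
	.flags = ACPI_DRIVER_ALL_NOTIFY_EVENTS,
	.ops = {
		.add    = acpi_foo_add,
		.remove = acpi_foo_remove,
		.notify = acpi_foo_notify,
	},
};
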
diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
index b0de631..58b4517 100644
--- a/drivers/acpi/battery.c
+++ b/drivers/acpi/battery.c
@@ -796,13 +796,12 @@
Driver Interface
-------------------------------------------------------------------------- */
-static void acpi_battery_notify(acpi_handle handle, u32 event, void *data)
+static void acpi_battery_notify(struct acpi_device *device, u32 event)
{
- struct acpi_battery *battery = data;
- struct acpi_device *device;
+ struct acpi_battery *battery = acpi_driver_data(device);
+
if (!battery)
return;
- device = battery->device;
acpi_battery_update(battery);
acpi_bus_generate_proc_event(device, event,
acpi_battery_present(battery));
@@ -819,7 +818,6 @@
static int acpi_battery_add(struct acpi_device *device)
{
int result = 0;
- acpi_status status = 0;
struct acpi_battery *battery = NULL;
if (!device)
return -EINVAL;
@@ -834,22 +832,12 @@
acpi_battery_update(battery);
#ifdef CONFIG_ACPI_PROCFS_POWER
result = acpi_battery_add_fs(device);
- if (result)
- goto end;
#endif
- status = acpi_install_notify_handler(device->handle,
- ACPI_ALL_NOTIFY,
- acpi_battery_notify, battery);
- if (ACPI_FAILURE(status)) {
- ACPI_EXCEPTION((AE_INFO, status, "Installing notify handler"));
- result = -ENODEV;
- goto end;
- }
- printk(KERN_INFO PREFIX "%s Slot [%s] (battery %s)\n",
- ACPI_BATTERY_DEVICE_NAME, acpi_device_bid(device),
- device->status.battery_present ? "present" : "absent");
- end:
- if (result) {
+ if (!result) {
+ printk(KERN_INFO PREFIX "%s Slot [%s] (battery %s)\n",
+ ACPI_BATTERY_DEVICE_NAME, acpi_device_bid(device),
+ device->status.battery_present ? "present" : "absent");
+ } else {
#ifdef CONFIG_ACPI_PROCFS_POWER
acpi_battery_remove_fs(device);
#endif
@@ -860,15 +848,11 @@
static int acpi_battery_remove(struct acpi_device *device, int type)
{
- acpi_status status = 0;
struct acpi_battery *battery = NULL;
if (!device || !acpi_driver_data(device))
return -EINVAL;
battery = acpi_driver_data(device);
- status = acpi_remove_notify_handler(device->handle,
- ACPI_ALL_NOTIFY,
- acpi_battery_notify);
#ifdef CONFIG_ACPI_PROCFS_POWER
acpi_battery_remove_fs(device);
#endif
@@ -896,10 +880,12 @@
.name = "battery",
.class = ACPI_BATTERY_CLASS,
.ids = battery_device_ids,
+ .flags = ACPI_DRIVER_ALL_NOTIFY_EVENTS,
.ops = {
.add = acpi_battery_add,
.resume = acpi_battery_resume,
.remove = acpi_battery_remove,
+ .notify = acpi_battery_notify,
},
};
diff --git a/drivers/acpi/blacklist.c b/drivers/acpi/blacklist.c
index 09c6980..f6baa77 100644
--- a/drivers/acpi/blacklist.c
+++ b/drivers/acpi/blacklist.c
@@ -192,6 +192,22 @@
DMI_MATCH(DMI_PRODUCT_NAME, "ESPRIMO Mobile V5505"),
},
},
+ {
+ .callback = dmi_disable_osi_vista,
+ .ident = "Sony VGN-NS10J_S",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "VGN-NS10J_S"),
+ },
+ },
+ {
+ .callback = dmi_disable_osi_vista,
+ .ident = "Sony VGN-SR290J",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Sony VGN-SR290J"),
+ },
+ },
/*
* BIOS invocation of _OSI(Linux) is almost always a BIOS bug.
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
index ae862f1..2876fc7 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -450,18 +450,16 @@
Notification Handling
-------------------------------------------------------------------------- */
-static int
-acpi_bus_check_device(struct acpi_device *device, int *status_changed)
+static void acpi_bus_check_device(acpi_handle handle)
{
- acpi_status status = 0;
+ struct acpi_device *device;
+ acpi_status status;
struct acpi_device_status old_status;
-
+ if (acpi_bus_get_device(handle, &device))
+ return;
if (!device)
- return -EINVAL;
-
- if (status_changed)
- *status_changed = 0;
+ return;
old_status = device->status;
@@ -471,22 +469,15 @@
*/
if (device->parent && !device->parent->status.present) {
device->status = device->parent->status;
- if (STRUCT_TO_INT(old_status) != STRUCT_TO_INT(device->status)) {
- if (status_changed)
- *status_changed = 1;
- }
- return 0;
+ return;
}
status = acpi_bus_get_status(device);
if (ACPI_FAILURE(status))
- return -ENODEV;
+ return;
if (STRUCT_TO_INT(old_status) == STRUCT_TO_INT(device->status))
- return 0;
-
- if (status_changed)
- *status_changed = 1;
+ return;
/*
* Device Insertion/Removal
@@ -498,33 +489,17 @@
ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Device removal detected\n"));
/* TBD: Handle device removal */
}
-
- return 0;
}
-static int acpi_bus_check_scope(struct acpi_device *device)
+static void acpi_bus_check_scope(acpi_handle handle)
{
- int result = 0;
- int status_changed = 0;
-
-
- if (!device)
- return -EINVAL;
-
/* Status Change? */
- result = acpi_bus_check_device(device, &status_changed);
- if (result)
- return result;
-
- if (!status_changed)
- return 0;
+ acpi_bus_check_device(handle);
/*
* TBD: Enumerate child devices within this device's scope and
* run acpi_bus_check_device()'s on them.
*/
-
- return 0;
}
static BLOCKING_NOTIFIER_HEAD(acpi_bus_notify_list);
@@ -547,22 +522,19 @@
*/
static void acpi_bus_notify(acpi_handle handle, u32 type, void *data)
{
- int result = 0;
struct acpi_device *device = NULL;
+ struct acpi_driver *driver;
+
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Notification %#02x to handle %p\n",
+ type, handle));
blocking_notifier_call_chain(&acpi_bus_notify_list,
type, (void *)handle);
- if (acpi_bus_get_device(handle, &device))
- return;
-
switch (type) {
case ACPI_NOTIFY_BUS_CHECK:
- ACPI_DEBUG_PRINT((ACPI_DB_INFO,
- "Received BUS CHECK notification for device [%s]\n",
- device->pnp.bus_id));
- result = acpi_bus_check_scope(device);
+ acpi_bus_check_scope(handle);
/*
* TBD: We'll need to outsource certain events to non-ACPI
* drivers via the device manager (device.c).
@@ -570,10 +542,7 @@
break;
case ACPI_NOTIFY_DEVICE_CHECK:
- ACPI_DEBUG_PRINT((ACPI_DB_INFO,
- "Received DEVICE CHECK notification for device [%s]\n",
- device->pnp.bus_id));
- result = acpi_bus_check_device(device, NULL);
+ acpi_bus_check_device(handle);
/*
* TBD: We'll need to outsource certain events to non-ACPI
* drivers via the device manager (device.c).
@@ -581,44 +550,26 @@
break;
case ACPI_NOTIFY_DEVICE_WAKE:
- ACPI_DEBUG_PRINT((ACPI_DB_INFO,
- "Received DEVICE WAKE notification for device [%s]\n",
- device->pnp.bus_id));
/* TBD */
break;
case ACPI_NOTIFY_EJECT_REQUEST:
- ACPI_DEBUG_PRINT((ACPI_DB_INFO,
- "Received EJECT REQUEST notification for device [%s]\n",
- device->pnp.bus_id));
/* TBD */
break;
case ACPI_NOTIFY_DEVICE_CHECK_LIGHT:
- ACPI_DEBUG_PRINT((ACPI_DB_INFO,
- "Received DEVICE CHECK LIGHT notification for device [%s]\n",
- device->pnp.bus_id));
/* TBD: Exactly what does 'light' mean? */
break;
case ACPI_NOTIFY_FREQUENCY_MISMATCH:
- ACPI_DEBUG_PRINT((ACPI_DB_INFO,
- "Received FREQUENCY MISMATCH notification for device [%s]\n",
- device->pnp.bus_id));
/* TBD */
break;
case ACPI_NOTIFY_BUS_MODE_MISMATCH:
- ACPI_DEBUG_PRINT((ACPI_DB_INFO,
- "Received BUS MODE MISMATCH notification for device [%s]\n",
- device->pnp.bus_id));
/* TBD */
break;
case ACPI_NOTIFY_POWER_FAULT:
- ACPI_DEBUG_PRINT((ACPI_DB_INFO,
- "Received POWER FAULT notification for device [%s]\n",
- device->pnp.bus_id));
/* TBD */
break;
@@ -629,7 +580,13 @@
break;
}
- return;
+ acpi_bus_get_device(handle, &device);
+ if (device) {
+ driver = device->driver;
+ if (driver && driver->ops.notify &&
+ (driver->flags & ACPI_DRIVER_ALL_NOTIFY_EVENTS))
+ driver->ops.notify(device, type);
+ }
}
/* --------------------------------------------------------------------------
diff --git a/drivers/acpi/glue.c b/drivers/acpi/glue.c
index 8bd2c2a..a8a5c29 100644
--- a/drivers/acpi/glue.c
+++ b/drivers/acpi/glue.c
@@ -140,46 +140,6 @@
EXPORT_SYMBOL(acpi_get_physical_device);
-/* ToDo: When a PCI bridge is found, return the PCI device behind the bridge
- * This should work in general, but did not on a Lenovo T61 for the
- * graphics card. But this must be fixed when the PCI device is
- * bound and the kernel device struct is attached to the acpi device
- * Note: A success call will increase reference count by one
- * Do call put_device(dev) on the returned device then
- */
-struct device *acpi_get_physical_pci_device(acpi_handle handle)
-{
- struct device *dev;
- long long device_id;
- acpi_status status;
-
- status =
- acpi_evaluate_integer(handle, "_ADR", NULL, &device_id);
-
- if (ACPI_FAILURE(status))
- return NULL;
-
- /* We need to attempt to determine whether the _ADR refers to a
- PCI device or not. There's no terribly good way to do this,
- so the best we can hope for is to assume that there'll never
- be a device in the host bridge */
- if (device_id >= 0x10000) {
- /* It looks like a PCI device. Does it exist? */
- dev = acpi_get_physical_device(handle);
- } else {
- /* It doesn't look like a PCI device. Does its parent
- exist? */
- acpi_handle phandle;
- if (acpi_get_parent(handle, &phandle))
- return NULL;
- dev = acpi_get_physical_device(phandle);
- }
- if (!dev)
- return NULL;
- return dev;
-}
-EXPORT_SYMBOL(acpi_get_physical_pci_device);
-
static int acpi_bind_one(struct device *dev, acpi_handle handle)
{
struct acpi_device *acpi_dev;
diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
index d916bea..7167071 100644
--- a/drivers/acpi/osl.c
+++ b/drivers/acpi/osl.c
@@ -79,6 +79,7 @@
static void *acpi_irq_context;
static struct workqueue_struct *kacpid_wq;
static struct workqueue_struct *kacpi_notify_wq;
+static struct workqueue_struct *kacpi_hotplug_wq;
struct acpi_res_list {
resource_size_t start;
@@ -192,8 +193,10 @@
{
kacpid_wq = create_singlethread_workqueue("kacpid");
kacpi_notify_wq = create_singlethread_workqueue("kacpi_notify");
+ kacpi_hotplug_wq = create_singlethread_workqueue("kacpi_hotplug");
BUG_ON(!kacpid_wq);
BUG_ON(!kacpi_notify_wq);
+ BUG_ON(!kacpi_hotplug_wq);
return AE_OK;
}
@@ -206,6 +209,7 @@
destroy_workqueue(kacpid_wq);
destroy_workqueue(kacpi_notify_wq);
+ destroy_workqueue(kacpi_hotplug_wq);
return AE_OK;
}
@@ -716,6 +720,7 @@
acpi_status status = AE_OK;
struct acpi_os_dpc *dpc;
struct workqueue_struct *queue;
+ work_func_t func;
int ret;
ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
"Scheduling function [%p(%p)] for deferred execution.\n",
@@ -740,15 +745,17 @@
dpc->function = function;
dpc->context = context;
- if (!hp) {
- INIT_WORK(&dpc->work, acpi_os_execute_deferred);
- queue = (type == OSL_NOTIFY_HANDLER) ?
- kacpi_notify_wq : kacpid_wq;
- ret = queue_work(queue, &dpc->work);
- } else {
- INIT_WORK(&dpc->work, acpi_os_execute_hp_deferred);
- ret = schedule_work(&dpc->work);
- }
+ /*
+ * We can't run hotplug code in keventd_wq/kacpid_wq/kacpid_notify_wq
+ * because the hotplug code may call driver .remove() functions,
+ * which invoke flush_scheduled_work/acpi_os_wait_events_complete
+ * to flush these workqueues.
+ */
+ queue = hp ? kacpi_hotplug_wq :
+ (type == OSL_NOTIFY_HANDLER ? kacpi_notify_wq : kacpid_wq);
+ func = hp ? acpi_os_execute_hp_deferred : acpi_os_execute_deferred;
+ INIT_WORK(&dpc->work, func);
+ ret = queue_work(queue, &dpc->work);
if (!ret) {
printk(KERN_ERR PREFIX
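The new kacpi_hotplug_wq is reached through the hp path of the code above; later in this series (see scan.c below) hotplug work is queued via acpi_os_hotplug_execute(). A minimal sketch with a hypothetical handler:

	/* Sketch only: defer hotplug work onto kacpi_hotplug_wq */
	static void my_hotplug_fn(void *context)
	{
		acpi_handle handle = context;
		struct acpi_device *device;

		if (acpi_bus_get_device(handle, &device))
			return;
		/* may call driver .remove() and flush kacpid_wq/kacpi_notify_wq
		 * without deadlocking, since this runs on kacpi_hotplug_wq */
	}

	/* caller side */
	acpi_os_hotplug_execute(my_hotplug_fn, handle);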
diff --git a/drivers/acpi/pci_bind.c b/drivers/acpi/pci_bind.c
index bc46de3..a5a77b7 100644
--- a/drivers/acpi/pci_bind.c
+++ b/drivers/acpi/pci_bind.c
@@ -24,12 +24,7 @@
*/
#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/init.h>
#include <linux/types.h>
-#include <linux/proc_fs.h>
-#include <linux/spinlock.h>
-#include <linux/pm.h>
#include <linux/pci.h>
#include <linux/acpi.h>
#include <acpi/acpi_bus.h>
@@ -38,310 +33,76 @@
#define _COMPONENT ACPI_PCI_COMPONENT
ACPI_MODULE_NAME("pci_bind");
-struct acpi_pci_data {
- struct acpi_pci_id id;
+static int acpi_pci_unbind(struct acpi_device *device)
+{
+ struct pci_dev *dev;
+
+ dev = acpi_get_pci_dev(device->handle);
+ if (!dev || !dev->subordinate)
+ goto out;
+
+ acpi_pci_irq_del_prt(dev->subordinate);
+
+ device->ops.bind = NULL;
+ device->ops.unbind = NULL;
+
+out:
+ pci_dev_put(dev);
+ return 0;
+}
+
+static int acpi_pci_bind(struct acpi_device *device)
+{
+ acpi_status status;
+ acpi_handle handle;
struct pci_bus *bus;
struct pci_dev *dev;
-};
-static int acpi_pci_unbind(struct acpi_device *device);
-
-static void acpi_pci_data_handler(acpi_handle handle, u32 function,
- void *context)
-{
-
- /* TBD: Anything we need to do here? */
-
- return;
-}
-
-/**
- * acpi_get_pci_id
- * ------------------
- * This function is used by the ACPI Interpreter (a.k.a. Core Subsystem)
- * to resolve PCI information for ACPI-PCI devices defined in the namespace.
- * This typically occurs when resolving PCI operation region information.
- */
-acpi_status acpi_get_pci_id(acpi_handle handle, struct acpi_pci_id *id)
-{
- int result = 0;
- acpi_status status = AE_OK;
- struct acpi_device *device = NULL;
- struct acpi_pci_data *data = NULL;
-
-
- if (!id)
- return AE_BAD_PARAMETER;
-
- result = acpi_bus_get_device(handle, &device);
- if (result) {
- printk(KERN_ERR PREFIX
- "Invalid ACPI Bus context for device %s\n",
- acpi_device_bid(device));
- return AE_NOT_EXIST;
- }
-
- status = acpi_get_data(handle, acpi_pci_data_handler, (void **)&data);
- if (ACPI_FAILURE(status) || !data) {
- ACPI_EXCEPTION((AE_INFO, status,
- "Invalid ACPI-PCI context for device %s",
- acpi_device_bid(device)));
- return status;
- }
-
- *id = data->id;
+ dev = acpi_get_pci_dev(device->handle);
+ if (!dev)
+ return 0;
/*
- id->segment = data->id.segment;
- id->bus = data->id.bus;
- id->device = data->id.device;
- id->function = data->id.function;
+ * Install the 'bind' function to facilitate callbacks for
+ * children of the P2P bridge.
*/
-
- ACPI_DEBUG_PRINT((ACPI_DB_INFO,
- "Device %s has PCI address %04x:%02x:%02x.%d\n",
- acpi_device_bid(device), id->segment, id->bus,
- id->device, id->function));
-
- return AE_OK;
-}
-
-EXPORT_SYMBOL(acpi_get_pci_id);
-
-int acpi_pci_bind(struct acpi_device *device)
-{
- int result = 0;
- acpi_status status;
- struct acpi_pci_data *data;
- struct acpi_pci_data *pdata;
- struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
- acpi_handle handle;
-
- if (!device || !device->parent)
- return -EINVAL;
-
- data = kzalloc(sizeof(struct acpi_pci_data), GFP_KERNEL);
- if (!data)
- return -ENOMEM;
-
- status = acpi_get_name(device->handle, ACPI_FULL_PATHNAME, &buffer);
- if (ACPI_FAILURE(status)) {
- kfree(data);
- return -ENODEV;
- }
-
- ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Binding PCI device [%s]...\n",
- (char *)buffer.pointer));
-
- /*
- * Segment & Bus
- * -------------
- * These are obtained via the parent device's ACPI-PCI context.
- */
- status = acpi_get_data(device->parent->handle, acpi_pci_data_handler,
- (void **)&pdata);
- if (ACPI_FAILURE(status) || !pdata || !pdata->bus) {
- ACPI_EXCEPTION((AE_INFO, status,
- "Invalid ACPI-PCI context for parent device %s",
- acpi_device_bid(device->parent)));
- result = -ENODEV;
- goto end;
- }
- data->id.segment = pdata->id.segment;
- data->id.bus = pdata->bus->number;
-
- /*
- * Device & Function
- * -----------------
- * These are simply obtained from the device's _ADR method. Note
- * that a value of zero is valid.
- */
- data->id.device = device->pnp.bus_address >> 16;
- data->id.function = device->pnp.bus_address & 0xFFFF;
-
- ACPI_DEBUG_PRINT((ACPI_DB_INFO, "...to %04x:%02x:%02x.%d\n",
- data->id.segment, data->id.bus, data->id.device,
- data->id.function));
-
- /*
- * TBD: Support slot devices (e.g. function=0xFFFF).
- */
-
- /*
- * Locate PCI Device
- * -----------------
- * Locate matching device in PCI namespace. If it doesn't exist
- * this typically means that the device isn't currently inserted
- * (e.g. docking station, port replicator, etc.).
- */
- data->dev = pci_get_slot(pdata->bus,
- PCI_DEVFN(data->id.device, data->id.function));
- if (!data->dev) {
- ACPI_DEBUG_PRINT((ACPI_DB_INFO,
- "Device %04x:%02x:%02x.%d not present in PCI namespace\n",
- data->id.segment, data->id.bus,
- data->id.device, data->id.function));
- result = -ENODEV;
- goto end;
- }
- if (!data->dev->bus) {
- printk(KERN_ERR PREFIX
- "Device %04x:%02x:%02x.%d has invalid 'bus' field\n",
- data->id.segment, data->id.bus,
- data->id.device, data->id.function);
- result = -ENODEV;
- goto end;
- }
-
- /*
- * PCI Bridge?
- * -----------
- * If so, set the 'bus' field and install the 'bind' function to
- * facilitate callbacks for all of its children.
- */
- if (data->dev->subordinate) {
+ if (dev->subordinate) {
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
"Device %04x:%02x:%02x.%d is a PCI bridge\n",
- data->id.segment, data->id.bus,
- data->id.device, data->id.function));
- data->bus = data->dev->subordinate;
+ pci_domain_nr(dev->bus), dev->bus->number,
+ PCI_SLOT(dev->devfn), PCI_FUNC(dev->devfn)));
device->ops.bind = acpi_pci_bind;
device->ops.unbind = acpi_pci_unbind;
}
/*
- * Attach ACPI-PCI Context
- * -----------------------
- * Thus binding the ACPI and PCI devices.
- */
- status = acpi_attach_data(device->handle, acpi_pci_data_handler, data);
- if (ACPI_FAILURE(status)) {
- ACPI_EXCEPTION((AE_INFO, status,
- "Unable to attach ACPI-PCI context to device %s",
- acpi_device_bid(device)));
- result = -ENODEV;
- goto end;
- }
-
- /*
- * PCI Routing Table
- * -----------------
- * Evaluate and parse _PRT, if exists. This code is independent of
- * PCI bridges (above) to allow parsing of _PRT objects within the
- * scope of non-bridge devices. Note that _PRTs within the scope of
- * a PCI bridge assume the bridge's subordinate bus number.
+ * Evaluate and parse _PRT, if exists. This code allows parsing of
+ * _PRT objects within the scope of non-bridge devices. Note that
+ * _PRTs within the scope of a PCI bridge assume the bridge's
+ * subordinate bus number.
*
* TBD: Can _PRTs exist within the scope of non-bridge PCI devices?
*/
status = acpi_get_handle(device->handle, METHOD_NAME__PRT, &handle);
- if (ACPI_SUCCESS(status)) {
- if (data->bus) /* PCI-PCI bridge */
- acpi_pci_irq_add_prt(device->handle, data->id.segment,
- data->bus->number);
- else /* non-bridge PCI device */
- acpi_pci_irq_add_prt(device->handle, data->id.segment,
- data->id.bus);
- }
-
- end:
- kfree(buffer.pointer);
- if (result) {
- pci_dev_put(data->dev);
- kfree(data);
- }
- return result;
-}
-
-static int acpi_pci_unbind(struct acpi_device *device)
-{
- int result = 0;
- acpi_status status;
- struct acpi_pci_data *data;
- struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
-
-
- if (!device || !device->parent)
- return -EINVAL;
-
- status = acpi_get_name(device->handle, ACPI_FULL_PATHNAME, &buffer);
if (ACPI_FAILURE(status))
- return -ENODEV;
+ goto out;
- ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Unbinding PCI device [%s]...\n",
- (char *) buffer.pointer));
- kfree(buffer.pointer);
+ if (dev->subordinate)
+ bus = dev->subordinate;
+ else
+ bus = dev->bus;
- status =
- acpi_get_data(device->handle, acpi_pci_data_handler,
- (void **)&data);
- if (ACPI_FAILURE(status)) {
- result = -ENODEV;
- goto end;
- }
+ acpi_pci_irq_add_prt(device->handle, bus);
- status = acpi_detach_data(device->handle, acpi_pci_data_handler);
- if (ACPI_FAILURE(status)) {
- ACPI_EXCEPTION((AE_INFO, status,
- "Unable to detach data from device %s",
- acpi_device_bid(device)));
- result = -ENODEV;
- goto end;
- }
- if (data->dev->subordinate) {
- acpi_pci_irq_del_prt(data->id.segment, data->bus->number);
- }
- pci_dev_put(data->dev);
- kfree(data);
-
- end:
- return result;
+out:
+ pci_dev_put(dev);
+ return 0;
}
-int
-acpi_pci_bind_root(struct acpi_device *device,
- struct acpi_pci_id *id, struct pci_bus *bus)
+int acpi_pci_bind_root(struct acpi_device *device)
{
- int result = 0;
- acpi_status status;
- struct acpi_pci_data *data = NULL;
- struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
-
- if (!device || !id || !bus) {
- return -EINVAL;
- }
-
- data = kzalloc(sizeof(struct acpi_pci_data), GFP_KERNEL);
- if (!data)
- return -ENOMEM;
-
- data->id = *id;
- data->bus = bus;
device->ops.bind = acpi_pci_bind;
device->ops.unbind = acpi_pci_unbind;
- status = acpi_get_name(device->handle, ACPI_FULL_PATHNAME, &buffer);
- if (ACPI_FAILURE(status)) {
- kfree (data);
- return -ENODEV;
- }
-
- ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Binding PCI root bridge [%s] to "
- "%04x:%02x\n", (char *)buffer.pointer,
- id->segment, id->bus));
-
- status = acpi_attach_data(device->handle, acpi_pci_data_handler, data);
- if (ACPI_FAILURE(status)) {
- ACPI_EXCEPTION((AE_INFO, status,
- "Unable to attach ACPI-PCI context to device %s",
- (char *)buffer.pointer));
- result = -ENODEV;
- goto end;
- }
-
- end:
- kfree(buffer.pointer);
- if (result != 0)
- kfree(data);
-
- return result;
+ return 0;
}
diff --git a/drivers/acpi/pci_irq.c b/drivers/acpi/pci_irq.c
index 2faa9e2..b794eb8 100644
--- a/drivers/acpi/pci_irq.c
+++ b/drivers/acpi/pci_irq.c
@@ -182,7 +182,7 @@
}
}
-static int acpi_pci_irq_add_entry(acpi_handle handle, int segment, int bus,
+static int acpi_pci_irq_add_entry(acpi_handle handle, struct pci_bus *bus,
struct acpi_pci_routing_table *prt)
{
struct acpi_prt_entry *entry;
@@ -196,8 +196,8 @@
* 1=INTA, 2=INTB. We use the PCI encoding throughout, so convert
* it here.
*/
- entry->id.segment = segment;
- entry->id.bus = bus;
+ entry->id.segment = pci_domain_nr(bus);
+ entry->id.bus = bus->number;
entry->id.device = (prt->address >> 16) & 0xFFFF;
entry->pin = prt->pin + 1;
@@ -242,7 +242,7 @@
return 0;
}
-int acpi_pci_irq_add_prt(acpi_handle handle, int segment, int bus)
+int acpi_pci_irq_add_prt(acpi_handle handle, struct pci_bus *bus)
{
acpi_status status;
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
@@ -271,7 +271,7 @@
entry = buffer.pointer;
while (entry && (entry->length > 0)) {
- acpi_pci_irq_add_entry(handle, segment, bus, entry);
+ acpi_pci_irq_add_entry(handle, bus, entry);
entry = (struct acpi_pci_routing_table *)
((unsigned long)entry + entry->length);
}
@@ -280,16 +280,17 @@
return 0;
}
-void acpi_pci_irq_del_prt(int segment, int bus)
+void acpi_pci_irq_del_prt(struct pci_bus *bus)
{
struct acpi_prt_entry *entry, *tmp;
printk(KERN_DEBUG
"ACPI: Delete PCI Interrupt Routing Table for %04x:%02x\n",
- segment, bus);
+ pci_domain_nr(bus), bus->number);
spin_lock(&acpi_prt_lock);
list_for_each_entry_safe(entry, tmp, &acpi_prt_list, list) {
- if (segment == entry->id.segment && bus == entry->id.bus) {
+ if (pci_domain_nr(bus) == entry->id.segment
+ && bus->number == entry->id.bus) {
list_del(&entry->list);
kfree(entry);
}
diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c
index 196f97d..55b5b90 100644
--- a/drivers/acpi/pci_root.c
+++ b/drivers/acpi/pci_root.c
@@ -63,9 +63,10 @@
struct acpi_pci_root {
struct list_head node;
- struct acpi_device * device;
- struct acpi_pci_id id;
+ struct acpi_device *device;
struct pci_bus *bus;
+ u16 segment;
+ u8 bus_nr;
u32 osc_support_set; /* _OSC state of support bits */
u32 osc_control_set; /* _OSC state of control bits */
@@ -82,7 +83,7 @@
int acpi_pci_register_driver(struct acpi_pci_driver *driver)
{
int n = 0;
- struct list_head *entry;
+ struct acpi_pci_root *root;
struct acpi_pci_driver **pptr = &sub_driver;
while (*pptr)
@@ -92,9 +93,7 @@
if (!driver->add)
return 0;
- list_for_each(entry, &acpi_pci_roots) {
- struct acpi_pci_root *root;
- root = list_entry(entry, struct acpi_pci_root, node);
+ list_for_each_entry(root, &acpi_pci_roots, node) {
driver->add(root->device->handle);
n++;
}
@@ -106,7 +105,7 @@
void acpi_pci_unregister_driver(struct acpi_pci_driver *driver)
{
- struct list_head *entry;
+ struct acpi_pci_root *root;
struct acpi_pci_driver **pptr = &sub_driver;
while (*pptr) {
@@ -120,28 +119,48 @@
if (!driver->remove)
return;
- list_for_each(entry, &acpi_pci_roots) {
- struct acpi_pci_root *root;
- root = list_entry(entry, struct acpi_pci_root, node);
+ list_for_each_entry(root, &acpi_pci_roots, node)
driver->remove(root->device->handle);
- }
}
EXPORT_SYMBOL(acpi_pci_unregister_driver);
acpi_handle acpi_get_pci_rootbridge_handle(unsigned int seg, unsigned int bus)
{
- struct acpi_pci_root *tmp;
+ struct acpi_pci_root *root;
- list_for_each_entry(tmp, &acpi_pci_roots, node) {
- if ((tmp->id.segment == (u16) seg) && (tmp->id.bus == (u16) bus))
- return tmp->device->handle;
- }
+ list_for_each_entry(root, &acpi_pci_roots, node)
+ if ((root->segment == (u16) seg) && (root->bus_nr == (u16) bus))
+ return root->device->handle;
return NULL;
}
EXPORT_SYMBOL_GPL(acpi_get_pci_rootbridge_handle);
+/**
+ * acpi_is_root_bridge - determine whether an ACPI CA node is a PCI root bridge
+ * @handle - the ACPI CA node in question.
+ *
+ * Note: we could make this API take a struct acpi_device * instead, but
+ * for now, it's more convenient to operate on an acpi_handle.
+ */
+int acpi_is_root_bridge(acpi_handle handle)
+{
+ int ret;
+ struct acpi_device *device;
+
+ ret = acpi_bus_get_device(handle, &device);
+ if (ret)
+ return 0;
+
+ ret = acpi_match_device_ids(device, root_device_ids);
+ if (ret)
+ return 0;
+ else
+ return 1;
+}
+EXPORT_SYMBOL_GPL(acpi_is_root_bridge);
+
static acpi_status
get_root_bridge_busnr_callback(struct acpi_resource *resource, void *data)
{
@@ -161,19 +180,22 @@
return AE_OK;
}
-static acpi_status try_get_root_bridge_busnr(acpi_handle handle, int *busnum)
+static acpi_status try_get_root_bridge_busnr(acpi_handle handle,
+ unsigned long long *bus)
{
acpi_status status;
+ int busnum;
- *busnum = -1;
+ busnum = -1;
status =
acpi_walk_resources(handle, METHOD_NAME__CRS,
- get_root_bridge_busnr_callback, busnum);
+ get_root_bridge_busnr_callback, &busnum);
if (ACPI_FAILURE(status))
return status;
/* Check if we really get a bus number from _CRS */
- if (*busnum == -1)
+ if (busnum == -1)
return AE_ERROR;
+ *bus = busnum;
return AE_OK;
}
@@ -298,6 +320,7 @@
static struct acpi_pci_root *acpi_pci_find_root(acpi_handle handle)
{
struct acpi_pci_root *root;
+
list_for_each_entry(root, &acpi_pci_roots, node) {
if (root->device->handle == handle)
return root;
@@ -305,6 +328,87 @@
return NULL;
}
+struct acpi_handle_node {
+ struct list_head node;
+ acpi_handle handle;
+};
+
+/**
+ * acpi_get_pci_dev - convert ACPI CA handle to struct pci_dev
+ * @handle: the handle in question
+ *
+ * Given an ACPI CA handle, the desired PCI device is located in the
+ * list of PCI devices.
+ *
+ * If the device is found, its reference count is increased and this
+ * function returns a pointer to its data structure. The caller must
+ * decrement the reference count by calling pci_dev_put().
+ * If no device is found, %NULL is returned.
+ */
+struct pci_dev *acpi_get_pci_dev(acpi_handle handle)
+{
+ int dev, fn;
+ unsigned long long adr;
+ acpi_status status;
+ acpi_handle phandle;
+ struct pci_bus *pbus;
+ struct pci_dev *pdev = NULL;
+ struct acpi_handle_node *node, *tmp;
+ struct acpi_pci_root *root;
+ LIST_HEAD(device_list);
+
+ /*
+ * Walk up the ACPI CA namespace until we reach a PCI root bridge.
+ */
+ phandle = handle;
+ while (!acpi_is_root_bridge(phandle)) {
+ node = kzalloc(sizeof(struct acpi_handle_node), GFP_KERNEL);
+ if (!node)
+ goto out;
+
+ INIT_LIST_HEAD(&node->node);
+ node->handle = phandle;
+ list_add(&node->node, &device_list);
+
+ status = acpi_get_parent(phandle, &phandle);
+ if (ACPI_FAILURE(status))
+ goto out;
+ }
+
+ root = acpi_pci_find_root(phandle);
+ if (!root)
+ goto out;
+
+ pbus = root->bus;
+
+ /*
+ * Now, walk back down the PCI device tree until we return to our
+ * original handle. Assumes that everything between the PCI root
+ * bridge and the device we're looking for must be a P2P bridge.
+ */
+ list_for_each_entry(node, &device_list, node) {
+ acpi_handle hnd = node->handle;
+ status = acpi_evaluate_integer(hnd, "_ADR", NULL, &adr);
+ if (ACPI_FAILURE(status))
+ goto out;
+ dev = (adr >> 16) & 0xffff;
+ fn = adr & 0xffff;
+
+ pdev = pci_get_slot(pbus, PCI_DEVFN(dev, fn));
+ if (!pdev || hnd == handle)
+ break;
+
+ pbus = pdev->subordinate;
+ pci_dev_put(pdev);
+ }
+out:
+ list_for_each_entry_safe(node, tmp, &device_list, node)
+ kfree(node);
+
+ return pdev;
+}
+EXPORT_SYMBOL_GPL(acpi_get_pci_dev);
+
/**
* acpi_pci_osc_control_set - commit requested control to Firmware
* @handle: acpi_handle for the target ACPI object
@@ -363,31 +467,46 @@
static int __devinit acpi_pci_root_add(struct acpi_device *device)
{
- int result = 0;
- struct acpi_pci_root *root = NULL;
- struct acpi_pci_root *tmp;
- acpi_status status = AE_OK;
- unsigned long long value = 0;
- acpi_handle handle = NULL;
+ unsigned long long segment, bus;
+ acpi_status status;
+ int result;
+ struct acpi_pci_root *root;
+ acpi_handle handle;
struct acpi_device *child;
u32 flags, base_flags;
+ segment = 0;
+ status = acpi_evaluate_integer(device->handle, METHOD_NAME__SEG, NULL,
+ &segment);
+ if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
+ printk(KERN_ERR PREFIX "can't evaluate _SEG\n");
+ return -ENODEV;
+ }
- if (!device)
- return -EINVAL;
+ /* Check _CRS first, then _BBN. If no _BBN, default to zero. */
+ bus = 0;
+ status = try_get_root_bridge_busnr(device->handle, &bus);
+ if (ACPI_FAILURE(status)) {
+ status = acpi_evaluate_integer(device->handle, METHOD_NAME__BBN, NULL, &bus);
+ if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
+ printk(KERN_ERR PREFIX
+ "no bus number in _CRS and can't evaluate _BBN\n");
+ return -ENODEV;
+ }
+ }
root = kzalloc(sizeof(struct acpi_pci_root), GFP_KERNEL);
if (!root)
return -ENOMEM;
- INIT_LIST_HEAD(&root->node);
+ INIT_LIST_HEAD(&root->node);
root->device = device;
+ root->segment = segment & 0xFFFF;
+ root->bus_nr = bus & 0xFF;
strcpy(acpi_device_name(device), ACPI_PCI_ROOT_DEVICE_NAME);
strcpy(acpi_device_class(device), ACPI_PCI_ROOT_CLASS);
device->driver_data = root;
- device->ops.bind = acpi_pci_bind;
-
/*
* All supported architectures that use ACPI have support for
* PCI domains, so we indicate this in _OSC support capabilities.
@@ -395,79 +514,6 @@
flags = base_flags = OSC_PCI_SEGMENT_GROUPS_SUPPORT;
acpi_pci_osc_support(root, flags);
- /*
- * Segment
- * -------
- * Obtained via _SEG, if exists, otherwise assumed to be zero (0).
- */
- status = acpi_evaluate_integer(device->handle, METHOD_NAME__SEG, NULL,
- &value);
- switch (status) {
- case AE_OK:
- root->id.segment = (u16) value;
- break;
- case AE_NOT_FOUND:
- ACPI_DEBUG_PRINT((ACPI_DB_INFO,
- "Assuming segment 0 (no _SEG)\n"));
- root->id.segment = 0;
- break;
- default:
- ACPI_EXCEPTION((AE_INFO, status, "Evaluating _SEG"));
- result = -ENODEV;
- goto end;
- }
-
- /*
- * Bus
- * ---
- * Obtained via _BBN, if exists, otherwise assumed to be zero (0).
- */
- status = acpi_evaluate_integer(device->handle, METHOD_NAME__BBN, NULL,
- &value);
- switch (status) {
- case AE_OK:
- root->id.bus = (u16) value;
- break;
- case AE_NOT_FOUND:
- ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Assuming bus 0 (no _BBN)\n"));
- root->id.bus = 0;
- break;
- default:
- ACPI_EXCEPTION((AE_INFO, status, "Evaluating _BBN"));
- result = -ENODEV;
- goto end;
- }
-
- /* Some systems have wrong _BBN */
- list_for_each_entry(tmp, &acpi_pci_roots, node) {
- if ((tmp->id.segment == root->id.segment)
- && (tmp->id.bus == root->id.bus)) {
- int bus = 0;
- acpi_status status;
-
- printk(KERN_ERR PREFIX
- "Wrong _BBN value, reboot"
- " and use option 'pci=noacpi'\n");
-
- status = try_get_root_bridge_busnr(device->handle, &bus);
- if (ACPI_FAILURE(status))
- break;
- if (bus != root->id.bus) {
- printk(KERN_INFO PREFIX
- "PCI _CRS %d overrides _BBN 0\n", bus);
- root->id.bus = bus;
- }
- break;
- }
- }
- /*
- * Device & Function
- * -----------------
- * Obtained from _ADR (which has already been evaluated for us).
- */
- root->id.device = device->pnp.bus_address >> 16;
- root->id.function = device->pnp.bus_address & 0xFFFF;
-
/*
* TBD: Need PCI interface for enumeration/configuration of roots.
*/
@@ -477,7 +523,7 @@
printk(KERN_INFO PREFIX "%s [%s] (%04x:%02x)\n",
acpi_device_name(device), acpi_device_bid(device),
- root->id.segment, root->id.bus);
+ root->segment, root->bus_nr);
/*
* Scan the Root Bridge
@@ -486,11 +532,11 @@
* PCI namespace does not get created until this call is made (and
* thus the root bridge's pci_dev does not exist).
*/
- root->bus = pci_acpi_scan_root(device, root->id.segment, root->id.bus);
+ root->bus = pci_acpi_scan_root(device, segment, bus);
if (!root->bus) {
printk(KERN_ERR PREFIX
"Bus %04x:%02x not present in PCI namespace\n",
- root->id.segment, root->id.bus);
+ root->segment, root->bus_nr);
result = -ENODEV;
goto end;
}
@@ -500,7 +546,7 @@
* -----------------------
* Thus binding the ACPI and PCI devices.
*/
- result = acpi_pci_bind_root(device, &root->id, root->bus);
+ result = acpi_pci_bind_root(device);
if (result)
goto end;
@@ -511,8 +557,7 @@
*/
status = acpi_get_handle(device->handle, METHOD_NAME__PRT, &handle);
if (ACPI_SUCCESS(status))
- result = acpi_pci_irq_add_prt(device->handle, root->id.segment,
- root->id.bus);
+ result = acpi_pci_irq_add_prt(device->handle, root->bus);
/*
* Scan and bind all _ADR-Based Devices
@@ -531,42 +576,28 @@
if (flags != base_flags)
acpi_pci_osc_support(root, flags);
- end:
- if (result) {
- if (!list_empty(&root->node))
- list_del(&root->node);
- kfree(root);
- }
+ return 0;
+end:
+ if (!list_empty(&root->node))
+ list_del(&root->node);
+ kfree(root);
return result;
}
static int acpi_pci_root_start(struct acpi_device *device)
{
- struct acpi_pci_root *root;
+ struct acpi_pci_root *root = acpi_driver_data(device);
-
- list_for_each_entry(root, &acpi_pci_roots, node) {
- if (root->device == device) {
- pci_bus_add_devices(root->bus);
- return 0;
- }
- }
- return -ENODEV;
+ pci_bus_add_devices(root->bus);
+ return 0;
}
static int acpi_pci_root_remove(struct acpi_device *device, int type)
{
- struct acpi_pci_root *root = NULL;
-
-
- if (!device || !acpi_driver_data(device))
- return -EINVAL;
-
- root = acpi_driver_data(device);
+ struct acpi_pci_root *root = acpi_driver_data(device);
kfree(root);
-
return 0;
}
diff --git a/drivers/acpi/power.c b/drivers/acpi/power.c
index 56665a6..d74365d 100644
--- a/drivers/acpi/power.c
+++ b/drivers/acpi/power.c
@@ -194,7 +194,7 @@
static int acpi_power_on(acpi_handle handle, struct acpi_device *dev)
{
- int result = 0, state;
+ int result = 0;
int found = 0;
acpi_status status = AE_OK;
struct acpi_power_resource *resource = NULL;
@@ -236,18 +236,6 @@
if (ACPI_FAILURE(status))
return -ENODEV;
- if (!acpi_power_nocheck) {
- /*
- * If acpi_power_nocheck is set, it is unnecessary to check
- * the power state after power transition.
- */
- result = acpi_power_get_state(resource->device->handle,
- &state);
- if (result)
- return result;
- if (state != ACPI_POWER_RESOURCE_STATE_ON)
- return -ENOEXEC;
- }
/* Update the power resource's _device_ power state */
resource->device->power.state = ACPI_STATE_D0;
@@ -258,7 +246,7 @@
static int acpi_power_off_device(acpi_handle handle, struct acpi_device *dev)
{
- int result = 0, state;
+ int result = 0;
acpi_status status = AE_OK;
struct acpi_power_resource *resource = NULL;
struct list_head *node, *next;
@@ -293,18 +281,6 @@
if (ACPI_FAILURE(status))
return -ENODEV;
- if (!acpi_power_nocheck) {
- /*
- * If acpi_power_nocheck is set, it is unnecessary to check
- * the power state after power transition.
- */
- result = acpi_power_get_state(handle, &state);
- if (result)
- return result;
- if (state != ACPI_POWER_RESOURCE_STATE_OFF)
- return -ENOEXEC;
- }
-
/* Update the power resource's _device_ power state */
resource->device->power.state = ACPI_STATE_D3;
diff --git a/drivers/acpi/processor_core.c b/drivers/acpi/processor_core.c
index 23f0fb8..84e0f3c 100644
--- a/drivers/acpi/processor_core.c
+++ b/drivers/acpi/processor_core.c
@@ -89,7 +89,7 @@
static const struct acpi_device_id processor_device_ids[] = {
{ACPI_PROCESSOR_OBJECT_HID, 0},
- {ACPI_PROCESSOR_HID, 0},
+ {"ACPI0007", 0},
{"", 0},
};
MODULE_DEVICE_TABLE(acpi, processor_device_ids);
@@ -596,7 +596,21 @@
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
"No bus mastering arbitration control\n"));
- if (!strcmp(acpi_device_hid(device), ACPI_PROCESSOR_HID)) {
+ if (!strcmp(acpi_device_hid(device), ACPI_PROCESSOR_OBJECT_HID)) {
+ /* Declared with "Processor" statement; match ProcessorID */
+ status = acpi_evaluate_object(pr->handle, NULL, NULL, &buffer);
+ if (ACPI_FAILURE(status)) {
+ printk(KERN_ERR PREFIX "Evaluating processor object\n");
+ return -ENODEV;
+ }
+
+ /*
+ * TBD: Synch processor ID (via LAPIC/LSAPIC structures) on SMP.
+ * >>> 'acpi_get_processor_id(acpi_id, &id)' in
+ * arch/xxx/acpi.c
+ */
+ pr->acpi_id = object.processor.proc_id;
+ } else {
/*
* Declared with "Device" statement; match _UID.
* Note that we don't handle string _UIDs yet.
@@ -611,20 +625,6 @@
}
device_declaration = 1;
pr->acpi_id = value;
- } else {
- /* Declared with "Processor" statement; match ProcessorID */
- status = acpi_evaluate_object(pr->handle, NULL, NULL, &buffer);
- if (ACPI_FAILURE(status)) {
- printk(KERN_ERR PREFIX "Evaluating processor object\n");
- return -ENODEV;
- }
-
- /*
- * TBD: Synch processor ID (via LAPIC/LSAPIC structures) on SMP.
- * >>> 'acpi_get_processor_id(acpi_id, &id)' in
- * arch/xxx/acpi.c
- */
- pr->acpi_id = object.processor.proc_id;
}
cpu_index = get_cpu_id(pr->handle, device_declaration, pr->acpi_id);
@@ -649,7 +649,16 @@
return -ENODEV;
}
}
-
+ /*
+ * On some boxes several processors use the same processor bus id.
+ * But they are located in different scopes, for example:
+ * \_SB.SCK0.CPU0
+ * \_SB.SCK1.CPU0
+ * Rename the processor device bus id; the new bus id is generated
+ * in the format "CPU" + processor id (in hex), e.g. "CPUA" for
+ * pr->id == 10.
+ */
+ sprintf(acpi_device_bid(device), "CPU%X", pr->id);
ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Processor [%d:%d]\n", pr->id,
pr->acpi_id));
@@ -731,6 +740,8 @@
/* _PDC call should be done before doing anything else (if reqd.). */
arch_acpi_processor_init_pdc(pr);
acpi_processor_set_pdc(pr);
+ arch_acpi_processor_cleanup_pdc(pr);
+
#ifdef CONFIG_CPU_FREQ
acpi_processor_ppc_has_changed(pr);
#endif
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index 10a2d91..0efa59e 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -139,7 +139,7 @@
* are affected too. We pick the most conservative approach: we assume
* that the local APIC stops in both C2 and C3.
*/
-static void acpi_timer_check_state(int state, struct acpi_processor *pr,
+static void lapic_timer_check_state(int state, struct acpi_processor *pr,
struct acpi_processor_cx *cx)
{
struct acpi_processor_power *pwr = &pr->power;
@@ -162,7 +162,7 @@
pr->power.timer_broadcast_on_state = state;
}
-static void acpi_propagate_timer_broadcast(struct acpi_processor *pr)
+static void lapic_timer_propagate_broadcast(struct acpi_processor *pr)
{
unsigned long reason;
@@ -173,7 +173,7 @@
}
/* Power(C) State timer broadcast control */
-static void acpi_state_timer_broadcast(struct acpi_processor *pr,
+static void lapic_timer_state_broadcast(struct acpi_processor *pr,
struct acpi_processor_cx *cx,
int broadcast)
{
@@ -190,10 +190,10 @@
#else
-static void acpi_timer_check_state(int state, struct acpi_processor *pr,
+static void lapic_timer_check_state(int state, struct acpi_processor *pr,
struct acpi_processor_cx *cstate) { }
-static void acpi_propagate_timer_broadcast(struct acpi_processor *pr) { }
-static void acpi_state_timer_broadcast(struct acpi_processor *pr,
+static void lapic_timer_propagate_broadcast(struct acpi_processor *pr) { }
+static void lapic_timer_state_broadcast(struct acpi_processor *pr,
struct acpi_processor_cx *cx,
int broadcast)
{
@@ -515,7 +515,8 @@
static void acpi_processor_power_verify_c3(struct acpi_processor *pr,
struct acpi_processor_cx *cx)
{
- static int bm_check_flag;
+ static int bm_check_flag = -1;
+ static int bm_control_flag = -1;
if (!cx->address)
@@ -545,12 +546,14 @@
}
/* All the logic here assumes flags.bm_check is same across all CPUs */
- if (!bm_check_flag) {
+ if (bm_check_flag == -1) {
/* Determine whether bm_check is needed based on CPU */
acpi_processor_power_init_bm_check(&(pr->flags), pr->id);
bm_check_flag = pr->flags.bm_check;
+ bm_control_flag = pr->flags.bm_control;
} else {
pr->flags.bm_check = bm_check_flag;
+ pr->flags.bm_control = bm_control_flag;
}
if (pr->flags.bm_check) {
@@ -614,29 +617,25 @@
switch (cx->type) {
case ACPI_STATE_C1:
cx->valid = 1;
- acpi_timer_check_state(i, pr, cx);
break;
case ACPI_STATE_C2:
acpi_processor_power_verify_c2(cx);
- if (cx->valid)
- acpi_timer_check_state(i, pr, cx);
break;
case ACPI_STATE_C3:
acpi_processor_power_verify_c3(pr, cx);
- if (cx->valid)
- acpi_timer_check_state(i, pr, cx);
break;
}
- if (cx->valid)
- tsc_check_state(cx->type);
+ if (!cx->valid)
+ continue;
- if (cx->valid)
- working++;
+ lapic_timer_check_state(i, pr, cx);
+ tsc_check_state(cx->type);
+ working++;
}
- acpi_propagate_timer_broadcast(pr);
+ lapic_timer_propagate_broadcast(pr);
return (working);
}
@@ -839,7 +838,7 @@
return 0;
}
- acpi_state_timer_broadcast(pr, cx, 1);
+ lapic_timer_state_broadcast(pr, cx, 1);
kt1 = ktime_get_real();
acpi_idle_do_entry(cx);
kt2 = ktime_get_real();
@@ -847,7 +846,7 @@
local_irq_enable();
cx->usage++;
- acpi_state_timer_broadcast(pr, cx, 0);
+ lapic_timer_state_broadcast(pr, cx, 0);
return idle_time;
}
@@ -892,7 +891,7 @@
* Must be done before busmaster disable as we might need to
* access HPET !
*/
- acpi_state_timer_broadcast(pr, cx, 1);
+ lapic_timer_state_broadcast(pr, cx, 1);
if (cx->type == ACPI_STATE_C3)
ACPI_FLUSH_CPU_CACHE();
@@ -914,7 +913,7 @@
cx->usage++;
- acpi_state_timer_broadcast(pr, cx, 0);
+ lapic_timer_state_broadcast(pr, cx, 0);
cx->time += sleep_ticks;
return idle_time;
}
@@ -981,7 +980,7 @@
* Must be done before busmaster disable as we might need to
* access HPET !
*/
- acpi_state_timer_broadcast(pr, cx, 1);
+ lapic_timer_state_broadcast(pr, cx, 1);
kt1 = ktime_get_real();
/*
@@ -1026,7 +1025,7 @@
cx->usage++;
- acpi_state_timer_broadcast(pr, cx, 0);
+ lapic_timer_state_broadcast(pr, cx, 0);
cx->time += sleep_ticks;
return idle_time;
}
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index 8ff510b..781435d 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -95,7 +95,7 @@
}
static DEVICE_ATTR(modalias, 0444, acpi_device_modalias_show, NULL);
-static int acpi_bus_hot_remove_device(void *context)
+static void acpi_bus_hot_remove_device(void *context)
{
struct acpi_device *device;
acpi_handle handle = context;
@@ -104,10 +104,10 @@
acpi_status status = AE_OK;
if (acpi_bus_get_device(handle, &device))
- return 0;
+ return;
if (!device)
- return 0;
+ return;
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
"Hot-removing device %s...\n", dev_name(&device->dev)));
@@ -115,7 +115,7 @@
if (acpi_bus_trim(device, 1)) {
printk(KERN_ERR PREFIX
"Removing device failed\n");
- return -1;
+ return;
}
/* power off device */
@@ -142,9 +142,10 @@
*/
status = acpi_evaluate_object(handle, "_EJ0", &arg_list, NULL);
if (ACPI_FAILURE(status))
- return -ENODEV;
+ printk(KERN_WARNING PREFIX
+ "Eject device failed\n");
- return 0;
+ return;
}
static ssize_t
@@ -155,7 +156,6 @@
acpi_status status;
acpi_object_type type = 0;
struct acpi_device *acpi_device = to_acpi_device(d);
- struct task_struct *task;
if ((!count) || (buf[0] != '1')) {
return -EINVAL;
@@ -172,11 +172,7 @@
goto err;
}
- /* remove the device in another thread to fix the deadlock issue */
- task = kthread_run(acpi_bus_hot_remove_device,
- acpi_device->handle, "acpi_hot_remove_device");
- if (IS_ERR(task))
- ret = PTR_ERR(task);
+ acpi_os_hotplug_execute(acpi_bus_hot_remove_device, acpi_device->handle);
err:
return ret;
}
@@ -198,12 +194,12 @@
int result;
result = acpi_get_name(acpi_dev->handle, ACPI_FULL_PATHNAME, &path);
- if(result)
+ if (result)
goto end;
result = sprintf(buf, "%s\n", (char*)path.pointer);
kfree(path.pointer);
- end:
+end:
return result;
}
static DEVICE_ATTR(path, 0444, acpi_device_path_show, NULL);
@@ -217,21 +213,21 @@
/*
* Devices gotten from FADT don't have a "path" attribute
*/
- if(dev->handle) {
+ if (dev->handle) {
result = device_create_file(&dev->dev, &dev_attr_path);
- if(result)
+ if (result)
goto end;
}
- if(dev->flags.hardware_id) {
+ if (dev->flags.hardware_id) {
result = device_create_file(&dev->dev, &dev_attr_hid);
- if(result)
+ if (result)
goto end;
}
- if (dev->flags.hardware_id || dev->flags.compatible_ids){
+ if (dev->flags.hardware_id || dev->flags.compatible_ids) {
result = device_create_file(&dev->dev, &dev_attr_modalias);
- if(result)
+ if (result)
goto end;
}
@@ -242,7 +238,7 @@
status = acpi_get_handle(dev->handle, "_EJ0", &temp);
if (ACPI_SUCCESS(status))
result = device_create_file(&dev->dev, &dev_attr_eject);
- end:
+end:
return result;
}
@@ -262,9 +258,9 @@
if (dev->flags.hardware_id || dev->flags.compatible_ids)
device_remove_file(&dev->dev, &dev_attr_modalias);
- if(dev->flags.hardware_id)
+ if (dev->flags.hardware_id)
device_remove_file(&dev->dev, &dev_attr_hid);
- if(dev->handle)
+ if (dev->handle)
device_remove_file(&dev->dev, &dev_attr_path);
}
/* --------------------------------------------------------------------------
@@ -512,7 +508,7 @@
break;
}
}
- if(!found) {
+ if (!found) {
acpi_device_bus_id = new_bus_id;
strcpy(acpi_device_bus_id->bus_id, device->flags.hardware_id ? device->pnp.hardware_id : "device");
acpi_device_bus_id->instance_no = 0;
@@ -530,22 +526,21 @@
if (device->parent)
device->dev.parent = &parent->dev;
device->dev.bus = &acpi_bus_type;
- device_initialize(&device->dev);
device->dev.release = &acpi_device_release;
- result = device_add(&device->dev);
- if(result) {
- dev_err(&device->dev, "Error adding device\n");
+ result = device_register(&device->dev);
+ if (result) {
+ dev_err(&device->dev, "Error registering device\n");
goto end;
}
result = acpi_device_setup_files(device);
- if(result)
+ if (result)
printk(KERN_ERR PREFIX "Error creating sysfs interface for device %s\n",
dev_name(&device->dev));
device->removal_type = ACPI_BUS_REMOVAL_NORMAL;
return 0;
- end:
+end:
mutex_lock(&acpi_device_lock);
if (device->parent)
list_del(&device->node);
@@ -577,7 +572,7 @@
* @device: the device to add and initialize
* @driver: driver for the device
*
- * Used to initialize a device via its device driver. Called whenever a
+ * Used to initialize a device via its device driver. Called whenever a
* driver is bound to a device. Invokes the driver's add() ops.
*/
static int
@@ -585,7 +580,6 @@
{
int result = 0;
-
if (!device || !driver)
return -EINVAL;
@@ -802,7 +796,7 @@
if (!acpi_match_device_ids(device, button_device_ids))
device->wakeup.flags.run_wake = 1;
- end:
+end:
if (ACPI_FAILURE(status))
device->flags.wake_capable = 0;
return 0;
@@ -1070,7 +1064,7 @@
break;
}
- /*
+ /*
* \_SB
* ----
* Fix for the system root bus device -- the only root-level device.
@@ -1320,7 +1314,7 @@
device->parent->ops.bind(device);
}
- end:
+end:
if (!result)
*child = device;
else {
@@ -1464,7 +1458,6 @@
return result;
}
-
EXPORT_SYMBOL(acpi_bus_add);
int acpi_bus_start(struct acpi_device *device)
@@ -1484,7 +1477,6 @@
}
return result;
}
-
EXPORT_SYMBOL(acpi_bus_start);
int acpi_bus_trim(struct acpi_device *start, int rmdevice)
@@ -1542,7 +1534,6 @@
}
EXPORT_SYMBOL_GPL(acpi_bus_trim);
-
static int acpi_bus_scan_fixed(struct acpi_device *root)
{
int result = 0;
@@ -1610,6 +1601,6 @@
if (result)
acpi_device_unregister(acpi_root, ACPI_BUS_REMOVAL_NORMAL);
- Done:
+Done:
return result;
}
diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c
index 1bdfb37..8851315 100644
--- a/drivers/acpi/video.c
+++ b/drivers/acpi/video.c
@@ -76,6 +76,7 @@
static int brightness_switch_enabled = 1;
module_param(brightness_switch_enabled, bool, 0644);
+static int register_count = 0;
static int acpi_video_bus_add(struct acpi_device *device);
static int acpi_video_bus_remove(struct acpi_device *device, int type);
static int acpi_video_resume(struct acpi_device *device);
@@ -586,6 +587,14 @@
DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5315"),
},
},
+ {
+ .callback = video_set_bqc_offset,
+ .ident = "Acer Aspire 7720",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 7720"),
+ },
+ },
{}
};
@@ -976,6 +985,11 @@
device->backlight->props.max_brightness = device->brightness->count-3;
kfree(name);
+ result = sysfs_create_link(&device->backlight->dev.kobj,
+ &device->dev->dev.kobj, "device");
+ if (result)
+ printk(KERN_ERR PREFIX "Create sysfs link\n");
+
device->cdev = thermal_cooling_device_register("LCD",
device->dev, &video_cooling_ops);
if (IS_ERR(device->cdev))
@@ -1054,15 +1068,15 @@
static int acpi_video_bus_check(struct acpi_video_bus *video)
{
acpi_status status = -ENOENT;
- struct device *dev;
+ struct pci_dev *dev;
if (!video)
return -EINVAL;
- dev = acpi_get_physical_pci_device(video->device->handle);
+ dev = acpi_get_pci_dev(video->device->handle);
if (!dev)
return -ENODEV;
- put_device(dev);
+ pci_dev_put(dev);
/* Since there is no HID, CID and so on for VGA driver, we have
* to check well known required nodes.
@@ -1990,6 +2004,7 @@
status = acpi_remove_notify_handler(device->dev->handle,
ACPI_DEVICE_NOTIFY,
acpi_video_device_notify);
+ sysfs_remove_link(&device->backlight->dev.kobj, "device");
backlight_device_unregister(device->backlight);
if (device->cdev) {
sysfs_remove_link(&device->dev->dev.kobj,
@@ -2318,6 +2333,13 @@
int acpi_video_register(void)
{
int result = 0;
+ if (register_count) {
+ /*
+ * If acpi_video_register() has already been called, don't
+ * register the acpi_video_bus driver again; just return success.
+ */
+ return 0;
+ }
acpi_video_dir = proc_mkdir(ACPI_VIDEO_CLASS, acpi_root_dir);
if (!acpi_video_dir)
@@ -2329,10 +2351,35 @@
return -ENODEV;
}
+ /*
+ * When the acpi_video_bus driver has loaded successfully, mark
+ * it as registered.
+ */
+ register_count = 1;
+
return 0;
}
EXPORT_SYMBOL(acpi_video_register);
+void acpi_video_unregister(void)
+{
+ if (!register_count) {
+ /*
+ * If the acpi_video_bus driver is not registered, there is
+ * nothing to unregister; return directly.
+ */
+ return;
+ }
+ acpi_bus_unregister_driver(&acpi_video_bus);
+
+ remove_proc_entry(ACPI_VIDEO_CLASS, acpi_root_dir);
+
+ register_count = 0;
+
+ return;
+}
+EXPORT_SYMBOL(acpi_video_unregister);
+
/*
* This is kind of nasty. Hardware using Intel chipsets may require
* the video opregion code to be run first in order to initialise
@@ -2350,16 +2397,12 @@
return acpi_video_register();
}
-void acpi_video_exit(void)
+static void __exit acpi_video_exit(void)
{
-
- acpi_bus_unregister_driver(&acpi_video_bus);
-
- remove_proc_entry(ACPI_VIDEO_CLASS, acpi_root_dir);
+ acpi_video_unregister();
return;
}
-EXPORT_SYMBOL(acpi_video_exit);
module_init(acpi_video_init);
module_exit(acpi_video_exit);
diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c
index 0973727..7cd2b63 100644
--- a/drivers/acpi/video_detect.c
+++ b/drivers/acpi/video_detect.c
@@ -10,7 +10,7 @@
* assigned
*
* After PCI devices are glued with ACPI devices
- * acpi_get_physical_pci_device() can be called to identify ACPI graphics
+ * acpi_get_pci_dev() can be called to identify ACPI graphics
* devices for which a real graphics card is plugged in
*
* Now acpi_video_get_capabilities() can be called to check which
@@ -36,6 +36,7 @@
#include <linux/acpi.h>
#include <linux/dmi.h>
+#include <linux/pci.h>
ACPI_MODULE_NAME("video");
#define _COMPONENT ACPI_VIDEO_COMPONENT
@@ -109,7 +110,7 @@
find_video(acpi_handle handle, u32 lvl, void *context, void **rv)
{
long *cap = context;
- struct device *dev;
+ struct pci_dev *dev;
struct acpi_device *acpi_dev;
const struct acpi_device_id video_ids[] = {
@@ -120,10 +121,10 @@
return AE_OK;
if (!acpi_match_device_ids(acpi_dev, video_ids)) {
- dev = acpi_get_physical_pci_device(handle);
+ dev = acpi_get_pci_dev(handle);
if (!dev)
return AE_OK;
- put_device(dev);
+ pci_dev_put(dev);
*cap |= acpi_is_video_device(acpi_dev);
}
return AE_OK;
diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig
index 2aa1908..b17c57f 100644
--- a/drivers/ata/Kconfig
+++ b/drivers/ata/Kconfig
@@ -679,6 +679,14 @@
If unsure, say N.
+config PATA_AT91
+ tristate "PATA support for AT91SAM9260"
+ depends on ARM && ARCH_AT91
+ help
+ This option enables support for IDE devices on the Atmel AT91SAM9260 SoC.
+
+ If unsure, say N.
+
config PATA_OF_PLATFORM
tristate "OpenFirmware platform device PATA support"
depends on PATA_PLATFORM && PPC_OF
diff --git a/drivers/ata/Makefile b/drivers/ata/Makefile
index 1558059..38906f9 100644
--- a/drivers/ata/Makefile
+++ b/drivers/ata/Makefile
@@ -72,6 +72,7 @@
obj-$(CONFIG_PATA_BF54X) += pata_bf54x.o
obj-$(CONFIG_PATA_OCTEON_CF) += pata_octeon_cf.o
obj-$(CONFIG_PATA_PLATFORM) += pata_platform.o
+obj-$(CONFIG_PATA_AT91) += pata_at91.o
obj-$(CONFIG_PATA_OF_PLATFORM) += pata_of_platform.o
obj-$(CONFIG_PATA_ICSIDE) += pata_icside.o
# Should be last but two libata driver
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index ca4d208..045a486 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -125,19 +125,19 @@
static int atapi_enabled = 1;
module_param(atapi_enabled, int, 0444);
-MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on)");
+MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on [default])");
static int atapi_dmadir = 0;
module_param(atapi_dmadir, int, 0444);
-MODULE_PARM_DESC(atapi_dmadir, "Enable ATAPI DMADIR bridge support (0=off, 1=on)");
+MODULE_PARM_DESC(atapi_dmadir, "Enable ATAPI DMADIR bridge support (0=off [default], 1=on)");
int atapi_passthru16 = 1;
module_param(atapi_passthru16, int, 0444);
-MODULE_PARM_DESC(atapi_passthru16, "Enable ATA_16 passthru for ATAPI devices; on by default (0=off, 1=on)");
+MODULE_PARM_DESC(atapi_passthru16, "Enable ATA_16 passthru for ATAPI devices (0=off, 1=on [default])");
int libata_fua = 0;
module_param_named(fua, libata_fua, int, 0444);
-MODULE_PARM_DESC(fua, "FUA support (0=off, 1=on)");
+MODULE_PARM_DESC(fua, "FUA support (0=off [default], 1=on)");
static int ata_ignore_hpa;
module_param_named(ignore_hpa, ata_ignore_hpa, int, 0644);
@@ -153,11 +153,11 @@
int libata_noacpi = 0;
module_param_named(noacpi, libata_noacpi, int, 0444);
-MODULE_PARM_DESC(noacpi, "Disables the use of ACPI in probe/suspend/resume when set");
+MODULE_PARM_DESC(noacpi, "Disable the use of ACPI in probe/suspend/resume (0=off [default], 1=on)");
int libata_allow_tpm = 0;
module_param_named(allow_tpm, libata_allow_tpm, int, 0444);
-MODULE_PARM_DESC(allow_tpm, "Permit the use of TPM commands");
+MODULE_PARM_DESC(allow_tpm, "Permit the use of TPM commands (0=off [default], 1=on)");
MODULE_AUTHOR("Jeff Garzik");
MODULE_DESCRIPTION("Library module for ATA devices");
@@ -1993,11 +1993,17 @@
* Check if the current speed of the device requires IORDY. Used
* by various controllers for chip configuration.
*/
-
unsigned int ata_pio_need_iordy(const struct ata_device *adev)
{
- /* Controller doesn't support IORDY. Probably a pointless check
- as the caller should know this */
+ /* Don't set IORDY if we're preparing for reset. IORDY may
+ * lead to controller lock up on certain controllers if the
+ * port is not occupied. See bko#11703 for details.
+ */
+ if (adev->link->ap->pflags & ATA_PFLAG_RESETTING)
+ return 0;
+ /* Controller doesn't support IORDY. Probably a pointless
+ * check as the caller should know this.
+ */
if (adev->link->ap->flags & ATA_FLAG_NO_IORDY)
return 0;
/* CF spec. r4.1 Table 22 says no iordy on PIO5 and PIO6. */
@@ -2020,7 +2026,6 @@
* Compute the highest mode possible if we are not using iordy. Return
* -1 if no iordy mode is available.
*/
-
static u32 ata_pio_mask_no_iordy(const struct ata_device *adev)
{
/* If we have no drive specific rule, then PIO 2 is non IORDY */
diff --git a/drivers/ata/pata_at91.c b/drivers/ata/pata_at91.c
new file mode 100644
index 0000000..4b27617
--- /dev/null
+++ b/drivers/ata/pata_at91.c
@@ -0,0 +1,361 @@
+/*
+ * PATA driver for AT91SAM9260 Static Memory Controller
+ * with CompactFlash interface in True IDE mode
+ *
+ * Copyright (C) 2009 Matyukevich Sergey
+ *
+ * Based on:
+ * * generic platform driver by Paul Mundt: drivers/ata/pata_platform.c
+ * * pata_at32 driver by Kristoffer Nyborg Gregertsen
+ * * at91_ide driver by Stanislaw Gruszka
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/blkdev.h>
+#include <scsi/scsi_host.h>
+#include <linux/ata.h>
+#include <linux/clk.h>
+#include <linux/libata.h>
+#include <linux/platform_device.h>
+#include <linux/ata_platform.h>
+
+#include <mach/at91sam9260_matrix.h>
+#include <mach/at91sam9_smc.h>
+#include <mach/at91sam9260.h>
+#include <mach/board.h>
+#include <mach/gpio.h>
+
+
+#define DRV_NAME "pata_at91"
+#define DRV_VERSION "0.1"
+
+#define CF_IDE_OFFSET 0x00c00000
+#define CF_ALT_IDE_OFFSET 0x00e00000
+#define CF_IDE_RES_SIZE 0x08
+
+struct at91_ide_info {
+ unsigned long mode;
+ unsigned int cs;
+
+ void __iomem *ide_addr;
+ void __iomem *alt_addr;
+};
+
+const struct ata_timing initial_timing =
+ {XFER_PIO_0, 70, 290, 240, 600, 165, 150, 600, 0};
+
+static unsigned int calc_mck_cycles(unsigned int ns, unsigned int mck_hz)
+{
+ unsigned long mul;
+
+ /*
+ * cycles = x [nsec] * f [Hz] / 10^9 [ns in sec] =
+ * x * (f / 1_000_000_000) =
+ * x * ((f * 65536) / 1_000_000_000) / 65536 =
+ * x * (((f / 10_000) * 65536) / 100_000) / 65536 =
+ */
+
+ mul = (mck_hz / 10000) << 16;
+ mul /= 100000;
+
+ return (ns * mul + 65536) >> 16; /* rounding */
+}
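+/*
+ * Worked example (illustration only, not part of the driver): assuming a
+ * hypothetical mck_hz of 100 MHz,
+ *   mul = ((100000000 / 10000) << 16) / 100000
+ *       = (10000 * 65536) / 100000 = 6553
+ * and converting the 290 ns act8b value from initial_timing above gives
+ *   (290 * 6553 + 65536) >> 16 = 1965906 >> 16 = 29 cycles,
+ * i.e. 290 ns rounded to whole 10 ns master-clock cycles.
+ */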
+
+static void set_smc_mode(struct at91_ide_info *info)
+{
+ at91_sys_write(AT91_SMC_MODE(info->cs), info->mode);
+ return;
+}
+
+static void set_smc_timing(struct device *dev,
+ struct at91_ide_info *info, const struct ata_timing *ata)
+{
+ int read_cycle, write_cycle, active, recover;
+ int nrd_setup, nrd_pulse, nrd_recover;
+ int nwe_setup, nwe_pulse;
+
+ int ncs_write_setup, ncs_write_pulse;
+ int ncs_read_setup, ncs_read_pulse;
+
+ unsigned int mck_hz;
+ struct clk *mck;
+
+ read_cycle = ata->cyc8b;
+ nrd_setup = ata->setup;
+ nrd_pulse = ata->act8b;
+ nrd_recover = ata->rec8b;
+
+ mck = clk_get(NULL, "mck");
+ BUG_ON(IS_ERR(mck));
+ mck_hz = clk_get_rate(mck);
+
+ read_cycle = calc_mck_cycles(read_cycle, mck_hz);
+ nrd_setup = calc_mck_cycles(nrd_setup, mck_hz);
+ nrd_pulse = calc_mck_cycles(nrd_pulse, mck_hz);
+ nrd_recover = calc_mck_cycles(nrd_recover, mck_hz);
+
+ clk_put(mck);
+
+ active = nrd_setup + nrd_pulse;
+ recover = read_cycle - active;
+
+ /* Need at least two cycles recovery */
+ if (recover < 2)
+ read_cycle = active + 2;
+
+ /* (CS0, CS1, DIR, OE) <= (CFCE1, CFCE2, CFRNW, NCSX) timings */
+ ncs_read_setup = 1;
+ ncs_read_pulse = read_cycle - 2;
+
+ /* Write timings same as read timings */
+ write_cycle = read_cycle;
+ nwe_setup = nrd_setup;
+ nwe_pulse = nrd_pulse;
+ ncs_write_setup = ncs_read_setup;
+ ncs_write_pulse = ncs_read_pulse;
+
+ dev_dbg(dev, "ATA timings: nrd_setup = %d nrd_pulse = %d nrd_cycle = %d\n",
+ nrd_setup, nrd_pulse, read_cycle);
+ dev_dbg(dev, "ATA timings: nwe_setup = %d nwe_pulse = %d nwe_cycle = %d\n",
+ nwe_setup, nwe_pulse, write_cycle);
+ dev_dbg(dev, "ATA timings: ncs_read_setup = %d ncs_read_pulse = %d\n",
+ ncs_read_setup, ncs_read_pulse);
+ dev_dbg(dev, "ATA timings: ncs_write_setup = %d ncs_write_pulse = %d\n",
+ ncs_write_setup, ncs_write_pulse);
+
+ at91_sys_write(AT91_SMC_SETUP(info->cs),
+ AT91_SMC_NWESETUP_(nwe_setup) |
+ AT91_SMC_NRDSETUP_(nrd_setup) |
+ AT91_SMC_NCS_WRSETUP_(ncs_write_setup) |
+ AT91_SMC_NCS_RDSETUP_(ncs_read_setup));
+
+ at91_sys_write(AT91_SMC_PULSE(info->cs),
+ AT91_SMC_NWEPULSE_(nwe_pulse) |
+ AT91_SMC_NRDPULSE_(nrd_pulse) |
+ AT91_SMC_NCS_WRPULSE_(ncs_write_pulse) |
+ AT91_SMC_NCS_RDPULSE_(ncs_read_pulse));
+
+ at91_sys_write(AT91_SMC_CYCLE(info->cs),
+ AT91_SMC_NWECYCLE_(write_cycle) |
+ AT91_SMC_NRDCYCLE_(read_cycle));
+
+ return;
+}
+
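+/*
+ * Worked example, assuming the PIO0 values from initial_timing and an
+ * illustrative 100 MHz MCK: read_cycle = 60, nrd_setup = 7 and
+ * nrd_pulse = 29 cycles, so active = 36 and recover = 24; the
+ * two-cycle minimum is already met and ncs_read_pulse becomes
+ * 60 - 2 = 58.  Had recover dropped below 2, read_cycle would have
+ * been stretched to active + 2 before the NCS timings were derived.
+ */
+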
+static void pata_at91_set_piomode(struct ata_port *ap, struct ata_device *adev)
+{
+ struct at91_ide_info *info = ap->host->private_data;
+ struct ata_timing timing;
+ int ret;
+
+ /* Compute ATA timing and set it to SMC */
+ ret = ata_timing_compute(adev, adev->pio_mode, &timing, 1000, 0);
+ if (ret) {
+ dev_warn(ap->dev, "Failed to compute ATA timing %d, "
+ "set PIO_0 timing\n", ret);
+ set_smc_timing(ap->dev, info, &initial_timing);
+ } else {
+ set_smc_timing(ap->dev, info, &timing);
+ }
+
+ /* Setup SMC mode */
+ set_smc_mode(info);
+
+ return;
+}
+
+static unsigned int pata_at91_data_xfer_noirq(struct ata_device *dev,
+ unsigned char *buf, unsigned int buflen, int rw)
+{
+ struct at91_ide_info *info = dev->link->ap->host->private_data;
+ unsigned int consumed;
+ unsigned long flags;
+ unsigned int mode;
+
+ local_irq_save(flags);
+ mode = at91_sys_read(AT91_SMC_MODE(info->cs));
+
+ /* set 16bit mode before writing data */
+ at91_sys_write(AT91_SMC_MODE(info->cs),
+ (mode & ~AT91_SMC_DBW) | AT91_SMC_DBW_16);
+
+ consumed = ata_sff_data_xfer(dev, buf, buflen, rw);
+
+ /* restore 8bit mode after data is written */
+ at91_sys_write(AT91_SMC_MODE(info->cs),
+ (mode & ~AT91_SMC_DBW) | AT91_SMC_DBW_8);
+
+ local_irq_restore(flags);
+ return consumed;
+}
+
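+/*
+ * Note that only the data register transfer above runs with a 16-bit
+ * SMC bus width; task-file accesses keep the 8-bit width programmed
+ * into info->mode at probe time, and local interrupts are disabled
+ * across the temporary width switch.
+ */
+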
+static struct scsi_host_template pata_at91_sht = {
+ ATA_PIO_SHT(DRV_NAME),
+};
+
+static struct ata_port_operations pata_at91_port_ops = {
+ .inherits = &ata_sff_port_ops,
+
+ .sff_data_xfer = pata_at91_data_xfer_noirq,
+ .set_piomode = pata_at91_set_piomode,
+ .cable_detect = ata_cable_40wire,
+ .port_start = ATA_OP_NULL,
+};
+
+static int __devinit pata_at91_probe(struct platform_device *pdev)
+{
+ struct at91_cf_data *board = pdev->dev.platform_data;
+ struct device *dev = &pdev->dev;
+ struct at91_ide_info *info;
+ struct resource *mem_res;
+ struct ata_host *host;
+ struct ata_port *ap;
+ int irq_flags = 0;
+ int irq = 0;
+ int ret;
+
+ /* get platform resources: IO/CTL memories and irq/rst pins */
+
+ if (pdev->num_resources != 1) {
+ dev_err(&pdev->dev, "invalid number of resources\n");
+ return -EINVAL;
+ }
+
+ mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+ if (!mem_res) {
+ dev_err(dev, "failed to get mem resource\n");
+ return -EINVAL;
+ }
+
+ irq = board->irq_pin;
+
+ /* init ata host */
+
+ host = ata_host_alloc(dev, 1);
+
+ if (!host)
+ return -ENOMEM;
+
+ ap = host->ports[0];
+ ap->ops = &pata_at91_port_ops;
+ ap->flags |= ATA_FLAG_SLAVE_POSS;
+ ap->pio_mask = ATA_PIO4;
+
+ if (!irq) {
+ ap->flags |= ATA_FLAG_PIO_POLLING;
+ ata_port_desc(ap, "no IRQ, using PIO polling");
+ }
+
+ info = kzalloc(sizeof(*info), GFP_KERNEL);
+
+ if (!info) {
+ dev_err(dev, "failed to allocate memory for private data\n");
+ return -ENOMEM;
+ }
+
+ info->cs = board->chipselect;
+ info->mode = AT91_SMC_READMODE | AT91_SMC_WRITEMODE |
+ AT91_SMC_EXNWMODE_READY | AT91_SMC_BAT_SELECT |
+ AT91_SMC_DBW_8 | AT91_SMC_TDF_(0);
+
+ info->ide_addr = devm_ioremap(dev,
+ mem_res->start + CF_IDE_OFFSET, CF_IDE_RES_SIZE);
+
+ if (!info->ide_addr) {
+ dev_err(dev, "failed to map IO base\n");
+ ret = -ENOMEM;
+ goto err_ide_ioremap;
+ }
+
+ info->alt_addr = devm_ioremap(dev,
+ mem_res->start + CF_ALT_IDE_OFFSET, CF_IDE_RES_SIZE);
+
+ if (!info->alt_addr) {
+ dev_err(dev, "failed to map CTL base\n");
+ ret = -ENOMEM;
+ goto err_alt_ioremap;
+ }
+
+ ap->ioaddr.cmd_addr = info->ide_addr;
+ ap->ioaddr.ctl_addr = info->alt_addr + 0x06;
+ ap->ioaddr.altstatus_addr = ap->ioaddr.ctl_addr;
+
+ ata_sff_std_ports(&ap->ioaddr);
+
+ ata_port_desc(ap, "mmio cmd 0x%llx ctl 0x%llx",
+ (unsigned long long)mem_res->start + CF_IDE_OFFSET,
+ (unsigned long long)mem_res->start + CF_ALT_IDE_OFFSET);
+
+ host->private_data = info;
+
+ return ata_host_activate(host, irq ? gpio_to_irq(irq) : 0,
+ irq ? ata_sff_interrupt : NULL,
+ irq_flags, &pata_at91_sht);
+
+err_alt_ioremap:
+ devm_iounmap(dev, info->ide_addr);
+
+err_ide_ioremap:
+ kfree(info);
+
+ return ret;
+}
+
+static int __devexit pata_at91_remove(struct platform_device *pdev)
+{
+ struct ata_host *host = dev_get_drvdata(&pdev->dev);
+ struct at91_ide_info *info = host->private_data;
+ struct device *dev = &pdev->dev;
+
+ if (!host)
+ return 0;
+
+ ata_host_detach(host);
+
+ if (!info)
+ return 0;
+
+ devm_iounmap(dev, info->ide_addr);
+ devm_iounmap(dev, info->alt_addr);
+
+ kfree(info);
+ return 0;
+}
+
+static struct platform_driver pata_at91_driver = {
+ .probe = pata_at91_probe,
+ .remove = __devexit_p(pata_at91_remove),
+ .driver = {
+ .name = DRV_NAME,
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init pata_at91_init(void)
+{
+ return platform_driver_register(&pata_at91_driver);
+}
+
+static void __exit pata_at91_exit(void)
+{
+ platform_driver_unregister(&pata_at91_driver);
+}
+
+
+module_init(pata_at91_init);
+module_exit(pata_at91_exit);
+
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Driver for CF in True IDE mode on AT91SAM9260 SoC");
+MODULE_AUTHOR("Matyukevich Sergey");
+MODULE_VERSION(DRV_VERSION);
+
diff --git a/drivers/ata/sata_fsl.c b/drivers/ata/sata_fsl.c
index 36b8629..94eaa43 100644
--- a/drivers/ata/sata_fsl.c
+++ b/drivers/ata/sata_fsl.c
@@ -1378,6 +1378,37 @@
return 0;
}
+#ifdef CONFIG_PM
+static int sata_fsl_suspend(struct of_device *op, pm_message_t state)
+{
+ struct ata_host *host = dev_get_drvdata(&op->dev);
+ return ata_host_suspend(host, state);
+}
+
+static int sata_fsl_resume(struct of_device *op)
+{
+ struct ata_host *host = dev_get_drvdata(&op->dev);
+ struct sata_fsl_host_priv *host_priv = host->private_data;
+ int ret;
+ void __iomem *hcr_base = host_priv->hcr_base;
+ struct ata_port *ap = host->ports[0];
+ struct sata_fsl_port_priv *pp = ap->private_data;
+
+ ret = sata_fsl_init_controller(host);
+ if (ret) {
+ dev_printk(KERN_ERR, &op->dev,
+ "Error initialize hardware\n");
+ return ret;
+ }
+
+ /* Recover the CHBA register in the host controller cmd register set */
+ iowrite32(pp->cmdslot_paddr & 0xffffffff, hcr_base + CHBA);
+
+ ata_host_resume(host);
+ return 0;
+}
+#endif
+
static struct of_device_id fsl_sata_match[] = {
{
.compatible = "fsl,pq-sata",
@@ -1392,6 +1423,10 @@
.match_table = fsl_sata_match,
.probe = sata_fsl_probe,
.remove = sata_fsl_remove,
+#ifdef CONFIG_PM
+ .suspend = sata_fsl_suspend,
+ .resume = sata_fsl_resume,
+#endif
};
static int __init sata_fsl_init(void)
diff --git a/drivers/char/bfin_jtag_comm.c b/drivers/char/bfin_jtag_comm.c
index 44c113d..1d7c34c 100644
--- a/drivers/char/bfin_jtag_comm.c
+++ b/drivers/char/bfin_jtag_comm.c
@@ -8,6 +8,10 @@
* Licensed under the GPL-2 or later.
*/
+#define DRV_NAME "bfin-jtag-comm"
+#define DEV_NAME "ttyBFJC"
+#define pr_fmt(fmt) DRV_NAME ": " fmt
+
#include <linux/circ_buf.h>
#include <linux/console.h>
#include <linux/delay.h>
@@ -22,18 +26,14 @@
#include <linux/tty_flip.h>
#include <asm/atomic.h>
+#define pr_init(fmt, args...) ({ static const __initconst char __fmt[] = fmt; printk(__fmt, ## args); })
+
/* See the Debug/Emulation chapter in the HRM */
#define EMUDOF 0x00000001 /* EMUDAT_OUT full & valid */
#define EMUDIF 0x00000002 /* EMUDAT_IN full & valid */
#define EMUDOOVF 0x00000004 /* EMUDAT_OUT overflow */
#define EMUDIOVF 0x00000008 /* EMUDAT_IN overflow */
-#define DRV_NAME "bfin-jtag-comm"
-#define DEV_NAME "ttyBFJC"
-
-#define pr_init(fmt, args...) ({ static const __initdata char __fmt[] = fmt; printk(__fmt, ## args); })
-#define debug(fmt, args...) pr_debug(DRV_NAME ": " fmt, ## args)
-
static inline uint32_t bfin_write_emudat(uint32_t emudat)
{
__asm__ __volatile__("emudat = %0;" : : "d"(emudat));
@@ -74,7 +74,7 @@
while (!kthread_should_stop()) {
/* no one left to give data to, so sleep */
if (bfin_jc_tty == NULL && circ_empty(&bfin_jc_write_buf)) {
- debug("waiting for readers\n");
+ pr_debug("waiting for readers\n");
__set_current_state(TASK_UNINTERRUPTIBLE);
schedule();
__set_current_state(TASK_RUNNING);
@@ -82,7 +82,7 @@
/* no data available, so just chill */
if (!(bfin_read_DBGSTAT() & EMUDIF) && circ_empty(&bfin_jc_write_buf)) {
- debug("waiting for data (in_len = %i) (circ: %i %i)\n",
+ pr_debug("waiting for data (in_len = %i) (circ: %i %i)\n",
inbound_len, bfin_jc_write_buf.tail, bfin_jc_write_buf.head);
if (inbound_len)
schedule();
@@ -99,11 +99,11 @@
if (tty != NULL) {
uint32_t emudat = bfin_read_emudat();
if (inbound_len == 0) {
- debug("incoming length: 0x%08x\n", emudat);
+ pr_debug("incoming length: 0x%08x\n", emudat);
inbound_len = emudat;
} else {
size_t num_chars = (4 <= inbound_len ? 4 : inbound_len);
- debug(" incoming data: 0x%08x (pushing %zu)\n", emudat, num_chars);
+ pr_debug(" incoming data: 0x%08x (pushing %zu)\n", emudat, num_chars);
inbound_len -= num_chars;
tty_insert_flip_string(tty, (unsigned char *)&emudat, num_chars);
tty_flip_buffer_push(tty);
@@ -117,7 +117,7 @@
if (outbound_len == 0) {
outbound_len = circ_cnt(&bfin_jc_write_buf);
bfin_write_emudat(outbound_len);
- debug("outgoing length: 0x%08x\n", outbound_len);
+ pr_debug("outgoing length: 0x%08x\n", outbound_len);
} else {
struct tty_struct *tty;
int tail = bfin_jc_write_buf.tail;
@@ -136,7 +136,7 @@
if (tty)
tty_wakeup(tty);
mutex_unlock(&bfin_jc_tty_mutex);
- debug(" outgoing data: 0x%08x (pushing %zu)\n", emudat, ate);
+ pr_debug(" outgoing data: 0x%08x (pushing %zu)\n", emudat, ate);
}
}
}
@@ -149,7 +149,7 @@
bfin_jc_open(struct tty_struct *tty, struct file *filp)
{
mutex_lock(&bfin_jc_tty_mutex);
- debug("open %lu\n", bfin_jc_count);
+ pr_debug("open %lu\n", bfin_jc_count);
++bfin_jc_count;
bfin_jc_tty = tty;
wake_up_process(bfin_jc_kthread);
@@ -161,7 +161,7 @@
bfin_jc_close(struct tty_struct *tty, struct file *filp)
{
mutex_lock(&bfin_jc_tty_mutex);
- debug("close %lu\n", bfin_jc_count);
+ pr_debug("close %lu\n", bfin_jc_count);
if (--bfin_jc_count == 0)
bfin_jc_tty = NULL;
wake_up_process(bfin_jc_kthread);
@@ -174,7 +174,7 @@
{
int i;
count = min(count, circ_free(&bfin_jc_write_buf));
- debug("going to write chunk of %i bytes\n", count);
+ pr_debug("going to write chunk of %i bytes\n", count);
for (i = 0; i < count; ++i)
circ_byte(&bfin_jc_write_buf, bfin_jc_write_buf.head + i) = buf[i];
bfin_jc_write_buf.head += i;
diff --git a/drivers/char/bsr.c b/drivers/char/bsr.c
index 140ea10..c02db01 100644
--- a/drivers/char/bsr.c
+++ b/drivers/char/bsr.c
@@ -27,6 +27,7 @@
#include <linux/cdev.h>
#include <linux/list.h>
#include <linux/mm.h>
+#include <asm/pgtable.h>
#include <asm/io.h>
/*
@@ -75,12 +76,13 @@
static int bsr_major;
enum {
- BSR_8 = 0,
- BSR_16 = 1,
- BSR_64 = 2,
- BSR_128 = 3,
- BSR_UNKNOWN = 4,
- BSR_MAX = 5,
+ BSR_8 = 0,
+ BSR_16 = 1,
+ BSR_64 = 2,
+ BSR_128 = 3,
+ BSR_4096 = 4,
+ BSR_UNKNOWN = 5,
+ BSR_MAX = 6,
};
static unsigned bsr_types[BSR_MAX];
@@ -117,15 +119,22 @@
{
unsigned long size = vma->vm_end - vma->vm_start;
struct bsr_dev *dev = filp->private_data;
+ int ret;
- if (size > dev->bsr_len || (size & (PAGE_SIZE-1)))
- return -EINVAL;
-
- vma->vm_flags |= (VM_IO | VM_DONTEXPAND);
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
- if (io_remap_pfn_range(vma, vma->vm_start, dev->bsr_addr >> PAGE_SHIFT,
- size, vma->vm_page_prot))
+ /* check for the case of a small BSR device and map one 4k page for it */
+ if (dev->bsr_len < PAGE_SIZE && size == PAGE_SIZE)
+ ret = remap_4k_pfn(vma, vma->vm_start, dev->bsr_addr >> 12,
+ vma->vm_page_prot);
+ else if (size <= dev->bsr_len)
+ ret = io_remap_pfn_range(vma, vma->vm_start,
+ dev->bsr_addr >> PAGE_SHIFT,
+ size, vma->vm_page_prot);
+ else
+ return -EINVAL;
+
+ if (ret)
return -EAGAIN;
return 0;
@@ -205,6 +214,11 @@
cur->bsr_stride = bsr_stride[i];
cur->bsr_dev = MKDEV(bsr_major, i + total_bsr_devs);
+ /* if we have a bsr_len of > 4k and less than PAGE_SIZE (64k pages) */
+ /* we can only map 4k of it, so only advertise the 4k in sysfs */
+ if (cur->bsr_len > 4096 && cur->bsr_len < PAGE_SIZE)
+ cur->bsr_len = 4096;
+
switch(cur->bsr_bytes) {
case 8:
cur->bsr_type = BSR_8;
@@ -218,9 +232,11 @@
case 128:
cur->bsr_type = BSR_128;
break;
+ case 4096:
+ cur->bsr_type = BSR_4096;
+ break;
default:
cur->bsr_type = BSR_UNKNOWN;
- printk(KERN_INFO "unknown BSR size %d\n",cur->bsr_bytes);
}
cur->bsr_num = bsr_types[cur->bsr_type];
diff --git a/drivers/char/moxa.c b/drivers/char/moxa.c
index 6799588..65b6ff2 100644
--- a/drivers/char/moxa.c
+++ b/drivers/char/moxa.c
@@ -1189,11 +1189,6 @@
return -ENODEV;
}
- if (port % MAX_PORTS_PER_BOARD >= brd->numPorts) {
- retval = -ENODEV;
- goto out_unlock;
- }
-
ch = &brd->ports[port % MAX_PORTS_PER_BOARD];
ch->port.count++;
tty->driver_data = ch;
@@ -1218,8 +1213,8 @@
moxa_close_port(tty);
} else
ch->port.flags |= ASYNC_NORMAL_ACTIVE;
-out_unlock:
mutex_unlock(&moxa_openlock);
+
return retval;
}
diff --git a/drivers/char/mxser.c b/drivers/char/mxser.c
index 9533f43..52d953e 100644
--- a/drivers/char/mxser.c
+++ b/drivers/char/mxser.c
@@ -1048,8 +1048,6 @@
if (retval)
return retval;
- /* unmark here for very high baud rate (ex. 921600 bps) used */
- tty->low_latency = 1;
return 0;
}
diff --git a/drivers/char/n_hdlc.c b/drivers/char/n_hdlc.c
index 461ece5..1c43c8c 100644
--- a/drivers/char/n_hdlc.c
+++ b/drivers/char/n_hdlc.c
@@ -10,7 +10,6 @@
* Paul Mackerras <Paul.Mackerras@cs.anu.edu.au>
*
* Original release 01/11/99
- * $Id: n_hdlc.c,v 4.8 2003/05/06 21:18:51 paulkf Exp $
*
* This code is released under the GNU General Public License (GPL)
*
@@ -79,7 +78,6 @@
*/
#define HDLC_MAGIC 0x239e
-#define HDLC_VERSION "$Revision: 4.8 $"
#include <linux/module.h>
#include <linux/init.h>
@@ -114,7 +112,7 @@
#define MAX_HDLC_FRAME_SIZE 65535
#define DEFAULT_RX_BUF_COUNT 10
#define MAX_RX_BUF_COUNT 60
-#define DEFAULT_TX_BUF_COUNT 1
+#define DEFAULT_TX_BUF_COUNT 3
struct n_hdlc_buf {
struct n_hdlc_buf *link;
@@ -199,6 +197,31 @@
#define tty2n_hdlc(tty) ((struct n_hdlc *) ((tty)->disc_data))
#define n_hdlc2tty(n_hdlc) ((n_hdlc)->tty)
+static void flush_rx_queue(struct tty_struct *tty)
+{
+ struct n_hdlc *n_hdlc = tty2n_hdlc(tty);
+ struct n_hdlc_buf *buf;
+
+ while ((buf = n_hdlc_buf_get(&n_hdlc->rx_buf_list)))
+ n_hdlc_buf_put(&n_hdlc->rx_free_buf_list, buf);
+}
+
+static void flush_tx_queue(struct tty_struct *tty)
+{
+ struct n_hdlc *n_hdlc = tty2n_hdlc(tty);
+ struct n_hdlc_buf *buf;
+ unsigned long flags;
+
+ while ((buf = n_hdlc_buf_get(&n_hdlc->tx_buf_list)))
+ n_hdlc_buf_put(&n_hdlc->tx_free_buf_list, buf);
+ spin_lock_irqsave(&n_hdlc->tx_buf_list.spinlock, flags);
+ if (n_hdlc->tbuf) {
+ n_hdlc_buf_put(&n_hdlc->tx_free_buf_list, n_hdlc->tbuf);
+ n_hdlc->tbuf = NULL;
+ }
+ spin_unlock_irqrestore(&n_hdlc->tx_buf_list.spinlock, flags);
+}
+
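+/*
+ * flush_rx_queue() is wired up as the line discipline's flush_buffer()
+ * hook below; flush_tx_queue() is reached from the TCFLSH case added
+ * to the ioctl handler further down (e.g. a tcflush(fd, TCOFLUSH)
+ * call from user space).
+ */
+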
static struct tty_ldisc_ops n_hdlc_ldisc = {
.owner = THIS_MODULE,
.magic = TTY_LDISC_MAGIC,
@@ -211,6 +234,7 @@
.poll = n_hdlc_tty_poll,
.receive_buf = n_hdlc_tty_receive,
.write_wakeup = n_hdlc_tty_wakeup,
+ .flush_buffer = flush_rx_queue,
};
/**
@@ -341,10 +365,7 @@
set_bit(TTY_NO_WRITE_SPLIT,&tty->flags);
#endif
- /* Flush any pending characters in the driver and discipline. */
- if (tty->ldisc->ops->flush_buffer)
- tty->ldisc->ops->flush_buffer(tty);
-
+ /* flush receive data from driver */
tty_driver_flush_buffer(tty);
if (debuglevel >= DEBUG_LEVEL_INFO)
@@ -763,6 +784,14 @@
error = put_user(count, (int __user *)arg);
break;
+ case TCFLSH:
+ switch (arg) {
+ case TCIOFLUSH:
+ case TCOFLUSH:
+ flush_tx_queue(tty);
+ }
+ /* fall through to default */
+
default:
error = n_tty_ioctl_helper(tty, file, cmd, arg);
break;
@@ -919,8 +948,7 @@
} /* end of n_hdlc_buf_get() */
static char hdlc_banner[] __initdata =
- KERN_INFO "HDLC line discipline: version " HDLC_VERSION
- ", maxframe=%u\n";
+ KERN_INFO "HDLC line discipline maxframe=%u\n";
static char hdlc_register_ok[] __initdata =
KERN_INFO "N_HDLC line discipline registered.\n";
static char hdlc_register_fail[] __initdata =
diff --git a/drivers/char/n_r3964.c b/drivers/char/n_r3964.c
index d2e93e3..2e99158 100644
--- a/drivers/char/n_r3964.c
+++ b/drivers/char/n_r3964.c
@@ -1062,7 +1062,7 @@
struct r3964_client_info *pClient;
struct r3964_message *pMsg;
struct r3964_client_message theMsg;
- int count;
+ int ret;
TRACE_L("read()");
@@ -1074,8 +1074,8 @@
if (pMsg == NULL) {
/* no messages available. */
if (file->f_flags & O_NONBLOCK) {
- unlock_kernel();
- return -EAGAIN;
+ ret = -EAGAIN;
+ goto unlock;
}
/* block until there is a message: */
wait_event_interruptible(pInfo->read_wait,
@@ -1085,29 +1085,31 @@
/* If we still haven't got a message, we must have been signalled */
if (!pMsg) {
- unlock_kernel();
- return -EINTR;
+ ret = -EINTR;
+ goto unlock;
}
/* deliver msg to client process: */
theMsg.msg_id = pMsg->msg_id;
theMsg.arg = pMsg->arg;
theMsg.error_code = pMsg->error_code;
- count = sizeof(struct r3964_client_message);
+ ret = sizeof(struct r3964_client_message);
kfree(pMsg);
TRACE_M("r3964_read - msg kfree %p", pMsg);
- if (copy_to_user(buf, &theMsg, count)) {
- unlock_kernel();
- return -EFAULT;
+ if (copy_to_user(buf, &theMsg, ret)) {
+ ret = -EFAULT;
+ goto unlock;
}
- TRACE_PS("read - return %d", count);
- return count;
+ TRACE_PS("read - return %d", ret);
+ goto unlock;
}
+ ret = -EPERM;
+unlock:
unlock_kernel();
- return -EPERM;
+ return ret;
}
static ssize_t r3964_write(struct tty_struct *tty, struct file *file,
diff --git a/drivers/char/nozomi.c b/drivers/char/nozomi.c
index d6102b6..574f1c7 100644
--- a/drivers/char/nozomi.c
+++ b/drivers/char/nozomi.c
@@ -1591,8 +1591,6 @@
/* Enable interrupt downlink for channel */
if (port->port.count == 1) {
- /* FIXME: is this needed now ? */
- tty->low_latency = 1;
tty->driver_data = port;
tty_port_tty_set(&port->port, tty);
DBG1("open: %d", port->token_dl);
diff --git a/drivers/char/pcmcia/cm4000_cs.c b/drivers/char/pcmcia/cm4000_cs.c
index dbb9125..881934c 100644
--- a/drivers/char/pcmcia/cm4000_cs.c
+++ b/drivers/char/pcmcia/cm4000_cs.c
@@ -1575,7 +1575,8 @@
clear_bit(LOCK_IO, &dev->flags);
wake_up_interruptible(&dev->ioq);
- return 0;
+ rc = 0;
+ break;
case CM_IOCSPTS:
{
struct ptsreq krnptsreq;
diff --git a/drivers/char/synclink_gt.c b/drivers/char/synclink_gt.c
index 1386625..a2e67e6 100644
--- a/drivers/char/synclink_gt.c
+++ b/drivers/char/synclink_gt.c
@@ -467,7 +467,6 @@
static unsigned int tbuf_bytes(struct slgt_info *info);
static void reset_tbufs(struct slgt_info *info);
static void tdma_reset(struct slgt_info *info);
-static void tdma_start(struct slgt_info *info);
static void tx_load(struct slgt_info *info, const char *buf, unsigned int count);
static void get_signals(struct slgt_info *info);
@@ -795,6 +794,18 @@
}
}
+static void update_tx_timer(struct slgt_info *info)
+{
+ /*
+ * use worst case speed of 1200bps to calculate transmit timeout
+ * based on data in buffers (tbuf_bytes) and FIFO (128 bytes)
+ */
+ if (info->params.mode == MGSL_MODE_HDLC) {
+ int timeout = (tbuf_bytes(info) * 7) + 1000;
+ mod_timer(&info->tx_timer, jiffies + msecs_to_jiffies(timeout));
+ }
+}
+
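+/*
+ * For example, with 512 bytes still queued the timeout becomes
+ * 512 * 7 + 1000 = 4584 ms: roughly 7 ms per byte at the assumed
+ * 1200 bps worst case plus about a second for the 128-byte FIFO,
+ * so the timer only fires if transmission has genuinely stalled.
+ */
+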
static int write(struct tty_struct *tty,
const unsigned char *buf, int count)
{
@@ -838,8 +849,18 @@
spin_lock_irqsave(&info->lock,flags);
if (!info->tx_active)
tx_start(info);
- else
- tdma_start(info);
+ else if (!(rd_reg32(info, TDCSR) & BIT0)) {
+ /* transmit still active but transmit DMA stopped */
+ unsigned int i = info->tbuf_current;
+ if (!i)
+ i = info->tbuf_count;
+ i--;
+ /* if DMA buf unsent must try later after tx idle */
+ if (desc_count(info->tbufs[i]))
+ ret = 0;
+ }
+ if (ret > 0)
+ update_tx_timer(info);
spin_unlock_irqrestore(&info->lock,flags);
}
@@ -1502,10 +1523,9 @@
/* save start time for transmit timeout detection */
dev->trans_start = jiffies;
- /* start hardware transmitter if necessary */
spin_lock_irqsave(&info->lock,flags);
- if (!info->tx_active)
- tx_start(info);
+ tx_start(info);
+ update_tx_timer(info);
spin_unlock_irqrestore(&info->lock,flags);
return 0;
@@ -3946,50 +3966,19 @@
slgt_irq_on(info, IRQ_TXUNDER + IRQ_TXIDLE);
/* clear tx idle and underrun status bits */
wr_reg16(info, SSR, (unsigned short)(IRQ_TXIDLE + IRQ_TXUNDER));
- if (info->params.mode == MGSL_MODE_HDLC)
- mod_timer(&info->tx_timer, jiffies +
- msecs_to_jiffies(5000));
} else {
slgt_irq_off(info, IRQ_TXDATA);
slgt_irq_on(info, IRQ_TXIDLE);
/* clear tx idle status bit */
wr_reg16(info, SSR, IRQ_TXIDLE);
}
- tdma_start(info);
+ /* set 1st descriptor address and start DMA */
+ wr_reg32(info, TDDAR, info->tbufs[info->tbuf_start].pdesc);
+ wr_reg32(info, TDCSR, BIT2 + BIT0);
info->tx_active = true;
}
}
-/*
- * start transmit DMA if inactive and there are unsent buffers
- */
-static void tdma_start(struct slgt_info *info)
-{
- unsigned int i;
-
- if (rd_reg32(info, TDCSR) & BIT0)
- return;
-
- /* transmit DMA inactive, check for unsent buffers */
- i = info->tbuf_start;
- while (!desc_count(info->tbufs[i])) {
- if (++i == info->tbuf_count)
- i = 0;
- if (i == info->tbuf_current)
- return;
- }
- info->tbuf_start = i;
-
- /* there are unsent buffers, start transmit DMA */
-
- /* reset needed if previous error condition */
- tdma_reset(info);
-
- /* set 1st descriptor address */
- wr_reg32(info, TDDAR, info->tbufs[info->tbuf_start].pdesc);
- wr_reg32(info, TDCSR, BIT2 + BIT0); /* IRQ + DMA enable */
-}
-
static void tx_stop(struct slgt_info *info)
{
unsigned short val;
@@ -5004,8 +4993,7 @@
info->icount.txtimeout++;
}
spin_lock_irqsave(&info->lock,flags);
- info->tx_active = false;
- info->tx_count = 0;
+ tx_stop(info);
spin_unlock_irqrestore(&info->lock,flags);
#if SYNCLINK_GENERIC_HDLC
diff --git a/drivers/char/tty_port.c b/drivers/char/tty_port.c
index 62dadfc..4e862a7 100644
--- a/drivers/char/tty_port.c
+++ b/drivers/char/tty_port.c
@@ -193,7 +193,7 @@
{
int do_clocal = 0, retval;
unsigned long flags;
- DECLARE_WAITQUEUE(wait, current);
+ DEFINE_WAIT(wait);
int cd;
/* block if port is in the process of being closed */
diff --git a/drivers/char/vt_ioctl.c b/drivers/char/vt_ioctl.c
index e6ce632..7539bed 100644
--- a/drivers/char/vt_ioctl.c
+++ b/drivers/char/vt_ioctl.c
@@ -396,7 +396,8 @@
kbd = kbd_table + console;
switch (cmd) {
case TIOCLINUX:
- return tioclinux(tty, arg);
+ ret = tioclinux(tty, arg);
+ break;
case KIOCSOUND:
if (!perm)
goto eperm;
diff --git a/drivers/dma/txx9dmac.c b/drivers/dma/txx9dmac.c
index 9aa9ea9..88dab52 100644
--- a/drivers/dma/txx9dmac.c
+++ b/drivers/dma/txx9dmac.c
@@ -432,23 +432,27 @@
list_splice_init(&txd->tx_list, &dc->free_list);
list_move(&desc->desc_node, &dc->free_list);
- /*
- * We use dma_unmap_page() regardless of how the buffers were
- * mapped before they were submitted...
- */
if (!ds) {
dma_addr_t dmaaddr;
if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
dmaaddr = is_dmac64(dc) ?
desc->hwdesc.DAR : desc->hwdesc32.DAR;
- dma_unmap_page(chan2parent(&dc->chan), dmaaddr,
- desc->len, DMA_FROM_DEVICE);
+ if (txd->flags & DMA_COMPL_DEST_UNMAP_SINGLE)
+ dma_unmap_single(chan2parent(&dc->chan),
+ dmaaddr, desc->len, DMA_FROM_DEVICE);
+ else
+ dma_unmap_page(chan2parent(&dc->chan),
+ dmaaddr, desc->len, DMA_FROM_DEVICE);
}
if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
dmaaddr = is_dmac64(dc) ?
desc->hwdesc.SAR : desc->hwdesc32.SAR;
- dma_unmap_page(chan2parent(&dc->chan), dmaaddr,
- desc->len, DMA_TO_DEVICE);
+ if (txd->flags & DMA_COMPL_SRC_UNMAP_SINGLE)
+ dma_unmap_single(chan2parent(&dc->chan),
+ dmaaddr, desc->len, DMA_TO_DEVICE);
+ else
+ dma_unmap_page(chan2parent(&dc->chan),
+ dmaaddr, desc->len, DMA_TO_DEVICE);
}
}
diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
index c36bf40..858fe60 100644
--- a/drivers/edac/amd64_edac.c
+++ b/drivers/edac/amd64_edac.c
@@ -754,13 +754,13 @@
static enum edac_type amd64_determine_edac_cap(struct amd64_pvt *pvt)
{
int bit;
- enum dev_type edac_cap = EDAC_NONE;
+ enum dev_type edac_cap = EDAC_FLAG_NONE;
bit = (boot_cpu_data.x86 > 0xf || pvt->ext_model >= OPTERON_CPU_REV_F)
? 19
: 17;
- if (pvt->dclr0 >> BIT(bit))
+ if (pvt->dclr0 & BIT(bit))
edac_cap = EDAC_FLAG_SECDED;
return edac_cap;
@@ -1269,7 +1269,7 @@
if (channels == 0)
channels = 1;
- debugf0("DIMM count= %d\n", channels);
+ debugf0("MCT channel count: %d\n", channels);
return channels;
@@ -2966,7 +2966,12 @@
" Use of the override can cause "
"unknown side effects.\n");
ret = -ENODEV;
- }
+ } else
+ /*
+ * enable further driver loading if ECC enable is
+ * overridden.
+ */
+ ret = 0;
} else {
amd64_printk(KERN_INFO,
"ECC is enabled by BIOS, Proceeding "
@@ -3006,7 +3011,6 @@
mci->mtype_cap = MEM_FLAG_DDR2 | MEM_FLAG_RDDR2;
mci->edac_ctl_cap = EDAC_FLAG_NONE;
- mci->edac_cap = EDAC_FLAG_NONE;
if (pvt->nbcap & K8_NBCAP_SECDED)
mci->edac_ctl_cap |= EDAC_FLAG_SECDED;
@@ -3052,7 +3056,7 @@
if (!pvt)
goto err_exit;
- pvt->mc_node_id = get_mc_node_id_from_pdev(dram_f2_ctl);
+ pvt->mc_node_id = get_node_id(dram_f2_ctl);
pvt->dram_f2_ctl = dram_f2_ctl;
pvt->ext_model = boot_cpu_data.x86_model >> 4;
@@ -3179,8 +3183,7 @@
{
int ret = 0;
- debugf0("(MC node=%d,mc_type='%s')\n",
- get_mc_node_id_from_pdev(pdev),
+ debugf0("(MC node=%d,mc_type='%s')\n", get_node_id(pdev),
get_amd_family_name(mc_type->driver_data));
ret = pci_enable_device(pdev);
@@ -3319,15 +3322,17 @@
err = amd64_init_2nd_stage(pvt_lookup[nb]);
if (err)
- goto err_exit;
+ goto err_2nd_stage;
}
amd64_setup_pci_device();
return 0;
+err_2nd_stage:
+ debugf0("2nd stage failed\n");
+
err_exit:
- debugf0("'finish_setup' stage failed\n");
pci_unregister_driver(&amd64_pci_driver);
return err;
diff --git a/drivers/edac/amd64_edac.h b/drivers/edac/amd64_edac.h
index a159957..ba73015 100644
--- a/drivers/edac/amd64_edac.h
+++ b/drivers/edac/amd64_edac.h
@@ -444,7 +444,7 @@
#define K8_MSR_MC4ADDR 0x0412
/* AMD sets the first MC device at device ID 0x18. */
-static inline int get_mc_node_id_from_pdev(struct pci_dev *pdev)
+static inline int get_node_id(struct pci_dev *pdev)
{
return PCI_SLOT(pdev->devfn) - 0x18;
}
diff --git a/drivers/firewire/Kconfig b/drivers/firewire/Kconfig
index 4509024..13efcd3 100644
--- a/drivers/firewire/Kconfig
+++ b/drivers/firewire/Kconfig
@@ -1,28 +1,29 @@
-comment "A new alternative FireWire stack is available with EXPERIMENTAL=y"
- depends on EXPERIMENTAL=n
-
-comment "Enable only one of the two stacks, unless you know what you are doing"
- depends on EXPERIMENTAL
+comment "You can enable one or both FireWire driver stacks."
+comment "See the help texts for more information."
config FIREWIRE
- tristate "New FireWire stack, EXPERIMENTAL"
- depends on EXPERIMENTAL
+ tristate "FireWire driver stack"
select CRC_ITU_T
help
- This is the "Juju" FireWire stack, a new alternative implementation
- designed for robustness and simplicity. You can build either this
- stack, or the old stack (the ieee1394 driver, ohci1394 etc.) or both.
- Please read http://ieee1394.wiki.kernel.org/index.php/Juju_Migration
- before you enable the new stack.
+ This is the new-generation IEEE 1394 (FireWire) driver stack
+ a.k.a. Juju, a new implementation designed for robustness and
+ simplicity.
+ See http://ieee1394.wiki.kernel.org/index.php/Juju_Migration
+ for information about migration from the older Linux 1394 stack
+ to the new driver stack.
To compile this driver as a module, say M here: the module will be
called firewire-core.
This module functionally replaces ieee1394, raw1394, and video1394.
To access it from application programs, you generally need at least
- libraw1394 version 2. IIDC/DCAM applications also need libdc1394
- version 2. No libraries are required to access storage devices
- through the firewire-sbp2 driver.
+ libraw1394 v2. IIDC/DCAM applications need libdc1394 v2.
+ No libraries are required to access storage devices through the
+ firewire-sbp2 driver.
+
+ NOTE:
+ FireWire audio devices currently require the old drivers (ieee1394,
+ ohci1394, raw1394).
config FIREWIRE_OHCI
tristate "OHCI-1394 controllers"
@@ -37,11 +38,9 @@
stack.
NOTE:
-
- You should only build either firewire-ohci or the old ohci1394 driver,
- but not both. If you nevertheless want to install both, you should
- configure them only as modules and blacklist the driver(s) which you
- don't want to have auto-loaded. Add either
+ If you want to install firewire-ohci and ohci1394 together, you
+ should configure them only as modules and blacklist the driver(s)
+ which you don't want to have auto-loaded. Add either
blacklist firewire-ohci
or
@@ -50,12 +49,7 @@
blacklist dv1394
to /etc/modprobe.conf or /etc/modprobe.d/* and update modprobe.conf
- depending on your distribution. The latter two modules should be
- blacklisted together with ohci1394 because they depend on ohci1394.
-
- If you have an old modprobe which doesn't implement the blacklist
- directive, use "install modulename /bin/true" for the modules to be
- blacklisted.
+ depending on your distribution.
config FIREWIRE_OHCI_DEBUG
bool
@@ -77,3 +71,17 @@
You should also enable support for disks, CD-ROMs, etc. in the SCSI
configuration section.
+
+config FIREWIRE_NET
+ tristate "IP networking over 1394 (EXPERIMENTAL)"
+ depends on FIREWIRE && INET && EXPERIMENTAL
+ help
+ This enables IPv4 over IEEE 1394, providing IP connectivity with
+ other implementations of RFC 2734 as found on several operating
+ systems. Multicast support is currently limited.
+
+ NOTE, this driver is not stable yet!
+
+ To compile this driver as a module, say M here: The module will be
+ called firewire-net. It replaces eth1394 of the classic IEEE 1394
+ stack.
diff --git a/drivers/firewire/Makefile b/drivers/firewire/Makefile
index bc3b9bf..a8f9bb6 100644
--- a/drivers/firewire/Makefile
+++ b/drivers/firewire/Makefile
@@ -6,7 +6,9 @@
core-iso.o core-topology.o core-transaction.o
firewire-ohci-y += ohci.o
firewire-sbp2-y += sbp2.o
+firewire-net-y += net.o
-obj-$(CONFIG_FIREWIRE) += firewire-core.o
+obj-$(CONFIG_FIREWIRE) += firewire-core.o
obj-$(CONFIG_FIREWIRE_OHCI) += firewire-ohci.o
obj-$(CONFIG_FIREWIRE_SBP2) += firewire-sbp2.o
+obj-$(CONFIG_FIREWIRE_NET) += firewire-net.o
diff --git a/drivers/firewire/core-card.c b/drivers/firewire/core-card.c
index 4c1be64..543fcca 100644
--- a/drivers/firewire/core-card.c
+++ b/drivers/firewire/core-card.c
@@ -176,6 +176,7 @@
return 0;
}
+EXPORT_SYMBOL(fw_core_add_descriptor);
void fw_core_remove_descriptor(struct fw_descriptor *desc)
{
@@ -189,6 +190,7 @@
mutex_unlock(&card_mutex);
}
+EXPORT_SYMBOL(fw_core_remove_descriptor);
static void allocate_broadcast_channel(struct fw_card *card, int generation)
{
@@ -459,11 +461,11 @@
/*
- * The next few functions implements a dummy driver that use once a
- * card driver shuts down an fw_card. This allows the driver to
- * cleanly unload, as all IO to the card will be handled by the dummy
- * driver instead of calling into the (possibly) unloaded module. The
- * dummy driver just fails all IO.
+ * The next few functions implement a dummy driver that is used once a card
+ * driver shuts down an fw_card. This allows the driver to cleanly unload,
+ * as all IO to the card will be handled (and failed) by the dummy driver
+ * instead of calling into the module. Only functions for iso context
+ * shutdown still need to be provided by the card driver.
*/
static int dummy_enable(struct fw_card *card, u32 *config_rom, size_t length)
@@ -510,7 +512,7 @@
return -ENODEV;
}
-static struct fw_card_driver dummy_driver = {
+static const struct fw_card_driver dummy_driver_template = {
.enable = dummy_enable,
.update_phy_reg = dummy_update_phy_reg,
.set_config_rom = dummy_set_config_rom,
@@ -529,6 +531,8 @@
void fw_core_remove_card(struct fw_card *card)
{
+ struct fw_card_driver dummy_driver = dummy_driver_template;
+
card->driver->update_phy_reg(card, 4,
PHY_LINK_ACTIVE | PHY_CONTENDER, 0);
fw_core_initiate_bus_reset(card, 1);
@@ -537,7 +541,9 @@
list_del_init(&card->link);
mutex_unlock(&card_mutex);
- /* Set up the dummy driver. */
+ /* Switch off most of the card driver interface. */
+ dummy_driver.free_iso_context = card->driver->free_iso_context;
+ dummy_driver.stop_iso = card->driver->stop_iso;
card->driver = &dummy_driver;
fw_destroy_nodes(card);
diff --git a/drivers/firewire/core-iso.c b/drivers/firewire/core-iso.c
index 28076c8..166f19c 100644
--- a/drivers/firewire/core-iso.c
+++ b/drivers/firewire/core-iso.c
@@ -71,7 +71,7 @@
for (j = 0; j < i; j++) {
address = page_private(buffer->pages[j]);
dma_unmap_page(card->device, address,
- PAGE_SIZE, DMA_TO_DEVICE);
+ PAGE_SIZE, direction);
__free_page(buffer->pages[j]);
}
kfree(buffer->pages);
@@ -80,6 +80,7 @@
return -ENOMEM;
}
+EXPORT_SYMBOL(fw_iso_buffer_init);
int fw_iso_buffer_map(struct fw_iso_buffer *buffer, struct vm_area_struct *vma)
{
@@ -107,13 +108,14 @@
for (i = 0; i < buffer->page_count; i++) {
address = page_private(buffer->pages[i]);
dma_unmap_page(card->device, address,
- PAGE_SIZE, DMA_TO_DEVICE);
+ PAGE_SIZE, buffer->direction);
__free_page(buffer->pages[i]);
}
kfree(buffer->pages);
buffer->pages = NULL;
}
+EXPORT_SYMBOL(fw_iso_buffer_destroy);
struct fw_iso_context *fw_iso_context_create(struct fw_card *card,
int type, int channel, int speed, size_t header_size,
@@ -136,6 +138,7 @@
return ctx;
}
+EXPORT_SYMBOL(fw_iso_context_create);
void fw_iso_context_destroy(struct fw_iso_context *ctx)
{
@@ -143,12 +146,14 @@
card->driver->free_iso_context(ctx);
}
+EXPORT_SYMBOL(fw_iso_context_destroy);
int fw_iso_context_start(struct fw_iso_context *ctx,
int cycle, int sync, int tags)
{
return ctx->card->driver->start_iso(ctx, cycle, sync, tags);
}
+EXPORT_SYMBOL(fw_iso_context_start);
int fw_iso_context_queue(struct fw_iso_context *ctx,
struct fw_iso_packet *packet,
@@ -159,11 +164,13 @@
return card->driver->queue_iso(ctx, packet, buffer, payload);
}
+EXPORT_SYMBOL(fw_iso_context_queue);
int fw_iso_context_stop(struct fw_iso_context *ctx)
{
return ctx->card->driver->stop_iso(ctx);
}
+EXPORT_SYMBOL(fw_iso_context_stop);
/*
* Isochronous bus resource management (channels, bandwidth), client side
diff --git a/drivers/firewire/core.h b/drivers/firewire/core.h
index 0a25a7b..c3cfc64 100644
--- a/drivers/firewire/core.h
+++ b/drivers/firewire/core.h
@@ -1,7 +1,6 @@
#ifndef _FIREWIRE_CORE_H
#define _FIREWIRE_CORE_H
-#include <linux/dma-mapping.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/idr.h>
@@ -97,17 +96,6 @@
int fw_compute_block_crc(u32 *block);
void fw_schedule_bm_work(struct fw_card *card, unsigned long delay);
-struct fw_descriptor {
- struct list_head link;
- size_t length;
- u32 immediate;
- u32 key;
- const u32 *data;
-};
-
-int fw_core_add_descriptor(struct fw_descriptor *desc);
-void fw_core_remove_descriptor(struct fw_descriptor *desc);
-
/* -cdev */
@@ -130,77 +118,7 @@
/* -iso */
-/*
- * The iso packet format allows for an immediate header/payload part
- * stored in 'header' immediately after the packet info plus an
- * indirect payload part that is pointer to by the 'payload' field.
- * Applications can use one or the other or both to implement simple
- * low-bandwidth streaming (e.g. audio) or more advanced
- * scatter-gather streaming (e.g. assembling video frame automatically).
- */
-struct fw_iso_packet {
- u16 payload_length; /* Length of indirect payload. */
- u32 interrupt:1; /* Generate interrupt on this packet */
- u32 skip:1; /* Set to not send packet at all. */
- u32 tag:2;
- u32 sy:4;
- u32 header_length:8; /* Length of immediate header. */
- u32 header[0];
-};
-
-#define FW_ISO_CONTEXT_TRANSMIT 0
-#define FW_ISO_CONTEXT_RECEIVE 1
-
-#define FW_ISO_CONTEXT_MATCH_TAG0 1
-#define FW_ISO_CONTEXT_MATCH_TAG1 2
-#define FW_ISO_CONTEXT_MATCH_TAG2 4
-#define FW_ISO_CONTEXT_MATCH_TAG3 8
-#define FW_ISO_CONTEXT_MATCH_ALL_TAGS 15
-
-/*
- * An iso buffer is just a set of pages mapped for DMA in the
- * specified direction. Since the pages are to be used for DMA, they
- * are not mapped into the kernel virtual address space. We store the
- * DMA address in the page private. The helper function
- * fw_iso_buffer_map() will map the pages into a given vma.
- */
-struct fw_iso_buffer {
- enum dma_data_direction direction;
- struct page **pages;
- int page_count;
-};
-
-typedef void (*fw_iso_callback_t)(struct fw_iso_context *context,
- u32 cycle, size_t header_length,
- void *header, void *data);
-
-struct fw_iso_context {
- struct fw_card *card;
- int type;
- int channel;
- int speed;
- size_t header_size;
- fw_iso_callback_t callback;
- void *callback_data;
-};
-
-int fw_iso_buffer_init(struct fw_iso_buffer *buffer, struct fw_card *card,
- int page_count, enum dma_data_direction direction);
int fw_iso_buffer_map(struct fw_iso_buffer *buffer, struct vm_area_struct *vma);
-void fw_iso_buffer_destroy(struct fw_iso_buffer *buffer, struct fw_card *card);
-
-struct fw_iso_context *fw_iso_context_create(struct fw_card *card,
- int type, int channel, int speed, size_t header_size,
- fw_iso_callback_t callback, void *callback_data);
-int fw_iso_context_queue(struct fw_iso_context *ctx,
- struct fw_iso_packet *packet,
- struct fw_iso_buffer *buffer,
- unsigned long payload);
-int fw_iso_context_start(struct fw_iso_context *ctx,
- int cycle, int sync, int tags);
-int fw_iso_context_stop(struct fw_iso_context *ctx);
-void fw_iso_context_destroy(struct fw_iso_context *ctx);
-
void fw_iso_resource_manage(struct fw_card *card, int generation,
u64 channels_mask, int *channel, int *bandwidth, bool allocate);
@@ -285,9 +203,4 @@
void fw_send_phy_config(struct fw_card *card,
int node_id, int generation, int gap_count);
-static inline int fw_stream_packet_destination_id(int tag, int channel, int sy)
-{
- return tag << 14 | channel << 8 | sy;
-}
-
#endif /* _FIREWIRE_CORE_H */
diff --git a/drivers/firewire/net.c b/drivers/firewire/net.c
new file mode 100644
index 0000000..a42209a
--- /dev/null
+++ b/drivers/firewire/net.c
@@ -0,0 +1,1655 @@
+/*
+ * IPv4 over IEEE 1394, per RFC 2734
+ *
+ * Copyright (C) 2009 Jay Fenlason <fenlason@redhat.com>
+ *
+ * based on eth1394 by Ben Collins et al
+ */
+
+#include <linux/bug.h>
+#include <linux/device.h>
+#include <linux/ethtool.h>
+#include <linux/firewire.h>
+#include <linux/firewire-constants.h>
+#include <linux/highmem.h>
+#include <linux/in.h>
+#include <linux/ip.h>
+#include <linux/jiffies.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/mutex.h>
+#include <linux/netdevice.h>
+#include <linux/skbuff.h>
+#include <linux/spinlock.h>
+
+#include <asm/unaligned.h>
+#include <net/arp.h>
+
+#define FWNET_MAX_FRAGMENTS 25 /* arbitrary limit */
+#define FWNET_ISO_PAGE_COUNT (PAGE_SIZE < 16 * 1024 ? 4 : 2)
+
+#define IEEE1394_BROADCAST_CHANNEL 31
+#define IEEE1394_ALL_NODES (0xffc0 | 0x003f)
+#define IEEE1394_MAX_PAYLOAD_S100 512
+#define FWNET_NO_FIFO_ADDR (~0ULL)
+
+#define IANA_SPECIFIER_ID 0x00005eU
+#define RFC2734_SW_VERSION 0x000001U
+
+#define IEEE1394_GASP_HDR_SIZE 8
+
+#define RFC2374_UNFRAG_HDR_SIZE 4
+#define RFC2374_FRAG_HDR_SIZE 8
+#define RFC2374_FRAG_OVERHEAD 4
+
+#define RFC2374_HDR_UNFRAG 0 /* unfragmented */
+#define RFC2374_HDR_FIRSTFRAG 1 /* first fragment */
+#define RFC2374_HDR_LASTFRAG 2 /* last fragment */
+#define RFC2374_HDR_INTFRAG 3 /* interior fragment */
+
+#define RFC2734_HW_ADDR_LEN 16
+
+struct rfc2734_arp {
+ __be16 hw_type; /* 0x0018 */
+ __be16 proto_type; /* 0x0806 */
+ u8 hw_addr_len; /* 16 */
+ u8 ip_addr_len; /* 4 */
+ __be16 opcode; /* ARP Opcode */
+ /* Above is exactly the same format as struct arphdr */
+
+ __be64 s_uniq_id; /* Sender's 64bit EUI */
+ u8 max_rec; /* Sender's max packet size */
+ u8 sspd; /* Sender's max speed */
+ __be16 fifo_hi; /* hi 16bits of sender's FIFO addr */
+ __be32 fifo_lo; /* lo 32bits of sender's FIFO addr */
+ __be32 sip; /* Sender's IP Address */
+ __be32 tip; /* IP Address of requested hw addr */
+} __attribute__((packed));
+
+/* This header format is specific to this driver implementation. */
+#define FWNET_ALEN 8
+#define FWNET_HLEN 10
+struct fwnet_header {
+ u8 h_dest[FWNET_ALEN]; /* destination address */
+ __be16 h_proto; /* packet type ID field */
+} __attribute__((packed));
+
+/* IPv4 and IPv6 encapsulation header */
+struct rfc2734_header {
+ u32 w0;
+ u32 w1;
+};
+
+#define fwnet_get_hdr_lf(h) (((h)->w0 & 0xc0000000) >> 30)
+#define fwnet_get_hdr_ether_type(h) (((h)->w0 & 0x0000ffff))
+#define fwnet_get_hdr_dg_size(h) (((h)->w0 & 0x0fff0000) >> 16)
+#define fwnet_get_hdr_fg_off(h) (((h)->w0 & 0x00000fff))
+#define fwnet_get_hdr_dgl(h) (((h)->w1 & 0xffff0000) >> 16)
+
+#define fwnet_set_hdr_lf(lf) ((lf) << 30)
+#define fwnet_set_hdr_ether_type(et) (et)
+#define fwnet_set_hdr_dg_size(dgs) ((dgs) << 16)
+#define fwnet_set_hdr_fg_off(fgo) (fgo)
+
+#define fwnet_set_hdr_dgl(dgl) ((dgl) << 16)
+
+static inline void fwnet_make_uf_hdr(struct rfc2734_header *hdr,
+ unsigned ether_type)
+{
+ hdr->w0 = fwnet_set_hdr_lf(RFC2374_HDR_UNFRAG)
+ | fwnet_set_hdr_ether_type(ether_type);
+}
+
+static inline void fwnet_make_ff_hdr(struct rfc2734_header *hdr,
+ unsigned ether_type, unsigned dg_size, unsigned dgl)
+{
+ hdr->w0 = fwnet_set_hdr_lf(RFC2374_HDR_FIRSTFRAG)
+ | fwnet_set_hdr_dg_size(dg_size)
+ | fwnet_set_hdr_ether_type(ether_type);
+ hdr->w1 = fwnet_set_hdr_dgl(dgl);
+}
+
+static inline void fwnet_make_sf_hdr(struct rfc2734_header *hdr,
+ unsigned lf, unsigned dg_size, unsigned fg_off, unsigned dgl)
+{
+ hdr->w0 = fwnet_set_hdr_lf(lf)
+ | fwnet_set_hdr_dg_size(dg_size)
+ | fwnet_set_hdr_fg_off(fg_off);
+ hdr->w1 = fwnet_set_hdr_dgl(dgl);
+}
+
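+/*
+ * Plugging illustrative values into the helpers above,
+ * fwnet_make_ff_hdr(&hdr, 0x0800, 1500, 7) yields
+ *   w0 = (RFC2374_HDR_FIRSTFRAG << 30) | (1500 << 16) | 0x0800 = 0x45dc0800
+ *   w1 = 7 << 16 = 0x00070000
+ * and the fwnet_get_hdr_*() accessors recover lf = 1, dg_size = 1500,
+ * ether_type = 0x0800 and dgl = 7 from those two words.
+ */
+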
+/* This list keeps track of what parts of the datagram have been filled in */
+struct fwnet_fragment_info {
+ struct list_head fi_link;
+ u16 offset;
+ u16 len;
+};
+
+struct fwnet_partial_datagram {
+ struct list_head pd_link;
+ struct list_head fi_list;
+ struct sk_buff *skb;
+ /* FIXME Why not use skb->data? */
+ char *pbuf;
+ u16 datagram_label;
+ u16 ether_type;
+ u16 datagram_size;
+};
+
+static DEFINE_MUTEX(fwnet_device_mutex);
+static LIST_HEAD(fwnet_device_list);
+
+struct fwnet_device {
+ struct list_head dev_link;
+ spinlock_t lock;
+ enum {
+ FWNET_BROADCAST_ERROR,
+ FWNET_BROADCAST_RUNNING,
+ FWNET_BROADCAST_STOPPED,
+ } broadcast_state;
+ struct fw_iso_context *broadcast_rcv_context;
+ struct fw_iso_buffer broadcast_rcv_buffer;
+ void **broadcast_rcv_buffer_ptrs;
+ unsigned broadcast_rcv_next_ptr;
+ unsigned num_broadcast_rcv_ptrs;
+ unsigned rcv_buffer_size;
+ /*
+ * This value is the maximum unfragmented datagram size that can be
+ * sent by the hardware. It already has the GASP overhead and the
+ * unfragmented datagram header overhead calculated into it.
+ */
+ unsigned broadcast_xmt_max_payload;
+ u16 broadcast_xmt_datagramlabel;
+
+ /*
+ * The CSR address that remote nodes must send datagrams to for us to
+ * receive them.
+ */
+ struct fw_address_handler handler;
+ u64 local_fifo;
+
+ /* List of packets to be sent */
+ struct list_head packet_list;
+ /*
+ * List of packets that were broadcasted. When we get an ISO interrupt
+ * one of them has been sent
+ */
+ struct list_head broadcasted_list;
+ /* List of packets that have been sent but not yet acked */
+ struct list_head sent_list;
+
+ struct list_head peer_list;
+ struct fw_card *card;
+ struct net_device *netdev;
+};
+
+struct fwnet_peer {
+ struct list_head peer_link;
+ struct fwnet_device *dev;
+ u64 guid;
+ u64 fifo;
+
+ /* guarded by dev->lock */
+ struct list_head pd_list; /* received partial datagrams */
+ unsigned pdg_size; /* pd_list size */
+
+ u16 datagram_label; /* outgoing datagram label */
+ unsigned max_payload; /* includes RFC2374_FRAG_HDR_SIZE overhead */
+ int node_id;
+ int generation;
+ unsigned speed;
+};
+
+/* This is our task struct. It's used for the packet complete callback. */
+struct fwnet_packet_task {
+ /*
+ * ptask can actually be on dev->packet_list, dev->broadcasted_list,
+ * or dev->sent_list depending on its current state.
+ */
+ struct list_head pt_link;
+ struct fw_transaction transaction;
+ struct rfc2734_header hdr;
+ struct sk_buff *skb;
+ struct fwnet_device *dev;
+
+ int outstanding_pkts;
+ unsigned max_payload;
+ u64 fifo_addr;
+ u16 dest_node;
+ u8 generation;
+ u8 speed;
+};
+
+/*
+ * saddr == NULL means use device source address.
+ * daddr == NULL means leave destination address (eg unresolved arp).
+ */
+static int fwnet_header_create(struct sk_buff *skb, struct net_device *net,
+ unsigned short type, const void *daddr,
+ const void *saddr, unsigned len)
+{
+ struct fwnet_header *h;
+
+ h = (struct fwnet_header *)skb_push(skb, sizeof(*h));
+ put_unaligned_be16(type, &h->h_proto);
+
+ if (net->flags & (IFF_LOOPBACK | IFF_NOARP)) {
+ memset(h->h_dest, 0, net->addr_len);
+
+ return net->hard_header_len;
+ }
+
+ if (daddr) {
+ memcpy(h->h_dest, daddr, net->addr_len);
+
+ return net->hard_header_len;
+ }
+
+ return -net->hard_header_len;
+}
+
+static int fwnet_header_rebuild(struct sk_buff *skb)
+{
+ struct fwnet_header *h = (struct fwnet_header *)skb->data;
+
+ if (get_unaligned_be16(&h->h_proto) == ETH_P_IP)
+ return arp_find((unsigned char *)&h->h_dest, skb);
+
+ fw_notify("%s: unable to resolve type %04x addresses\n",
+ skb->dev->name, be16_to_cpu(h->h_proto));
+ return 0;
+}
+
+static int fwnet_header_cache(const struct neighbour *neigh,
+ struct hh_cache *hh)
+{
+ struct net_device *net;
+ struct fwnet_header *h;
+
+ if (hh->hh_type == cpu_to_be16(ETH_P_802_3))
+ return -1;
+ net = neigh->dev;
+ h = (struct fwnet_header *)((u8 *)hh->hh_data + 16 - sizeof(*h));
+ h->h_proto = hh->hh_type;
+ memcpy(h->h_dest, neigh->ha, net->addr_len);
+ hh->hh_len = FWNET_HLEN;
+
+ return 0;
+}
+
+/* Called by Address Resolution module to notify changes in address. */
+static void fwnet_header_cache_update(struct hh_cache *hh,
+ const struct net_device *net, const unsigned char *haddr)
+{
+ memcpy((u8 *)hh->hh_data + 16 - FWNET_HLEN, haddr, net->addr_len);
+}
+
+static int fwnet_header_parse(const struct sk_buff *skb, unsigned char *haddr)
+{
+ memcpy(haddr, skb->dev->dev_addr, FWNET_ALEN);
+
+ return FWNET_ALEN;
+}
+
+static const struct header_ops fwnet_header_ops = {
+ .create = fwnet_header_create,
+ .rebuild = fwnet_header_rebuild,
+ .cache = fwnet_header_cache,
+ .cache_update = fwnet_header_cache_update,
+ .parse = fwnet_header_parse,
+};
+
+/* FIXME: is this correct for all cases? */
+static bool fwnet_frag_overlap(struct fwnet_partial_datagram *pd,
+ unsigned offset, unsigned len)
+{
+ struct fwnet_fragment_info *fi;
+ unsigned end = offset + len;
+
+ list_for_each_entry(fi, &pd->fi_list, fi_link)
+ if (offset < fi->offset + fi->len && end > fi->offset)
+ return true;
+
+ return false;
+}
+
+/* Assumes that new fragment does not overlap any existing fragments */
+static struct fwnet_fragment_info *fwnet_frag_new(
+ struct fwnet_partial_datagram *pd, unsigned offset, unsigned len)
+{
+ struct fwnet_fragment_info *fi, *fi2, *new;
+ struct list_head *list;
+
+ list = &pd->fi_list;
+ list_for_each_entry(fi, &pd->fi_list, fi_link) {
+ if (fi->offset + fi->len == offset) {
+ /* The new fragment can be tacked on to the end */
+ /* Did the new fragment plug a hole? */
+ fi2 = list_entry(fi->fi_link.next,
+ struct fwnet_fragment_info, fi_link);
+ if (fi->offset + fi->len == fi2->offset) {
+ /* glue fragments together */
+ fi->len += len + fi2->len;
+ list_del(&fi2->fi_link);
+ kfree(fi2);
+ } else {
+ fi->len += len;
+ }
+
+ return fi;
+ }
+ if (offset + len == fi->offset) {
+ /* The new fragment can be tacked on to the beginning */
+ /* Did the new fragment plug a hole? */
+ fi2 = list_entry(fi->fi_link.prev,
+ struct fwnet_fragment_info, fi_link);
+ if (fi2->offset + fi2->len == fi->offset) {
+ /* glue fragments together */
+ fi2->len += fi->len + len;
+ list_del(&fi->fi_link);
+ kfree(fi);
+
+ return fi2;
+ }
+ fi->offset = offset;
+ fi->len += len;
+
+ return fi;
+ }
+ if (offset > fi->offset + fi->len) {
+ list = &fi->fi_link;
+ break;
+ }
+ if (offset + len < fi->offset) {
+ list = fi->fi_link.prev;
+ break;
+ }
+ }
+
+ new = kmalloc(sizeof(*new), GFP_ATOMIC);
+ if (!new) {
+ fw_error("out of memory\n");
+ return NULL;
+ }
+
+ new->offset = offset;
+ new->len = len;
+ list_add(&new->fi_link, list);
+
+ return new;
+}
+
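+/*
+ * Example: if fi_list currently holds a single fragment covering bytes
+ * [0, 400) and a fragment with offset 400, len 400 arrives, the first
+ * branch in fwnet_frag_new() above simply extends that entry to
+ * [0, 800); a later fragment at offset 1200, len 300 matches none of
+ * the adjacency checks and is inserted as a new entry after it.
+ * fwnet_pd_is_complete() below treats the datagram as complete once
+ * the first entry spans datagram_size bytes.
+ */
+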
+static struct fwnet_partial_datagram *fwnet_pd_new(struct net_device *net,
+ struct fwnet_peer *peer, u16 datagram_label, unsigned dg_size,
+ void *frag_buf, unsigned frag_off, unsigned frag_len)
+{
+ struct fwnet_partial_datagram *new;
+ struct fwnet_fragment_info *fi;
+
+ new = kmalloc(sizeof(*new), GFP_ATOMIC);
+ if (!new)
+ goto fail;
+
+ INIT_LIST_HEAD(&new->fi_list);
+ fi = fwnet_frag_new(new, frag_off, frag_len);
+ if (fi == NULL)
+ goto fail_w_new;
+
+ new->datagram_label = datagram_label;
+ new->datagram_size = dg_size;
+ new->skb = dev_alloc_skb(dg_size + net->hard_header_len + 15);
+ if (new->skb == NULL)
+ goto fail_w_fi;
+
+ skb_reserve(new->skb, (net->hard_header_len + 15) & ~15);
+ new->pbuf = skb_put(new->skb, dg_size);
+ memcpy(new->pbuf + frag_off, frag_buf, frag_len);
+ list_add_tail(&new->pd_link, &peer->pd_list);
+
+ return new;
+
+fail_w_fi:
+ kfree(fi);
+fail_w_new:
+ kfree(new);
+fail:
+ fw_error("out of memory\n");
+
+ return NULL;
+}
+
+static struct fwnet_partial_datagram *fwnet_pd_find(struct fwnet_peer *peer,
+ u16 datagram_label)
+{
+ struct fwnet_partial_datagram *pd;
+
+ list_for_each_entry(pd, &peer->pd_list, pd_link)
+ if (pd->datagram_label == datagram_label)
+ return pd;
+
+ return NULL;
+}
+
+
+static void fwnet_pd_delete(struct fwnet_partial_datagram *old)
+{
+ struct fwnet_fragment_info *fi, *n;
+
+ list_for_each_entry_safe(fi, n, &old->fi_list, fi_link)
+ kfree(fi);
+
+ list_del(&old->pd_link);
+ dev_kfree_skb_any(old->skb);
+ kfree(old);
+}
+
+static bool fwnet_pd_update(struct fwnet_peer *peer,
+ struct fwnet_partial_datagram *pd, void *frag_buf,
+ unsigned frag_off, unsigned frag_len)
+{
+ if (fwnet_frag_new(pd, frag_off, frag_len) == NULL)
+ return false;
+
+ memcpy(pd->pbuf + frag_off, frag_buf, frag_len);
+
+ /*
+ * Move list entry to beginning of list so that oldest partial
+ * datagrams percolate to the end of the list
+ */
+ list_move_tail(&pd->pd_link, &peer->pd_list);
+
+ return true;
+}
+
+static bool fwnet_pd_is_complete(struct fwnet_partial_datagram *pd)
+{
+ struct fwnet_fragment_info *fi;
+
+ fi = list_entry(pd->fi_list.next, struct fwnet_fragment_info, fi_link);
+
+ return fi->len == pd->datagram_size;
+}
+
+/* caller must hold dev->lock */
+static struct fwnet_peer *fwnet_peer_find_by_guid(struct fwnet_device *dev,
+ u64 guid)
+{
+ struct fwnet_peer *peer;
+
+ list_for_each_entry(peer, &dev->peer_list, peer_link)
+ if (peer->guid == guid)
+ return peer;
+
+ return NULL;
+}
+
+/* caller must hold dev->lock */
+static struct fwnet_peer *fwnet_peer_find_by_node_id(struct fwnet_device *dev,
+ int node_id, int generation)
+{
+ struct fwnet_peer *peer;
+
+ list_for_each_entry(peer, &dev->peer_list, peer_link)
+ if (peer->node_id == node_id &&
+ peer->generation == generation)
+ return peer;
+
+ return NULL;
+}
+
+/* See IEEE 1394-2008 table 6-4, table 8-8, table 16-18. */
+static unsigned fwnet_max_payload(unsigned max_rec, unsigned speed)
+{
+ max_rec = min(max_rec, speed + 8);
+ max_rec = min(max_rec, 0xbU); /* <= 4096 */
+ if (max_rec < 8) {
+ fw_notify("max_rec %x out of range\n", max_rec);
+ max_rec = 8;
+ }
+
+ return (1 << (max_rec + 1)) - RFC2374_FRAG_HDR_SIZE;
+}
+
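+/*
+ * Example: a peer advertising max_rec = 8 at speed 2 (S400) gets
+ * min(8, 2 + 8) = 8, i.e. (1 << 9) - RFC2374_FRAG_HDR_SIZE =
+ * 512 - 8 = 504 bytes of payload per fragment; the 0xb clamp caps
+ * the result at 4096 - 8 = 4088 for larger max_rec values.
+ */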
+
+static int fwnet_finish_incoming_packet(struct net_device *net,
+ struct sk_buff *skb, u16 source_node_id,
+ bool is_broadcast, u16 ether_type)
+{
+ struct fwnet_device *dev;
+ static const __be64 broadcast_hw = cpu_to_be64(~0ULL);
+ int status;
+ __be64 guid;
+
+ dev = netdev_priv(net);
+ /* Write metadata, and then pass to the receive level */
+ skb->dev = net;
+ skb->ip_summed = CHECKSUM_UNNECESSARY; /* don't check it */
+
+ /*
+ * Parse the encapsulation header. This actually does the job of
+ * converting to an ethernet frame header, as well as arp
+ * conversion if needed. ARP conversion is easier in this
+ * direction, since we are using ethernet as our backend.
+ */
+ /*
+ * If this is an ARP packet, convert it. First, we want to make
+ * use of some of the fields, since they tell us a little bit
+ * about the sending machine.
+ */
+ if (ether_type == ETH_P_ARP) {
+ struct rfc2734_arp *arp1394;
+ struct arphdr *arp;
+ unsigned char *arp_ptr;
+ u64 fifo_addr;
+ u64 peer_guid;
+ unsigned sspd;
+ u16 max_payload;
+ struct fwnet_peer *peer;
+ unsigned long flags;
+
+ arp1394 = (struct rfc2734_arp *)skb->data;
+ arp = (struct arphdr *)skb->data;
+ arp_ptr = (unsigned char *)(arp + 1);
+ peer_guid = get_unaligned_be64(&arp1394->s_uniq_id);
+ fifo_addr = (u64)get_unaligned_be16(&arp1394->fifo_hi) << 32
+ | get_unaligned_be32(&arp1394->fifo_lo);
+
+ sspd = arp1394->sspd;
+ /* Sanity check. OS X 10.3 PPC reportedly sends 131. */
+ if (sspd > SCODE_3200) {
+ fw_notify("sspd %x out of range\n", sspd);
+ sspd = SCODE_3200;
+ }
+ max_payload = fwnet_max_payload(arp1394->max_rec, sspd);
+
+ spin_lock_irqsave(&dev->lock, flags);
+ peer = fwnet_peer_find_by_guid(dev, peer_guid);
+ if (peer) {
+ peer->fifo = fifo_addr;
+
+ if (peer->speed > sspd)
+ peer->speed = sspd;
+ if (peer->max_payload > max_payload)
+ peer->max_payload = max_payload;
+ }
+ spin_unlock_irqrestore(&dev->lock, flags);
+
+ if (!peer) {
+ fw_notify("No peer for ARP packet from %016llx\n",
+ (unsigned long long)peer_guid);
+ goto failed_proto;
+ }
+
+ /*
+ * Now that we're done with the 1394 specific stuff, we'll
+ * need to alter some of the data. Believe it or not, all
+ * that needs to be done is that the sender's IP address is
+ * moved, the destination hardware address is stuffed in,
+ * and the hardware address length is set to 8.
+ *
+ * IMPORTANT: The code below overwrites 1394 specific data
+ * needed above so keep the munging of the data for the
+ * higher level IP stack last.
+ */
+
+ arp->ar_hln = 8;
+ /* skip over sender unique id */
+ arp_ptr += arp->ar_hln;
+ /* move sender IP addr */
+ put_unaligned(arp1394->sip, (u32 *)arp_ptr);
+ /* skip over sender IP addr */
+ arp_ptr += arp->ar_pln;
+
+ if (arp->ar_op == htons(ARPOP_REQUEST))
+ memset(arp_ptr, 0, sizeof(u64));
+ else
+ memcpy(arp_ptr, net->dev_addr, sizeof(u64));
+ }
+
+ /* Now add the ethernet header. */
+ guid = cpu_to_be64(dev->card->guid);
+ if (dev_hard_header(skb, net, ether_type,
+ is_broadcast ? &broadcast_hw : &guid,
+ NULL, skb->len) >= 0) {
+ struct fwnet_header *eth;
+ u16 *rawp;
+ __be16 protocol;
+
+ skb_reset_mac_header(skb);
+ skb_pull(skb, sizeof(*eth));
+ eth = (struct fwnet_header *)skb_mac_header(skb);
+ if (*eth->h_dest & 1) {
+ if (memcmp(eth->h_dest, net->broadcast,
+ net->addr_len) == 0)
+ skb->pkt_type = PACKET_BROADCAST;
+#if 0
+ else
+ skb->pkt_type = PACKET_MULTICAST;
+#endif
+ } else {
+ if (memcmp(eth->h_dest, net->dev_addr, net->addr_len))
+ skb->pkt_type = PACKET_OTHERHOST;
+ }
+ if (ntohs(eth->h_proto) >= 1536) {
+ protocol = eth->h_proto;
+ } else {
+ rawp = (u16 *)skb->data;
+ if (*rawp == 0xffff)
+ protocol = htons(ETH_P_802_3);
+ else
+ protocol = htons(ETH_P_802_2);
+ }
+ skb->protocol = protocol;
+ }
+ status = netif_rx(skb);
+ if (status == NET_RX_DROP) {
+ net->stats.rx_errors++;
+ net->stats.rx_dropped++;
+ } else {
+ net->stats.rx_packets++;
+ net->stats.rx_bytes += skb->len;
+ }
+ if (netif_queue_stopped(net))
+ netif_wake_queue(net);
+
+ return 0;
+
+ failed_proto:
+ net->stats.rx_errors++;
+ net->stats.rx_dropped++;
+
+ dev_kfree_skb_any(skb);
+ if (netif_queue_stopped(net))
+ netif_wake_queue(net);
+
+ net->last_rx = jiffies;
+
+ return 0;
+}
+
+static int fwnet_incoming_packet(struct fwnet_device *dev, __be32 *buf, int len,
+ int source_node_id, int generation,
+ bool is_broadcast)
+{
+ struct sk_buff *skb;
+ struct net_device *net = dev->netdev;
+ struct rfc2734_header hdr;
+ unsigned lf;
+ unsigned long flags;
+ struct fwnet_peer *peer;
+ struct fwnet_partial_datagram *pd;
+ int fg_off;
+ int dg_size;
+ u16 datagram_label;
+ int retval;
+ u16 ether_type;
+
+ hdr.w0 = be32_to_cpu(buf[0]);
+ lf = fwnet_get_hdr_lf(&hdr);
+ if (lf == RFC2374_HDR_UNFRAG) {
+ /*
+ * An unfragmented datagram has been received by the ieee1394
+ * bus. Build an skbuff around it so we can pass it to the
+ * high level network layer.
+ */
+ ether_type = fwnet_get_hdr_ether_type(&hdr);
+ buf++;
+ len -= RFC2374_UNFRAG_HDR_SIZE;
+
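+ /* Leave room for the hardware header, 16-byte aligned. */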
+ skb = dev_alloc_skb(len + net->hard_header_len + 15);
+ if (unlikely(!skb)) {
+ fw_error("out of memory\n");
+ net->stats.rx_dropped++;
+
+ return -1;
+ }
+ skb_reserve(skb, (net->hard_header_len + 15) & ~15);
+ memcpy(skb_put(skb, len), buf, len);
+
+ return fwnet_finish_incoming_packet(net, skb, source_node_id,
+ is_broadcast, ether_type);
+ }
+ /* A datagram fragment has been received; now the fun begins. */
+ hdr.w1 = ntohl(buf[1]);
+ buf += 2;
+ len -= RFC2374_FRAG_HDR_SIZE;
+ if (lf == RFC2374_HDR_FIRSTFRAG) {
+ ether_type = fwnet_get_hdr_ether_type(&hdr);
+ fg_off = 0;
+ } else {
+ ether_type = 0;
+ fg_off = fwnet_get_hdr_fg_off(&hdr);
+ }
+ datagram_label = fwnet_get_hdr_dgl(&hdr);
+ dg_size = fwnet_get_hdr_dg_size(&hdr); /* ??? + 1 */
+
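+ /*
+ * Reassembly state is kept per peer and per datagram label;
+ * look both up under dev->lock.
+ */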
+ spin_lock_irqsave(&dev->lock, flags);
+
+ peer = fwnet_peer_find_by_node_id(dev, source_node_id, generation);
+ if (!peer)
+ goto bad_proto;
+
+ pd = fwnet_pd_find(peer, datagram_label);
+ if (pd == NULL) {
+ while (peer->pdg_size >= FWNET_MAX_FRAGMENTS) {
+ /* remove the oldest */
+ fwnet_pd_delete(list_first_entry(&peer->pd_list,
+ struct fwnet_partial_datagram, pd_link));
+ peer->pdg_size--;
+ }
+ pd = fwnet_pd_new(net, peer, datagram_label,
+ dg_size, buf, fg_off, len);
+ if (pd == NULL) {
+ retval = -ENOMEM;
+ goto bad_proto;
+ }
+ peer->pdg_size++;
+ } else {
+ if (fwnet_frag_overlap(pd, fg_off, len) ||
+ pd->datagram_size != dg_size) {
+ /*
+ * Differing datagram sizes or overlapping fragments,
+ * discard old datagram and start a new one.
+ */
+ fwnet_pd_delete(pd);
+ pd = fwnet_pd_new(net, peer, datagram_label,
+ dg_size, buf, fg_off, len);
+ if (pd == NULL) {
+ retval = -ENOMEM;
+ peer->pdg_size--;
+ goto bad_proto;
+ }
+ } else {
+ if (!fwnet_pd_update(peer, pd, buf, fg_off, len)) {
+ /*
+ * Couldn't store the fragment, so discard
+ * the whole datagram now.
+ */
+ fwnet_pd_delete(pd);
+ peer->pdg_size--;
+ goto bad_proto;
+ }
+ }
+ } /* new datagram or add to existing one */
+
+ if (lf == RFC2374_HDR_FIRSTFRAG)
+ pd->ether_type = ether_type;
+
+ if (fwnet_pd_is_complete(pd)) {
+ ether_type = pd->ether_type;
+ peer->pdg_size--;
+ skb = skb_get(pd->skb);
+ fwnet_pd_delete(pd);
+
+ spin_unlock_irqrestore(&dev->lock, flags);
+
+ return fwnet_finish_incoming_packet(net, skb, source_node_id,
+ false, ether_type);
+ }
+ /*
+ * Datagram is not complete; we're done for the
+ * moment.
+ */
+ spin_unlock_irqrestore(&dev->lock, flags);
+
+ return 0;
+
+ bad_proto:
+ spin_unlock_irqrestore(&dev->lock, flags);
+
+ if (netif_queue_stopped(net))
+ netif_wake_queue(net);
+
+ return 0;
+}
+
+static void fwnet_receive_packet(struct fw_card *card, struct fw_request *r,
+ int tcode, int destination, int source, int generation,
+ int speed, unsigned long long offset, void *payload,
+ size_t length, void *callback_data)
+{
+ struct fwnet_device *dev = callback_data;
+ int rcode;
+
+ if (destination == IEEE1394_ALL_NODES) {
+ kfree(r);
+
+ return;
+ }
+
+ if (offset != dev->handler.offset)
+ rcode = RCODE_ADDRESS_ERROR;
+ else if (tcode != TCODE_WRITE_BLOCK_REQUEST)
+ rcode = RCODE_TYPE_ERROR;
+ else if (fwnet_incoming_packet(dev, payload, length,
+ source, generation, false) != 0) {
+ fw_error("Incoming packet failure\n");
+ rcode = RCODE_CONFLICT_ERROR;
+ } else
+ rcode = RCODE_COMPLETE;
+
+ fw_send_response(card, r, rcode);
+}
+
+static void fwnet_receive_broadcast(struct fw_iso_context *context,
+ u32 cycle, size_t header_length, void *header, void *data)
+{
+ struct fwnet_device *dev;
+ struct fw_iso_packet packet;
+ struct fw_card *card;
+ __be16 *hdr_ptr;
+ __be32 *buf_ptr;
+ int retval;
+ u32 length;
+ u16 source_node_id;
+ u32 specifier_id;
+ u32 ver;
+ unsigned long offset;
+ unsigned long flags;
+
+ dev = data;
+ card = dev->card;
+ hdr_ptr = header;
+ length = be16_to_cpup(hdr_ptr);
+
+ spin_lock_irqsave(&dev->lock, flags);
+
+ offset = dev->rcv_buffer_size * dev->broadcast_rcv_next_ptr;
+ buf_ptr = dev->broadcast_rcv_buffer_ptrs[dev->broadcast_rcv_next_ptr++];
+ if (dev->broadcast_rcv_next_ptr == dev->num_broadcast_rcv_ptrs)
+ dev->broadcast_rcv_next_ptr = 0;
+
+ spin_unlock_irqrestore(&dev->lock, flags);
+
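+ /*
+ * The GASP header fills the first two quadlets: source node ID
+ * and the upper 16 bits of the specifier ID in the first, the
+ * low 8 bits of the specifier ID and the 24-bit version in the
+ * second.
+ */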
+ specifier_id = (be32_to_cpu(buf_ptr[0]) & 0xffff) << 8
+ | (be32_to_cpu(buf_ptr[1]) & 0xff000000) >> 24;
+ ver = be32_to_cpu(buf_ptr[1]) & 0xffffff;
+ source_node_id = be32_to_cpu(buf_ptr[0]) >> 16;
+
+ if (specifier_id == IANA_SPECIFIER_ID && ver == RFC2734_SW_VERSION) {
+ buf_ptr += 2;
+ length -= IEEE1394_GASP_HDR_SIZE;
+ fwnet_incoming_packet(dev, buf_ptr, length,
+ source_node_id, -1, true);
+ }
+
+ packet.payload_length = dev->rcv_buffer_size;
+ packet.interrupt = 1;
+ packet.skip = 0;
+ packet.tag = 3;
+ packet.sy = 0;
+ packet.header_length = IEEE1394_GASP_HDR_SIZE;
+
+ spin_lock_irqsave(&dev->lock, flags);
+
+ retval = fw_iso_context_queue(dev->broadcast_rcv_context, &packet,
+ &dev->broadcast_rcv_buffer, offset);
+
+ spin_unlock_irqrestore(&dev->lock, flags);
+
+ if (retval < 0)
+ fw_error("requeue failed\n");
+}
+
+static struct kmem_cache *fwnet_packet_task_cache;
+
+static int fwnet_send_packet(struct fwnet_packet_task *ptask);
+
+static void fwnet_transmit_packet_done(struct fwnet_packet_task *ptask)
+{
+ struct fwnet_device *dev;
+ unsigned long flags;
+
+ dev = ptask->dev;
+
+ spin_lock_irqsave(&dev->lock, flags);
+ list_del(&ptask->pt_link);
+ spin_unlock_irqrestore(&dev->lock, flags);
+
+ ptask->outstanding_pkts--; /* FIXME access inside lock */
+
+ if (ptask->outstanding_pkts > 0) {
+ u16 dg_size;
+ u16 fg_off;
+ u16 datagram_label;
+ u16 lf;
+ struct sk_buff *skb;
+
+ /* Update the ptask to point to the next fragment and send it */
+ lf = fwnet_get_hdr_lf(&ptask->hdr);
+ switch (lf) {
+ case RFC2374_HDR_LASTFRAG:
+ case RFC2374_HDR_UNFRAG:
+ default:
+ fw_error("Outstanding packet %x lf %x, header %x,%x\n",
+ ptask->outstanding_pkts, lf, ptask->hdr.w0,
+ ptask->hdr.w1);
+ BUG();
+
+ case RFC2374_HDR_FIRSTFRAG:
+ /* Set frag type here for future interior fragments */
+ dg_size = fwnet_get_hdr_dg_size(&ptask->hdr);
+ fg_off = ptask->max_payload - RFC2374_FRAG_HDR_SIZE;
+ datagram_label = fwnet_get_hdr_dgl(&ptask->hdr);
+ break;
+
+ case RFC2374_HDR_INTFRAG:
+ dg_size = fwnet_get_hdr_dg_size(&ptask->hdr);
+ fg_off = fwnet_get_hdr_fg_off(&ptask->hdr)
+ + ptask->max_payload - RFC2374_FRAG_HDR_SIZE;
+ datagram_label = fwnet_get_hdr_dgl(&ptask->hdr);
+ break;
+ }
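+ /* Strip the fragment just sent and queue up the next one. */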
+ skb = ptask->skb;
+ skb_pull(skb, ptask->max_payload);
+ if (ptask->outstanding_pkts > 1) {
+ fwnet_make_sf_hdr(&ptask->hdr, RFC2374_HDR_INTFRAG,
+ dg_size, fg_off, datagram_label);
+ } else {
+ fwnet_make_sf_hdr(&ptask->hdr, RFC2374_HDR_LASTFRAG,
+ dg_size, fg_off, datagram_label);
+ ptask->max_payload = skb->len + RFC2374_FRAG_HDR_SIZE;
+ }
+ fwnet_send_packet(ptask);
+ } else {
+ dev_kfree_skb_any(ptask->skb);
+ kmem_cache_free(fwnet_packet_task_cache, ptask);
+ }
+}
+
+static void fwnet_write_complete(struct fw_card *card, int rcode,
+ void *payload, size_t length, void *data)
+{
+ struct fwnet_packet_task *ptask;
+
+ ptask = data;
+
+ if (rcode == RCODE_COMPLETE)
+ fwnet_transmit_packet_done(ptask);
+ else
+ fw_error("fwnet_write_complete: failed: %x\n", rcode);
+ /* ??? error recovery */
+}
+
+static int fwnet_send_packet(struct fwnet_packet_task *ptask)
+{
+ struct fwnet_device *dev;
+ unsigned tx_len;
+ struct rfc2734_header *bufhdr;
+ unsigned long flags;
+
+ dev = ptask->dev;
+ tx_len = ptask->max_payload;
+ switch (fwnet_get_hdr_lf(&ptask->hdr)) {
+ case RFC2374_HDR_UNFRAG:
+ bufhdr = (struct rfc2734_header *)
+ skb_push(ptask->skb, RFC2374_UNFRAG_HDR_SIZE);
+ put_unaligned_be32(ptask->hdr.w0, &bufhdr->w0);
+ break;
+
+ case RFC2374_HDR_FIRSTFRAG:
+ case RFC2374_HDR_INTFRAG:
+ case RFC2374_HDR_LASTFRAG:
+ bufhdr = (struct rfc2734_header *)
+ skb_push(ptask->skb, RFC2374_FRAG_HDR_SIZE);
+ put_unaligned_be32(ptask->hdr.w0, &bufhdr->w0);
+ put_unaligned_be32(ptask->hdr.w1, &bufhdr->w1);
+ break;
+
+ default:
+ BUG();
+ }
+ if (ptask->dest_node == IEEE1394_ALL_NODES) {
+ u8 *p;
+ int generation;
+ int node_id;
+
+ /* ptask->generation may not have been set yet */
+ generation = dev->card->generation;
+ smp_rmb();
+ node_id = dev->card->node_id;
+
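+ /*
+ * Prepend the 8-byte GASP header: source node ID plus the
+ * IANA specifier ID and RFC 2734 version.
+ */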
+ p = skb_push(ptask->skb, 8);
+ put_unaligned_be32(node_id << 16 | IANA_SPECIFIER_ID >> 8, p);
+ put_unaligned_be32((IANA_SPECIFIER_ID & 0xff) << 24
+ | RFC2734_SW_VERSION, &p[4]);
+
+ /* We should not transmit if broadcast_channel.valid == 0. */
+ fw_send_request(dev->card, &ptask->transaction,
+ TCODE_STREAM_DATA,
+ fw_stream_packet_destination_id(3,
+ IEEE1394_BROADCAST_CHANNEL, 0),
+ generation, SCODE_100, 0ULL, ptask->skb->data,
+ tx_len + 8, fwnet_write_complete, ptask);
+
+ /* FIXME race? */
+ spin_lock_irqsave(&dev->lock, flags);
+ list_add_tail(&ptask->pt_link, &dev->broadcasted_list);
+ spin_unlock_irqrestore(&dev->lock, flags);
+
+ return 0;
+ }
+
+ fw_send_request(dev->card, &ptask->transaction,
+ TCODE_WRITE_BLOCK_REQUEST, ptask->dest_node,
+ ptask->generation, ptask->speed, ptask->fifo_addr,
+ ptask->skb->data, tx_len, fwnet_write_complete, ptask);
+
+ /* FIXME race? */
+ spin_lock_irqsave(&dev->lock, flags);
+ list_add_tail(&ptask->pt_link, &dev->sent_list);
+ spin_unlock_irqrestore(&dev->lock, flags);
+
+ dev->netdev->trans_start = jiffies;
+
+ return 0;
+}
+
+static int fwnet_broadcast_start(struct fwnet_device *dev)
+{
+ struct fw_iso_context *context;
+ int retval;
+ unsigned num_packets;
+ unsigned max_receive;
+ struct fw_iso_packet packet;
+ unsigned long offset;
+ unsigned u;
+
+ if (dev->local_fifo == FWNET_NO_FIFO_ADDR) {
+ /* outside OHCI posted write area? */
+ static const struct fw_address_region region = {
+ .start = 0xffff00000000ULL,
+ .end = CSR_REGISTER_BASE,
+ };
+
+ dev->handler.length = 4096;
+ dev->handler.address_callback = fwnet_receive_packet;
+ dev->handler.callback_data = dev;
+
+ retval = fw_core_add_address_handler(&dev->handler, &region);
+ if (retval < 0)
+ goto failed_initial;
+
+ dev->local_fifo = dev->handler.offset;
+ }
+
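+ /*
+ * max_receive is log2-encoded: the card accepts payloads of up
+ * to 2^(max_receive + 1) bytes, which is also the size of one
+ * receive buffer slot.
+ */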
+ max_receive = 1U << (dev->card->max_receive + 1);
+ num_packets = (FWNET_ISO_PAGE_COUNT * PAGE_SIZE) / max_receive;
+
+ if (!dev->broadcast_rcv_context) {
+ void **ptrptr;
+
+ context = fw_iso_context_create(dev->card,
+ FW_ISO_CONTEXT_RECEIVE, IEEE1394_BROADCAST_CHANNEL,
+ dev->card->link_speed, 8, fwnet_receive_broadcast, dev);
+ if (IS_ERR(context)) {
+ retval = PTR_ERR(context);
+ goto failed_context_create;
+ }
+
+ retval = fw_iso_buffer_init(&dev->broadcast_rcv_buffer,
+ dev->card, FWNET_ISO_PAGE_COUNT, DMA_FROM_DEVICE);
+ if (retval < 0)
+ goto failed_buffer_init;
+
+ ptrptr = kmalloc(sizeof(void *) * num_packets, GFP_KERNEL);
+ if (!ptrptr) {
+ retval = -ENOMEM;
+ goto failed_ptrs_alloc;
+ }
+
+ dev->broadcast_rcv_buffer_ptrs = ptrptr;
+ for (u = 0; u < FWNET_ISO_PAGE_COUNT; u++) {
+ void *ptr;
+ unsigned v;
+
+ ptr = kmap(dev->broadcast_rcv_buffer.pages[u]);
+ for (v = 0; v < num_packets / FWNET_ISO_PAGE_COUNT; v++)
+ *ptrptr++ = (void *)
+ ((char *)ptr + v * max_receive);
+ }
+ dev->broadcast_rcv_context = context;
+ } else {
+ context = dev->broadcast_rcv_context;
+ }
+
+ packet.payload_length = max_receive;
+ packet.interrupt = 1;
+ packet.skip = 0;
+ packet.tag = 3;
+ packet.sy = 0;
+ packet.header_length = IEEE1394_GASP_HDR_SIZE;
+ offset = 0;
+
+ for (u = 0; u < num_packets; u++) {
+ retval = fw_iso_context_queue(context, &packet,
+ &dev->broadcast_rcv_buffer, offset);
+ if (retval < 0)
+ goto failed_rcv_queue;
+
+ offset += max_receive;
+ }
+ dev->num_broadcast_rcv_ptrs = num_packets;
+ dev->rcv_buffer_size = max_receive;
+ dev->broadcast_rcv_next_ptr = 0U;
+ retval = fw_iso_context_start(context, -1, 0,
+ FW_ISO_CONTEXT_MATCH_ALL_TAGS); /* ??? sync */
+ if (retval < 0)
+ goto failed_rcv_queue;
+
+ /* FIXME: adjust it according to the min. speed of all known peers? */
+ dev->broadcast_xmt_max_payload = IEEE1394_MAX_PAYLOAD_S100
+ - IEEE1394_GASP_HDR_SIZE - RFC2374_UNFRAG_HDR_SIZE;
+ dev->broadcast_state = FWNET_BROADCAST_RUNNING;
+
+ return 0;
+
+ failed_rcv_queue:
+ kfree(dev->broadcast_rcv_buffer_ptrs);
+ dev->broadcast_rcv_buffer_ptrs = NULL;
+ failed_ptrs_alloc:
+ fw_iso_buffer_destroy(&dev->broadcast_rcv_buffer, dev->card);
+ failed_buffer_init:
+ fw_iso_context_destroy(context);
+ dev->broadcast_rcv_context = NULL;
+ failed_context_create:
+ fw_core_remove_address_handler(&dev->handler);
+ failed_initial:
+ dev->local_fifo = FWNET_NO_FIFO_ADDR;
+
+ return retval;
+}
+
+/* ifup */
+static int fwnet_open(struct net_device *net)
+{
+ struct fwnet_device *dev = netdev_priv(net);
+ int ret;
+
+ if (dev->broadcast_state == FWNET_BROADCAST_ERROR) {
+ ret = fwnet_broadcast_start(dev);
+ if (ret)
+ return ret;
+ }
+ netif_start_queue(net);
+
+ return 0;
+}
+
+/* ifdown */
+static int fwnet_stop(struct net_device *net)
+{
+ netif_stop_queue(net);
+
+ /* Deallocate iso context for use by other applications? */
+
+ return 0;
+}
+
+static int fwnet_tx(struct sk_buff *skb, struct net_device *net)
+{
+ struct fwnet_header hdr_buf;
+ struct fwnet_device *dev = netdev_priv(net);
+ __be16 proto;
+ u16 dest_node;
+ unsigned max_payload;
+ u16 dg_size;
+ u16 *datagram_label_ptr;
+ struct fwnet_packet_task *ptask;
+ struct fwnet_peer *peer;
+ unsigned long flags;
+
+ ptask = kmem_cache_alloc(fwnet_packet_task_cache, GFP_ATOMIC);
+ if (ptask == NULL)
+ goto fail;
+
+ skb = skb_share_check(skb, GFP_ATOMIC);
+ if (!skb)
+ goto fail;
+
+ /*
+ * Make a copy of the driver-specific header.
+ * We might need to rebuild the header on tx failure.
+ */
+ memcpy(&hdr_buf, skb->data, sizeof(hdr_buf));
+ skb_pull(skb, sizeof(hdr_buf));
+
+ proto = hdr_buf.h_proto;
+ dg_size = skb->len;
+
+ /* serialize access to peer, including peer->datagram_label */
+ spin_lock_irqsave(&dev->lock, flags);
+
+ /*
+ * Set the transmission type for the packet. ARP packets and IP
+ * broadcast packets are sent via GASP.
+ */
+ if (memcmp(hdr_buf.h_dest, net->broadcast, FWNET_ALEN) == 0
+ || proto == htons(ETH_P_ARP)
+ || (proto == htons(ETH_P_IP)
+ && IN_MULTICAST(ntohl(ip_hdr(skb)->daddr)))) {
+ max_payload = dev->broadcast_xmt_max_payload;
+ datagram_label_ptr = &dev->broadcast_xmt_datagramlabel;
+
+ ptask->fifo_addr = FWNET_NO_FIFO_ADDR;
+ ptask->generation = 0;
+ ptask->dest_node = IEEE1394_ALL_NODES;
+ ptask->speed = SCODE_100;
+ } else {
+ __be64 guid = get_unaligned((__be64 *)hdr_buf.h_dest);
+ u8 generation;
+
+ peer = fwnet_peer_find_by_guid(dev, be64_to_cpu(guid));
+ if (!peer || peer->fifo == FWNET_NO_FIFO_ADDR)
+ goto fail_unlock;
+
+ generation = peer->generation;
+ dest_node = peer->node_id;
+ max_payload = peer->max_payload;
+ datagram_label_ptr = &peer->datagram_label;
+
+ ptask->fifo_addr = peer->fifo;
+ ptask->generation = generation;
+ ptask->dest_node = dest_node;
+ ptask->speed = peer->speed;
+ }
+
+ /* If this is an ARP packet, convert it */
+ if (proto == htons(ETH_P_ARP)) {
+ struct arphdr *arp = (struct arphdr *)skb->data;
+ unsigned char *arp_ptr = (unsigned char *)(arp + 1);
+ struct rfc2734_arp *arp1394 = (struct rfc2734_arp *)skb->data;
+ __be32 ipaddr;
+
+ ipaddr = get_unaligned((__be32 *)(arp_ptr + FWNET_ALEN));
+
+ arp1394->hw_addr_len = RFC2734_HW_ADDR_LEN;
+ arp1394->max_rec = dev->card->max_receive;
+ arp1394->sspd = dev->card->link_speed;
+
+ put_unaligned_be16(dev->local_fifo >> 32,
+ &arp1394->fifo_hi);
+ put_unaligned_be32(dev->local_fifo & 0xffffffff,
+ &arp1394->fifo_lo);
+ put_unaligned(ipaddr, &arp1394->sip);
+ }
+
+ ptask->hdr.w0 = 0;
+ ptask->hdr.w1 = 0;
+ ptask->skb = skb;
+ ptask->dev = dev;
+
+ /* Does it all fit in one packet? */
+ if (dg_size <= max_payload) {
+ fwnet_make_uf_hdr(&ptask->hdr, ntohs(proto));
+ ptask->outstanding_pkts = 1;
+ max_payload = dg_size + RFC2374_UNFRAG_HDR_SIZE;
+ } else {
+ u16 datagram_label;
+
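+ /*
+ * Fragment encapsulation uses a larger header than the
+ * unfragmented case; account for the extra overhead when
+ * sizing the per-fragment payload.
+ */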
+ max_payload -= RFC2374_FRAG_OVERHEAD;
+ datagram_label = (*datagram_label_ptr)++;
+ fwnet_make_ff_hdr(&ptask->hdr, ntohs(proto), dg_size,
+ datagram_label);
+ ptask->outstanding_pkts = DIV_ROUND_UP(dg_size, max_payload);
+ max_payload += RFC2374_FRAG_HDR_SIZE;
+ }
+
+ spin_unlock_irqrestore(&dev->lock, flags);
+
+ ptask->max_payload = max_payload;
+ fwnet_send_packet(ptask);
+
+ return NETDEV_TX_OK;
+
+ fail_unlock:
+ spin_unlock_irqrestore(&dev->lock, flags);
+ fail:
+ if (ptask)
+ kmem_cache_free(fwnet_packet_task_cache, ptask);
+
+ if (skb != NULL)
+ dev_kfree_skb(skb);
+
+ net->stats.tx_dropped++;
+ net->stats.tx_errors++;
+
+ /*
+ * FIXME: According to a patch from 2003-02-26, "returning non-zero
+ * causes serious problems" here, allegedly. Before that patch,
+ * -ERRNO was returned which is not appropriate under Linux 2.6.
+ * Perhaps more needs to be done? Stop the queue in serious
+ * conditions and restart it elsewhere?
+ */
+ return NETDEV_TX_OK;
+}
+
+static int fwnet_change_mtu(struct net_device *net, int new_mtu)
+{
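+ /* 68 is the minimum MTU required by IPv4 (RFC 791) */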
+ if (new_mtu < 68)
+ return -EINVAL;
+
+ net->mtu = new_mtu;
+ return 0;
+}
+
+static void fwnet_get_drvinfo(struct net_device *net,
+ struct ethtool_drvinfo *info)
+{
+ strcpy(info->driver, KBUILD_MODNAME);
+ strcpy(info->bus_info, "ieee1394");
+}
+
+static struct ethtool_ops fwnet_ethtool_ops = {
+ .get_drvinfo = fwnet_get_drvinfo,
+};
+
+static const struct net_device_ops fwnet_netdev_ops = {
+ .ndo_open = fwnet_open,
+ .ndo_stop = fwnet_stop,
+ .ndo_start_xmit = fwnet_tx,
+ .ndo_change_mtu = fwnet_change_mtu,
+};
+
+static void fwnet_init_dev(struct net_device *net)
+{
+ net->header_ops = &fwnet_header_ops;
+ net->netdev_ops = &fwnet_netdev_ops;
+ net->watchdog_timeo = 2 * HZ;
+ net->flags = IFF_BROADCAST | IFF_MULTICAST;
+ net->features = NETIF_F_HIGHDMA;
+ net->addr_len = FWNET_ALEN;
+ net->hard_header_len = FWNET_HLEN;
+ net->type = ARPHRD_IEEE1394;
+ net->tx_queue_len = 10;
+ SET_ETHTOOL_OPS(net, &fwnet_ethtool_ops);
+}
+
+/* caller must hold fwnet_device_mutex */
+static struct fwnet_device *fwnet_dev_find(struct fw_card *card)
+{
+ struct fwnet_device *dev;
+
+ list_for_each_entry(dev, &fwnet_device_list, dev_link)
+ if (dev->card == card)
+ return dev;
+
+ return NULL;
+}
+
+static int fwnet_add_peer(struct fwnet_device *dev,
+ struct fw_unit *unit, struct fw_device *device)
+{
+ struct fwnet_peer *peer;
+
+ peer = kmalloc(sizeof(*peer), GFP_KERNEL);
+ if (!peer)
+ return -ENOMEM;
+
+ dev_set_drvdata(&unit->device, peer);
+
+ peer->dev = dev;
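+ /* The peer's EUI-64 occupies quadlets 3 and 4 of the config ROM. */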
+ peer->guid = (u64)device->config_rom[3] << 32 | device->config_rom[4];
+ peer->fifo = FWNET_NO_FIFO_ADDR;
+ INIT_LIST_HEAD(&peer->pd_list);
+ peer->pdg_size = 0;
+ peer->datagram_label = 0;
+ peer->speed = device->max_speed;
+ peer->max_payload = fwnet_max_payload(device->max_rec, peer->speed);
+
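+ /* Read generation before node_id; the barrier keeps the pair consistent. */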
+ peer->generation = device->generation;
+ smp_rmb();
+ peer->node_id = device->node_id;
+
+ spin_lock_irq(&dev->lock);
+ list_add_tail(&peer->peer_link, &dev->peer_list);
+ spin_unlock_irq(&dev->lock);
+
+ return 0;
+}
+
+static int fwnet_probe(struct device *_dev)
+{
+ struct fw_unit *unit = fw_unit(_dev);
+ struct fw_device *device = fw_parent_device(unit);
+ struct fw_card *card = device->card;
+ struct net_device *net;
+ bool allocated_netdev = false;
+ struct fwnet_device *dev;
+ unsigned max_mtu;
+ int ret;
+
+ mutex_lock(&fwnet_device_mutex);
+
+ dev = fwnet_dev_find(card);
+ if (dev) {
+ net = dev->netdev;
+ goto have_dev;
+ }
+
+ net = alloc_netdev(sizeof(*dev), "firewire%d", fwnet_init_dev);
+ if (net == NULL) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ allocated_netdev = true;
+ SET_NETDEV_DEV(net, card->device);
+ dev = netdev_priv(net);
+
+ spin_lock_init(&dev->lock);
+ dev->broadcast_state = FWNET_BROADCAST_ERROR;
+ dev->broadcast_rcv_context = NULL;
+ dev->broadcast_xmt_max_payload = 0;
+ dev->broadcast_xmt_datagramlabel = 0;
+
+ dev->local_fifo = FWNET_NO_FIFO_ADDR;
+
+ INIT_LIST_HEAD(&dev->packet_list);
+ INIT_LIST_HEAD(&dev->broadcasted_list);
+ INIT_LIST_HEAD(&dev->sent_list);
+ INIT_LIST_HEAD(&dev->peer_list);
+
+ dev->card = card;
+ dev->netdev = net;
+
+ /*
+ * Use the RFC 2734 default of 1500 octets or the maximum payload,
+ * whichever is smaller, as the initial MTU.
+ */
+ max_mtu = (1 << (card->max_receive + 1))
+ - sizeof(struct rfc2734_header) - IEEE1394_GASP_HDR_SIZE;
+ net->mtu = min(1500U, max_mtu);
+
+ /* Set our hardware address while we're at it */
+ put_unaligned_be64(card->guid, net->dev_addr);
+ put_unaligned_be64(~0ULL, net->broadcast);
+ ret = register_netdev(net);
+ if (ret) {
+ fw_error("Cannot register the driver\n");
+ goto out;
+ }
+
+ list_add_tail(&dev->dev_link, &fwnet_device_list);
+ fw_notify("%s: IPv4 over FireWire on device %016llx\n",
+ net->name, (unsigned long long)card->guid);
+ have_dev:
+ ret = fwnet_add_peer(dev, unit, device);
+ if (ret && allocated_netdev) {
+ unregister_netdev(net);
+ list_del(&dev->dev_link);
+ }
+ out:
+ if (ret && allocated_netdev)
+ free_netdev(net);
+
+ mutex_unlock(&fwnet_device_mutex);
+
+ return ret;
+}
+
+static void fwnet_remove_peer(struct fwnet_peer *peer)
+{
+ struct fwnet_partial_datagram *pd, *pd_next;
+
+ spin_lock_irq(&peer->dev->lock);
+ list_del(&peer->peer_link);
+ spin_unlock_irq(&peer->dev->lock);
+
+ list_for_each_entry_safe(pd, pd_next, &peer->pd_list, pd_link)
+ fwnet_pd_delete(pd);
+
+ kfree(peer);
+}
+
+static int fwnet_remove(struct device *_dev)
+{
+ struct fwnet_peer *peer = dev_get_drvdata(_dev);
+ struct fwnet_device *dev = peer->dev;
+ struct net_device *net;
+ struct fwnet_packet_task *ptask, *pt_next;
+
+ mutex_lock(&fwnet_device_mutex);
+
+ fwnet_remove_peer(peer);
+
+ if (list_empty(&dev->peer_list)) {
+ net = dev->netdev;
+ unregister_netdev(net);
+
+ if (dev->local_fifo != FWNET_NO_FIFO_ADDR)
+ fw_core_remove_address_handler(&dev->handler);
+ if (dev->broadcast_rcv_context) {
+ fw_iso_context_stop(dev->broadcast_rcv_context);
+ fw_iso_buffer_destroy(&dev->broadcast_rcv_buffer,
+ dev->card);
+ fw_iso_context_destroy(dev->broadcast_rcv_context);
+ }
+ list_for_each_entry_safe(ptask, pt_next,
+ &dev->packet_list, pt_link) {
+ dev_kfree_skb_any(ptask->skb);
+ kmem_cache_free(fwnet_packet_task_cache, ptask);
+ }
+ list_for_each_entry_safe(ptask, pt_next,
+ &dev->broadcasted_list, pt_link) {
+ dev_kfree_skb_any(ptask->skb);
+ kmem_cache_free(fwnet_packet_task_cache, ptask);
+ }
+ list_for_each_entry_safe(ptask, pt_next,
+ &dev->sent_list, pt_link) {
+ dev_kfree_skb_any(ptask->skb);
+ kmem_cache_free(fwnet_packet_task_cache, ptask);
+ }
+ list_del(&dev->dev_link);
+
+ free_netdev(net);
+ }
+
+ mutex_unlock(&fwnet_device_mutex);
+
+ return 0;
+}
+
+/*
+ * FIXME abort partially sent fragmented datagrams,
+ * discard partially received fragmented datagrams
+ */
+static void fwnet_update(struct fw_unit *unit)
+{
+ struct fw_device *device = fw_parent_device(unit);
+ struct fwnet_peer *peer = dev_get_drvdata(&unit->device);
+ int generation;
+
+ generation = device->generation;
+
+ spin_lock_irq(&peer->dev->lock);
+ peer->node_id = device->node_id;
+ peer->generation = generation;
+ spin_unlock_irq(&peer->dev->lock);
+}
+
+static const struct ieee1394_device_id fwnet_id_table[] = {
+ {
+ .match_flags = IEEE1394_MATCH_SPECIFIER_ID |
+ IEEE1394_MATCH_VERSION,
+ .specifier_id = IANA_SPECIFIER_ID,
+ .version = RFC2734_SW_VERSION,
+ },
+ { }
+};
+
+static struct fw_driver fwnet_driver = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "net",
+ .bus = &fw_bus_type,
+ .probe = fwnet_probe,
+ .remove = fwnet_remove,
+ },
+ .update = fwnet_update,
+ .id_table = fwnet_id_table,
+};
+
+static const u32 rfc2374_unit_directory_data[] = {
+ 0x00040000, /* directory_length */
+ 0x1200005e, /* unit_specifier_id: IANA */
+ 0x81000003, /* textual descriptor offset */
+ 0x13000001, /* unit_sw_version: RFC 2734 */
+ 0x81000005, /* textual descriptor offset */
+ 0x00030000, /* descriptor_length */
+ 0x00000000, /* text */
+ 0x00000000, /* minimal ASCII, en */
+ 0x49414e41, /* I A N A */
+ 0x00030000, /* descriptor_length */
+ 0x00000000, /* text */
+ 0x00000000, /* minimal ASCII, en */
+ 0x49507634, /* I P v 4 */
+};
+
+static struct fw_descriptor rfc2374_unit_directory = {
+ .length = ARRAY_SIZE(rfc2374_unit_directory_data),
+ .key = (CSR_DIRECTORY | CSR_UNIT) << 24,
+ .data = rfc2374_unit_directory_data
+};
+
+static int __init fwnet_init(void)
+{
+ int err;
+
+ err = fw_core_add_descriptor(&rfc2374_unit_directory);
+ if (err)
+ return err;
+
+ fwnet_packet_task_cache = kmem_cache_create("packet_task",
+ sizeof(struct fwnet_packet_task), 0, 0, NULL);
+ if (!fwnet_packet_task_cache) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ err = driver_register(&fwnet_driver.driver);
+ if (!err)
+ return 0;
+
+ kmem_cache_destroy(fwnet_packet_task_cache);
+out:
+ fw_core_remove_descriptor(&rfc2374_unit_directory);
+
+ return err;
+}
+module_init(fwnet_init);
+
+static void __exit fwnet_cleanup(void)
+{
+ driver_unregister(&fwnet_driver.driver);
+ kmem_cache_destroy(fwnet_packet_task_cache);
+ fw_core_remove_descriptor(&rfc2374_unit_directory);
+}
+module_exit(fwnet_cleanup);
+
+MODULE_AUTHOR("Jay Fenlason <fenlason@redhat.com>");
+MODULE_DESCRIPTION("IPv4 over IEEE1394 as per RFC 2734");
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(ieee1394, fwnet_id_table);
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index c961fe4..39b393d 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -81,6 +81,7 @@
config DRM_I915
tristate "i915 driver"
+ depends on AGP_INTEL
select FB_CFB_FILLRECT
select FB_CFB_COPYAREA
select FB_CFB_IMAGEBLIT
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index 4e89ab0..fe23f29 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -16,6 +16,7 @@
drm-$(CONFIG_COMPAT) += drm_ioc32.o
obj-$(CONFIG_DRM) += drm.o
+obj-$(CONFIG_DRM_TTM) += ttm/
obj-$(CONFIG_DRM_TDFX) += tdfx/
obj-$(CONFIG_DRM_R128) += r128/
obj-$(CONFIG_DRM_RADEON)+= radeon/
@@ -26,4 +27,3 @@
obj-$(CONFIG_DRM_SIS) += sis/
obj-$(CONFIG_DRM_SAVAGE)+= savage/
obj-$(CONFIG_DRM_VIA) +=via/
-obj-$(CONFIG_DRM_TTM) += ttm/
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 7d08352..80cc6d0 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -294,10 +294,10 @@
unsigned vactive = (pt->vactive_vblank_hi & 0xf0) << 4 | pt->vactive_lo;
unsigned hblank = (pt->hactive_hblank_hi & 0xf) << 8 | pt->hblank_lo;
unsigned vblank = (pt->vactive_vblank_hi & 0xf) << 8 | pt->vblank_lo;
- unsigned hsync_offset = (pt->hsync_vsync_offset_pulse_width_hi & 0x3) << 8 | pt->hsync_offset_lo;
- unsigned hsync_pulse_width = (pt->hsync_vsync_offset_pulse_width_hi & 0xc) << 6 | pt->hsync_pulse_width_lo;
- unsigned vsync_offset = (pt->hsync_vsync_offset_pulse_width_hi & 0x30) | (pt->vsync_offset_pulse_width_lo & 0xf);
- unsigned vsync_pulse_width = (pt->hsync_vsync_offset_pulse_width_hi & 0xc0) >> 2 | pt->vsync_offset_pulse_width_lo >> 4;
+ unsigned hsync_offset = (pt->hsync_vsync_offset_pulse_width_hi & 0xc0) << 2 | pt->hsync_offset_lo;
+ unsigned hsync_pulse_width = (pt->hsync_vsync_offset_pulse_width_hi & 0x30) << 4 | pt->hsync_pulse_width_lo;
+ unsigned vsync_offset = (pt->hsync_vsync_offset_pulse_width_hi & 0xc) >> 2 | pt->vsync_offset_pulse_width_lo >> 4;
+ unsigned vsync_pulse_width = (pt->hsync_vsync_offset_pulse_width_hi & 0x3) << 4 | (pt->vsync_offset_pulse_width_lo & 0xf);
/* ignore tiny modes */
if (hactive < 64 || vactive < 64)
@@ -347,8 +347,8 @@
mode->flags |= (pt->misc & DRM_EDID_PT_VSYNC_POSITIVE) ?
DRM_MODE_FLAG_PVSYNC : DRM_MODE_FLAG_NVSYNC;
- mode->width_mm = pt->width_mm_lo | (pt->width_height_mm_hi & 0xf) << 8;
- mode->height_mm = pt->height_mm_lo | (pt->width_height_mm_hi & 0xf0) << 4;
+ mode->width_mm = pt->width_mm_lo | (pt->width_height_mm_hi & 0xf0) << 4;
+ mode->height_mm = pt->height_mm_lo | (pt->width_height_mm_hi & 0xf) << 8;
if (quirks & EDID_QUIRK_DETAILED_IN_CM) {
mode->width_mm *= 10;
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 51c5a05..30d6b99 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -13,6 +13,8 @@
intel_crt.o \
intel_lvds.o \
intel_bios.o \
+ intel_dp.o \
+ intel_dp_i2c.o \
intel_hdmi.o \
intel_sdvo.o \
intel_modes.o \
diff --git a/drivers/gpu/drm/i915/dvo.h b/drivers/gpu/drm/i915/dvo.h
index e747ac4..288fc50 100644
--- a/drivers/gpu/drm/i915/dvo.h
+++ b/drivers/gpu/drm/i915/dvo.h
@@ -37,7 +37,7 @@
/* GPIO register used for i2c bus to control this device */
u32 gpio;
int slave_addr;
- struct intel_i2c_chan *i2c_bus;
+ struct i2c_adapter *i2c_bus;
const struct intel_dvo_dev_ops *dev_ops;
void *dev_priv;
@@ -52,7 +52,7 @@
* Returns NULL if the device does not exist.
*/
bool (*init)(struct intel_dvo_device *dvo,
- struct intel_i2c_chan *i2cbus);
+ struct i2c_adapter *i2cbus);
/*
* Called to allow the output a chance to create properties after the
diff --git a/drivers/gpu/drm/i915/dvo_ch7017.c b/drivers/gpu/drm/i915/dvo_ch7017.c
index 03d4b49..621815b 100644
--- a/drivers/gpu/drm/i915/dvo_ch7017.c
+++ b/drivers/gpu/drm/i915/dvo_ch7017.c
@@ -176,19 +176,20 @@
static bool ch7017_read(struct intel_dvo_device *dvo, int addr, uint8_t *val)
{
- struct intel_i2c_chan *i2cbus = dvo->i2c_bus;
+ struct i2c_adapter *adapter = dvo->i2c_bus;
+ struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter);
u8 out_buf[2];
u8 in_buf[2];
struct i2c_msg msgs[] = {
{
- .addr = i2cbus->slave_addr,
+ .addr = dvo->slave_addr,
.flags = 0,
.len = 1,
.buf = out_buf,
},
{
- .addr = i2cbus->slave_addr,
+ .addr = dvo->slave_addr,
.flags = I2C_M_RD,
.len = 1,
.buf = in_buf,
@@ -208,10 +209,11 @@
static bool ch7017_write(struct intel_dvo_device *dvo, int addr, uint8_t val)
{
- struct intel_i2c_chan *i2cbus = dvo->i2c_bus;
+ struct i2c_adapter *adapter = dvo->i2c_bus;
+ struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter);
uint8_t out_buf[2];
struct i2c_msg msg = {
- .addr = i2cbus->slave_addr,
+ .addr = dvo->slave_addr,
.flags = 0,
.len = 2,
.buf = out_buf,
@@ -228,8 +230,9 @@
/** Probes for a CH7017 on the given bus and slave address. */
static bool ch7017_init(struct intel_dvo_device *dvo,
- struct intel_i2c_chan *i2cbus)
+ struct i2c_adapter *adapter)
{
+ struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter);
struct ch7017_priv *priv;
uint8_t val;
@@ -237,8 +240,7 @@
if (priv == NULL)
return false;
- dvo->i2c_bus = i2cbus;
- dvo->i2c_bus->slave_addr = dvo->slave_addr;
+ dvo->i2c_bus = adapter;
dvo->dev_priv = priv;
if (!ch7017_read(dvo, CH7017_DEVICE_ID, &val))
@@ -248,7 +250,7 @@
val != CH7018_DEVICE_ID_VALUE &&
val != CH7019_DEVICE_ID_VALUE) {
DRM_DEBUG("ch701x not detected, got %d: from %s Slave %d.\n",
- val, i2cbus->adapter.name,i2cbus->slave_addr);
+ val, i2cbus->adapter.name,dvo->slave_addr);
goto fail;
}
diff --git a/drivers/gpu/drm/i915/dvo_ch7xxx.c b/drivers/gpu/drm/i915/dvo_ch7xxx.c
index d2fd95d..a9b8962 100644
--- a/drivers/gpu/drm/i915/dvo_ch7xxx.c
+++ b/drivers/gpu/drm/i915/dvo_ch7xxx.c
@@ -123,19 +123,20 @@
static bool ch7xxx_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
{
struct ch7xxx_priv *ch7xxx= dvo->dev_priv;
- struct intel_i2c_chan *i2cbus = dvo->i2c_bus;
+ struct i2c_adapter *adapter = dvo->i2c_bus;
+ struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter);
u8 out_buf[2];
u8 in_buf[2];
struct i2c_msg msgs[] = {
{
- .addr = i2cbus->slave_addr,
+ .addr = dvo->slave_addr,
.flags = 0,
.len = 1,
.buf = out_buf,
},
{
- .addr = i2cbus->slave_addr,
+ .addr = dvo->slave_addr,
.flags = I2C_M_RD,
.len = 1,
.buf = in_buf,
@@ -152,7 +153,7 @@
if (!ch7xxx->quiet) {
DRM_DEBUG("Unable to read register 0x%02x from %s:%02x.\n",
- addr, i2cbus->adapter.name, i2cbus->slave_addr);
+ addr, i2cbus->adapter.name, dvo->slave_addr);
}
return false;
}
@@ -161,10 +162,11 @@
static bool ch7xxx_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch)
{
struct ch7xxx_priv *ch7xxx = dvo->dev_priv;
- struct intel_i2c_chan *i2cbus = dvo->i2c_bus;
+ struct i2c_adapter *adapter = dvo->i2c_bus;
+ struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter);
uint8_t out_buf[2];
struct i2c_msg msg = {
- .addr = i2cbus->slave_addr,
+ .addr = dvo->slave_addr,
.flags = 0,
.len = 2,
.buf = out_buf,
@@ -178,14 +180,14 @@
if (!ch7xxx->quiet) {
DRM_DEBUG("Unable to write register 0x%02x to %s:%d.\n",
- addr, i2cbus->adapter.name, i2cbus->slave_addr);
+ addr, i2cbus->adapter.name, dvo->slave_addr);
}
return false;
}
static bool ch7xxx_init(struct intel_dvo_device *dvo,
- struct intel_i2c_chan *i2cbus)
+ struct i2c_adapter *adapter)
{
/* this will detect the CH7xxx chip on the specified i2c bus */
struct ch7xxx_priv *ch7xxx;
@@ -196,8 +198,7 @@
if (ch7xxx == NULL)
return false;
- dvo->i2c_bus = i2cbus;
- dvo->i2c_bus->slave_addr = dvo->slave_addr;
+ dvo->i2c_bus = adapter;
dvo->dev_priv = ch7xxx;
ch7xxx->quiet = true;
@@ -207,7 +208,7 @@
name = ch7xxx_get_id(vendor);
if (!name) {
DRM_DEBUG("ch7xxx not detected; got 0x%02x from %s slave %d.\n",
- vendor, i2cbus->adapter.name, i2cbus->slave_addr);
+ vendor, adapter->name, dvo->slave_addr);
goto out;
}
@@ -217,7 +218,7 @@
if (device != CH7xxx_DID) {
DRM_DEBUG("ch7xxx not detected; got 0x%02x from %s slave %d.\n",
- vendor, i2cbus->adapter.name, i2cbus->slave_addr);
+ vendor, adapter->name, dvo->slave_addr);
goto out;
}
diff --git a/drivers/gpu/drm/i915/dvo_ivch.c b/drivers/gpu/drm/i915/dvo_ivch.c
index 0c8d375..aa176f9 100644
--- a/drivers/gpu/drm/i915/dvo_ivch.c
+++ b/drivers/gpu/drm/i915/dvo_ivch.c
@@ -169,13 +169,14 @@
static bool ivch_read(struct intel_dvo_device *dvo, int addr, uint16_t *data)
{
struct ivch_priv *priv = dvo->dev_priv;
- struct intel_i2c_chan *i2cbus = dvo->i2c_bus;
+ struct i2c_adapter *adapter = dvo->i2c_bus;
+ struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter);
u8 out_buf[1];
u8 in_buf[2];
struct i2c_msg msgs[] = {
{
- .addr = i2cbus->slave_addr,
+ .addr = dvo->slave_addr,
.flags = I2C_M_RD,
.len = 0,
},
@@ -186,7 +187,7 @@
.buf = out_buf,
},
{
- .addr = i2cbus->slave_addr,
+ .addr = dvo->slave_addr,
.flags = I2C_M_RD | I2C_M_NOSTART,
.len = 2,
.buf = in_buf,
@@ -202,7 +203,7 @@
if (!priv->quiet) {
DRM_DEBUG("Unable to read register 0x%02x from %s:%02x.\n",
- addr, i2cbus->adapter.name, i2cbus->slave_addr);
+ addr, i2cbus->adapter.name, dvo->slave_addr);
}
return false;
}
@@ -211,10 +212,11 @@
static bool ivch_write(struct intel_dvo_device *dvo, int addr, uint16_t data)
{
struct ivch_priv *priv = dvo->dev_priv;
- struct intel_i2c_chan *i2cbus = dvo->i2c_bus;
+ struct i2c_adapter *adapter = dvo->i2c_bus;
+ struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter);
u8 out_buf[3];
struct i2c_msg msg = {
- .addr = i2cbus->slave_addr,
+ .addr = dvo->slave_addr,
.flags = 0,
.len = 3,
.buf = out_buf,
@@ -229,7 +231,7 @@
if (!priv->quiet) {
DRM_DEBUG("Unable to write register 0x%02x to %s:%d.\n",
- addr, i2cbus->adapter.name, i2cbus->slave_addr);
+ addr, i2cbus->adapter.name, dvo->slave_addr);
}
return false;
@@ -237,7 +239,7 @@
/** Probes the given bus and slave address for an ivch */
static bool ivch_init(struct intel_dvo_device *dvo,
- struct intel_i2c_chan *i2cbus)
+ struct i2c_adapter *adapter)
{
struct ivch_priv *priv;
uint16_t temp;
@@ -246,8 +248,7 @@
if (priv == NULL)
return false;
- dvo->i2c_bus = i2cbus;
- dvo->i2c_bus->slave_addr = dvo->slave_addr;
+ dvo->i2c_bus = adapter;
dvo->dev_priv = priv;
priv->quiet = true;
diff --git a/drivers/gpu/drm/i915/dvo_sil164.c b/drivers/gpu/drm/i915/dvo_sil164.c
index 033a4bb..e1c1f73 100644
--- a/drivers/gpu/drm/i915/dvo_sil164.c
+++ b/drivers/gpu/drm/i915/dvo_sil164.c
@@ -76,19 +76,20 @@
static bool sil164_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
{
struct sil164_priv *sil = dvo->dev_priv;
- struct intel_i2c_chan *i2cbus = dvo->i2c_bus;
+ struct i2c_adapter *adapter = dvo->i2c_bus;
+ struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter);
u8 out_buf[2];
u8 in_buf[2];
struct i2c_msg msgs[] = {
{
- .addr = i2cbus->slave_addr,
+ .addr = dvo->slave_addr,
.flags = 0,
.len = 1,
.buf = out_buf,
},
{
- .addr = i2cbus->slave_addr,
+ .addr = dvo->slave_addr,
.flags = I2C_M_RD,
.len = 1,
.buf = in_buf,
@@ -105,7 +106,7 @@
if (!sil->quiet) {
DRM_DEBUG("Unable to read register 0x%02x from %s:%02x.\n",
- addr, i2cbus->adapter.name, i2cbus->slave_addr);
+ addr, i2cbus->adapter.name, dvo->slave_addr);
}
return false;
}
@@ -113,10 +114,11 @@
static bool sil164_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch)
{
struct sil164_priv *sil= dvo->dev_priv;
- struct intel_i2c_chan *i2cbus = dvo->i2c_bus;
+ struct i2c_adapter *adapter = dvo->i2c_bus;
+ struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter);
uint8_t out_buf[2];
struct i2c_msg msg = {
- .addr = i2cbus->slave_addr,
+ .addr = dvo->slave_addr,
.flags = 0,
.len = 2,
.buf = out_buf,
@@ -130,7 +132,7 @@
if (!sil->quiet) {
DRM_DEBUG("Unable to write register 0x%02x to %s:%d.\n",
- addr, i2cbus->adapter.name, i2cbus->slave_addr);
+ addr, i2cbus->adapter.name, dvo->slave_addr);
}
return false;
@@ -138,7 +140,7 @@
/* Silicon Image 164 driver for chip on i2c bus */
static bool sil164_init(struct intel_dvo_device *dvo,
- struct intel_i2c_chan *i2cbus)
+ struct i2c_adapter *adapter)
{
/* this will detect the SIL164 chip on the specified i2c bus */
struct sil164_priv *sil;
@@ -148,8 +150,7 @@
if (sil == NULL)
return false;
- dvo->i2c_bus = i2cbus;
- dvo->i2c_bus->slave_addr = dvo->slave_addr;
+ dvo->i2c_bus = adapter;
dvo->dev_priv = sil;
sil->quiet = true;
@@ -158,7 +159,7 @@
if (ch != (SIL164_VID & 0xff)) {
DRM_DEBUG("sil164 not detected got %d: from %s Slave %d.\n",
- ch, i2cbus->adapter.name, i2cbus->slave_addr);
+ ch, adapter->name, dvo->slave_addr);
goto out;
}
@@ -167,7 +168,7 @@
if (ch != (SIL164_DID & 0xff)) {
DRM_DEBUG("sil164 not detected got %d: from %s Slave %d.\n",
- ch, i2cbus->adapter.name, i2cbus->slave_addr);
+ ch, adapter->name, dvo->slave_addr);
goto out;
}
sil->quiet = false;
diff --git a/drivers/gpu/drm/i915/dvo_tfp410.c b/drivers/gpu/drm/i915/dvo_tfp410.c
index 207fda8..9ecc907 100644
--- a/drivers/gpu/drm/i915/dvo_tfp410.c
+++ b/drivers/gpu/drm/i915/dvo_tfp410.c
@@ -101,19 +101,20 @@
static bool tfp410_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
{
struct tfp410_priv *tfp = dvo->dev_priv;
- struct intel_i2c_chan *i2cbus = dvo->i2c_bus;
+ struct i2c_adapter *adapter = dvo->i2c_bus;
+ struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter);
u8 out_buf[2];
u8 in_buf[2];
struct i2c_msg msgs[] = {
{
- .addr = i2cbus->slave_addr,
+ .addr = dvo->slave_addr,
.flags = 0,
.len = 1,
.buf = out_buf,
},
{
- .addr = i2cbus->slave_addr,
+ .addr = dvo->slave_addr,
.flags = I2C_M_RD,
.len = 1,
.buf = in_buf,
@@ -130,7 +131,7 @@
if (!tfp->quiet) {
DRM_DEBUG("Unable to read register 0x%02x from %s:%02x.\n",
- addr, i2cbus->adapter.name, i2cbus->slave_addr);
+ addr, i2cbus->adapter.name, dvo->slave_addr);
}
return false;
}
@@ -138,10 +139,11 @@
static bool tfp410_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch)
{
struct tfp410_priv *tfp = dvo->dev_priv;
- struct intel_i2c_chan *i2cbus = dvo->i2c_bus;
+ struct i2c_adapter *adapter = dvo->i2c_bus;
+ struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter);
uint8_t out_buf[2];
struct i2c_msg msg = {
- .addr = i2cbus->slave_addr,
+ .addr = dvo->slave_addr,
.flags = 0,
.len = 2,
.buf = out_buf,
@@ -155,7 +157,7 @@
if (!tfp->quiet) {
DRM_DEBUG("Unable to write register 0x%02x to %s:%d.\n",
- addr, i2cbus->adapter.name, i2cbus->slave_addr);
+ addr, i2cbus->adapter.name, dvo->slave_addr);
}
return false;
@@ -174,7 +176,7 @@
/* Ti TFP410 driver for chip on i2c bus */
static bool tfp410_init(struct intel_dvo_device *dvo,
- struct intel_i2c_chan *i2cbus)
+ struct i2c_adapter *adapter)
{
/* this will detect the tfp410 chip on the specified i2c bus */
struct tfp410_priv *tfp;
@@ -184,20 +186,19 @@
if (tfp == NULL)
return false;
- dvo->i2c_bus = i2cbus;
- dvo->i2c_bus->slave_addr = dvo->slave_addr;
+ dvo->i2c_bus = adapter;
dvo->dev_priv = tfp;
tfp->quiet = true;
if ((id = tfp410_getid(dvo, TFP410_VID_LO)) != TFP410_VID) {
DRM_DEBUG("tfp410 not detected got VID %X: from %s Slave %d.\n",
- id, i2cbus->adapter.name, i2cbus->slave_addr);
+ id, adapter->name, dvo->slave_addr);
goto out;
}
if ((id = tfp410_getid(dvo, TFP410_DID_LO)) != TFP410_DID) {
DRM_DEBUG("tfp410 not detected got DID %X: from %s Slave %d.\n",
- id, i2cbus->adapter.name, i2cbus->slave_addr);
+ id, adapter->name, dvo->slave_addr);
goto out;
}
tfp->quiet = false;
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 98560e1..e3cb402 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -67,8 +67,6 @@
pci_save_state(dev->pdev);
- i915_save_state(dev);
-
/* If KMS is active, we do the leavevt stuff here */
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
if (i915_gem_idle(dev))
@@ -77,6 +75,8 @@
drm_irq_uninstall(dev);
}
+ i915_save_state(dev);
+
intel_opregion_free(dev, 1);
if (state.event == PM_EVENT_SUSPEND) {
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 7a84f04..bb4c2d3 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -306,6 +306,17 @@
u32 saveCURBPOS;
u32 saveCURBBASE;
u32 saveCURSIZE;
+ u32 saveDP_B;
+ u32 saveDP_C;
+ u32 saveDP_D;
+ u32 savePIPEA_GMCH_DATA_M;
+ u32 savePIPEB_GMCH_DATA_M;
+ u32 savePIPEA_GMCH_DATA_N;
+ u32 savePIPEB_GMCH_DATA_N;
+ u32 savePIPEA_DP_LINK_M;
+ u32 savePIPEB_DP_LINK_M;
+ u32 savePIPEA_DP_LINK_N;
+ u32 savePIPEB_DP_LINK_N;
struct {
struct drm_mm gtt_space;
@@ -857,6 +868,7 @@
#define HAS_128_BYTE_Y_TILING(dev) (IS_I9XX(dev) && !(IS_I915G(dev) || \
IS_I915GM(dev)))
#define SUPPORTS_INTEGRATED_HDMI(dev) (IS_G4X(dev) || IS_IGDNG(dev))
+#define SUPPORTS_INTEGRATED_DP(dev) (IS_G4X(dev) || IS_IGDNG(dev))
#define I915_HAS_HOTPLUG(dev) (IS_I945G(dev) || IS_I945GM(dev) || IS_I965G(dev))
#define PRIMARY_RINGBUFFER_SIZE (128*1024)
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index fd2b8bd..876b65c 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1006,7 +1006,7 @@
mutex_lock(&dev->struct_mutex);
#if WATCH_BUF
- DRM_INFO("set_domain_ioctl %p(%d), %08x %08x\n",
+ DRM_INFO("set_domain_ioctl %p(%zd), %08x %08x\n",
obj, obj->size, read_domains, write_domain);
#endif
if (read_domains & I915_GEM_DOMAIN_GTT) {
@@ -1050,7 +1050,7 @@
}
#if WATCH_BUF
- DRM_INFO("%s: sw_finish %d (%p %d)\n",
+ DRM_INFO("%s: sw_finish %d (%p %zd)\n",
__func__, args->handle, obj, obj->size);
#endif
obj_priv = obj->driver_private;
@@ -2423,7 +2423,7 @@
}
#if WATCH_BUF
- DRM_INFO("Binding object of size %d at 0x%08x\n",
+ DRM_INFO("Binding object of size %zd at 0x%08x\n",
obj->size, obj_priv->gtt_offset);
#endif
ret = i915_gem_object_get_pages(obj);
@@ -4227,6 +4227,7 @@
void
i915_gem_load(struct drm_device *dev)
{
+ int i;
drm_i915_private_t *dev_priv = dev->dev_private;
spin_lock_init(&dev_priv->mm.active_list_lock);
@@ -4246,6 +4247,18 @@
else
dev_priv->num_fence_regs = 8;
+ /* Initialize fence registers to zero */
+ if (IS_I965G(dev)) {
+ for (i = 0; i < 16; i++)
+ I915_WRITE64(FENCE_REG_965_0 + (i * 8), 0);
+ } else {
+ for (i = 0; i < 8; i++)
+ I915_WRITE(FENCE_REG_830_0 + (i * 4), 0);
+ if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
+ for (i = 0; i < 8; i++)
+ I915_WRITE(FENCE_REG_945_8 + (i * 4), 0);
+ }
+
i915_gem_detect_bit_6_swizzle(dev);
}
diff --git a/drivers/gpu/drm/i915/i915_gem_debug.c b/drivers/gpu/drm/i915/i915_gem_debug.c
index 8d0b943..e602614 100644
--- a/drivers/gpu/drm/i915/i915_gem_debug.c
+++ b/drivers/gpu/drm/i915/i915_gem_debug.c
@@ -87,7 +87,7 @@
chunk_len = page_len - chunk;
if (chunk_len > 128)
chunk_len = 128;
- i915_gem_dump_page(obj_priv->page_list[page],
+ i915_gem_dump_page(obj_priv->pages[page],
chunk, chunk + chunk_len,
obj_priv->gtt_offset +
page * PAGE_SIZE,
@@ -143,7 +143,7 @@
uint32_t *backing_map = NULL;
int bad_count = 0;
- DRM_INFO("%s: checking coherency of object %p@0x%08x (%d, %dkb):\n",
+ DRM_INFO("%s: checking coherency of object %p@0x%08x (%d, %zdkb):\n",
__func__, obj, obj_priv->gtt_offset, handle,
obj->size / 1024);
@@ -157,7 +157,7 @@
for (page = 0; page < obj->size / PAGE_SIZE; page++) {
int i;
- backing_map = kmap_atomic(obj_priv->page_list[page], KM_USER0);
+ backing_map = kmap_atomic(obj_priv->pages[page], KM_USER0);
if (backing_map == NULL) {
DRM_ERROR("failed to map backing page\n");
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index 5c1ceec..daeae62 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -114,11 +114,13 @@
mchbar_addr = ((u64)temp_hi << 32) | temp_lo;
/* If ACPI doesn't have it, assume we need to allocate it ourselves */
+#ifdef CONFIG_PNP
if (mchbar_addr &&
pnp_range_reserved(mchbar_addr, mchbar_addr + MCHBAR_SIZE)) {
ret = 0;
goto out_put;
}
+#endif
/* Get some space for it */
ret = pci_bus_alloc_resource(bridge_dev->bus, &dev_priv->mch_res,
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index b86b7b7..228546f 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -232,7 +232,17 @@
drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
hotplug_work);
struct drm_device *dev = dev_priv->dev;
+ struct drm_mode_config *mode_config = &dev->mode_config;
+ struct drm_connector *connector;
+ if (mode_config->num_connector) {
+ list_for_each_entry(connector, &mode_config->connector_list, head) {
+ struct intel_output *intel_output = to_intel_output(connector);
+
+ if (intel_output->hot_plug)
+ (*intel_output->hot_plug) (intel_output);
+ }
+ }
/* Just fire off a uevent and let userspace tell us what to do */
drm_sysfs_hotplug_event(dev);
}
diff --git a/drivers/gpu/drm/i915/i915_opregion.c b/drivers/gpu/drm/i915/i915_opregion.c
index dc425e7..e4b4e88 100644
--- a/drivers/gpu/drm/i915/i915_opregion.c
+++ b/drivers/gpu/drm/i915/i915_opregion.c
@@ -419,7 +419,7 @@
return;
if (!suspend)
- acpi_video_exit();
+ acpi_video_unregister();
opregion->acpi->drdy = 0;
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index f6237a0..88bf752 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -569,6 +569,19 @@
#define C0DRB3 0x10206
#define C1DRB3 0x10606
+/* Clocking configuration register */
+#define CLKCFG 0x10c00
+#define CLKCFG_FSB_400 (0 << 0) /* hrawclk 100 */
+#define CLKCFG_FSB_533 (1 << 0) /* hrawclk 133 */
+#define CLKCFG_FSB_667 (3 << 0) /* hrawclk 166 */
+#define CLKCFG_FSB_800 (2 << 0) /* hrawclk 200 */
+#define CLKCFG_FSB_1067 (6 << 0) /* hrawclk 266 */
+#define CLKCFG_FSB_1333 (7 << 0) /* hrawclk 333 */
+/* this is a guess, could be 5 as well */
+#define CLKCFG_FSB_1600 (4 << 0) /* hrawclk 400 */
+#define CLKCFG_FSB_1600_ALT (5 << 0) /* hrawclk 400 */
+#define CLKCFG_FSB_MASK (7 << 0)
+
/** GM965 GM45 render standby register */
#define MCHBAR_RENDER_STANDBY 0x111B8
@@ -834,9 +847,25 @@
#define HORIZ_INTERP_MASK (3 << 6)
#define HORIZ_AUTO_SCALE (1 << 5)
#define PANEL_8TO6_DITHER_ENABLE (1 << 3)
+#define PFIT_FILTER_FUZZY (0 << 24)
+#define PFIT_SCALING_AUTO (0 << 26)
+#define PFIT_SCALING_PROGRAMMED (1 << 26)
+#define PFIT_SCALING_PILLAR (2 << 26)
+#define PFIT_SCALING_LETTER (3 << 26)
#define PFIT_PGM_RATIOS 0x61234
#define PFIT_VERT_SCALE_MASK 0xfff00000
#define PFIT_HORIZ_SCALE_MASK 0x0000fff0
+/* Pre-965 */
+#define PFIT_VERT_SCALE_SHIFT 20
+#define PFIT_VERT_SCALE_MASK 0xfff00000
+#define PFIT_HORIZ_SCALE_SHIFT 4
+#define PFIT_HORIZ_SCALE_MASK 0x0000fff0
+/* 965+ */
+#define PFIT_VERT_SCALE_SHIFT_965 16
+#define PFIT_VERT_SCALE_MASK_965 0x1fff0000
+#define PFIT_HORIZ_SCALE_SHIFT_965 0
+#define PFIT_HORIZ_SCALE_MASK_965 0x00001fff
+
#define PFIT_AUTO_RATIOS 0x61238
/* Backlight control */
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index a98e283..8d8e083 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -322,6 +322,20 @@
dev_priv->savePP_OFF_DELAYS = I915_READ(PP_OFF_DELAYS);
dev_priv->savePP_DIVISOR = I915_READ(PP_DIVISOR);
+ /* Display Port state */
+ if (SUPPORTS_INTEGRATED_DP(dev)) {
+ dev_priv->saveDP_B = I915_READ(DP_B);
+ dev_priv->saveDP_C = I915_READ(DP_C);
+ dev_priv->saveDP_D = I915_READ(DP_D);
+ dev_priv->savePIPEA_GMCH_DATA_M = I915_READ(PIPEA_GMCH_DATA_M);
+ dev_priv->savePIPEB_GMCH_DATA_M = I915_READ(PIPEB_GMCH_DATA_M);
+ dev_priv->savePIPEA_GMCH_DATA_N = I915_READ(PIPEA_GMCH_DATA_N);
+ dev_priv->savePIPEB_GMCH_DATA_N = I915_READ(PIPEB_GMCH_DATA_N);
+ dev_priv->savePIPEA_DP_LINK_M = I915_READ(PIPEA_DP_LINK_M);
+ dev_priv->savePIPEB_DP_LINK_M = I915_READ(PIPEB_DP_LINK_M);
+ dev_priv->savePIPEA_DP_LINK_N = I915_READ(PIPEA_DP_LINK_N);
+ dev_priv->savePIPEB_DP_LINK_N = I915_READ(PIPEB_DP_LINK_N);
+ }
/* FIXME: save TV & SDVO state */
/* FBC state */
@@ -404,7 +418,19 @@
for (i = 0; i < 8; i++)
I915_WRITE(FENCE_REG_945_8 + (i * 4), dev_priv->saveFENCE[i+8]);
}
-
+
+ /* Display port ratios (must be done before clock is set) */
+ if (SUPPORTS_INTEGRATED_DP(dev)) {
+ I915_WRITE(PIPEA_GMCH_DATA_M, dev_priv->savePIPEA_GMCH_DATA_M);
+ I915_WRITE(PIPEB_GMCH_DATA_M, dev_priv->savePIPEB_GMCH_DATA_M);
+ I915_WRITE(PIPEA_GMCH_DATA_N, dev_priv->savePIPEA_GMCH_DATA_N);
+ I915_WRITE(PIPEB_GMCH_DATA_N, dev_priv->savePIPEB_GMCH_DATA_N);
+ I915_WRITE(PIPEA_DP_LINK_M, dev_priv->savePIPEA_DP_LINK_M);
+ I915_WRITE(PIPEB_DP_LINK_M, dev_priv->savePIPEB_DP_LINK_M);
+ I915_WRITE(PIPEA_DP_LINK_N, dev_priv->savePIPEA_DP_LINK_N);
+ I915_WRITE(PIPEB_DP_LINK_N, dev_priv->savePIPEB_DP_LINK_N);
+ }
+
/* Pipe & plane A info */
/* Prime the clock */
if (dev_priv->saveDPLL_A & DPLL_VCO_ENABLE) {
@@ -518,6 +544,12 @@
I915_WRITE(PP_DIVISOR, dev_priv->savePP_DIVISOR);
I915_WRITE(PP_CONTROL, dev_priv->savePP_CONTROL);
+ /* Display Port state */
+ if (SUPPORTS_INTEGRATED_DP(dev)) {
+ I915_WRITE(DP_B, dev_priv->saveDP_B);
+ I915_WRITE(DP_C, dev_priv->saveDP_C);
+ I915_WRITE(DP_D, dev_priv->saveDP_D);
+ }
/* FIXME: restore TV & SDVO state */
/* FBC info */
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index cdd126d..716409a 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -99,9 +99,11 @@
{
struct bdb_lvds_options *lvds_options;
struct bdb_lvds_lfp_data *lvds_lfp_data;
+ struct bdb_lvds_lfp_data_ptrs *lvds_lfp_data_ptrs;
struct bdb_lvds_lfp_data_entry *entry;
struct lvds_dvo_timing *dvo_timing;
struct drm_display_mode *panel_fixed_mode;
+ int lfp_data_size;
/* Defaults if we can't find VBT info */
dev_priv->lvds_dither = 0;
@@ -119,9 +121,17 @@
if (!lvds_lfp_data)
return;
+ lvds_lfp_data_ptrs = find_section(bdb, BDB_LVDS_LFP_DATA_PTRS);
+ if (!lvds_lfp_data_ptrs)
+ return;
+
dev_priv->lvds_vbt = 1;
- entry = &lvds_lfp_data->data[lvds_options->panel_type];
+ lfp_data_size = lvds_lfp_data_ptrs->ptr[1].dvo_timing_offset -
+ lvds_lfp_data_ptrs->ptr[0].dvo_timing_offset;
+ entry = (struct bdb_lvds_lfp_data_entry *)
+ ((uint8_t *)lvds_lfp_data->data + (lfp_data_size *
+ lvds_options->panel_type));
dvo_timing = &entry->dvo_timing;
panel_fixed_mode = kzalloc(sizeof(*panel_fixed_mode), GFP_KERNEL);
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 3e1c781..73e7b9c 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -29,6 +29,7 @@
#include "intel_drv.h"
#include "i915_drm.h"
#include "i915_drv.h"
+#include "intel_dp.h"
#include "drm_crtc_helper.h"
@@ -127,19 +128,6 @@
#define I9XX_P2_LVDS_FAST 7
#define I9XX_P2_LVDS_SLOW_LIMIT 112000
-#define INTEL_LIMIT_I8XX_DVO_DAC 0
-#define INTEL_LIMIT_I8XX_LVDS 1
-#define INTEL_LIMIT_I9XX_SDVO_DAC 2
-#define INTEL_LIMIT_I9XX_LVDS 3
-#define INTEL_LIMIT_G4X_SDVO 4
-#define INTEL_LIMIT_G4X_HDMI_DAC 5
-#define INTEL_LIMIT_G4X_SINGLE_CHANNEL_LVDS 6
-#define INTEL_LIMIT_G4X_DUAL_CHANNEL_LVDS 7
-#define INTEL_LIMIT_IGD_SDVO_DAC 8
-#define INTEL_LIMIT_IGD_LVDS 9
-#define INTEL_LIMIT_IGDNG_SDVO_DAC 10
-#define INTEL_LIMIT_IGDNG_LVDS 11
-
/*The parameter is for SDVO on G4x platform*/
#define G4X_DOT_SDVO_MIN 25000
#define G4X_DOT_SDVO_MAX 270000
@@ -218,6 +206,25 @@
#define G4X_P2_DUAL_CHANNEL_LVDS_FAST 7
#define G4X_P2_DUAL_CHANNEL_LVDS_LIMIT 0
+/*The parameter is for DISPLAY PORT on G4x platform*/
+#define G4X_DOT_DISPLAY_PORT_MIN 161670
+#define G4X_DOT_DISPLAY_PORT_MAX 227000
+#define G4X_N_DISPLAY_PORT_MIN 1
+#define G4X_N_DISPLAY_PORT_MAX 2
+#define G4X_M_DISPLAY_PORT_MIN 97
+#define G4X_M_DISPLAY_PORT_MAX 108
+#define G4X_M1_DISPLAY_PORT_MIN 0x10
+#define G4X_M1_DISPLAY_PORT_MAX 0x12
+#define G4X_M2_DISPLAY_PORT_MIN 0x05
+#define G4X_M2_DISPLAY_PORT_MAX 0x06
+#define G4X_P_DISPLAY_PORT_MIN 10
+#define G4X_P_DISPLAY_PORT_MAX 20
+#define G4X_P1_DISPLAY_PORT_MIN 1
+#define G4X_P1_DISPLAY_PORT_MAX 2
+#define G4X_P2_DISPLAY_PORT_SLOW 10
+#define G4X_P2_DISPLAY_PORT_FAST 10
+#define G4X_P2_DISPLAY_PORT_LIMIT 0
+
/* IGDNG */
/* as we calculate clock using (register_value + 2) for
N/M1/M2, so here the range value for them is (actual_value-2).
@@ -256,8 +263,11 @@
intel_igdng_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
int target, int refclk, intel_clock_t *best_clock);
-static const intel_limit_t intel_limits[] = {
- { /* INTEL_LIMIT_I8XX_DVO_DAC */
+static bool
+intel_find_pll_g4x_dp(const intel_limit_t *, struct drm_crtc *crtc,
+ int target, int refclk, intel_clock_t *best_clock);
+
+static const intel_limit_t intel_limits_i8xx_dvo = {
.dot = { .min = I8XX_DOT_MIN, .max = I8XX_DOT_MAX },
.vco = { .min = I8XX_VCO_MIN, .max = I8XX_VCO_MAX },
.n = { .min = I8XX_N_MIN, .max = I8XX_N_MAX },
@@ -269,8 +279,9 @@
.p2 = { .dot_limit = I8XX_P2_SLOW_LIMIT,
.p2_slow = I8XX_P2_SLOW, .p2_fast = I8XX_P2_FAST },
.find_pll = intel_find_best_PLL,
- },
- { /* INTEL_LIMIT_I8XX_LVDS */
+};
+
+static const intel_limit_t intel_limits_i8xx_lvds = {
.dot = { .min = I8XX_DOT_MIN, .max = I8XX_DOT_MAX },
.vco = { .min = I8XX_VCO_MIN, .max = I8XX_VCO_MAX },
.n = { .min = I8XX_N_MIN, .max = I8XX_N_MAX },
@@ -282,8 +293,9 @@
.p2 = { .dot_limit = I8XX_P2_SLOW_LIMIT,
.p2_slow = I8XX_P2_LVDS_SLOW, .p2_fast = I8XX_P2_LVDS_FAST },
.find_pll = intel_find_best_PLL,
- },
- { /* INTEL_LIMIT_I9XX_SDVO_DAC */
+};
+
+static const intel_limit_t intel_limits_i9xx_sdvo = {
.dot = { .min = I9XX_DOT_MIN, .max = I9XX_DOT_MAX },
.vco = { .min = I9XX_VCO_MIN, .max = I9XX_VCO_MAX },
.n = { .min = I9XX_N_MIN, .max = I9XX_N_MAX },
@@ -295,8 +307,9 @@
.p2 = { .dot_limit = I9XX_P2_SDVO_DAC_SLOW_LIMIT,
.p2_slow = I9XX_P2_SDVO_DAC_SLOW, .p2_fast = I9XX_P2_SDVO_DAC_FAST },
.find_pll = intel_find_best_PLL,
- },
- { /* INTEL_LIMIT_I9XX_LVDS */
+};
+
+static const intel_limit_t intel_limits_i9xx_lvds = {
.dot = { .min = I9XX_DOT_MIN, .max = I9XX_DOT_MAX },
.vco = { .min = I9XX_VCO_MIN, .max = I9XX_VCO_MAX },
.n = { .min = I9XX_N_MIN, .max = I9XX_N_MAX },
@@ -311,9 +324,10 @@
.p2 = { .dot_limit = I9XX_P2_LVDS_SLOW_LIMIT,
.p2_slow = I9XX_P2_LVDS_SLOW, .p2_fast = I9XX_P2_LVDS_FAST },
.find_pll = intel_find_best_PLL,
- },
+};
+
/* below parameter and function is for G4X Chipset Family*/
- { /* INTEL_LIMIT_G4X_SDVO */
+static const intel_limit_t intel_limits_g4x_sdvo = {
.dot = { .min = G4X_DOT_SDVO_MIN, .max = G4X_DOT_SDVO_MAX },
.vco = { .min = G4X_VCO_MIN, .max = G4X_VCO_MAX},
.n = { .min = G4X_N_SDVO_MIN, .max = G4X_N_SDVO_MAX },
@@ -327,8 +341,9 @@
.p2_fast = G4X_P2_SDVO_FAST
},
.find_pll = intel_g4x_find_best_PLL,
- },
- { /* INTEL_LIMIT_G4X_HDMI_DAC */
+};
+
+static const intel_limit_t intel_limits_g4x_hdmi = {
.dot = { .min = G4X_DOT_HDMI_DAC_MIN, .max = G4X_DOT_HDMI_DAC_MAX },
.vco = { .min = G4X_VCO_MIN, .max = G4X_VCO_MAX},
.n = { .min = G4X_N_HDMI_DAC_MIN, .max = G4X_N_HDMI_DAC_MAX },
@@ -342,8 +357,9 @@
.p2_fast = G4X_P2_HDMI_DAC_FAST
},
.find_pll = intel_g4x_find_best_PLL,
- },
- { /* INTEL_LIMIT_G4X_SINGLE_CHANNEL_LVDS */
+};
+
+static const intel_limit_t intel_limits_g4x_single_channel_lvds = {
.dot = { .min = G4X_DOT_SINGLE_CHANNEL_LVDS_MIN,
.max = G4X_DOT_SINGLE_CHANNEL_LVDS_MAX },
.vco = { .min = G4X_VCO_MIN,
@@ -365,8 +381,9 @@
.p2_fast = G4X_P2_SINGLE_CHANNEL_LVDS_FAST
},
.find_pll = intel_g4x_find_best_PLL,
- },
- { /* INTEL_LIMIT_G4X_DUAL_CHANNEL_LVDS */
+};
+
+static const intel_limit_t intel_limits_g4x_dual_channel_lvds = {
.dot = { .min = G4X_DOT_DUAL_CHANNEL_LVDS_MIN,
.max = G4X_DOT_DUAL_CHANNEL_LVDS_MAX },
.vco = { .min = G4X_VCO_MIN,
@@ -388,8 +405,32 @@
.p2_fast = G4X_P2_DUAL_CHANNEL_LVDS_FAST
},
.find_pll = intel_g4x_find_best_PLL,
- },
- { /* INTEL_LIMIT_IGD_SDVO */
+};
+
+static const intel_limit_t intel_limits_g4x_display_port = {
+ .dot = { .min = G4X_DOT_DISPLAY_PORT_MIN,
+ .max = G4X_DOT_DISPLAY_PORT_MAX },
+ .vco = { .min = G4X_VCO_MIN,
+ .max = G4X_VCO_MAX},
+ .n = { .min = G4X_N_DISPLAY_PORT_MIN,
+ .max = G4X_N_DISPLAY_PORT_MAX },
+ .m = { .min = G4X_M_DISPLAY_PORT_MIN,
+ .max = G4X_M_DISPLAY_PORT_MAX },
+ .m1 = { .min = G4X_M1_DISPLAY_PORT_MIN,
+ .max = G4X_M1_DISPLAY_PORT_MAX },
+ .m2 = { .min = G4X_M2_DISPLAY_PORT_MIN,
+ .max = G4X_M2_DISPLAY_PORT_MAX },
+ .p = { .min = G4X_P_DISPLAY_PORT_MIN,
+ .max = G4X_P_DISPLAY_PORT_MAX },
+ .p1 = { .min = G4X_P1_DISPLAY_PORT_MIN,
+ .max = G4X_P1_DISPLAY_PORT_MAX},
+ .p2 = { .dot_limit = G4X_P2_DISPLAY_PORT_LIMIT,
+ .p2_slow = G4X_P2_DISPLAY_PORT_SLOW,
+ .p2_fast = G4X_P2_DISPLAY_PORT_FAST },
+ .find_pll = intel_find_pll_g4x_dp,
+};
+
+static const intel_limit_t intel_limits_igd_sdvo = {
.dot = { .min = I9XX_DOT_MIN, .max = I9XX_DOT_MAX},
.vco = { .min = IGD_VCO_MIN, .max = IGD_VCO_MAX },
.n = { .min = IGD_N_MIN, .max = IGD_N_MAX },
@@ -401,8 +442,9 @@
.p2 = { .dot_limit = I9XX_P2_SDVO_DAC_SLOW_LIMIT,
.p2_slow = I9XX_P2_SDVO_DAC_SLOW, .p2_fast = I9XX_P2_SDVO_DAC_FAST },
.find_pll = intel_find_best_PLL,
- },
- { /* INTEL_LIMIT_IGD_LVDS */
+};
+
+static const intel_limit_t intel_limits_igd_lvds = {
.dot = { .min = I9XX_DOT_MIN, .max = I9XX_DOT_MAX },
.vco = { .min = IGD_VCO_MIN, .max = IGD_VCO_MAX },
.n = { .min = IGD_N_MIN, .max = IGD_N_MAX },
@@ -415,8 +457,9 @@
.p2 = { .dot_limit = I9XX_P2_LVDS_SLOW_LIMIT,
.p2_slow = I9XX_P2_LVDS_SLOW, .p2_fast = I9XX_P2_LVDS_SLOW },
.find_pll = intel_find_best_PLL,
- },
- { /* INTEL_LIMIT_IGDNG_SDVO_DAC */
+};
+
+static const intel_limit_t intel_limits_igdng_sdvo = {
.dot = { .min = IGDNG_DOT_MIN, .max = IGDNG_DOT_MAX },
.vco = { .min = IGDNG_VCO_MIN, .max = IGDNG_VCO_MAX },
.n = { .min = IGDNG_N_MIN, .max = IGDNG_N_MAX },
@@ -429,8 +472,9 @@
.p2_slow = IGDNG_P2_SDVO_DAC_SLOW,
.p2_fast = IGDNG_P2_SDVO_DAC_FAST },
.find_pll = intel_igdng_find_best_PLL,
- },
- { /* INTEL_LIMIT_IGDNG_LVDS */
+};
+
+static const intel_limit_t intel_limits_igdng_lvds = {
.dot = { .min = IGDNG_DOT_MIN, .max = IGDNG_DOT_MAX },
.vco = { .min = IGDNG_VCO_MIN, .max = IGDNG_VCO_MAX },
.n = { .min = IGDNG_N_MIN, .max = IGDNG_N_MAX },
@@ -443,16 +487,15 @@
.p2_slow = IGDNG_P2_LVDS_SLOW,
.p2_fast = IGDNG_P2_LVDS_FAST },
.find_pll = intel_igdng_find_best_PLL,
- },
};
static const intel_limit_t *intel_igdng_limit(struct drm_crtc *crtc)
{
const intel_limit_t *limit;
if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
- limit = &intel_limits[INTEL_LIMIT_IGDNG_LVDS];
+ limit = &intel_limits_igdng_lvds;
else
- limit = &intel_limits[INTEL_LIMIT_IGDNG_SDVO_DAC];
+ limit = &intel_limits_igdng_sdvo;
return limit;
}
@@ -467,19 +510,19 @@
if ((I915_READ(LVDS) & LVDS_CLKB_POWER_MASK) ==
LVDS_CLKB_POWER_UP)
/* LVDS with dual channel */
- limit = &intel_limits
- [INTEL_LIMIT_G4X_DUAL_CHANNEL_LVDS];
+ limit = &intel_limits_g4x_dual_channel_lvds;
else
/* LVDS with a single channel */
- limit = &intel_limits
- [INTEL_LIMIT_G4X_SINGLE_CHANNEL_LVDS];
+ limit = &intel_limits_g4x_single_channel_lvds;
} else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI) ||
intel_pipe_has_type(crtc, INTEL_OUTPUT_ANALOG)) {
- limit = &intel_limits[INTEL_LIMIT_G4X_HDMI_DAC];
+ limit = &intel_limits_g4x_hdmi;
} else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO)) {
- limit = &intel_limits[INTEL_LIMIT_G4X_SDVO];
+ limit = &intel_limits_g4x_sdvo;
+ } else if (intel_pipe_has_type (crtc, INTEL_OUTPUT_DISPLAYPORT)) {
+ limit = &intel_limits_g4x_display_port;
} else /* The option is for other outputs */
- limit = &intel_limits[INTEL_LIMIT_I9XX_SDVO_DAC];
+ limit = &intel_limits_i9xx_sdvo;
return limit;
}
@@ -495,19 +538,19 @@
limit = intel_g4x_limit(crtc);
} else if (IS_I9XX(dev) && !IS_IGD(dev)) {
if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
- limit = &intel_limits[INTEL_LIMIT_I9XX_LVDS];
+ limit = &intel_limits_i9xx_lvds;
else
- limit = &intel_limits[INTEL_LIMIT_I9XX_SDVO_DAC];
+ limit = &intel_limits_i9xx_sdvo;
} else if (IS_IGD(dev)) {
if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
- limit = &intel_limits[INTEL_LIMIT_IGD_LVDS];
+ limit = &intel_limits_igd_lvds;
else
- limit = &intel_limits[INTEL_LIMIT_IGD_SDVO_DAC];
+ limit = &intel_limits_igd_sdvo;
} else {
if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
- limit = &intel_limits[INTEL_LIMIT_I8XX_LVDS];
+ limit = &intel_limits_i8xx_lvds;
else
- limit = &intel_limits[INTEL_LIMIT_I8XX_DVO_DAC];
+ limit = &intel_limits_i8xx_dvo;
}
return limit;
}
@@ -764,6 +807,35 @@
return found;
}
+/* DisplayPort has only two frequencies, 162MHz and 270MHz */
+static bool
+intel_find_pll_g4x_dp(const intel_limit_t *limit, struct drm_crtc *crtc,
+ int target, int refclk, intel_clock_t *best_clock)
+{
+ intel_clock_t clock;
+ if (target < 200000) {
+ clock.dot = 161670;
+ clock.p = 20;
+ clock.p1 = 2;
+ clock.p2 = 10;
+ clock.n = 0x01;
+ clock.m = 97;
+ clock.m1 = 0x10;
+ clock.m2 = 0x05;
+ } else {
+ clock.dot = 270000;
+ clock.p = 10;
+ clock.p1 = 1;
+ clock.p2 = 10;
+ clock.n = 0x02;
+ clock.m = 108;
+ clock.m1 = 0x12;
+ clock.m2 = 0x06;
+ }
+ memcpy(best_clock, &clock, sizeof(intel_clock_t));
+ return true;
+}
+
void
intel_wait_for_vblank(struct drm_device *dev)
{
@@ -1541,7 +1613,7 @@
intel_clock_t clock;
u32 dpll = 0, fp = 0, dspcntr, pipeconf;
bool ok, is_sdvo = false, is_dvo = false;
- bool is_crt = false, is_lvds = false, is_tv = false;
+ bool is_crt = false, is_lvds = false, is_tv = false, is_dp = false;
struct drm_mode_config *mode_config = &dev->mode_config;
struct drm_connector *connector;
const intel_limit_t *limit;
@@ -1585,6 +1657,9 @@
case INTEL_OUTPUT_ANALOG:
is_crt = true;
break;
+ case INTEL_OUTPUT_DISPLAYPORT:
+ is_dp = true;
+ break;
}
num_outputs++;
@@ -1600,6 +1675,7 @@
} else {
refclk = 48000;
}
+
/*
* Returns a set of divisors for the desired target clock with the given
@@ -1662,6 +1738,8 @@
else if (IS_IGDNG(dev))
dpll |= (sdvo_pixel_multiply - 1) << PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;
}
+ if (is_dp)
+ dpll |= DPLL_DVO_HIGH_SPEED;
/* compute bitmask from p1 value */
if (IS_IGD(dev))
@@ -1809,6 +1887,8 @@
I915_WRITE(lvds_reg, lvds);
I915_READ(lvds_reg);
}
+ if (is_dp)
+ intel_dp_set_m_n(crtc, mode, adjusted_mode);
I915_WRITE(fp_reg, fp);
I915_WRITE(dpll_reg, dpll);
@@ -2475,6 +2555,8 @@
found = intel_sdvo_init(dev, SDVOB);
if (!found && SUPPORTS_INTEGRATED_HDMI(dev))
intel_hdmi_init(dev, SDVOB);
+ if (!found && SUPPORTS_INTEGRATED_DP(dev))
+ intel_dp_init(dev, DP_B);
}
/* Before G4X SDVOC doesn't have its own detect register */
@@ -2487,7 +2569,11 @@
found = intel_sdvo_init(dev, SDVOC);
if (!found && SUPPORTS_INTEGRATED_HDMI(dev))
intel_hdmi_init(dev, SDVOC);
+ if (!found && SUPPORTS_INTEGRATED_DP(dev))
+ intel_dp_init(dev, DP_C);
}
+ if (SUPPORTS_INTEGRATED_DP(dev) && (I915_READ(DP_D) & DP_DETECTED))
+ intel_dp_init(dev, DP_D);
} else
intel_dvo_init(dev);
@@ -2530,6 +2616,11 @@
(1 << 1));
clone_mask = (1 << INTEL_OUTPUT_TVOUT);
break;
+ case INTEL_OUTPUT_DISPLAYPORT:
+ crtc_mask = ((1 << 0) |
+ (1 << 1));
+ clone_mask = (1 << INTEL_OUTPUT_DISPLAYPORT);
+ break;
}
encoder->possible_crtcs = crtc_mask;
encoder->possible_clones = intel_connector_clones(dev, clone_mask);
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
new file mode 100644
index 0000000..8f8d37d
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -0,0 +1,1153 @@
+/*
+ * Copyright © 2008 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ * Keith Packard <keithp@keithp.com>
+ *
+ */
+
+#include <linux/i2c.h>
+#include "drmP.h"
+#include "drm.h"
+#include "drm_crtc.h"
+#include "drm_crtc_helper.h"
+#include "intel_drv.h"
+#include "i915_drm.h"
+#include "i915_drv.h"
+#include "intel_dp.h"
+
+#define DP_LINK_STATUS_SIZE 6
+#define DP_LINK_CHECK_TIMEOUT (10 * 1000)
+
+#define DP_LINK_CONFIGURATION_SIZE 9
+
+struct intel_dp_priv {
+ uint32_t output_reg;
+ uint32_t DP;
+ uint8_t link_configuration[DP_LINK_CONFIGURATION_SIZE];
+ uint32_t save_DP;
+ uint8_t save_link_configuration[DP_LINK_CONFIGURATION_SIZE];
+ bool has_audio;
+ int dpms_mode;
+ uint8_t link_bw;
+ uint8_t lane_count;
+ uint8_t dpcd[4];
+ struct intel_output *intel_output;
+ struct i2c_adapter adapter;
+ struct i2c_algo_dp_aux_data algo;
+};
+
+static void
+intel_dp_link_train(struct intel_output *intel_output, uint32_t DP,
+ uint8_t link_configuration[DP_LINK_CONFIGURATION_SIZE]);
+
+static void
+intel_dp_link_down(struct intel_output *intel_output, uint32_t DP);
+
+static int
+intel_dp_max_lane_count(struct intel_output *intel_output)
+{
+ struct intel_dp_priv *dp_priv = intel_output->dev_priv;
+ int max_lane_count = 4;
+
+ if (dp_priv->dpcd[0] >= 0x11) {
+ max_lane_count = dp_priv->dpcd[2] & 0x1f;
+ switch (max_lane_count) {
+ case 1: case 2: case 4:
+ break;
+ default:
+ max_lane_count = 4;
+ }
+ }
+ return max_lane_count;
+}
+
+static int
+intel_dp_max_link_bw(struct intel_output *intel_output)
+{
+ struct intel_dp_priv *dp_priv = intel_output->dev_priv;
+ int max_link_bw = dp_priv->dpcd[1];
+
+ switch (max_link_bw) {
+ case DP_LINK_BW_1_62:
+ case DP_LINK_BW_2_7:
+ break;
+ default:
+ max_link_bw = DP_LINK_BW_1_62;
+ break;
+ }
+ return max_link_bw;
+}
+
+static int
+intel_dp_link_clock(uint8_t link_bw)
+{
+ if (link_bw == DP_LINK_BW_2_7)
+ return 270000;
+ else
+ return 162000;
+}
+
+/* I think this is a fiction */
+static int
+intel_dp_link_required(int pixel_clock)
+{
+ return pixel_clock * 3;
+}
+
+static int
+intel_dp_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ struct intel_output *intel_output = to_intel_output(connector);
+ int max_link_clock = intel_dp_link_clock(intel_dp_max_link_bw(intel_output));
+ int max_lanes = intel_dp_max_lane_count(intel_output);
+
+ if (intel_dp_link_required(mode->clock) > max_link_clock * max_lanes)
+ return MODE_CLOCK_HIGH;
+
+ if (mode->clock < 10000)
+ return MODE_CLOCK_LOW;
+
+ return MODE_OK;
+}
+
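+/* Pack up to four bytes MSB-first into one 32-bit AUX data register.
+ * For example, pack_aux((uint8_t []){ 0x80, 0x01, 0x02, 0x00 }, 4)
+ * returns 0x80010200.
+ */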
+static uint32_t
+pack_aux(uint8_t *src, int src_bytes)
+{
+ int i;
+ uint32_t v = 0;
+
+ if (src_bytes > 4)
+ src_bytes = 4;
+ for (i = 0; i < src_bytes; i++)
+ v |= ((uint32_t) src[i]) << ((3-i) * 8);
+ return v;
+}
+
+static void
+unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes)
+{
+ int i;
+ if (dst_bytes > 4)
+ dst_bytes = 4;
+ for (i = 0; i < dst_bytes; i++)
+ dst[i] = src >> ((3-i) * 8);
+}
+
+/* hrawclock is 1/4 the FSB frequency */
+static int
+intel_hrawclk(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ uint32_t clkcfg;
+
+ clkcfg = I915_READ(CLKCFG);
+ switch (clkcfg & CLKCFG_FSB_MASK) {
+ case CLKCFG_FSB_400:
+ return 100;
+ case CLKCFG_FSB_533:
+ return 133;
+ case CLKCFG_FSB_667:
+ return 166;
+ case CLKCFG_FSB_800:
+ return 200;
+ case CLKCFG_FSB_1067:
+ return 266;
+ case CLKCFG_FSB_1333:
+ return 333;
+ /* these two are just a guess; one of them might be right */
+ case CLKCFG_FSB_1600:
+ case CLKCFG_FSB_1600_ALT:
+ return 400;
+ default:
+ return 133;
+ }
+}
+
+static int
+intel_dp_aux_ch(struct intel_output *intel_output,
+ uint8_t *send, int send_bytes,
+ uint8_t *recv, int recv_size)
+{
+ struct intel_dp_priv *dp_priv = intel_output->dev_priv;
+ uint32_t output_reg = dp_priv->output_reg;
+ struct drm_device *dev = intel_output->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ uint32_t ch_ctl = output_reg + 0x10;
+ uint32_t ch_data = ch_ctl + 4;
+ int i;
+ int recv_bytes;
+ uint32_t ctl;
+ uint32_t status;
+ uint32_t aux_clock_divider;
+ int try;
+
+ /* The clock divider is based on hrawclk, and the AUX channel
+  * should run at 2 MHz, so take the hrawclk value and divide it by 2.
+  */
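+ /* Illustrative numbers, assuming hrawclk is reported in MHz: an
+  * 800 MHz FSB gives a 200 MHz hrawclk, so the divider is 100 and
+  * 200 MHz / 100 yields the desired 2 MHz.
+  */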
+ aux_clock_divider = intel_hrawclk(dev) / 2;
+ /* Must try at least 3 times according to DP spec */
+ for (try = 0; try < 5; try++) {
+ /* Load the send data into the aux channel data registers */
+ for (i = 0; i < send_bytes; i += 4) {
+ uint32_t d = pack_aux(send + i, send_bytes - i);
+
+ I915_WRITE(ch_data + i, d);
+ }
+
+ ctl = (DP_AUX_CH_CTL_SEND_BUSY |
+ DP_AUX_CH_CTL_TIME_OUT_400us |
+ (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
+ (5 << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
+ (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT) |
+ DP_AUX_CH_CTL_DONE |
+ DP_AUX_CH_CTL_TIME_OUT_ERROR |
+ DP_AUX_CH_CTL_RECEIVE_ERROR);
+
+ /* Send the command and wait for it to complete */
+ I915_WRITE(ch_ctl, ctl);
+ (void) I915_READ(ch_ctl);
+ for (;;) {
+ udelay(100);
+ status = I915_READ(ch_ctl);
+ if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
+ break;
+ }
+
+ /* Clear done status and any errors */
+ I915_WRITE(ch_ctl, (ctl |
+ DP_AUX_CH_CTL_DONE |
+ DP_AUX_CH_CTL_TIME_OUT_ERROR |
+ DP_AUX_CH_CTL_RECEIVE_ERROR));
+ (void) I915_READ(ch_ctl);
+ if ((status & DP_AUX_CH_CTL_TIME_OUT_ERROR) == 0)
+ break;
+ }
+
+ if ((status & DP_AUX_CH_CTL_DONE) == 0) {
+ printk(KERN_ERR "dp_aux_ch not done status 0x%08x\n", status);
+ return -EBUSY;
+ }
+
+ /* Check for timeout or receive error.
+ * Timeouts occur when the sink is not connected
+ */
+ if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
+ printk(KERN_ERR "dp_aux_ch receive error status 0x%08x\n", status);
+ return -EIO;
+ }
+ if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
+ printk(KERN_ERR "dp_aux_ch timeout status 0x%08x\n", status);
+ return -ETIMEDOUT;
+ }
+
+ /* Unload any bytes sent back from the other side */
+ recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >>
+ DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);
+
+ if (recv_bytes > recv_size)
+ recv_bytes = recv_size;
+
+ for (i = 0; i < recv_bytes; i += 4) {
+ uint32_t d = I915_READ(ch_data + i);
+
+ unpack_aux(d, recv + i, recv_bytes - i);
+ }
+
+ return recv_bytes;
+}
+
+/* Write data to the aux channel in native mode */
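+/* For example, a one-byte native write to DPCD address 0x102 goes out as
+ * the five bytes { 0x80, 0x01, 0x02, 0x00, <data> }: command << 4,
+ * address high byte, address low byte, payload length minus one, then
+ * the payload itself.
+ */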
+static int
+intel_dp_aux_native_write(struct intel_output *intel_output,
+ uint16_t address, uint8_t *send, int send_bytes)
+{
+ int ret;
+ uint8_t msg[20];
+ int msg_bytes;
+ uint8_t ack;
+
+ if (send_bytes > 16)
+ return -1;
+ msg[0] = AUX_NATIVE_WRITE << 4;
+ msg[1] = address >> 8;
+ msg[2] = address;
+ msg[3] = send_bytes - 1;
+ memcpy(&msg[4], send, send_bytes);
+ msg_bytes = send_bytes + 4;
+ for (;;) {
+ ret = intel_dp_aux_ch(intel_output, msg, msg_bytes, &ack, 1);
+ if (ret < 0)
+ return ret;
+ if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK)
+ break;
+ else if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_DEFER)
+ udelay(100);
+ else
+ return -EIO;
+ }
+ return send_bytes;
+}
+
+/* Write a single byte to the aux channel in native mode */
+static int
+intel_dp_aux_native_write_1(struct intel_output *intel_output,
+ uint16_t address, uint8_t byte)
+{
+ return intel_dp_aux_native_write(intel_output, address, &byte, 1);
+}
+
+/* read bytes from a native aux channel */
+static int
+intel_dp_aux_native_read(struct intel_output *intel_output,
+ uint16_t address, uint8_t *recv, int recv_bytes)
+{
+ uint8_t msg[4];
+ int msg_bytes;
+ uint8_t reply[20];
+ int reply_bytes;
+ uint8_t ack;
+ int ret;
+
+ msg[0] = AUX_NATIVE_READ << 4;
+ msg[1] = address >> 8;
+ msg[2] = address & 0xff;
+ msg[3] = recv_bytes - 1;
+
+ msg_bytes = 4;
+ reply_bytes = recv_bytes + 1;
+
+ for (;;) {
+ ret = intel_dp_aux_ch(intel_output, msg, msg_bytes,
+ reply, reply_bytes);
+ if (ret == 0)
+ return -EPROTO;
+ if (ret < 0)
+ return ret;
+ ack = reply[0];
+ if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK) {
+ memcpy(recv, reply + 1, ret - 1);
+ return ret - 1;
+ }
+ else if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_DEFER)
+ udelay(100);
+ else
+ return -EIO;
+ }
+}
+
+static int
+intel_dp_i2c_aux_ch(struct i2c_adapter *adapter,
+ uint8_t *send, int send_bytes,
+ uint8_t *recv, int recv_bytes)
+{
+ struct intel_dp_priv *dp_priv = container_of(adapter,
+ struct intel_dp_priv,
+ adapter);
+ struct intel_output *intel_output = dp_priv->intel_output;
+
+ return intel_dp_aux_ch(intel_output,
+ send, send_bytes, recv, recv_bytes);
+}
+
+static int
+intel_dp_i2c_init(struct intel_output *intel_output, const char *name)
+{
+ struct intel_dp_priv *dp_priv = intel_output->dev_priv;
+
+ DRM_ERROR("i2c_init %s\n", name);
+ dp_priv->algo.running = false;
+ dp_priv->algo.address = 0;
+ dp_priv->algo.aux_ch = intel_dp_i2c_aux_ch;
+
+ memset(&dp_priv->adapter, '\0', sizeof (dp_priv->adapter));
+ dp_priv->adapter.owner = THIS_MODULE;
+ dp_priv->adapter.class = I2C_CLASS_DDC;
+ strncpy (dp_priv->adapter.name, name, sizeof dp_priv->adapter.name - 1);
+ dp_priv->adapter.name[sizeof dp_priv->adapter.name - 1] = '\0';
+ dp_priv->adapter.algo_data = &dp_priv->algo;
+ dp_priv->adapter.dev.parent = &intel_output->base.kdev;
+
+ return i2c_dp_aux_add_bus(&dp_priv->adapter);
+}
+
+static bool
+intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct intel_output *intel_output = enc_to_intel_output(encoder);
+ struct intel_dp_priv *dp_priv = intel_output->dev_priv;
+ int lane_count, clock;
+ int max_lane_count = intel_dp_max_lane_count(intel_output);
+ int max_clock = intel_dp_max_link_bw(intel_output) == DP_LINK_BW_2_7 ? 1 : 0;
+ static int bws[2] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7 };
+
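+ /* Walk lane counts and link rates from smallest to largest and take
+  * the first combination with enough bandwidth.  Illustrative numbers:
+  * a 148500 kHz mode needs 148500 * 3 = 445500, so one lane at either
+  * rate and two lanes at 1.62 GHz (162000, 270000, 324000) fall short,
+  * but two lanes at 2.7 GHz give 540000 and are selected.
+  */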
+ for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) {
+ for (clock = 0; clock <= max_clock; clock++) {
+ int link_avail = intel_dp_link_clock(bws[clock]) * lane_count;
+
+ if (intel_dp_link_required(mode->clock) <= link_avail) {
+ dp_priv->link_bw = bws[clock];
+ dp_priv->lane_count = lane_count;
+ adjusted_mode->clock = intel_dp_link_clock(dp_priv->link_bw);
+ printk(KERN_ERR "link bw %02x lane count %d clock %d\n",
+ dp_priv->link_bw, dp_priv->lane_count,
+ adjusted_mode->clock);
+ return true;
+ }
+ }
+ }
+ return false;
+}
+
+struct intel_dp_m_n {
+ uint32_t tu;
+ uint32_t gmch_m;
+ uint32_t gmch_n;
+ uint32_t link_m;
+ uint32_t link_n;
+};
+
+static void
+intel_reduce_ratio(uint32_t *num, uint32_t *den)
+{
+ while (*num > 0xffffff || *den > 0xffffff) {
+ *num >>= 1;
+ *den >>= 1;
+ }
+}
+
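+/* Illustrative example: a 148500 kHz mode at 3 bytes per pixel over
+ * 4 lanes of a 270000 kHz link yields gmch_m/gmch_n = 445500/1080000
+ * and link_m/link_n = 148500/270000; both already fit in 24 bits, so
+ * intel_reduce_ratio() leaves them unchanged.
+ */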
+static void
+intel_dp_compute_m_n(int bytes_per_pixel,
+ int nlanes,
+ int pixel_clock,
+ int link_clock,
+ struct intel_dp_m_n *m_n)
+{
+ m_n->tu = 64;
+ m_n->gmch_m = pixel_clock * bytes_per_pixel;
+ m_n->gmch_n = link_clock * nlanes;
+ intel_reduce_ratio(&m_n->gmch_m, &m_n->gmch_n);
+ m_n->link_m = pixel_clock;
+ m_n->link_n = link_clock;
+ intel_reduce_ratio(&m_n->link_m, &m_n->link_n);
+}
+
+void
+intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_mode_config *mode_config = &dev->mode_config;
+ struct drm_connector *connector;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int lane_count = 4;
+ struct intel_dp_m_n m_n;
+
+ /*
+ * Find the lane count in the intel_output private
+ */
+ list_for_each_entry(connector, &mode_config->connector_list, head) {
+ struct intel_output *intel_output = to_intel_output(connector);
+ struct intel_dp_priv *dp_priv = intel_output->dev_priv;
+
+ if (!connector->encoder || connector->encoder->crtc != crtc)
+ continue;
+
+ if (intel_output->type == INTEL_OUTPUT_DISPLAYPORT) {
+ lane_count = dp_priv->lane_count;
+ break;
+ }
+ }
+
+ /*
+ * Compute the GMCH and Link ratios. The '3' here is
+ * the number of bytes_per_pixel post-LUT, which we always
+ * set up for 8-bits of R/G/B, or 3 bytes total.
+ */
+ intel_dp_compute_m_n(3, lane_count,
+ mode->clock, adjusted_mode->clock, &m_n);
+
+ if (intel_crtc->pipe == 0) {
+ I915_WRITE(PIPEA_GMCH_DATA_M,
+ ((m_n.tu - 1) << PIPE_GMCH_DATA_M_TU_SIZE_SHIFT) |
+ m_n.gmch_m);
+ I915_WRITE(PIPEA_GMCH_DATA_N,
+ m_n.gmch_n);
+ I915_WRITE(PIPEA_DP_LINK_M, m_n.link_m);
+ I915_WRITE(PIPEA_DP_LINK_N, m_n.link_n);
+ } else {
+ I915_WRITE(PIPEB_GMCH_DATA_M,
+ ((m_n.tu - 1) << PIPE_GMCH_DATA_M_TU_SIZE_SHIFT) |
+ m_n.gmch_m);
+ I915_WRITE(PIPEB_GMCH_DATA_N,
+ m_n.gmch_n);
+ I915_WRITE(PIPEB_DP_LINK_M, m_n.link_m);
+ I915_WRITE(PIPEB_DP_LINK_N, m_n.link_n);
+ }
+}
+
+static void
+intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct intel_output *intel_output = enc_to_intel_output(encoder);
+ struct intel_dp_priv *dp_priv = intel_output->dev_priv;
+ struct drm_crtc *crtc = intel_output->enc.crtc;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+
+ dp_priv->DP = (DP_LINK_TRAIN_OFF |
+ DP_VOLTAGE_0_4 |
+ DP_PRE_EMPHASIS_0 |
+ DP_SYNC_VS_HIGH |
+ DP_SYNC_HS_HIGH);
+
+ switch (dp_priv->lane_count) {
+ case 1:
+ dp_priv->DP |= DP_PORT_WIDTH_1;
+ break;
+ case 2:
+ dp_priv->DP |= DP_PORT_WIDTH_2;
+ break;
+ case 4:
+ dp_priv->DP |= DP_PORT_WIDTH_4;
+ break;
+ }
+ if (dp_priv->has_audio)
+ dp_priv->DP |= DP_AUDIO_OUTPUT_ENABLE;
+
+ memset(dp_priv->link_configuration, 0, DP_LINK_CONFIGURATION_SIZE);
+ dp_priv->link_configuration[0] = dp_priv->link_bw;
+ dp_priv->link_configuration[1] = dp_priv->lane_count;
+
+ /*
+  * Check for DPCD version 1.1 or later and enable
+  * enhanced framing in that case.
+  */
+ if (dp_priv->dpcd[0] >= 0x11) {
+ dp_priv->link_configuration[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
+ dp_priv->DP |= DP_ENHANCED_FRAMING;
+ }
+
+ if (intel_crtc->pipe == 1)
+ dp_priv->DP |= DP_PIPEB_SELECT;
+}
+
+
+static void
+intel_dp_dpms(struct drm_encoder *encoder, int mode)
+{
+ struct intel_output *intel_output = enc_to_intel_output(encoder);
+ struct intel_dp_priv *dp_priv = intel_output->dev_priv;
+ struct drm_device *dev = intel_output->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ uint32_t dp_reg = I915_READ(dp_priv->output_reg);
+
+ if (mode != DRM_MODE_DPMS_ON) {
+ if (dp_reg & DP_PORT_EN)
+ intel_dp_link_down(intel_output, dp_priv->DP);
+ } else {
+ if (!(dp_reg & DP_PORT_EN))
+ intel_dp_link_train(intel_output, dp_priv->DP, dp_priv->link_configuration);
+ }
+ dp_priv->dpms_mode = mode;
+}
+
+/*
+ * Fetch AUX CH registers 0x202 - 0x207 which contain
+ * link status information
+ */
+static bool
+intel_dp_get_link_status(struct intel_output *intel_output,
+ uint8_t link_status[DP_LINK_STATUS_SIZE])
+{
+ int ret;
+
+ ret = intel_dp_aux_native_read(intel_output,
+ DP_LANE0_1_STATUS,
+ link_status, DP_LINK_STATUS_SIZE);
+ if (ret != DP_LINK_STATUS_SIZE)
+ return false;
+ return true;
+}
+
+static uint8_t
+intel_dp_link_status(uint8_t link_status[DP_LINK_STATUS_SIZE],
+ int r)
+{
+ return link_status[r - DP_LANE0_1_STATUS];
+}
+
+static void
+intel_dp_save(struct drm_connector *connector)
+{
+ struct intel_output *intel_output = to_intel_output(connector);
+ struct drm_device *dev = intel_output->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_dp_priv *dp_priv = intel_output->dev_priv;
+
+ dp_priv->save_DP = I915_READ(dp_priv->output_reg);
+ intel_dp_aux_native_read(intel_output, DP_LINK_BW_SET,
+ dp_priv->save_link_configuration,
+ sizeof (dp_priv->save_link_configuration));
+}
+
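+/* The sink packs its adjust requests two lanes per DPCD byte; for
+ * example, lane 1's requested voltage swing sits in bits 5:4 of
+ * DP_ADJUST_REQUEST_LANE0_1 and its requested pre-emphasis in bits 7:6.
+ */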
+static uint8_t
+intel_get_adjust_request_voltage(uint8_t link_status[DP_LINK_STATUS_SIZE],
+ int lane)
+{
+ int i = DP_ADJUST_REQUEST_LANE0_1 + (lane >> 1);
+ int s = ((lane & 1) ?
+ DP_ADJUST_VOLTAGE_SWING_LANE1_SHIFT :
+ DP_ADJUST_VOLTAGE_SWING_LANE0_SHIFT);
+ uint8_t l = intel_dp_link_status(link_status, i);
+
+ return ((l >> s) & 3) << DP_TRAIN_VOLTAGE_SWING_SHIFT;
+}
+
+static uint8_t
+intel_get_adjust_request_pre_emphasis(uint8_t link_status[DP_LINK_STATUS_SIZE],
+ int lane)
+{
+ int i = DP_ADJUST_REQUEST_LANE0_1 + (lane >> 1);
+ int s = ((lane & 1) ?
+ DP_ADJUST_PRE_EMPHASIS_LANE1_SHIFT :
+ DP_ADJUST_PRE_EMPHASIS_LANE0_SHIFT);
+ uint8_t l = intel_dp_link_status(link_status, i);
+
+ return ((l >> s) & 3) << DP_TRAIN_PRE_EMPHASIS_SHIFT;
+}
+
+
+#if 0
+static char *voltage_names[] = {
+ "0.4V", "0.6V", "0.8V", "1.2V"
+};
+static char *pre_emph_names[] = {
+ "0dB", "3.5dB", "6dB", "9.5dB"
+};
+static char *link_train_names[] = {
+ "pattern 1", "pattern 2", "idle", "off"
+};
+#endif
+
+/*
+ * These are source-specific values; current Intel hardware supports
+ * a maximum voltage of 800mV and a maximum pre-emphasis of 6dB
+ */
+#define I830_DP_VOLTAGE_MAX DP_TRAIN_VOLTAGE_SWING_800
+
+static uint8_t
+intel_dp_pre_emphasis_max(uint8_t voltage_swing)
+{
+ switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
+ case DP_TRAIN_VOLTAGE_SWING_400:
+ return DP_TRAIN_PRE_EMPHASIS_6;
+ case DP_TRAIN_VOLTAGE_SWING_600:
+ return DP_TRAIN_PRE_EMPHASIS_6;
+ case DP_TRAIN_VOLTAGE_SWING_800:
+ return DP_TRAIN_PRE_EMPHASIS_3_5;
+ case DP_TRAIN_VOLTAGE_SWING_1200:
+ default:
+ return DP_TRAIN_PRE_EMPHASIS_0;
+ }
+}
+
+static void
+intel_get_adjust_train(struct intel_output *intel_output,
+ uint8_t link_status[DP_LINK_STATUS_SIZE],
+ int lane_count,
+ uint8_t train_set[4])
+{
+ uint8_t v = 0;
+ uint8_t p = 0;
+ int lane;
+
+ for (lane = 0; lane < lane_count; lane++) {
+ uint8_t this_v = intel_get_adjust_request_voltage(link_status, lane);
+ uint8_t this_p = intel_get_adjust_request_pre_emphasis(link_status, lane);
+
+ if (this_v > v)
+ v = this_v;
+ if (this_p > p)
+ p = this_p;
+ }
+
+ if (v >= I830_DP_VOLTAGE_MAX)
+ v = I830_DP_VOLTAGE_MAX | DP_TRAIN_MAX_SWING_REACHED;
+
+ if (p >= intel_dp_pre_emphasis_max(v))
+ p = intel_dp_pre_emphasis_max(v) | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
+
+ for (lane = 0; lane < 4; lane++)
+ train_set[lane] = v | p;
+}
+
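+/* For example, a train_set value of 0x09 (600 mV swing with 3.5 dB
+ * pre-emphasis) maps to DP_VOLTAGE_0_6 | DP_PRE_EMPHASIS_3_5 in the
+ * port register.
+ */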
+static uint32_t
+intel_dp_signal_levels(uint8_t train_set, int lane_count)
+{
+ uint32_t signal_levels = 0;
+
+ switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
+ case DP_TRAIN_VOLTAGE_SWING_400:
+ default:
+ signal_levels |= DP_VOLTAGE_0_4;
+ break;
+ case DP_TRAIN_VOLTAGE_SWING_600:
+ signal_levels |= DP_VOLTAGE_0_6;
+ break;
+ case DP_TRAIN_VOLTAGE_SWING_800:
+ signal_levels |= DP_VOLTAGE_0_8;
+ break;
+ case DP_TRAIN_VOLTAGE_SWING_1200:
+ signal_levels |= DP_VOLTAGE_1_2;
+ break;
+ }
+ switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
+ case DP_TRAIN_PRE_EMPHASIS_0:
+ default:
+ signal_levels |= DP_PRE_EMPHASIS_0;
+ break;
+ case DP_TRAIN_PRE_EMPHASIS_3_5:
+ signal_levels |= DP_PRE_EMPHASIS_3_5;
+ break;
+ case DP_TRAIN_PRE_EMPHASIS_6:
+ signal_levels |= DP_PRE_EMPHASIS_6;
+ break;
+ case DP_TRAIN_PRE_EMPHASIS_9_5:
+ signal_levels |= DP_PRE_EMPHASIS_9_5;
+ break;
+ }
+ return signal_levels;
+}
+
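+/* Each DPCD status byte holds two lanes, four bits each: for example,
+ * lane 1 is the high nibble of DP_LANE0_1_STATUS and lane 2 the low
+ * nibble of DP_LANE2_3_STATUS.
+ */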
+static uint8_t
+intel_get_lane_status(uint8_t link_status[DP_LINK_STATUS_SIZE],
+ int lane)
+{
+ int i = DP_LANE0_1_STATUS + (lane >> 1);
+ int s = (lane & 1) * 4;
+ uint8_t l = intel_dp_link_status(link_status, i);
+
+ return (l >> s) & 0xf;
+}
+
+/* Check that clock recovery is complete on all lanes */
+static bool
+intel_clock_recovery_ok(uint8_t link_status[DP_LINK_STATUS_SIZE], int lane_count)
+{
+ int lane;
+ uint8_t lane_status;
+
+ for (lane = 0; lane < lane_count; lane++) {
+ lane_status = intel_get_lane_status(link_status, lane);
+ if ((lane_status & DP_LANE_CR_DONE) == 0)
+ return false;
+ }
+ return true;
+}
+
+/* Check to see if channel equalization is done on all lanes */
+#define CHANNEL_EQ_BITS (DP_LANE_CR_DONE|\
+ DP_LANE_CHANNEL_EQ_DONE|\
+ DP_LANE_SYMBOL_LOCKED)
+static bool
+intel_channel_eq_ok(uint8_t link_status[DP_LINK_STATUS_SIZE], int lane_count)
+{
+ uint8_t lane_align;
+ uint8_t lane_status;
+ int lane;
+
+ lane_align = intel_dp_link_status(link_status,
+ DP_LANE_ALIGN_STATUS_UPDATED);
+ if ((lane_align & DP_INTERLANE_ALIGN_DONE) == 0)
+ return false;
+ for (lane = 0; lane < lane_count; lane++) {
+ lane_status = intel_get_lane_status(link_status, lane);
+ if ((lane_status & CHANNEL_EQ_BITS) != CHANNEL_EQ_BITS)
+ return false;
+ }
+ return true;
+}
+
+static bool
+intel_dp_set_link_train(struct intel_output *intel_output,
+ uint32_t dp_reg_value,
+ uint8_t dp_train_pat,
+ uint8_t train_set[4],
+ bool first)
+{
+ struct drm_device *dev = intel_output->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_dp_priv *dp_priv = intel_output->dev_priv;
+ int ret;
+
+ I915_WRITE(dp_priv->output_reg, dp_reg_value);
+ POSTING_READ(dp_priv->output_reg);
+ if (first)
+ intel_wait_for_vblank(dev);
+
+ intel_dp_aux_native_write_1(intel_output,
+ DP_TRAINING_PATTERN_SET,
+ dp_train_pat);
+
+ ret = intel_dp_aux_native_write(intel_output,
+ DP_TRAINING_LANE0_SET, train_set, 4);
+ if (ret != 4)
+ return false;
+
+ return true;
+}
+
+static void
+intel_dp_link_train(struct intel_output *intel_output, uint32_t DP,
+ uint8_t link_configuration[DP_LINK_CONFIGURATION_SIZE])
+{
+ struct drm_device *dev = intel_output->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_dp_priv *dp_priv = intel_output->dev_priv;
+ uint8_t train_set[4];
+ uint8_t link_status[DP_LINK_STATUS_SIZE];
+ int i;
+ uint8_t voltage;
+ bool clock_recovery = false;
+ bool channel_eq = false;
+ bool first = true;
+ int tries;
+
+ /* Write the link configuration data */
+ intel_dp_aux_native_write(intel_output, 0x100,
+ link_configuration, DP_LINK_CONFIGURATION_SIZE);
+
+ DP |= DP_PORT_EN;
+ DP &= ~DP_LINK_TRAIN_MASK;
+ memset(train_set, 0, 4);
+ voltage = 0xff;
+ tries = 0;
+ clock_recovery = false;
+ for (;;) {
+ /* Use train_set[0] to set the voltage and pre emphasis values */
+ uint32_t signal_levels = intel_dp_signal_levels(train_set[0], dp_priv->lane_count);
+ DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels;
+
+ if (!intel_dp_set_link_train(intel_output, DP | DP_LINK_TRAIN_PAT_1,
+ DP_TRAINING_PATTERN_1, train_set, first))
+ break;
+ first = false;
+ /* Set training pattern 1 */
+
+ udelay(100);
+ if (!intel_dp_get_link_status(intel_output, link_status))
+ break;
+
+ if (intel_clock_recovery_ok(link_status, dp_priv->lane_count)) {
+ clock_recovery = true;
+ break;
+ }
+
+ /* Check to see if we've tried the max voltage */
+ for (i = 0; i < dp_priv->lane_count; i++)
+ if ((train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
+ break;
+ if (i == dp_priv->lane_count)
+ break;
+
+ /* Check to see if we've tried the same voltage 5 times */
+ if ((train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) {
+ ++tries;
+ if (tries == 5)
+ break;
+ } else
+ tries = 0;
+ voltage = train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
+
+ /* Compute new train_set as requested by target */
+ intel_get_adjust_train(intel_output, link_status, dp_priv->lane_count, train_set);
+ }
+
+ /* channel equalization */
+ tries = 0;
+ channel_eq = false;
+ for (;;) {
+ /* Use train_set[0] to set the voltage and pre emphasis values */
+ uint32_t signal_levels = intel_dp_signal_levels(train_set[0], dp_priv->lane_count);
+ DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels;
+
+ /* channel eq pattern */
+ if (!intel_dp_set_link_train(intel_output, DP | DP_LINK_TRAIN_PAT_2,
+ DP_TRAINING_PATTERN_2, train_set,
+ false))
+ break;
+
+ udelay(400);
+ if (!intel_dp_get_link_status(intel_output, link_status))
+ break;
+
+ if (intel_channel_eq_ok(link_status, dp_priv->lane_count)) {
+ channel_eq = true;
+ break;
+ }
+
+ /* Try 5 times */
+ if (tries > 5)
+ break;
+
+ /* Compute new train_set as requested by target */
+ intel_get_adjust_train(intel_output, link_status, dp_priv->lane_count, train_set);
+ ++tries;
+ }
+
+ I915_WRITE(dp_priv->output_reg, DP | DP_LINK_TRAIN_OFF);
+ POSTING_READ(dp_priv->output_reg);
+ intel_dp_aux_native_write_1(intel_output,
+ DP_TRAINING_PATTERN_SET, DP_TRAINING_PATTERN_DISABLE);
+}
+
+static void
+intel_dp_link_down(struct intel_output *intel_output, uint32_t DP)
+{
+ struct drm_device *dev = intel_output->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_dp_priv *dp_priv = intel_output->dev_priv;
+
+ I915_WRITE(dp_priv->output_reg, DP & ~DP_PORT_EN);
+ POSTING_READ(dp_priv->output_reg);
+}
+
+static void
+intel_dp_restore(struct drm_connector *connector)
+{
+ struct intel_output *intel_output = to_intel_output(connector);
+ struct intel_dp_priv *dp_priv = intel_output->dev_priv;
+
+ if (dp_priv->save_DP & DP_PORT_EN)
+ intel_dp_link_train(intel_output, dp_priv->save_DP, dp_priv->save_link_configuration);
+ else
+ intel_dp_link_down(intel_output, dp_priv->save_DP);
+}
+
+/*
+ * According to DP spec
+ * 5.1.2:
+ * 1. Read DPCD
+ * 2. Configure link according to Receiver Capabilities
+ * 3. Use Link Training from 2.5.3.3 and 3.5.1.3
+ * 4. Check link status on receipt of hot-plug interrupt
+ */
+
+static void
+intel_dp_check_link_status(struct intel_output *intel_output)
+{
+ struct intel_dp_priv *dp_priv = intel_output->dev_priv;
+ uint8_t link_status[DP_LINK_STATUS_SIZE];
+
+ if (!intel_output->enc.crtc)
+ return;
+
+ if (!intel_dp_get_link_status(intel_output, link_status)) {
+ intel_dp_link_down(intel_output, dp_priv->DP);
+ return;
+ }
+
+ if (!intel_channel_eq_ok(link_status, dp_priv->lane_count))
+ intel_dp_link_train(intel_output, dp_priv->DP, dp_priv->link_configuration);
+}
+
+/**
+ * Uses PORT_HOTPLUG_EN and PORT_HOTPLUG_STAT to detect DP connection.
+ *
+ * \return true if DP port is connected.
+ * \return false if DP port is disconnected.
+ */
+static enum drm_connector_status
+intel_dp_detect(struct drm_connector *connector)
+{
+ struct intel_output *intel_output = to_intel_output(connector);
+ struct drm_device *dev = intel_output->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_dp_priv *dp_priv = intel_output->dev_priv;
+ uint32_t temp, bit;
+ enum drm_connector_status status;
+
+ dp_priv->has_audio = false;
+
+ temp = I915_READ(PORT_HOTPLUG_EN);
+
+ I915_WRITE(PORT_HOTPLUG_EN,
+ temp |
+ DPB_HOTPLUG_INT_EN |
+ DPC_HOTPLUG_INT_EN |
+ DPD_HOTPLUG_INT_EN);
+
+ POSTING_READ(PORT_HOTPLUG_EN);
+
+ switch (dp_priv->output_reg) {
+ case DP_B:
+ bit = DPB_HOTPLUG_INT_STATUS;
+ break;
+ case DP_C:
+ bit = DPC_HOTPLUG_INT_STATUS;
+ break;
+ case DP_D:
+ bit = DPD_HOTPLUG_INT_STATUS;
+ break;
+ default:
+ return connector_status_unknown;
+ }
+
+ temp = I915_READ(PORT_HOTPLUG_STAT);
+
+ if ((temp & bit) == 0)
+ return connector_status_disconnected;
+
+ status = connector_status_disconnected;
+ if (intel_dp_aux_native_read(intel_output,
+ 0x000, dp_priv->dpcd,
+ sizeof (dp_priv->dpcd)) == sizeof (dp_priv->dpcd))
+ {
+ if (dp_priv->dpcd[0] != 0)
+ status = connector_status_connected;
+ }
+ return status;
+}
+
+static int intel_dp_get_modes(struct drm_connector *connector)
+{
+ struct intel_output *intel_output = to_intel_output(connector);
+
+ /* We should parse the EDID data and find out if it has an audio sink
+ */
+
+ return intel_ddc_get_modes(intel_output);
+}
+
+static void
+intel_dp_destroy (struct drm_connector *connector)
+{
+ struct intel_output *intel_output = to_intel_output(connector);
+
+ if (intel_output->i2c_bus)
+ intel_i2c_destroy(intel_output->i2c_bus);
+ drm_sysfs_connector_remove(connector);
+ drm_connector_cleanup(connector);
+ kfree(intel_output);
+}
+
+static const struct drm_encoder_helper_funcs intel_dp_helper_funcs = {
+ .dpms = intel_dp_dpms,
+ .mode_fixup = intel_dp_mode_fixup,
+ .prepare = intel_encoder_prepare,
+ .mode_set = intel_dp_mode_set,
+ .commit = intel_encoder_commit,
+};
+
+static const struct drm_connector_funcs intel_dp_connector_funcs = {
+ .dpms = drm_helper_connector_dpms,
+ .save = intel_dp_save,
+ .restore = intel_dp_restore,
+ .detect = intel_dp_detect,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .destroy = intel_dp_destroy,
+};
+
+static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
+ .get_modes = intel_dp_get_modes,
+ .mode_valid = intel_dp_mode_valid,
+ .best_encoder = intel_best_encoder,
+};
+
+static void intel_dp_enc_destroy(struct drm_encoder *encoder)
+{
+ drm_encoder_cleanup(encoder);
+}
+
+static const struct drm_encoder_funcs intel_dp_enc_funcs = {
+ .destroy = intel_dp_enc_destroy,
+};
+
+void
+intel_dp_hot_plug(struct intel_output *intel_output)
+{
+ struct intel_dp_priv *dp_priv = intel_output->dev_priv;
+
+ if (dp_priv->dpms_mode == DRM_MODE_DPMS_ON)
+ intel_dp_check_link_status(intel_output);
+}
+
+void
+intel_dp_init(struct drm_device *dev, int output_reg)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_connector *connector;
+ struct intel_output *intel_output;
+ struct intel_dp_priv *dp_priv;
+
+ intel_output = kcalloc(sizeof(struct intel_output) +
+ sizeof(struct intel_dp_priv), 1, GFP_KERNEL);
+ if (!intel_output)
+ return;
+
+ dp_priv = (struct intel_dp_priv *)(intel_output + 1);
+
+ connector = &intel_output->base;
+ drm_connector_init(dev, connector, &intel_dp_connector_funcs,
+ DRM_MODE_CONNECTOR_DisplayPort);
+ drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);
+
+ intel_output->type = INTEL_OUTPUT_DISPLAYPORT;
+
+ connector->interlace_allowed = true;
+ connector->doublescan_allowed = 0;
+
+ dp_priv->intel_output = intel_output;
+ dp_priv->output_reg = output_reg;
+ dp_priv->has_audio = false;
+ dp_priv->dpms_mode = DRM_MODE_DPMS_ON;
+ intel_output->dev_priv = dp_priv;
+
+ drm_encoder_init(dev, &intel_output->enc, &intel_dp_enc_funcs,
+ DRM_MODE_ENCODER_TMDS);
+ drm_encoder_helper_add(&intel_output->enc, &intel_dp_helper_funcs);
+
+ drm_mode_connector_attach_encoder(&intel_output->base,
+ &intel_output->enc);
+ drm_sysfs_connector_add(connector);
+
+ /* Set up the DDC bus. */
+ intel_dp_i2c_init(intel_output,
+ (output_reg == DP_B) ? "DPDDC-B" :
+ (output_reg == DP_C) ? "DPDDC-C" : "DPDDC-D");
+ intel_output->ddc_bus = &dp_priv->adapter;
+ intel_output->hot_plug = intel_dp_hot_plug;
+
+ /* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
+ * 0xd. Failure to do so will result in spurious interrupts being
+ * generated on the port when a cable is not attached.
+ */
+ if (IS_G4X(dev) && !IS_GM45(dev)) {
+ u32 temp = I915_READ(PEG_BAND_GAP_DATA);
+ I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
+ }
+}
diff --git a/drivers/gpu/drm/i915/intel_dp.h b/drivers/gpu/drm/i915/intel_dp.h
new file mode 100644
index 0000000..2b38054
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_dp.h
@@ -0,0 +1,144 @@
+/*
+ * Copyright © 2008 Keith Packard
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that copyright
+ * notice and this permission notice appear in supporting documentation, and
+ * that the name of the copyright holders not be used in advertising or
+ * publicity pertaining to distribution of the software without specific,
+ * written prior permission. The copyright holders make no representations
+ * about the suitability of this software for any purpose. It is provided "as
+ * is" without express or implied warranty.
+ *
+ * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
+ * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THIS SOFTWARE.
+ */
+
+#ifndef _INTEL_DP_H_
+#define _INTEL_DP_H_
+
+/* From the VESA DisplayPort spec */
+
+#define AUX_NATIVE_WRITE 0x8
+#define AUX_NATIVE_READ 0x9
+#define AUX_I2C_WRITE 0x0
+#define AUX_I2C_READ 0x1
+#define AUX_I2C_STATUS 0x2
+#define AUX_I2C_MOT 0x4
+
+#define AUX_NATIVE_REPLY_ACK (0x0 << 4)
+#define AUX_NATIVE_REPLY_NACK (0x1 << 4)
+#define AUX_NATIVE_REPLY_DEFER (0x2 << 4)
+#define AUX_NATIVE_REPLY_MASK (0x3 << 4)
+
+#define AUX_I2C_REPLY_ACK (0x0 << 6)
+#define AUX_I2C_REPLY_NACK (0x1 << 6)
+#define AUX_I2C_REPLY_DEFER (0x2 << 6)
+#define AUX_I2C_REPLY_MASK (0x3 << 6)
+
+/* AUX CH addresses */
+#define DP_LINK_BW_SET 0x100
+# define DP_LINK_BW_1_62 0x06
+# define DP_LINK_BW_2_7 0x0a
+
+#define DP_LANE_COUNT_SET 0x101
+# define DP_LANE_COUNT_MASK 0x0f
+# define DP_LANE_COUNT_ENHANCED_FRAME_EN (1 << 7)
+
+#define DP_TRAINING_PATTERN_SET 0x102
+
+# define DP_TRAINING_PATTERN_DISABLE 0
+# define DP_TRAINING_PATTERN_1 1
+# define DP_TRAINING_PATTERN_2 2
+# define DP_TRAINING_PATTERN_MASK 0x3
+
+# define DP_LINK_QUAL_PATTERN_DISABLE (0 << 2)
+# define DP_LINK_QUAL_PATTERN_D10_2 (1 << 2)
+# define DP_LINK_QUAL_PATTERN_ERROR_RATE (2 << 2)
+# define DP_LINK_QUAL_PATTERN_PRBS7 (3 << 2)
+# define DP_LINK_QUAL_PATTERN_MASK (3 << 2)
+
+# define DP_RECOVERED_CLOCK_OUT_EN (1 << 4)
+# define DP_LINK_SCRAMBLING_DISABLE (1 << 5)
+
+# define DP_SYMBOL_ERROR_COUNT_BOTH (0 << 6)
+# define DP_SYMBOL_ERROR_COUNT_DISPARITY (1 << 6)
+# define DP_SYMBOL_ERROR_COUNT_SYMBOL (2 << 6)
+# define DP_SYMBOL_ERROR_COUNT_MASK (3 << 6)
+
+#define DP_TRAINING_LANE0_SET 0x103
+#define DP_TRAINING_LANE1_SET 0x104
+#define DP_TRAINING_LANE2_SET 0x105
+#define DP_TRAINING_LANE3_SET 0x106
+
+# define DP_TRAIN_VOLTAGE_SWING_MASK 0x3
+# define DP_TRAIN_VOLTAGE_SWING_SHIFT 0
+# define DP_TRAIN_MAX_SWING_REACHED (1 << 2)
+# define DP_TRAIN_VOLTAGE_SWING_400 (0 << 0)
+# define DP_TRAIN_VOLTAGE_SWING_600 (1 << 0)
+# define DP_TRAIN_VOLTAGE_SWING_800 (2 << 0)
+# define DP_TRAIN_VOLTAGE_SWING_1200 (3 << 0)
+
+# define DP_TRAIN_PRE_EMPHASIS_MASK (3 << 3)
+# define DP_TRAIN_PRE_EMPHASIS_0 (0 << 3)
+# define DP_TRAIN_PRE_EMPHASIS_3_5 (1 << 3)
+# define DP_TRAIN_PRE_EMPHASIS_6 (2 << 3)
+# define DP_TRAIN_PRE_EMPHASIS_9_5 (3 << 3)
+
+# define DP_TRAIN_PRE_EMPHASIS_SHIFT 3
+# define DP_TRAIN_MAX_PRE_EMPHASIS_REACHED (1 << 5)
+
+#define DP_DOWNSPREAD_CTRL 0x107
+# define DP_SPREAD_AMP_0_5 (1 << 4)
+
+#define DP_MAIN_LINK_CHANNEL_CODING_SET 0x108
+# define DP_SET_ANSI_8B10B (1 << 0)
+
+#define DP_LANE0_1_STATUS 0x202
+#define DP_LANE2_3_STATUS 0x203
+
+# define DP_LANE_CR_DONE (1 << 0)
+# define DP_LANE_CHANNEL_EQ_DONE (1 << 1)
+# define DP_LANE_SYMBOL_LOCKED (1 << 2)
+
+#define DP_LANE_ALIGN_STATUS_UPDATED 0x204
+
+#define DP_INTERLANE_ALIGN_DONE (1 << 0)
+#define DP_DOWNSTREAM_PORT_STATUS_CHANGED (1 << 6)
+#define DP_LINK_STATUS_UPDATED (1 << 7)
+
+#define DP_SINK_STATUS 0x205
+
+#define DP_RECEIVE_PORT_0_STATUS (1 << 0)
+#define DP_RECEIVE_PORT_1_STATUS (1 << 1)
+
+#define DP_ADJUST_REQUEST_LANE0_1 0x206
+#define DP_ADJUST_REQUEST_LANE2_3 0x207
+
+#define DP_ADJUST_VOLTAGE_SWING_LANE0_MASK 0x03
+#define DP_ADJUST_VOLTAGE_SWING_LANE0_SHIFT 0
+#define DP_ADJUST_PRE_EMPHASIS_LANE0_MASK 0x0c
+#define DP_ADJUST_PRE_EMPHASIS_LANE0_SHIFT 2
+#define DP_ADJUST_VOLTAGE_SWING_LANE1_MASK 0x30
+#define DP_ADJUST_VOLTAGE_SWING_LANE1_SHIFT 4
+#define DP_ADJUST_PRE_EMPHASIS_LANE1_MASK 0xc0
+#define DP_ADJUST_PRE_EMPHASIS_LANE1_SHIFT 6
+
+struct i2c_algo_dp_aux_data {
+ bool running;
+ u16 address;
+ int (*aux_ch) (struct i2c_adapter *adapter,
+ uint8_t *send, int send_bytes,
+ uint8_t *recv, int recv_bytes);
+};
+
+int
+i2c_dp_aux_add_bus(struct i2c_adapter *adapter);
+
+#endif /* _INTEL_DP_H_ */
diff --git a/drivers/gpu/drm/i915/intel_dp_i2c.c b/drivers/gpu/drm/i915/intel_dp_i2c.c
new file mode 100644
index 0000000..4e60f14
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_dp_i2c.c
@@ -0,0 +1,272 @@
+/*
+ * Copyright © 2009 Keith Packard
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that copyright
+ * notice and this permission notice appear in supporting documentation, and
+ * that the name of the copyright holders not be used in advertising or
+ * publicity pertaining to distribution of the software without specific,
+ * written prior permission. The copyright holders make no representations
+ * about the suitability of this software for any purpose. It is provided "as
+ * is" without express or implied warranty.
+ *
+ * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
+ * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THIS SOFTWARE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/sched.h>
+#include <linux/i2c.h>
+#include "intel_dp.h"
+
+/* Run a single AUX_CH I2C transaction, writing/reading data as necessary */
+
+#define MODE_I2C_START 1
+#define MODE_I2C_WRITE 2
+#define MODE_I2C_READ 4
+#define MODE_I2C_STOP 8
+
+static int
+i2c_algo_dp_aux_transaction(struct i2c_adapter *adapter, int mode,
+ uint8_t write_byte, uint8_t *read_byte)
+{
+ struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
+ uint16_t address = algo_data->address;
+ uint8_t msg[5];
+ uint8_t reply[2];
+ int msg_bytes;
+ int reply_bytes;
+ int ret;
+
+ /* Set up the command byte */
+ if (mode & MODE_I2C_READ)
+ msg[0] = AUX_I2C_READ << 4;
+ else
+ msg[0] = AUX_I2C_WRITE << 4;
+
+ if (!(mode & MODE_I2C_STOP))
+ msg[0] |= AUX_I2C_MOT << 4;
+
+ msg[1] = address >> 8;
+ msg[2] = address;
+
+ switch (mode) {
+ case MODE_I2C_WRITE:
+ msg[3] = 0;
+ msg[4] = write_byte;
+ msg_bytes = 5;
+ reply_bytes = 1;
+ break;
+ case MODE_I2C_READ:
+ msg[3] = 0;
+ msg_bytes = 4;
+ reply_bytes = 2;
+ break;
+ default:
+ msg_bytes = 3;
+ reply_bytes = 1;
+ break;
+ }
+
+ for (;;) {
+ ret = (*algo_data->aux_ch)(adapter,
+ msg, msg_bytes,
+ reply, reply_bytes);
+ if (ret < 0) {
+ printk(KERN_ERR "aux_ch failed %d\n", ret);
+ return ret;
+ }
+ switch (reply[0] & AUX_I2C_REPLY_MASK) {
+ case AUX_I2C_REPLY_ACK:
+ if (mode == MODE_I2C_READ) {
+ *read_byte = reply[1];
+ }
+ return reply_bytes - 1;
+ case AUX_I2C_REPLY_NACK:
+ printk(KERN_ERR "aux_ch nack\n");
+ return -EREMOTEIO;
+ case AUX_I2C_REPLY_DEFER:
+ printk(KERN_ERR "aux_ch defer\n");
+ udelay(100);
+ break;
+ default:
+ printk(KERN_ERR "aux_ch invalid reply 0x%02x\n", reply[0]);
+ return -EREMOTEIO;
+ }
+ }
+}
+
+/*
+ * I2C over AUX CH
+ */
+
+/*
+ * Send the address.  If the I2C link is already running, this
+ * 'restarts' the connection with the new address; that is used for
+ * doing a write followed by a read (as needed for DDC).
+ */
+static int
+i2c_algo_dp_aux_address(struct i2c_adapter *adapter, u16 address, bool reading)
+{
+ struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
+ int mode = MODE_I2C_START;
+ int ret;
+
+ if (reading)
+ mode |= MODE_I2C_READ;
+ else
+ mode |= MODE_I2C_WRITE;
+ algo_data->address = address;
+ algo_data->running = true;
+ ret = i2c_algo_dp_aux_transaction(adapter, mode, 0, NULL);
+ return ret;
+}
+
+/*
+ * Stop the I2C transaction. This closes out the link, sending
+ * a bare address packet with the MOT bit turned off
+ */
+static void
+i2c_algo_dp_aux_stop(struct i2c_adapter *adapter, bool reading)
+{
+ struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
+ int mode = MODE_I2C_STOP;
+
+ if (reading)
+ mode |= MODE_I2C_READ;
+ else
+ mode |= MODE_I2C_WRITE;
+ if (algo_data->running) {
+ (void) i2c_algo_dp_aux_transaction(adapter, mode, 0, NULL);
+ algo_data->running = false;
+ }
+}
+
+/*
+ * Write a single byte to the current I2C address; the
+ * I2C link must be running or this returns -EIO.
+ */
+static int
+i2c_algo_dp_aux_put_byte(struct i2c_adapter *adapter, u8 byte)
+{
+ struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
+ int ret;
+
+ if (!algo_data->running)
+ return -EIO;
+
+ ret = i2c_algo_dp_aux_transaction(adapter, MODE_I2C_WRITE, byte, NULL);
+ return ret;
+}
+
+/*
+ * Read a single byte from the current I2C address; the
+ * I2C link must be running or this returns -EIO.
+ */
+static int
+i2c_algo_dp_aux_get_byte(struct i2c_adapter *adapter, u8 *byte_ret)
+{
+ struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
+ int ret;
+
+ if (!algo_data->running)
+ return -EIO;
+
+ ret = i2c_algo_dp_aux_transaction(adapter, MODE_I2C_READ, 0, byte_ret);
+ return ret;
+}
+
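+/*
+ * A DDC EDID probe typically reaches this entry point as two i2c
+ * messages: a one-byte write of the EDID offset to address 0x50
+ * followed by a 128-byte read from 0x50 (illustrative description of
+ * the usual DDC traffic).  Each message (re)sends the address packet
+ * and then moves its payload one byte per AUX transaction.
+ */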
+static int
+i2c_algo_dp_aux_xfer(struct i2c_adapter *adapter,
+ struct i2c_msg *msgs,
+ int num)
+{
+ int ret = 0;
+ bool reading = false;
+ int m;
+ int b;
+
+ for (m = 0; m < num; m++) {
+ u16 len = msgs[m].len;
+ u8 *buf = msgs[m].buf;
+ reading = (msgs[m].flags & I2C_M_RD) != 0;
+ ret = i2c_algo_dp_aux_address(adapter, msgs[m].addr, reading);
+ if (ret < 0)
+ break;
+ if (reading) {
+ for (b = 0; b < len; b++) {
+ ret = i2c_algo_dp_aux_get_byte(adapter, &buf[b]);
+ if (ret < 0)
+ break;
+ }
+ } else {
+ for (b = 0; b < len; b++) {
+ ret = i2c_algo_dp_aux_put_byte(adapter, buf[b]);
+ if (ret < 0)
+ break;
+ }
+ }
+ if (ret < 0)
+ break;
+ }
+ if (ret >= 0)
+ ret = num;
+ i2c_algo_dp_aux_stop(adapter, reading);
+ printk(KERN_ERR "dp_aux_xfer return %d\n", ret);
+ return ret;
+}
+
+static u32
+i2c_algo_dp_aux_functionality(struct i2c_adapter *adapter)
+{
+ return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL |
+ I2C_FUNC_SMBUS_READ_BLOCK_DATA |
+ I2C_FUNC_SMBUS_BLOCK_PROC_CALL |
+ I2C_FUNC_10BIT_ADDR;
+}
+
+static const struct i2c_algorithm i2c_dp_aux_algo = {
+ .master_xfer = i2c_algo_dp_aux_xfer,
+ .functionality = i2c_algo_dp_aux_functionality,
+};
+
+static void
+i2c_dp_aux_reset_bus(struct i2c_adapter *adapter)
+{
+ (void) i2c_algo_dp_aux_address(adapter, 0, false);
+ (void) i2c_algo_dp_aux_stop(adapter, false);
+}
+
+static int
+i2c_dp_aux_prepare_bus(struct i2c_adapter *adapter)
+{
+ adapter->algo = &i2c_dp_aux_algo;
+ adapter->retries = 3;
+ i2c_dp_aux_reset_bus(adapter);
+ return 0;
+}
+
+int
+i2c_dp_aux_add_bus(struct i2c_adapter *adapter)
+{
+ int error;
+
+ error = i2c_dp_aux_prepare_bus(adapter);
+ if (error)
+ return error;
+ error = i2c_add_adapter(adapter);
+ return error;
+}
+EXPORT_SYMBOL(i2c_dp_aux_add_bus);
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index cd4b9c5..004541c 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -54,6 +54,7 @@
#define INTEL_OUTPUT_LVDS 4
#define INTEL_OUTPUT_TVOUT 5
#define INTEL_OUTPUT_HDMI 6
+#define INTEL_OUTPUT_DISPLAYPORT 7
#define INTEL_DVO_CHIP_NONE 0
#define INTEL_DVO_CHIP_LVDS 1
@@ -65,7 +66,6 @@
u32 reg; /* GPIO reg */
struct i2c_adapter adapter;
struct i2c_algo_bit_data algo;
- u8 slave_addr;
};
struct intel_framebuffer {
@@ -79,11 +79,12 @@
struct drm_encoder enc;
int type;
- struct intel_i2c_chan *i2c_bus; /* for control functions */
- struct intel_i2c_chan *ddc_bus; /* for DDC only stuff */
+ struct i2c_adapter *i2c_bus;
+ struct i2c_adapter *ddc_bus;
bool load_detect_temp;
bool needs_tv_clock;
void *dev_priv;
+ void (*hot_plug)(struct intel_output *);
};
struct intel_crtc {
@@ -104,9 +105,9 @@
#define enc_to_intel_output(x) container_of(x, struct intel_output, enc)
#define to_intel_framebuffer(x) container_of(x, struct intel_framebuffer, base)
-struct intel_i2c_chan *intel_i2c_create(struct drm_device *dev, const u32 reg,
- const char *name);
-void intel_i2c_destroy(struct intel_i2c_chan *chan);
+struct i2c_adapter *intel_i2c_create(struct drm_device *dev, const u32 reg,
+ const char *name);
+void intel_i2c_destroy(struct i2c_adapter *adapter);
int intel_ddc_get_modes(struct intel_output *intel_output);
extern bool intel_ddc_probe(struct intel_output *intel_output);
void intel_i2c_quirk_set(struct drm_device *dev, bool enable);
@@ -116,6 +117,10 @@
extern void intel_dvo_init(struct drm_device *dev);
extern void intel_tv_init(struct drm_device *dev);
extern void intel_lvds_init(struct drm_device *dev);
+extern void intel_dp_init(struct drm_device *dev, int dp_reg);
+void
+intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode);
extern void intel_crtc_load_lut(struct drm_crtc *crtc);
extern void intel_encoder_prepare (struct drm_encoder *encoder);
diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c
index 1ee3007..13bff20 100644
--- a/drivers/gpu/drm/i915/intel_dvo.c
+++ b/drivers/gpu/drm/i915/intel_dvo.c
@@ -384,10 +384,9 @@
{
struct intel_output *intel_output;
struct intel_dvo_device *dvo;
- struct intel_i2c_chan *i2cbus = NULL;
+ struct i2c_adapter *i2cbus = NULL;
int ret = 0;
int i;
- int gpio_inited = 0;
int encoder_type = DRM_MODE_ENCODER_NONE;
intel_output = kzalloc (sizeof(struct intel_output), GFP_KERNEL);
if (!intel_output)
@@ -420,14 +419,11 @@
* It appears that everything is on GPIOE except for panels
* on i830 laptops, which are on GPIOB (DVOA).
*/
- if (gpio_inited != gpio) {
- if (i2cbus != NULL)
- intel_i2c_destroy(i2cbus);
- if (!(i2cbus = intel_i2c_create(dev, gpio,
- gpio == GPIOB ? "DVOI2C_B" : "DVOI2C_E"))) {
- continue;
- }
- gpio_inited = gpio;
+ if (i2cbus != NULL)
+ intel_i2c_destroy(i2cbus);
+ if (!(i2cbus = intel_i2c_create(dev, gpio,
+ gpio == GPIOB ? "DVOI2C_B" : "DVOI2C_E"))) {
+ continue;
}
if (dvo->dev_ops!= NULL)
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index 4ea2a65..9e30daa 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -31,6 +31,7 @@
#include "drmP.h"
#include "drm.h"
#include "drm_crtc.h"
+#include "drm_edid.h"
#include "intel_drv.h"
#include "i915_drm.h"
#include "i915_drv.h"
@@ -56,8 +57,7 @@
sdvox = SDVO_ENCODING_HDMI |
SDVO_BORDER_ENABLE |
SDVO_VSYNC_ACTIVE_HIGH |
- SDVO_HSYNC_ACTIVE_HIGH |
- SDVO_NULL_PACKETS_DURING_VSYNC;
+ SDVO_HSYNC_ACTIVE_HIGH;
if (hdmi_priv->has_hdmi_sink)
sdvox |= SDVO_AUDIO_ENABLE;
@@ -129,20 +129,26 @@
return true;
}
-static void
-intel_hdmi_sink_detect(struct drm_connector *connector)
+static enum drm_connector_status
+intel_hdmi_edid_detect(struct drm_connector *connector)
{
struct intel_output *intel_output = to_intel_output(connector);
struct intel_hdmi_priv *hdmi_priv = intel_output->dev_priv;
struct edid *edid = NULL;
+ enum drm_connector_status status = connector_status_disconnected;
edid = drm_get_edid(&intel_output->base,
- &intel_output->ddc_bus->adapter);
- if (edid != NULL) {
- hdmi_priv->has_hdmi_sink = drm_detect_hdmi_monitor(edid);
- kfree(edid);
+ intel_output->ddc_bus);
+ hdmi_priv->has_hdmi_sink = false;
+ if (edid) {
+ if (edid->input & DRM_EDID_INPUT_DIGITAL) {
+ status = connector_status_connected;
+ hdmi_priv->has_hdmi_sink = drm_detect_hdmi_monitor(edid);
+ }
intel_output->base.display_info.raw_edid = NULL;
+ kfree(edid);
}
+ return status;
}
static enum drm_connector_status
@@ -154,11 +160,7 @@
/* FIXME hotplug detect */
hdmi_priv->has_hdmi_sink = false;
- intel_hdmi_sink_detect(connector);
- if (hdmi_priv->has_hdmi_sink)
- return connector_status_connected;
- else
- return connector_status_disconnected;
+ return intel_hdmi_edid_detect(connector);
}
static enum drm_connector_status
@@ -201,10 +203,9 @@
return connector_status_unknown;
}
- if ((I915_READ(PORT_HOTPLUG_STAT) & bit) != 0) {
- intel_hdmi_sink_detect(connector);
- return connector_status_connected;
- } else
+ if ((I915_READ(PORT_HOTPLUG_STAT) & bit) != 0)
+ return intel_hdmi_edid_detect(connector);
+ else
return connector_status_disconnected;
}
diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c
index f7061f6..62b8bea 100644
--- a/drivers/gpu/drm/i915/intel_i2c.c
+++ b/drivers/gpu/drm/i915/intel_i2c.c
@@ -124,6 +124,7 @@
* @output: driver specific output device
* @reg: GPIO reg to use
* @name: name for this bus
+ * @slave_addr: slave address (if fixed)
*
* Creates and registers a new i2c bus with the Linux i2c layer, for use
* in output probing and control (e.g. DDC or SDVO control functions).
@@ -139,8 +140,8 @@
* %GPIOH
* see PRM for details on how these different busses are used.
*/
-struct intel_i2c_chan *intel_i2c_create(struct drm_device *dev, const u32 reg,
- const char *name)
+struct i2c_adapter *intel_i2c_create(struct drm_device *dev, const u32 reg,
+ const char *name)
{
struct intel_i2c_chan *chan;
@@ -174,7 +175,7 @@
intel_i2c_quirk_set(dev, false);
udelay(20);
- return chan;
+ return &chan->adapter;
out_free:
kfree(chan);
@@ -187,11 +188,16 @@
*
* Unregister the adapter from the i2c layer, then free the structure.
*/
-void intel_i2c_destroy(struct intel_i2c_chan *chan)
+void intel_i2c_destroy(struct i2c_adapter *adapter)
{
- if (!chan)
+ struct intel_i2c_chan *chan;
+
+ if (!adapter)
return;
+ chan = container_of(adapter,
+ struct intel_i2c_chan,
+ adapter);
i2c_del_adapter(&chan->adapter);
kfree(chan);
}
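
The intel_i2c change above hands callers a bare struct i2c_adapter and recovers the wrapping intel_i2c_chan with container_of() when the bus is destroyed. A minimal stand-alone sketch of that idiom follows, with simplified stand-in struct layouts rather than the real driver types.

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* simplified stand-ins for the driver structures */
struct i2c_adapter { const char *name; };

struct intel_i2c_chan {
	int reg;			/* GPIO register, illustrative only */
	struct i2c_adapter adapter;	/* embedded; this is what callers hold */
};

static void destroy(struct i2c_adapter *adapter)
{
	/* recover the wrapper exactly as intel_i2c_destroy() now does */
	struct intel_i2c_chan *chan =
		container_of(adapter, struct intel_i2c_chan, adapter);

	printf("freeing chan for reg %d (%s)\n", chan->reg, adapter->name);
}

int main(void)
{
	struct intel_i2c_chan chan = { .reg = 5, .adapter = { "DVOI2C_E" } };

	destroy(&chan.adapter);		/* callers only ever see the adapter */
	return 0;
}
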
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index f073ed8..9564ca4 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -39,6 +39,21 @@
#define I915_LVDS "i915_lvds"
+/*
+ * The following four scaling options are defined:
+ * #define DRM_MODE_SCALE_NON_GPU 0
+ * #define DRM_MODE_SCALE_FULLSCREEN 1
+ * #define DRM_MODE_SCALE_NO_SCALE 2
+ * #define DRM_MODE_SCALE_ASPECT 3
+ */
+
+/* Private structure for the integrated LVDS support */
+struct intel_lvds_priv {
+ int fitting_mode;
+ u32 pfit_control;
+ u32 pfit_pgm_ratios;
+};
+
/**
* Sets the backlight level.
*
@@ -213,10 +228,27 @@
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
+ /*
+ * Floating point operation is not supported, so PANEL_RATIO_FACTOR
+ * is defined to avoid floating point computation when calculating
+ * the panel ratio.
+ */
+#define PANEL_RATIO_FACTOR 8192
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
struct drm_encoder *tmp_encoder;
+ struct intel_output *intel_output = enc_to_intel_output(encoder);
+ struct intel_lvds_priv *lvds_priv = intel_output->dev_priv;
+ u32 pfit_control = 0, pfit_pgm_ratios = 0;
+ int left_border = 0, right_border = 0, top_border = 0;
+ int bottom_border = 0;
+ bool border = 0;
+ int panel_ratio, desired_ratio, vert_scale, horiz_scale;
+ int horiz_ratio, vert_ratio;
+ u32 hsync_width, vsync_width;
+ u32 hblank_width, vblank_width;
+ u32 hsync_pos, vsync_pos;
/* Should never happen!! */
if (!IS_I965G(dev) && intel_crtc->pipe == 0) {
@@ -232,7 +264,9 @@
return false;
}
}
-
+ /* If we don't have a panel mode, there is nothing we can do */
+ if (dev_priv->panel_fixed_mode == NULL)
+ return true;
/*
* If we have timings from the BIOS for the panel, put them in
* to the adjusted mode. The CRTC will be set up for this mode,
@@ -256,6 +290,243 @@
drm_mode_set_crtcinfo(adjusted_mode, CRTC_INTERLACE_HALVE_V);
}
+ /* Make sure pre-965s set dither correctly */
+ if (!IS_I965G(dev)) {
+ if (dev_priv->panel_wants_dither || dev_priv->lvds_dither)
+ pfit_control |= PANEL_8TO6_DITHER_ENABLE;
+ }
+
+ /* Native modes don't need fitting */
+ if (adjusted_mode->hdisplay == mode->hdisplay &&
+ adjusted_mode->vdisplay == mode->vdisplay) {
+ pfit_pgm_ratios = 0;
+ border = 0;
+ goto out;
+ }
+
+ /* 965+ wants fuzzy fitting */
+ if (IS_I965G(dev))
+ pfit_control |= (intel_crtc->pipe << PFIT_PIPE_SHIFT) |
+ PFIT_FILTER_FUZZY;
+
+ hsync_width = adjusted_mode->crtc_hsync_end -
+ adjusted_mode->crtc_hsync_start;
+ vsync_width = adjusted_mode->crtc_vsync_end -
+ adjusted_mode->crtc_vsync_start;
+ hblank_width = adjusted_mode->crtc_hblank_end -
+ adjusted_mode->crtc_hblank_start;
+ vblank_width = adjusted_mode->crtc_vblank_end -
+ adjusted_mode->crtc_vblank_start;
+ /*
+ * Deal with panel fitting options. Figure out how to stretch the
+ * image based on its aspect ratio & the current panel fitting mode.
+ */
+ panel_ratio = adjusted_mode->hdisplay * PANEL_RATIO_FACTOR /
+ adjusted_mode->vdisplay;
+ desired_ratio = mode->hdisplay * PANEL_RATIO_FACTOR /
+ mode->vdisplay;
+ /*
+ * Enable automatic panel scaling for non-native modes so that they fill
+ * the screen. Should be enabled before the pipe is enabled, according
+ * to register description and PRM.
+ * Change the value here to see the borders for debugging
+ */
+ I915_WRITE(BCLRPAT_A, 0);
+ I915_WRITE(BCLRPAT_B, 0);
+
+ switch (lvds_priv->fitting_mode) {
+ case DRM_MODE_SCALE_NO_SCALE:
+ /*
+ * For centered modes, we have to calculate border widths &
+ * heights and modify the values programmed into the CRTC.
+ */
+ left_border = (adjusted_mode->hdisplay - mode->hdisplay) / 2;
+ right_border = left_border;
+ if (mode->hdisplay & 1)
+ right_border++;
+ top_border = (adjusted_mode->vdisplay - mode->vdisplay) / 2;
+ bottom_border = top_border;
+ if (mode->vdisplay & 1)
+ bottom_border++;
+ /* Set active & border values */
+ adjusted_mode->crtc_hdisplay = mode->hdisplay;
+ /* Keep the border even */
+ if (right_border & 1)
+ right_border++;
+ /* use the border directly instead of border minus one */
+ adjusted_mode->crtc_hblank_start = mode->hdisplay +
+ right_border;
+ /* keep the blank width constant */
+ adjusted_mode->crtc_hblank_end =
+ adjusted_mode->crtc_hblank_start + hblank_width;
+ /* get the hsync pos relative to hblank start */
+ hsync_pos = (hblank_width - hsync_width) / 2;
+ /* keep the hsync pos even */
+ if (hsync_pos & 1)
+ hsync_pos++;
+ adjusted_mode->crtc_hsync_start =
+ adjusted_mode->crtc_hblank_start + hsync_pos;
+ /* keep the hsync width constant */
+ adjusted_mode->crtc_hsync_end =
+ adjusted_mode->crtc_hsync_start + hsync_width;
+ adjusted_mode->crtc_vdisplay = mode->vdisplay;
+ /* use the border instead of border minus one */
+ adjusted_mode->crtc_vblank_start = mode->vdisplay +
+ bottom_border;
+ /* keep the vblank width constant */
+ adjusted_mode->crtc_vblank_end =
+ adjusted_mode->crtc_vblank_start + vblank_width;
+ /* get the vsync start position relative to vblank start */
+ vsync_pos = (vblank_width - vsync_width) / 2;
+ adjusted_mode->crtc_vsync_start =
+ adjusted_mode->crtc_vblank_start + vsync_pos;
+ /* keep the vsync width constant */
+ adjusted_mode->crtc_vsync_end =
+ adjusted_mode->crtc_vsync_start + vsync_width;
+ border = 1;
+ break;
+ case DRM_MODE_SCALE_ASPECT:
+ /* Scale but preserve the aspect ratio */
+ pfit_control |= PFIT_ENABLE;
+ if (IS_I965G(dev)) {
+ /* 965+ is easy, it does everything in hw */
+ if (panel_ratio > desired_ratio)
+ pfit_control |= PFIT_SCALING_PILLAR;
+ else if (panel_ratio < desired_ratio)
+ pfit_control |= PFIT_SCALING_LETTER;
+ else
+ pfit_control |= PFIT_SCALING_AUTO;
+ } else {
+ /*
+ * For earlier chips we have to calculate the scaling
+ * ratio by hand and program it into the
+ * PFIT_PGM_RATIO register
+ */
+ u32 horiz_bits, vert_bits, bits = 12;
+ horiz_ratio = mode->hdisplay * PANEL_RATIO_FACTOR/
+ adjusted_mode->hdisplay;
+ vert_ratio = mode->vdisplay * PANEL_RATIO_FACTOR/
+ adjusted_mode->vdisplay;
+ horiz_scale = adjusted_mode->hdisplay *
+ PANEL_RATIO_FACTOR / mode->hdisplay;
+ vert_scale = adjusted_mode->vdisplay *
+ PANEL_RATIO_FACTOR / mode->vdisplay;
+
+ /* retain aspect ratio */
+ if (panel_ratio > desired_ratio) { /* Pillar */
+ u32 scaled_width;
+ scaled_width = mode->hdisplay * vert_scale /
+ PANEL_RATIO_FACTOR;
+ horiz_ratio = vert_ratio;
+ pfit_control |= (VERT_AUTO_SCALE |
+ VERT_INTERP_BILINEAR |
+ HORIZ_INTERP_BILINEAR);
+ /* Pillar will have left/right borders */
+ left_border = (adjusted_mode->hdisplay -
+ scaled_width) / 2;
+ right_border = left_border;
+ if (mode->hdisplay & 1) /* odd resolutions */
+ right_border++;
+ /* keep the border even */
+ if (right_border & 1)
+ right_border++;
+ adjusted_mode->crtc_hdisplay = scaled_width;
+ /* use border instead of border minus one */
+ adjusted_mode->crtc_hblank_start =
+ scaled_width + right_border;
+ /* keep the hblank width constant */
+ adjusted_mode->crtc_hblank_end =
+ adjusted_mode->crtc_hblank_start +
+ hblank_width;
+ /*
+ * get the hsync start pos relative to
+ * hblank start
+ */
+ hsync_pos = (hblank_width - hsync_width) / 2;
+ /* keep the hsync_pos even */
+ if (hsync_pos & 1)
+ hsync_pos++;
+ adjusted_mode->crtc_hsync_start =
+ adjusted_mode->crtc_hblank_start +
+ hsync_pos;
+ /* keep the hsync width constant */
+ adjusted_mode->crtc_hsync_end =
+ adjusted_mode->crtc_hsync_start +
+ hsync_width;
+ border = 1;
+ } else if (panel_ratio < desired_ratio) { /* letter */
+ u32 scaled_height = mode->vdisplay *
+ horiz_scale / PANEL_RATIO_FACTOR;
+ vert_ratio = horiz_ratio;
+ pfit_control |= (HORIZ_AUTO_SCALE |
+ VERT_INTERP_BILINEAR |
+ HORIZ_INTERP_BILINEAR);
+ /* Letterbox will have top/bottom border */
+ top_border = (adjusted_mode->vdisplay -
+ scaled_height) / 2;
+ bottom_border = top_border;
+ if (mode->vdisplay & 1)
+ bottom_border++;
+ adjusted_mode->crtc_vdisplay = scaled_height;
+ /* use border instead of border minus one */
+ adjusted_mode->crtc_vblank_start =
+ scaled_height + bottom_border;
+ /* keep the vblank width constant */
+ adjusted_mode->crtc_vblank_end =
+ adjusted_mode->crtc_vblank_start +
+ vblank_width;
+ /*
+ * get the vsync start pos relative to
+ * vblank start
+ */
+ vsync_pos = (vblank_width - vsync_width) / 2;
+ adjusted_mode->crtc_vsync_start =
+ adjusted_mode->crtc_vblank_start +
+ vsync_pos;
+ /* keep the vsync width constant */
+ adjusted_mode->crtc_vsync_end =
+ adjusted_mode->crtc_vsync_start +
+ vsync_width;
+ border = 1;
+ } else {
+ /* Aspects match, let hw scale in both directions */
+ pfit_control |= (VERT_AUTO_SCALE |
+ HORIZ_AUTO_SCALE |
+ VERT_INTERP_BILINEAR |
+ HORIZ_INTERP_BILINEAR);
+ }
+ horiz_bits = (1 << bits) * horiz_ratio /
+ PANEL_RATIO_FACTOR;
+ vert_bits = (1 << bits) * vert_ratio /
+ PANEL_RATIO_FACTOR;
+ pfit_pgm_ratios =
+ ((vert_bits << PFIT_VERT_SCALE_SHIFT) &
+ PFIT_VERT_SCALE_MASK) |
+ ((horiz_bits << PFIT_HORIZ_SCALE_SHIFT) &
+ PFIT_HORIZ_SCALE_MASK);
+ }
+ break;
+
+ case DRM_MODE_SCALE_FULLSCREEN:
+ /*
+ * Full scaling, even if it changes the aspect ratio.
+ * Fortunately this is all done for us in hw.
+ */
+ pfit_control |= PFIT_ENABLE;
+ if (IS_I965G(dev))
+ pfit_control |= PFIT_SCALING_AUTO;
+ else
+ pfit_control |= (VERT_AUTO_SCALE | HORIZ_AUTO_SCALE |
+ VERT_INTERP_BILINEAR |
+ HORIZ_INTERP_BILINEAR);
+ break;
+ default:
+ break;
+ }
+
+out:
+ lvds_priv->pfit_control = pfit_control;
+ lvds_priv->pfit_pgm_ratios = pfit_pgm_ratios;
/*
* XXX: It would be nice to support lower refresh rates on the
* panels to reduce power consumption, and perhaps match the
@@ -301,8 +572,8 @@
{
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
- u32 pfit_control;
+ struct intel_output *intel_output = enc_to_intel_output(encoder);
+ struct intel_lvds_priv *lvds_priv = intel_output->dev_priv;
/*
* The LVDS pin pair will already have been turned on in the
@@ -319,22 +590,8 @@
* screen. Should be enabled before the pipe is enabled, according to
* register description and PRM.
*/
- if (mode->hdisplay != adjusted_mode->hdisplay ||
- mode->vdisplay != adjusted_mode->vdisplay)
- pfit_control = (PFIT_ENABLE | VERT_AUTO_SCALE |
- HORIZ_AUTO_SCALE | VERT_INTERP_BILINEAR |
- HORIZ_INTERP_BILINEAR);
- else
- pfit_control = 0;
-
- if (!IS_I965G(dev)) {
- if (dev_priv->panel_wants_dither || dev_priv->lvds_dither)
- pfit_control |= PANEL_8TO6_DITHER_ENABLE;
- }
- else
- pfit_control |= intel_crtc->pipe << PFIT_PIPE_SHIFT;
-
- I915_WRITE(PFIT_CONTROL, pfit_control);
+ I915_WRITE(PFIT_PGM_RATIOS, lvds_priv->pfit_pgm_ratios);
+ I915_WRITE(PFIT_CONTROL, lvds_priv->pfit_control);
}
/**
@@ -406,6 +663,34 @@
struct drm_property *property,
uint64_t value)
{
+ struct drm_device *dev = connector->dev;
+ struct intel_output *intel_output =
+ to_intel_output(connector);
+
+ if (property == dev->mode_config.scaling_mode_property &&
+ connector->encoder) {
+ struct drm_crtc *crtc = connector->encoder->crtc;
+ struct intel_lvds_priv *lvds_priv = intel_output->dev_priv;
+ if (value == DRM_MODE_SCALE_NON_GPU) {
+ DRM_DEBUG_KMS(I915_LVDS,
+ "non_GPU property is unsupported\n");
+ return 0;
+ }
+ if (lvds_priv->fitting_mode == value) {
+ /* the LVDS scaling property is not changed */
+ return 0;
+ }
+ lvds_priv->fitting_mode = value;
+ if (crtc && crtc->enabled) {
+ /*
+ * If the CRTC is enabled, the display will be changed
+ * according to the new panel fitting mode.
+ */
+ drm_crtc_helper_set_mode(crtc, &crtc->mode,
+ crtc->x, crtc->y, crtc->fb);
+ }
+ }
+
return 0;
}
@@ -456,7 +741,7 @@
.callback = intel_no_lvds_dmi_callback,
.ident = "Apple Mac Mini (Core series)",
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
+ DMI_MATCH(DMI_SYS_VENDOR, "Apple"),
DMI_MATCH(DMI_PRODUCT_NAME, "Macmini1,1"),
},
},
@@ -464,7 +749,7 @@
.callback = intel_no_lvds_dmi_callback,
.ident = "Apple Mac Mini (Core 2 series)",
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
+ DMI_MATCH(DMI_SYS_VENDOR, "Apple"),
DMI_MATCH(DMI_PRODUCT_NAME, "Macmini2,1"),
},
},
@@ -518,6 +803,7 @@
struct drm_encoder *encoder;
struct drm_display_mode *scan; /* *modes, *bios_mode; */
struct drm_crtc *crtc;
+ struct intel_lvds_priv *lvds_priv;
u32 lvds;
int pipe, gpio = GPIOC;
@@ -531,7 +817,8 @@
gpio = PCH_GPIOC;
}
- intel_output = kzalloc(sizeof(struct intel_output), GFP_KERNEL);
+ intel_output = kzalloc(sizeof(struct intel_output) +
+ sizeof(struct intel_lvds_priv), GFP_KERNEL);
if (!intel_output) {
return;
}
@@ -553,7 +840,18 @@
connector->interlace_allowed = false;
connector->doublescan_allowed = false;
+ lvds_priv = (struct intel_lvds_priv *)(intel_output + 1);
+ intel_output->dev_priv = lvds_priv;
+ /* create the scaling mode property */
+ drm_mode_create_scaling_mode_property(dev);
+ /*
+ * The initial panel fitting mode will be FULL_SCREEN.
+ */
+ drm_connector_attach_property(&intel_output->base,
+ dev->mode_config.scaling_mode_property,
+ DRM_MODE_SCALE_FULLSCREEN);
+ lvds_priv->fitting_mode = DRM_MODE_SCALE_FULLSCREEN;
/*
* LVDS discovery:
* 1) check for EDID on DDC
@@ -649,5 +947,5 @@
if (intel_output->ddc_bus)
intel_i2c_destroy(intel_output->ddc_bus);
drm_connector_cleanup(connector);
- kfree(connector);
+ kfree(intel_output);
}
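
The panel-fitter code added above sidesteps floating point by scaling all ratios with PANEL_RATIO_FACTOR and comparing the results as integers. A compact sketch of that fixed-point aspect comparison and the resulting pillarbox/letterbox border math, using made-up mode sizes:

#include <stdio.h>

#define PANEL_RATIO_FACTOR 8192		/* same fixed-point scale as the driver */

int main(void)
{
	int panel_w = 1280, panel_h = 800;	/* example native panel mode */
	int mode_w = 1024, mode_h = 768;	/* example requested mode */

	int panel_ratio   = panel_w * PANEL_RATIO_FACTOR / panel_h;
	int desired_ratio = mode_w  * PANEL_RATIO_FACTOR / mode_h;

	if (panel_ratio > desired_ratio) {
		/* pillarbox: scale by the vertical factor, pad left/right */
		int vert_scale = panel_h * PANEL_RATIO_FACTOR / mode_h;
		int scaled_w = mode_w * vert_scale / PANEL_RATIO_FACTOR;
		int border = (panel_w - scaled_w) / 2;

		printf("pillarbox: scaled width %d, side borders %d\n",
		       scaled_w, border);
	} else if (panel_ratio < desired_ratio) {
		/* letterbox: scale by the horizontal factor, pad top/bottom */
		int horiz_scale = panel_w * PANEL_RATIO_FACTOR / mode_w;
		int scaled_h = mode_h * horiz_scale / PANEL_RATIO_FACTOR;
		int border = (panel_h - scaled_h) / 2;

		printf("letterbox: scaled height %d, borders %d\n",
		       scaled_h, border);
	} else {
		printf("aspects match, hw scales both directions\n");
	}
	return 0;
}

With these example numbers the 1280x800 panel is wider than the 1024x768 image, so the sketch reports a pillarboxed width of 1066 with 107-pixel side borders.
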
diff --git a/drivers/gpu/drm/i915/intel_modes.c b/drivers/gpu/drm/i915/intel_modes.c
index e0910fe..67e2f46 100644
--- a/drivers/gpu/drm/i915/intel_modes.c
+++ b/drivers/gpu/drm/i915/intel_modes.c
@@ -53,10 +53,9 @@
}
};
- intel_i2c_quirk_set(intel_output->ddc_bus->drm_dev, true);
- ret = i2c_transfer(&intel_output->ddc_bus->adapter, msgs, 2);
- intel_i2c_quirk_set(intel_output->ddc_bus->drm_dev, false);
-
+ intel_i2c_quirk_set(intel_output->base.dev, true);
+ ret = i2c_transfer(intel_output->ddc_bus, msgs, 2);
+ intel_i2c_quirk_set(intel_output->base.dev, false);
if (ret == 2)
return true;
@@ -74,10 +73,9 @@
struct edid *edid;
int ret = 0;
- intel_i2c_quirk_set(intel_output->ddc_bus->drm_dev, true);
- edid = drm_get_edid(&intel_output->base,
- &intel_output->ddc_bus->adapter);
- intel_i2c_quirk_set(intel_output->ddc_bus->drm_dev, false);
+ intel_i2c_quirk_set(intel_output->base.dev, true);
+ edid = drm_get_edid(&intel_output->base, intel_output->ddc_bus);
+ intel_i2c_quirk_set(intel_output->base.dev, false);
if (edid) {
drm_mode_connector_update_edid_property(&intel_output->base,
edid);
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index 9a00adb..f034737 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -38,8 +38,7 @@
#undef SDVO_DEBUG
#define I915_SDVO "i915_sdvo"
struct intel_sdvo_priv {
- struct intel_i2c_chan *i2c_bus;
- int slaveaddr;
+ u8 slave_addr;
/* Register for the SDVO device: SDVOB or SDVOC */
int output_device;
@@ -146,13 +145,13 @@
struct i2c_msg msgs[] = {
{
- .addr = sdvo_priv->i2c_bus->slave_addr,
+ .addr = sdvo_priv->slave_addr >> 1,
.flags = 0,
.len = 1,
.buf = out_buf,
},
{
- .addr = sdvo_priv->i2c_bus->slave_addr,
+ .addr = sdvo_priv->slave_addr >> 1,
.flags = I2C_M_RD,
.len = 1,
.buf = buf,
@@ -162,7 +161,7 @@
out_buf[0] = addr;
out_buf[1] = 0;
- if ((ret = i2c_transfer(&sdvo_priv->i2c_bus->adapter, msgs, 2)) == 2)
+ if ((ret = i2c_transfer(intel_output->i2c_bus, msgs, 2)) == 2)
{
*ch = buf[0];
return true;
@@ -175,10 +174,11 @@
static bool intel_sdvo_write_byte(struct intel_output *intel_output, int addr,
u8 ch)
{
+ struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
u8 out_buf[2];
struct i2c_msg msgs[] = {
{
- .addr = intel_output->i2c_bus->slave_addr,
+ .addr = sdvo_priv->slave_addr >> 1,
.flags = 0,
.len = 2,
.buf = out_buf,
@@ -188,7 +188,7 @@
out_buf[0] = addr;
out_buf[1] = ch;
- if (i2c_transfer(&intel_output->i2c_bus->adapter, msgs, 1) == 1)
+ if (i2c_transfer(intel_output->i2c_bus, msgs, 1) == 1)
{
return true;
}
@@ -1369,9 +1369,8 @@
struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
struct edid *edid = NULL;
- intel_sdvo_set_control_bus_switch(intel_output, sdvo_priv->ddc_bus);
edid = drm_get_edid(&intel_output->base,
- &intel_output->ddc_bus->adapter);
+ intel_output->ddc_bus);
if (edid != NULL) {
sdvo_priv->is_hdmi = drm_detect_hdmi_monitor(edid);
kfree(edid);
@@ -1549,7 +1548,6 @@
static void intel_sdvo_get_lvds_modes(struct drm_connector *connector)
{
struct intel_output *intel_output = to_intel_output(connector);
- struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
struct drm_i915_private *dev_priv = connector->dev->dev_private;
/*
@@ -1557,8 +1555,6 @@
* Assume that the preferred modes are
* arranged in priority order.
*/
- /* set the bus switch and get the modes */
- intel_sdvo_set_control_bus_switch(intel_output, sdvo_priv->ddc_bus);
intel_ddc_get_modes(intel_output);
if (list_empty(&connector->probed_modes) == false)
return;
@@ -1709,7 +1705,7 @@
list_for_each_entry(connector,
&dev->mode_config.connector_list, head) {
- if (to_intel_output(connector)->ddc_bus == chan) {
+ if (to_intel_output(connector)->ddc_bus == &chan->adapter) {
intel_output = to_intel_output(connector);
break;
}
@@ -1723,7 +1719,7 @@
struct intel_output *intel_output;
struct intel_sdvo_priv *sdvo_priv;
struct i2c_algo_bit_data *algo_data;
- struct i2c_algorithm *algo;
+ const struct i2c_algorithm *algo;
algo_data = (struct i2c_algo_bit_data *)i2c_adap->algo_data;
intel_output =
@@ -1733,7 +1729,7 @@
return -EINVAL;
sdvo_priv = intel_output->dev_priv;
- algo = (struct i2c_algorithm *)intel_output->i2c_bus->adapter.algo;
+ algo = intel_output->i2c_bus->algo;
intel_sdvo_set_control_bus_switch(intel_output, sdvo_priv->ddc_bus);
return algo->master_xfer(i2c_adap, msgs, num);
@@ -1785,13 +1781,11 @@
struct drm_connector *connector;
struct intel_output *intel_output;
struct intel_sdvo_priv *sdvo_priv;
- struct intel_i2c_chan *i2cbus = NULL;
- struct intel_i2c_chan *ddcbus = NULL;
+
int connector_type;
u8 ch[0x40];
int i;
- int encoder_type, output_id;
- u8 slave_addr;
+ int encoder_type;
intel_output = kcalloc(sizeof(struct intel_output)+sizeof(struct intel_sdvo_priv), 1, GFP_KERNEL);
if (!intel_output) {
@@ -1799,29 +1793,24 @@
}
sdvo_priv = (struct intel_sdvo_priv *)(intel_output + 1);
+ sdvo_priv->output_device = output_device;
+
+ intel_output->dev_priv = sdvo_priv;
intel_output->type = INTEL_OUTPUT_SDVO;
/* setup the DDC bus. */
if (output_device == SDVOB)
- i2cbus = intel_i2c_create(dev, GPIOE, "SDVOCTRL_E for SDVOB");
+ intel_output->i2c_bus = intel_i2c_create(dev, GPIOE, "SDVOCTRL_E for SDVOB");
else
- i2cbus = intel_i2c_create(dev, GPIOE, "SDVOCTRL_E for SDVOC");
+ intel_output->i2c_bus = intel_i2c_create(dev, GPIOE, "SDVOCTRL_E for SDVOC");
- if (!i2cbus)
+ if (!intel_output->i2c_bus)
goto err_inteloutput;
- slave_addr = intel_sdvo_get_slave_addr(dev, output_device);
- sdvo_priv->i2c_bus = i2cbus;
+ sdvo_priv->slave_addr = intel_sdvo_get_slave_addr(dev, output_device);
- if (output_device == SDVOB) {
- output_id = 1;
- } else {
- output_id = 2;
- }
- sdvo_priv->i2c_bus->slave_addr = slave_addr >> 1;
- sdvo_priv->output_device = output_device;
- intel_output->i2c_bus = i2cbus;
- intel_output->dev_priv = sdvo_priv;
+ /* Save the bit-banging i2c functionality for use by the DDC wrapper */
+ intel_sdvo_i2c_bit_algo.functionality = intel_output->i2c_bus->algo->functionality;
/* Read the regs to test if we can talk to the device */
for (i = 0; i < 0x40; i++) {
@@ -1835,17 +1824,15 @@
/* setup the DDC bus. */
if (output_device == SDVOB)
- ddcbus = intel_i2c_create(dev, GPIOE, "SDVOB DDC BUS");
+ intel_output->ddc_bus = intel_i2c_create(dev, GPIOE, "SDVOB DDC BUS");
else
- ddcbus = intel_i2c_create(dev, GPIOE, "SDVOC DDC BUS");
+ intel_output->ddc_bus = intel_i2c_create(dev, GPIOE, "SDVOC DDC BUS");
- if (ddcbus == NULL)
+ if (intel_output->ddc_bus == NULL)
goto err_i2c;
- intel_sdvo_i2c_bit_algo.functionality =
- intel_output->i2c_bus->adapter.algo->functionality;
- ddcbus->adapter.algo = &intel_sdvo_i2c_bit_algo;
- intel_output->ddc_bus = ddcbus;
+ /* Wrap with our custom algo which switches to DDC mode */
+ intel_output->ddc_bus->algo = &intel_sdvo_i2c_bit_algo;
/* In default case sdvo lvds is false */
sdvo_priv->is_lvds = false;
@@ -1965,9 +1952,10 @@
return true;
err_i2c:
- if (ddcbus != NULL)
+ if (intel_output->ddc_bus != NULL)
intel_i2c_destroy(intel_output->ddc_bus);
- intel_i2c_destroy(intel_output->i2c_bus);
+ if (intel_output->i2c_bus != NULL)
+ intel_i2c_destroy(intel_output->i2c_bus);
err_inteloutput:
kfree(intel_output);
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index ea68992..a43c98e 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -1383,34 +1383,31 @@
/*
* Detect TV by polling
*/
- if (intel_output->load_detect_temp) {
- /* TV not currently running, prod it with destructive detect */
- save_tv_dac = tv_dac;
- tv_ctl = I915_READ(TV_CTL);
- save_tv_ctl = tv_ctl;
- tv_ctl &= ~TV_ENC_ENABLE;
- tv_ctl &= ~TV_TEST_MODE_MASK;
- tv_ctl |= TV_TEST_MODE_MONITOR_DETECT;
- tv_dac &= ~TVDAC_SENSE_MASK;
- tv_dac &= ~DAC_A_MASK;
- tv_dac &= ~DAC_B_MASK;
- tv_dac &= ~DAC_C_MASK;
- tv_dac |= (TVDAC_STATE_CHG_EN |
- TVDAC_A_SENSE_CTL |
- TVDAC_B_SENSE_CTL |
- TVDAC_C_SENSE_CTL |
- DAC_CTL_OVERRIDE |
- DAC_A_0_7_V |
- DAC_B_0_7_V |
- DAC_C_0_7_V);
- I915_WRITE(TV_CTL, tv_ctl);
- I915_WRITE(TV_DAC, tv_dac);
- intel_wait_for_vblank(dev);
- tv_dac = I915_READ(TV_DAC);
- I915_WRITE(TV_DAC, save_tv_dac);
- I915_WRITE(TV_CTL, save_tv_ctl);
- intel_wait_for_vblank(dev);
- }
+ save_tv_dac = tv_dac;
+ tv_ctl = I915_READ(TV_CTL);
+ save_tv_ctl = tv_ctl;
+ tv_ctl &= ~TV_ENC_ENABLE;
+ tv_ctl &= ~TV_TEST_MODE_MASK;
+ tv_ctl |= TV_TEST_MODE_MONITOR_DETECT;
+ tv_dac &= ~TVDAC_SENSE_MASK;
+ tv_dac &= ~DAC_A_MASK;
+ tv_dac &= ~DAC_B_MASK;
+ tv_dac &= ~DAC_C_MASK;
+ tv_dac |= (TVDAC_STATE_CHG_EN |
+ TVDAC_A_SENSE_CTL |
+ TVDAC_B_SENSE_CTL |
+ TVDAC_C_SENSE_CTL |
+ DAC_CTL_OVERRIDE |
+ DAC_A_0_7_V |
+ DAC_B_0_7_V |
+ DAC_C_0_7_V);
+ I915_WRITE(TV_CTL, tv_ctl);
+ I915_WRITE(TV_DAC, tv_dac);
+ intel_wait_for_vblank(dev);
+ tv_dac = I915_READ(TV_DAC);
+ I915_WRITE(TV_DAC, save_tv_dac);
+ I915_WRITE(TV_CTL, save_tv_ctl);
+ intel_wait_for_vblank(dev);
/*
* A B C
* 0 1 1 Composite
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index f30aa72..f97563d 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -35,6 +35,23 @@
#include "atom.h"
/*
+ * Clear GPU surface registers.
+ */
+static void radeon_surface_init(struct radeon_device *rdev)
+{
+ /* FIXME: check this out */
+ if (rdev->family < CHIP_R600) {
+ int i;
+
+ for (i = 0; i < 8; i++) {
+ WREG32(RADEON_SURFACE0_INFO +
+ i * (RADEON_SURFACE1_INFO - RADEON_SURFACE0_INFO),
+ 0);
+ }
+ }
+}
+
+/*
* GPU scratch registers helpers function.
*/
static void radeon_scratch_init(struct radeon_device *rdev)
@@ -496,6 +513,8 @@
radeon_errata(rdev);
/* Initialize scratch registers */
radeon_scratch_init(rdev);
+ /* Initialize surface registers */
+ radeon_surface_init(rdev);
/* TODO: disable VGA need to use VGA request */
/* BIOS*/
@@ -604,9 +623,6 @@
if (r) {
return r;
}
- if (rdev->fbdev_rfb && rdev->fbdev_rfb->obj) {
- rdev->fbdev_robj = rdev->fbdev_rfb->obj->driver_private;
- }
if (!ret) {
DRM_INFO("radeon: kernel modesetting successfully initialized.\n");
}
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index 09c9fb9..84ba69f 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -345,7 +345,7 @@
drm_exit(driver);
}
-late_initcall(radeon_init);
+module_init(radeon_init);
module_exit(radeon_exit);
MODULE_AUTHOR(DRIVER_AUTHOR);
diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c
index fa86d39..9e8f191 100644
--- a/drivers/gpu/drm/radeon/radeon_fb.c
+++ b/drivers/gpu/drm/radeon/radeon_fb.c
@@ -478,14 +478,16 @@
{
struct fb_info *info;
struct radeon_fb_device *rfbdev;
- struct drm_framebuffer *fb;
+ struct drm_framebuffer *fb = NULL;
struct radeon_framebuffer *rfb;
struct drm_mode_fb_cmd mode_cmd;
struct drm_gem_object *gobj = NULL;
struct radeon_object *robj = NULL;
struct device *device = &rdev->pdev->dev;
int size, aligned_size, ret;
+ u64 fb_gpuaddr;
void *fbptr = NULL;
+ unsigned long tmp;
mode_cmd.width = surface_width;
mode_cmd.height = surface_height;
@@ -498,11 +500,12 @@
aligned_size = ALIGN(size, PAGE_SIZE);
ret = radeon_gem_object_create(rdev, aligned_size, 0,
- RADEON_GEM_DOMAIN_VRAM,
- false, ttm_bo_type_kernel,
- false, &gobj);
+ RADEON_GEM_DOMAIN_VRAM,
+ false, ttm_bo_type_kernel,
+ false, &gobj);
if (ret) {
- printk(KERN_ERR "failed to allocate framebuffer\n");
+ printk(KERN_ERR "failed to allocate framebuffer (%d %d)\n",
+ surface_width, surface_height);
ret = -ENOMEM;
goto out;
}
@@ -515,12 +518,19 @@
ret = -ENOMEM;
goto out_unref;
}
+ ret = radeon_object_pin(robj, RADEON_GEM_DOMAIN_VRAM, &fb_gpuaddr);
+ if (ret) {
+ printk(KERN_ERR "failed to pin framebuffer\n");
+ ret = -ENOMEM;
+ goto out_unref;
+ }
list_add(&fb->filp_head, &rdev->ddev->mode_config.fb_kernel_list);
rfb = to_radeon_framebuffer(fb);
*rfb_p = rfb;
rdev->fbdev_rfb = rfb;
+ rdev->fbdev_robj = robj;
info = framebuffer_alloc(sizeof(struct radeon_fb_device), device);
if (info == NULL) {
@@ -541,13 +551,13 @@
info->fix.xpanstep = 1; /* doing it in hw */
info->fix.ypanstep = 1; /* doing it in hw */
info->fix.ywrapstep = 0;
- info->fix.accel = FB_ACCEL_I830;
+ info->fix.accel = FB_ACCEL_NONE;
info->fix.type_aux = 0;
info->flags = FBINFO_DEFAULT;
info->fbops = &radeonfb_ops;
info->fix.line_length = fb->pitch;
- info->screen_base = fbptr;
- info->fix.smem_start = (unsigned long)fbptr;
+ tmp = fb_gpuaddr - rdev->mc.vram_location;
+ info->fix.smem_start = rdev->mc.aper_base + tmp;
info->fix.smem_len = size;
info->screen_base = fbptr;
info->screen_size = size;
@@ -562,8 +572,8 @@
info->var.width = -1;
info->var.xres = fb_width;
info->var.yres = fb_height;
- info->fix.mmio_start = pci_resource_start(rdev->pdev, 2);
- info->fix.mmio_len = pci_resource_len(rdev->pdev, 2);
+ info->fix.mmio_start = 0;
+ info->fix.mmio_len = 0;
info->pixmap.size = 64*1024;
info->pixmap.buf_align = 8;
info->pixmap.access_align = 32;
@@ -644,7 +654,7 @@
if (robj) {
radeon_object_kunmap(robj);
}
- if (ret) {
+ if (fb && ret) {
list_del(&fb->filp_head);
drm_gem_object_unreference(gobj);
drm_framebuffer_cleanup(fb);
@@ -813,6 +823,7 @@
robj = rfb->obj->driver_private;
unregister_framebuffer(info);
radeon_object_kunmap(robj);
+ radeon_object_unpin(robj);
framebuffer_release(info);
}
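
The radeon_fb change above pins the framebuffer object in VRAM and points fbdev at it through the PCI aperture: smem_start = aper_base + (fb_gpuaddr - vram_location). A tiny sketch of that address arithmetic with made-up addresses:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* all addresses below are made up for illustration */
	uint64_t vram_location = 0x00000000;	/* GPU address where VRAM starts */
	uint64_t aper_base     = 0xd0000000;	/* CPU/PCI aperture base address */
	uint64_t fb_gpuaddr    = 0x00100000;	/* GPU address of the pinned fb  */

	/* same arithmetic as the new radeonfb_create() code above */
	uint64_t offset = fb_gpuaddr - vram_location;
	uint64_t smem_start = aper_base + offset;

	printf("fbdev smem_start = 0x%llx\n", (unsigned long long)smem_start);
	return 0;
}
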
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index 983e8df..bac0d06 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -223,7 +223,6 @@
{
uint32_t flags;
uint32_t tmp;
- void *fbptr;
int r;
flags = radeon_object_flags_from_domain(domain);
@@ -242,10 +241,6 @@
DRM_ERROR("radeon: failed to reserve object for pinning it.\n");
return r;
}
- if (robj->rdev->fbdev_robj == robj) {
- mutex_lock(&robj->rdev->fbdev_info->lock);
- radeon_object_kunmap(robj);
- }
tmp = robj->tobj.mem.placement;
ttm_flag_masked(&tmp, flags, TTM_PL_MASK_MEM);
robj->tobj.proposed_placement = tmp | TTM_PL_FLAG_NO_EVICT | TTM_PL_MASK_CACHING;
@@ -261,23 +256,12 @@
DRM_ERROR("radeon: failed to pin object.\n");
}
radeon_object_unreserve(robj);
- if (robj->rdev->fbdev_robj == robj) {
- if (!r) {
- r = radeon_object_kmap(robj, &fbptr);
- }
- if (!r) {
- robj->rdev->fbdev_info->screen_base = fbptr;
- robj->rdev->fbdev_info->fix.smem_start = (unsigned long)fbptr;
- }
- mutex_unlock(&robj->rdev->fbdev_info->lock);
- }
return r;
}
void radeon_object_unpin(struct radeon_object *robj)
{
uint32_t flags;
- void *fbptr;
int r;
spin_lock(&robj->tobj.lock);
@@ -297,10 +281,6 @@
DRM_ERROR("radeon: failed to reserve object for unpinning it.\n");
return;
}
- if (robj->rdev->fbdev_robj == robj) {
- mutex_lock(&robj->rdev->fbdev_info->lock);
- radeon_object_kunmap(robj);
- }
flags = robj->tobj.mem.placement;
robj->tobj.proposed_placement = flags & ~TTM_PL_FLAG_NO_EVICT;
r = ttm_buffer_object_validate(&robj->tobj,
@@ -310,16 +290,6 @@
DRM_ERROR("radeon: failed to unpin buffer.\n");
}
radeon_object_unreserve(robj);
- if (robj->rdev->fbdev_robj == robj) {
- if (!r) {
- r = radeon_object_kmap(robj, &fbptr);
- }
- if (!r) {
- robj->rdev->fbdev_info->screen_base = fbptr;
- robj->rdev->fbdev_info->fix.smem_start = (unsigned long)fbptr;
- }
- mutex_unlock(&robj->rdev->fbdev_info->lock);
- }
}
int radeon_object_wait(struct radeon_object *robj)
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index 517c845..bdec583 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -34,7 +34,6 @@
#include <linux/highmem.h>
#include <linux/wait.h>
#include <linux/vmalloc.h>
-#include <linux/version.h>
#include <linux/module.h>
void ttm_bo_free_old_node(struct ttm_buffer_object *bo)
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index 27b146c..40b7503 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -32,7 +32,6 @@
#include <ttm/ttm_bo_driver.h>
#include <ttm/ttm_placement.h>
#include <linux/mm.h>
-#include <linux/version.h>
#include <linux/rbtree.h>
#include <linux/module.h>
#include <linux/uaccess.h>
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
index 0331fa7..75dc8bd 100644
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -28,7 +28,6 @@
* Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
*/
-#include <linux/version.h>
#include <linux/vmalloc.h>
#include <linux/sched.h>
#include <linux/highmem.h>
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index 3c259ee..8206442 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -326,6 +326,16 @@
devices such as DaVinci NIC.
For details please see http://www.ti.com/davinci
+config I2C_DESIGNWARE
+ tristate "Synopsys DesignWare"
+ depends on HAVE_CLK
+ help
+ If you say yes to this option, support will be included for the
+ Synopsys DesignWare I2C adapter. Only master mode is supported.
+
+ This driver can also be built as a module. If so, the module
+ will be called i2c-designware.
+
config I2C_GPIO
tristate "GPIO-based bitbanging I2C"
depends on GENERIC_GPIO
diff --git a/drivers/i2c/busses/Makefile b/drivers/i2c/busses/Makefile
index edeabf0..e654263b 100644
--- a/drivers/i2c/busses/Makefile
+++ b/drivers/i2c/busses/Makefile
@@ -30,6 +30,7 @@
obj-$(CONFIG_I2C_BLACKFIN_TWI) += i2c-bfin-twi.o
obj-$(CONFIG_I2C_CPM) += i2c-cpm.o
obj-$(CONFIG_I2C_DAVINCI) += i2c-davinci.o
+obj-$(CONFIG_I2C_DESIGNWARE) += i2c-designware.o
obj-$(CONFIG_I2C_GPIO) += i2c-gpio.o
obj-$(CONFIG_I2C_HIGHLANDER) += i2c-highlander.o
obj-$(CONFIG_I2C_IBM_IIC) += i2c-ibm_iic.o
diff --git a/drivers/i2c/busses/i2c-cpm.c b/drivers/i2c/busses/i2c-cpm.c
index b5db8b8..9c2e100 100644
--- a/drivers/i2c/busses/i2c-cpm.c
+++ b/drivers/i2c/busses/i2c-cpm.c
@@ -140,7 +140,7 @@
dev_dbg(&adap->dev, "Interrupt: %x\n", i);
- wake_up_interruptible(&cpm->i2c_wait);
+ wake_up(&cpm->i2c_wait);
return i ? IRQ_HANDLED : IRQ_NONE;
}
@@ -364,12 +364,12 @@
dev_dbg(&adap->dev, "test ready.\n");
pmsg = &msgs[tptr];
if (pmsg->flags & I2C_M_RD)
- ret = wait_event_interruptible_timeout(cpm->i2c_wait,
+ ret = wait_event_timeout(cpm->i2c_wait,
(in_be16(&tbdf[tptr].cbd_sc) & BD_SC_NAK) ||
!(in_be16(&rbdf[rptr].cbd_sc) & BD_SC_EMPTY),
1 * HZ);
else
- ret = wait_event_interruptible_timeout(cpm->i2c_wait,
+ ret = wait_event_timeout(cpm->i2c_wait,
!(in_be16(&tbdf[tptr].cbd_sc) & BD_SC_READY),
1 * HZ);
if (ret == 0) {
diff --git a/drivers/i2c/busses/i2c-designware.c b/drivers/i2c/busses/i2c-designware.c
new file mode 100644
index 0000000..b444762
--- /dev/null
+++ b/drivers/i2c/busses/i2c-designware.c
@@ -0,0 +1,624 @@
+/*
+ * Synopsys Designware I2C adapter driver (master only).
+ *
+ * Based on the TI DAVINCI I2C adapter driver.
+ *
+ * Copyright (C) 2006 Texas Instruments.
+ * Copyright (C) 2007 MontaVista Software Inc.
+ * Copyright (C) 2009 Provigent Ltd.
+ *
+ * ----------------------------------------------------------------------------
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ * ----------------------------------------------------------------------------
+ *
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <linux/clk.h>
+#include <linux/errno.h>
+#include <linux/sched.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+
+/*
+ * Registers offset
+ */
+#define DW_IC_CON 0x0
+#define DW_IC_TAR 0x4
+#define DW_IC_DATA_CMD 0x10
+#define DW_IC_SS_SCL_HCNT 0x14
+#define DW_IC_SS_SCL_LCNT 0x18
+#define DW_IC_FS_SCL_HCNT 0x1c
+#define DW_IC_FS_SCL_LCNT 0x20
+#define DW_IC_INTR_STAT 0x2c
+#define DW_IC_INTR_MASK 0x30
+#define DW_IC_CLR_INTR 0x40
+#define DW_IC_ENABLE 0x6c
+#define DW_IC_STATUS 0x70
+#define DW_IC_TXFLR 0x74
+#define DW_IC_RXFLR 0x78
+#define DW_IC_COMP_PARAM_1 0xf4
+#define DW_IC_TX_ABRT_SOURCE 0x80
+
+#define DW_IC_CON_MASTER 0x1
+#define DW_IC_CON_SPEED_STD 0x2
+#define DW_IC_CON_SPEED_FAST 0x4
+#define DW_IC_CON_10BITADDR_MASTER 0x10
+#define DW_IC_CON_RESTART_EN 0x20
+#define DW_IC_CON_SLAVE_DISABLE 0x40
+
+#define DW_IC_INTR_TX_EMPTY 0x10
+#define DW_IC_INTR_TX_ABRT 0x40
+#define DW_IC_INTR_STOP_DET 0x200
+
+#define DW_IC_STATUS_ACTIVITY 0x1
+
+#define DW_IC_ERR_TX_ABRT 0x1
+
+/*
+ * status codes
+ */
+#define STATUS_IDLE 0x0
+#define STATUS_WRITE_IN_PROGRESS 0x1
+#define STATUS_READ_IN_PROGRESS 0x2
+
+#define TIMEOUT 20 /* ms */
+
+/*
+ * hardware abort codes from the DW_IC_TX_ABRT_SOURCE register
+ *
+ * Only the expected abort codes are listed here; refer to the
+ * datasheet for the full list.
+ */
+#define ABRT_7B_ADDR_NOACK 0
+#define ABRT_10ADDR1_NOACK 1
+#define ABRT_10ADDR2_NOACK 2
+#define ABRT_TXDATA_NOACK 3
+#define ABRT_GCALL_NOACK 4
+#define ABRT_GCALL_READ 5
+#define ABRT_SBYTE_ACKDET 7
+#define ABRT_SBYTE_NORSTRT 9
+#define ABRT_10B_RD_NORSTRT 10
+#define ARB_MASTER_DIS 11
+#define ARB_LOST 12
+
+static char *abort_sources[] = {
+ [ABRT_7B_ADDR_NOACK] =
+ "slave address not acknowledged (7bit mode)",
+ [ABRT_10ADDR1_NOACK] =
+ "first address byte not acknowledged (10bit mode)",
+ [ABRT_10ADDR2_NOACK] =
+ "second address byte not acknowledged (10bit mode)",
+ [ABRT_TXDATA_NOACK] =
+ "data not acknowledged",
+ [ABRT_GCALL_NOACK] =
+ "no acknowledgement for a general call",
+ [ABRT_GCALL_READ] =
+ "read after general call",
+ [ABRT_SBYTE_ACKDET] =
+ "start byte acknowledged",
+ [ABRT_SBYTE_NORSTRT] =
+ "trying to send start byte when restart is disabled",
+ [ABRT_10B_RD_NORSTRT] =
+ "trying to read when restart is disabled (10bit mode)",
+ [ARB_MASTER_DIS] =
+ "trying to use disabled adapter",
+ [ARB_LOST] =
+ "lost arbitration",
+};
+
+/**
+ * struct dw_i2c_dev - private i2c-designware data
+ * @dev: driver model device node
+ * @base: IO registers pointer
+ * @cmd_complete: tx completion indicator
+ * @pump_msg: continue in progress transfers
+ * @lock: protect this struct and IO registers
+ * @clk: input reference clock
+ * @cmd_err: run time hardware error code
+ * @msgs: points to an array of messages currently being transferred
+ * @msgs_num: the number of elements in msgs
+ * @msg_write_idx: the element index of the current tx message in the msgs
+ * array
+ * @tx_buf_len: the length of the current tx buffer
+ * @tx_buf: the current tx buffer
+ * @msg_read_idx: the element index of the current rx message in the msgs
+ * array
+ * @rx_buf_len: the length of the current rx buffer
+ * @rx_buf: the current rx buffer
+ * @msg_err: error status of the current transfer
+ * @status: i2c master status, one of STATUS_*
+ * @abort_source: copy of the TX_ABRT_SOURCE register
+ * @irq: interrupt number for the i2c master
+ * @adapter: i2c subsystem adapter node
+ * @tx_fifo_depth: depth of the hardware tx fifo
+ * @rx_fifo_depth: depth of the hardware rx fifo
+ */
+struct dw_i2c_dev {
+ struct device *dev;
+ void __iomem *base;
+ struct completion cmd_complete;
+ struct tasklet_struct pump_msg;
+ struct mutex lock;
+ struct clk *clk;
+ int cmd_err;
+ struct i2c_msg *msgs;
+ int msgs_num;
+ int msg_write_idx;
+ u16 tx_buf_len;
+ u8 *tx_buf;
+ int msg_read_idx;
+ u16 rx_buf_len;
+ u8 *rx_buf;
+ int msg_err;
+ unsigned int status;
+ u16 abort_source;
+ int irq;
+ struct i2c_adapter adapter;
+ unsigned int tx_fifo_depth;
+ unsigned int rx_fifo_depth;
+};
+
+/**
+ * i2c_dw_init() - initialize the designware i2c master hardware
+ * @dev: device private data
+ *
+ * This function configures and enables the I2C master.
+ * It is called from the I2C init path, and again at run time in case of
+ * a timeout.
+ */
+static void i2c_dw_init(struct dw_i2c_dev *dev)
+{
+ u32 input_clock_khz = clk_get_rate(dev->clk) / 1000;
+ u16 ic_con;
+
+ /* Disable the adapter */
+ writeb(0, dev->base + DW_IC_ENABLE);
+
+ /* set standard and fast speed dividers for high/low periods */
+ writew((input_clock_khz * 40 / 10000)+1, /* std speed high, 4us */
+ dev->base + DW_IC_SS_SCL_HCNT);
+ writew((input_clock_khz * 47 / 10000)+1, /* std speed low, 4.7us */
+ dev->base + DW_IC_SS_SCL_LCNT);
+ writew((input_clock_khz * 6 / 10000)+1, /* fast speed high, 0.6us */
+ dev->base + DW_IC_FS_SCL_HCNT);
+ writew((input_clock_khz * 13 / 10000)+1, /* fast speed low, 1.3us */
+ dev->base + DW_IC_FS_SCL_LCNT);
+
+ /* configure the i2c master */
+ ic_con = DW_IC_CON_MASTER | DW_IC_CON_SLAVE_DISABLE |
+ DW_IC_CON_RESTART_EN | DW_IC_CON_SPEED_FAST;
+ writew(ic_con, dev->base + DW_IC_CON);
+}
+
+/*
+ * Waiting for bus not busy
+ */
+static int i2c_dw_wait_bus_not_busy(struct dw_i2c_dev *dev)
+{
+ int timeout = TIMEOUT;
+
+ while (readb(dev->base + DW_IC_STATUS) & DW_IC_STATUS_ACTIVITY) {
+ if (timeout <= 0) {
+ dev_warn(dev->dev, "timeout waiting for bus ready\n");
+ return -ETIMEDOUT;
+ }
+ timeout--;
+ mdelay(1);
+ }
+
+ return 0;
+}
+
+/*
+ * Initiate low level master read/write transaction.
+ * This function is called from i2c_dw_xfer when starting a transfer.
+ * This function is also called from dw_i2c_pump_msg to continue a transfer
+ * that is longer than the size of the TX FIFO.
+ */
+static void
+i2c_dw_xfer_msg(struct i2c_adapter *adap)
+{
+ struct dw_i2c_dev *dev = i2c_get_adapdata(adap);
+ struct i2c_msg *msgs = dev->msgs;
+ int num = dev->msgs_num;
+ u16 ic_con, intr_mask;
+ int tx_limit = dev->tx_fifo_depth - readb(dev->base + DW_IC_TXFLR);
+ int rx_limit = dev->rx_fifo_depth - readb(dev->base + DW_IC_RXFLR);
+ u16 addr = msgs[dev->msg_write_idx].addr;
+ u16 buf_len = dev->tx_buf_len;
+
+ if (!(dev->status & STATUS_WRITE_IN_PROGRESS)) {
+ /* Disable the adapter */
+ writeb(0, dev->base + DW_IC_ENABLE);
+
+ /* set the slave (target) address */
+ writew(msgs[dev->msg_write_idx].addr, dev->base + DW_IC_TAR);
+
+ /* if the slave address is ten bit address, enable 10BITADDR */
+ ic_con = readw(dev->base + DW_IC_CON);
+ if (msgs[dev->msg_write_idx].flags & I2C_M_TEN)
+ ic_con |= DW_IC_CON_10BITADDR_MASTER;
+ else
+ ic_con &= ~DW_IC_CON_10BITADDR_MASTER;
+ writew(ic_con, dev->base + DW_IC_CON);
+
+ /* Enable the adapter */
+ writeb(1, dev->base + DW_IC_ENABLE);
+ }
+
+ for (; dev->msg_write_idx < num; dev->msg_write_idx++) {
+ /* if target address has changed, we need to
+ * reprogram the target address in the i2c
+ * adapter when we are done with this transfer
+ */
+ if (msgs[dev->msg_write_idx].addr != addr)
+ return;
+
+ if (msgs[dev->msg_write_idx].len == 0) {
+ dev_err(dev->dev,
+ "%s: invalid message length\n", __func__);
+ dev->msg_err = -EINVAL;
+ return;
+ }
+
+ if (!(dev->status & STATUS_WRITE_IN_PROGRESS)) {
+ /* new i2c_msg */
+ dev->tx_buf = msgs[dev->msg_write_idx].buf;
+ buf_len = msgs[dev->msg_write_idx].len;
+ }
+
+ while (buf_len > 0 && tx_limit > 0 && rx_limit > 0) {
+ if (msgs[dev->msg_write_idx].flags & I2C_M_RD) {
+ writew(0x100, dev->base + DW_IC_DATA_CMD);
+ rx_limit--;
+ } else
+ writew(*(dev->tx_buf++),
+ dev->base + DW_IC_DATA_CMD);
+ tx_limit--; buf_len--;
+ }
+ }
+
+ intr_mask = DW_IC_INTR_STOP_DET | DW_IC_INTR_TX_ABRT;
+ if (buf_len > 0) { /* more bytes to be written */
+ intr_mask |= DW_IC_INTR_TX_EMPTY;
+ dev->status |= STATUS_WRITE_IN_PROGRESS;
+ } else
+ dev->status &= ~STATUS_WRITE_IN_PROGRESS;
+ writew(intr_mask, dev->base + DW_IC_INTR_MASK);
+
+ dev->tx_buf_len = buf_len;
+}
+
+static void
+i2c_dw_read(struct i2c_adapter *adap)
+{
+ struct dw_i2c_dev *dev = i2c_get_adapdata(adap);
+ struct i2c_msg *msgs = dev->msgs;
+ int num = dev->msgs_num;
+ u16 addr = msgs[dev->msg_read_idx].addr;
+ int rx_valid = readw(dev->base + DW_IC_RXFLR);
+
+ for (; dev->msg_read_idx < num; dev->msg_read_idx++) {
+ u16 len;
+ u8 *buf;
+
+ if (!(msgs[dev->msg_read_idx].flags & I2C_M_RD))
+ continue;
+
+ /* different i2c client, reprogram the i2c adapter */
+ if (msgs[dev->msg_read_idx].addr != addr)
+ return;
+
+ if (!(dev->status & STATUS_READ_IN_PROGRESS)) {
+ len = msgs[dev->msg_read_idx].len;
+ buf = msgs[dev->msg_read_idx].buf;
+ } else {
+ len = dev->rx_buf_len;
+ buf = dev->rx_buf;
+ }
+
+ for (; len > 0 && rx_valid > 0; len--, rx_valid--)
+ *buf++ = readb(dev->base + DW_IC_DATA_CMD);
+
+ if (len > 0) {
+ dev->status |= STATUS_READ_IN_PROGRESS;
+ dev->rx_buf_len = len;
+ dev->rx_buf = buf;
+ return;
+ } else
+ dev->status &= ~STATUS_READ_IN_PROGRESS;
+ }
+}
+
+/*
+ * Prepare controller for a transaction and call i2c_dw_xfer_msg
+ */
+static int
+i2c_dw_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num)
+{
+ struct dw_i2c_dev *dev = i2c_get_adapdata(adap);
+ int ret;
+
+ dev_dbg(dev->dev, "%s: msgs: %d\n", __func__, num);
+
+ mutex_lock(&dev->lock);
+
+ INIT_COMPLETION(dev->cmd_complete);
+ dev->msgs = msgs;
+ dev->msgs_num = num;
+ dev->cmd_err = 0;
+ dev->msg_write_idx = 0;
+ dev->msg_read_idx = 0;
+ dev->msg_err = 0;
+ dev->status = STATUS_IDLE;
+
+ ret = i2c_dw_wait_bus_not_busy(dev);
+ if (ret < 0)
+ goto done;
+
+ /* start the transfers */
+ i2c_dw_xfer_msg(adap);
+
+ /* wait for tx to complete */
+ ret = wait_for_completion_interruptible_timeout(&dev->cmd_complete, HZ);
+ if (ret == 0) {
+ dev_err(dev->dev, "controller timed out\n");
+ i2c_dw_init(dev);
+ ret = -ETIMEDOUT;
+ goto done;
+ } else if (ret < 0)
+ goto done;
+
+ if (dev->msg_err) {
+ ret = dev->msg_err;
+ goto done;
+ }
+
+ /* no error */
+ if (likely(!dev->cmd_err)) {
+ /* read rx fifo, and disable the adapter */
+ do {
+ i2c_dw_read(adap);
+ } while (dev->status & STATUS_READ_IN_PROGRESS);
+ writeb(0, dev->base + DW_IC_ENABLE);
+ ret = num;
+ goto done;
+ }
+
+ /* We have an error */
+ if (dev->cmd_err == DW_IC_ERR_TX_ABRT) {
+ unsigned long abort_source = dev->abort_source;
+ int i;
+
+ for_each_bit(i, &abort_source, ARRAY_SIZE(abort_sources)) {
+ dev_err(dev->dev, "%s: %s\n", __func__, abort_sources[i]);
+ }
+ }
+ ret = -EIO;
+
+done:
+ mutex_unlock(&dev->lock);
+
+ return ret;
+}
+
+static u32 i2c_dw_func(struct i2c_adapter *adap)
+{
+ return I2C_FUNC_I2C | I2C_FUNC_10BIT_ADDR;
+}
+
+static void dw_i2c_pump_msg(unsigned long data)
+{
+ struct dw_i2c_dev *dev = (struct dw_i2c_dev *) data;
+ u16 intr_mask;
+
+ i2c_dw_read(&dev->adapter);
+ i2c_dw_xfer_msg(&dev->adapter);
+
+ intr_mask = DW_IC_INTR_STOP_DET | DW_IC_INTR_TX_ABRT;
+ if (dev->status & STATUS_WRITE_IN_PROGRESS)
+ intr_mask |= DW_IC_INTR_TX_EMPTY;
+ writew(intr_mask, dev->base + DW_IC_INTR_MASK);
+}
+
+/*
+ * Interrupt service routine. This gets called whenever an I2C interrupt
+ * occurs.
+ */
+static irqreturn_t i2c_dw_isr(int this_irq, void *dev_id)
+{
+ struct dw_i2c_dev *dev = dev_id;
+ u16 stat;
+
+ stat = readw(dev->base + DW_IC_INTR_STAT);
+ dev_dbg(dev->dev, "%s: stat=0x%x\n", __func__, stat);
+ if (stat & DW_IC_INTR_TX_ABRT) {
+ dev->abort_source = readw(dev->base + DW_IC_TX_ABRT_SOURCE);
+ dev->cmd_err |= DW_IC_ERR_TX_ABRT;
+ dev->status = STATUS_IDLE;
+ } else if (stat & DW_IC_INTR_TX_EMPTY)
+ tasklet_schedule(&dev->pump_msg);
+
+ readb(dev->base + DW_IC_CLR_INTR); /* clear interrupts */
+ writew(0, dev->base + DW_IC_INTR_MASK); /* disable interrupts */
+ if (stat & (DW_IC_INTR_TX_ABRT | DW_IC_INTR_STOP_DET))
+ complete(&dev->cmd_complete);
+
+ return IRQ_HANDLED;
+}
+
+static struct i2c_algorithm i2c_dw_algo = {
+ .master_xfer = i2c_dw_xfer,
+ .functionality = i2c_dw_func,
+};
+
+static int __devinit dw_i2c_probe(struct platform_device *pdev)
+{
+ struct dw_i2c_dev *dev;
+ struct i2c_adapter *adap;
+ struct resource *mem, *irq, *ioarea;
+ int r;
+
+ /* NOTE: driver uses the static register mapping */
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!mem) {
+ dev_err(&pdev->dev, "no mem resource?\n");
+ return -EINVAL;
+ }
+
+ irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (!irq) {
+ dev_err(&pdev->dev, "no irq resource?\n");
+ return -EINVAL;
+ }
+
+ ioarea = request_mem_region(mem->start, resource_size(mem),
+ pdev->name);
+ if (!ioarea) {
+ dev_err(&pdev->dev, "I2C region already claimed\n");
+ return -EBUSY;
+ }
+
+ dev = kzalloc(sizeof(struct dw_i2c_dev), GFP_KERNEL);
+ if (!dev) {
+ r = -ENOMEM;
+ goto err_release_region;
+ }
+
+ init_completion(&dev->cmd_complete);
+ tasklet_init(&dev->pump_msg, dw_i2c_pump_msg, (unsigned long) dev);
+ mutex_init(&dev->lock);
+ dev->dev = get_device(&pdev->dev);
+ dev->irq = irq->start;
+ platform_set_drvdata(pdev, dev);
+
+ dev->clk = clk_get(&pdev->dev, NULL);
+ if (IS_ERR(dev->clk)) {
+ r = -ENODEV;
+ goto err_free_mem;
+ }
+ clk_enable(dev->clk);
+
+ dev->base = ioremap(mem->start, resource_size(mem));
+ if (dev->base == NULL) {
+ dev_err(&pdev->dev, "failure mapping io resources\n");
+ r = -EBUSY;
+ goto err_unuse_clocks;
+ }
+ {
+ u32 param1 = readl(dev->base + DW_IC_COMP_PARAM_1);
+
+ dev->tx_fifo_depth = ((param1 >> 16) & 0xff) + 1;
+ dev->rx_fifo_depth = ((param1 >> 8) & 0xff) + 1;
+ }
+ i2c_dw_init(dev);
+
+ writew(0, dev->base + DW_IC_INTR_MASK); /* disable IRQ */
+ r = request_irq(dev->irq, i2c_dw_isr, 0, pdev->name, dev);
+ if (r) {
+ dev_err(&pdev->dev, "failure requesting irq %i\n", dev->irq);
+ goto err_iounmap;
+ }
+
+ adap = &dev->adapter;
+ i2c_set_adapdata(adap, dev);
+ adap->owner = THIS_MODULE;
+ adap->class = I2C_CLASS_HWMON;
+ strlcpy(adap->name, "Synopsys DesignWare I2C adapter",
+ sizeof(adap->name));
+ adap->algo = &i2c_dw_algo;
+ adap->dev.parent = &pdev->dev;
+
+ adap->nr = pdev->id;
+ r = i2c_add_numbered_adapter(adap);
+ if (r) {
+ dev_err(&pdev->dev, "failure adding adapter\n");
+ goto err_free_irq;
+ }
+
+ return 0;
+
+err_free_irq:
+ free_irq(dev->irq, dev);
+err_iounmap:
+ iounmap(dev->base);
+err_unuse_clocks:
+ clk_disable(dev->clk);
+ clk_put(dev->clk);
+ dev->clk = NULL;
+err_free_mem:
+ platform_set_drvdata(pdev, NULL);
+ put_device(&pdev->dev);
+ kfree(dev);
+err_release_region:
+ release_mem_region(mem->start, resource_size(mem));
+
+ return r;
+}
+
+static int __devexit dw_i2c_remove(struct platform_device *pdev)
+{
+ struct dw_i2c_dev *dev = platform_get_drvdata(pdev);
+ struct resource *mem;
+
+ platform_set_drvdata(pdev, NULL);
+ i2c_del_adapter(&dev->adapter);
+ put_device(&pdev->dev);
+
+ clk_disable(dev->clk);
+ clk_put(dev->clk);
+ dev->clk = NULL;
+
+ writeb(0, dev->base + DW_IC_ENABLE);
+ free_irq(dev->irq, dev);
+ kfree(dev);
+
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ release_mem_region(mem->start, resource_size(mem));
+ return 0;
+}
+
+/* work with hotplug and coldplug */
+MODULE_ALIAS("platform:i2c_designware");
+
+static struct platform_driver dw_i2c_driver = {
+ .remove = __devexit_p(dw_i2c_remove),
+ .driver = {
+ .name = "i2c_designware",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init dw_i2c_init_driver(void)
+{
+ return platform_driver_probe(&dw_i2c_driver, dw_i2c_probe);
+}
+module_init(dw_i2c_init_driver);
+
+static void __exit dw_i2c_exit_driver(void)
+{
+ platform_driver_unregister(&dw_i2c_driver);
+}
+module_exit(dw_i2c_exit_driver);
+
+MODULE_AUTHOR("Baruch Siach <baruch@tkos.co.il>");
+MODULE_DESCRIPTION("Synopsys DesignWare I2C bus adapter");
+MODULE_LICENSE("GPL");
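
i2c_dw_init() above derives the SCL high/low count registers from the input clock so the bus meets the standard-mode (4.0/4.7 us) and fast-mode (0.6/1.3 us) timing. A small sketch of that divider arithmetic, assuming an example 33 MHz reference clock in place of the value clk_get_rate() would return:

#include <stdio.h>

int main(void)
{
	/* assumed reference clock; the driver reads this via clk_get_rate() */
	unsigned int input_clock_khz = 33000;

	/* counts are "clock ticks per required SCL phase", plus one,
	 * mirroring what is written into DW_IC_*_SCL_HCNT/LCNT above */
	unsigned int ss_hcnt = input_clock_khz * 40 / 10000 + 1;	/* 4.0 us */
	unsigned int ss_lcnt = input_clock_khz * 47 / 10000 + 1;	/* 4.7 us */
	unsigned int fs_hcnt = input_clock_khz *  6 / 10000 + 1;	/* 0.6 us */
	unsigned int fs_lcnt = input_clock_khz * 13 / 10000 + 1;	/* 1.3 us */

	printf("std mode:  HCNT=%u LCNT=%u\n", ss_hcnt, ss_lcnt);
	printf("fast mode: HCNT=%u LCNT=%u\n", fs_hcnt, fs_lcnt);
	return 0;
}

With the assumed 33 MHz clock this yields HCNT/LCNT of 133/156 for standard mode and 20/43 for fast mode.
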
diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c
index b606db8..ad8d201 100644
--- a/drivers/i2c/busses/i2c-omap.c
+++ b/drivers/i2c/busses/i2c-omap.c
@@ -339,7 +339,7 @@
* to get longer filter period for better noise suppression.
* The filter is iclk (fclk for HS) period.
*/
- if (dev->speed > 400 || cpu_is_omap_2430())
+ if (dev->speed > 400 || cpu_is_omap2430())
internal_clk = 19200;
else if (dev->speed > 100)
internal_clk = 9600;
diff --git a/drivers/ide/cmd64x.c b/drivers/ide/cmd64x.c
index 03c8620..680e597 100644
--- a/drivers/ide/cmd64x.c
+++ b/drivers/ide/cmd64x.c
@@ -389,8 +389,7 @@
.init_chipset = init_chipset_cmd64x,
.enablebits = {{0x51,0x04,0x04}, {0x51,0x08,0x08}},
.port_ops = &cmd648_port_ops,
- .host_flags = IDE_HFLAG_SERIALIZE |
- IDE_HFLAG_ABUSE_PREFETCH,
+ .host_flags = IDE_HFLAG_ABUSE_PREFETCH,
.pio_mask = ATA_PIO5,
.mwdma_mask = ATA_MWDMA2,
.udma_mask = ATA_UDMA2,
diff --git a/drivers/ide/cs5520.c b/drivers/ide/cs5520.c
index bd066bb..09f98ed 100644
--- a/drivers/ide/cs5520.c
+++ b/drivers/ide/cs5520.c
@@ -135,6 +135,7 @@
ide_pci_setup_ports(dev, d, &hw[0], &hws[0]);
hw[0].irq = 14;
+ hw[1].irq = 15;
return ide_host_add(d, hws, 2, NULL);
}
diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
index 4a19686..f0ede59 100644
--- a/drivers/ide/ide-cd.c
+++ b/drivers/ide/ide-cd.c
@@ -876,9 +876,12 @@
return stat;
/*
- * Sanity check the given block size
+ * Sanity check the given block size, in so far as making
+ * sure the sectors_per_frame we give to the caller won't
+ * end up being bogus.
*/
blocklen = be32_to_cpu(capbuf.blocklen);
+ blocklen = (blocklen >> SECTOR_BITS) << SECTOR_BITS;
switch (blocklen) {
case 512:
case 1024:
@@ -886,10 +889,9 @@
case 4096:
break;
default:
- printk(KERN_ERR PFX "%s: weird block size %u\n",
+ printk_once(KERN_ERR PFX "%s: weird block size %u; "
+ "setting default block size to 2048\n",
drive->name, blocklen);
- printk(KERN_ERR PFX "%s: default to 2kb block size\n",
- drive->name);
blocklen = 2048;
break;
}
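
The ide-cd hunk above rounds the reported block length down to a multiple of the 512-byte sector size before validating it, so the sectors_per_frame handed to the caller can never be bogus. A tiny sketch of that shift-based rounding, with SECTOR_BITS assumed to be 9 as in the IDE code and made-up reported sizes:

#include <stdio.h>

#define SECTOR_BITS 9	/* 512-byte sectors, as in the IDE code */

int main(void)
{
	/* example values a drive might report, including a bogus one */
	unsigned int reported[] = { 512, 2048, 2340, 4096 };
	unsigned int i;

	for (i = 0; i < sizeof(reported) / sizeof(reported[0]); i++) {
		unsigned int blocklen =
			(reported[i] >> SECTOR_BITS) << SECTOR_BITS;

		printf("reported %u -> sanitized %u\n", reported[i], blocklen);
	}
	return 0;
}

A bogus 2340-byte report, for instance, is sanitized to 2048 and then passes the switch statement unchanged.
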
diff --git a/drivers/ide/ide-dma.c b/drivers/ide/ide-dma.c
index 219e6fb..ee58c88 100644
--- a/drivers/ide/ide-dma.c
+++ b/drivers/ide/ide-dma.c
@@ -361,9 +361,6 @@
if (__ide_dma_bad_drive(drive))
return 0;
- if (ide_id_dma_bug(drive))
- return 0;
-
if (hwif->host_flags & IDE_HFLAG_TRUST_BIOS_FOR_DMA)
return config_drive_for_dma(drive);
@@ -394,24 +391,6 @@
return -1;
}
-int ide_id_dma_bug(ide_drive_t *drive)
-{
- u16 *id = drive->id;
-
- if (id[ATA_ID_FIELD_VALID] & 4) {
- if ((id[ATA_ID_UDMA_MODES] >> 8) &&
- (id[ATA_ID_MWDMA_MODES] >> 8))
- goto err_out;
- } else if ((id[ATA_ID_MWDMA_MODES] >> 8) &&
- (id[ATA_ID_SWDMA_MODES] >> 8))
- goto err_out;
-
- return 0;
-err_out:
- printk(KERN_ERR "%s: bad DMA info in identify block\n", drive->name);
- return 1;
-}
-
int ide_set_dma(ide_drive_t *drive)
{
int rc;
diff --git a/drivers/ide/ide-io.c b/drivers/ide/ide-io.c
index 1059f80..93b7886 100644
--- a/drivers/ide/ide-io.c
+++ b/drivers/ide/ide-io.c
@@ -476,10 +476,14 @@
if (!ide_lock_port(hwif)) {
ide_hwif_t *prev_port;
-
- WARN_ON_ONCE(hwif->rq);
repeat:
prev_port = hwif->host->cur_port;
+
+ if (drive->dev_flags & IDE_DFLAG_BLOCKED)
+ rq = hwif->rq;
+ else
+ WARN_ON_ONCE(hwif->rq);
+
if (drive->dev_flags & IDE_DFLAG_SLEEPING &&
time_after(drive->sleep, jiffies)) {
ide_unlock_port(hwif);
@@ -506,43 +510,29 @@
hwif->cur_dev = drive;
drive->dev_flags &= ~(IDE_DFLAG_SLEEPING | IDE_DFLAG_PARKED);
- spin_unlock_irq(&hwif->lock);
- spin_lock_irq(q->queue_lock);
- /*
- * we know that the queue isn't empty, but this can happen
- * if the q->prep_rq_fn() decides to kill a request
- */
- if (!rq)
+ if (rq == NULL) {
+ spin_unlock_irq(&hwif->lock);
+ spin_lock_irq(q->queue_lock);
+ /*
+ * we know that the queue isn't empty, but this can
+ * happen if ->prep_rq_fn() decides to kill a request
+ */
rq = blk_fetch_request(drive->queue);
+ spin_unlock_irq(q->queue_lock);
+ spin_lock_irq(&hwif->lock);
- spin_unlock_irq(q->queue_lock);
- spin_lock_irq(&hwif->lock);
-
- if (!rq) {
- ide_unlock_port(hwif);
- goto out;
+ if (rq == NULL) {
+ ide_unlock_port(hwif);
+ goto out;
+ }
}
/*
* Sanity: don't accept a request that isn't a PM request
- * if we are currently power managed. This is very important as
- * blk_stop_queue() doesn't prevent the blk_fetch_request()
- * above to return us whatever is in the queue. Since we call
- * ide_do_request() ourselves, we end up taking requests while
- * the queue is blocked...
- *
- * We let requests forced at head of queue with ide-preempt
- * though. I hope that doesn't happen too much, hopefully not
- * unless the subdriver triggers such a thing in its own PM
- * state machine.
+ * if we are currently power managed.
*/
- if ((drive->dev_flags & IDE_DFLAG_BLOCKED) &&
- blk_pm_request(rq) == 0 &&
- (rq->cmd_flags & REQ_PREEMPT) == 0) {
- /* there should be no pending command at this point */
- ide_unlock_port(hwif);
- goto plug_device;
- }
+ BUG_ON((drive->dev_flags & IDE_DFLAG_BLOCKED) &&
+ blk_pm_request(rq) == 0);
hwif->rq = rq;
diff --git a/drivers/ide/ide-iops.c b/drivers/ide/ide-iops.c
index fa04715..2892b24 100644
--- a/drivers/ide/ide-iops.c
+++ b/drivers/ide/ide-iops.c
@@ -210,6 +210,7 @@
*/
static const struct drive_list_entry ivb_list[] = {
{ "QUANTUM FIREBALLlct10 05" , "A03.0900" },
+ { "QUANTUM FIREBALLlct20 30" , "APL.0900" },
{ "TSSTcorp CDDVDW SH-S202J" , "SB00" },
{ "TSSTcorp CDDVDW SH-S202J" , "SB01" },
{ "TSSTcorp CDDVDW SH-S202N" , "SB00" },
@@ -329,9 +330,6 @@
kfree(id);
- if ((drive->dev_flags & IDE_DFLAG_USING_DMA) && ide_id_dma_bug(drive))
- ide_dma_off(drive);
-
return 1;
out_err:
if (rc == 2)
diff --git a/drivers/ide/ide-probe.c b/drivers/ide/ide-probe.c
index 51af4ee..1bb106f 100644
--- a/drivers/ide/ide-probe.c
+++ b/drivers/ide/ide-probe.c
@@ -818,6 +818,24 @@
return j;
}
+static void ide_host_enable_irqs(struct ide_host *host)
+{
+ ide_hwif_t *hwif;
+ int i;
+
+ ide_host_for_each_port(i, hwif, host) {
+ if (hwif == NULL)
+ continue;
+
+ /* clear any pending IRQs */
+ hwif->tp_ops->read_status(hwif);
+
+ /* unmask IRQs */
+ if (hwif->io_ports.ctl_addr)
+ hwif->tp_ops->write_devctl(hwif, ATA_DEVCTL_OBS);
+ }
+}
+
/*
* This routine sets up the IRQ for an IDE interface.
*/
@@ -831,9 +849,6 @@
if (irq_handler == NULL)
irq_handler = ide_intr;
- if (io_ports->ctl_addr)
- hwif->tp_ops->write_devctl(hwif, ATA_DEVCTL_OBS);
-
if (request_irq(hwif->irq, irq_handler, sa, hwif->name, hwif))
goto out_up;
@@ -1404,6 +1419,8 @@
ide_port_tune_devices(hwif);
}
+ ide_host_enable_irqs(host);
+
ide_host_for_each_port(i, hwif, host) {
if (hwif == NULL)
continue;
diff --git a/drivers/ieee1394/Kconfig b/drivers/ieee1394/Kconfig
index 95f45f9..f102fcc 100644
--- a/drivers/ieee1394/Kconfig
+++ b/drivers/ieee1394/Kconfig
@@ -4,7 +4,7 @@
source "drivers/firewire/Kconfig"
config IEEE1394
- tristate "Stable FireWire stack"
+ tristate "Legacy alternative FireWire driver stack"
depends on PCI || BROKEN
help
IEEE 1394 describes a high performance serial bus, which is also
@@ -33,11 +33,9 @@
module will be called ohci1394.
NOTE:
-
- You should only build either ohci1394 or the new firewire-ohci driver,
- but not both. If you nevertheless want to install both, you should
- configure them only as modules and blacklist the driver(s) which you
- don't want to have auto-loaded. Add either
+ If you want to install firewire-ohci and ohci1394 together, you
+ should configure them only as modules and blacklist the driver(s)
+ which you don't want to have auto-loaded. Add either
blacklist firewire-ohci
or
@@ -46,12 +44,7 @@
blacklist dv1394
to /etc/modprobe.conf or /etc/modprobe.d/* and update modprobe.conf
- depending on your distribution. The latter two modules should be
- blacklisted together with ohci1394 because they depend on ohci1394.
-
- If you have an old modprobe which doesn't implement the blacklist
- directive, use "install modulename /bin/true" for the modules to be
- blacklisted.
+ depending on your distribution.
comment "PCILynx controller requires I2C"
depends on IEEE1394 && I2C=n
@@ -105,7 +98,7 @@
default n
config IEEE1394_ETH1394
- tristate "IP over 1394"
+ tristate "IP networking over 1394 (experimental)"
depends on IEEE1394 && EXPERIMENTAL && INET
select IEEE1394_ETH1394_ROM_ENTRY
help
diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c
index ce511d8..5be1bd4 100644
--- a/drivers/infiniband/core/addr.c
+++ b/drivers/infiniband/core/addr.c
@@ -514,7 +514,7 @@
.notifier_call = netevent_callback
};
-static int addr_init(void)
+static int __init addr_init(void)
{
addr_wq = create_singlethread_workqueue("ib_addr");
if (!addr_wq)
@@ -524,7 +524,7 @@
return 0;
}
-static void addr_cleanup(void)
+static void __exit addr_cleanup(void)
{
unregister_netevent_notifier(&nb);
destroy_workqueue(addr_wq);
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index 851de83..0753178 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -2960,7 +2960,7 @@
kfree(cma_dev);
}
-static int cma_init(void)
+static int __init cma_init(void)
{
int ret, low, high, remaining;
@@ -2990,7 +2990,7 @@
return ret;
}
-static void cma_cleanup(void)
+static void __exit cma_cleanup(void)
{
ib_unregister_client(&cma_client);
unregister_netdevice_notifier(&cma_nb);
diff --git a/drivers/infiniband/hw/ehca/ehca_hca.c b/drivers/infiniband/hw/ehca/ehca_hca.c
index 9209c53..8b92f85 100644
--- a/drivers/infiniband/hw/ehca/ehca_hca.c
+++ b/drivers/infiniband/hw/ehca/ehca_hca.c
@@ -319,7 +319,7 @@
ib_device);
struct hipz_query_port *rblock;
- if (index > 255) {
+ if (index < 0 || index > 255) {
ehca_err(&shca->ib_device, "Invalid index: %x.", index);
return -EINVAL;
}
diff --git a/drivers/infiniband/hw/ehca/ehca_main.c b/drivers/infiniband/hw/ehca/ehca_main.c
index ce4e6ef..fab18a2 100644
--- a/drivers/infiniband/hw/ehca/ehca_main.c
+++ b/drivers/infiniband/hw/ehca/ehca_main.c
@@ -52,7 +52,7 @@
#include "ehca_tools.h"
#include "hcp_if.h"
-#define HCAD_VERSION "0027"
+#define HCAD_VERSION "0028"
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Christoph Raisch <raisch@de.ibm.com>");
@@ -506,6 +506,7 @@
shca->ib_device.detach_mcast = ehca_detach_mcast;
shca->ib_device.process_mad = ehca_process_mad;
shca->ib_device.mmap = ehca_mmap;
+ shca->ib_device.dma_ops = &ehca_dma_mapping_ops;
if (EHCA_BMASK_GET(HCA_CAP_SRQ, shca->hca_cap)) {
shca->ib_device.uverbs_cmd_mask |=
@@ -1028,17 +1029,23 @@
goto module_init1;
}
+ ret = ehca_create_busmap();
+ if (ret) {
+ ehca_gen_err("Cannot create busmap.");
+ goto module_init2;
+ }
+
ret = ibmebus_register_driver(&ehca_driver);
if (ret) {
ehca_gen_err("Cannot register eHCA device driver");
ret = -EINVAL;
- goto module_init2;
+ goto module_init3;
}
ret = register_memory_notifier(&ehca_mem_nb);
if (ret) {
ehca_gen_err("Failed registering memory add/remove notifier");
- goto module_init3;
+ goto module_init4;
}
if (ehca_poll_all_eqs != 1) {
@@ -1053,9 +1060,12 @@
return 0;
-module_init3:
+module_init4:
ibmebus_unregister_driver(&ehca_driver);
+module_init3:
+ ehca_destroy_busmap();
+
module_init2:
ehca_destroy_slab_caches();
@@ -1073,6 +1083,8 @@
unregister_memory_notifier(&ehca_mem_nb);
+ ehca_destroy_busmap();
+
ehca_destroy_slab_caches();
ehca_destroy_comp_pool();
diff --git a/drivers/infiniband/hw/ehca/ehca_mrmw.c b/drivers/infiniband/hw/ehca/ehca_mrmw.c
index 72f83f7..7663a2a 100644
--- a/drivers/infiniband/hw/ehca/ehca_mrmw.c
+++ b/drivers/infiniband/hw/ehca/ehca_mrmw.c
@@ -53,6 +53,38 @@
/* max number of rpages (per hcall register_rpages) */
#define MAX_RPAGES 512
+/* DMEM toleration management */
+#define EHCA_SECTSHIFT SECTION_SIZE_BITS
+#define EHCA_SECTSIZE (1UL << EHCA_SECTSHIFT)
+#define EHCA_HUGEPAGESHIFT 34
+#define EHCA_HUGEPAGE_SIZE (1UL << EHCA_HUGEPAGESHIFT)
+#define EHCA_HUGEPAGE_PFN_MASK ((EHCA_HUGEPAGE_SIZE - 1) >> PAGE_SHIFT)
+#define EHCA_INVAL_ADDR 0xFFFFFFFFFFFFFFFFULL
+#define EHCA_DIR_INDEX_SHIFT 13 /* 8k Entries in 64k block */
+#define EHCA_TOP_INDEX_SHIFT (EHCA_DIR_INDEX_SHIFT * 2)
+#define EHCA_MAP_ENTRIES (1 << EHCA_DIR_INDEX_SHIFT)
+#define EHCA_TOP_MAP_SIZE (0x10000) /* currently fixed map size */
+#define EHCA_DIR_MAP_SIZE (0x10000)
+#define EHCA_ENT_MAP_SIZE (0x10000)
+#define EHCA_INDEX_MASK (EHCA_MAP_ENTRIES - 1)
+
+static unsigned long ehca_mr_len;
+
+/*
+ * Memory map data structures
+ */
+struct ehca_dir_bmap {
+ u64 ent[EHCA_MAP_ENTRIES];
+};
+struct ehca_top_bmap {
+ struct ehca_dir_bmap *dir[EHCA_MAP_ENTRIES];
+};
+struct ehca_bmap {
+ struct ehca_top_bmap *top[EHCA_MAP_ENTRIES];
+};
+
+static struct ehca_bmap *ehca_bmap;
+
static struct kmem_cache *mr_cache;
static struct kmem_cache *mw_cache;
@@ -68,6 +100,8 @@
#define EHCA_MR_PGSHIFT1M 20
#define EHCA_MR_PGSHIFT16M 24
+static u64 ehca_map_vaddr(void *caddr);
+
static u32 ehca_encode_hwpage_size(u32 pgsize)
{
int log = ilog2(pgsize);
@@ -135,7 +169,8 @@
goto get_dma_mr_exit0;
}
- ret = ehca_reg_maxmr(shca, e_maxmr, (u64 *)KERNELBASE,
+ ret = ehca_reg_maxmr(shca, e_maxmr,
+ (void *)ehca_map_vaddr((void *)KERNELBASE),
mr_access_flags, e_pd,
&e_maxmr->ib.ib_mr.lkey,
&e_maxmr->ib.ib_mr.rkey);
@@ -251,7 +286,7 @@
ret = ehca_reg_mr(shca, e_mr, iova_start, size, mr_access_flags,
e_pd, &pginfo, &e_mr->ib.ib_mr.lkey,
- &e_mr->ib.ib_mr.rkey);
+ &e_mr->ib.ib_mr.rkey, EHCA_REG_MR);
if (ret) {
ib_mr = ERR_PTR(ret);
goto reg_phys_mr_exit1;
@@ -370,7 +405,7 @@
ret = ehca_reg_mr(shca, e_mr, (u64 *)virt, length, mr_access_flags,
e_pd, &pginfo, &e_mr->ib.ib_mr.lkey,
- &e_mr->ib.ib_mr.rkey);
+ &e_mr->ib.ib_mr.rkey, EHCA_REG_MR);
if (ret == -EINVAL && pginfo.hwpage_size > PAGE_SIZE) {
ehca_warn(pd->device, "failed to register mr "
"with hwpage_size=%llx", hwpage_size);
@@ -794,7 +829,7 @@
ret = ehca_reg_mr(shca, e_fmr, NULL,
fmr_attr->max_pages * (1 << fmr_attr->page_shift),
mr_access_flags, e_pd, &pginfo,
- &tmp_lkey, &tmp_rkey);
+ &tmp_lkey, &tmp_rkey, EHCA_REG_MR);
if (ret) {
ib_fmr = ERR_PTR(ret);
goto alloc_fmr_exit1;
@@ -983,6 +1018,10 @@
/*----------------------------------------------------------------------*/
+static int ehca_reg_bmap_mr_rpages(struct ehca_shca *shca,
+ struct ehca_mr *e_mr,
+ struct ehca_mr_pginfo *pginfo);
+
int ehca_reg_mr(struct ehca_shca *shca,
struct ehca_mr *e_mr,
u64 *iova_start,
@@ -991,7 +1030,8 @@
struct ehca_pd *e_pd,
struct ehca_mr_pginfo *pginfo,
u32 *lkey, /*OUT*/
- u32 *rkey) /*OUT*/
+ u32 *rkey, /*OUT*/
+ enum ehca_reg_type reg_type)
{
int ret;
u64 h_ret;
@@ -1015,7 +1055,13 @@
e_mr->ipz_mr_handle = hipzout.handle;
- ret = ehca_reg_mr_rpages(shca, e_mr, pginfo);
+ if (reg_type == EHCA_REG_BUSMAP_MR)
+ ret = ehca_reg_bmap_mr_rpages(shca, e_mr, pginfo);
+ else if (reg_type == EHCA_REG_MR)
+ ret = ehca_reg_mr_rpages(shca, e_mr, pginfo);
+ else
+ ret = -EINVAL;
+
if (ret)
goto ehca_reg_mr_exit1;
@@ -1316,7 +1362,7 @@
e_mr->fmr_map_cnt = save_mr.fmr_map_cnt;
ret = ehca_reg_mr(shca, e_mr, iova_start, size, acl,
- e_pd, pginfo, lkey, rkey);
+ e_pd, pginfo, lkey, rkey, EHCA_REG_MR);
if (ret) {
u32 offset = (u64)(&e_mr->flags) - (u64)e_mr;
memcpy(&e_mr->flags, &(save_mr.flags),
@@ -1409,7 +1455,7 @@
ret = ehca_reg_mr(shca, e_fmr, NULL,
(e_fmr->fmr_max_pages * e_fmr->fmr_page_size),
e_fmr->acl, e_pd, &pginfo, &tmp_lkey,
- &tmp_rkey);
+ &tmp_rkey, EHCA_REG_MR);
if (ret) {
u32 offset = (u64)(&e_fmr->flags) - (u64)e_fmr;
memcpy(&e_fmr->flags, &(save_mr.flags),
@@ -1478,6 +1524,90 @@
} /* end ehca_reg_smr() */
/*----------------------------------------------------------------------*/
+static inline void *ehca_calc_sectbase(int top, int dir, int idx)
+{
+ unsigned long ret = idx;
+ ret |= dir << EHCA_DIR_INDEX_SHIFT;
+ ret |= top << EHCA_TOP_INDEX_SHIFT;
+ return abs_to_virt(ret << SECTION_SIZE_BITS);
+}
+
+#define ehca_bmap_valid(entry) \
+ ((u64)entry != (u64)EHCA_INVAL_ADDR)
+
+static u64 ehca_reg_mr_section(int top, int dir, int idx, u64 *kpage,
+ struct ehca_shca *shca, struct ehca_mr *mr,
+ struct ehca_mr_pginfo *pginfo)
+{
+ u64 h_ret = 0;
+ unsigned long page = 0;
+ u64 rpage = virt_to_abs(kpage);
+ int page_count;
+
+ void *sectbase = ehca_calc_sectbase(top, dir, idx);
+ if ((unsigned long)sectbase & (pginfo->hwpage_size - 1)) {
+ ehca_err(&shca->ib_device, "reg_mr_section will probably fail: "
+ "hwpage_size does not fit to "
+ "section start address");
+ }
+ page_count = EHCA_SECTSIZE / pginfo->hwpage_size;
+
+ while (page < page_count) {
+ u64 rnum;
+ for (rnum = 0; (rnum < MAX_RPAGES) && (page < page_count);
+ rnum++) {
+ void *pg = sectbase + ((page++) * pginfo->hwpage_size);
+ kpage[rnum] = virt_to_abs(pg);
+ }
+
+ h_ret = hipz_h_register_rpage_mr(shca->ipz_hca_handle, mr,
+ ehca_encode_hwpage_size(pginfo->hwpage_size),
+ 0, rpage, rnum);
+
+ if ((h_ret != H_SUCCESS) && (h_ret != H_PAGE_REGISTERED)) {
+ ehca_err(&shca->ib_device, "register_rpage_mr failed");
+ return h_ret;
+ }
+ }
+ return h_ret;
+}
+
+static u64 ehca_reg_mr_sections(int top, int dir, u64 *kpage,
+ struct ehca_shca *shca, struct ehca_mr *mr,
+ struct ehca_mr_pginfo *pginfo)
+{
+ u64 hret = H_SUCCESS;
+ int idx;
+
+ for (idx = 0; idx < EHCA_MAP_ENTRIES; idx++) {
+ if (!ehca_bmap_valid(ehca_bmap->top[top]->dir[dir]->ent[idx]))
+ continue;
+
+ hret = ehca_reg_mr_section(top, dir, idx, kpage, shca, mr,
+ pginfo);
+ if ((hret != H_SUCCESS) && (hret != H_PAGE_REGISTERED))
+ return hret;
+ }
+ return hret;
+}
+
+static u64 ehca_reg_mr_dir_sections(int top, u64 *kpage, struct ehca_shca *shca,
+ struct ehca_mr *mr,
+ struct ehca_mr_pginfo *pginfo)
+{
+ u64 hret = H_SUCCESS;
+ int dir;
+
+ for (dir = 0; dir < EHCA_MAP_ENTRIES; dir++) {
+ if (!ehca_bmap_valid(ehca_bmap->top[top]->dir[dir]))
+ continue;
+
+ hret = ehca_reg_mr_sections(top, dir, kpage, shca, mr, pginfo);
+ if ((hret != H_SUCCESS) && (hret != H_PAGE_REGISTERED))
+ return hret;
+ }
+ return hret;
+}
/* register internal max-MR to internal SHCA */
int ehca_reg_internal_maxmr(
@@ -1495,6 +1625,11 @@
u32 num_hwpages;
u64 hw_pgsize;
+ if (!ehca_bmap) {
+ ret = -EFAULT;
+ goto ehca_reg_internal_maxmr_exit0;
+ }
+
e_mr = ehca_mr_new();
if (!e_mr) {
ehca_err(&shca->ib_device, "out of memory");
@@ -1504,8 +1639,8 @@
e_mr->flags |= EHCA_MR_FLAG_MAXMR;
/* register internal max-MR on HCA */
- size_maxmr = (u64)high_memory - PAGE_OFFSET;
- iova_start = (u64 *)KERNELBASE;
+ size_maxmr = ehca_mr_len;
+ iova_start = (u64 *)ehca_map_vaddr((void *)KERNELBASE);
ib_pbuf.addr = 0;
ib_pbuf.size = size_maxmr;
num_kpages = NUM_CHUNKS(((u64)iova_start % PAGE_SIZE) + size_maxmr,
@@ -1524,7 +1659,7 @@
ret = ehca_reg_mr(shca, e_mr, iova_start, size_maxmr, 0, e_pd,
&pginfo, &e_mr->ib.ib_mr.lkey,
- &e_mr->ib.ib_mr.rkey);
+ &e_mr->ib.ib_mr.rkey, EHCA_REG_BUSMAP_MR);
if (ret) {
ehca_err(&shca->ib_device, "reg of internal max MR failed, "
"e_mr=%p iova_start=%p size_maxmr=%llx num_kpages=%x "
@@ -2077,8 +2212,8 @@
u64 *iova_start)
{
/* a MR is treated as max-MR only if it fits following: */
- if ((size == ((u64)high_memory - PAGE_OFFSET)) &&
- (iova_start == (void *)KERNELBASE)) {
+ if ((size == ehca_mr_len) &&
+ (iova_start == (void *)ehca_map_vaddr((void *)KERNELBASE))) {
ehca_gen_dbg("this is a max-MR");
return 1;
} else
@@ -2184,3 +2319,350 @@
if (mw_cache)
kmem_cache_destroy(mw_cache);
}
+
+static inline int ehca_init_top_bmap(struct ehca_top_bmap *ehca_top_bmap,
+ int dir)
+{
+ if (!ehca_bmap_valid(ehca_top_bmap->dir[dir])) {
+ ehca_top_bmap->dir[dir] =
+ kmalloc(sizeof(struct ehca_dir_bmap), GFP_KERNEL);
+ if (!ehca_top_bmap->dir[dir])
+ return -ENOMEM;
+ /* Set map block to 0xFF according to EHCA_INVAL_ADDR */
+ memset(ehca_top_bmap->dir[dir], 0xFF, EHCA_ENT_MAP_SIZE);
+ }
+ return 0;
+}
+
+static inline int ehca_init_bmap(struct ehca_bmap *ehca_bmap, int top, int dir)
+{
+ if (!ehca_bmap_valid(ehca_bmap->top[top])) {
+ ehca_bmap->top[top] =
+ kmalloc(sizeof(struct ehca_top_bmap), GFP_KERNEL);
+ if (!ehca_bmap->top[top])
+ return -ENOMEM;
+ /* Set map block to 0xFF according to EHCA_INVAL_ADDR */
+ memset(ehca_bmap->top[top], 0xFF, EHCA_DIR_MAP_SIZE);
+ }
+ return ehca_init_top_bmap(ehca_bmap->top[top], dir);
+}
+
+static inline int ehca_calc_index(unsigned long i, unsigned long s)
+{
+ return (i >> s) & EHCA_INDEX_MASK;
+}
+
+void ehca_destroy_busmap(void)
+{
+ int top, dir;
+
+ if (!ehca_bmap)
+ return;
+
+ for (top = 0; top < EHCA_MAP_ENTRIES; top++) {
+ if (!ehca_bmap_valid(ehca_bmap->top[top]))
+ continue;
+ for (dir = 0; dir < EHCA_MAP_ENTRIES; dir++) {
+ if (!ehca_bmap_valid(ehca_bmap->top[top]->dir[dir]))
+ continue;
+
+ kfree(ehca_bmap->top[top]->dir[dir]);
+ }
+
+ kfree(ehca_bmap->top[top]);
+ }
+
+ kfree(ehca_bmap);
+ ehca_bmap = NULL;
+}
+
+static int ehca_update_busmap(unsigned long pfn, unsigned long nr_pages)
+{
+ unsigned long i, start_section, end_section;
+ int top, dir, idx;
+
+ if (!nr_pages)
+ return 0;
+
+ if (!ehca_bmap) {
+ ehca_bmap = kmalloc(sizeof(struct ehca_bmap), GFP_KERNEL);
+ if (!ehca_bmap)
+ return -ENOMEM;
+ /* Set map block to 0xFF according to EHCA_INVAL_ADDR */
+ memset(ehca_bmap, 0xFF, EHCA_TOP_MAP_SIZE);
+ }
+
+ start_section = phys_to_abs(pfn * PAGE_SIZE) / EHCA_SECTSIZE;
+ end_section = phys_to_abs((pfn + nr_pages) * PAGE_SIZE) / EHCA_SECTSIZE;
+ for (i = start_section; i < end_section; i++) {
+ int ret;
+ top = ehca_calc_index(i, EHCA_TOP_INDEX_SHIFT);
+ dir = ehca_calc_index(i, EHCA_DIR_INDEX_SHIFT);
+ idx = i & EHCA_INDEX_MASK;
+
+ ret = ehca_init_bmap(ehca_bmap, top, dir);
+ if (ret) {
+ ehca_destroy_busmap();
+ return ret;
+ }
+ ehca_bmap->top[top]->dir[dir]->ent[idx] = ehca_mr_len;
+ ehca_mr_len += EHCA_SECTSIZE;
+ }
+ return 0;
+}
+
+static int ehca_is_hugepage(unsigned long pfn)
+{
+ int page_order;
+
+ if (pfn & EHCA_HUGEPAGE_PFN_MASK)
+ return 0;
+
+ page_order = compound_order(pfn_to_page(pfn));
+ if (page_order + PAGE_SHIFT != EHCA_HUGEPAGESHIFT)
+ return 0;
+
+ return 1;
+}
+
+static int ehca_create_busmap_callback(unsigned long initial_pfn,
+ unsigned long total_nr_pages, void *arg)
+{
+ int ret;
+ unsigned long pfn, start_pfn, end_pfn, nr_pages;
+
+ if ((total_nr_pages * PAGE_SIZE) < EHCA_HUGEPAGE_SIZE)
+ return ehca_update_busmap(initial_pfn, total_nr_pages);
+
+ /* Given chunk is >= 16GB -> check for hugepages */
+ start_pfn = initial_pfn;
+ end_pfn = initial_pfn + total_nr_pages;
+ pfn = start_pfn;
+
+ while (pfn < end_pfn) {
+ if (ehca_is_hugepage(pfn)) {
+ /* Add mem found in front of the hugepage */
+ nr_pages = pfn - start_pfn;
+ ret = ehca_update_busmap(start_pfn, nr_pages);
+ if (ret)
+ return ret;
+ /* Skip the hugepage */
+ pfn += (EHCA_HUGEPAGE_SIZE / PAGE_SIZE);
+ start_pfn = pfn;
+ } else
+ pfn += (EHCA_SECTSIZE / PAGE_SIZE);
+ }
+
+ /* Add mem found behind the hugepage(s) */
+ nr_pages = pfn - start_pfn;
+ return ehca_update_busmap(start_pfn, nr_pages);
+}
+
+int ehca_create_busmap(void)
+{
+ int ret;
+
+ ehca_mr_len = 0;
+ ret = walk_memory_resource(0, 1ULL << MAX_PHYSMEM_BITS, NULL,
+ ehca_create_busmap_callback);
+ return ret;
+}
+
+static int ehca_reg_bmap_mr_rpages(struct ehca_shca *shca,
+ struct ehca_mr *e_mr,
+ struct ehca_mr_pginfo *pginfo)
+{
+ int top;
+ u64 hret, *kpage;
+
+ kpage = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
+ if (!kpage) {
+ ehca_err(&shca->ib_device, "kpage alloc failed");
+ return -ENOMEM;
+ }
+ for (top = 0; top < EHCA_MAP_ENTRIES; top++) {
+ if (!ehca_bmap_valid(ehca_bmap->top[top]))
+ continue;
+ hret = ehca_reg_mr_dir_sections(top, kpage, shca, e_mr, pginfo);
+ if ((hret != H_PAGE_REGISTERED) && (hret != H_SUCCESS))
+ break;
+ }
+
+ ehca_free_fw_ctrlblock(kpage);
+
+ if (hret == H_SUCCESS)
+ return 0; /* Everything is fine */
+ else {
+ ehca_err(&shca->ib_device, "ehca_reg_bmap_mr_rpages failed, "
+ "h_ret=%lli e_mr=%p top=%x lkey=%x "
+ "hca_hndl=%llx mr_hndl=%llx", hret, e_mr, top,
+ e_mr->ib.ib_mr.lkey,
+ shca->ipz_hca_handle.handle,
+ e_mr->ipz_mr_handle.handle);
+ return ehca2ib_return_code(hret);
+ }
+}
+
+static u64 ehca_map_vaddr(void *caddr)
+{
+ int top, dir, idx;
+ unsigned long abs_addr, offset;
+ u64 entry;
+
+ if (!ehca_bmap)
+ return EHCA_INVAL_ADDR;
+
+ abs_addr = virt_to_abs(caddr);
+ top = ehca_calc_index(abs_addr, EHCA_TOP_INDEX_SHIFT + EHCA_SECTSHIFT);
+ if (!ehca_bmap_valid(ehca_bmap->top[top]))
+ return EHCA_INVAL_ADDR;
+
+ dir = ehca_calc_index(abs_addr, EHCA_DIR_INDEX_SHIFT + EHCA_SECTSHIFT);
+ if (!ehca_bmap_valid(ehca_bmap->top[top]->dir[dir]))
+ return EHCA_INVAL_ADDR;
+
+ idx = ehca_calc_index(abs_addr, EHCA_SECTSHIFT);
+
+ entry = ehca_bmap->top[top]->dir[dir]->ent[idx];
+ if (ehca_bmap_valid(entry)) {
+ offset = (unsigned long)caddr & (EHCA_SECTSIZE - 1);
+ return entry | offset;
+ } else
+ return EHCA_INVAL_ADDR;
+}
+
+static int ehca_dma_mapping_error(struct ib_device *dev, u64 dma_addr)
+{
+ return dma_addr == EHCA_INVAL_ADDR;
+}
+
+static u64 ehca_dma_map_single(struct ib_device *dev, void *cpu_addr,
+ size_t size, enum dma_data_direction direction)
+{
+ if (cpu_addr)
+ return ehca_map_vaddr(cpu_addr);
+ else
+ return EHCA_INVAL_ADDR;
+}
+
+static void ehca_dma_unmap_single(struct ib_device *dev, u64 addr, size_t size,
+ enum dma_data_direction direction)
+{
+ /* This is only a stub; nothing to be done here */
+}
+
+static u64 ehca_dma_map_page(struct ib_device *dev, struct page *page,
+ unsigned long offset, size_t size,
+ enum dma_data_direction direction)
+{
+ u64 addr;
+
+ if (offset + size > PAGE_SIZE)
+ return EHCA_INVAL_ADDR;
+
+ addr = ehca_map_vaddr(page_address(page));
+ if (!ehca_dma_mapping_error(dev, addr))
+ addr += offset;
+
+ return addr;
+}
+
+static void ehca_dma_unmap_page(struct ib_device *dev, u64 addr, size_t size,
+ enum dma_data_direction direction)
+{
+ /* This is only a stub; nothing to be done here */
+}
+
+static int ehca_dma_map_sg(struct ib_device *dev, struct scatterlist *sgl,
+ int nents, enum dma_data_direction direction)
+{
+ struct scatterlist *sg;
+ int i;
+
+ for_each_sg(sgl, sg, nents, i) {
+ u64 addr;
+ addr = ehca_map_vaddr(sg_virt(sg));
+ if (ehca_dma_mapping_error(dev, addr))
+ return 0;
+
+ sg->dma_address = addr;
+ sg->dma_length = sg->length;
+ }
+ return nents;
+}
+
+static void ehca_dma_unmap_sg(struct ib_device *dev, struct scatterlist *sg,
+ int nents, enum dma_data_direction direction)
+{
+ /* This is only a stub; nothing to be done here */
+}
+
+static u64 ehca_dma_address(struct ib_device *dev, struct scatterlist *sg)
+{
+ return sg->dma_address;
+}
+
+static unsigned int ehca_dma_len(struct ib_device *dev, struct scatterlist *sg)
+{
+ return sg->length;
+}
+
+static void ehca_dma_sync_single_for_cpu(struct ib_device *dev, u64 addr,
+ size_t size,
+ enum dma_data_direction dir)
+{
+ dma_sync_single_for_cpu(dev->dma_device, addr, size, dir);
+}
+
+static void ehca_dma_sync_single_for_device(struct ib_device *dev, u64 addr,
+ size_t size,
+ enum dma_data_direction dir)
+{
+ dma_sync_single_for_device(dev->dma_device, addr, size, dir);
+}
+
+static void *ehca_dma_alloc_coherent(struct ib_device *dev, size_t size,
+ u64 *dma_handle, gfp_t flag)
+{
+ struct page *p;
+ void *addr = NULL;
+ u64 dma_addr;
+
+ p = alloc_pages(flag, get_order(size));
+ if (p) {
+ addr = page_address(p);
+ dma_addr = ehca_map_vaddr(addr);
+ if (ehca_dma_mapping_error(dev, dma_addr)) {
+ free_pages((unsigned long)addr, get_order(size));
+ return NULL;
+ }
+ if (dma_handle)
+ *dma_handle = dma_addr;
+ return addr;
+ }
+ return NULL;
+}
+
+static void ehca_dma_free_coherent(struct ib_device *dev, size_t size,
+ void *cpu_addr, u64 dma_handle)
+{
+ if (cpu_addr && size)
+ free_pages((unsigned long)cpu_addr, get_order(size));
+}
+
+
+struct ib_dma_mapping_ops ehca_dma_mapping_ops = {
+ .mapping_error = ehca_dma_mapping_error,
+ .map_single = ehca_dma_map_single,
+ .unmap_single = ehca_dma_unmap_single,
+ .map_page = ehca_dma_map_page,
+ .unmap_page = ehca_dma_unmap_page,
+ .map_sg = ehca_dma_map_sg,
+ .unmap_sg = ehca_dma_unmap_sg,
+ .dma_address = ehca_dma_address,
+ .dma_len = ehca_dma_len,
+ .sync_single_for_cpu = ehca_dma_sync_single_for_cpu,
+ .sync_single_for_device = ehca_dma_sync_single_for_device,
+ .alloc_coherent = ehca_dma_alloc_coherent,
+ .free_coherent = ehca_dma_free_coherent,
+};
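
The busmap introduced above is a three-level (top/dir/ent) table keyed by memory-section index, and ehca_map_vaddr() walks it to translate a kernel virtual address into the contiguous bus space registered with the HCA. Below is a minimal stand-alone sketch of the index arithmetic only; it assumes SECTION_SIZE_BITS is 24 (16 MB sections, as on 64-bit powerpc) and is illustrative, not the driver code itself.

/* Hedged illustration of the ehca busmap index math; the constants mirror
 * the EHCA_* defines above, SECTION_SIZE_BITS=24 is an assumption. */
#include <stdio.h>

#define SECTSHIFT       24UL                     /* assumed SECTION_SIZE_BITS */
#define DIR_INDEX_SHIFT 13UL                     /* 8k entries per 64k block */
#define TOP_INDEX_SHIFT (DIR_INDEX_SHIFT * 2)
#define INDEX_MASK      ((1UL << DIR_INDEX_SHIFT) - 1)

int main(void)
{
	unsigned long abs_addr = 0x123456789aUL;     /* example absolute address */
	unsigned long section  = abs_addr >> SECTSHIFT;

	/* same decomposition ehca_map_vaddr() performs via ehca_calc_index() */
	unsigned long top = (section >> TOP_INDEX_SHIFT) & INDEX_MASK;
	unsigned long dir = (section >> DIR_INDEX_SHIFT) & INDEX_MASK;
	unsigned long idx =  section                     & INDEX_MASK;

	printf("section=%lu top=%lu dir=%lu idx=%lu\n", section, top, dir, idx);
	return 0;
}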
diff --git a/drivers/infiniband/hw/ehca/ehca_mrmw.h b/drivers/infiniband/hw/ehca/ehca_mrmw.h
index bc8f4e3..50d8b51 100644
--- a/drivers/infiniband/hw/ehca/ehca_mrmw.h
+++ b/drivers/infiniband/hw/ehca/ehca_mrmw.h
@@ -42,6 +42,11 @@
#ifndef _EHCA_MRMW_H_
#define _EHCA_MRMW_H_
+enum ehca_reg_type {
+ EHCA_REG_MR,
+ EHCA_REG_BUSMAP_MR
+};
+
int ehca_reg_mr(struct ehca_shca *shca,
struct ehca_mr *e_mr,
u64 *iova_start,
@@ -50,7 +55,8 @@
struct ehca_pd *e_pd,
struct ehca_mr_pginfo *pginfo,
u32 *lkey,
- u32 *rkey);
+ u32 *rkey,
+ enum ehca_reg_type reg_type);
int ehca_reg_mr_rpages(struct ehca_shca *shca,
struct ehca_mr *e_mr,
@@ -118,4 +124,9 @@
void ehca_mr_deletenew(struct ehca_mr *mr);
+int ehca_create_busmap(void);
+
+void ehca_destroy_busmap(void);
+
+extern struct ib_dma_mapping_ops ehca_dma_mapping_ops;
#endif /*_EHCA_MRMW_H_*/
diff --git a/drivers/infiniband/hw/mthca/mthca_mr.c b/drivers/infiniband/hw/mthca/mthca_mr.c
index d606edf..065b208 100644
--- a/drivers/infiniband/hw/mthca/mthca_mr.c
+++ b/drivers/infiniband/hw/mthca/mthca_mr.c
@@ -352,10 +352,14 @@
BUG_ON(!mtts);
+ dma_sync_single_for_cpu(&dev->pdev->dev, dma_handle,
+ list_len * sizeof (u64), DMA_TO_DEVICE);
+
for (i = 0; i < list_len; ++i)
mtts[i] = cpu_to_be64(buffer_list[i] | MTHCA_MTT_FLAG_PRESENT);
- dma_sync_single(&dev->pdev->dev, dma_handle, list_len * sizeof (u64), DMA_TO_DEVICE);
+ dma_sync_single_for_device(&dev->pdev->dev, dma_handle,
+ list_len * sizeof (u64), DMA_TO_DEVICE);
}
int mthca_write_mtt(struct mthca_dev *dev, struct mthca_mtt *mtt,
@@ -803,12 +807,15 @@
wmb();
+ dma_sync_single_for_cpu(&dev->pdev->dev, fmr->mem.arbel.dma_handle,
+ list_len * sizeof(u64), DMA_TO_DEVICE);
+
for (i = 0; i < list_len; ++i)
fmr->mem.arbel.mtts[i] = cpu_to_be64(page_list[i] |
MTHCA_MTT_FLAG_PRESENT);
- dma_sync_single(&dev->pdev->dev, fmr->mem.arbel.dma_handle,
- list_len * sizeof(u64), DMA_TO_DEVICE);
+ dma_sync_single_for_device(&dev->pdev->dev, fmr->mem.arbel.dma_handle,
+ list_len * sizeof(u64), DMA_TO_DEVICE);
fmr->mem.arbel.mpt->key = cpu_to_be32(key);
fmr->mem.arbel.mpt->lkey = cpu_to_be32(key);
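
The mthca change above replaces the old single dma_sync_single() call with a sync_for_cpu/sync_for_device pair that brackets the CPU writes to the MTT buffer. A hedged userspace sketch of that ownership bracketing follows; the sync helpers are stubs standing in for the kernel API, only the ordering is the point.

#include <stdio.h>
#include <stdint.h>

static void sync_for_cpu(void)    { puts("sync_for_cpu");    } /* stub */
static void sync_for_device(void) { puts("sync_for_device"); } /* stub */

int main(void)
{
	uint64_t mtts[4];
	uint64_t pages[4] = { 0x1000, 0x2000, 0x3000, 0x4000 };
	int i;

	sync_for_cpu();                  /* give the buffer to the CPU first */
	for (i = 0; i < 4; i++)
		mtts[i] = pages[i] | 1;  /* analogue of MTHCA_MTT_FLAG_PRESENT */
	sync_for_device();               /* hand ownership back to the device */

	printf("first entry: 0x%llx\n", (unsigned long long)mtts[0]);
	return 0;
}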
diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
index 11c7d66..114b802 100644
--- a/drivers/infiniband/hw/nes/nes_cm.c
+++ b/drivers/infiniband/hw/nes/nes_cm.c
@@ -472,6 +472,7 @@
static void nes_retrans_expired(struct nes_cm_node *cm_node)
{
+ struct iw_cm_id *cm_id = cm_node->cm_id;
switch (cm_node->state) {
case NES_CM_STATE_SYN_RCVD:
case NES_CM_STATE_CLOSING:
@@ -479,7 +480,9 @@
break;
case NES_CM_STATE_LAST_ACK:
case NES_CM_STATE_FIN_WAIT1:
- case NES_CM_STATE_MPAREJ_RCVD:
+ if (cm_node->cm_id)
+ cm_id->rem_ref(cm_id);
+ cm_node->state = NES_CM_STATE_CLOSED;
send_reset(cm_node, NULL);
break;
default:
@@ -1406,6 +1409,7 @@
case NES_CM_STATE_CLOSED:
drop_packet(skb);
break;
+ case NES_CM_STATE_FIN_WAIT1:
case NES_CM_STATE_LAST_ACK:
cm_node->cm_id->rem_ref(cm_node->cm_id);
case NES_CM_STATE_TIME_WAIT:
@@ -1413,8 +1417,6 @@
rem_ref_cm_node(cm_node->cm_core, cm_node);
drop_packet(skb);
break;
- case NES_CM_STATE_FIN_WAIT1:
- nes_debug(NES_DBG_CM, "Bad state %s[%u]\n", __func__, __LINE__);
default:
drop_packet(skb);
break;
diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
index 64d5cfd..21e0fd3 100644
--- a/drivers/infiniband/hw/nes/nes_verbs.c
+++ b/drivers/infiniband/hw/nes/nes_verbs.c
@@ -654,7 +654,7 @@
default:
props->max_qp_rd_atom = 0;
}
- props->max_qp_init_rd_atom = props->max_qp_wr;
+ props->max_qp_init_rd_atom = props->max_qp_rd_atom;
props->atomic_cap = IB_ATOMIC_NONE;
props->max_map_per_fmr = 1;
diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig
index 9b60b6b..7c8e712 100644
--- a/drivers/leds/Kconfig
+++ b/drivers/leds/Kconfig
@@ -75,6 +75,7 @@
depends on LEDS_CLASS && X86 && EXPERIMENTAL
help
This option enables support for the PCEngines ALIX.2 and ALIX.3 LEDs.
+ You have to set leds-alix2.force=1 for boards with Award BIOS.
config LEDS_H1940
tristate "LED Support for iPAQ H1940 device"
@@ -145,15 +146,16 @@
of_platform devices. For instance, LEDs which are listed in a "dts"
file.
-config LEDS_LP5521
- tristate "LED Support for the LP5521 LEDs"
+config LEDS_LP3944
+ tristate "LED Support for N.S. LP3944 (Fun Light) I2C chip"
depends on LEDS_CLASS && I2C
help
- If you say 'Y' here you get support for the National Semiconductor
- LP5521 LED driver used in n8x0 boards.
+ This option enables support for LEDs connected to the National
+ Semiconductor LP3944 Lighting Management Unit (LMU) also known as
+ Fun Light Chip.
- This driver can be built as a module by choosing 'M'. The module
- will be called leds-lp5521.
+ To compile this driver as a module, choose M here: the
+ module will be called leds-lp3944.
config LEDS_CLEVO_MAIL
tristate "Mail LED on Clevo notebook"
diff --git a/drivers/leds/Makefile b/drivers/leds/Makefile
index 2d41c4d..e8cdcf7 100644
--- a/drivers/leds/Makefile
+++ b/drivers/leds/Makefile
@@ -20,6 +20,7 @@
obj-$(CONFIG_LEDS_SUNFIRE) += leds-sunfire.o
obj-$(CONFIG_LEDS_PCA9532) += leds-pca9532.o
obj-$(CONFIG_LEDS_GPIO) += leds-gpio.o
+obj-$(CONFIG_LEDS_LP3944) += leds-lp3944.o
obj-$(CONFIG_LEDS_CLEVO_MAIL) += leds-clevo-mail.o
obj-$(CONFIG_LEDS_HP6XX) += leds-hp6xx.o
obj-$(CONFIG_LEDS_FSG) += leds-fsg.o
diff --git a/drivers/leds/leds-alix2.c b/drivers/leds/leds-alix2.c
index ddbd773..731d4ee 100644
--- a/drivers/leds/leds-alix2.c
+++ b/drivers/leds/leds-alix2.c
@@ -14,7 +14,7 @@
static int force = 0;
module_param(force, bool, 0444);
-MODULE_PARM_DESC(force, "Assume system has ALIX.2 style LEDs");
+MODULE_PARM_DESC(force, "Assume system has ALIX.2/ALIX.3 style LEDs");
struct alix_led {
struct led_classdev cdev;
@@ -155,6 +155,11 @@
goto out;
}
+ /* enable output on GPIO for LED 1,2,3 */
+ outl(1 << 6, 0x6104);
+ outl(1 << 9, 0x6184);
+ outl(1 << 11, 0x6184);
+
pdev = platform_device_register_simple(KBUILD_MODNAME, -1, NULL, 0);
if (!IS_ERR(pdev)) {
ret = platform_driver_probe(&alix_led_driver, alix_led_probe);
diff --git a/drivers/leds/leds-bd2802.c b/drivers/leds/leds-bd2802.c
index 4149ecb..779d7f2 100644
--- a/drivers/leds/leds-bd2802.c
+++ b/drivers/leds/leds-bd2802.c
@@ -97,6 +97,10 @@
enum led_ids led_id;
enum led_colors color;
enum led_bits state;
+
+ /* General attributes of RGB LEDs */
+ int wave_pattern;
+ int rgb_current;
};
@@ -254,7 +258,7 @@
bd2802_reset_cancel(led);
reg = bd2802_get_reg_addr(id, color, BD2802_REG_CURRENT1SETUP);
- bd2802_write_byte(led->client, reg, BD2802_CURRENT_032);
+ bd2802_write_byte(led->client, reg, led->rgb_current);
reg = bd2802_get_reg_addr(id, color, BD2802_REG_CURRENT2SETUP);
bd2802_write_byte(led->client, reg, BD2802_CURRENT_000);
reg = bd2802_get_reg_addr(id, color, BD2802_REG_WAVEPATTERN);
@@ -275,9 +279,9 @@
reg = bd2802_get_reg_addr(id, color, BD2802_REG_CURRENT1SETUP);
bd2802_write_byte(led->client, reg, BD2802_CURRENT_000);
reg = bd2802_get_reg_addr(id, color, BD2802_REG_CURRENT2SETUP);
- bd2802_write_byte(led->client, reg, BD2802_CURRENT_032);
+ bd2802_write_byte(led->client, reg, led->rgb_current);
reg = bd2802_get_reg_addr(id, color, BD2802_REG_WAVEPATTERN);
- bd2802_write_byte(led->client, reg, BD2802_PATTERN_HALF);
+ bd2802_write_byte(led->client, reg, led->wave_pattern);
bd2802_enable(led, id);
bd2802_update_state(led, id, color, BD2802_BLINK);
@@ -406,7 +410,7 @@
ret = device_create_file(&led->client->dev,
bd2802_addr_attributes[i]);
if (ret) {
- dev_err(&led->client->dev, "failed to sysfs file %s\n",
+ dev_err(&led->client->dev, "failed: sysfs file %s\n",
bd2802_addr_attributes[i]->attr.name);
goto failed_remove_files;
}
@@ -483,6 +487,52 @@
.store = bd2802_store_adv_conf,
};
+#define BD2802_CONTROL_ATTR(attr_name, name_str) \
+static ssize_t bd2802_show_##attr_name(struct device *dev, \
+ struct device_attribute *attr, char *buf) \
+{ \
+ struct bd2802_led *led = i2c_get_clientdata(to_i2c_client(dev));\
+ ssize_t ret; \
+ down_read(&led->rwsem); \
+ ret = sprintf(buf, "0x%02x\n", led->attr_name); \
+ up_read(&led->rwsem); \
+ return ret; \
+} \
+static ssize_t bd2802_store_##attr_name(struct device *dev, \
+ struct device_attribute *attr, const char *buf, size_t count) \
+{ \
+ struct bd2802_led *led = i2c_get_clientdata(to_i2c_client(dev));\
+ unsigned long val; \
+ int ret; \
+ if (!count) \
+ return -EINVAL; \
+ ret = strict_strtoul(buf, 16, &val); \
+ if (ret) \
+ return ret; \
+ down_write(&led->rwsem); \
+ led->attr_name = val; \
+ up_write(&led->rwsem); \
+ return count; \
+} \
+static struct device_attribute bd2802_##attr_name##_attr = { \
+ .attr = { \
+ .name = name_str, \
+ .mode = 0644, \
+ .owner = THIS_MODULE \
+ }, \
+ .show = bd2802_show_##attr_name, \
+ .store = bd2802_store_##attr_name, \
+};
+
+BD2802_CONTROL_ATTR(wave_pattern, "wave_pattern");
+BD2802_CONTROL_ATTR(rgb_current, "rgb_current");
+
+static struct device_attribute *bd2802_attributes[] = {
+ &bd2802_adv_conf_attr,
+ &bd2802_wave_pattern_attr,
+ &bd2802_rgb_current_attr,
+};
+
static void bd2802_led_work(struct work_struct *work)
{
struct bd2802_led *led = container_of(work, struct bd2802_led, work);
@@ -538,7 +588,6 @@
led->cdev_led1r.brightness = LED_OFF;
led->cdev_led1r.brightness_set = bd2802_set_led1r_brightness;
led->cdev_led1r.blink_set = bd2802_set_led1r_blink;
- led->cdev_led1r.flags |= LED_CORE_SUSPENDRESUME;
ret = led_classdev_register(&led->client->dev, &led->cdev_led1r);
if (ret < 0) {
@@ -551,7 +600,6 @@
led->cdev_led1g.brightness = LED_OFF;
led->cdev_led1g.brightness_set = bd2802_set_led1g_brightness;
led->cdev_led1g.blink_set = bd2802_set_led1g_blink;
- led->cdev_led1g.flags |= LED_CORE_SUSPENDRESUME;
ret = led_classdev_register(&led->client->dev, &led->cdev_led1g);
if (ret < 0) {
@@ -564,7 +612,6 @@
led->cdev_led1b.brightness = LED_OFF;
led->cdev_led1b.brightness_set = bd2802_set_led1b_brightness;
led->cdev_led1b.blink_set = bd2802_set_led1b_blink;
- led->cdev_led1b.flags |= LED_CORE_SUSPENDRESUME;
ret = led_classdev_register(&led->client->dev, &led->cdev_led1b);
if (ret < 0) {
@@ -577,7 +624,6 @@
led->cdev_led2r.brightness = LED_OFF;
led->cdev_led2r.brightness_set = bd2802_set_led2r_brightness;
led->cdev_led2r.blink_set = bd2802_set_led2r_blink;
- led->cdev_led2r.flags |= LED_CORE_SUSPENDRESUME;
ret = led_classdev_register(&led->client->dev, &led->cdev_led2r);
if (ret < 0) {
@@ -590,7 +636,6 @@
led->cdev_led2g.brightness = LED_OFF;
led->cdev_led2g.brightness_set = bd2802_set_led2g_brightness;
led->cdev_led2g.blink_set = bd2802_set_led2g_blink;
- led->cdev_led2g.flags |= LED_CORE_SUSPENDRESUME;
ret = led_classdev_register(&led->client->dev, &led->cdev_led2g);
if (ret < 0) {
@@ -640,7 +685,7 @@
{
struct bd2802_led *led;
struct bd2802_led_platform_data *pdata;
- int ret;
+ int ret, i;
led = kzalloc(sizeof(struct bd2802_led), GFP_KERNEL);
if (!led) {
@@ -670,13 +715,20 @@
/* To save the power, reset BD2802 after detecting */
gpio_set_value(led->pdata->reset_gpio, 0);
+ /* Default attributes */
+ led->wave_pattern = BD2802_PATTERN_HALF;
+ led->rgb_current = BD2802_CURRENT_032;
+
init_rwsem(&led->rwsem);
- ret = device_create_file(&client->dev, &bd2802_adv_conf_attr);
- if (ret) {
- dev_err(&client->dev, "failed to create sysfs file %s\n",
- bd2802_adv_conf_attr.attr.name);
- goto failed_free;
+ for (i = 0; i < ARRAY_SIZE(bd2802_attributes); i++) {
+ ret = device_create_file(&led->client->dev,
+ bd2802_attributes[i]);
+ if (ret) {
+ dev_err(&led->client->dev, "failed: sysfs file %s\n",
+ bd2802_attributes[i]->attr.name);
+ goto failed_unregister_dev_file;
+ }
}
ret = bd2802_register_led_classdev(led);
@@ -686,7 +738,8 @@
return 0;
failed_unregister_dev_file:
- device_remove_file(&client->dev, &bd2802_adv_conf_attr);
+ for (i--; i >= 0; i--)
+ device_remove_file(&led->client->dev, bd2802_attributes[i]);
failed_free:
i2c_set_clientdata(client, NULL);
kfree(led);
@@ -697,12 +750,14 @@
static int __exit bd2802_remove(struct i2c_client *client)
{
struct bd2802_led *led = i2c_get_clientdata(client);
+ int i;
- bd2802_unregister_led_classdev(led);
gpio_set_value(led->pdata->reset_gpio, 0);
+ bd2802_unregister_led_classdev(led);
if (led->adf_on)
bd2802_disable_adv_conf(led);
- device_remove_file(&client->dev, &bd2802_adv_conf_attr);
+ for (i = 0; i < ARRAY_SIZE(bd2802_attributes); i++)
+ device_remove_file(&led->client->dev, bd2802_attributes[i]);
i2c_set_clientdata(client, NULL);
kfree(led);
@@ -723,8 +778,7 @@
struct bd2802_led *led = i2c_get_clientdata(client);
if (!bd2802_is_all_off(led) || led->adf_on) {
- gpio_set_value(led->pdata->reset_gpio, 1);
- udelay(100);
+ bd2802_reset_cancel(led);
bd2802_restore_state(led);
}
@@ -762,4 +816,4 @@
MODULE_AUTHOR("Kim Kyuwon <q1.kim@samsung.com>");
MODULE_DESCRIPTION("BD2802 LED driver");
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/leds/leds-gpio.c b/drivers/leds/leds-gpio.c
index d210905..6b06638 100644
--- a/drivers/leds/leds-gpio.c
+++ b/drivers/leds/leds-gpio.c
@@ -76,7 +76,7 @@
struct gpio_led_data *led_dat, struct device *parent,
int (*blink_set)(unsigned, unsigned long *, unsigned long *))
{
- int ret;
+ int ret, state;
/* skip leds that aren't available */
if (!gpio_is_valid(template->gpio)) {
@@ -99,11 +99,15 @@
led_dat->cdev.blink_set = gpio_blink_set;
}
led_dat->cdev.brightness_set = gpio_led_set;
- led_dat->cdev.brightness = LED_OFF;
+ if (template->default_state == LEDS_GPIO_DEFSTATE_KEEP)
+ state = !!gpio_get_value(led_dat->gpio) ^ led_dat->active_low;
+ else
+ state = (template->default_state == LEDS_GPIO_DEFSTATE_ON);
+ led_dat->cdev.brightness = state ? LED_FULL : LED_OFF;
if (!template->retain_state_suspended)
led_dat->cdev.flags |= LED_CORE_SUSPENDRESUME;
- ret = gpio_direction_output(led_dat->gpio, led_dat->active_low);
+ ret = gpio_direction_output(led_dat->gpio, led_dat->active_low ^ state);
if (ret < 0)
goto err;
@@ -129,7 +133,7 @@
}
#ifdef CONFIG_LEDS_GPIO_PLATFORM
-static int gpio_led_probe(struct platform_device *pdev)
+static int __devinit gpio_led_probe(struct platform_device *pdev)
{
struct gpio_led_platform_data *pdata = pdev->dev.platform_data;
struct gpio_led_data *leds_data;
@@ -223,12 +227,22 @@
memset(&led, 0, sizeof(led));
for_each_child_of_node(np, child) {
enum of_gpio_flags flags;
+ const char *state;
led.gpio = of_get_gpio_flags(child, 0, &flags);
led.active_low = flags & OF_GPIO_ACTIVE_LOW;
led.name = of_get_property(child, "label", NULL) ? : child->name;
led.default_trigger =
of_get_property(child, "linux,default-trigger", NULL);
+ state = of_get_property(child, "default-state", NULL);
+ if (state) {
+ if (!strcmp(state, "keep"))
+ led.default_state = LEDS_GPIO_DEFSTATE_KEEP;
+ else if (!strcmp(state, "on"))
+ led.default_state = LEDS_GPIO_DEFSTATE_ON;
+ else
+ led.default_state = LEDS_GPIO_DEFSTATE_OFF;
+ }
ret = create_gpio_led(&led, &pdata->led_data[pdata->num_leds++],
&ofdev->dev, NULL);
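
The default-state handling added to leds-gpio above derives the initial brightness from the GPIO's current level, corrected for active-low wiring, so that "keep" preserves whatever state the firmware left behind. A small stand-alone sketch of that XOR, under the assumption that the semantics match the kernel code:

#include <stdio.h>

/* returns 1 when the LED should start logically on */
static int initial_state(int gpio_value, int active_low)
{
	return !!gpio_value ^ active_low;
}

int main(void)
{
	/* an active-low LED whose line reads 0 is currently lit -> keep it on */
	printf("%d\n", initial_state(0, 1));   /* 1 */
	printf("%d\n", initial_state(1, 1));   /* 0 */
	printf("%d\n", initial_state(1, 0));   /* 1 */
	return 0;
}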
diff --git a/drivers/leds/leds-lp3944.c b/drivers/leds/leds-lp3944.c
new file mode 100644
index 0000000..5946208
--- /dev/null
+++ b/drivers/leds/leds-lp3944.c
@@ -0,0 +1,466 @@
+/*
+ * leds-lp3944.c - driver for National Semiconductor LP3944 Funlight Chip
+ *
+ * Copyright (C) 2009 Antonio Ospite <ospite@studenti.unina.it>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+/*
+ * I2C driver for National Semiconductor LP3944 Funlight Chip
+ * http://www.national.com/pf/LP/LP3944.html
+ *
+ * This helper chip can drive up to 8 LEDs, with two programmable DIM modes;
+ * it could even be used as a GPIO expander, but this driver assumes it is
+ * used as an LED controller.
+ *
+ * The DIM modes are used to set _blink_ patterns for LEDs; a pattern is
+ * specified by supplying two parameters:
+ * - period: from 0s to 1.6s
+ * - duty cycle: percentage of the period the LED is on, from 0 to 100
+ *
+ * The LP3944 can be found on the Motorola A910 smartphone, where it drives
+ * the RGB LEDs, the camera flash light and the display backlights.
+ */
+
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/leds.h>
+#include <linux/mutex.h>
+#include <linux/workqueue.h>
+#include <linux/leds-lp3944.h>
+
+/* Read Only Registers */
+#define LP3944_REG_INPUT1 0x00 /* LEDs 0-7 InputRegister (Read Only) */
+#define LP3944_REG_REGISTER1 0x01 /* None (Read Only) */
+
+#define LP3944_REG_PSC0 0x02 /* Frequency Prescaler 0 (R/W) */
+#define LP3944_REG_PWM0 0x03 /* PWM Register 0 (R/W) */
+#define LP3944_REG_PSC1 0x04 /* Frequency Prescaler 1 (R/W) */
+#define LP3944_REG_PWM1 0x05 /* PWM Register 1 (R/W) */
+#define LP3944_REG_LS0 0x06 /* LEDs 0-3 Selector (R/W) */
+#define LP3944_REG_LS1 0x07 /* LEDs 4-7 Selector (R/W) */
+
+/* These registers are not used to control LEDs in the LP3944; they can store
+ * arbitrary values which the chip will ignore.
+ */
+#define LP3944_REG_REGISTER8 0x08
+#define LP3944_REG_REGISTER9 0x09
+
+#define LP3944_DIM0 0
+#define LP3944_DIM1 1
+
+/* period in ms */
+#define LP3944_PERIOD_MIN 0
+#define LP3944_PERIOD_MAX 1600
+
+/* duty cycle is a percentage */
+#define LP3944_DUTY_CYCLE_MIN 0
+#define LP3944_DUTY_CYCLE_MAX 100
+
+#define ldev_to_led(c) container_of(c, struct lp3944_led_data, ldev)
+
+/* Saved data */
+struct lp3944_led_data {
+ u8 id;
+ enum lp3944_type type;
+ enum lp3944_status status;
+ struct led_classdev ldev;
+ struct i2c_client *client;
+ struct work_struct work;
+};
+
+struct lp3944_data {
+ struct mutex lock;
+ struct i2c_client *client;
+ struct lp3944_led_data leds[LP3944_LEDS_MAX];
+};
+
+static int lp3944_reg_read(struct i2c_client *client, u8 reg, u8 *value)
+{
+ int tmp;
+
+ tmp = i2c_smbus_read_byte_data(client, reg);
+ if (tmp < 0)
+ return -EINVAL;
+
+ *value = tmp;
+
+ return 0;
+}
+
+static int lp3944_reg_write(struct i2c_client *client, u8 reg, u8 value)
+{
+ return i2c_smbus_write_byte_data(client, reg, value);
+}
+
+/**
+ * Set the period for DIM status
+ *
+ * @client: the i2c client
+ * @dim: either LP3944_DIM0 or LP3944_DIM1
+ * @period: period of a blink, that is an on/off cycle, expressed in ms.
+ */
+static int lp3944_dim_set_period(struct i2c_client *client, u8 dim, u16 period)
+{
+ u8 psc_reg;
+ u8 psc_value;
+ int err;
+
+ if (dim == LP3944_DIM0)
+ psc_reg = LP3944_REG_PSC0;
+ else if (dim == LP3944_DIM1)
+ psc_reg = LP3944_REG_PSC1;
+ else
+ return -EINVAL;
+
+ /* Convert period to Prescaler value */
+ if (period > LP3944_PERIOD_MAX)
+ return -EINVAL;
+
+ psc_value = (period * 255) / LP3944_PERIOD_MAX;
+
+ err = lp3944_reg_write(client, psc_reg, psc_value);
+
+ return err;
+}
+
+/**
+ * Set the duty cycle for DIM status
+ *
+ * @client: the i2c client
+ * @dim: either LP3944_DIM0 or LP3944_DIM1
+ * @duty_cycle: percentage of a period during which an LED is ON
+ */
+static int lp3944_dim_set_dutycycle(struct i2c_client *client, u8 dim,
+ u8 duty_cycle)
+{
+ u8 pwm_reg;
+ u8 pwm_value;
+ int err;
+
+ if (dim == LP3944_DIM0)
+ pwm_reg = LP3944_REG_PWM0;
+ else if (dim == LP3944_DIM1)
+ pwm_reg = LP3944_REG_PWM1;
+ else
+ return -EINVAL;
+
+ /* Convert duty cycle to PWM value */
+ if (duty_cycle > LP3944_DUTY_CYCLE_MAX)
+ return -EINVAL;
+
+ pwm_value = (duty_cycle * 255) / LP3944_DUTY_CYCLE_MAX;
+
+ err = lp3944_reg_write(client, pwm_reg, pwm_value);
+
+ return err;
+}
+
+/**
+ * Set the led status
+ *
+ * @led: a lp3944_led_data structure
+ * @status: one of LP3944_LED_STATUS_OFF
+ * LP3944_LED_STATUS_ON
+ * LP3944_LED_STATUS_DIM0
+ * LP3944_LED_STATUS_DIM1
+ */
+static int lp3944_led_set(struct lp3944_led_data *led, u8 status)
+{
+ struct lp3944_data *data = i2c_get_clientdata(led->client);
+ u8 id = led->id;
+ u8 reg;
+ u8 val = 0;
+ int err;
+
+ dev_dbg(&led->client->dev, "%s: %s, status before normalization:%d\n",
+ __func__, led->ldev.name, status);
+
+ switch (id) {
+ case LP3944_LED0:
+ case LP3944_LED1:
+ case LP3944_LED2:
+ case LP3944_LED3:
+ reg = LP3944_REG_LS0;
+ break;
+ case LP3944_LED4:
+ case LP3944_LED5:
+ case LP3944_LED6:
+ case LP3944_LED7:
+ id -= LP3944_LED4;
+ reg = LP3944_REG_LS1;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (status > LP3944_LED_STATUS_DIM1)
+ return -EINVAL;
+
+ /* invert only 0 and 1 and leave the other values unchanged;
+ * remember we are abusing status to set blink patterns
+ */
+ if (led->type == LP3944_LED_TYPE_LED_INVERTED && status < 2)
+ status = 1 - status;
+
+ mutex_lock(&data->lock);
+ lp3944_reg_read(led->client, reg, &val);
+
+ val &= ~(LP3944_LED_STATUS_MASK << (id << 1));
+ val |= (status << (id << 1));
+
+ dev_dbg(&led->client->dev, "%s: %s, reg:%d id:%d status:%d val:%#x\n",
+ __func__, led->ldev.name, reg, id, status, val);
+
+ /* set led status */
+ err = lp3944_reg_write(led->client, reg, val);
+ mutex_unlock(&data->lock);
+
+ return err;
+}
+
+static int lp3944_led_set_blink(struct led_classdev *led_cdev,
+ unsigned long *delay_on,
+ unsigned long *delay_off)
+{
+ struct lp3944_led_data *led = ldev_to_led(led_cdev);
+ u16 period;
+ u8 duty_cycle;
+ int err;
+
+ /* units are in ms */
+ if (*delay_on + *delay_off > LP3944_PERIOD_MAX)
+ return -EINVAL;
+
+ if (*delay_on == 0 && *delay_off == 0) {
+ /* Special case: the leds subsystem requires a default user
+ * friendly blink pattern for the LED. Let's blink the led
+ * slowly (1Hz).
+ */
+ *delay_on = 500;
+ *delay_off = 500;
+ }
+
+ period = (*delay_on) + (*delay_off);
+
+ /* duty_cycle is the percentage of the period during which the LED is ON */
+ duty_cycle = 100 * (*delay_on) / period;
+
+ /* invert the duty cycle for inverted LEDs; this has the same effect
+ * as swapping delay_on and delay_off
+ */
+ if (led->type == LP3944_LED_TYPE_LED_INVERTED)
+ duty_cycle = 100 - duty_cycle;
+
+ /* NOTE: always using the first DIM mode; this means that all LEDs
+ * will have the same blinking pattern.
+ *
+ * We could find a way later to have two leds blinking in hardware
+ * with different patterns at the same time, falling back to software
+ * control for the other ones.
+ */
+ err = lp3944_dim_set_period(led->client, LP3944_DIM0, period);
+ if (err)
+ return err;
+
+ err = lp3944_dim_set_dutycycle(led->client, LP3944_DIM0, duty_cycle);
+ if (err)
+ return err;
+
+ dev_dbg(&led->client->dev, "%s: OK hardware accelerated blink!\n",
+ __func__);
+
+ led->status = LP3944_LED_STATUS_DIM0;
+ schedule_work(&led->work);
+
+ return 0;
+}
+
+static void lp3944_led_set_brightness(struct led_classdev *led_cdev,
+ enum led_brightness brightness)
+{
+ struct lp3944_led_data *led = ldev_to_led(led_cdev);
+
+ dev_dbg(&led->client->dev, "%s: %s, %d\n",
+ __func__, led_cdev->name, brightness);
+
+ led->status = brightness;
+ schedule_work(&led->work);
+}
+
+static void lp3944_led_work(struct work_struct *work)
+{
+ struct lp3944_led_data *led;
+
+ led = container_of(work, struct lp3944_led_data, work);
+ lp3944_led_set(led, led->status);
+}
+
+static int lp3944_configure(struct i2c_client *client,
+ struct lp3944_data *data,
+ struct lp3944_platform_data *pdata)
+{
+ int i, err = 0;
+
+ for (i = 0; i < pdata->leds_size; i++) {
+ struct lp3944_led *pled = &pdata->leds[i];
+ struct lp3944_led_data *led = &data->leds[i];
+ led->client = client;
+ led->id = i;
+
+ switch (pled->type) {
+
+ case LP3944_LED_TYPE_LED:
+ case LP3944_LED_TYPE_LED_INVERTED:
+ led->type = pled->type;
+ led->status = pled->status;
+ led->ldev.name = pled->name;
+ led->ldev.max_brightness = 1;
+ led->ldev.brightness_set = lp3944_led_set_brightness;
+ led->ldev.blink_set = lp3944_led_set_blink;
+ led->ldev.flags = LED_CORE_SUSPENDRESUME;
+
+ INIT_WORK(&led->work, lp3944_led_work);
+ err = led_classdev_register(&client->dev, &led->ldev);
+ if (err < 0) {
+ dev_err(&client->dev,
+ "couldn't register LED %s\n",
+ led->ldev.name);
+ goto exit;
+ }
+
+ /* to expose the default value to userspace */
+ led->ldev.brightness = led->status;
+
+ /* Set the default led status */
+ err = lp3944_led_set(led, led->status);
+ if (err < 0) {
+ dev_err(&client->dev,
+ "%s couldn't set STATUS %d\n",
+ led->ldev.name, led->status);
+ goto exit;
+ }
+ break;
+
+ case LP3944_LED_TYPE_NONE:
+ default:
+ break;
+
+ }
+ }
+ return 0;
+
+exit:
+ if (i > 0)
+ for (i = i - 1; i >= 0; i--)
+ switch (pdata->leds[i].type) {
+
+ case LP3944_LED_TYPE_LED:
+ case LP3944_LED_TYPE_LED_INVERTED:
+ led_classdev_unregister(&data->leds[i].ldev);
+ cancel_work_sync(&data->leds[i].work);
+ break;
+
+ case LP3944_LED_TYPE_NONE:
+ default:
+ break;
+ }
+
+ return err;
+}
+
+static int __devinit lp3944_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct lp3944_platform_data *lp3944_pdata = client->dev.platform_data;
+ struct lp3944_data *data;
+
+ if (lp3944_pdata == NULL) {
+ dev_err(&client->dev, "no platform data\n");
+ return -EINVAL;
+ }
+
+ /* Let's see whether this adapter can support what we need. */
+ if (!i2c_check_functionality(client->adapter,
+ I2C_FUNC_SMBUS_BYTE_DATA)) {
+ dev_err(&client->dev, "insufficient functionality!\n");
+ return -ENODEV;
+ }
+
+ data = kzalloc(sizeof(struct lp3944_data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->client = client;
+ i2c_set_clientdata(client, data);
+
+ mutex_init(&data->lock);
+
+ dev_info(&client->dev, "lp3944 enabled\n");
+
+ lp3944_configure(client, data, lp3944_pdata);
+ return 0;
+}
+
+static int __devexit lp3944_remove(struct i2c_client *client)
+{
+ struct lp3944_platform_data *pdata = client->dev.platform_data;
+ struct lp3944_data *data = i2c_get_clientdata(client);
+ int i;
+
+ for (i = 0; i < pdata->leds_size; i++)
+ switch (data->leds[i].type) {
+ case LP3944_LED_TYPE_LED:
+ case LP3944_LED_TYPE_LED_INVERTED:
+ led_classdev_unregister(&data->leds[i].ldev);
+ cancel_work_sync(&data->leds[i].work);
+ break;
+
+ case LP3944_LED_TYPE_NONE:
+ default:
+ break;
+ }
+
+ kfree(data);
+ i2c_set_clientdata(client, NULL);
+
+ return 0;
+}
+
+/* lp3944 i2c driver struct */
+static const struct i2c_device_id lp3944_id[] = {
+ {"lp3944", 0},
+ {}
+};
+
+MODULE_DEVICE_TABLE(i2c, lp3944_id);
+
+static struct i2c_driver lp3944_driver = {
+ .driver = {
+ .name = "lp3944",
+ },
+ .probe = lp3944_probe,
+ .remove = __devexit_p(lp3944_remove),
+ .id_table = lp3944_id,
+};
+
+static int __init lp3944_module_init(void)
+{
+ return i2c_add_driver(&lp3944_driver);
+}
+
+static void __exit lp3944_module_exit(void)
+{
+ i2c_del_driver(&lp3944_driver);
+}
+
+module_init(lp3944_module_init);
+module_exit(lp3944_module_exit);
+
+MODULE_AUTHOR("Antonio Ospite <ospite@studenti.unina.it>");
+MODULE_DESCRIPTION("LP3944 Fun Light Chip");
+MODULE_LICENSE("GPL");
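
The new lp3944 driver converts the blink period and duty cycle requested through the LED class interface into the chip's 8-bit prescaler and PWM register values. A hedged stand-alone sketch of that conversion, mirroring lp3944_dim_set_period() and lp3944_dim_set_dutycycle() above:

/* Illustrative only; the 255 scale factors come from the 8-bit PSC/PWM
 * registers and the limits mirror the LP3944_* defines above. */
#include <stdio.h>

#define LP3944_PERIOD_MAX     1600   /* ms */
#define LP3944_DUTY_CYCLE_MAX 100    /* percent */

int main(void)
{
	unsigned int delay_on = 500, delay_off = 500;    /* 1 Hz default blink */
	unsigned int period = delay_on + delay_off;
	unsigned int duty_cycle = 100 * delay_on / period;

	unsigned int psc = period * 255 / LP3944_PERIOD_MAX;
	unsigned int pwm = duty_cycle * 255 / LP3944_DUTY_CYCLE_MAX;

	printf("PSC0=0x%02x PWM0=0x%02x\n", psc, pwm);   /* 0x9f, 0x7f */
	return 0;
}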
diff --git a/drivers/leds/leds-pca9532.c b/drivers/leds/leds-pca9532.c
index 3937244..dba8921 100644
--- a/drivers/leds/leds-pca9532.c
+++ b/drivers/leds/leds-pca9532.c
@@ -35,7 +35,7 @@
struct pca9532_led leds[16];
struct mutex update_lock;
struct input_dev *idev;
- struct work_struct work;
+ struct work_struct work;
u8 pwm[2];
u8 psc[2];
};
@@ -87,14 +87,14 @@
if (b > 0xFF)
return -EINVAL;
data->pwm[pwm] = b;
- data->psc[pwm] = blink;
- return 0;
+ data->psc[pwm] = blink;
+ return 0;
}
static int pca9532_setpwm(struct i2c_client *client, int pwm)
{
- struct pca9532_data *data = i2c_get_clientdata(client);
- mutex_lock(&data->update_lock);
+ struct pca9532_data *data = i2c_get_clientdata(client);
+ mutex_lock(&data->update_lock);
i2c_smbus_write_byte_data(client, PCA9532_REG_PWM(pwm),
data->pwm[pwm]);
i2c_smbus_write_byte_data(client, PCA9532_REG_PSC(pwm),
@@ -132,11 +132,11 @@
led->state = PCA9532_ON;
else {
led->state = PCA9532_PWM0; /* Thecus: hardcode one pwm */
- err = pca9532_calcpwm(led->client, 0, 0, value);
+ err = pca9532_calcpwm(led->client, 0, 0, value);
if (err)
return; /* XXX: led api doesn't allow error code? */
}
- schedule_work(&led->work);
+ schedule_work(&led->work);
}
static int pca9532_set_blink(struct led_classdev *led_cdev,
@@ -145,7 +145,7 @@
struct pca9532_led *led = ldev_to_led(led_cdev);
struct i2c_client *client = led->client;
int psc;
- int err = 0;
+ int err = 0;
if (*delay_on == 0 && *delay_off == 0) {
/* led subsystem ask us for a blink rate */
@@ -157,11 +157,11 @@
/* Thecus specific: only use PSC/PWM 0 */
psc = (*delay_on * 152-1)/1000;
- err = pca9532_calcpwm(client, 0, psc, led_cdev->brightness);
- if (err)
- return err;
- schedule_work(&led->work);
- return 0;
+ err = pca9532_calcpwm(client, 0, psc, led_cdev->brightness);
+ if (err)
+ return err;
+ schedule_work(&led->work);
+ return 0;
}
static int pca9532_event(struct input_dev *dev, unsigned int type,
@@ -178,15 +178,15 @@
else
data->pwm[1] = 0;
- schedule_work(&data->work);
+ schedule_work(&data->work);
- return 0;
+ return 0;
}
static void pca9532_input_work(struct work_struct *work)
{
- struct pca9532_data *data;
- data = container_of(work, struct pca9532_data, work);
+ struct pca9532_data *data;
+ data = container_of(work, struct pca9532_data, work);
mutex_lock(&data->update_lock);
i2c_smbus_write_byte_data(data->client, PCA9532_REG_PWM(1),
data->pwm[1]);
@@ -195,11 +195,11 @@
static void pca9532_led_work(struct work_struct *work)
{
- struct pca9532_led *led;
- led = container_of(work, struct pca9532_led, work);
- if (led->state == PCA9532_PWM0)
- pca9532_setpwm(led->client, 0);
- pca9532_setled(led);
+ struct pca9532_led *led;
+ led = container_of(work, struct pca9532_led, work);
+ if (led->state == PCA9532_PWM0)
+ pca9532_setpwm(led->client, 0);
+ pca9532_setled(led);
}
static int pca9532_configure(struct i2c_client *client,
@@ -232,7 +232,7 @@
led->ldev.brightness = LED_OFF;
led->ldev.brightness_set = pca9532_set_brightness;
led->ldev.blink_set = pca9532_set_blink;
- INIT_WORK(&led->work, pca9532_led_work);
+ INIT_WORK(&led->work, pca9532_led_work);
err = led_classdev_register(&client->dev, &led->ldev);
if (err < 0) {
dev_err(&client->dev,
@@ -262,11 +262,11 @@
BIT_MASK(SND_TONE);
data->idev->event = pca9532_event;
input_set_drvdata(data->idev, data);
- INIT_WORK(&data->work, pca9532_input_work);
+ INIT_WORK(&data->work, pca9532_input_work);
err = input_register_device(data->idev);
if (err) {
input_free_device(data->idev);
- cancel_work_sync(&data->work);
+ cancel_work_sync(&data->work);
data->idev = NULL;
goto exit;
}
@@ -283,13 +283,13 @@
break;
case PCA9532_TYPE_LED:
led_classdev_unregister(&data->leds[i].ldev);
- cancel_work_sync(&data->leds[i].work);
+ cancel_work_sync(&data->leds[i].work);
break;
case PCA9532_TYPE_N2100_BEEP:
if (data->idev != NULL) {
input_unregister_device(data->idev);
input_free_device(data->idev);
- cancel_work_sync(&data->work);
+ cancel_work_sync(&data->work);
data->idev = NULL;
}
break;
@@ -340,13 +340,13 @@
break;
case PCA9532_TYPE_LED:
led_classdev_unregister(&data->leds[i].ldev);
- cancel_work_sync(&data->leds[i].work);
+ cancel_work_sync(&data->leds[i].work);
break;
case PCA9532_TYPE_N2100_BEEP:
if (data->idev != NULL) {
input_unregister_device(data->idev);
input_free_device(data->idev);
- cancel_work_sync(&data->work);
+ cancel_work_sync(&data->work);
data->idev = NULL;
}
break;
diff --git a/drivers/macintosh/macio_asic.c b/drivers/macintosh/macio_asic.c
index 6e149f4..a0f6838 100644
--- a/drivers/macintosh/macio_asic.c
+++ b/drivers/macintosh/macio_asic.c
@@ -378,6 +378,17 @@
dev->ofdev.dev.bus = &macio_bus_type;
dev->ofdev.dev.release = macio_release_dev;
+#ifdef CONFIG_PCI
+ /* Set the DMA ops to the ones from the PCI device; this could be
+ * fishy if we didn't know that on PowerMac it is always either
+ * direct ops or iommu ops that will work fine
+ */
+ dev->ofdev.dev.archdata.dma_ops =
+ chip->lbus.pdev->dev.archdata.dma_ops;
+ dev->ofdev.dev.archdata.dma_data =
+ chip->lbus.pdev->dev.archdata.dma_data;
+#endif /* CONFIG_PCI */
+
#ifdef DEBUG
printk("preparing mdev @%p, ofdev @%p, dev @%p, kobj @%p\n",
dev, &dev->ofdev, &dev->ofdev.dev, &dev->ofdev.dev.kobj);
diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig
index 36e0675..020f957 100644
--- a/drivers/md/Kconfig
+++ b/drivers/md/Kconfig
@@ -231,6 +231,17 @@
Allow volume managers to mirror logical volumes, also
needed for live data migration tools such as 'pvmove'.
+config DM_LOG_USERSPACE
+ tristate "Mirror userspace logging (EXPERIMENTAL)"
+ depends on DM_MIRROR && EXPERIMENTAL && NET
+ select CONNECTOR
+ ---help---
+ The userspace logging module provides a mechanism for
+ relaying the dm-dirty-log API to userspace. Log designs
+ which are more suited to userspace implementation (e.g.
+ shared storage logs) or experimental logs can be implemented
+ by leveraging this framework.
+
config DM_ZERO
tristate "Zero target"
depends on BLK_DEV_DM
@@ -249,6 +260,25 @@
---help---
Allow volume managers to support multipath hardware.
+config DM_MULTIPATH_QL
+ tristate "I/O Path Selector based on the number of in-flight I/Os"
+ depends on DM_MULTIPATH
+ ---help---
+ This path selector is a dynamic load balancer which selects
+ the path with the least number of in-flight I/Os.
+
+ If unsure, say N.
+
+config DM_MULTIPATH_ST
+ tristate "I/O Path Selector based on the service time"
+ depends on DM_MULTIPATH
+ ---help---
+ This path selector is a dynamic load balancer which selects
+ the path expected to complete the incoming I/O in the shortest
+ time.
+
+ If unsure, say N.
+
config DM_DELAY
tristate "I/O delaying target (EXPERIMENTAL)"
depends on BLK_DEV_DM && EXPERIMENTAL
diff --git a/drivers/md/Makefile b/drivers/md/Makefile
index 45cc595..1dc4185 100644
--- a/drivers/md/Makefile
+++ b/drivers/md/Makefile
@@ -8,6 +8,8 @@
dm-snapshot-y += dm-snap.o dm-exception-store.o dm-snap-transient.o \
dm-snap-persistent.o
dm-mirror-y += dm-raid1.o
+dm-log-userspace-y \
+ += dm-log-userspace-base.o dm-log-userspace-transfer.o
md-mod-y += md.o bitmap.o
raid456-y += raid5.o
raid6_pq-y += raid6algos.o raid6recov.o raid6tables.o \
@@ -36,8 +38,11 @@
obj-$(CONFIG_DM_CRYPT) += dm-crypt.o
obj-$(CONFIG_DM_DELAY) += dm-delay.o
obj-$(CONFIG_DM_MULTIPATH) += dm-multipath.o dm-round-robin.o
+obj-$(CONFIG_DM_MULTIPATH_QL) += dm-queue-length.o
+obj-$(CONFIG_DM_MULTIPATH_ST) += dm-service-time.o
obj-$(CONFIG_DM_SNAPSHOT) += dm-snapshot.o
obj-$(CONFIG_DM_MIRROR) += dm-mirror.o dm-log.o dm-region-hash.o
+obj-$(CONFIG_DM_LOG_USERSPACE) += dm-log-userspace.o
obj-$(CONFIG_DM_ZERO) += dm-zero.o
quiet_cmd_unroll = UNROLL $@
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 53394e8..9933eb8 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -1132,6 +1132,7 @@
goto bad_crypt_queue;
}
+ ti->num_flush_requests = 1;
ti->private = cc;
return 0;
@@ -1189,6 +1190,13 @@
union map_info *map_context)
{
struct dm_crypt_io *io;
+ struct crypt_config *cc;
+
+ if (unlikely(bio_empty_barrier(bio))) {
+ cc = ti->private;
+ bio->bi_bdev = cc->dev->bdev;
+ return DM_MAPIO_REMAPPED;
+ }
io = crypt_io_alloc(ti, bio, bio->bi_sector - ti->begin);
@@ -1305,9 +1313,17 @@
return min(max_size, q->merge_bvec_fn(q, bvm, biovec));
}
+static int crypt_iterate_devices(struct dm_target *ti,
+ iterate_devices_callout_fn fn, void *data)
+{
+ struct crypt_config *cc = ti->private;
+
+ return fn(ti, cc->dev, cc->start, data);
+}
+
static struct target_type crypt_target = {
.name = "crypt",
- .version= {1, 6, 0},
+ .version = {1, 7, 0},
.module = THIS_MODULE,
.ctr = crypt_ctr,
.dtr = crypt_dtr,
@@ -1318,6 +1334,7 @@
.resume = crypt_resume,
.message = crypt_message,
.merge = crypt_merge,
+ .iterate_devices = crypt_iterate_devices,
};
static int __init dm_crypt_init(void)
diff --git a/drivers/md/dm-delay.c b/drivers/md/dm-delay.c
index 559dbb5..4e5b843 100644
--- a/drivers/md/dm-delay.c
+++ b/drivers/md/dm-delay.c
@@ -197,6 +197,7 @@
mutex_init(&dc->timer_lock);
atomic_set(&dc->may_delay, 1);
+ ti->num_flush_requests = 1;
ti->private = dc;
return 0;
@@ -278,8 +279,9 @@
if ((bio_data_dir(bio) == WRITE) && (dc->dev_write)) {
bio->bi_bdev = dc->dev_write->bdev;
- bio->bi_sector = dc->start_write +
- (bio->bi_sector - ti->begin);
+ if (bio_sectors(bio))
+ bio->bi_sector = dc->start_write +
+ (bio->bi_sector - ti->begin);
return delay_bio(dc, dc->write_delay, bio);
}
@@ -316,9 +318,26 @@
return 0;
}
+static int delay_iterate_devices(struct dm_target *ti,
+ iterate_devices_callout_fn fn, void *data)
+{
+ struct delay_c *dc = ti->private;
+ int ret = 0;
+
+ ret = fn(ti, dc->dev_read, dc->start_read, data);
+ if (ret)
+ goto out;
+
+ if (dc->dev_write)
+ ret = fn(ti, dc->dev_write, dc->start_write, data);
+
+out:
+ return ret;
+}
+
static struct target_type delay_target = {
.name = "delay",
- .version = {1, 0, 2},
+ .version = {1, 1, 0},
.module = THIS_MODULE,
.ctr = delay_ctr,
.dtr = delay_dtr,
@@ -326,6 +345,7 @@
.presuspend = delay_presuspend,
.resume = delay_resume,
.status = delay_status,
+ .iterate_devices = delay_iterate_devices,
};
static int __init dm_delay_init(void)
diff --git a/drivers/md/dm-exception-store.c b/drivers/md/dm-exception-store.c
index 75d8081..c3ae515 100644
--- a/drivers/md/dm-exception-store.c
+++ b/drivers/md/dm-exception-store.c
@@ -216,7 +216,7 @@
return -EINVAL;
}
- type = get_type(argv[1]);
+ type = get_type(&persistent);
if (!type) {
ti->error = "Exception store type not recognised";
r = -EINVAL;
diff --git a/drivers/md/dm-exception-store.h b/drivers/md/dm-exception-store.h
index c92701d..2442c8c 100644
--- a/drivers/md/dm-exception-store.h
+++ b/drivers/md/dm-exception-store.h
@@ -156,7 +156,7 @@
*/
static inline sector_t get_dev_size(struct block_device *bdev)
{
- return bdev->bd_inode->i_size >> SECTOR_SHIFT;
+ return i_size_read(bdev->bd_inode) >> SECTOR_SHIFT;
}
static inline chunk_t sector_to_chunk(struct dm_exception_store *store,
diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c
index e73aabd..3a2e6a2 100644
--- a/drivers/md/dm-io.c
+++ b/drivers/md/dm-io.c
@@ -22,6 +22,7 @@
/* FIXME: can we shrink this ? */
struct io {
unsigned long error_bits;
+ unsigned long eopnotsupp_bits;
atomic_t count;
struct task_struct *sleeper;
struct dm_io_client *client;
@@ -107,8 +108,11 @@
*---------------------------------------------------------------*/
static void dec_count(struct io *io, unsigned int region, int error)
{
- if (error)
+ if (error) {
set_bit(region, &io->error_bits);
+ if (error == -EOPNOTSUPP)
+ set_bit(region, &io->eopnotsupp_bits);
+ }
if (atomic_dec_and_test(&io->count)) {
if (io->sleeper)
@@ -360,7 +364,9 @@
return -EIO;
}
+retry:
io.error_bits = 0;
+ io.eopnotsupp_bits = 0;
atomic_set(&io.count, 1); /* see dispatch_io() */
io.sleeper = current;
io.client = client;
@@ -377,6 +383,11 @@
}
set_current_state(TASK_RUNNING);
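+ /* The device rejected a barrier with -EOPNOTSUPP: retry the whole I/O without the barrier flag. */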
+ if (io.eopnotsupp_bits && (rw & (1 << BIO_RW_BARRIER))) {
+ rw &= ~(1 << BIO_RW_BARRIER);
+ goto retry;
+ }
+
if (error_bits)
*error_bits = io.error_bits;
@@ -397,6 +408,7 @@
io = mempool_alloc(client->pool, GFP_NOIO);
io->error_bits = 0;
+ io->eopnotsupp_bits = 0;
atomic_set(&io->count, 1); /* see dispatch_io() */
io->sleeper = NULL;
io->client = client;
diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
index 1128d3f..7f77f18 100644
--- a/drivers/md/dm-ioctl.c
+++ b/drivers/md/dm-ioctl.c
@@ -276,7 +276,7 @@
up_write(&_hash_lock);
}
-static int dm_hash_rename(const char *old, const char *new)
+static int dm_hash_rename(uint32_t cookie, const char *old, const char *new)
{
char *new_name, *old_name;
struct hash_cell *hc;
@@ -333,7 +333,7 @@
dm_table_put(table);
}
- dm_kobject_uevent(hc->md);
+ dm_kobject_uevent(hc->md, KOBJ_CHANGE, cookie);
dm_put(hc->md);
up_write(&_hash_lock);
@@ -680,6 +680,9 @@
__hash_remove(hc);
up_write(&_hash_lock);
+
+ dm_kobject_uevent(md, KOBJ_REMOVE, param->event_nr);
+
dm_put(md);
param->data_size = 0;
return 0;
@@ -715,7 +718,7 @@
return r;
param->data_size = 0;
- return dm_hash_rename(param->name, new_name);
+ return dm_hash_rename(param->event_nr, param->name, new_name);
}
static int dev_set_geometry(struct dm_ioctl *param, size_t param_size)
@@ -842,8 +845,11 @@
if (dm_suspended(md))
r = dm_resume(md);
- if (!r)
+
+ if (!r) {
+ dm_kobject_uevent(md, KOBJ_CHANGE, param->event_nr);
r = __dev_status(md, param);
+ }
dm_put(md);
return r;
@@ -1044,6 +1050,12 @@
next = spec->next;
}
+ r = dm_table_set_type(table);
+ if (r) {
+ DMWARN("unable to set table type");
+ return r;
+ }
+
return dm_table_complete(table);
}
@@ -1089,6 +1101,13 @@
goto out;
}
+ r = dm_table_alloc_md_mempools(t);
+ if (r) {
+ DMWARN("unable to allocate mempools for this table");
+ dm_table_destroy(t);
+ goto out;
+ }
+
down_write(&_hash_lock);
hc = dm_get_mdptr(md);
if (!hc || hc->md != md) {
diff --git a/drivers/md/dm-linear.c b/drivers/md/dm-linear.c
index 79fb53e..9184b6de 100644
--- a/drivers/md/dm-linear.c
+++ b/drivers/md/dm-linear.c
@@ -53,6 +53,7 @@
goto bad;
}
+ ti->num_flush_requests = 1;
ti->private = lc;
return 0;
@@ -81,7 +82,8 @@
struct linear_c *lc = ti->private;
bio->bi_bdev = lc->dev->bdev;
- bio->bi_sector = linear_map_sector(ti, bio->bi_sector);
+ if (bio_sectors(bio))
+ bio->bi_sector = linear_map_sector(ti, bio->bi_sector);
}
static int linear_map(struct dm_target *ti, struct bio *bio,
@@ -132,9 +134,17 @@
return min(max_size, q->merge_bvec_fn(q, bvm, biovec));
}
+static int linear_iterate_devices(struct dm_target *ti,
+ iterate_devices_callout_fn fn, void *data)
+{
+ struct linear_c *lc = ti->private;
+
+ return fn(ti, lc->dev, lc->start, data);
+}
+
static struct target_type linear_target = {
.name = "linear",
- .version= {1, 0, 3},
+ .version = {1, 1, 0},
.module = THIS_MODULE,
.ctr = linear_ctr,
.dtr = linear_dtr,
@@ -142,6 +152,7 @@
.status = linear_status,
.ioctl = linear_ioctl,
.merge = linear_merge,
+ .iterate_devices = linear_iterate_devices,
};
int __init dm_linear_init(void)
diff --git a/drivers/md/dm-log-userspace-base.c b/drivers/md/dm-log-userspace-base.c
new file mode 100644
index 0000000..e69b965
--- /dev/null
+++ b/drivers/md/dm-log-userspace-base.c
@@ -0,0 +1,696 @@
+/*
+ * Copyright (C) 2006-2009 Red Hat, Inc.
+ *
+ * This file is released under the LGPL.
+ */
+
+#include <linux/bio.h>
+#include <linux/dm-dirty-log.h>
+#include <linux/device-mapper.h>
+#include <linux/dm-log-userspace.h>
+
+#include "dm-log-userspace-transfer.h"
+
+struct flush_entry {
+ int type;
+ region_t region;
+ struct list_head list;
+};
+
+struct log_c {
+ struct dm_target *ti;
+ uint32_t region_size;
+ region_t region_count;
+ char uuid[DM_UUID_LEN];
+
+ char *usr_argv_str;
+ uint32_t usr_argc;
+
+ /*
+ * in_sync_hint gets set when doing is_remote_recovering. It
+ * represents the first region that needs recovery. IOW, the
+ * first zero bit of sync_bits. This can be useful to limit
+ * traffic for calls like is_remote_recovering and get_resync_work,
+ * but take care in its use for anything else.
+ */
+ uint64_t in_sync_hint;
+
+ spinlock_t flush_lock;
+ struct list_head flush_list; /* only for clear and mark requests */
+};
+
+static mempool_t *flush_entry_pool;
+
+static void *flush_entry_alloc(gfp_t gfp_mask, void *pool_data)
+{
+ return kmalloc(sizeof(struct flush_entry), gfp_mask);
+}
+
+static void flush_entry_free(void *element, void *pool_data)
+{
+ kfree(element);
+}
+
+static int userspace_do_request(struct log_c *lc, const char *uuid,
+ int request_type, char *data, size_t data_size,
+ char *rdata, size_t *rdata_size)
+{
+ int r;
+
+ /*
+ * If the server isn't there, -ESRCH is returned,
+ * and we must keep trying until the server is
+ * restored.
+ */
+retry:
+ r = dm_consult_userspace(uuid, request_type, data,
+ data_size, rdata, rdata_size);
+
+ if (r != -ESRCH)
+ return r;
+
+ DMERR(" Userspace log server not found.");
+ while (1) {
+ set_current_state(TASK_INTERRUPTIBLE);
+ schedule_timeout(2*HZ);
+ DMWARN("Attempting to contact userspace log server...");
+ r = dm_consult_userspace(uuid, DM_ULOG_CTR, lc->usr_argv_str,
+ strlen(lc->usr_argv_str) + 1,
+ NULL, NULL);
+ if (!r)
+ break;
+ }
+ DMINFO("Reconnected to userspace log server... DM_ULOG_CTR complete");
+ r = dm_consult_userspace(uuid, DM_ULOG_RESUME, NULL,
+ 0, NULL, NULL);
+ if (!r)
+ goto retry;
+
+ DMERR("Error trying to resume userspace log: %d", r);
+
+ return -ESRCH;
+}
+
+static int build_constructor_string(struct dm_target *ti,
+ unsigned argc, char **argv,
+ char **ctr_str)
+{
+ int i, str_size;
+ char *str = NULL;
+
+ *ctr_str = NULL;
+
+ for (i = 0, str_size = 0; i < argc; i++)
+ str_size += strlen(argv[i]) + 1; /* +1 for space between args */
+
+ str_size += 20; /* Max number of chars in a printed u64 number */
+
+ str = kzalloc(str_size, GFP_KERNEL);
+ if (!str) {
+ DMWARN("Unable to allocate memory for constructor string");
+ return -ENOMEM;
+ }
+
+ for (i = 0, str_size = 0; i < argc; i++)
+ str_size += sprintf(str + str_size, "%s ", argv[i]);
+ str_size += sprintf(str + str_size, "%llu",
+ (unsigned long long)ti->len);
+
+ *ctr_str = str;
+ return str_size;
+}
+
+/*
+ * userspace_ctr
+ *
+ * argv contains:
+ * <UUID> <other args>
+ * Where 'other args' are the userspace-implementation-specific log
+ * arguments. An example might be:
+ * <UUID> clustered_disk <arg count> <log dev> <region_size> [[no]sync]
+ *
+ * So, this module will strip off the <UUID> for identification purposes
+ * when communicating with userspace about a log; but will pass on everything
+ * else.
+ */
+static int userspace_ctr(struct dm_dirty_log *log, struct dm_target *ti,
+ unsigned argc, char **argv)
+{
+ int r = 0;
+ int str_size;
+ char *ctr_str = NULL;
+ struct log_c *lc = NULL;
+ uint64_t rdata;
+ size_t rdata_size = sizeof(rdata);
+
+ if (argc < 3) {
+ DMWARN("Too few arguments to userspace dirty log");
+ return -EINVAL;
+ }
+
+ lc = kmalloc(sizeof(*lc), GFP_KERNEL);
+ if (!lc) {
+ DMWARN("Unable to allocate userspace log context.");
+ return -ENOMEM;
+ }
+
+ lc->ti = ti;
+
+ if (strlen(argv[0]) > (DM_UUID_LEN - 1)) {
+ DMWARN("UUID argument too long.");
+ kfree(lc);
+ return -EINVAL;
+ }
+
+ strncpy(lc->uuid, argv[0], DM_UUID_LEN);
+ spin_lock_init(&lc->flush_lock);
+ INIT_LIST_HEAD(&lc->flush_list);
+
+ str_size = build_constructor_string(ti, argc - 1, argv + 1, &ctr_str);
+ if (str_size < 0) {
+ kfree(lc);
+ return str_size;
+ }
+
+ /* Send table string */
+ r = dm_consult_userspace(lc->uuid, DM_ULOG_CTR,
+ ctr_str, str_size, NULL, NULL);
+
+ if (r == -ESRCH) {
+ DMERR("Userspace log server not found");
+ goto out;
+ }
+
+ /* Since the region size does not change, get it now */
+ rdata_size = sizeof(rdata);
+ r = dm_consult_userspace(lc->uuid, DM_ULOG_GET_REGION_SIZE,
+ NULL, 0, (char *)&rdata, &rdata_size);
+
+ if (r) {
+ DMERR("Failed to get region size of dirty log");
+ goto out;
+ }
+
+ lc->region_size = (uint32_t)rdata;
+ lc->region_count = dm_sector_div_up(ti->len, lc->region_size);
+
+out:
+ if (r) {
+ kfree(lc);
+ kfree(ctr_str);
+ } else {
+ lc->usr_argv_str = ctr_str;
+ lc->usr_argc = argc;
+ log->context = lc;
+ }
+
+ return r;
+}
+
+static void userspace_dtr(struct dm_dirty_log *log)
+{
+ int r;
+ struct log_c *lc = log->context;
+
+ r = dm_consult_userspace(lc->uuid, DM_ULOG_DTR,
+ NULL, 0,
+ NULL, NULL);
+
+ kfree(lc->usr_argv_str);
+ kfree(lc);
+
+ return;
+}
+
+static int userspace_presuspend(struct dm_dirty_log *log)
+{
+ int r;
+ struct log_c *lc = log->context;
+
+ r = dm_consult_userspace(lc->uuid, DM_ULOG_PRESUSPEND,
+ NULL, 0,
+ NULL, NULL);
+
+ return r;
+}
+
+static int userspace_postsuspend(struct dm_dirty_log *log)
+{
+ int r;
+ struct log_c *lc = log->context;
+
+ r = dm_consult_userspace(lc->uuid, DM_ULOG_POSTSUSPEND,
+ NULL, 0,
+ NULL, NULL);
+
+ return r;
+}
+
+static int userspace_resume(struct dm_dirty_log *log)
+{
+ int r;
+ struct log_c *lc = log->context;
+
+ lc->in_sync_hint = 0;
+ r = dm_consult_userspace(lc->uuid, DM_ULOG_RESUME,
+ NULL, 0,
+ NULL, NULL);
+
+ return r;
+}
+
+static uint32_t userspace_get_region_size(struct dm_dirty_log *log)
+{
+ struct log_c *lc = log->context;
+
+ return lc->region_size;
+}
+
+/*
+ * userspace_is_clean
+ *
+ * Check whether a region is clean. If there is any sort of
+ * failure when consulting the server, we return not clean.
+ *
+ * Returns: 1 if clean, 0 otherwise
+ */
+static int userspace_is_clean(struct dm_dirty_log *log, region_t region)
+{
+ int r;
+ uint64_t region64 = (uint64_t)region;
+ int64_t is_clean;
+ size_t rdata_size;
+ struct log_c *lc = log->context;
+
+ rdata_size = sizeof(is_clean);
+ r = userspace_do_request(lc, lc->uuid, DM_ULOG_IS_CLEAN,
+ (char *)&region64, sizeof(region64),
+ (char *)&is_clean, &rdata_size);
+
+ return (r) ? 0 : (int)is_clean;
+}
+
+/*
+ * userspace_in_sync
+ *
+ * Check if the region is in-sync. If there is any sort
+ * of failure when consulting the server, we assume that
+ * the region is not in sync.
+ *
+ * If 'can_block' is not set, return -EWOULDBLOCK immediately.
+ *
+ * Returns: 1 if in-sync, 0 if not-in-sync, -EWOULDBLOCK
+ */
+static int userspace_in_sync(struct dm_dirty_log *log, region_t region,
+ int can_block)
+{
+ int r;
+ uint64_t region64 = region;
+ int64_t in_sync;
+ size_t rdata_size;
+ struct log_c *lc = log->context;
+
+ /*
+ * We can never respond directly - even if in_sync_hint is
+ * set. This is because another machine could see a device
+ * failure and mark the region out-of-sync. If we don't go
+ * to userspace to ask, we might think the region is in-sync
+ * and allow a read to pick up data that is stale. (This is
+ * very unlikely if a device actually fails; but it is very
+ * likely if a connection to one device from one machine fails.)
+ *
+ * There still might be a problem if the mirror caches the region
+ * state as in-sync... but then this call would not be made. So,
+ * that is a mirror problem.
+ */
+ if (!can_block)
+ return -EWOULDBLOCK;
+
+ rdata_size = sizeof(in_sync);
+ r = userspace_do_request(lc, lc->uuid, DM_ULOG_IN_SYNC,
+ (char *)&region64, sizeof(region64),
+ (char *)&in_sync, &rdata_size);
+ return (r) ? 0 : (int)in_sync;
+}
+
+/*
+ * userspace_flush
+ *
+ * This function is ok to block.
+ * The flush happens in two stages. First, it sends all
+ * clear/mark requests that are on the list. Then it
+ * tells the server to commit them. This gives the
+ * server a chance to optimise the commit, instead of
+ * doing it for every request.
+ *
+ * Additionally, we could implement another thread that
+ * sends the requests up to the server - reducing the
+ * load on flush. Then the flush would have less in
+ * the list and be responsible for the finishing commit.
+ *
+ * Returns: 0 on success, < 0 on failure
+ */
+static int userspace_flush(struct dm_dirty_log *log)
+{
+ int r = 0;
+ unsigned long flags;
+ struct log_c *lc = log->context;
+ LIST_HEAD(flush_list);
+ struct flush_entry *fe, *tmp_fe;
+
+ spin_lock_irqsave(&lc->flush_lock, flags);
+ list_splice_init(&lc->flush_list, &flush_list);
+ spin_unlock_irqrestore(&lc->flush_lock, flags);
+
+ if (list_empty(&flush_list))
+ return 0;
+
+ /*
+ * FIXME: Count up requests, group request types,
+ * allocate memory to stick all requests in and
+ * send to server in one go. Failing the allocation,
+ * do it one by one.
+ */
+
+ list_for_each_entry(fe, &flush_list, list) {
+ r = userspace_do_request(lc, lc->uuid, fe->type,
+ (char *)&fe->region,
+ sizeof(fe->region),
+ NULL, NULL);
+ if (r)
+ goto fail;
+ }
+
+ r = userspace_do_request(lc, lc->uuid, DM_ULOG_FLUSH,
+ NULL, 0, NULL, NULL);
+
+fail:
+ /*
+ * We can safely remove these entries, even on failure.
+ * Calling code will receive an error and will know that
+ * the log facility has failed.
+ */
+ list_for_each_entry_safe(fe, tmp_fe, &flush_list, list) {
+ list_del(&fe->list);
+ mempool_free(fe, flush_entry_pool);
+ }
+
+ if (r)
+ dm_table_event(lc->ti->table);
+
+ return r;
+}
+
+/*
+ * userspace_mark_region
+ *
+ * This function should avoid blocking unless absolutely required.
+ * (Memory allocation is valid for blocking.)
+ */
+static void userspace_mark_region(struct dm_dirty_log *log, region_t region)
+{
+ unsigned long flags;
+ struct log_c *lc = log->context;
+ struct flush_entry *fe;
+
+ /* Wait for an allocation, but _never_ fail */
+ fe = mempool_alloc(flush_entry_pool, GFP_NOIO);
+ BUG_ON(!fe);
+
+ spin_lock_irqsave(&lc->flush_lock, flags);
+ fe->type = DM_ULOG_MARK_REGION;
+ fe->region = region;
+ list_add(&fe->list, &lc->flush_list);
+ spin_unlock_irqrestore(&lc->flush_lock, flags);
+
+ return;
+}
+
+/*
+ * userspace_clear_region
+ *
+ * This function must not block.
+ * So, the alloc can't block. In the worst case, it is ok to
+ * fail. It would simply mean we can't clear the region.
+ * Does nothing to current sync context, but does mean
+ * the region will be re-sync'ed on a reload of the mirror
+ * even though it is in-sync.
+ */
+static void userspace_clear_region(struct dm_dirty_log *log, region_t region)
+{
+ unsigned long flags;
+ struct log_c *lc = log->context;
+ struct flush_entry *fe;
+
+ /*
+ * If we fail to allocate, we skip the clearing of
+ * the region. This doesn't hurt us in any way, except
+ * to cause the region to be resync'ed when the
+ * device is activated next time.
+ */
+ fe = mempool_alloc(flush_entry_pool, GFP_ATOMIC);
+ if (!fe) {
+ DMERR("Failed to allocate memory to clear region.");
+ return;
+ }
+
+ spin_lock_irqsave(&lc->flush_lock, flags);
+ fe->type = DM_ULOG_CLEAR_REGION;
+ fe->region = region;
+ list_add(&fe->list, &lc->flush_list);
+ spin_unlock_irqrestore(&lc->flush_lock, flags);
+
+ return;
+}
+
+/*
+ * userspace_get_resync_work
+ *
+ * Get a region that needs recovery. It is valid to return
+ * an error for this function.
+ *
+ * Returns: 1 if region filled, 0 if no work, <0 on error
+ */
+static int userspace_get_resync_work(struct dm_dirty_log *log, region_t *region)
+{
+ int r;
+ size_t rdata_size;
+ struct log_c *lc = log->context;
+ struct {
+ int64_t i; /* 64-bit for mixed-arch compatibility */
+ region_t r;
+ } pkg;
+
+ if (lc->in_sync_hint >= lc->region_count)
+ return 0;
+
+ rdata_size = sizeof(pkg);
+ r = userspace_do_request(lc, lc->uuid, DM_ULOG_GET_RESYNC_WORK,
+ NULL, 0,
+ (char *)&pkg, &rdata_size);
+
+ *region = pkg.r;
+ return (r) ? r : (int)pkg.i;
+}
+
+/*
+ * userspace_set_region_sync
+ *
+ * Set the sync status of a given region. This function
+ * must not fail.
+ */
+static void userspace_set_region_sync(struct dm_dirty_log *log,
+ region_t region, int in_sync)
+{
+ int r;
+ struct log_c *lc = log->context;
+ struct {
+ region_t r;
+ int64_t i;
+ } pkg;
+
+ pkg.r = region;
+ pkg.i = (int64_t)in_sync;
+
+ r = userspace_do_request(lc, lc->uuid, DM_ULOG_SET_REGION_SYNC,
+ (char *)&pkg, sizeof(pkg),
+ NULL, NULL);
+
+ /*
+ * It would be nice to be able to report failures.
+ * However, it is easy enough to detect and resolve.
+ */
+ return;
+}
+
+/*
+ * userspace_get_sync_count
+ *
+ * If there is any sort of failure when consulting the server,
+ * we assume that the sync count is zero.
+ *
+ * Returns: sync count on success, 0 on failure
+ */
+static region_t userspace_get_sync_count(struct dm_dirty_log *log)
+{
+ int r;
+ size_t rdata_size;
+ uint64_t sync_count;
+ struct log_c *lc = log->context;
+
+ rdata_size = sizeof(sync_count);
+ r = userspace_do_request(lc, lc->uuid, DM_ULOG_GET_SYNC_COUNT,
+ NULL, 0,
+ (char *)&sync_count, &rdata_size);
+
+ if (r)
+ return 0;
+
+ if (sync_count >= lc->region_count)
+ lc->in_sync_hint = lc->region_count;
+
+ return (region_t)sync_count;
+}
+
+/*
+ * userspace_status
+ *
+ * Returns: amount of space consumed
+ */
+static int userspace_status(struct dm_dirty_log *log, status_type_t status_type,
+ char *result, unsigned maxlen)
+{
+ int r = 0;
+ size_t sz = (size_t)maxlen;
+ struct log_c *lc = log->context;
+
+ switch (status_type) {
+ case STATUSTYPE_INFO:
+ r = userspace_do_request(lc, lc->uuid, DM_ULOG_STATUS_INFO,
+ NULL, 0,
+ result, &sz);
+
+ if (r) {
+ sz = 0;
+ DMEMIT("%s 1 COM_FAILURE", log->type->name);
+ }
+ break;
+ case STATUSTYPE_TABLE:
+ sz = 0;
+ DMEMIT("%s %u %s %s", log->type->name, lc->usr_argc + 1,
+ lc->uuid, lc->usr_argv_str);
+ break;
+ }
+ return (r) ? 0 : (int)sz;
+}
+
+/*
+ * userspace_is_remote_recovering
+ *
+ * Returns: 1 if region recovering, 0 otherwise
+ */
+static int userspace_is_remote_recovering(struct dm_dirty_log *log,
+ region_t region)
+{
+ int r;
+ uint64_t region64 = region;
+ struct log_c *lc = log->context;
+ static unsigned long long limit;
+ struct {
+ int64_t is_recovering;
+ uint64_t in_sync_hint;
+ } pkg;
+ size_t rdata_size = sizeof(pkg);
+
+ /*
+ * Once the mirror has been reported to be in-sync,
+ * it will never again ask for recovery work. So,
+ * we can safely say there is not a remote machine
+ * recovering if the device is in-sync. (in_sync_hint
+ * must be reset at resume time.)
+ */
+ if (region < lc->in_sync_hint)
+ return 0;
+ else if (jiffies < limit)
+ return 1;
+
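+ /* Query userspace at most once per HZ/4 jiffies; the early return above conservatively reports "recovering" while throttled. */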
+ limit = jiffies + (HZ / 4);
+ r = userspace_do_request(lc, lc->uuid, DM_ULOG_IS_REMOTE_RECOVERING,
+ (char *)&region64, sizeof(region64),
+ (char *)&pkg, &rdata_size);
+ if (r)
+ return 1;
+
+ lc->in_sync_hint = pkg.in_sync_hint;
+
+ return (int)pkg.is_recovering;
+}
+
+static struct dm_dirty_log_type _userspace_type = {
+ .name = "userspace",
+ .module = THIS_MODULE,
+ .ctr = userspace_ctr,
+ .dtr = userspace_dtr,
+ .presuspend = userspace_presuspend,
+ .postsuspend = userspace_postsuspend,
+ .resume = userspace_resume,
+ .get_region_size = userspace_get_region_size,
+ .is_clean = userspace_is_clean,
+ .in_sync = userspace_in_sync,
+ .flush = userspace_flush,
+ .mark_region = userspace_mark_region,
+ .clear_region = userspace_clear_region,
+ .get_resync_work = userspace_get_resync_work,
+ .set_region_sync = userspace_set_region_sync,
+ .get_sync_count = userspace_get_sync_count,
+ .status = userspace_status,
+ .is_remote_recovering = userspace_is_remote_recovering,
+};
+
+static int __init userspace_dirty_log_init(void)
+{
+ int r = 0;
+
+ flush_entry_pool = mempool_create(100, flush_entry_alloc,
+ flush_entry_free, NULL);
+
+ if (!flush_entry_pool) {
+ DMWARN("Unable to create flush_entry_pool: No memory.");
+ return -ENOMEM;
+ }
+
+ r = dm_ulog_tfr_init();
+ if (r) {
+ DMWARN("Unable to initialize userspace log communications");
+ mempool_destroy(flush_entry_pool);
+ return r;
+ }
+
+ r = dm_dirty_log_type_register(&_userspace_type);
+ if (r) {
+ DMWARN("Couldn't register userspace dirty log type");
+ dm_ulog_tfr_exit();
+ mempool_destroy(flush_entry_pool);
+ return r;
+ }
+
+ DMINFO("version 1.0.0 loaded");
+ return 0;
+}
+
+static void __exit userspace_dirty_log_exit(void)
+{
+ dm_dirty_log_type_unregister(&_userspace_type);
+ dm_ulog_tfr_exit();
+ mempool_destroy(flush_entry_pool);
+
+ DMINFO("version 1.0.0 unloaded");
+ return;
+}
+
+module_init(userspace_dirty_log_init);
+module_exit(userspace_dirty_log_exit);
+
+MODULE_DESCRIPTION(DM_NAME " userspace dirty log link");
+MODULE_AUTHOR("Jonathan Brassow <dm-devel@redhat.com>");
+MODULE_LICENSE("GPL");
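As a usage sketch, not part of the patch: a dm-mirror table selects this log by naming "userspace" as the log type, with the first log argument being the UUID and everything after it passed through to the userspace log server (see the userspace_ctr comment above). In the illustrative line below, the UUID and the post-UUID arguments ("clustered_disk 253:4 1024") are hypothetical and only meaningful to whichever server implements the log:

0 409600 mirror userspace 4 MyMirrorLogUUID clustered_disk 253:4 1024 2 8:16 0 8:32 0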
diff --git a/drivers/md/dm-log-userspace-transfer.c b/drivers/md/dm-log-userspace-transfer.c
new file mode 100644
index 0000000..0ca1ee7
--- /dev/null
+++ b/drivers/md/dm-log-userspace-transfer.c
@@ -0,0 +1,276 @@
+/*
+ * Copyright (C) 2006-2009 Red Hat, Inc.
+ *
+ * This file is released under the LGPL.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <net/sock.h>
+#include <linux/workqueue.h>
+#include <linux/connector.h>
+#include <linux/device-mapper.h>
+#include <linux/dm-log-userspace.h>
+
+#include "dm-log-userspace-transfer.h"
+
+static uint32_t dm_ulog_seq;
+
+/*
+ * Netlink/Connector is an unreliable protocol. How long should
+ * we wait for a response before assuming it was lost and retrying?
+ * (If we do receive a response after this time, it will be discarded
+ * and the response to the resent request will be waited for.
+ */
+#define DM_ULOG_RETRY_TIMEOUT (15 * HZ)
+
+/*
+ * Pre-allocated space for speed
+ */
+#define DM_ULOG_PREALLOCED_SIZE 512
+static struct cn_msg *prealloced_cn_msg;
+static struct dm_ulog_request *prealloced_ulog_tfr;
+
+static struct cb_id ulog_cn_id = {
+ .idx = CN_IDX_DM,
+ .val = CN_VAL_DM_USERSPACE_LOG
+};
+
+static DEFINE_MUTEX(dm_ulog_lock);
+
+struct receiving_pkg {
+ struct list_head list;
+ struct completion complete;
+
+ uint32_t seq;
+
+ int error;
+ size_t *data_size;
+ char *data;
+};
+
+static DEFINE_SPINLOCK(receiving_list_lock);
+static struct list_head receiving_list;
+
+static int dm_ulog_sendto_server(struct dm_ulog_request *tfr)
+{
+ int r;
+ struct cn_msg *msg = prealloced_cn_msg;
+
+ memset(msg, 0, sizeof(struct cn_msg));
+
+ msg->id.idx = ulog_cn_id.idx;
+ msg->id.val = ulog_cn_id.val;
+ msg->ack = 0;
+ msg->seq = tfr->seq;
+ msg->len = sizeof(struct dm_ulog_request) + tfr->data_size;
+
+ r = cn_netlink_send(msg, 0, gfp_any());
+
+ return r;
+}
+
+/*
+ * Parameters for this function can be either msg or tfr, but not
+ * both. This function fills in the reply for a waiting request.
+ * If just msg is given, then the reply is simply an ACK from userspace
+ * that the request was received.
+ *
+ * Returns: 0 on success, -ENOENT on failure
+ */
+static int fill_pkg(struct cn_msg *msg, struct dm_ulog_request *tfr)
+{
+ uint32_t rtn_seq = (msg) ? msg->seq : (tfr) ? tfr->seq : 0;
+ struct receiving_pkg *pkg;
+
+ /*
+ * The 'receiving_pkg' entries in this list are allocated on
+ * the stack in 'dm_consult_userspace'.
+ * Each process that is waiting for a reply from the user
+ * space server will have an entry in this list.
+ *
+ * We are safe to do it this way because the stack space
+ * is unique to each process, but still addressable by
+ * other processes.
+ */
+ list_for_each_entry(pkg, &receiving_list, list) {
+ if (rtn_seq != pkg->seq)
+ continue;
+
+ if (msg) {
+ pkg->error = -msg->ack;
+ /*
+ * If we are trying again, we will need to know our
+ * storage capacity. Otherwise, along with the
+ * error code, we make explicit that we have no data.
+ */
+ if (pkg->error != -EAGAIN)
+ *(pkg->data_size) = 0;
+ } else if (tfr->data_size > *(pkg->data_size)) {
+ DMERR("Insufficient space to receive package [%u] "
+ "(%u vs %lu)", tfr->request_type,
+ tfr->data_size, *(pkg->data_size));
+
+ *(pkg->data_size) = 0;
+ pkg->error = -ENOSPC;
+ } else {
+ pkg->error = tfr->error;
+ memcpy(pkg->data, tfr->data, tfr->data_size);
+ *(pkg->data_size) = tfr->data_size;
+ }
+ complete(&pkg->complete);
+ return 0;
+ }
+
+ return -ENOENT;
+}
+
+/*
+ * This is the connector callback that delivers data
+ * that was sent from userspace.
+ */
+static void cn_ulog_callback(void *data)
+{
+ struct cn_msg *msg = (struct cn_msg *)data;
+ struct dm_ulog_request *tfr = (struct dm_ulog_request *)(msg + 1);
+
+ spin_lock(&receiving_list_lock);
+ if (msg->len == 0)
+ fill_pkg(msg, NULL);
+ else if (msg->len < sizeof(*tfr))
+ DMERR("Incomplete message received (expected %u, got %u): [%u]",
+ (unsigned)sizeof(*tfr), msg->len, msg->seq);
+ else
+ fill_pkg(NULL, tfr);
+ spin_unlock(&receiving_list_lock);
+}
+
+/**
+ * dm_consult_userspace
+ * @uuid: log's uuid (must be DM_UUID_LEN in size)
+ * @request_type: found in include/linux/dm-log-userspace.h
+ * @data: data to tx to the server
+ * @data_size: size of data in bytes
+ * @rdata: place to put return data from server
+ * @rdata_size: value-result (amount of space given/amount of space used)
+ *
+ * rdata_size is undefined on failure.
+ *
+ * Memory used to communicate with userspace is zeroed
+ * before populating to ensure that no unwanted bits leak
+ * from kernel space to user-space. All userspace log communications
+ * between kernel and user space go through this function.
+ *
+ * Returns: 0 on success, -EXXX on failure
+ **/
+int dm_consult_userspace(const char *uuid, int request_type,
+ char *data, size_t data_size,
+ char *rdata, size_t *rdata_size)
+{
+ int r = 0;
+ size_t dummy = 0;
+ int overhead_size =
+ sizeof(struct dm_ulog_request) + sizeof(struct cn_msg);
+ struct dm_ulog_request *tfr = prealloced_ulog_tfr;
+ struct receiving_pkg pkg;
+
+ if (data_size > (DM_ULOG_PREALLOCED_SIZE - overhead_size)) {
+ DMINFO("Size of tfr exceeds preallocated size");
+ return -EINVAL;
+ }
+
+ if (!rdata_size)
+ rdata_size = &dummy;
+resend:
+ /*
+ * We serialize the sending of requests so we can
+ * use the preallocated space.
+ */
+ mutex_lock(&dm_ulog_lock);
+
+ memset(tfr, 0, DM_ULOG_PREALLOCED_SIZE - overhead_size);
+ memcpy(tfr->uuid, uuid, DM_UUID_LEN);
+ tfr->seq = dm_ulog_seq++;
+
+ /*
+ * Must be valid request type (all other bits set to
+ * zero). This reserves other bits for possible future
+ * use.
+ */
+ tfr->request_type = request_type & DM_ULOG_REQUEST_MASK;
+
+ tfr->data_size = data_size;
+ if (data && data_size)
+ memcpy(tfr->data, data, data_size);
+
+ memset(&pkg, 0, sizeof(pkg));
+ init_completion(&pkg.complete);
+ pkg.seq = tfr->seq;
+ pkg.data_size = rdata_size;
+ pkg.data = rdata;
+ spin_lock(&receiving_list_lock);
+ list_add(&(pkg.list), &receiving_list);
+ spin_unlock(&receiving_list_lock);
+
+ r = dm_ulog_sendto_server(tfr);
+
+ mutex_unlock(&dm_ulog_lock);
+
+ if (r) {
+ DMERR("Unable to send log request [%u] to userspace: %d",
+ request_type, r);
+ spin_lock(&receiving_list_lock);
+ list_del_init(&(pkg.list));
+ spin_unlock(&receiving_list_lock);
+
+ goto out;
+ }
+
+ r = wait_for_completion_timeout(&(pkg.complete), DM_ULOG_RETRY_TIMEOUT);
+ spin_lock(&receiving_list_lock);
+ list_del_init(&(pkg.list));
+ spin_unlock(&receiving_list_lock);
+ if (!r) {
+ DMWARN("[%s] Request timed out: [%u/%u] - retrying",
+ (strlen(uuid) > 8) ?
+ (uuid + (strlen(uuid) - 8)) : (uuid),
+ request_type, pkg.seq);
+ goto resend;
+ }
+
+ r = pkg.error;
+ if (r == -EAGAIN)
+ goto resend;
+
+out:
+ return r;
+}
+
+int dm_ulog_tfr_init(void)
+{
+ int r;
+ void *prealloced;
+
+ INIT_LIST_HEAD(&receiving_list);
+
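+ /* One preallocated buffer is shared: the cn_msg header is followed in place by the dm_ulog_request payload. */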
+ prealloced = kmalloc(DM_ULOG_PREALLOCED_SIZE, GFP_KERNEL);
+ if (!prealloced)
+ return -ENOMEM;
+
+ prealloced_cn_msg = prealloced;
+ prealloced_ulog_tfr = prealloced + sizeof(struct cn_msg);
+
+ r = cn_add_callback(&ulog_cn_id, "dmlogusr", cn_ulog_callback);
+ if (r) {
+ cn_del_callback(&ulog_cn_id);
+ kfree(prealloced_cn_msg);
+ return r;
+ }
+
+ return 0;
+}
+
+void dm_ulog_tfr_exit(void)
+{
+ cn_del_callback(&ulog_cn_id);
+ kfree(prealloced_cn_msg);
+}
diff --git a/drivers/md/dm-log-userspace-transfer.h b/drivers/md/dm-log-userspace-transfer.h
new file mode 100644
index 0000000..c26d8e4
--- /dev/null
+++ b/drivers/md/dm-log-userspace-transfer.h
@@ -0,0 +1,18 @@
+/*
+ * Copyright (C) 2006-2009 Red Hat, Inc.
+ *
+ * This file is released under the LGPL.
+ */
+
+#ifndef __DM_LOG_USERSPACE_TRANSFER_H__
+#define __DM_LOG_USERSPACE_TRANSFER_H__
+
+#define DM_MSG_PREFIX "dm-log-userspace"
+
+int dm_ulog_tfr_init(void);
+void dm_ulog_tfr_exit(void);
+int dm_consult_userspace(const char *uuid, int request_type,
+ char *data, size_t data_size,
+ char *rdata, size_t *rdata_size);
+
+#endif /* __DM_LOG_USERSPACE_TRANSFER_H__ */
diff --git a/drivers/md/dm-log.c b/drivers/md/dm-log.c
index 6fa8ccf..9443896 100644
--- a/drivers/md/dm-log.c
+++ b/drivers/md/dm-log.c
@@ -412,11 +412,12 @@
/*
* Buffer holds both header and bitset.
*/
- buf_size = dm_round_up((LOG_OFFSET << SECTOR_SHIFT) +
- bitset_size,
- ti->limits.logical_block_size);
+ buf_size =
+ dm_round_up((LOG_OFFSET << SECTOR_SHIFT) + bitset_size,
+ bdev_logical_block_size(lc->header_location.
+ bdev));
- if (buf_size > dev->bdev->bd_inode->i_size) {
+ if (buf_size > i_size_read(dev->bdev->bd_inode)) {
DMWARN("log device %s too small: need %llu bytes",
dev->name, (unsigned long long)buf_size);
kfree(lc);
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index 6a386ab..c70604a 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -8,7 +8,6 @@
#include <linux/device-mapper.h>
#include "dm-path-selector.h"
-#include "dm-bio-record.h"
#include "dm-uevent.h"
#include <linux/ctype.h>
@@ -35,6 +34,7 @@
struct dm_path path;
struct work_struct deactivate_path;
+ struct work_struct activate_path;
};
#define path_to_pgpath(__pgp) container_of((__pgp), struct pgpath, path)
@@ -64,8 +64,6 @@
spinlock_t lock;
const char *hw_handler_name;
- struct work_struct activate_path;
- struct pgpath *pgpath_to_activate;
unsigned nr_priority_groups;
struct list_head priority_groups;
unsigned pg_init_required; /* pg_init needs calling? */
@@ -84,7 +82,7 @@
unsigned pg_init_count; /* Number of times pg_init called */
struct work_struct process_queued_ios;
- struct bio_list queued_ios;
+ struct list_head queued_ios;
unsigned queue_size;
struct work_struct trigger_event;
@@ -101,7 +99,7 @@
*/
struct dm_mpath_io {
struct pgpath *pgpath;
- struct dm_bio_details details;
+ size_t nr_bytes;
};
typedef int (*action_fn) (struct pgpath *pgpath);
@@ -128,6 +126,7 @@
if (pgpath) {
pgpath->is_active = 1;
INIT_WORK(&pgpath->deactivate_path, deactivate_path);
+ INIT_WORK(&pgpath->activate_path, activate_path);
}
return pgpath;
@@ -160,7 +159,6 @@
static void free_pgpaths(struct list_head *pgpaths, struct dm_target *ti)
{
- unsigned long flags;
struct pgpath *pgpath, *tmp;
struct multipath *m = ti->private;
@@ -169,10 +167,6 @@
if (m->hw_handler_name)
scsi_dh_detach(bdev_get_queue(pgpath->path.dev->bdev));
dm_put_device(ti, pgpath->path.dev);
- spin_lock_irqsave(&m->lock, flags);
- if (m->pgpath_to_activate == pgpath)
- m->pgpath_to_activate = NULL;
- spin_unlock_irqrestore(&m->lock, flags);
free_pgpath(pgpath);
}
}
@@ -198,11 +192,11 @@
m = kzalloc(sizeof(*m), GFP_KERNEL);
if (m) {
INIT_LIST_HEAD(&m->priority_groups);
+ INIT_LIST_HEAD(&m->queued_ios);
spin_lock_init(&m->lock);
m->queue_io = 1;
INIT_WORK(&m->process_queued_ios, process_queued_ios);
INIT_WORK(&m->trigger_event, trigger_event);
- INIT_WORK(&m->activate_path, activate_path);
m->mpio_pool = mempool_create_slab_pool(MIN_IOS, _mpio_cache);
if (!m->mpio_pool) {
kfree(m);
@@ -250,11 +244,12 @@
m->pg_init_count = 0;
}
-static int __choose_path_in_pg(struct multipath *m, struct priority_group *pg)
+static int __choose_path_in_pg(struct multipath *m, struct priority_group *pg,
+ size_t nr_bytes)
{
struct dm_path *path;
- path = pg->ps.type->select_path(&pg->ps, &m->repeat_count);
+ path = pg->ps.type->select_path(&pg->ps, &m->repeat_count, nr_bytes);
if (!path)
return -ENXIO;
@@ -266,7 +261,7 @@
return 0;
}
-static void __choose_pgpath(struct multipath *m)
+static void __choose_pgpath(struct multipath *m, size_t nr_bytes)
{
struct priority_group *pg;
unsigned bypassed = 1;
@@ -278,12 +273,12 @@
if (m->next_pg) {
pg = m->next_pg;
m->next_pg = NULL;
- if (!__choose_path_in_pg(m, pg))
+ if (!__choose_path_in_pg(m, pg, nr_bytes))
return;
}
/* Don't change PG until it has no remaining paths */
- if (m->current_pg && !__choose_path_in_pg(m, m->current_pg))
+ if (m->current_pg && !__choose_path_in_pg(m, m->current_pg, nr_bytes))
return;
/*
@@ -295,7 +290,7 @@
list_for_each_entry(pg, &m->priority_groups, list) {
if (pg->bypassed == bypassed)
continue;
- if (!__choose_path_in_pg(m, pg))
+ if (!__choose_path_in_pg(m, pg, nr_bytes))
return;
}
} while (bypassed--);
@@ -322,19 +317,21 @@
dm_noflush_suspending(m->ti));
}
-static int map_io(struct multipath *m, struct bio *bio,
+static int map_io(struct multipath *m, struct request *clone,
struct dm_mpath_io *mpio, unsigned was_queued)
{
int r = DM_MAPIO_REMAPPED;
+ size_t nr_bytes = blk_rq_bytes(clone);
unsigned long flags;
struct pgpath *pgpath;
+ struct block_device *bdev;
spin_lock_irqsave(&m->lock, flags);
/* Do we need to select a new pgpath? */
if (!m->current_pgpath ||
(!m->queue_io && (m->repeat_count && --m->repeat_count == 0)))
- __choose_pgpath(m);
+ __choose_pgpath(m, nr_bytes);
pgpath = m->current_pgpath;
@@ -344,21 +341,28 @@
if ((pgpath && m->queue_io) ||
(!pgpath && m->queue_if_no_path)) {
/* Queue for the daemon to resubmit */
- bio_list_add(&m->queued_ios, bio);
+ list_add_tail(&clone->queuelist, &m->queued_ios);
m->queue_size++;
if ((m->pg_init_required && !m->pg_init_in_progress) ||
!m->queue_io)
queue_work(kmultipathd, &m->process_queued_ios);
pgpath = NULL;
r = DM_MAPIO_SUBMITTED;
- } else if (pgpath)
- bio->bi_bdev = pgpath->path.dev->bdev;
- else if (__must_push_back(m))
+ } else if (pgpath) {
+ bdev = pgpath->path.dev->bdev;
+ clone->q = bdev_get_queue(bdev);
+ clone->rq_disk = bdev->bd_disk;
+ } else if (__must_push_back(m))
r = DM_MAPIO_REQUEUE;
else
r = -EIO; /* Failed */
mpio->pgpath = pgpath;
+ mpio->nr_bytes = nr_bytes;
+
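+ /* Let the path selector account for the bytes being started on this path (e.g. the queue-length and service-time selectors added in this series). */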
+ if (r == DM_MAPIO_REMAPPED && pgpath->pg->ps.type->start_io)
+ pgpath->pg->ps.type->start_io(&pgpath->pg->ps, &pgpath->path,
+ nr_bytes);
spin_unlock_irqrestore(&m->lock, flags);
@@ -396,30 +400,31 @@
{
int r;
unsigned long flags;
- struct bio *bio = NULL, *next;
struct dm_mpath_io *mpio;
union map_info *info;
+ struct request *clone, *n;
+ LIST_HEAD(cl);
spin_lock_irqsave(&m->lock, flags);
- bio = bio_list_get(&m->queued_ios);
+ list_splice_init(&m->queued_ios, &cl);
spin_unlock_irqrestore(&m->lock, flags);
- while (bio) {
- next = bio->bi_next;
- bio->bi_next = NULL;
+ list_for_each_entry_safe(clone, n, &cl, queuelist) {
+ list_del_init(&clone->queuelist);
- info = dm_get_mapinfo(bio);
+ info = dm_get_rq_mapinfo(clone);
mpio = info->ptr;
- r = map_io(m, bio, mpio, 1);
- if (r < 0)
- bio_endio(bio, r);
- else if (r == DM_MAPIO_REMAPPED)
- generic_make_request(bio);
- else if (r == DM_MAPIO_REQUEUE)
- bio_endio(bio, -EIO);
-
- bio = next;
+ r = map_io(m, clone, mpio, 1);
+ if (r < 0) {
+ mempool_free(mpio, m->mpio_pool);
+ dm_kill_unmapped_request(clone, r);
+ } else if (r == DM_MAPIO_REMAPPED)
+ dm_dispatch_request(clone);
+ else if (r == DM_MAPIO_REQUEUE) {
+ mempool_free(mpio, m->mpio_pool);
+ dm_requeue_unmapped_request(clone);
+ }
}
}
@@ -427,8 +432,8 @@
{
struct multipath *m =
container_of(work, struct multipath, process_queued_ios);
- struct pgpath *pgpath = NULL;
- unsigned init_required = 0, must_queue = 1;
+ struct pgpath *pgpath = NULL, *tmp;
+ unsigned must_queue = 1;
unsigned long flags;
spin_lock_irqsave(&m->lock, flags);
@@ -437,7 +442,7 @@
goto out;
if (!m->current_pgpath)
- __choose_pgpath(m);
+ __choose_pgpath(m, 0);
pgpath = m->current_pgpath;
@@ -446,19 +451,15 @@
must_queue = 0;
if (m->pg_init_required && !m->pg_init_in_progress && pgpath) {
- m->pgpath_to_activate = pgpath;
m->pg_init_count++;
m->pg_init_required = 0;
- m->pg_init_in_progress = 1;
- init_required = 1;
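+ /* Kick off pg_init on every path in this priority group; pg_init_in_progress counts the activations still outstanding. */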
+ list_for_each_entry(tmp, &pgpath->pg->pgpaths, list) {
+ if (queue_work(kmpath_handlerd, &tmp->activate_path))
+ m->pg_init_in_progress++;
+ }
}
-
out:
spin_unlock_irqrestore(&m->lock, flags);
-
- if (init_required)
- queue_work(kmpath_handlerd, &m->activate_path);
-
if (!must_queue)
dispatch_queued_ios(m);
}
@@ -553,6 +554,12 @@
return -EINVAL;
}
+ if (ps_argc > as->argc) {
+ dm_put_path_selector(pst);
+ ti->error = "not enough arguments for path selector";
+ return -EINVAL;
+ }
+
r = pst->create(&pg->ps, ps_argc, as->argv);
if (r) {
dm_put_path_selector(pst);
@@ -591,9 +598,20 @@
}
if (m->hw_handler_name) {
- r = scsi_dh_attach(bdev_get_queue(p->path.dev->bdev),
- m->hw_handler_name);
+ struct request_queue *q = bdev_get_queue(p->path.dev->bdev);
+
+ r = scsi_dh_attach(q, m->hw_handler_name);
+ if (r == -EBUSY) {
+ /*
+ * Already attached to different hw_handler,
+ * try to reattach with correct one.
+ */
+ scsi_dh_detach(q);
+ r = scsi_dh_attach(q, m->hw_handler_name);
+ }
+
if (r < 0) {
+ ti->error = "error attaching hardware handler";
dm_put_device(ti, p->path.dev);
goto bad;
}
@@ -699,6 +717,11 @@
if (!hw_argc)
return 0;
+ if (hw_argc > as->argc) {
+ ti->error = "not enough arguments for hardware handler";
+ return -EINVAL;
+ }
+
m->hw_handler_name = kstrdup(shift(as), GFP_KERNEL);
request_module("scsi_dh_%s", m->hw_handler_name);
if (scsi_dh_handler_exist(m->hw_handler_name) == 0) {
@@ -823,6 +846,8 @@
goto bad;
}
+ ti->num_flush_requests = 1;
+
return 0;
bad:
@@ -836,25 +861,29 @@
flush_workqueue(kmpath_handlerd);
flush_workqueue(kmultipathd);
+ flush_scheduled_work();
free_multipath(m);
}
/*
- * Map bios, recording original fields for later in case we have to resubmit
+ * Map cloned requests
*/
-static int multipath_map(struct dm_target *ti, struct bio *bio,
+static int multipath_map(struct dm_target *ti, struct request *clone,
union map_info *map_context)
{
int r;
struct dm_mpath_io *mpio;
struct multipath *m = (struct multipath *) ti->private;
- mpio = mempool_alloc(m->mpio_pool, GFP_NOIO);
- dm_bio_record(&mpio->details, bio);
+ mpio = mempool_alloc(m->mpio_pool, GFP_ATOMIC);
+ if (!mpio)
+ /* ENOMEM, requeue */
+ return DM_MAPIO_REQUEUE;
+ memset(mpio, 0, sizeof(*mpio));
map_context->ptr = mpio;
- bio->bi_rw |= (1 << BIO_RW_FAILFAST_TRANSPORT);
- r = map_io(m, bio, mpio, 0);
+ clone->cmd_flags |= REQ_FAILFAST_TRANSPORT;
+ r = map_io(m, clone, mpio, 0);
if (r < 0 || r == DM_MAPIO_REQUEUE)
mempool_free(mpio, m->mpio_pool);
@@ -924,9 +953,13 @@
pgpath->is_active = 1;
- m->current_pgpath = NULL;
- if (!m->nr_valid_paths++ && m->queue_size)
+ if (!m->nr_valid_paths++ && m->queue_size) {
+ m->current_pgpath = NULL;
queue_work(kmultipathd, &m->process_queued_ios);
+ } else if (m->hw_handler_name && (m->current_pg == pgpath->pg)) {
+ if (queue_work(kmpath_handlerd, &pgpath->activate_path))
+ m->pg_init_in_progress++;
+ }
dm_path_uevent(DM_UEVENT_PATH_REINSTATED, m->ti,
pgpath->path.dev->name, m->nr_valid_paths);
@@ -1102,87 +1135,70 @@
spin_lock_irqsave(&m->lock, flags);
if (errors) {
- DMERR("Could not failover device. Error %d.", errors);
- m->current_pgpath = NULL;
- m->current_pg = NULL;
+ if (pgpath == m->current_pgpath) {
+ DMERR("Could not failover device. Error %d.", errors);
+ m->current_pgpath = NULL;
+ m->current_pg = NULL;
+ }
} else if (!m->pg_init_required) {
m->queue_io = 0;
pg->bypassed = 0;
}
- m->pg_init_in_progress = 0;
- queue_work(kmultipathd, &m->process_queued_ios);
+ m->pg_init_in_progress--;
+ if (!m->pg_init_in_progress)
+ queue_work(kmultipathd, &m->process_queued_ios);
spin_unlock_irqrestore(&m->lock, flags);
}
static void activate_path(struct work_struct *work)
{
int ret;
- struct multipath *m =
- container_of(work, struct multipath, activate_path);
- struct dm_path *path;
- unsigned long flags;
+ struct pgpath *pgpath =
+ container_of(work, struct pgpath, activate_path);
- spin_lock_irqsave(&m->lock, flags);
- path = &m->pgpath_to_activate->path;
- m->pgpath_to_activate = NULL;
- spin_unlock_irqrestore(&m->lock, flags);
- if (!path)
- return;
- ret = scsi_dh_activate(bdev_get_queue(path->dev->bdev));
- pg_init_done(path, ret);
+ ret = scsi_dh_activate(bdev_get_queue(pgpath->path.dev->bdev));
+ pg_init_done(&pgpath->path, ret);
}
/*
* end_io handling
*/
-static int do_end_io(struct multipath *m, struct bio *bio,
+static int do_end_io(struct multipath *m, struct request *clone,
int error, struct dm_mpath_io *mpio)
{
+ /*
+ * We don't queue any clone request inside the multipath target
+ * during end I/O handling, since those clone requests don't have
+ * bio clones. If we queue them inside the multipath target,
+ * we need to make bio clones, that requires memory allocation.
+ * (See drivers/md/dm.c:end_clone_bio() about why the clone requests
+ * don't have bio clones.)
+ * Instead of queueing the clone request here, we queue the original
+ * request into dm core, which will remake a clone request and
+ * clone bios for it and resubmit it later.
+ */
+ int r = DM_ENDIO_REQUEUE;
unsigned long flags;
- if (!error)
+ if (!error && !clone->errors)
return 0; /* I/O complete */
- if ((error == -EWOULDBLOCK) && bio_rw_ahead(bio))
- return error;
-
if (error == -EOPNOTSUPP)
return error;
- spin_lock_irqsave(&m->lock, flags);
- if (!m->nr_valid_paths) {
- if (__must_push_back(m)) {
- spin_unlock_irqrestore(&m->lock, flags);
- return DM_ENDIO_REQUEUE;
- } else if (!m->queue_if_no_path) {
- spin_unlock_irqrestore(&m->lock, flags);
- return -EIO;
- } else {
- spin_unlock_irqrestore(&m->lock, flags);
- goto requeue;
- }
- }
- spin_unlock_irqrestore(&m->lock, flags);
-
if (mpio->pgpath)
fail_path(mpio->pgpath);
- requeue:
- dm_bio_restore(&mpio->details, bio);
-
- /* queue for the daemon to resubmit or fail */
spin_lock_irqsave(&m->lock, flags);
- bio_list_add(&m->queued_ios, bio);
- m->queue_size++;
- if (!m->queue_io)
- queue_work(kmultipathd, &m->process_queued_ios);
+ if (!m->nr_valid_paths && !m->queue_if_no_path && !__must_push_back(m))
+ r = -EIO;
spin_unlock_irqrestore(&m->lock, flags);
- return DM_ENDIO_INCOMPLETE; /* io not complete */
+ return r;
}
-static int multipath_end_io(struct dm_target *ti, struct bio *bio,
+static int multipath_end_io(struct dm_target *ti, struct request *clone,
int error, union map_info *map_context)
{
struct multipath *m = ti->private;
@@ -1191,14 +1207,13 @@
struct path_selector *ps;
int r;
- r = do_end_io(m, bio, error, mpio);
+ r = do_end_io(m, clone, error, mpio);
if (pgpath) {
ps = &pgpath->pg->ps;
if (ps->type->end_io)
- ps->type->end_io(ps, &pgpath->path);
+ ps->type->end_io(ps, &pgpath->path, mpio->nr_bytes);
}
- if (r != DM_ENDIO_INCOMPLETE)
- mempool_free(mpio, m->mpio_pool);
+ mempool_free(mpio, m->mpio_pool);
return r;
}
@@ -1411,7 +1426,7 @@
spin_lock_irqsave(&m->lock, flags);
if (!m->current_pgpath)
- __choose_pgpath(m);
+ __choose_pgpath(m, 0);
if (m->current_pgpath) {
bdev = m->current_pgpath->path.dev->bdev;
@@ -1428,22 +1443,113 @@
return r ? : __blkdev_driver_ioctl(bdev, mode, cmd, arg);
}
+static int multipath_iterate_devices(struct dm_target *ti,
+ iterate_devices_callout_fn fn, void *data)
+{
+ struct multipath *m = ti->private;
+ struct priority_group *pg;
+ struct pgpath *p;
+ int ret = 0;
+
+ list_for_each_entry(pg, &m->priority_groups, list) {
+ list_for_each_entry(p, &pg->pgpaths, list) {
+ ret = fn(ti, p->path.dev, ti->begin, data);
+ if (ret)
+ goto out;
+ }
+ }
+
+out:
+ return ret;
+}
+
+static int __pgpath_busy(struct pgpath *pgpath)
+{
+ struct request_queue *q = bdev_get_queue(pgpath->path.dev->bdev);
+
+ return dm_underlying_device_busy(q);
+}
+
+/*
+ * We return "busy", only when we can map I/Os but underlying devices
+ * are busy (so even if we map I/Os now, the I/Os will wait on
+ * the underlying queue).
+ * In other words, if we want to kill I/Os or queue them inside us
+ * due to map unavailability, we don't return "busy". Otherwise,
+ * dm core won't give us the I/Os and we can't do what we want.
+ */
+static int multipath_busy(struct dm_target *ti)
+{
+ int busy = 0, has_active = 0;
+ struct multipath *m = ti->private;
+ struct priority_group *pg;
+ struct pgpath *pgpath;
+ unsigned long flags;
+
+ spin_lock_irqsave(&m->lock, flags);
+
+ /* Guess which priority_group will be used at next mapping time */
+ if (unlikely(!m->current_pgpath && m->next_pg))
+ pg = m->next_pg;
+ else if (likely(m->current_pg))
+ pg = m->current_pg;
+ else
+ /*
+ * We don't know which pg will be used at the next mapping time.
+ * We don't call __choose_pgpath() here to avoid triggering
+ * pg_init merely as a side effect of this busy check.
+ * So we don't know whether the underlying devices we will be using
+ * at the next mapping time are busy or not. Just try mapping.
+ */
+ goto out;
+
+ /*
+ * If there is one non-busy active path at least, the path selector
+ * will be able to select it. So we consider such a pg as not busy.
+ */
+ busy = 1;
+ list_for_each_entry(pgpath, &pg->pgpaths, list)
+ if (pgpath->is_active) {
+ has_active = 1;
+
+ if (!__pgpath_busy(pgpath)) {
+ busy = 0;
+ break;
+ }
+ }
+
+ if (!has_active)
+ /*
+ * No active path in this pg, so this pg won't be used and
+ * the current_pg will be changed at next mapping time.
+ * We need to try mapping to determine it.
+ */
+ busy = 0;
+
+out:
+ spin_unlock_irqrestore(&m->lock, flags);
+
+ return busy;
+}
+
/*-----------------------------------------------------------------
* Module setup
*---------------------------------------------------------------*/
static struct target_type multipath_target = {
.name = "multipath",
- .version = {1, 0, 5},
+ .version = {1, 1, 0},
.module = THIS_MODULE,
.ctr = multipath_ctr,
.dtr = multipath_dtr,
- .map = multipath_map,
- .end_io = multipath_end_io,
+ .map_rq = multipath_map,
+ .rq_end_io = multipath_end_io,
.presuspend = multipath_presuspend,
.resume = multipath_resume,
.status = multipath_status,
.message = multipath_message,
.ioctl = multipath_ioctl,
+ .iterate_devices = multipath_iterate_devices,
+ .busy = multipath_busy,
};
static int __init dm_multipath_init(void)
diff --git a/drivers/md/dm-path-selector.h b/drivers/md/dm-path-selector.h
index 27357b8..e7d1fa8 100644
--- a/drivers/md/dm-path-selector.h
+++ b/drivers/md/dm-path-selector.h
@@ -56,7 +56,8 @@
* the path fails.
*/
struct dm_path *(*select_path) (struct path_selector *ps,
- unsigned *repeat_count);
+ unsigned *repeat_count,
+ size_t nr_bytes);
/*
* Notify the selector that a path has failed.
@@ -75,7 +76,10 @@
int (*status) (struct path_selector *ps, struct dm_path *path,
status_type_t type, char *result, unsigned int maxlen);
- int (*end_io) (struct path_selector *ps, struct dm_path *path);
+ int (*start_io) (struct path_selector *ps, struct dm_path *path,
+ size_t nr_bytes);
+ int (*end_io) (struct path_selector *ps, struct dm_path *path,
+ size_t nr_bytes);
};
/* Register a path selector */
diff --git a/drivers/md/dm-queue-length.c b/drivers/md/dm-queue-length.c
new file mode 100644
index 0000000..f92b6ce
--- /dev/null
+++ b/drivers/md/dm-queue-length.c
@@ -0,0 +1,263 @@
+/*
+ * Copyright (C) 2004-2005 IBM Corp. All Rights Reserved.
+ * Copyright (C) 2006-2009 NEC Corporation.
+ *
+ * dm-queue-length.c
+ *
+ * Module Author: Stefan Bader, IBM
+ * Modified by: Kiyoshi Ueda, NEC
+ *
+ * This file is released under the GPL.
+ *
+ * queue-length path selector - choose a path with the least number of
+ * in-flight I/Os.
+ */
+
+#include "dm.h"
+#include "dm-path-selector.h"
+
+#include <linux/slab.h>
+#include <linux/ctype.h>
+#include <linux/errno.h>
+#include <linux/module.h>
+#include <asm/atomic.h>
+
+#define DM_MSG_PREFIX "multipath queue-length"
+#define QL_MIN_IO 128
+#define QL_VERSION "0.1.0"
+
+struct selector {
+ struct list_head valid_paths;
+ struct list_head failed_paths;
+};
+
+struct path_info {
+ struct list_head list;
+ struct dm_path *path;
+ unsigned repeat_count;
+ atomic_t qlen; /* the number of in-flight I/Os */
+};
+
+static struct selector *alloc_selector(void)
+{
+ struct selector *s = kmalloc(sizeof(*s), GFP_KERNEL);
+
+ if (s) {
+ INIT_LIST_HEAD(&s->valid_paths);
+ INIT_LIST_HEAD(&s->failed_paths);
+ }
+
+ return s;
+}
+
+static int ql_create(struct path_selector *ps, unsigned argc, char **argv)
+{
+ struct selector *s = alloc_selector();
+
+ if (!s)
+ return -ENOMEM;
+
+ ps->context = s;
+ return 0;
+}
+
+static void ql_free_paths(struct list_head *paths)
+{
+ struct path_info *pi, *next;
+
+ list_for_each_entry_safe(pi, next, paths, list) {
+ list_del(&pi->list);
+ kfree(pi);
+ }
+}
+
+static void ql_destroy(struct path_selector *ps)
+{
+ struct selector *s = ps->context;
+
+ ql_free_paths(&s->valid_paths);
+ ql_free_paths(&s->failed_paths);
+ kfree(s);
+ ps->context = NULL;
+}
+
+static int ql_status(struct path_selector *ps, struct dm_path *path,
+ status_type_t type, char *result, unsigned maxlen)
+{
+ unsigned sz = 0;
+ struct path_info *pi;
+
+ /* When called with NULL path, return selector status/args. */
+ if (!path)
+ DMEMIT("0 ");
+ else {
+ pi = path->pscontext;
+
+ switch (type) {
+ case STATUSTYPE_INFO:
+ DMEMIT("%d ", atomic_read(&pi->qlen));
+ break;
+ case STATUSTYPE_TABLE:
+ DMEMIT("%u ", pi->repeat_count);
+ break;
+ }
+ }
+
+ return sz;
+}
+
+static int ql_add_path(struct path_selector *ps, struct dm_path *path,
+ int argc, char **argv, char **error)
+{
+ struct selector *s = ps->context;
+ struct path_info *pi;
+ unsigned repeat_count = QL_MIN_IO;
+
+ /*
+ * Arguments: [<repeat_count>]
+ * <repeat_count>: The number of I/Os before switching path.
+ * If not given, default (QL_MIN_IO) is used.
+ */
+ if (argc > 1) {
+ *error = "queue-length ps: incorrect number of arguments";
+ return -EINVAL;
+ }
+
+ if ((argc == 1) && (sscanf(argv[0], "%u", &repeat_count) != 1)) {
+ *error = "queue-length ps: invalid repeat count";
+ return -EINVAL;
+ }
+
+ /* Allocate the path information structure */
+ pi = kmalloc(sizeof(*pi), GFP_KERNEL);
+ if (!pi) {
+ *error = "queue-length ps: Error allocating path information";
+ return -ENOMEM;
+ }
+
+ pi->path = path;
+ pi->repeat_count = repeat_count;
+ atomic_set(&pi->qlen, 0);
+
+ path->pscontext = pi;
+
+ list_add_tail(&pi->list, &s->valid_paths);
+
+ return 0;
+}
+
+static void ql_fail_path(struct path_selector *ps, struct dm_path *path)
+{
+ struct selector *s = ps->context;
+ struct path_info *pi = path->pscontext;
+
+ list_move(&pi->list, &s->failed_paths);
+}
+
+static int ql_reinstate_path(struct path_selector *ps, struct dm_path *path)
+{
+ struct selector *s = ps->context;
+ struct path_info *pi = path->pscontext;
+
+ list_move_tail(&pi->list, &s->valid_paths);
+
+ return 0;
+}
+
+/*
+ * Select a path having the minimum number of in-flight I/Os
+ */
+static struct dm_path *ql_select_path(struct path_selector *ps,
+ unsigned *repeat_count, size_t nr_bytes)
+{
+ struct selector *s = ps->context;
+ struct path_info *pi = NULL, *best = NULL;
+
+ if (list_empty(&s->valid_paths))
+ return NULL;
+
+ /* Change preferred (first in list) path to evenly balance. */
+ list_move_tail(s->valid_paths.next, &s->valid_paths);
+
+ list_for_each_entry(pi, &s->valid_paths, list) {
+ if (!best ||
+ (atomic_read(&pi->qlen) < atomic_read(&best->qlen)))
+ best = pi;
+
+ if (!atomic_read(&best->qlen))
+ break;
+ }
+
+ if (!best)
+ return NULL;
+
+ *repeat_count = best->repeat_count;
+
+ return best->path;
+}
+
+static int ql_start_io(struct path_selector *ps, struct dm_path *path,
+ size_t nr_bytes)
+{
+ struct path_info *pi = path->pscontext;
+
+ atomic_inc(&pi->qlen);
+
+ return 0;
+}
+
+static int ql_end_io(struct path_selector *ps, struct dm_path *path,
+ size_t nr_bytes)
+{
+ struct path_info *pi = path->pscontext;
+
+ atomic_dec(&pi->qlen);
+
+ return 0;
+}
+
+static struct path_selector_type ql_ps = {
+ .name = "queue-length",
+ .module = THIS_MODULE,
+ .table_args = 1,
+ .info_args = 1,
+ .create = ql_create,
+ .destroy = ql_destroy,
+ .status = ql_status,
+ .add_path = ql_add_path,
+ .fail_path = ql_fail_path,
+ .reinstate_path = ql_reinstate_path,
+ .select_path = ql_select_path,
+ .start_io = ql_start_io,
+ .end_io = ql_end_io,
+};
+
+static int __init dm_ql_init(void)
+{
+ int r = dm_register_path_selector(&ql_ps);
+
+ if (r < 0)
+ DMERR("register failed %d", r);
+
+ DMINFO("version " QL_VERSION " loaded");
+
+ return r;
+}
+
+static void __exit dm_ql_exit(void)
+{
+ int r = dm_unregister_path_selector(&ql_ps);
+
+ if (r < 0)
+ DMERR("unregister failed %d", r);
+}
+
+module_init(dm_ql_init);
+module_exit(dm_ql_exit);
+
+MODULE_AUTHOR("Stefan Bader <Stefan.Bader at de.ibm.com>");
+MODULE_DESCRIPTION(
+ "(C) Copyright IBM Corp. 2004,2005 All Rights Reserved.\n"
+ DM_NAME " path selector to balance the number of in-flight I/Os"
+);
+MODULE_LICENSE("GPL");
diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
index 076fbb4..ce8868c 100644
--- a/drivers/md/dm-raid1.c
+++ b/drivers/md/dm-raid1.c
@@ -1283,9 +1283,23 @@
return 0;
}
+static int mirror_iterate_devices(struct dm_target *ti,
+ iterate_devices_callout_fn fn, void *data)
+{
+ struct mirror_set *ms = ti->private;
+ int ret = 0;
+ unsigned i;
+
+ for (i = 0; !ret && i < ms->nr_mirrors; i++)
+ ret = fn(ti, ms->mirror[i].dev,
+ ms->mirror[i].offset, data);
+
+ return ret;
+}
+
static struct target_type mirror_target = {
.name = "mirror",
- .version = {1, 0, 20},
+ .version = {1, 12, 0},
.module = THIS_MODULE,
.ctr = mirror_ctr,
.dtr = mirror_dtr,
@@ -1295,6 +1309,7 @@
.postsuspend = mirror_postsuspend,
.resume = mirror_resume,
.status = mirror_status,
+ .iterate_devices = mirror_iterate_devices,
};
static int __init dm_mirror_init(void)
diff --git a/drivers/md/dm-region-hash.c b/drivers/md/dm-region-hash.c
index 7b899be..36dbe29 100644
--- a/drivers/md/dm-region-hash.c
+++ b/drivers/md/dm-region-hash.c
@@ -283,7 +283,7 @@
nreg = mempool_alloc(rh->region_pool, GFP_ATOMIC);
if (unlikely(!nreg))
- nreg = kmalloc(sizeof(*nreg), GFP_NOIO);
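+ /* The region hash cannot recover from an allocation failure here, so fall back to a __GFP_NOFAIL allocation. */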
+ nreg = kmalloc(sizeof(*nreg), GFP_NOIO | __GFP_NOFAIL);
nreg->state = rh->log->type->in_sync(rh->log, region, 1) ?
DM_RH_CLEAN : DM_RH_NOSYNC;
diff --git a/drivers/md/dm-round-robin.c b/drivers/md/dm-round-robin.c
index cdfbf65..24752f4 100644
--- a/drivers/md/dm-round-robin.c
+++ b/drivers/md/dm-round-robin.c
@@ -161,7 +161,7 @@
}
static struct dm_path *rr_select_path(struct path_selector *ps,
- unsigned *repeat_count)
+ unsigned *repeat_count, size_t nr_bytes)
{
struct selector *s = (struct selector *) ps->context;
struct path_info *pi = NULL;
diff --git a/drivers/md/dm-service-time.c b/drivers/md/dm-service-time.c
new file mode 100644
index 0000000..cfa668f
--- /dev/null
+++ b/drivers/md/dm-service-time.c
@@ -0,0 +1,339 @@
+/*
+ * Copyright (C) 2007-2009 NEC Corporation. All Rights Reserved.
+ *
+ * Module Author: Kiyoshi Ueda
+ *
+ * This file is released under the GPL.
+ *
+ * Throughput oriented path selector.
+ */
+
+#include "dm.h"
+#include "dm-path-selector.h"
+
+#define DM_MSG_PREFIX "multipath service-time"
+#define ST_MIN_IO 1
+#define ST_MAX_RELATIVE_THROUGHPUT 100
+#define ST_MAX_RELATIVE_THROUGHPUT_SHIFT 7
+#define ST_MAX_INFLIGHT_SIZE ((size_t)-1 >> ST_MAX_RELATIVE_THROUGHPUT_SHIFT)
+#define ST_VERSION "0.2.0"
+
+struct selector {
+ struct list_head valid_paths;
+ struct list_head failed_paths;
+};
+
+struct path_info {
+ struct list_head list;
+ struct dm_path *path;
+ unsigned repeat_count;
+ unsigned relative_throughput;
+ atomic_t in_flight_size; /* Total size of in-flight I/Os */
+};
+
+static struct selector *alloc_selector(void)
+{
+ struct selector *s = kmalloc(sizeof(*s), GFP_KERNEL);
+
+ if (s) {
+ INIT_LIST_HEAD(&s->valid_paths);
+ INIT_LIST_HEAD(&s->failed_paths);
+ }
+
+ return s;
+}
+
+static int st_create(struct path_selector *ps, unsigned argc, char **argv)
+{
+ struct selector *s = alloc_selector();
+
+ if (!s)
+ return -ENOMEM;
+
+ ps->context = s;
+ return 0;
+}
+
+static void free_paths(struct list_head *paths)
+{
+ struct path_info *pi, *next;
+
+ list_for_each_entry_safe(pi, next, paths, list) {
+ list_del(&pi->list);
+ kfree(pi);
+ }
+}
+
+static void st_destroy(struct path_selector *ps)
+{
+ struct selector *s = ps->context;
+
+ free_paths(&s->valid_paths);
+ free_paths(&s->failed_paths);
+ kfree(s);
+ ps->context = NULL;
+}
+
+static int st_status(struct path_selector *ps, struct dm_path *path,
+ status_type_t type, char *result, unsigned maxlen)
+{
+ unsigned sz = 0;
+ struct path_info *pi;
+
+ if (!path)
+ DMEMIT("0 ");
+ else {
+ pi = path->pscontext;
+
+ switch (type) {
+ case STATUSTYPE_INFO:
+ DMEMIT("%d %u ", atomic_read(&pi->in_flight_size),
+ pi->relative_throughput);
+ break;
+ case STATUSTYPE_TABLE:
+ DMEMIT("%u %u ", pi->repeat_count,
+ pi->relative_throughput);
+ break;
+ }
+ }
+
+ return sz;
+}
+
+static int st_add_path(struct path_selector *ps, struct dm_path *path,
+ int argc, char **argv, char **error)
+{
+ struct selector *s = ps->context;
+ struct path_info *pi;
+ unsigned repeat_count = ST_MIN_IO;
+ unsigned relative_throughput = 1;
+
+ /*
+ * Arguments: [<repeat_count> [<relative_throughput>]]
+ * <repeat_count>: The number of I/Os before switching path.
+ * If not given, default (ST_MIN_IO) is used.
+ * <relative_throughput>: The relative throughput value of
+ * the path among all paths in the path-group.
+ * The valid range: 0-<ST_MAX_RELATIVE_THROUGHPUT>
+ * If not given, minimum value '1' is used.
+ * If '0' is given, the path isn't selected while
+ * other paths having a positive value are
+ * available.
+ */
+ if (argc > 2) {
+ *error = "service-time ps: incorrect number of arguments";
+ return -EINVAL;
+ }
+
+ if (argc && (sscanf(argv[0], "%u", &repeat_count) != 1)) {
+ *error = "service-time ps: invalid repeat count";
+ return -EINVAL;
+ }
+
+ if ((argc == 2) &&
+ (sscanf(argv[1], "%u", &relative_throughput) != 1 ||
+ relative_throughput > ST_MAX_RELATIVE_THROUGHPUT)) {
+ *error = "service-time ps: invalid relative_throughput value";
+ return -EINVAL;
+ }
+
+ /* allocate the path */
+ pi = kmalloc(sizeof(*pi), GFP_KERNEL);
+ if (!pi) {
+ *error = "service-time ps: Error allocating path context";
+ return -ENOMEM;
+ }
+
+ pi->path = path;
+ pi->repeat_count = repeat_count;
+ pi->relative_throughput = relative_throughput;
+ atomic_set(&pi->in_flight_size, 0);
+
+ path->pscontext = pi;
+
+ list_add_tail(&pi->list, &s->valid_paths);
+
+ return 0;
+}
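Not part of the patch itself: for orientation, a hypothetical multipath table line showing where these two per-path arguments land (device numbers, sizes and values are made up, and the layout assumes the usual dm-multipath constructor format):

  0 1024 multipath 0 0 1 1 service-time 0 2 2 8:16 128 10 8:32 128 20

That is: one priority group using the "service-time" selector with no group-level arguments, two paths, two per-path arguments each, namely <repeat_count> <relative_throughput>, so path 8:32 is treated as having twice the throughput of path 8:16.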
+
+static void st_fail_path(struct path_selector *ps, struct dm_path *path)
+{
+ struct selector *s = ps->context;
+ struct path_info *pi = path->pscontext;
+
+ list_move(&pi->list, &s->failed_paths);
+}
+
+static int st_reinstate_path(struct path_selector *ps, struct dm_path *path)
+{
+ struct selector *s = ps->context;
+ struct path_info *pi = path->pscontext;
+
+ list_move_tail(&pi->list, &s->valid_paths);
+
+ return 0;
+}
+
+/*
+ * Compare the estimated service time of 2 paths, pi1 and pi2,
+ * for the incoming I/O.
+ *
+ * Returns:
+ * < 0 : pi1 is better
+ * 0 : no difference between pi1 and pi2
+ * > 0 : pi2 is better
+ *
+ * Description:
+ * Basically, the service time is estimated by:
+ * ('pi->in-flight-size' + 'incoming') / 'pi->relative_throughput'
+ * To reduce the calculation, some optimizations are made.
+ * (See comments inline)
+ */
+static int st_compare_load(struct path_info *pi1, struct path_info *pi2,
+ size_t incoming)
+{
+ size_t sz1, sz2, st1, st2;
+
+ sz1 = atomic_read(&pi1->in_flight_size);
+ sz2 = atomic_read(&pi2->in_flight_size);
+
+ /*
+ * Case 1: Both have same throughput value. Choose less loaded path.
+ */
+ if (pi1->relative_throughput == pi2->relative_throughput)
+ return sz1 - sz2;
+
+ /*
+ * Case 2a: Both have same load. Choose higher throughput path.
+ * Case 2b: One path has no throughput value. Choose the other one.
+ */
+ if (sz1 == sz2 ||
+ !pi1->relative_throughput || !pi2->relative_throughput)
+ return pi2->relative_throughput - pi1->relative_throughput;
+
+ /*
+ * Case 3: Calculate service time. Choose faster path.
+ * Service time using pi1:
+ * st1 = (sz1 + incoming) / pi1->relative_throughput
+ * Service time using pi2:
+ * st2 = (sz2 + incoming) / pi2->relative_throughput
+ *
+ * To avoid the division, transform the expression to use
+ * multiplication.
+ * Because ->relative_throughput > 0 here, if st1 < st2,
+ * the two expressions below are equivalent:
+ * (sz1 + incoming) / pi1->relative_throughput <
+ * (sz2 + incoming) / pi2->relative_throughput
+ * (sz1 + incoming) * pi2->relative_throughput <
+ * (sz2 + incoming) * pi1->relative_throughput
+ * So use the latter one.
+ */
+ sz1 += incoming;
+ sz2 += incoming;
+ if (unlikely(sz1 >= ST_MAX_INFLIGHT_SIZE ||
+ sz2 >= ST_MAX_INFLIGHT_SIZE)) {
+ /*
+ * The sizes may be too large to multiply by pi->relative_throughput
+ * without overflowing.
+ * To avoid overflow and the resulting mis-selection, shift both down.
+ */
+ sz1 >>= ST_MAX_RELATIVE_THROUGHPUT_SHIFT;
+ sz2 >>= ST_MAX_RELATIVE_THROUGHPUT_SHIFT;
+ }
+ st1 = sz1 * pi2->relative_throughput;
+ st2 = sz2 * pi1->relative_throughput;
+ if (st1 != st2)
+ return st1 - st2;
+
+ /*
+ * Case 4: Service time is equal. Choose higher throughput path.
+ */
+ return pi2->relative_throughput - pi1->relative_throughput;
+}
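To make the case analysis above concrete, a small standalone userspace sketch of the same comparison follows (illustrative only, not part of the patch; names, values and the simplified return convention are invented):

  #include <stdio.h>
  #include <stddef.h>

  #define EX_THROUGHPUT_SHIFT	7
  #define EX_MAX_INFLIGHT	((size_t)-1 >> EX_THROUGHPUT_SHIFT)

  /* < 0: path 1 is better, > 0: path 2 is better, 0: no difference */
  static int ex_compare(size_t sz1, unsigned tp1, size_t sz2, unsigned tp2,
  			size_t incoming)
  {
  	size_t st1, st2;

  	/* Case 1: equal throughput, the less loaded path wins */
  	if (tp1 == tp2)
  		return sz1 < sz2 ? -1 : (sz1 > sz2 ? 1 : 0);

  	/* Case 2: equal load or a zero throughput, higher throughput wins */
  	if (sz1 == sz2 || !tp1 || !tp2)
  		return tp1 > tp2 ? -1 : 1;

  	/* Case 3: compare (szN + incoming) / tpN without dividing */
  	sz1 += incoming;
  	sz2 += incoming;
  	if (sz1 >= EX_MAX_INFLIGHT || sz2 >= EX_MAX_INFLIGHT) {
  		/* keep the cross-multiplication from overflowing */
  		sz1 >>= EX_THROUGHPUT_SHIFT;
  		sz2 >>= EX_THROUGHPUT_SHIFT;
  	}
  	st1 = sz1 * tp2;
  	st2 = sz2 * tp1;
  	if (st1 != st2)
  		return st1 < st2 ? -1 : 1;

  	/* Case 4: equal service time, higher throughput wins */
  	return tp1 > tp2 ? -1 : 1;
  }

  int main(void)
  {
  	/* path 1: 64 KiB in flight, relative_throughput 10;
  	 * path 2: 32 KiB in flight, relative_throughput 4; incoming 4 KiB */
  	int r = ex_compare(65536, 10, 32768, 4, 4096);

  	/* (65536+4096)/10 ~= 6963 vs (32768+4096)/4 = 9216: path 1 wins */
  	printf("better path: %d\n", r < 0 ? 1 : 2);
  	return 0;
  }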
+
+static struct dm_path *st_select_path(struct path_selector *ps,
+ unsigned *repeat_count, size_t nr_bytes)
+{
+ struct selector *s = ps->context;
+ struct path_info *pi = NULL, *best = NULL;
+
+ if (list_empty(&s->valid_paths))
+ return NULL;
+
+ /* Change preferred (first in list) path to evenly balance. */
+ list_move_tail(s->valid_paths.next, &s->valid_paths);
+
+ list_for_each_entry(pi, &s->valid_paths, list)
+ if (!best || (st_compare_load(pi, best, nr_bytes) < 0))
+ best = pi;
+
+ if (!best)
+ return NULL;
+
+ *repeat_count = best->repeat_count;
+
+ return best->path;
+}
+
+static int st_start_io(struct path_selector *ps, struct dm_path *path,
+ size_t nr_bytes)
+{
+ struct path_info *pi = path->pscontext;
+
+ atomic_add(nr_bytes, &pi->in_flight_size);
+
+ return 0;
+}
+
+static int st_end_io(struct path_selector *ps, struct dm_path *path,
+ size_t nr_bytes)
+{
+ struct path_info *pi = path->pscontext;
+
+ atomic_sub(nr_bytes, &pi->in_flight_size);
+
+ return 0;
+}
+
+static struct path_selector_type st_ps = {
+ .name = "service-time",
+ .module = THIS_MODULE,
+ .table_args = 2,
+ .info_args = 2,
+ .create = st_create,
+ .destroy = st_destroy,
+ .status = st_status,
+ .add_path = st_add_path,
+ .fail_path = st_fail_path,
+ .reinstate_path = st_reinstate_path,
+ .select_path = st_select_path,
+ .start_io = st_start_io,
+ .end_io = st_end_io,
+};
+
+static int __init dm_st_init(void)
+{
+ int r = dm_register_path_selector(&st_ps);
+
+ if (r < 0)
+ DMERR("register failed %d", r);
+
+ DMINFO("version " ST_VERSION " loaded");
+
+ return r;
+}
+
+static void __exit dm_st_exit(void)
+{
+ int r = dm_unregister_path_selector(&st_ps);
+
+ if (r < 0)
+ DMERR("unregister failed %d", r);
+}
+
+module_init(dm_st_init);
+module_exit(dm_st_exit);
+
+MODULE_DESCRIPTION(DM_NAME " throughput oriented path selector");
+MODULE_AUTHOR("Kiyoshi Ueda <k-ueda@ct.jp.nec.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/md/dm-snap-persistent.c b/drivers/md/dm-snap-persistent.c
index 2662a41..6e3fe4f 100644
--- a/drivers/md/dm-snap-persistent.c
+++ b/drivers/md/dm-snap-persistent.c
@@ -636,7 +636,7 @@
/*
* Commit exceptions to disk.
*/
- if (ps->valid && area_io(ps, WRITE))
+ if (ps->valid && area_io(ps, WRITE_BARRIER))
ps->valid = 0;
/*
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
index d73f17f..d573165 100644
--- a/drivers/md/dm-snap.c
+++ b/drivers/md/dm-snap.c
@@ -678,6 +678,7 @@
ti->private = s;
ti->split_io = s->store->chunk_size;
+ ti->num_flush_requests = 1;
return 0;
@@ -1030,6 +1031,11 @@
chunk_t chunk;
struct dm_snap_pending_exception *pe = NULL;
+ if (unlikely(bio_empty_barrier(bio))) {
+ bio->bi_bdev = s->store->cow->bdev;
+ return DM_MAPIO_REMAPPED;
+ }
+
chunk = sector_to_chunk(s->store, bio->bi_sector);
/* Full snapshots are not usable */
@@ -1338,6 +1344,8 @@
}
ti->private = dev;
+ ti->num_flush_requests = 1;
+
return 0;
}
@@ -1353,6 +1361,9 @@
struct dm_dev *dev = ti->private;
bio->bi_bdev = dev->bdev;
+ if (unlikely(bio_empty_barrier(bio)))
+ return DM_MAPIO_REMAPPED;
+
/* Only tell snapshots if this is a write */
return (bio_rw(bio) == WRITE) ? do_origin(dev, bio) : DM_MAPIO_REMAPPED;
}
diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
index 41569bc..b240e85 100644
--- a/drivers/md/dm-stripe.c
+++ b/drivers/md/dm-stripe.c
@@ -167,6 +167,7 @@
sc->stripes = stripes;
sc->stripe_width = width;
ti->split_io = chunk_size;
+ ti->num_flush_requests = stripes;
sc->chunk_mask = ((sector_t) chunk_size) - 1;
for (sc->chunk_shift = 0; chunk_size; sc->chunk_shift++)
@@ -211,10 +212,18 @@
union map_info *map_context)
{
struct stripe_c *sc = (struct stripe_c *) ti->private;
+ sector_t offset, chunk;
+ uint32_t stripe;
- sector_t offset = bio->bi_sector - ti->begin;
- sector_t chunk = offset >> sc->chunk_shift;
- uint32_t stripe = sector_div(chunk, sc->stripes);
+ if (unlikely(bio_empty_barrier(bio))) {
+ BUG_ON(map_context->flush_request >= sc->stripes);
+ bio->bi_bdev = sc->stripe[map_context->flush_request].dev->bdev;
+ return DM_MAPIO_REMAPPED;
+ }
+
+ offset = bio->bi_sector - ti->begin;
+ chunk = offset >> sc->chunk_shift;
+ stripe = sector_div(chunk, sc->stripes);
bio->bi_bdev = sc->stripe[stripe].dev->bdev;
bio->bi_sector = sc->stripe[stripe].physical_start +
@@ -304,15 +313,31 @@
return error;
}
+static int stripe_iterate_devices(struct dm_target *ti,
+ iterate_devices_callout_fn fn, void *data)
+{
+ struct stripe_c *sc = ti->private;
+ int ret = 0;
+ unsigned i = 0;
+
+ do
+ ret = fn(ti, sc->stripe[i].dev,
+ sc->stripe[i].physical_start, data);
+ while (!ret && ++i < sc->stripes);
+
+ return ret;
+}
+
static struct target_type stripe_target = {
.name = "striped",
- .version = {1, 1, 0},
+ .version = {1, 2, 0},
.module = THIS_MODULE,
.ctr = stripe_ctr,
.dtr = stripe_dtr,
.map = stripe_map,
.end_io = stripe_end_io,
.status = stripe_status,
+ .iterate_devices = stripe_iterate_devices,
};
int __init dm_stripe_init(void)
diff --git a/drivers/md/dm-sysfs.c b/drivers/md/dm-sysfs.c
index a2a45e6..4b04590 100644
--- a/drivers/md/dm-sysfs.c
+++ b/drivers/md/dm-sysfs.c
@@ -57,12 +57,21 @@
return strlen(buf);
}
+static ssize_t dm_attr_suspended_show(struct mapped_device *md, char *buf)
+{
+ sprintf(buf, "%d\n", dm_suspended(md));
+
+ return strlen(buf);
+}
+
static DM_ATTR_RO(name);
static DM_ATTR_RO(uuid);
+static DM_ATTR_RO(suspended);
static struct attribute *dm_attrs[] = {
&dm_attr_name.attr,
&dm_attr_uuid.attr,
+ &dm_attr_suspended.attr,
NULL,
};
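A minimal userspace sketch of reading the new attribute (not part of the patch; the path below assumes the dm kobject sits at /sys/block/dm-<minor>/dm/ alongside the existing name and uuid attributes, and the minor number is made up):

  #include <stdio.h>

  int main(void)
  {
  	char buf[8] = "";
  	FILE *f = fopen("/sys/block/dm-0/dm/suspended", "r");

  	if (!f) {
  		perror("suspended attribute");
  		return 1;
  	}
  	if (fgets(buf, sizeof(buf), f))
  		printf("dm-0 suspended: %s", buf);	/* prints 0 or 1 */
  	fclose(f);
  	return 0;
  }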
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index e9a73bb..4899ebe 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -41,6 +41,7 @@
struct dm_table {
struct mapped_device *md;
atomic_t holders;
+ unsigned type;
/* btree table */
unsigned int depth;
@@ -62,15 +63,11 @@
/* a list of devices used by this table */
struct list_head devices;
- /*
- * These are optimistic limits taken from all the
- * targets, some targets will need smaller limits.
- */
- struct io_restrictions limits;
-
/* events get handed up using this callback */
void (*event_fn)(void *);
void *event_context;
+
+ struct dm_md_mempools *mempools;
};
/*
@@ -89,43 +86,6 @@
}
/*
- * Returns the minimum that is _not_ zero, unless both are zero.
- */
-#define min_not_zero(l, r) (l == 0) ? r : ((r == 0) ? l : min(l, r))
-
-/*
- * Combine two io_restrictions, always taking the lower value.
- */
-static void combine_restrictions_low(struct io_restrictions *lhs,
- struct io_restrictions *rhs)
-{
- lhs->max_sectors =
- min_not_zero(lhs->max_sectors, rhs->max_sectors);
-
- lhs->max_phys_segments =
- min_not_zero(lhs->max_phys_segments, rhs->max_phys_segments);
-
- lhs->max_hw_segments =
- min_not_zero(lhs->max_hw_segments, rhs->max_hw_segments);
-
- lhs->logical_block_size = max(lhs->logical_block_size,
- rhs->logical_block_size);
-
- lhs->max_segment_size =
- min_not_zero(lhs->max_segment_size, rhs->max_segment_size);
-
- lhs->max_hw_sectors =
- min_not_zero(lhs->max_hw_sectors, rhs->max_hw_sectors);
-
- lhs->seg_boundary_mask =
- min_not_zero(lhs->seg_boundary_mask, rhs->seg_boundary_mask);
-
- lhs->bounce_pfn = min_not_zero(lhs->bounce_pfn, rhs->bounce_pfn);
-
- lhs->no_cluster |= rhs->no_cluster;
-}
-
-/*
* Calculate the index of the child node of the n'th node k'th key.
*/
static inline unsigned int get_child(unsigned int n, unsigned int k)
@@ -267,6 +227,8 @@
list_for_each_safe(tmp, next, devices) {
struct dm_dev_internal *dd =
list_entry(tmp, struct dm_dev_internal, list);
+ DMWARN("dm_table_destroy: dm_put_device call missing for %s",
+ dd->dm_dev.name);
kfree(dd);
}
}
@@ -296,12 +258,10 @@
vfree(t->highs);
/* free the device list */
- if (t->devices.next != &t->devices) {
- DMWARN("devices still present during destroy: "
- "dm_table_remove_device calls missing");
-
+ if (t->devices.next != &t->devices)
free_devices(&t->devices);
- }
+
+ dm_free_md_mempools(t->mempools);
kfree(t);
}
@@ -385,15 +345,48 @@
/*
* If possible, this checks an area of a destination device is valid.
*/
-static int check_device_area(struct dm_dev_internal *dd, sector_t start,
- sector_t len)
+static int device_area_is_valid(struct dm_target *ti, struct dm_dev *dev,
+ sector_t start, void *data)
{
- sector_t dev_size = dd->dm_dev.bdev->bd_inode->i_size >> SECTOR_SHIFT;
+ struct queue_limits *limits = data;
+ struct block_device *bdev = dev->bdev;
+ sector_t dev_size =
+ i_size_read(bdev->bd_inode) >> SECTOR_SHIFT;
+ unsigned short logical_block_size_sectors =
+ limits->logical_block_size >> SECTOR_SHIFT;
+ char b[BDEVNAME_SIZE];
if (!dev_size)
return 1;
- return ((start < dev_size) && (len <= (dev_size - start)));
+ if ((start >= dev_size) || (start + ti->len > dev_size)) {
+ DMWARN("%s: %s too small for target",
+ dm_device_name(ti->table->md), bdevname(bdev, b));
+ return 0;
+ }
+
+ if (logical_block_size_sectors <= 1)
+ return 1;
+
+ if (start & (logical_block_size_sectors - 1)) {
+ DMWARN("%s: start=%llu not aligned to h/w "
+ "logical block size %hu of %s",
+ dm_device_name(ti->table->md),
+ (unsigned long long)start,
+ limits->logical_block_size, bdevname(bdev, b));
+ return 0;
+ }
+
+ if (ti->len & (logical_block_size_sectors - 1)) {
+ DMWARN("%s: len=%llu not aligned to h/w "
+ "logical block size %hu of %s",
+ dm_device_name(ti->table->md),
+ (unsigned long long)ti->len,
+ limits->logical_block_size, bdevname(bdev, b));
+ return 0;
+ }
+
+ return 1;
}
/*
@@ -479,38 +472,32 @@
}
atomic_inc(&dd->count);
- if (!check_device_area(dd, start, len)) {
- DMWARN("device %s too small for target", path);
- dm_put_device(ti, &dd->dm_dev);
- return -EINVAL;
- }
-
*result = &dd->dm_dev;
-
return 0;
}
-void dm_set_device_limits(struct dm_target *ti, struct block_device *bdev)
+/*
+ * Returns the minimum that is _not_ zero, unless both are zero.
+ */
+#define min_not_zero(l, r) (l == 0) ? r : ((r == 0) ? l : min(l, r))
+
+int dm_set_device_limits(struct dm_target *ti, struct dm_dev *dev,
+ sector_t start, void *data)
{
+ struct queue_limits *limits = data;
+ struct block_device *bdev = dev->bdev;
struct request_queue *q = bdev_get_queue(bdev);
- struct io_restrictions *rs = &ti->limits;
char b[BDEVNAME_SIZE];
if (unlikely(!q)) {
DMWARN("%s: Cannot set limits for nonexistent device %s",
dm_device_name(ti->table->md), bdevname(bdev, b));
- return;
+ return 0;
}
- /*
- * Combine the device limits low.
- *
- * FIXME: if we move an io_restriction struct
- * into q this would just be a call to
- * combine_restrictions_low()
- */
- rs->max_sectors =
- min_not_zero(rs->max_sectors, queue_max_sectors(q));
+ if (blk_stack_limits(limits, &q->limits, start) < 0)
+ DMWARN("%s: target device %s is misaligned",
+ dm_device_name(ti->table->md), bdevname(bdev, b));
/*
* Check if merge fn is supported.
@@ -519,48 +506,21 @@
*/
if (q->merge_bvec_fn && !ti->type->merge)
- rs->max_sectors =
- min_not_zero(rs->max_sectors,
+ limits->max_sectors =
+ min_not_zero(limits->max_sectors,
(unsigned int) (PAGE_SIZE >> 9));
-
- rs->max_phys_segments =
- min_not_zero(rs->max_phys_segments,
- queue_max_phys_segments(q));
-
- rs->max_hw_segments =
- min_not_zero(rs->max_hw_segments, queue_max_hw_segments(q));
-
- rs->logical_block_size = max(rs->logical_block_size,
- queue_logical_block_size(q));
-
- rs->max_segment_size =
- min_not_zero(rs->max_segment_size, queue_max_segment_size(q));
-
- rs->max_hw_sectors =
- min_not_zero(rs->max_hw_sectors, queue_max_hw_sectors(q));
-
- rs->seg_boundary_mask =
- min_not_zero(rs->seg_boundary_mask,
- queue_segment_boundary(q));
-
- rs->bounce_pfn = min_not_zero(rs->bounce_pfn, queue_bounce_pfn(q));
-
- rs->no_cluster |= !test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags);
+ return 0;
}
EXPORT_SYMBOL_GPL(dm_set_device_limits);
int dm_get_device(struct dm_target *ti, const char *path, sector_t start,
sector_t len, fmode_t mode, struct dm_dev **result)
{
- int r = __table_get_device(ti->table, ti, path,
- start, len, mode, result);
-
- if (!r)
- dm_set_device_limits(ti, (*result)->bdev);
-
- return r;
+ return __table_get_device(ti->table, ti, path,
+ start, len, mode, result);
}
+
/*
* Decrement a devices use count and remove it if necessary.
*/
@@ -675,24 +635,78 @@
return 0;
}
-static void check_for_valid_limits(struct io_restrictions *rs)
+/*
+ * Impose necessary and sufficient conditions on a device's table such
+ * that any incoming bio which respects its logical_block_size can be
+ * processed successfully. If it falls across the boundary between
+ * two or more targets, the size of each piece it gets split into must
+ * be compatible with the logical_block_size of the target processing it.
+ */
+static int validate_hardware_logical_block_alignment(struct dm_table *table,
+ struct queue_limits *limits)
{
- if (!rs->max_sectors)
- rs->max_sectors = SAFE_MAX_SECTORS;
- if (!rs->max_hw_sectors)
- rs->max_hw_sectors = SAFE_MAX_SECTORS;
- if (!rs->max_phys_segments)
- rs->max_phys_segments = MAX_PHYS_SEGMENTS;
- if (!rs->max_hw_segments)
- rs->max_hw_segments = MAX_HW_SEGMENTS;
- if (!rs->logical_block_size)
- rs->logical_block_size = 1 << SECTOR_SHIFT;
- if (!rs->max_segment_size)
- rs->max_segment_size = MAX_SEGMENT_SIZE;
- if (!rs->seg_boundary_mask)
- rs->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK;
- if (!rs->bounce_pfn)
- rs->bounce_pfn = -1;
+ /*
+ * This function uses arithmetic modulo the logical_block_size
+ * (in units of 512-byte sectors).
+ */
+ unsigned short device_logical_block_size_sects =
+ limits->logical_block_size >> SECTOR_SHIFT;
+
+ /*
+ * Offset of the start of the next table entry, mod logical_block_size.
+ */
+ unsigned short next_target_start = 0;
+
+ /*
+ * Given an aligned bio that extends beyond the end of a
+ * target, how many sectors must the next target handle?
+ */
+ unsigned short remaining = 0;
+
+ struct dm_target *uninitialized_var(ti);
+ struct queue_limits ti_limits;
+ unsigned i = 0;
+
+ /*
+ * Check each entry in the table in turn.
+ */
+ while (i < dm_table_get_num_targets(table)) {
+ ti = dm_table_get_target(table, i++);
+
+ blk_set_default_limits(&ti_limits);
+
+ /* combine all target devices' limits */
+ if (ti->type->iterate_devices)
+ ti->type->iterate_devices(ti, dm_set_device_limits,
+ &ti_limits);
+
+ /*
+ * If the remaining sectors fall entirely within this
+ * table entry, are they compatible with its logical_block_size?
+ */
+ if (remaining < ti->len &&
+ remaining & ((ti_limits.logical_block_size >>
+ SECTOR_SHIFT) - 1))
+ break; /* Error */
+
+ next_target_start =
+ (unsigned short) ((next_target_start + ti->len) &
+ (device_logical_block_size_sects - 1));
+ remaining = next_target_start ?
+ device_logical_block_size_sects - next_target_start : 0;
+ }
+
+ if (remaining) {
+ DMWARN("%s: table line %u (start sect %llu len %llu) "
+ "not aligned to h/w logical block size %hu",
+ dm_device_name(table->md), i,
+ (unsigned long long) ti->begin,
+ (unsigned long long) ti->len,
+ limits->logical_block_size);
+ return -EINVAL;
+ }
+
+ return 0;
}
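A toy userspace walk-through of the boundary arithmetic above (not part of the patch; the target lengths and the single shared 4 KiB logical block size are invented, whereas the real code re-reads each target's own combined limits):

  #include <stdio.h>

  int main(void)
  {
  	/* two hypothetical targets, lengths in 512-byte sectors */
  	unsigned long long len[2] = { 1001, 999 };
  	unsigned short lbs_sects = 4096 >> 9;	/* 4 KiB logical block = 8 sectors */
  	unsigned short next_start = 0, remaining = 0;
  	int i;

  	for (i = 0; i < 2; i++) {
  		/* would a block-aligned bio spanning into this target
  		 * leave it a partial logical block to handle? */
  		if (remaining && remaining < len[i] &&
  		    (remaining & (lbs_sects - 1))) {
  			printf("target %d starts %u sector(s) past a block edge: reject\n",
  			       i, next_start);
  			return 1;
  		}
  		next_start = (unsigned short)((next_start + len[i]) &
  					      (lbs_sects - 1));
  		remaining = next_start ? lbs_sects - next_start : 0;
  	}
  	printf("all target boundaries are logical-block aligned\n");
  	return 0;
  }

With these values the first target ends 1001 sectors in, so the second target begins one sector into a 4 KiB block and the table would be rejected; changing the first length to 1000 makes both boundaries aligned.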
int dm_table_add_target(struct dm_table *t, const char *type,
@@ -747,9 +761,6 @@
t->highs[t->num_targets++] = tgt->begin + tgt->len - 1;
- /* FIXME: the plan is to combine high here and then have
- * the merge fn apply the target level restrictions. */
- combine_restrictions_low(&t->limits, &tgt->limits);
return 0;
bad:
@@ -758,6 +769,104 @@
return r;
}
+int dm_table_set_type(struct dm_table *t)
+{
+ unsigned i;
+ unsigned bio_based = 0, request_based = 0;
+ struct dm_target *tgt;
+ struct dm_dev_internal *dd;
+ struct list_head *devices;
+
+ for (i = 0; i < t->num_targets; i++) {
+ tgt = t->targets + i;
+ if (dm_target_request_based(tgt))
+ request_based = 1;
+ else
+ bio_based = 1;
+
+ if (bio_based && request_based) {
+ DMWARN("Inconsistent table: different target types"
+ " can't be mixed up");
+ return -EINVAL;
+ }
+ }
+
+ if (bio_based) {
+ /* We must use this table as bio-based */
+ t->type = DM_TYPE_BIO_BASED;
+ return 0;
+ }
+
+ BUG_ON(!request_based); /* No targets in this table */
+
+ /* Non-request-stackable devices can't be used for request-based dm */
+ devices = dm_table_get_devices(t);
+ list_for_each_entry(dd, devices, list) {
+ if (!blk_queue_stackable(bdev_get_queue(dd->dm_dev.bdev))) {
+ DMWARN("table load rejected: including"
+ " non-request-stackable devices");
+ return -EINVAL;
+ }
+ }
+
+ /*
+ * Request-based dm supports only tables that have a single target now.
+ * To support multiple targets, request splitting support is needed,
+ * and that needs lots of changes in the block-layer.
+ * (e.g. request completion process for partial completion.)
+ */
+ if (t->num_targets > 1) {
+ DMWARN("Request-based dm doesn't support multiple targets yet");
+ return -EINVAL;
+ }
+
+ t->type = DM_TYPE_REQUEST_BASED;
+
+ return 0;
+}
+
+unsigned dm_table_get_type(struct dm_table *t)
+{
+ return t->type;
+}
+
+bool dm_table_bio_based(struct dm_table *t)
+{
+ return dm_table_get_type(t) == DM_TYPE_BIO_BASED;
+}
+
+bool dm_table_request_based(struct dm_table *t)
+{
+ return dm_table_get_type(t) == DM_TYPE_REQUEST_BASED;
+}
+
+int dm_table_alloc_md_mempools(struct dm_table *t)
+{
+ unsigned type = dm_table_get_type(t);
+
+ if (unlikely(type == DM_TYPE_NONE)) {
+ DMWARN("no table type is set, can't allocate mempools");
+ return -EINVAL;
+ }
+
+ t->mempools = dm_alloc_md_mempools(type);
+ if (!t->mempools)
+ return -ENOMEM;
+
+ return 0;
+}
+
+void dm_table_free_md_mempools(struct dm_table *t)
+{
+ dm_free_md_mempools(t->mempools);
+ t->mempools = NULL;
+}
+
+struct dm_md_mempools *dm_table_get_md_mempools(struct dm_table *t)
+{
+ return t->mempools;
+}
+
static int setup_indexes(struct dm_table *t)
{
int i;
@@ -792,8 +901,6 @@
int r = 0;
unsigned int leaf_nodes;
- check_for_valid_limits(&t->limits);
-
/* how many indexes will the btree have ? */
leaf_nodes = dm_div_up(t->num_targets, KEYS_PER_NODE);
t->depth = 1 + int_log(leaf_nodes, CHILDREN_PER_NODE);
@@ -869,6 +976,57 @@
}
/*
+ * Establish the new table's queue_limits and validate them.
+ */
+int dm_calculate_queue_limits(struct dm_table *table,
+ struct queue_limits *limits)
+{
+ struct dm_target *uninitialized_var(ti);
+ struct queue_limits ti_limits;
+ unsigned i = 0;
+
+ blk_set_default_limits(limits);
+
+ while (i < dm_table_get_num_targets(table)) {
+ blk_set_default_limits(&ti_limits);
+
+ ti = dm_table_get_target(table, i++);
+
+ if (!ti->type->iterate_devices)
+ goto combine_limits;
+
+ /*
+ * Combine queue limits of all the devices this target uses.
+ */
+ ti->type->iterate_devices(ti, dm_set_device_limits,
+ &ti_limits);
+
+ /*
+ * Check each device area is consistent with the target's
+ * overall queue limits.
+ */
+ if (!ti->type->iterate_devices(ti, device_area_is_valid,
+ &ti_limits))
+ return -EINVAL;
+
+combine_limits:
+ /*
+ * Merge this target's queue limits into the overall limits
+ * for the table.
+ */
+ if (blk_stack_limits(limits, &ti_limits, 0) < 0)
+ DMWARN("%s: target device "
+ "(start sect %llu len %llu) "
+ "is misaligned",
+ dm_device_name(table->md),
+ (unsigned long long) ti->begin,
+ (unsigned long long) ti->len);
+ }
+
+ return validate_hardware_logical_block_alignment(table, limits);
+}
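For intuition about what the blk_stack_limits() calls above accumulate per target, a toy userspace reduction (not part of the patch; the structure, the two fields and the values are invented, and the real helper combines many more fields plus alignment handling):

  #include <stdio.h>

  struct toy_limits {
  	unsigned max_sectors;		/* 0 means "no limit yet" */
  	unsigned logical_block_size;
  };

  /* restrictive values win; for block size the coarser granularity wins */
  static void toy_stack(struct toy_limits *t, const struct toy_limits *b)
  {
  	if (b->max_sectors && (!t->max_sectors || b->max_sectors < t->max_sectors))
  		t->max_sectors = b->max_sectors;
  	if (b->logical_block_size > t->logical_block_size)
  		t->logical_block_size = b->logical_block_size;
  }

  int main(void)
  {
  	struct toy_limits table = { 0, 512 };		/* default limits */
  	struct toy_limits sda = { 1024, 512 };		/* 512 KiB max I/O, 512 B blocks */
  	struct toy_limits sdb = { 256, 4096 };		/* 128 KiB max I/O, 4 KiB blocks */

  	toy_stack(&table, &sda);
  	toy_stack(&table, &sdb);
  	printf("stacked: max_sectors=%u logical_block_size=%u\n",
  	       table.max_sectors, table.logical_block_size);	/* 256, 4096 */
  	return 0;
  }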
+
+/*
* Set the integrity profile for this device if all devices used have
* matching profiles.
*/
@@ -907,27 +1065,42 @@
return;
}
-void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q)
+void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
+ struct queue_limits *limits)
{
/*
- * Make sure we obey the optimistic sub devices
- * restrictions.
+ * Each target device in the table has a data area that should normally
+ * be aligned such that the DM device's alignment_offset is 0.
+ * FIXME: Propagate alignment_offsets up the stack and warn of
+ * sub-optimal or inconsistent settings.
*/
- blk_queue_max_sectors(q, t->limits.max_sectors);
- blk_queue_max_phys_segments(q, t->limits.max_phys_segments);
- blk_queue_max_hw_segments(q, t->limits.max_hw_segments);
- blk_queue_logical_block_size(q, t->limits.logical_block_size);
- blk_queue_max_segment_size(q, t->limits.max_segment_size);
- blk_queue_max_hw_sectors(q, t->limits.max_hw_sectors);
- blk_queue_segment_boundary(q, t->limits.seg_boundary_mask);
- blk_queue_bounce_limit(q, t->limits.bounce_pfn);
+ limits->alignment_offset = 0;
+ limits->misaligned = 0;
- if (t->limits.no_cluster)
+ /*
+ * Copy table's limits to the DM device's request_queue
+ */
+ q->limits = *limits;
+
+ if (limits->no_cluster)
queue_flag_clear_unlocked(QUEUE_FLAG_CLUSTER, q);
else
queue_flag_set_unlocked(QUEUE_FLAG_CLUSTER, q);
dm_table_set_integrity(t);
+
+ /*
+ * QUEUE_FLAG_STACKABLE must be set after all queue settings are
+ * visible to other CPUs because, once the flag is set, incoming bios
+ * are processed by request-based dm, which refers to the queue
+ * settings.
+ * Until the flag is set, bios are passed to bio-based dm and queued to
+ * md->deferred where queue settings are not needed yet.
+ * Those bios are passed to request-based dm at resume time.
+ */
+ smp_mb();
+ if (dm_table_request_based(t))
+ queue_flag_set_unlocked(QUEUE_FLAG_STACKABLE, q);
}
unsigned int dm_table_get_num_targets(struct dm_table *t)
@@ -1023,6 +1196,20 @@
return r;
}
+int dm_table_any_busy_target(struct dm_table *t)
+{
+ unsigned i;
+ struct dm_target *ti;
+
+ for (i = 0; i < t->num_targets; i++) {
+ ti = t->targets + i;
+ if (ti->type->busy && ti->type->busy(ti))
+ return 1;
+ }
+
+ return 0;
+}
+
void dm_table_unplug_all(struct dm_table *t)
{
struct dm_dev_internal *dd;
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 48db308..3c6d4ee 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -24,6 +24,13 @@
#define DM_MSG_PREFIX "core"
+/*
+ * Cookies are numeric values sent with CHANGE and REMOVE
+ * uevents while resuming, removing or renaming the device.
+ */
+#define DM_COOKIE_ENV_VAR_NAME "DM_COOKIE"
+#define DM_COOKIE_LENGTH 24
+
static const char *_name = DM_NAME;
static unsigned int major = 0;
@@ -71,7 +78,7 @@
*/
struct dm_rq_clone_bio_info {
struct bio *orig;
- struct request *rq;
+ struct dm_rq_target_io *tio;
};
union map_info *dm_get_mapinfo(struct bio *bio)
@@ -81,6 +88,14 @@
return NULL;
}
+union map_info *dm_get_rq_mapinfo(struct request *rq)
+{
+ if (rq && rq->end_io_data)
+ return &((struct dm_rq_target_io *)rq->end_io_data)->info;
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(dm_get_rq_mapinfo);
+
#define MINOR_ALLOCED ((void *)-1)
/*
@@ -157,13 +172,31 @@
* freeze/thaw support require holding onto a super block
*/
struct super_block *frozen_sb;
- struct block_device *suspended_bdev;
+ struct block_device *bdev;
/* forced geometry settings */
struct hd_geometry geometry;
+ /* marker of flush suspend for request-based dm */
+ struct request suspend_rq;
+
+ /* For saving the address of __make_request for request based dm */
+ make_request_fn *saved_make_request_fn;
+
/* sysfs handle */
struct kobject kobj;
+
+ /* zero-length barrier that will be cloned and submitted to targets */
+ struct bio barrier_bio;
+};
+
+/*
+ * For mempools pre-allocation at the table loading time.
+ */
+struct dm_md_mempools {
+ mempool_t *io_pool;
+ mempool_t *tio_pool;
+ struct bio_set *bs;
};
#define MIN_IOS 256
@@ -391,16 +424,31 @@
mempool_free(io, md->io_pool);
}
-static struct dm_target_io *alloc_tio(struct mapped_device *md)
-{
- return mempool_alloc(md->tio_pool, GFP_NOIO);
-}
-
static void free_tio(struct mapped_device *md, struct dm_target_io *tio)
{
mempool_free(tio, md->tio_pool);
}
+static struct dm_rq_target_io *alloc_rq_tio(struct mapped_device *md)
+{
+ return mempool_alloc(md->tio_pool, GFP_ATOMIC);
+}
+
+static void free_rq_tio(struct dm_rq_target_io *tio)
+{
+ mempool_free(tio, tio->md->tio_pool);
+}
+
+static struct dm_rq_clone_bio_info *alloc_bio_info(struct mapped_device *md)
+{
+ return mempool_alloc(md->io_pool, GFP_ATOMIC);
+}
+
+static void free_bio_info(struct dm_rq_clone_bio_info *info)
+{
+ mempool_free(info, info->tio->md->io_pool);
+}
+
static void start_io_acct(struct dm_io *io)
{
struct mapped_device *md = io->md;
@@ -464,12 +512,13 @@
struct dm_table *dm_get_table(struct mapped_device *md)
{
struct dm_table *t;
+ unsigned long flags;
- read_lock(&md->map_lock);
+ read_lock_irqsave(&md->map_lock, flags);
t = md->map;
if (t)
dm_table_get(t);
- read_unlock(&md->map_lock);
+ read_unlock_irqrestore(&md->map_lock, flags);
return t;
}
@@ -536,9 +585,11 @@
* Target requested pushing back the I/O.
*/
spin_lock_irqsave(&md->deferred_lock, flags);
- if (__noflush_suspending(md))
- bio_list_add_head(&md->deferred, io->bio);
- else
+ if (__noflush_suspending(md)) {
+ if (!bio_barrier(io->bio))
+ bio_list_add_head(&md->deferred,
+ io->bio);
+ } else
/* noflush suspend was interrupted. */
io->error = -EIO;
spin_unlock_irqrestore(&md->deferred_lock, flags);
@@ -553,7 +604,8 @@
* a per-device variable for error reporting.
* Note that you can't touch the bio after end_io_acct
*/
- md->barrier_error = io_error;
+ if (!md->barrier_error && io_error != -EOPNOTSUPP)
+ md->barrier_error = io_error;
end_io_acct(io);
} else {
end_io_acct(io);
@@ -607,6 +659,262 @@
dec_pending(io, error);
}
+/*
+ * Partial completion handling for request-based dm
+ */
+static void end_clone_bio(struct bio *clone, int error)
+{
+ struct dm_rq_clone_bio_info *info = clone->bi_private;
+ struct dm_rq_target_io *tio = info->tio;
+ struct bio *bio = info->orig;
+ unsigned int nr_bytes = info->orig->bi_size;
+
+ bio_put(clone);
+
+ if (tio->error)
+ /*
+ * An error has already been detected on the request.
+ * Once an error has occurred, just let clone->end_io() handle
+ * the remainder.
+ */
+ return;
+ else if (error) {
+ /*
+ * Don't notify the upper layer of the error yet.
+ * The error handling decision is made by the target driver,
+ * when the request is completed.
+ */
+ tio->error = error;
+ return;
+ }
+
+ /*
+ * I/O for the bio completed successfully.
+ * Notify the upper layer of the data completion.
+ */
+
+ /*
+ * bios are processed from the head of the list.
+ * So the completing bio should always be rq->bio.
+ * If it's not, something wrong is happening.
+ */
+ if (tio->orig->bio != bio)
+ DMERR("bio completion is going in the middle of the request");
+
+ /*
+ * Update the original request.
+ * Do not use blk_end_request() here, because it may complete
+ * the original request before the clone, and break the ordering.
+ */
+ blk_update_request(tio->orig, 0, nr_bytes);
+}
+
+/*
+ * Don't touch any member of the md after calling this function because
+ * the md may be freed in dm_put() at the end of this function.
+ * Or do dm_get() before calling this function and dm_put() later.
+ */
+static void rq_completed(struct mapped_device *md, int run_queue)
+{
+ int wakeup_waiters = 0;
+ struct request_queue *q = md->queue;
+ unsigned long flags;
+
+ spin_lock_irqsave(q->queue_lock, flags);
+ if (!queue_in_flight(q))
+ wakeup_waiters = 1;
+ spin_unlock_irqrestore(q->queue_lock, flags);
+
+ /* nudge anyone waiting on suspend queue */
+ if (wakeup_waiters)
+ wake_up(&md->wait);
+
+ if (run_queue)
+ blk_run_queue(q);
+
+ /*
+ * dm_put() must be at the end of this function. See the comment above
+ */
+ dm_put(md);
+}
+
+static void dm_unprep_request(struct request *rq)
+{
+ struct request *clone = rq->special;
+ struct dm_rq_target_io *tio = clone->end_io_data;
+
+ rq->special = NULL;
+ rq->cmd_flags &= ~REQ_DONTPREP;
+
+ blk_rq_unprep_clone(clone);
+ free_rq_tio(tio);
+}
+
+/*
+ * Requeue the original request of a clone.
+ */
+void dm_requeue_unmapped_request(struct request *clone)
+{
+ struct dm_rq_target_io *tio = clone->end_io_data;
+ struct mapped_device *md = tio->md;
+ struct request *rq = tio->orig;
+ struct request_queue *q = rq->q;
+ unsigned long flags;
+
+ dm_unprep_request(rq);
+
+ spin_lock_irqsave(q->queue_lock, flags);
+ if (elv_queue_empty(q))
+ blk_plug_device(q);
+ blk_requeue_request(q, rq);
+ spin_unlock_irqrestore(q->queue_lock, flags);
+
+ rq_completed(md, 0);
+}
+EXPORT_SYMBOL_GPL(dm_requeue_unmapped_request);
+
+static void __stop_queue(struct request_queue *q)
+{
+ blk_stop_queue(q);
+}
+
+static void stop_queue(struct request_queue *q)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(q->queue_lock, flags);
+ __stop_queue(q);
+ spin_unlock_irqrestore(q->queue_lock, flags);
+}
+
+static void __start_queue(struct request_queue *q)
+{
+ if (blk_queue_stopped(q))
+ blk_start_queue(q);
+}
+
+static void start_queue(struct request_queue *q)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(q->queue_lock, flags);
+ __start_queue(q);
+ spin_unlock_irqrestore(q->queue_lock, flags);
+}
+
+/*
+ * Complete the clone and the original request.
+ * Must be called without queue lock.
+ */
+static void dm_end_request(struct request *clone, int error)
+{
+ struct dm_rq_target_io *tio = clone->end_io_data;
+ struct mapped_device *md = tio->md;
+ struct request *rq = tio->orig;
+
+ if (blk_pc_request(rq)) {
+ rq->errors = clone->errors;
+ rq->resid_len = clone->resid_len;
+
+ if (rq->sense)
+ /*
+ * We are using the sense buffer of the original
+ * request.
+ * So setting the length of the sense data is enough.
+ */
+ rq->sense_len = clone->sense_len;
+ }
+
+ BUG_ON(clone->bio);
+ free_rq_tio(tio);
+
+ blk_end_request_all(rq, error);
+
+ rq_completed(md, 1);
+}
+
+/*
+ * Request completion handler for request-based dm
+ */
+static void dm_softirq_done(struct request *rq)
+{
+ struct request *clone = rq->completion_data;
+ struct dm_rq_target_io *tio = clone->end_io_data;
+ dm_request_endio_fn rq_end_io = tio->ti->type->rq_end_io;
+ int error = tio->error;
+
+ if (!(rq->cmd_flags & REQ_FAILED) && rq_end_io)
+ error = rq_end_io(tio->ti, clone, error, &tio->info);
+
+ if (error <= 0)
+ /* The target wants to complete the I/O */
+ dm_end_request(clone, error);
+ else if (error == DM_ENDIO_INCOMPLETE)
+ /* The target will handle the I/O */
+ return;
+ else if (error == DM_ENDIO_REQUEUE)
+ /* The target wants to requeue the I/O */
+ dm_requeue_unmapped_request(clone);
+ else {
+ DMWARN("unimplemented target endio return value: %d", error);
+ BUG();
+ }
+}
+
+/*
+ * Complete the clone and the original request with the error status
+ * through softirq context.
+ */
+static void dm_complete_request(struct request *clone, int error)
+{
+ struct dm_rq_target_io *tio = clone->end_io_data;
+ struct request *rq = tio->orig;
+
+ tio->error = error;
+ rq->completion_data = clone;
+ blk_complete_request(rq);
+}
+
+/*
+ * Complete the not-mapped clone and the original request with the error status
+ * through softirq context.
+ * Target's rq_end_io() function isn't called.
+ * This may be used when the target's map_rq() function fails.
+ */
+void dm_kill_unmapped_request(struct request *clone, int error)
+{
+ struct dm_rq_target_io *tio = clone->end_io_data;
+ struct request *rq = tio->orig;
+
+ rq->cmd_flags |= REQ_FAILED;
+ dm_complete_request(clone, error);
+}
+EXPORT_SYMBOL_GPL(dm_kill_unmapped_request);
+
+/*
+ * Called with the queue lock held
+ */
+static void end_clone_request(struct request *clone, int error)
+{
+ /*
+ * This only cleans up the bookkeeping of the queue in which
+ * the clone was dispatched.
+ * The clone is *NOT* actually freed here because it is allocated from
+ * dm's own mempool and REQ_ALLOCED isn't set in clone->cmd_flags.
+ */
+ __blk_put_request(clone->q, clone);
+
+ /*
+ * Actual request completion is done in a softirq context which doesn't
+ * hold the queue lock. Otherwise, deadlock could occur because:
+ * - another request may be submitted by the stacked upper-level
+ * driver during the completion
+ * - that submission, which requires the queue lock, may be done
+ * against this queue
+ */
+ dm_complete_request(clone, error);
+}
+
static sector_t max_io_len(struct mapped_device *md,
sector_t sector, struct dm_target *ti)
{
@@ -634,11 +942,6 @@
sector_t sector;
struct mapped_device *md;
- /*
- * Sanity checks.
- */
- BUG_ON(!clone->bi_size);
-
clone->bi_end_io = clone_endio;
clone->bi_private = tio;
@@ -752,6 +1055,48 @@
return clone;
}
+static struct dm_target_io *alloc_tio(struct clone_info *ci,
+ struct dm_target *ti)
+{
+ struct dm_target_io *tio = mempool_alloc(ci->md->tio_pool, GFP_NOIO);
+
+ tio->io = ci->io;
+ tio->ti = ti;
+ memset(&tio->info, 0, sizeof(tio->info));
+
+ return tio;
+}
+
+static void __flush_target(struct clone_info *ci, struct dm_target *ti,
+ unsigned flush_nr)
+{
+ struct dm_target_io *tio = alloc_tio(ci, ti);
+ struct bio *clone;
+
+ tio->info.flush_request = flush_nr;
+
+ clone = bio_alloc_bioset(GFP_NOIO, 0, ci->md->bs);
+ __bio_clone(clone, ci->bio);
+ clone->bi_destructor = dm_bio_destructor;
+
+ __map_bio(ti, clone, tio);
+}
+
+static int __clone_and_map_empty_barrier(struct clone_info *ci)
+{
+ unsigned target_nr = 0, flush_nr;
+ struct dm_target *ti;
+
+ while ((ti = dm_table_get_target(ci->map, target_nr++)))
+ for (flush_nr = 0; flush_nr < ti->num_flush_requests;
+ flush_nr++)
+ __flush_target(ci, ti, flush_nr);
+
+ ci->sector_count = 0;
+
+ return 0;
+}
+
static int __clone_and_map(struct clone_info *ci)
{
struct bio *clone, *bio = ci->bio;
@@ -759,6 +1104,9 @@
sector_t len = 0, max;
struct dm_target_io *tio;
+ if (unlikely(bio_empty_barrier(bio)))
+ return __clone_and_map_empty_barrier(ci);
+
ti = dm_table_find_target(ci->map, ci->sector);
if (!dm_target_is_valid(ti))
return -EIO;
@@ -768,10 +1116,7 @@
/*
* Allocate a target io object.
*/
- tio = alloc_tio(ci->md);
- tio->io = ci->io;
- tio->ti = ti;
- memset(&tio->info, 0, sizeof(tio->info));
+ tio = alloc_tio(ci, ti);
if (ci->sector_count <= max) {
/*
@@ -827,10 +1172,7 @@
max = max_io_len(ci->md, ci->sector, ti);
- tio = alloc_tio(ci->md);
- tio->io = ci->io;
- tio->ti = ti;
- memset(&tio->info, 0, sizeof(tio->info));
+ tio = alloc_tio(ci, ti);
}
len = min(remaining, max);
@@ -865,7 +1207,8 @@
if (!bio_barrier(bio))
bio_io_error(bio);
else
- md->barrier_error = -EIO;
+ if (!md->barrier_error)
+ md->barrier_error = -EIO;
return;
}
@@ -878,6 +1221,8 @@
ci.io->md = md;
ci.sector = bio->bi_sector;
ci.sector_count = bio_sectors(bio);
+ if (unlikely(bio_empty_barrier(bio)))
+ ci.sector_count = 1;
ci.idx = bio->bi_idx;
start_io_acct(ci.io);
@@ -925,6 +1270,16 @@
*/
if (max_size && ti->type->merge)
max_size = ti->type->merge(ti, bvm, biovec, max_size);
+ /*
+ * If the target doesn't support merge method and some of the devices
+ * provided their merge_bvec method (we know this by looking at
+ * queue_max_hw_sectors), then we can't allow bios with multiple vector
+ * entries. So always set max_size to 0, and the code below allows
+ * just one page.
+ */
+ else if (queue_max_hw_sectors(q) <= PAGE_SIZE >> 9)
+ max_size = 0;
out_table:
dm_table_put(map);
@@ -943,7 +1298,7 @@
* The request function that just remaps the bio built up by
* dm_merge_bvec.
*/
-static int dm_request(struct request_queue *q, struct bio *bio)
+static int _dm_request(struct request_queue *q, struct bio *bio)
{
int rw = bio_data_dir(bio);
struct mapped_device *md = q->queuedata;
@@ -980,12 +1335,274 @@
return 0;
}
+static int dm_make_request(struct request_queue *q, struct bio *bio)
+{
+ struct mapped_device *md = q->queuedata;
+
+ if (unlikely(bio_barrier(bio))) {
+ bio_endio(bio, -EOPNOTSUPP);
+ return 0;
+ }
+
+ return md->saved_make_request_fn(q, bio); /* call __make_request() */
+}
+
+static int dm_request_based(struct mapped_device *md)
+{
+ return blk_queue_stackable(md->queue);
+}
+
+static int dm_request(struct request_queue *q, struct bio *bio)
+{
+ struct mapped_device *md = q->queuedata;
+
+ if (dm_request_based(md))
+ return dm_make_request(q, bio);
+
+ return _dm_request(q, bio);
+}
+
+void dm_dispatch_request(struct request *rq)
+{
+ int r;
+
+ if (blk_queue_io_stat(rq->q))
+ rq->cmd_flags |= REQ_IO_STAT;
+
+ rq->start_time = jiffies;
+ r = blk_insert_cloned_request(rq->q, rq);
+ if (r)
+ dm_complete_request(rq, r);
+}
+EXPORT_SYMBOL_GPL(dm_dispatch_request);
+
+static void dm_rq_bio_destructor(struct bio *bio)
+{
+ struct dm_rq_clone_bio_info *info = bio->bi_private;
+ struct mapped_device *md = info->tio->md;
+
+ free_bio_info(info);
+ bio_free(bio, md->bs);
+}
+
+static int dm_rq_bio_constructor(struct bio *bio, struct bio *bio_orig,
+ void *data)
+{
+ struct dm_rq_target_io *tio = data;
+ struct mapped_device *md = tio->md;
+ struct dm_rq_clone_bio_info *info = alloc_bio_info(md);
+
+ if (!info)
+ return -ENOMEM;
+
+ info->orig = bio_orig;
+ info->tio = tio;
+ bio->bi_end_io = end_clone_bio;
+ bio->bi_private = info;
+ bio->bi_destructor = dm_rq_bio_destructor;
+
+ return 0;
+}
+
+static int setup_clone(struct request *clone, struct request *rq,
+ struct dm_rq_target_io *tio)
+{
+ int r = blk_rq_prep_clone(clone, rq, tio->md->bs, GFP_ATOMIC,
+ dm_rq_bio_constructor, tio);
+
+ if (r)
+ return r;
+
+ clone->cmd = rq->cmd;
+ clone->cmd_len = rq->cmd_len;
+ clone->sense = rq->sense;
+ clone->buffer = rq->buffer;
+ clone->end_io = end_clone_request;
+ clone->end_io_data = tio;
+
+ return 0;
+}
+
+static int dm_rq_flush_suspending(struct mapped_device *md)
+{
+ return !md->suspend_rq.special;
+}
+
+/*
+ * Called with the queue lock held.
+ */
+static int dm_prep_fn(struct request_queue *q, struct request *rq)
+{
+ struct mapped_device *md = q->queuedata;
+ struct dm_rq_target_io *tio;
+ struct request *clone;
+
+ if (unlikely(rq == &md->suspend_rq)) {
+ if (dm_rq_flush_suspending(md))
+ return BLKPREP_OK;
+ else
+ /* The flush suspend was interrupted */
+ return BLKPREP_KILL;
+ }
+
+ if (unlikely(rq->special)) {
+ DMWARN("Already has something in rq->special.");
+ return BLKPREP_KILL;
+ }
+
+ tio = alloc_rq_tio(md); /* Only one for each original request */
+ if (!tio)
+ /* -ENOMEM */
+ return BLKPREP_DEFER;
+
+ tio->md = md;
+ tio->ti = NULL;
+ tio->orig = rq;
+ tio->error = 0;
+ memset(&tio->info, 0, sizeof(tio->info));
+
+ clone = &tio->clone;
+ if (setup_clone(clone, rq, tio)) {
+ /* -ENOMEM */
+ free_rq_tio(tio);
+ return BLKPREP_DEFER;
+ }
+
+ rq->special = clone;
+ rq->cmd_flags |= REQ_DONTPREP;
+
+ return BLKPREP_OK;
+}
+
+static void map_request(struct dm_target *ti, struct request *rq,
+ struct mapped_device *md)
+{
+ int r;
+ struct request *clone = rq->special;
+ struct dm_rq_target_io *tio = clone->end_io_data;
+
+ /*
+ * Hold the md reference here for the in-flight I/O.
+ * We can't rely on the reference count taken by the device opener,
+ * because the device may be closed during the request completion
+ * when all bios are completed.
+ * See the comment in rq_completed() too.
+ */
+ dm_get(md);
+
+ tio->ti = ti;
+ r = ti->type->map_rq(ti, clone, &tio->info);
+ switch (r) {
+ case DM_MAPIO_SUBMITTED:
+ /* The target has taken the I/O to submit by itself later */
+ break;
+ case DM_MAPIO_REMAPPED:
+ /* The target has remapped the I/O so dispatch it */
+ dm_dispatch_request(clone);
+ break;
+ case DM_MAPIO_REQUEUE:
+ /* The target wants to requeue the I/O */
+ dm_requeue_unmapped_request(clone);
+ break;
+ default:
+ if (r > 0) {
+ DMWARN("unimplemented target map return value: %d", r);
+ BUG();
+ }
+
+ /* The target wants to complete the I/O */
+ dm_kill_unmapped_request(clone, r);
+ break;
+ }
+}
+
+/*
+ * q->request_fn for request-based dm.
+ * Called with the queue lock held.
+ */
+static void dm_request_fn(struct request_queue *q)
+{
+ struct mapped_device *md = q->queuedata;
+ struct dm_table *map = dm_get_table(md);
+ struct dm_target *ti;
+ struct request *rq;
+
+ /*
+ * For noflush suspend, check blk_queue_stopped() to immediately
+ * quit I/O dispatching.
+ */
+ while (!blk_queue_plugged(q) && !blk_queue_stopped(q)) {
+ rq = blk_peek_request(q);
+ if (!rq)
+ goto plug_and_out;
+
+ if (unlikely(rq == &md->suspend_rq)) { /* Flush suspend marker */
+ if (queue_in_flight(q))
+ /* Not quiet yet. Wait more */
+ goto plug_and_out;
+
+ /* This device should be quiet now */
+ __stop_queue(q);
+ blk_start_request(rq);
+ __blk_end_request_all(rq, 0);
+ wake_up(&md->wait);
+ goto out;
+ }
+
+ ti = dm_table_find_target(map, blk_rq_pos(rq));
+ if (ti->type->busy && ti->type->busy(ti))
+ goto plug_and_out;
+
+ blk_start_request(rq);
+ spin_unlock(q->queue_lock);
+ map_request(ti, rq, md);
+ spin_lock_irq(q->queue_lock);
+ }
+
+ goto out;
+
+plug_and_out:
+ if (!elv_queue_empty(q))
+ /* Some requests still remain, retry later */
+ blk_plug_device(q);
+
+out:
+ dm_table_put(map);
+
+ return;
+}
+
+int dm_underlying_device_busy(struct request_queue *q)
+{
+ return blk_lld_busy(q);
+}
+EXPORT_SYMBOL_GPL(dm_underlying_device_busy);
+
+static int dm_lld_busy(struct request_queue *q)
+{
+ int r;
+ struct mapped_device *md = q->queuedata;
+ struct dm_table *map = dm_get_table(md);
+
+ if (!map || test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags))
+ r = 1;
+ else
+ r = dm_table_any_busy_target(map);
+
+ dm_table_put(map);
+
+ return r;
+}
+
static void dm_unplug_all(struct request_queue *q)
{
struct mapped_device *md = q->queuedata;
struct dm_table *map = dm_get_table(md);
if (map) {
+ if (dm_request_based(md))
+ generic_unplug_device(q);
+
dm_table_unplug_all(map);
dm_table_put(map);
}
@@ -1000,7 +1617,16 @@
if (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) {
map = dm_get_table(md);
if (map) {
- r = dm_table_any_congested(map, bdi_bits);
+ /*
+ * Request-based dm cares only about its own queue when
+ * queried for the congestion status of the request_queue
+ */
+ if (dm_request_based(md))
+ r = md->queue->backing_dev_info.state &
+ bdi_bits;
+ else
+ r = dm_table_any_congested(map, bdi_bits);
+
dm_table_put(map);
}
}
@@ -1123,30 +1749,32 @@
INIT_LIST_HEAD(&md->uevent_list);
spin_lock_init(&md->uevent_lock);
- md->queue = blk_alloc_queue(GFP_KERNEL);
+ md->queue = blk_init_queue(dm_request_fn, NULL);
if (!md->queue)
goto bad_queue;
+ /*
+ * Request-based dm devices cannot be stacked on top of bio-based dm
+ * devices. The type of this dm device has not been decided yet,
+ * although we initialized the queue using blk_init_queue().
+ * The type is decided at the first table loading time.
+ * To prevent problematic device stacking, clear the queue flag
+ * for request stacking support until then.
+ *
+ * This queue is new, so no concurrency on the queue_flags.
+ */
+ queue_flag_clear_unlocked(QUEUE_FLAG_STACKABLE, md->queue);
+ md->saved_make_request_fn = md->queue->make_request_fn;
md->queue->queuedata = md;
md->queue->backing_dev_info.congested_fn = dm_any_congested;
md->queue->backing_dev_info.congested_data = md;
blk_queue_make_request(md->queue, dm_request);
- blk_queue_ordered(md->queue, QUEUE_ORDERED_DRAIN, NULL);
blk_queue_bounce_limit(md->queue, BLK_BOUNCE_ANY);
md->queue->unplug_fn = dm_unplug_all;
blk_queue_merge_bvec(md->queue, dm_merge_bvec);
-
- md->io_pool = mempool_create_slab_pool(MIN_IOS, _io_cache);
- if (!md->io_pool)
- goto bad_io_pool;
-
- md->tio_pool = mempool_create_slab_pool(MIN_IOS, _tio_cache);
- if (!md->tio_pool)
- goto bad_tio_pool;
-
- md->bs = bioset_create(16, 0);
- if (!md->bs)
- goto bad_no_bioset;
+ blk_queue_softirq_done(md->queue, dm_softirq_done);
+ blk_queue_prep_rq(md->queue, dm_prep_fn);
+ blk_queue_lld_busy(md->queue, dm_lld_busy);
md->disk = alloc_disk(1);
if (!md->disk)
@@ -1170,6 +1798,10 @@
if (!md->wq)
goto bad_thread;
+ md->bdev = bdget_disk(md->disk, 0);
+ if (!md->bdev)
+ goto bad_bdev;
+
/* Populate the mapping, nobody knows we exist yet */
spin_lock(&_minor_lock);
old_md = idr_replace(&_minor_idr, md, minor);
@@ -1179,15 +1811,11 @@
return md;
+bad_bdev:
+ destroy_workqueue(md->wq);
bad_thread:
put_disk(md->disk);
bad_disk:
- bioset_free(md->bs);
-bad_no_bioset:
- mempool_destroy(md->tio_pool);
-bad_tio_pool:
- mempool_destroy(md->io_pool);
-bad_io_pool:
blk_cleanup_queue(md->queue);
bad_queue:
free_minor(minor);
@@ -1204,14 +1832,15 @@
{
int minor = MINOR(disk_devt(md->disk));
- if (md->suspended_bdev) {
- unlock_fs(md);
- bdput(md->suspended_bdev);
- }
+ unlock_fs(md);
+ bdput(md->bdev);
destroy_workqueue(md->wq);
- mempool_destroy(md->tio_pool);
- mempool_destroy(md->io_pool);
- bioset_free(md->bs);
+ if (md->tio_pool)
+ mempool_destroy(md->tio_pool);
+ if (md->io_pool)
+ mempool_destroy(md->io_pool);
+ if (md->bs)
+ bioset_free(md->bs);
blk_integrity_unregister(md->disk);
del_gendisk(md->disk);
free_minor(minor);
@@ -1226,6 +1855,29 @@
kfree(md);
}
+static void __bind_mempools(struct mapped_device *md, struct dm_table *t)
+{
+ struct dm_md_mempools *p;
+
+ if (md->io_pool && md->tio_pool && md->bs)
+ /* the md already has necessary mempools */
+ goto out;
+
+ p = dm_table_get_md_mempools(t);
+ BUG_ON(!p || md->io_pool || md->tio_pool || md->bs);
+
+ md->io_pool = p->io_pool;
+ p->io_pool = NULL;
+ md->tio_pool = p->tio_pool;
+ p->tio_pool = NULL;
+ md->bs = p->bs;
+ p->bs = NULL;
+
+out:
+ /* mempool bind completed, the table no longer needs any mempools */
+ dm_table_free_md_mempools(t);
+}
+
/*
* Bind a table to the device.
*/
@@ -1249,15 +1901,17 @@
{
set_capacity(md->disk, size);
- mutex_lock(&md->suspended_bdev->bd_inode->i_mutex);
- i_size_write(md->suspended_bdev->bd_inode, (loff_t)size << SECTOR_SHIFT);
- mutex_unlock(&md->suspended_bdev->bd_inode->i_mutex);
+ mutex_lock(&md->bdev->bd_inode->i_mutex);
+ i_size_write(md->bdev->bd_inode, (loff_t)size << SECTOR_SHIFT);
+ mutex_unlock(&md->bdev->bd_inode->i_mutex);
}
-static int __bind(struct mapped_device *md, struct dm_table *t)
+static int __bind(struct mapped_device *md, struct dm_table *t,
+ struct queue_limits *limits)
{
struct request_queue *q = md->queue;
sector_t size;
+ unsigned long flags;
size = dm_table_get_size(t);
@@ -1267,8 +1921,7 @@
if (size != get_capacity(md->disk))
memset(&md->geometry, 0, sizeof(md->geometry));
- if (md->suspended_bdev)
- __set_size(md, size);
+ __set_size(md, size);
if (!size) {
dm_table_destroy(t);
@@ -1277,10 +1930,22 @@
dm_table_event_callback(t, event_callback, md);
- write_lock(&md->map_lock);
+ /*
+ * If the old table type wasn't request-based, the queue hasn't
+ * been stopped during suspension yet, so stop it here to prevent
+ * I/O mapping before resume.
+ * This must be done before setting the queue restrictions,
+ * because request-based dm may be run just after the setting.
+ */
+ if (dm_table_request_based(t) && !blk_queue_stopped(q))
+ stop_queue(q);
+
+ __bind_mempools(md, t);
+
+ write_lock_irqsave(&md->map_lock, flags);
md->map = t;
- dm_table_set_restrictions(t, q);
- write_unlock(&md->map_lock);
+ dm_table_set_restrictions(t, q, limits);
+ write_unlock_irqrestore(&md->map_lock, flags);
return 0;
}
@@ -1288,14 +1953,15 @@
static void __unbind(struct mapped_device *md)
{
struct dm_table *map = md->map;
+ unsigned long flags;
if (!map)
return;
dm_table_event_callback(map, NULL, NULL);
- write_lock(&md->map_lock);
+ write_lock_irqsave(&md->map_lock, flags);
md->map = NULL;
- write_unlock(&md->map_lock);
+ write_unlock_irqrestore(&md->map_lock, flags);
dm_table_destroy(map);
}
@@ -1399,6 +2065,8 @@
{
int r = 0;
DECLARE_WAITQUEUE(wait, current);
+ struct request_queue *q = md->queue;
+ unsigned long flags;
dm_unplug_all(md->queue);
@@ -1408,7 +2076,14 @@
set_current_state(interruptible);
smp_mb();
- if (!atomic_read(&md->pending))
+ if (dm_request_based(md)) {
+ spin_lock_irqsave(q->queue_lock, flags);
+ if (!queue_in_flight(q) && blk_queue_stopped(q)) {
+ spin_unlock_irqrestore(q->queue_lock, flags);
+ break;
+ }
+ spin_unlock_irqrestore(q->queue_lock, flags);
+ } else if (!atomic_read(&md->pending))
break;
if (interruptible == TASK_INTERRUPTIBLE &&
@@ -1426,34 +2101,36 @@
return r;
}
-static int dm_flush(struct mapped_device *md)
+static void dm_flush(struct mapped_device *md)
{
dm_wait_for_completion(md, TASK_UNINTERRUPTIBLE);
- return 0;
+
+ bio_init(&md->barrier_bio);
+ md->barrier_bio.bi_bdev = md->bdev;
+ md->barrier_bio.bi_rw = WRITE_BARRIER;
+ __split_and_process_bio(md, &md->barrier_bio);
+
+ dm_wait_for_completion(md, TASK_UNINTERRUPTIBLE);
}
static void process_barrier(struct mapped_device *md, struct bio *bio)
{
- int error = dm_flush(md);
+ md->barrier_error = 0;
- if (unlikely(error)) {
- bio_endio(bio, error);
- return;
+ dm_flush(md);
+
+ if (!bio_empty_barrier(bio)) {
+ __split_and_process_bio(md, bio);
+ dm_flush(md);
}
- if (bio_empty_barrier(bio)) {
- bio_endio(bio, 0);
- return;
- }
-
- __split_and_process_bio(md, bio);
-
- error = dm_flush(md);
-
- if (!error && md->barrier_error)
- error = md->barrier_error;
if (md->barrier_error != DM_ENDIO_REQUEUE)
- bio_endio(bio, error);
+ bio_endio(bio, md->barrier_error);
+ else {
+ spin_lock_irq(&md->deferred_lock);
+ bio_list_add_head(&md->deferred, bio);
+ spin_unlock_irq(&md->deferred_lock);
+ }
}
/*
@@ -1479,10 +2156,14 @@
up_write(&md->io_lock);
- if (bio_barrier(c))
- process_barrier(md, c);
- else
- __split_and_process_bio(md, c);
+ if (dm_request_based(md))
+ generic_make_request(c);
+ else {
+ if (bio_barrier(c))
+ process_barrier(md, c);
+ else
+ __split_and_process_bio(md, c);
+ }
down_write(&md->io_lock);
}
@@ -1502,6 +2183,7 @@
*/
int dm_swap_table(struct mapped_device *md, struct dm_table *table)
{
+ struct queue_limits limits;
int r = -EINVAL;
mutex_lock(&md->suspend_lock);
@@ -1510,19 +2192,96 @@
if (!dm_suspended(md))
goto out;
- /* without bdev, the device size cannot be changed */
- if (!md->suspended_bdev)
- if (get_capacity(md->disk) != dm_table_get_size(table))
- goto out;
+ r = dm_calculate_queue_limits(table, &limits);
+ if (r)
+ goto out;
+
+ /* cannot change the device type, once a table is bound */
+ if (md->map &&
+ (dm_table_get_type(md->map) != dm_table_get_type(table))) {
+ DMWARN("can't change the device type after a table is bound");
+ goto out;
+ }
+
+ /*
+ * It is enough that blk_queue_ordered() is called only once, when
+ * the first bio-based table is bound.
+ *
+ * This setting should be moved to alloc_dev() when request-based dm
+ * supports barriers.
+ */
+ if (!md->map && dm_table_bio_based(table))
+ blk_queue_ordered(md->queue, QUEUE_ORDERED_DRAIN, NULL);
__unbind(md);
- r = __bind(md, table);
+ r = __bind(md, table, &limits);
out:
mutex_unlock(&md->suspend_lock);
return r;
}
+static void dm_rq_invalidate_suspend_marker(struct mapped_device *md)
+{
+ md->suspend_rq.special = (void *)0x1;
+}
+
+static void dm_rq_abort_suspend(struct mapped_device *md, int noflush)
+{
+ struct request_queue *q = md->queue;
+ unsigned long flags;
+
+ spin_lock_irqsave(q->queue_lock, flags);
+ if (!noflush)
+ dm_rq_invalidate_suspend_marker(md);
+ __start_queue(q);
+ spin_unlock_irqrestore(q->queue_lock, flags);
+}
+
+static void dm_rq_start_suspend(struct mapped_device *md, int noflush)
+{
+ struct request *rq = &md->suspend_rq;
+ struct request_queue *q = md->queue;
+
+ if (noflush)
+ stop_queue(q);
+ else {
+ blk_rq_init(q, rq);
+ blk_insert_request(q, rq, 0, NULL);
+ }
+}
+
+static int dm_rq_suspend_available(struct mapped_device *md, int noflush)
+{
+ int r = 1;
+ struct request *rq = &md->suspend_rq;
+ struct request_queue *q = md->queue;
+ unsigned long flags;
+
+ if (noflush)
+ return r;
+
+ /* The marker must be protected by queue lock if it is in use */
+ spin_lock_irqsave(q->queue_lock, flags);
+ if (unlikely(rq->ref_count)) {
+ /*
+ * This can happen when the previous flush suspend was
+ * interrupted and its marker is still in the queue when
+ * this flush suspend is invoked, because we don't
+ * remove the marker at the time of suspend interruption.
+ * We have only one marker per mapped_device, so we can't
+ * start another flush suspend while it is in use.
+ */
+ BUG_ON(!rq->special); /* The marker should be invalidated */
+ DMWARN("Invalidation of the previous flush suspend is still"
+        " in progress. Please retry later.");
+ r = 0;
+ }
+ spin_unlock_irqrestore(q->queue_lock, flags);
+
+ return r;
+}
+
/*
* Functions to lock and unlock any filesystem running on the
* device.
@@ -1533,7 +2292,7 @@
WARN_ON(md->frozen_sb);
- md->frozen_sb = freeze_bdev(md->suspended_bdev);
+ md->frozen_sb = freeze_bdev(md->bdev);
if (IS_ERR(md->frozen_sb)) {
r = PTR_ERR(md->frozen_sb);
md->frozen_sb = NULL;
@@ -1542,9 +2301,6 @@
set_bit(DMF_FROZEN, &md->flags);
- /* don't bdput right now, we don't want the bdev
- * to go away while it is locked.
- */
return 0;
}
@@ -1553,7 +2309,7 @@
if (!test_bit(DMF_FROZEN, &md->flags))
return;
- thaw_bdev(md->suspended_bdev, md->frozen_sb);
+ thaw_bdev(md->bdev, md->frozen_sb);
md->frozen_sb = NULL;
clear_bit(DMF_FROZEN, &md->flags);
}
@@ -1565,6 +2321,53 @@
* dm_bind_table, dm_suspend must be called to flush any in
* flight bios and ensure that any further io gets deferred.
*/
+/*
+ * Suspend mechanism in request-based dm.
+ *
+ * After the suspend starts, further incoming requests are kept in
+ * the request_queue and deferred.
+ * Remaining requests in the request_queue at the start of suspend are flushed
+ * if it is a flush suspend.
+ * The suspend completes when the following conditions have been satisfied,
+ * so wait for it:
+ * 1. q->in_flight is 0 (which means no in_flight request)
+ * 2. the queue has been stopped (which means no request dispatching)
+ *
+ *
+ * Noflush suspend
+ * ---------------
+ * Noflush suspend doesn't need to dispatch remaining requests.
+ * So stop the queue immediately. Then, wait for all in_flight requests
+ * to be completed or requeued.
+ *
+ * To abort noflush suspend, start the queue.
+ *
+ *
+ * Flush suspend
+ * -------------
+ * Flush suspend needs to dispatch the remaining requests, so the queue is
+ * stopped only after they have completed. (Requeued requests must also be
+ * re-dispatched and completed; until then, we can't stop the queue.)
+ *
+ * While the remaining requests are being flushed, further incoming requests
+ * are also inserted into the same queue. To distinguish which requests are
+ * to be flushed, we insert a marker request into the queue when flush
+ * suspend starts, like a barrier.
+ * Dispatching is blocked when the marker is found at the top of the queue,
+ * and the queue is stopped when all in_flight requests are completed, since
+ * that means the remaining requests have been completely flushed.
+ * Then the marker is removed from the queue.
+ *
+ * To abort flush suspend, we also need to take care of the marker, not only
+ * restart the queue.
+ * We don't remove the marker forcibly from the queue since that's against
+ * block-layer conventions. Instead, we put an invalidated mark on the marker.
+ * When the invalidated marker is found at the top of the queue, it is
+ * immediately removed, so it doesn't block dispatching.
+ * Because we have only one marker per mapped_device, we can't start another
+ * flush suspend until the invalidated marker is removed from the queue;
+ * in that case we fail and return -EBUSY.
+ */
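
A minimal userspace model of the marker bookkeeping described in the comment above (the struct, field names and return values are simplified stand-ins, not the actual dm/request-queue code):

    /* Illustrative model only; the real code uses struct request and the
     * block-layer queue lock rather than this simplified bookkeeping. */
    #include <stdio.h>
    #include <stdbool.h>

    struct marker {
        int ref_count;              /* non-zero while the marker is queued */
        void *special;              /* non-NULL once it has been invalidated */
    };

    /* Mirrors the dm_rq_suspend_available() check: refuse a new flush
     * suspend while a previously invalidated marker is still queued. */
    static bool flush_suspend_available(const struct marker *m)
    {
        return m->ref_count == 0;
    }

    /* Mirrors the abort path for flush suspend: mark the marker invalid so
     * the dispatcher drops it instead of blocking on it. */
    static void abort_flush_suspend(struct marker *m)
    {
        m->special = (void *)0x1;
    }

    int main(void)
    {
        struct marker m = { .ref_count = 1, .special = NULL };

        abort_flush_suspend(&m);
        if (!flush_suspend_available(&m))
            printf("dm_suspend() would return -EBUSY\n");

        m.ref_count = 0;            /* dispatcher reaped the invalidated marker */
        if (flush_suspend_available(&m))
            printf("a new flush suspend can start\n");
        return 0;
    }
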
int dm_suspend(struct mapped_device *md, unsigned suspend_flags)
{
struct dm_table *map = NULL;
@@ -1579,6 +2382,11 @@
goto out_unlock;
}
+ if (dm_request_based(md) && !dm_rq_suspend_available(md, noflush)) {
+ r = -EBUSY;
+ goto out_unlock;
+ }
+
map = dm_get_table(md);
/*
@@ -1591,24 +2399,14 @@
/* This does not get reverted if there's an error later. */
dm_table_presuspend_targets(map);
- /* bdget() can stall if the pending I/Os are not flushed */
- if (!noflush) {
- md->suspended_bdev = bdget_disk(md->disk, 0);
- if (!md->suspended_bdev) {
- DMWARN("bdget failed in dm_suspend");
- r = -ENOMEM;
+ /*
+ * Flush I/O to the device. noflush supersedes do_lockfs,
+ * because lock_fs() needs to flush I/Os.
+ */
+ if (!noflush && do_lockfs) {
+ r = lock_fs(md);
+ if (r)
goto out;
- }
-
- /*
- * Flush I/O to the device. noflush supersedes do_lockfs,
- * because lock_fs() needs to flush I/Os.
- */
- if (do_lockfs) {
- r = lock_fs(md);
- if (r)
- goto out;
- }
}
/*
@@ -1634,6 +2432,9 @@
flush_workqueue(md->wq);
+ if (dm_request_based(md))
+ dm_rq_start_suspend(md, noflush);
+
/*
* At this point no more requests are entering target request routines.
* We call dm_wait_for_completion to wait for all existing requests
@@ -1650,6 +2451,9 @@
if (r < 0) {
dm_queue_flush(md);
+ if (dm_request_based(md))
+ dm_rq_abort_suspend(md, noflush);
+
unlock_fs(md);
goto out; /* pushback list is already flushed, so skip flush */
}
@@ -1665,11 +2469,6 @@
set_bit(DMF_SUSPENDED, &md->flags);
out:
- if (r && md->suspended_bdev) {
- bdput(md->suspended_bdev);
- md->suspended_bdev = NULL;
- }
-
dm_table_put(map);
out_unlock:
@@ -1696,21 +2495,20 @@
dm_queue_flush(md);
- unlock_fs(md);
+ /*
+ * Flushing deferred I/Os must be done after targets are resumed
+ * so that mapping of targets can work correctly.
+ * Request-based dm queues the deferred I/Os in its request_queue.
+ */
+ if (dm_request_based(md))
+ start_queue(md->queue);
- if (md->suspended_bdev) {
- bdput(md->suspended_bdev);
- md->suspended_bdev = NULL;
- }
+ unlock_fs(md);
clear_bit(DMF_SUSPENDED, &md->flags);
dm_table_unplug_all(map);
-
- dm_kobject_uevent(md);
-
r = 0;
-
out:
dm_table_put(map);
mutex_unlock(&md->suspend_lock);
@@ -1721,9 +2519,19 @@
/*-----------------------------------------------------------------
* Event notification.
*---------------------------------------------------------------*/
-void dm_kobject_uevent(struct mapped_device *md)
+void dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
+ unsigned cookie)
{
- kobject_uevent(&disk_to_dev(md->disk)->kobj, KOBJ_CHANGE);
+ char udev_cookie[DM_COOKIE_LENGTH];
+ char *envp[] = { udev_cookie, NULL };
+
+ if (!cookie)
+ kobject_uevent(&disk_to_dev(md->disk)->kobj, action);
+ else {
+ snprintf(udev_cookie, DM_COOKIE_LENGTH, "%s=%u",
+ DM_COOKIE_ENV_VAR_NAME, cookie);
+ kobject_uevent_env(&disk_to_dev(md->disk)->kobj, action, envp);
+ }
}
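
For reference, the cookie ends up as a single NAME=value string in the uevent environment; a small sketch of the same formatting (the two macro values below are assumptions standing in for the dm ioctl header definitions):

    #include <stdio.h>

    #define DM_COOKIE_ENV_VAR_NAME "DM_COOKIE"   /* assumed value */
    #define DM_COOKIE_LENGTH       24            /* assumed value */

    int main(void)
    {
        char udev_cookie[DM_COOKIE_LENGTH];
        unsigned int cookie = 12345;

        /* Same formatting as the kobject_uevent_env() branch above */
        snprintf(udev_cookie, sizeof(udev_cookie), "%s=%u",
                 DM_COOKIE_ENV_VAR_NAME, cookie);
        printf("%s\n", udev_cookie);             /* prints DM_COOKIE=12345 */
        return 0;
    }
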
uint32_t dm_next_uevent_seq(struct mapped_device *md)
@@ -1777,6 +2585,10 @@
if (&md->kobj != kobj)
return NULL;
+ if (test_bit(DMF_FREEING, &md->flags) ||
+ test_bit(DMF_DELETING, &md->flags))
+ return NULL;
+
dm_get(md);
return md;
}
@@ -1797,6 +2609,61 @@
}
EXPORT_SYMBOL_GPL(dm_noflush_suspending);
+struct dm_md_mempools *dm_alloc_md_mempools(unsigned type)
+{
+ struct dm_md_mempools *pools = kmalloc(sizeof(*pools), GFP_KERNEL);
+
+ if (!pools)
+ return NULL;
+
+ pools->io_pool = (type == DM_TYPE_BIO_BASED) ?
+ mempool_create_slab_pool(MIN_IOS, _io_cache) :
+ mempool_create_slab_pool(MIN_IOS, _rq_bio_info_cache);
+ if (!pools->io_pool)
+ goto free_pools_and_out;
+
+ pools->tio_pool = (type == DM_TYPE_BIO_BASED) ?
+ mempool_create_slab_pool(MIN_IOS, _tio_cache) :
+ mempool_create_slab_pool(MIN_IOS, _rq_tio_cache);
+ if (!pools->tio_pool)
+ goto free_io_pool_and_out;
+
+ pools->bs = (type == DM_TYPE_BIO_BASED) ?
+ bioset_create(16, 0) : bioset_create(MIN_IOS, 0);
+ if (!pools->bs)
+ goto free_tio_pool_and_out;
+
+ return pools;
+
+free_tio_pool_and_out:
+ mempool_destroy(pools->tio_pool);
+
+free_io_pool_and_out:
+ mempool_destroy(pools->io_pool);
+
+free_pools_and_out:
+ kfree(pools);
+
+ return NULL;
+}
+
+void dm_free_md_mempools(struct dm_md_mempools *pools)
+{
+ if (!pools)
+ return;
+
+ if (pools->io_pool)
+ mempool_destroy(pools->io_pool);
+
+ if (pools->tio_pool)
+ mempool_destroy(pools->tio_pool);
+
+ if (pools->bs)
+ bioset_free(pools->bs);
+
+ kfree(pools);
+}
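+
A typical pairing of these two helpers from table-binding code might look roughly like the following sketch (not buildable on its own; example_attach_pools() is a hypothetical placeholder for whatever consumes the pools):

    /* Sketch only: allocate per-table mempools for a bio-based table and
     * release them again if handing them over fails. */
    static int example_bind_mempools(struct dm_table *t)
    {
        struct dm_md_mempools *pools = dm_alloc_md_mempools(DM_TYPE_BIO_BASED);

        if (!pools)
            return -ENOMEM;

        if (example_attach_pools(t, pools)) {   /* hypothetical helper */
            dm_free_md_mempools(pools);
            return -ENOMEM;
        }

        return 0;
    }
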
+
static struct block_device_operations dm_blk_dops = {
.open = dm_blk_open,
.release = dm_blk_close,
diff --git a/drivers/md/dm.h b/drivers/md/dm.h
index a31506d..23278ae 100644
--- a/drivers/md/dm.h
+++ b/drivers/md/dm.h
@@ -23,6 +23,13 @@
#define DM_SUSPEND_NOFLUSH_FLAG (1 << 1)
/*
+ * Type of table and mapped_device's mempool
+ */
+#define DM_TYPE_NONE 0
+#define DM_TYPE_BIO_BASED 1
+#define DM_TYPE_REQUEST_BASED 2
+
+/*
* List of devices that a metadevice uses and should open/close.
*/
struct dm_dev_internal {
@@ -32,6 +39,7 @@
};
struct dm_table;
+struct dm_md_mempools;
/*-----------------------------------------------------------------
* Internal table functions.
@@ -41,18 +49,34 @@
void (*fn)(void *), void *context);
struct dm_target *dm_table_get_target(struct dm_table *t, unsigned int index);
struct dm_target *dm_table_find_target(struct dm_table *t, sector_t sector);
-void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q);
+int dm_calculate_queue_limits(struct dm_table *table,
+ struct queue_limits *limits);
+void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
+ struct queue_limits *limits);
struct list_head *dm_table_get_devices(struct dm_table *t);
void dm_table_presuspend_targets(struct dm_table *t);
void dm_table_postsuspend_targets(struct dm_table *t);
int dm_table_resume_targets(struct dm_table *t);
int dm_table_any_congested(struct dm_table *t, int bdi_bits);
+int dm_table_any_busy_target(struct dm_table *t);
+int dm_table_set_type(struct dm_table *t);
+unsigned dm_table_get_type(struct dm_table *t);
+bool dm_table_bio_based(struct dm_table *t);
+bool dm_table_request_based(struct dm_table *t);
+int dm_table_alloc_md_mempools(struct dm_table *t);
+void dm_table_free_md_mempools(struct dm_table *t);
+struct dm_md_mempools *dm_table_get_md_mempools(struct dm_table *t);
/*
* To check the return value from dm_table_find_target().
*/
#define dm_target_is_valid(t) ((t)->table)
+/*
+ * To check whether the target type is request-based or not (bio-based).
+ */
+#define dm_target_request_based(t) ((t)->type->map_rq != NULL)
+
/*-----------------------------------------------------------------
* A registry of target types.
*---------------------------------------------------------------*/
@@ -92,9 +116,16 @@
int dm_open_count(struct mapped_device *md);
int dm_lock_for_deletion(struct mapped_device *md);
-void dm_kobject_uevent(struct mapped_device *md);
+void dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
+ unsigned cookie);
int dm_kcopyd_init(void);
void dm_kcopyd_exit(void);
+/*
+ * Mempool operations
+ */
+struct dm_md_mempools *dm_alloc_md_mempools(unsigned type);
+void dm_free_md_mempools(struct dm_md_mempools *pools);
+
#endif
diff --git a/drivers/media/common/ir-keymaps.c b/drivers/media/common/ir-keymaps.c
index 3fe158a..4216328 100644
--- a/drivers/media/common/ir-keymaps.c
+++ b/drivers/media/common/ir-keymaps.c
@@ -2750,3 +2750,26 @@
[0x1b] = KEY_B, /*recall*/
};
EXPORT_SYMBOL_GPL(ir_codes_dm1105_nec);
+
+/* EVGA inDtube
+ Devin Heitmueller <devin.heitmueller@gmail.com>
+ */
+IR_KEYTAB_TYPE ir_codes_evga_indtube[IR_KEYTAB_SIZE] = {
+ [0x12] = KEY_POWER,
+ [0x02] = KEY_MODE, /* TV */
+ [0x14] = KEY_MUTE,
+ [0x1a] = KEY_CHANNELUP,
+ [0x16] = KEY_TV2, /* PIP */
+ [0x1d] = KEY_VOLUMEUP,
+ [0x05] = KEY_CHANNELDOWN,
+ [0x0f] = KEY_PLAYPAUSE,
+ [0x19] = KEY_VOLUMEDOWN,
+ [0x1c] = KEY_REWIND,
+ [0x0d] = KEY_RECORD,
+ [0x18] = KEY_FORWARD,
+ [0x1e] = KEY_PREVIOUS,
+ [0x1b] = KEY_STOP,
+ [0x1f] = KEY_NEXT,
+ [0x13] = KEY_CAMERA,
+};
+EXPORT_SYMBOL_GPL(ir_codes_evga_indtube);
diff --git a/drivers/media/dvb/frontends/stv0900.h b/drivers/media/dvb/frontends/stv0900.h
index 8a1332c..bf4e9b6 100644
--- a/drivers/media/dvb/frontends/stv0900.h
+++ b/drivers/media/dvb/frontends/stv0900.h
@@ -29,6 +29,11 @@
#include <linux/dvb/frontend.h>
#include "dvb_frontend.h"
+struct stv0900_reg {
+ u16 addr;
+ u8 val;
+};
+
struct stv0900_config {
u8 demod_address;
u32 xtal;
@@ -38,7 +43,7 @@
u8 path1_mode;
u8 path2_mode;
-
+ struct stv0900_reg *ts_config_regs;
u8 tun1_maddress;/* 0, 1, 2, 3 for 0xc0, 0xc2, 0xc4, 0xc6 */
u8 tun2_maddress;
u8 tun1_adc;/* 1 for stv6110, 2 for stb6100 */
diff --git a/drivers/media/dvb/frontends/stv0900_core.c b/drivers/media/dvb/frontends/stv0900_core.c
index 8499bcf..1da045f 100644
--- a/drivers/media/dvb/frontends/stv0900_core.c
+++ b/drivers/media/dvb/frontends/stv0900_core.c
@@ -149,31 +149,31 @@
dprintk(KERN_ERR "%s: i2c error %d\n", __func__, ret);
}
-u8 stv0900_read_reg(struct stv0900_internal *i_params, u16 reg_addr)
+u8 stv0900_read_reg(struct stv0900_internal *i_params, u16 reg)
{
- u8 data[2];
int ret;
- struct i2c_msg i2cmsg = {
- .addr = i_params->i2c_addr,
- .flags = 0,
- .len = 2,
- .buf = data,
+ u8 b0[] = { MSB(reg), LSB(reg) };
+ u8 buf = 0;
+ struct i2c_msg msg[] = {
+ {
+ .addr = i_params->i2c_addr,
+ .flags = 0,
+ .buf = b0,
+ .len = 2,
+ }, {
+ .addr = i_params->i2c_addr,
+ .flags = I2C_M_RD,
+ .buf = &buf,
+ .len = 1,
+ },
};
- data[0] = MSB(reg_addr);
- data[1] = LSB(reg_addr);
+ ret = i2c_transfer(i_params->i2c_adap, msg, 2);
+ if (ret != 2)
+ dprintk(KERN_ERR "%s: i2c error %d, reg[0x%02x]\n",
+ __func__, ret, reg);
- ret = i2c_transfer(i_params->i2c_adap, &i2cmsg, 1);
- if (ret != 1)
- dprintk(KERN_ERR "%s: i2c error %d\n", __func__, ret);
-
- i2cmsg.flags = I2C_M_RD;
- i2cmsg.len = 1;
- ret = i2c_transfer(i_params->i2c_adap, &i2cmsg, 1);
- if (ret != 1)
- dprintk(KERN_ERR "%s: i2c error %d\n", __func__, ret);
-
- return data[0];
+ return buf;
}
void extract_mask_pos(u32 label, u8 *mask, u8 *pos)
@@ -712,6 +712,44 @@
return c_n;
}
+static int stv0900_read_ucblocks(struct dvb_frontend *fe, u32 * ucblocks)
+{
+ struct stv0900_state *state = fe->demodulator_priv;
+ struct stv0900_internal *i_params = state->internal;
+ enum fe_stv0900_demod_num demod = state->demod;
+ u8 err_val1, err_val0;
+ s32 err_field1, err_field0;
+ u32 header_err_val = 0;
+
+ *ucblocks = 0x0;
+ if (stv0900_get_standard(fe, demod) == STV0900_DVBS2_STANDARD) {
+ /* DVB-S2 delineator errors count */
+
+ /* retrieving the number of erroneous headers */
+ dmd_reg(err_field0, R0900_P1_BBFCRCKO0,
+ R0900_P2_BBFCRCKO0);
+ dmd_reg(err_field1, R0900_P1_BBFCRCKO1,
+ R0900_P2_BBFCRCKO1);
+
+ err_val1 = stv0900_read_reg(i_params, err_field1);
+ err_val0 = stv0900_read_reg(i_params, err_field0);
+ header_err_val = (err_val1<<8) | err_val0;
+
+ /* retrieving the number of erroneous packets */
+ dmd_reg(err_field0, R0900_P1_UPCRCKO0,
+ R0900_P2_UPCRCKO0);
+ dmd_reg(err_field1, R0900_P1_UPCRCKO1,
+ R0900_P2_UPCRCKO1);
+
+ err_val1 = stv0900_read_reg(i_params, err_field1);
+ err_val0 = stv0900_read_reg(i_params, err_field0);
+ *ucblocks = (err_val1<<8) | err_val0;
+ *ucblocks += header_err_val;
+ }
+
+ return 0;
+}
+
static int stv0900_read_snr(struct dvb_frontend *fe, u16 *snr)
{
*snr = stv0900_carr_get_quality(fe,
@@ -1355,7 +1393,7 @@
struct stv0900_state *state = fe->demodulator_priv;
enum fe_stv0900_error error = STV0900_NO_ERROR;
enum fe_stv0900_error demodError = STV0900_NO_ERROR;
- int selosci;
+ int selosci, i;
struct stv0900_inode *temp_int = find_inode(state->i2c_adap,
state->config->demod_address);
@@ -1402,7 +1440,23 @@
stv0900_write_bits(state->internal, F0900_P1_ROLLOFF_CONTROL, p_init->rolloff);
stv0900_write_bits(state->internal, F0900_P2_ROLLOFF_CONTROL, p_init->rolloff);
- stv0900_set_ts_parallel_serial(state->internal, p_init->path1_ts_clock, p_init->path2_ts_clock);
+ state->internal->ts_config = p_init->ts_config;
+ if (state->internal->ts_config == NULL)
+ stv0900_set_ts_parallel_serial(state->internal,
+ p_init->path1_ts_clock,
+ p_init->path2_ts_clock);
+ else {
+ for (i = 0; state->internal->ts_config[i].addr != 0xffff; i++)
+ stv0900_write_reg(state->internal,
+ state->internal->ts_config[i].addr,
+ state->internal->ts_config[i].val);
+
+ stv0900_write_bits(state->internal, F0900_P2_RST_HWARE, 1);
+ stv0900_write_bits(state->internal, F0900_P2_RST_HWARE, 0);
+ stv0900_write_bits(state->internal, F0900_P1_RST_HWARE, 1);
+ stv0900_write_bits(state->internal, F0900_P1_RST_HWARE, 0);
+ }
+
stv0900_write_bits(state->internal, F0900_P1_TUN_MADDRESS, p_init->tun1_maddress);
switch (p_init->tuner1_adc) {
case 1:
@@ -1882,6 +1936,7 @@
.read_ber = stv0900_read_ber,
.read_signal_strength = stv0900_read_signal_strength,
.read_snr = stv0900_read_snr,
+ .read_ucblocks = stv0900_read_ucblocks,
};
struct dvb_frontend *stv0900_attach(const struct stv0900_config *config,
@@ -1915,6 +1970,7 @@
init_params.tun1_iq_inversion = STV0900_IQ_NORMAL;
init_params.tuner1_adc = config->tun1_adc;
init_params.path2_ts_clock = config->path2_mode;
+ init_params.ts_config = config->ts_config_regs;
init_params.tun2_maddress = config->tun2_maddress;
init_params.tuner2_adc = config->tun2_adc;
init_params.tun2_iq_inversion = STV0900_IQ_SWAPPED;
diff --git a/drivers/media/dvb/frontends/stv0900_priv.h b/drivers/media/dvb/frontends/stv0900_priv.h
index 67dc8ec..5ed7a14 100644
--- a/drivers/media/dvb/frontends/stv0900_priv.h
+++ b/drivers/media/dvb/frontends/stv0900_priv.h
@@ -271,6 +271,7 @@
/* IQ from the tuner2 to the demod */
enum stv0900_iq_inversion tun2_iq_inversion;
+ struct stv0900_reg *ts_config;
};
struct stv0900_search_params {
@@ -363,6 +364,7 @@
u8 i2c_addr;
u8 clkmode;/* 0 for CLKI, 2 for XTALI */
u8 chip_id;
+ struct stv0900_reg *ts_config;
enum fe_stv0900_error errs;
int dmds_used;
};
diff --git a/drivers/media/dvb/frontends/stv090x.c b/drivers/media/dvb/frontends/stv090x.c
index 96ef745..488bdfb 100644
--- a/drivers/media/dvb/frontends/stv090x.c
+++ b/drivers/media/dvb/frontends/stv090x.c
@@ -2674,7 +2674,7 @@
static u8 stv090x_optimize_carloop_short(struct stv090x_state *state)
{
- struct stv090x_short_frame_crloop *short_crl;
+ struct stv090x_short_frame_crloop *short_crl = NULL;
s32 index = 0;
u8 aclc = 0x0b;
@@ -2694,10 +2694,13 @@
break;
}
- if (state->dev_ver >= 0x30)
- short_crl = stv090x_s2_short_crl_cut20;
- else if (state->dev_ver >= 0x20)
+ if (state->dev_ver >= 0x30) {
+ /* Cut 3.0 and up */
short_crl = stv090x_s2_short_crl_cut30;
+ } else {
+ /* Cut 2.0 and up: we don't support cuts older than 2.0 */
+ short_crl = stv090x_s2_short_crl_cut20;
+ }
if (state->srate <= 3000000)
aclc = short_crl[index].crl_2;
diff --git a/drivers/media/dvb/frontends/tda10048.c b/drivers/media/dvb/frontends/tda10048.c
index 4302c56..cc8862c 100644
--- a/drivers/media/dvb/frontends/tda10048.c
+++ b/drivers/media/dvb/frontends/tda10048.c
@@ -210,6 +210,7 @@
{ TDA10048_CLK_4000, TDA10048_IF_36130, 10, 0, 0 },
{ TDA10048_CLK_16000, TDA10048_IF_3300, 10, 3, 0 },
{ TDA10048_CLK_16000, TDA10048_IF_3500, 10, 3, 0 },
+ { TDA10048_CLK_16000, TDA10048_IF_3800, 10, 3, 0 },
{ TDA10048_CLK_16000, TDA10048_IF_4000, 10, 3, 0 },
{ TDA10048_CLK_16000, TDA10048_IF_4300, 10, 3, 0 },
{ TDA10048_CLK_16000, TDA10048_IF_36130, 10, 3, 0 },
diff --git a/drivers/media/dvb/siano/smscoreapi.c b/drivers/media/dvb/siano/smscoreapi.c
index 32be382..a246903 100644
--- a/drivers/media/dvb/siano/smscoreapi.c
+++ b/drivers/media/dvb/siano/smscoreapi.c
@@ -1422,8 +1422,8 @@
struct smscore_gpio_config *pGpioConfig) {
u32 totalLen;
- u32 TranslatedPinNum;
- u32 GroupNum;
+ u32 TranslatedPinNum = 0;
+ u32 GroupNum = 0;
u32 ElectricChar;
u32 groupCfg;
void *buffer;
diff --git a/drivers/media/radio/radio-tea5764.c b/drivers/media/radio/radio-tea5764.c
index 3936238..3cd76dd 100644
--- a/drivers/media/radio/radio-tea5764.c
+++ b/drivers/media/radio/radio-tea5764.c
@@ -322,7 +322,9 @@
v->rangehigh = FREQ_MAX * FREQ_MUL;
v->capability = V4L2_TUNER_CAP_LOW | V4L2_TUNER_CAP_STEREO;
if (r->tunchk & TEA5764_TUNCHK_STEREO)
- v->rxsubchans = V4L2_TUNER_SUB_STEREO;
+ v->rxsubchans = V4L2_TUNER_SUB_STEREO;
+ else
+ v->rxsubchans = V4L2_TUNER_SUB_MONO;
v->audmode = tea5764_get_audout_mode(radio);
v->signal = TEA5764_TUNCHK_LEVEL(r->tunchk) * 0xffff / 0xf;
v->afc = TEA5764_TUNCHK_IFCNT(r->tunchk);
diff --git a/drivers/media/video/Kconfig b/drivers/media/video/Kconfig
index 94f4405..061e147 100644
--- a/drivers/media/video/Kconfig
+++ b/drivers/media/video/Kconfig
@@ -866,9 +866,13 @@
module will be called w9968cf.
config USB_OV511
- tristate "USB OV511 Camera support"
+ tristate "USB OV511 Camera support (DEPRECATED)"
depends on VIDEO_V4L1
---help---
+ This driver is DEPRECATED; please use the gspca ov519 module
+ instead. Note that for the ov511 / ov518 support of the gspca module
+ you need at least version 0.6.0 of libv4l.
+
Say Y here if you want to connect this type of camera to your
computer's USB port. See <file:Documentation/video4linux/ov511.txt>
for more information and for a list of supported cameras.
diff --git a/drivers/media/video/cx18/cx18-controls.c b/drivers/media/video/cx18/cx18-controls.c
index 8e35c3a..5136df1 100644
--- a/drivers/media/video/cx18/cx18-controls.c
+++ b/drivers/media/video/cx18/cx18-controls.c
@@ -61,6 +61,8 @@
switch (qctrl->id) {
/* Standard V4L2 controls */
+ case V4L2_CID_USER_CLASS:
+ return v4l2_ctrl_query_fill(qctrl, 0, 0, 0, 0);
case V4L2_CID_BRIGHTNESS:
case V4L2_CID_HUE:
case V4L2_CID_SATURATION:
diff --git a/drivers/media/video/cx231xx/cx231xx-avcore.c b/drivers/media/video/cx231xx/cx231xx-avcore.c
index 6a94640..28f48f4 100644
--- a/drivers/media/video/cx231xx/cx231xx-avcore.c
+++ b/drivers/media/video/cx231xx/cx231xx-avcore.c
@@ -1052,22 +1052,13 @@
/* Set resolution of the video */
int cx231xx_resolution_set(struct cx231xx *dev)
{
- int width, height;
- u32 hscale, vscale;
- int status = 0;
-
- width = dev->width;
- height = dev->height;
-
- get_scale(dev, width, height, &hscale, &vscale);
-
 /* set horizontal scale */
- status = vid_blk_write_word(dev, HSCALE_CTRL, hscale);
+ int status = vid_blk_write_word(dev, HSCALE_CTRL, dev->hscale);
+ if (status)
+ return status;
/* set vertical scale */
- status = vid_blk_write_word(dev, VSCALE_CTRL, vscale);
-
- return status;
+ return vid_blk_write_word(dev, VSCALE_CTRL, dev->vscale);
}
/******************************************************************************
@@ -2055,7 +2046,7 @@
int cx231xx_capture_start(struct cx231xx *dev, int start, u8 media_type)
{
- int rc;
+ int rc = -1;
u32 ep_mask = -1;
struct pcb_config *pcb_config;
diff --git a/drivers/media/video/cx231xx/cx231xx-video.c b/drivers/media/video/cx231xx/cx231xx-video.c
index a23ae73..609bae6 100644
--- a/drivers/media/video/cx231xx/cx231xx-video.c
+++ b/drivers/media/video/cx231xx/cx231xx-video.c
@@ -893,9 +893,9 @@
return 0;
}
-void get_scale(struct cx231xx *dev,
- unsigned int width, unsigned int height,
- unsigned int *hscale, unsigned int *vscale)
+static void get_scale(struct cx231xx *dev,
+ unsigned int width, unsigned int height,
+ unsigned int *hscale, unsigned int *vscale)
{
unsigned int maxw = norm_maxw(dev);
unsigned int maxh = norm_maxh(dev);
@@ -907,10 +907,6 @@
*vscale = (((unsigned long)maxh) << 12) / height - 4096L;
if (*vscale >= 0x4000)
*vscale = 0x3fff;
-
- dev->hscale = *hscale;
- dev->vscale = *vscale;
-
}
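
The scale values above are fixed-point ratios of the form (max << 12) / requested - 4096, clamped to 0x3fff. A quick standalone check with illustrative numbers (720 is only an example of what norm_maxw() might return):

    #include <stdio.h>

    int main(void)
    {
        unsigned long maxw = 720;                  /* illustrative norm_maxw() */
        unsigned int width = 360;                  /* requested output width */
        unsigned int hscale = (maxw << 12) / width - 4096L;

        if (hscale >= 0x4000)
            hscale = 0x3fff;
        printf("hscale = 0x%04x\n", hscale);       /* 0x1000: 2:1 downscale */
        return 0;
    }
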
/* ------------------------------------------------------------------
@@ -955,8 +951,8 @@
{
struct cx231xx_fh *fh = priv;
struct cx231xx *dev = fh->dev;
- int width = f->fmt.pix.width;
- int height = f->fmt.pix.height;
+ unsigned int width = f->fmt.pix.width;
+ unsigned int height = f->fmt.pix.height;
unsigned int maxw = norm_maxw(dev);
unsigned int maxh = norm_maxh(dev);
unsigned int hscale, vscale;
@@ -971,17 +967,7 @@
 /* width must be even because of the YUYV format
height must be even because of interlacing */
- height &= 0xfffe;
- width &= 0xfffe;
-
- if (unlikely(height < 32))
- height = 32;
- if (unlikely(height > maxh))
- height = maxh;
- if (unlikely(width < 48))
- width = 48;
- if (unlikely(width > maxw))
- width = maxw;
+ v4l_bound_align_image(&width, 48, maxw, 1, &height, 32, maxh, 1, 0);
get_scale(dev, width, height, &hscale, &vscale);
diff --git a/drivers/media/video/cx231xx/cx231xx.h b/drivers/media/video/cx231xx/cx231xx.h
index e38eb2d..a0f823a 100644
--- a/drivers/media/video/cx231xx/cx231xx.h
+++ b/drivers/media/video/cx231xx/cx231xx.h
@@ -722,9 +722,6 @@
int cx231xx_set_decoder_video_input(struct cx231xx *dev, u8 pin_type, u8 input);
int cx231xx_do_mode_ctrl_overrides(struct cx231xx *dev);
int cx231xx_set_audio_input(struct cx231xx *dev, u8 input);
-void get_scale(struct cx231xx *dev,
- unsigned int width, unsigned int height,
- unsigned int *hscale, unsigned int *vscale);
/* Provided by cx231xx-video.c */
int cx231xx_register_extension(struct cx231xx_ops *dev);
diff --git a/drivers/media/video/cx2341x.c b/drivers/media/video/cx2341x.c
index 8ded529..4c8e958 100644
--- a/drivers/media/video/cx2341x.c
+++ b/drivers/media/video/cx2341x.c
@@ -500,6 +500,8 @@
int err;
switch (qctrl->id) {
+ case V4L2_CID_MPEG_CLASS:
+ return v4l2_ctrl_query_fill(qctrl, 0, 0, 0, 0);
case V4L2_CID_MPEG_STREAM_TYPE:
return v4l2_ctrl_query_fill(qctrl,
V4L2_MPEG_STREAM_TYPE_MPEG2_PS,
diff --git a/drivers/media/video/cx23885/cx23885-dvb.c b/drivers/media/video/cx23885/cx23885-dvb.c
index e236df2..48a9751 100644
--- a/drivers/media/video/cx23885/cx23885-dvb.c
+++ b/drivers/media/video/cx23885/cx23885-dvb.c
@@ -45,6 +45,7 @@
#include "dibx000_common.h"
#include "zl10353.h"
#include "stv0900.h"
+#include "stv0900_reg.h"
#include "stv6110.h"
#include "lnbh24.h"
#include "cx24116.h"
@@ -242,12 +243,22 @@
.if_lvl = 6, .rfagc_top = 0x37 },
};
+static struct tda18271_std_map hauppauge_hvr1200_tda18271_std_map = {
+ .dvbt_6 = { .if_freq = 3300, .agc_mode = 3, .std = 4,
+ .if_lvl = 1, .rfagc_top = 0x37, },
+ .dvbt_7 = { .if_freq = 3800, .agc_mode = 3, .std = 5,
+ .if_lvl = 1, .rfagc_top = 0x37, },
+ .dvbt_8 = { .if_freq = 4300, .agc_mode = 3, .std = 6,
+ .if_lvl = 1, .rfagc_top = 0x37, },
+};
+
static struct tda18271_config hauppauge_tda18271_config = {
.std_map = &hauppauge_tda18271_std_map,
.gate = TDA18271_GATE_ANALOG,
};
static struct tda18271_config hauppauge_hvr1200_tuner_config = {
+ .std_map = &hauppauge_hvr1200_tda18271_std_map,
.gate = TDA18271_GATE_ANALOG,
};
@@ -370,13 +381,25 @@
.disable_i2c_gate_ctrl = 1,
};
+static struct stv0900_reg stv0900_ts_regs[] = {
+ { R0900_TSGENERAL, 0x00 },
+ { R0900_P1_TSSPEED, 0x40 },
+ { R0900_P2_TSSPEED, 0x40 },
+ { R0900_P1_TSCFGM, 0xc0 },
+ { R0900_P2_TSCFGM, 0xc0 },
+ { R0900_P1_TSCFGH, 0xe0 },
+ { R0900_P2_TSCFGH, 0xe0 },
+ { R0900_P1_TSCFGL, 0x20 },
+ { R0900_P2_TSCFGL, 0x20 },
+ { 0xffff, 0xff }, /* terminate */
+};
+
static struct stv0900_config netup_stv0900_config = {
.demod_address = 0x68,
.xtal = 27000000,
.clkmode = 3,/* 0-CLKI, 2-XTALI, else AUTO */
.diseqc_mode = 2,/* 2/3 PWM */
- .path1_mode = 2,/*Serial continues clock */
- .path2_mode = 2,/*Serial continues clock */
+ .ts_config_regs = stv0900_ts_regs,
.tun1_maddress = 0,/* 0x60 */
.tun2_maddress = 3,/* 0x63 */
.tun1_adc = 1,/* 1 Vpp */
@@ -736,7 +759,8 @@
if (!dvb_attach(lnbh24_attach,
fe0->dvb.frontend,
&i2c_bus->i2c_adap,
- LNBH24_PCL, 0, 0x09))
+ LNBH24_PCL,
+ LNBH24_TTX, 0x09))
printk(KERN_ERR
"No LNBH24 found!\n");
@@ -756,7 +780,8 @@
if (!dvb_attach(lnbh24_attach,
fe0->dvb.frontend,
&i2c_bus->i2c_adap,
- LNBH24_PCL, 0, 0x0a))
+ LNBH24_PCL,
+ LNBH24_TTX, 0x0a))
printk(KERN_ERR
"No LNBH24 found!\n");
diff --git a/drivers/media/video/cx23885/cx23885-video.c b/drivers/media/video/cx23885/cx23885-video.c
index 66bbd2e..70836af 100644
--- a/drivers/media/video/cx23885/cx23885-video.c
+++ b/drivers/media/video/cx23885/cx23885-video.c
@@ -963,15 +963,8 @@
}
f->fmt.pix.field = field;
- if (f->fmt.pix.height < 32)
- f->fmt.pix.height = 32;
- if (f->fmt.pix.height > maxh)
- f->fmt.pix.height = maxh;
- if (f->fmt.pix.width < 48)
- f->fmt.pix.width = 48;
- if (f->fmt.pix.width > maxw)
- f->fmt.pix.width = maxw;
- f->fmt.pix.width &= ~0x03;
+ v4l_bound_align_image(&f->fmt.pix.width, 48, maxw, 2,
+ &f->fmt.pix.height, 32, maxh, 0, 0);
f->fmt.pix.bytesperline =
(f->fmt.pix.width * fmt->depth) >> 3;
f->fmt.pix.sizeimage =
diff --git a/drivers/media/video/cx88/cx88-cards.c b/drivers/media/video/cx88/cx88-cards.c
index 94b7a52..a5cc1c1 100644
--- a/drivers/media/video/cx88/cx88-cards.c
+++ b/drivers/media/video/cx88/cx88-cards.c
@@ -1524,33 +1524,45 @@
},
.mpeg = CX88_MPEG_DVB,
},
+ /* Terry Wu <terrywu2009@gmail.com> */
+ /* TV Audio : set GPIO 2, 18, 19 value to 0, 1, 0 */
+ /* FM Audio : set GPIO 2, 18, 19 value to 0, 0, 0 */
+ /* Line-in Audio : set GPIO 2, 18, 19 value to 0, 1, 1 */
+ /* Mute Audio : set GPIO 2 value to 1 */
[CX88_BOARD_WINFAST_TV2000_XP_GLOBAL] = {
- .name = "Winfast TV2000 XP Global",
+ .name = "Leadtek TV2000 XP Global",
.tuner_type = TUNER_XC2028,
.tuner_addr = 0x61,
+ .radio_type = TUNER_XC2028,
+ .radio_addr = 0x61,
.input = { {
.type = CX88_VMUX_TELEVISION,
.vmux = 0,
- .gpio0 = 0x0400, /* pin 2:mute = 0 (off?) */
+ .gpio0 = 0x0400, /* pin 2 = 0 */
.gpio1 = 0x0000,
- .gpio2 = 0x0800, /* pin 19:audio = 0 (tv) */
-
+ .gpio2 = 0x0C04, /* pin 18 = 1, pin 19 = 0 */
+ .gpio3 = 0x0000,
}, {
.type = CX88_VMUX_COMPOSITE1,
.vmux = 1,
- .gpio0 = 0x0400, /* probably? or 0x0404 to turn mute on */
+ .gpio0 = 0x0400, /* pin 2 = 0 */
.gpio1 = 0x0000,
- .gpio2 = 0x0808, /* pin 19:audio = 1 (line) */
-
+ .gpio2 = 0x0C0C, /* pin 18 = 1, pin 19 = 1 */
+ .gpio3 = 0x0000,
}, {
.type = CX88_VMUX_SVIDEO,
.vmux = 2,
+ .gpio0 = 0x0400, /* pin 2 = 0 */
+ .gpio1 = 0x0000,
+ .gpio2 = 0x0C0C, /* pin 18 = 1, pin 19 = 1 */
+ .gpio3 = 0x0000,
} },
.radio = {
.type = CX88_RADIO,
- .gpio0 = 0x004ff,
- .gpio1 = 0x010ff,
- .gpio2 = 0x0ff,
+ .gpio0 = 0x0400, /* pin 2 = 0 */
+ .gpio1 = 0x0000,
+ .gpio2 = 0x0C00, /* pin 18 = 0, pin 19 = 0 */
+ .gpio3 = 0x0000,
},
},
[CX88_BOARD_POWERCOLOR_REAL_ANGEL] = {
@@ -2438,6 +2450,41 @@
.subvendor = 0x107d,
.subdevice = 0x6654,
.card = CX88_BOARD_WINFAST_DTV1800H,
+ }, {
+ /* PVR2000 PAL Model [107d:6630] */
+ .subvendor = 0x107d,
+ .subdevice = 0x6630,
+ .card = CX88_BOARD_LEADTEK_PVR2000,
+ }, {
+ /* PVR2000 PAL Model [107d:6638] */
+ .subvendor = 0x107d,
+ .subdevice = 0x6638,
+ .card = CX88_BOARD_LEADTEK_PVR2000,
+ }, {
+ /* PVR2000 NTSC Model [107d:6631] */
+ .subvendor = 0x107d,
+ .subdevice = 0x6631,
+ .card = CX88_BOARD_LEADTEK_PVR2000,
+ }, {
+ /* PVR2000 NTSC Model [107d:6637] */
+ .subvendor = 0x107d,
+ .subdevice = 0x6637,
+ .card = CX88_BOARD_LEADTEK_PVR2000,
+ }, {
+ /* PVR2000 NTSC Model [107d:663d] */
+ .subvendor = 0x107d,
+ .subdevice = 0x663d,
+ .card = CX88_BOARD_LEADTEK_PVR2000,
+ }, {
+ /* DV2000 NTSC Model [107d:6621] */
+ .subvendor = 0x107d,
+ .subdevice = 0x6621,
+ .card = CX88_BOARD_WINFAST_DV2000,
+ }, {
+ /* TV2000 XP Global [107d:6618] */
+ .subvendor = 0x107d,
+ .subdevice = 0x6618,
+ .card = CX88_BOARD_WINFAST_TV2000_XP_GLOBAL,
},
};
@@ -2446,12 +2493,6 @@
static void leadtek_eeprom(struct cx88_core *core, u8 *eeprom_data)
{
- /* This is just for the "Winfast 2000XP Expert" board ATM; I don't have data on
- * any others.
- *
- * Byte 0 is 1 on the NTSC board.
- */
-
if (eeprom_data[4] != 0x7d ||
eeprom_data[5] != 0x10 ||
eeprom_data[7] != 0x66) {
@@ -2459,8 +2500,19 @@
return;
}
- core->board.tuner_type = (eeprom_data[6] == 0x13) ?
- TUNER_PHILIPS_FM1236_MK3 : TUNER_PHILIPS_FM1216ME_MK3;
+ /* Terry Wu <terrywu2009@gmail.com> */
+ switch (eeprom_data[6]) {
+ case 0x13: /* SSID 6613 for TV2000 XP Expert NTSC Model */
+ case 0x21: /* SSID 6621 for DV2000 NTSC Model */
+ case 0x31: /* SSID 6631 for PVR2000 NTSC Model */
+ case 0x37: /* SSID 6637 for PVR2000 NTSC Model */
+ case 0x3d: /* SSID 663d for PVR2000 NTSC Model */
+ core->board.tuner_type = TUNER_PHILIPS_FM1236_MK3;
+ break;
+ default:
+ core->board.tuner_type = TUNER_PHILIPS_FM1216ME_MK3;
+ break;
+ }
info_printk(core, "Leadtek Winfast 2000XP Expert config: "
"tuner=%d, eeprom[0]=0x%02x\n",
@@ -2713,7 +2765,6 @@
{
/* Board-specific callbacks */
switch (core->boardnr) {
- case CX88_BOARD_WINFAST_TV2000_XP_GLOBAL:
case CX88_BOARD_POWERCOLOR_REAL_ANGEL:
case CX88_BOARD_GENIATECH_X8000_MT:
case CX88_BOARD_KWORLD_ATSC_120:
@@ -2725,6 +2776,7 @@
case CX88_BOARD_DVICO_FUSIONHDTV_DVB_T_PRO:
case CX88_BOARD_DVICO_FUSIONHDTV_5_PCI_NANO:
return cx88_dvico_xc2028_callback(core, command, arg);
+ case CX88_BOARD_WINFAST_TV2000_XP_GLOBAL:
case CX88_BOARD_WINFAST_DTV1800H:
return cx88_xc3028_winfast1800h_callback(core, command, arg);
}
@@ -2914,6 +2966,7 @@
udelay(1000);
break;
+ case CX88_BOARD_WINFAST_TV2000_XP_GLOBAL:
case CX88_BOARD_WINFAST_DTV1800H:
/* GPIO 12 (xc3028 tuner reset) */
cx_set(MO_GP1_IO, 0x1010);
@@ -2950,6 +3003,7 @@
case CX88_BOARD_DVICO_FUSIONHDTV_5_PCI_NANO:
ctl->demod = XC3028_FE_OREN538;
break;
+ case CX88_BOARD_WINFAST_TV2000_XP_GLOBAL:
case CX88_BOARD_PROLINK_PV_GLOBAL_XTREME:
case CX88_BOARD_PROLINK_PV_8000GT:
/*
@@ -2993,6 +3047,8 @@
if (0 == core->i2c_rc)
gdi_eeprom(core, eeprom);
break;
+ case CX88_BOARD_LEADTEK_PVR2000:
+ case CX88_BOARD_WINFAST_DV2000:
case CX88_BOARD_WINFAST2000XP_EXPERT:
if (0 == core->i2c_rc)
leadtek_eeprom(core, eeprom);
diff --git a/drivers/media/video/cx88/cx88-video.c b/drivers/media/video/cx88/cx88-video.c
index 0ccac70..b127708 100644
--- a/drivers/media/video/cx88/cx88-video.c
+++ b/drivers/media/video/cx88/cx88-video.c
@@ -1111,15 +1111,8 @@
}
f->fmt.pix.field = field;
- if (f->fmt.pix.height < 32)
- f->fmt.pix.height = 32;
- if (f->fmt.pix.height > maxh)
- f->fmt.pix.height = maxh;
- if (f->fmt.pix.width < 48)
- f->fmt.pix.width = 48;
- if (f->fmt.pix.width > maxw)
- f->fmt.pix.width = maxw;
- f->fmt.pix.width &= ~0x03;
+ v4l_bound_align_image(&f->fmt.pix.width, 48, maxw, 2,
+ &f->fmt.pix.height, 32, maxh, 0, 0);
f->fmt.pix.bytesperline =
(f->fmt.pix.width * fmt->depth) >> 3;
f->fmt.pix.sizeimage =
diff --git a/drivers/media/video/em28xx/em28xx-cards.c b/drivers/media/video/em28xx/em28xx-cards.c
index 00cc791..c43fdb9 100644
--- a/drivers/media/video/em28xx/em28xx-cards.c
+++ b/drivers/media/video/em28xx/em28xx-cards.c
@@ -139,6 +139,24 @@
{ -1, -1, -1, -1},
};
+/* Evga inDtube
+ GPIO0 - Enable digital power (s5h1409) - low to enable
+ GPIO1 - Enable analog power (tvp5150/emp202) - low to enable
+ GPIO4 - xc3028 reset
+ GOP3 - s5h1409 reset
+ */
+static struct em28xx_reg_seq evga_indtube_analog[] = {
+ {EM28XX_R08_GPIO, 0x79, 0xff, 60},
+ { -1, -1, -1, -1},
+};
+
+static struct em28xx_reg_seq evga_indtube_digital[] = {
+ {EM28XX_R08_GPIO, 0x7a, 0xff, 1},
+ {EM2880_R04_GPO, 0x04, 0xff, 10},
+ {EM2880_R04_GPO, 0x0c, 0xff, 1},
+ { -1, -1, -1, -1},
+};
+
/* Callback for the most boards */
static struct em28xx_reg_seq default_tuner_gpio[] = {
{EM28XX_R08_GPIO, EM_GPIO_4, EM_GPIO_4, 10},
@@ -1449,6 +1467,33 @@
.gpio = terratec_av350_unmute_gpio,
} },
},
+ [EM2882_BOARD_EVGA_INDTUBE] = {
+ .name = "Evga inDtube",
+ .tuner_type = TUNER_XC2028,
+ .tuner_gpio = default_tuner_gpio,
+ .decoder = EM28XX_TVP5150,
+ .xclk = EM28XX_XCLK_FREQUENCY_12MHZ, /* NEC IR */
+ .mts_firmware = 1,
+ .has_dvb = 1,
+ .dvb_gpio = evga_indtube_digital,
+ .ir_codes = ir_codes_evga_indtube,
+ .input = { {
+ .type = EM28XX_VMUX_TELEVISION,
+ .vmux = TVP5150_COMPOSITE0,
+ .amux = EM28XX_AMUX_VIDEO,
+ .gpio = evga_indtube_analog,
+ }, {
+ .type = EM28XX_VMUX_COMPOSITE1,
+ .vmux = TVP5150_COMPOSITE1,
+ .amux = EM28XX_AMUX_LINE_IN,
+ .gpio = evga_indtube_analog,
+ }, {
+ .type = EM28XX_VMUX_SVIDEO,
+ .vmux = TVP5150_SVIDEO,
+ .amux = EM28XX_AMUX_LINE_IN,
+ .gpio = evga_indtube_analog,
+ } },
+ },
};
const unsigned int em28xx_bcount = ARRAY_SIZE(em28xx_boards);
@@ -1571,6 +1616,7 @@
{0x72cc5a8b, EM2820_BOARD_PROLINK_PLAYTV_BOX4_USB2, TUNER_YMEC_TVF_5533MF},
{0x966a0441, EM2880_BOARD_KWORLD_DVB_310U, TUNER_XC2028},
{0x9567eb1a, EM2880_BOARD_EMPIRE_DUAL_TV, TUNER_XC2028},
+ {0xcee44a99, EM2882_BOARD_EVGA_INDTUBE, TUNER_XC2028},
};
/* I2C devicelist hash table for devices with generic USB IDs */
@@ -1834,6 +1880,10 @@
ctl->demod = XC3028_FE_CHINA;
ctl->fname = XC2028_DEFAULT_FIRMWARE;
break;
+ case EM2882_BOARD_EVGA_INDTUBE:
+ ctl->demod = XC3028_FE_CHINA;
+ ctl->fname = XC3028L_DEFAULT_FIRMWARE;
+ break;
default:
ctl->demod = XC3028_FE_OREN538;
}
@@ -2101,6 +2151,12 @@
case EM2880_BOARD_MSI_DIGIVOX_AD:
if (!em28xx_hint_board(dev))
em28xx_set_model(dev);
+
+ /* In cases where we had to use a board hint, the call to
+ em28xx_set_mode() in em28xx_pre_card_setup() was a no-op,
+ so make the call now so the analog GPIOs are set properly
+ before probing the i2c bus. */
+ em28xx_set_mode(dev, EM28XX_ANALOG_MODE);
break;
}
diff --git a/drivers/media/video/em28xx/em28xx-dvb.c b/drivers/media/video/em28xx/em28xx-dvb.c
index 563dd2b..e7b47c8 100644
--- a/drivers/media/video/em28xx/em28xx-dvb.c
+++ b/drivers/media/video/em28xx/em28xx-dvb.c
@@ -445,6 +445,7 @@
}
break;
case EM2883_BOARD_KWORLD_HYBRID_330U:
+ case EM2882_BOARD_EVGA_INDTUBE:
dvb->frontend = dvb_attach(s5h1409_attach,
&em28xx_s5h1409_with_xc3028,
&dev->i2c_adap);
diff --git a/drivers/media/video/em28xx/em28xx-video.c b/drivers/media/video/em28xx/em28xx-video.c
index 882796e..8fe1bee 100644
--- a/drivers/media/video/em28xx/em28xx-video.c
+++ b/drivers/media/video/em28xx/em28xx-video.c
@@ -687,8 +687,8 @@
{
struct em28xx_fh *fh = priv;
struct em28xx *dev = fh->dev;
- int width = f->fmt.pix.width;
- int height = f->fmt.pix.height;
+ unsigned int width = f->fmt.pix.width;
+ unsigned int height = f->fmt.pix.height;
unsigned int maxw = norm_maxw(dev);
unsigned int maxh = norm_maxh(dev);
unsigned int hscale, vscale;
@@ -701,34 +701,20 @@
return -EINVAL;
}
- /* width must even because of the YUYV format
- height must be even because of interlacing */
- height &= 0xfffe;
- width &= 0xfffe;
-
- if (unlikely(height < 32))
- height = 32;
- if (unlikely(height > maxh))
- height = maxh;
- if (unlikely(width < 48))
- width = 48;
- if (unlikely(width > maxw))
- width = maxw;
-
if (dev->board.is_em2800) {
/* the em2800 can only scale down to 50% */
- if (height % (maxh / 2))
- height = maxh;
- if (width % (maxw / 2))
- width = maxw;
- /* according to empiatech support */
- /* the MaxPacketSize is to small to support */
- /* framesizes larger than 640x480 @ 30 fps */
- /* or 640x576 @ 25 fps. As this would cut */
- /* of a part of the image we prefer */
- /* 360x576 or 360x480 for now */
+ height = height > (3 * maxh / 4) ? maxh : maxh / 2;
+ width = width > (3 * maxw / 4) ? maxw : maxw / 2;
+ /* According to empiatech support the MaxPacketSize is too small
+ * to support framesizes larger than 640x480 @ 30 fps or 640x576
+ * @ 25 fps. As this would cut off a part of the image we prefer
+ * 360x576 or 360x480 for now */
if (width == maxw && height == maxh)
width /= 2;
+ } else {
+ /* width must be even because of the YUYV format
+ height must be even because of interlacing */
+ v4l_bound_align_image(&width, 48, maxw, 1, &height, 32, maxh, 1, 0);
}
get_scale(dev, width, height, &hscale, &vscale);
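
The em2800 branch above rounds each dimension to either full size or exactly half of the native size; a standalone check of that rounding rule with example limits (the native size is illustrative only):

    #include <stdio.h>

    int main(void)
    {
        unsigned int maxw = 640, maxh = 480;        /* example native size */
        unsigned int width = 500, height = 350;     /* requested size */

        /* Same rounding as the em2800 branch: anything above 3/4 of the
         * native size becomes full size, everything else becomes half. */
        height = height > (3 * maxh / 4) ? maxh : maxh / 2;
        width  = width  > (3 * maxw / 4) ? maxw : maxw / 2;

        printf("%ux%u\n", width, height);           /* prints 640x240 */
        return 0;
    }
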
diff --git a/drivers/media/video/em28xx/em28xx.h b/drivers/media/video/em28xx/em28xx.h
index 8bf81be..813ce45 100644
--- a/drivers/media/video/em28xx/em28xx.h
+++ b/drivers/media/video/em28xx/em28xx.h
@@ -106,6 +106,7 @@
#define EM2860_BOARD_TERRATEC_GRABBY 67
#define EM2860_BOARD_TERRATEC_AV350 68
#define EM2882_BOARD_KWORLD_ATSC_315U 69
+#define EM2882_BOARD_EVGA_INDTUBE 70
/* Limits minimum and default number of buffers */
#define EM28XX_MIN_BUF 4
diff --git a/drivers/media/video/gspca/gspca.c b/drivers/media/video/gspca/gspca.c
index f7e0355..1e89600 100644
--- a/drivers/media/video/gspca/gspca.c
+++ b/drivers/media/video/gspca/gspca.c
@@ -1042,13 +1042,11 @@
for (i = 0; i < gspca_dev->sd_desc->nctrls; i++) {
if (gspca_dev->ctrl_dis & (1 << i))
continue;
- if (ctrls->qctrl.id < id)
+ if (gspca_dev->sd_desc->ctrls[i].qctrl.id < id)
continue;
- if (ctrls != NULL) {
- if (gspca_dev->sd_desc->ctrls[i].qctrl.id
+ if (ctrls && gspca_dev->sd_desc->ctrls[i].qctrl.id
> ctrls->qctrl.id)
- continue;
- }
+ continue;
ctrls = &gspca_dev->sd_desc->ctrls[i];
}
} else {
diff --git a/drivers/media/video/gspca/ov519.c b/drivers/media/video/gspca/ov519.c
index 188866a..2f6e135 100644
--- a/drivers/media/video/gspca/ov519.c
+++ b/drivers/media/video/gspca/ov519.c
@@ -50,12 +50,18 @@
struct sd {
struct gspca_dev gspca_dev; /* !! must be the first item */
+ __u8 packet_nr;
+
char bridge;
#define BRIDGE_OV511 0
#define BRIDGE_OV511PLUS 1
#define BRIDGE_OV518 2
#define BRIDGE_OV518PLUS 3
#define BRIDGE_OV519 4
+#define BRIDGE_MASK 7
+
+ char invert_led;
+#define BRIDGE_INVERT_LED 8
/* Determined by sensor type */
__u8 sif;
@@ -65,22 +71,25 @@
__u8 colors;
__u8 hflip;
__u8 vflip;
+ __u8 autobrightness;
+ __u8 freq;
__u8 stopped; /* Streaming is temporarily paused */
- __u8 frame_rate; /* current Framerate (OV519 only) */
- __u8 clockdiv; /* clockdiv override for OV519 only */
+ __u8 frame_rate; /* current Framerate */
+ __u8 clockdiv; /* clockdiv override */
char sensor; /* Type of image sensor chip (SEN_*) */
#define SEN_UNKNOWN 0
#define SEN_OV6620 1
#define SEN_OV6630 2
-#define SEN_OV7610 3
-#define SEN_OV7620 4
-#define SEN_OV7640 5
-#define SEN_OV7670 6
-#define SEN_OV76BE 7
-#define SEN_OV8610 8
+#define SEN_OV66308AF 3
+#define SEN_OV7610 4
+#define SEN_OV7620 5
+#define SEN_OV7640 6
+#define SEN_OV7670 7
+#define SEN_OV76BE 8
+#define SEN_OV8610 9
};
/* V4L2 controls supported by the driver */
@@ -94,11 +103,17 @@
static int sd_gethflip(struct gspca_dev *gspca_dev, __s32 *val);
static int sd_setvflip(struct gspca_dev *gspca_dev, __s32 val);
static int sd_getvflip(struct gspca_dev *gspca_dev, __s32 *val);
+static int sd_setautobrightness(struct gspca_dev *gspca_dev, __s32 val);
+static int sd_getautobrightness(struct gspca_dev *gspca_dev, __s32 *val);
+static int sd_setfreq(struct gspca_dev *gspca_dev, __s32 val);
+static int sd_getfreq(struct gspca_dev *gspca_dev, __s32 *val);
static void setbrightness(struct gspca_dev *gspca_dev);
static void setcontrast(struct gspca_dev *gspca_dev);
static void setcolors(struct gspca_dev *gspca_dev);
+static void setautobrightness(struct sd *sd);
+static void setfreq(struct sd *sd);
-static struct ctrl sd_ctrls[] = {
+static const struct ctrl sd_ctrls[] = {
{
{
.id = V4L2_CID_BRIGHTNESS,
@@ -141,7 +156,7 @@
.set = sd_setcolors,
.get = sd_getcolors,
},
-/* next controls work with ov7670 only */
+/* The flip controls work with ov7670 only */
#define HFLIP_IDX 3
{
{
@@ -172,6 +187,51 @@
.set = sd_setvflip,
.get = sd_getvflip,
},
+#define AUTOBRIGHT_IDX 5
+ {
+ {
+ .id = V4L2_CID_AUTOBRIGHTNESS,
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .name = "Auto Brightness",
+ .minimum = 0,
+ .maximum = 1,
+ .step = 1,
+#define AUTOBRIGHT_DEF 1
+ .default_value = AUTOBRIGHT_DEF,
+ },
+ .set = sd_setautobrightness,
+ .get = sd_getautobrightness,
+ },
+#define FREQ_IDX 6
+ {
+ {
+ .id = V4L2_CID_POWER_LINE_FREQUENCY,
+ .type = V4L2_CTRL_TYPE_MENU,
+ .name = "Light frequency filter",
+ .minimum = 0,
+ .maximum = 2, /* 0: 0, 1: 50Hz, 2:60Hz */
+ .step = 1,
+#define FREQ_DEF 0
+ .default_value = FREQ_DEF,
+ },
+ .set = sd_setfreq,
+ .get = sd_getfreq,
+ },
+#define OV7670_FREQ_IDX 7
+ {
+ {
+ .id = V4L2_CID_POWER_LINE_FREQUENCY,
+ .type = V4L2_CTRL_TYPE_MENU,
+ .name = "Light frequency filter",
+ .minimum = 0,
+ .maximum = 3, /* 0: 0, 1: 50Hz, 2:60Hz 3: Auto Hz */
+ .step = 1,
+#define OV7670_FREQ_DEF 3
+ .default_value = OV7670_FREQ_DEF,
+ },
+ .set = sd_setfreq,
+ .get = sd_getfreq,
+ },
};
static const struct v4l2_pix_format ov519_vga_mode[] = {
@@ -187,11 +247,21 @@
.priv = 0},
};
static const struct v4l2_pix_format ov519_sif_mode[] = {
+ {160, 120, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE,
+ .bytesperline = 160,
+ .sizeimage = 160 * 120 * 3 / 8 + 590,
+ .colorspace = V4L2_COLORSPACE_JPEG,
+ .priv = 3},
{176, 144, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE,
.bytesperline = 176,
.sizeimage = 176 * 144 * 3 / 8 + 590,
.colorspace = V4L2_COLORSPACE_JPEG,
.priv = 1},
+ {320, 240, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE,
+ .bytesperline = 320,
+ .sizeimage = 320 * 240 * 3 / 8 + 590,
+ .colorspace = V4L2_COLORSPACE_JPEG,
+ .priv = 2},
{352, 288, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE,
.bytesperline = 352,
.sizeimage = 352 * 288 * 3 / 8 + 590,
@@ -199,42 +269,118 @@
.priv = 0},
};
+/* Note that some of the sizeimage values for the ov511 / ov518 may seem
+ larger than necessary; however, they need to be this big as the ov511 /
+ ov518 always fills the entire isoc frame, using 0 padding bytes when
+ it doesn't have any data. So with low framerates the amount of data
+ transferred can become quite large (libv4l will remove all the 0 padding
+ in userspace). */
static const struct v4l2_pix_format ov518_vga_mode[] = {
{320, 240, V4L2_PIX_FMT_OV518, V4L2_FIELD_NONE,
.bytesperline = 320,
- .sizeimage = 320 * 240 * 3 / 8 + 590,
+ .sizeimage = 320 * 240 * 3,
.colorspace = V4L2_COLORSPACE_JPEG,
.priv = 1},
{640, 480, V4L2_PIX_FMT_OV518, V4L2_FIELD_NONE,
.bytesperline = 640,
- .sizeimage = 640 * 480 * 3 / 8 + 590,
+ .sizeimage = 640 * 480 * 2,
.colorspace = V4L2_COLORSPACE_JPEG,
.priv = 0},
};
static const struct v4l2_pix_format ov518_sif_mode[] = {
+ {160, 120, V4L2_PIX_FMT_OV518, V4L2_FIELD_NONE,
+ .bytesperline = 160,
+ .sizeimage = 70000,
+ .colorspace = V4L2_COLORSPACE_JPEG,
+ .priv = 3},
{176, 144, V4L2_PIX_FMT_OV518, V4L2_FIELD_NONE,
.bytesperline = 176,
- .sizeimage = 40000,
+ .sizeimage = 70000,
.colorspace = V4L2_COLORSPACE_JPEG,
.priv = 1},
+ {320, 240, V4L2_PIX_FMT_OV518, V4L2_FIELD_NONE,
+ .bytesperline = 320,
+ .sizeimage = 320 * 240 * 3,
+ .colorspace = V4L2_COLORSPACE_JPEG,
+ .priv = 2},
{352, 288, V4L2_PIX_FMT_OV518, V4L2_FIELD_NONE,
.bytesperline = 352,
- .sizeimage = 352 * 288 * 3 / 8 + 590,
+ .sizeimage = 352 * 288 * 3,
.colorspace = V4L2_COLORSPACE_JPEG,
.priv = 0},
};
+static const struct v4l2_pix_format ov511_vga_mode[] = {
+ {320, 240, V4L2_PIX_FMT_OV511, V4L2_FIELD_NONE,
+ .bytesperline = 320,
+ .sizeimage = 320 * 240 * 3,
+ .colorspace = V4L2_COLORSPACE_JPEG,
+ .priv = 1},
+ {640, 480, V4L2_PIX_FMT_OV511, V4L2_FIELD_NONE,
+ .bytesperline = 640,
+ .sizeimage = 640 * 480 * 2,
+ .colorspace = V4L2_COLORSPACE_JPEG,
+ .priv = 0},
+};
+static const struct v4l2_pix_format ov511_sif_mode[] = {
+ {160, 120, V4L2_PIX_FMT_OV511, V4L2_FIELD_NONE,
+ .bytesperline = 160,
+ .sizeimage = 70000,
+ .colorspace = V4L2_COLORSPACE_JPEG,
+ .priv = 3},
+ {176, 144, V4L2_PIX_FMT_OV511, V4L2_FIELD_NONE,
+ .bytesperline = 176,
+ .sizeimage = 70000,
+ .colorspace = V4L2_COLORSPACE_JPEG,
+ .priv = 1},
+ {320, 240, V4L2_PIX_FMT_OV511, V4L2_FIELD_NONE,
+ .bytesperline = 320,
+ .sizeimage = 320 * 240 * 3,
+ .colorspace = V4L2_COLORSPACE_JPEG,
+ .priv = 2},
+ {352, 288, V4L2_PIX_FMT_OV511, V4L2_FIELD_NONE,
+ .bytesperline = 352,
+ .sizeimage = 352 * 288 * 3,
+ .colorspace = V4L2_COLORSPACE_JPEG,
+ .priv = 0},
+};
/* Registers common to OV511 / OV518 */
+#define R51x_FIFO_PSIZE 0x30 /* 2 bytes wide w/ OV518(+) */
#define R51x_SYS_RESET 0x50
+ /* Reset type flags */
+ #define OV511_RESET_OMNICE 0x08
#define R51x_SYS_INIT 0x53
#define R51x_SYS_SNAP 0x52
#define R51x_SYS_CUST_ID 0x5F
#define R51x_COMP_LUT_BEGIN 0x80
/* OV511 Camera interface register numbers */
+#define R511_CAM_DELAY 0x10
+#define R511_CAM_EDGE 0x11
+#define R511_CAM_PXCNT 0x12
+#define R511_CAM_LNCNT 0x13
+#define R511_CAM_PXDIV 0x14
+#define R511_CAM_LNDIV 0x15
+#define R511_CAM_UV_EN 0x16
+#define R511_CAM_LINE_MODE 0x17
+#define R511_CAM_OPTS 0x18
+
+#define R511_SNAP_FRAME 0x19
+#define R511_SNAP_PXCNT 0x1A
+#define R511_SNAP_LNCNT 0x1B
+#define R511_SNAP_PXDIV 0x1C
+#define R511_SNAP_LNDIV 0x1D
+#define R511_SNAP_UV_EN 0x1E
+#define R511_SNAP_OPTS 0x1F
+
+#define R511_DRAM_FLOW_CTL 0x20
+#define R511_FIFO_OPTS 0x31
+#define R511_I2C_CTL 0x40
#define R511_SYS_LED_CTL 0x55 /* OV511+ only */
-#define OV511_RESET_NOREGS 0x3F /* All but OV511 & regs */
+#define R511_COMP_EN 0x78
+#define R511_COMP_LUT_EN 0x79
/* OV518 Camera interface register numbers */
#define R518_GPIO_OUT 0x56 /* OV518(+) only */
@@ -383,7 +529,7 @@
{ 0x28, 0x05 },
{ 0x2a, 0x04 }, /* Disable framerate adjust */
/* { 0x2b, 0xac }, * Framerate; Set 2a[7] first */
- { 0x2d, 0x99 },
+ { 0x2d, 0x85 },
{ 0x33, 0xa0 }, /* Color Processing Parameter */
{ 0x34, 0xd2 }, /* Max A/D range */
{ 0x38, 0x8b },
@@ -416,7 +562,7 @@
{ 0x07, 0x2d }, /* Sharpness */
{ 0x0c, 0x20 },
{ 0x0d, 0x20 },
- { 0x0e, 0x20 },
+ { 0x0e, 0xa0 }, /* Was 0x20, bit7 enables a 2x gain which we need */
{ 0x0f, 0x05 },
{ 0x10, 0x9a },
{ 0x11, 0x00 }, /* Pixel clock = fastest */
@@ -558,7 +704,7 @@
{ 0x23, 0x00 },
{ 0x26, 0xa2 },
{ 0x27, 0xea },
- { 0x28, 0x20 },
+ { 0x28, 0x22 }, /* Was 0x20, bit1 enables a 2x gain which we need */
{ 0x29, 0x00 },
{ 0x2a, 0x10 },
{ 0x2b, 0x00 },
@@ -999,13 +1145,128 @@
return ret;
}
+static int ov511_i2c_w(struct sd *sd, __u8 reg, __u8 value)
+{
+ int rc, retries;
+
+ PDEBUG(D_USBO, "i2c 0x%02x -> [0x%02x]", value, reg);
+
+ /* Three byte write cycle */
+ for (retries = 6; ; ) {
+ /* Select camera register */
+ rc = reg_w(sd, R51x_I2C_SADDR_3, reg);
+ if (rc < 0)
+ return rc;
+
+ /* Write "value" to I2C data port of OV511 */
+ rc = reg_w(sd, R51x_I2C_DATA, value);
+ if (rc < 0)
+ return rc;
+
+ /* Initiate 3-byte write cycle */
+ rc = reg_w(sd, R511_I2C_CTL, 0x01);
+ if (rc < 0)
+ return rc;
+
+ do
+ rc = reg_r(sd, R511_I2C_CTL);
+ while (rc > 0 && ((rc & 1) == 0)); /* Retry until idle */
+
+ if (rc < 0)
+ return rc;
+
+ if ((rc & 2) == 0) /* Ack? */
+ break;
+ if (--retries < 0) {
+ PDEBUG(D_USBO, "i2c write retries exhausted");
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
+static int ov511_i2c_r(struct sd *sd, __u8 reg)
+{
+ int rc, value, retries;
+
+ /* Two byte write cycle */
+ for (retries = 6; ; ) {
+ /* Select camera register */
+ rc = reg_w(sd, R51x_I2C_SADDR_2, reg);
+ if (rc < 0)
+ return rc;
+
+ /* Initiate 2-byte write cycle */
+ rc = reg_w(sd, R511_I2C_CTL, 0x03);
+ if (rc < 0)
+ return rc;
+
+ do
+ rc = reg_r(sd, R511_I2C_CTL);
+ while (rc > 0 && ((rc & 1) == 0)); /* Retry until idle */
+
+ if (rc < 0)
+ return rc;
+
+ if ((rc & 2) == 0) /* Ack? */
+ break;
+
+ /* I2C abort */
+ reg_w(sd, R511_I2C_CTL, 0x10);
+
+ if (--retries < 0) {
+ PDEBUG(D_USBI, "i2c write retries exhausted");
+ return -1;
+ }
+ }
+
+ /* Two byte read cycle */
+ for (retries = 6; ; ) {
+ /* Initiate 2-byte read cycle */
+ rc = reg_w(sd, R511_I2C_CTL, 0x05);
+ if (rc < 0)
+ return rc;
+
+ do
+ rc = reg_r(sd, R511_I2C_CTL);
+ while (rc > 0 && ((rc & 1) == 0)); /* Retry until idle */
+
+ if (rc < 0)
+ return rc;
+
+ if ((rc & 2) == 0) /* Ack? */
+ break;
+
+ /* I2C abort */
+ rc = reg_w(sd, R511_I2C_CTL, 0x10);
+ if (rc < 0)
+ return rc;
+
+ if (--retries < 0) {
+ PDEBUG(D_USBI, "i2c read retries exhausted");
+ return -1;
+ }
+ }
+
+ value = reg_r(sd, R51x_I2C_DATA);
+
+ PDEBUG(D_USBI, "i2c [0x%02X] -> 0x%02X", reg, value);
+
+ /* This is needed to make i2c_w() work */
+ rc = reg_w(sd, R511_I2C_CTL, 0x05);
+ if (rc < 0)
+ return rc;
+
+ return value;
+}
/*
* The OV518 I2C I/O procedure is different, hence, this function.
* This is normally only called from i2c_w(). Note that this function
* always succeeds regardless of whether the sensor is present and working.
*/
-static int i2c_w(struct sd *sd,
+static int ov518_i2c_w(struct sd *sd,
__u8 reg,
__u8 value)
{
@@ -1040,7 +1301,7 @@
* This is normally only called from i2c_r(). Note that this function
* always succeeds regardless of whether the sensor is present and working.
*/
-static int i2c_r(struct sd *sd, __u8 reg)
+static int ov518_i2c_r(struct sd *sd, __u8 reg)
{
int rc, value;
@@ -1063,6 +1324,34 @@
return value;
}
+static int i2c_w(struct sd *sd, __u8 reg, __u8 value)
+{
+ switch (sd->bridge) {
+ case BRIDGE_OV511:
+ case BRIDGE_OV511PLUS:
+ return ov511_i2c_w(sd, reg, value);
+ case BRIDGE_OV518:
+ case BRIDGE_OV518PLUS:
+ case BRIDGE_OV519:
+ return ov518_i2c_w(sd, reg, value);
+ }
+ return -1; /* Should never happen */
+}
+
+static int i2c_r(struct sd *sd, __u8 reg)
+{
+ switch (sd->bridge) {
+ case BRIDGE_OV511:
+ case BRIDGE_OV511PLUS:
+ return ov511_i2c_r(sd, reg);
+ case BRIDGE_OV518:
+ case BRIDGE_OV518PLUS:
+ case BRIDGE_OV519:
+ return ov518_i2c_r(sd, reg);
+ }
+ return -1; /* Should never happen */
+}
+
/* Writes bits at positions specified by mask to an I2C reg. Bits that are in
* the same position as 1's in "mask" are cleared and set to "value". Bits
* that are in the same position as 0's in "mask" are preserved, regardless
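
That masked update is an ordinary read-modify-write; here is the bit arithmetic in isolation, with the register I/O replaced by plain variables (values chosen only for illustration):

    #include <stdio.h>

    static unsigned char masked_write(unsigned char oldval,
                                      unsigned char value, unsigned char mask)
    {
        value &= mask;              /* keep only the bits we are allowed to set */
        oldval &= ~mask;            /* clear those positions in the old value */
        return oldval | value;      /* everything outside the mask is preserved */
    }

    int main(void)
    {
        /* Set bit 6 of register contents 0x20 without touching other bits */
        printf("0x%02x\n", masked_write(0x20, 0x40, 0x40));   /* prints 0x60 */
        return 0;
    }
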
@@ -1242,7 +1531,6 @@
}
/* Set sensor-specific vars */
-/* sd->sif = 0; already done */
return 0;
}
@@ -1279,15 +1567,13 @@
}
} else if ((rc & 3) == 1) {
/* I don't know what's different about the 76BE yet. */
- if (i2c_r(sd, 0x15) & 1)
+ if (i2c_r(sd, 0x15) & 1) {
PDEBUG(D_PROBE, "Sensor is an OV7620AE");
- else
+ sd->sensor = SEN_OV7620;
+ } else {
PDEBUG(D_PROBE, "Sensor is an OV76BE");
-
- /* OV511+ will return all zero isoc data unless we
- * configure the sensor as a 7620. Someone needs to
- * find the exact reg. setting that causes this. */
- sd->sensor = SEN_OV76BE;
+ sd->sensor = SEN_OV76BE;
+ }
} else if ((rc & 3) == 0) {
/* try to read product id registers */
high = i2c_r(sd, 0x0a);
@@ -1333,7 +1619,6 @@
}
/* Set sensor-specific vars */
-/* sd->sif = 0; already done */
return 0;
}
@@ -1362,13 +1647,14 @@
break;
case 0x01:
sd->sensor = SEN_OV6620;
+ PDEBUG(D_PROBE, "Sensor is an OV6620");
break;
case 0x02:
sd->sensor = SEN_OV6630;
PDEBUG(D_PROBE, "Sensor is an OV66308AE");
break;
case 0x03:
- sd->sensor = SEN_OV6630;
+ sd->sensor = SEN_OV66308AF;
PDEBUG(D_PROBE, "Sensor is an OV66308AF");
break;
case 0x90:
@@ -1391,6 +1677,9 @@
/* Turns on or off the LED. Only has an effect with OV511+/OV518(+)/OV519 */
static void ov51x_led_control(struct sd *sd, int on)
{
+ if (sd->invert_led)
+ on = !on;
+
switch (sd->bridge) {
/* OV511 has no LED control */
case BRIDGE_OV511PLUS:
@@ -1406,9 +1695,31 @@
}
}
-/* OV518 quantization tables are 8x4 (instead of 8x8) */
-static int ov518_upload_quan_tables(struct sd *sd)
+static int ov51x_upload_quan_tables(struct sd *sd)
{
+ const unsigned char yQuanTable511[] = {
+ 0, 1, 1, 2, 2, 3, 3, 4,
+ 1, 1, 1, 2, 2, 3, 4, 4,
+ 1, 1, 2, 2, 3, 4, 4, 4,
+ 2, 2, 2, 3, 4, 4, 4, 4,
+ 2, 2, 3, 4, 4, 5, 5, 5,
+ 3, 3, 4, 4, 5, 5, 5, 5,
+ 3, 4, 4, 4, 5, 5, 5, 5,
+ 4, 4, 4, 4, 5, 5, 5, 5
+ };
+
+ const unsigned char uvQuanTable511[] = {
+ 0, 2, 2, 3, 4, 4, 4, 4,
+ 2, 2, 2, 4, 4, 4, 4, 4,
+ 2, 2, 3, 4, 4, 4, 4, 4,
+ 3, 4, 4, 4, 4, 4, 4, 4,
+ 4, 4, 4, 4, 4, 4, 4, 4,
+ 4, 4, 4, 4, 4, 4, 4, 4,
+ 4, 4, 4, 4, 4, 4, 4, 4,
+ 4, 4, 4, 4, 4, 4, 4, 4
+ };
+
+ /* OV518 quantization tables are 8x4 (instead of 8x8) */
const unsigned char yQuanTable518[] = {
5, 4, 5, 6, 6, 7, 7, 7,
5, 5, 5, 5, 6, 7, 7, 7,
@@ -1423,14 +1734,23 @@
7, 7, 7, 7, 7, 7, 8, 8
};
- const unsigned char *pYTable = yQuanTable518;
- const unsigned char *pUVTable = uvQuanTable518;
+ const unsigned char *pYTable, *pUVTable;
unsigned char val0, val1;
- int i, rc, reg = R51x_COMP_LUT_BEGIN;
+ int i, size, rc, reg = R51x_COMP_LUT_BEGIN;
PDEBUG(D_PROBE, "Uploading quantization tables");
- for (i = 0; i < 16; i++) {
+ if (sd->bridge == BRIDGE_OV511 || sd->bridge == BRIDGE_OV511PLUS) {
+ pYTable = yQuanTable511;
+ pUVTable = uvQuanTable511;
+ size = 32;
+ } else {
+ pYTable = yQuanTable518;
+ pUVTable = uvQuanTable518;
+ size = 16;
+ }
+
+ for (i = 0; i < size; i++) {
val0 = *pYTable++;
val1 = *pYTable++;
val0 &= 0x0f;
@@ -1445,7 +1765,7 @@
val0 &= 0x0f;
val1 &= 0x0f;
val0 |= val1 << 4;
- rc = reg_w(sd, reg + 16, val0);
+ rc = reg_w(sd, reg + size, val0);
if (rc < 0)
return rc;
@@ -1455,6 +1775,87 @@
return 0;
}
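
Each LUT register holds two 4-bit table entries packed as nibbles, which is what the packing done with val0 and val1 in the loop above amounts to; a standalone check using the first few OV518 Y-table values:

    #include <stdio.h>

    int main(void)
    {
        const unsigned char table[] = { 5, 4, 5, 6 };   /* start of yQuanTable518 */
        unsigned int i;

        for (i = 0; i < sizeof(table); i += 2) {
            unsigned char val0 = table[i] & 0x0f;
            unsigned char val1 = table[i + 1] & 0x0f;

            /* low nibble = even entry, high nibble = odd entry */
            printf("reg byte %u = 0x%02x\n", i / 2, val0 | (val1 << 4));
        }
        return 0;                   /* prints 0x45 then 0x65 */
    }
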
+/* This initializes the OV511/OV511+ and the sensor */
+static int ov511_configure(struct gspca_dev *gspca_dev)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+ int rc;
+
+ /* For 511 and 511+ */
+ const struct ov_regvals init_511[] = {
+ { R51x_SYS_RESET, 0x7f },
+ { R51x_SYS_INIT, 0x01 },
+ { R51x_SYS_RESET, 0x7f },
+ { R51x_SYS_INIT, 0x01 },
+ { R51x_SYS_RESET, 0x3f },
+ { R51x_SYS_INIT, 0x01 },
+ { R51x_SYS_RESET, 0x3d },
+ };
+
+ const struct ov_regvals norm_511[] = {
+ { R511_DRAM_FLOW_CTL, 0x01 },
+ { R51x_SYS_SNAP, 0x00 },
+ { R51x_SYS_SNAP, 0x02 },
+ { R51x_SYS_SNAP, 0x00 },
+ { R511_FIFO_OPTS, 0x1f },
+ { R511_COMP_EN, 0x00 },
+ { R511_COMP_LUT_EN, 0x03 },
+ };
+
+ const struct ov_regvals norm_511_p[] = {
+ { R511_DRAM_FLOW_CTL, 0xff },
+ { R51x_SYS_SNAP, 0x00 },
+ { R51x_SYS_SNAP, 0x02 },
+ { R51x_SYS_SNAP, 0x00 },
+ { R511_FIFO_OPTS, 0xff },
+ { R511_COMP_EN, 0x00 },
+ { R511_COMP_LUT_EN, 0x03 },
+ };
+
+ const struct ov_regvals compress_511[] = {
+ { 0x70, 0x1f },
+ { 0x71, 0x05 },
+ { 0x72, 0x06 },
+ { 0x73, 0x06 },
+ { 0x74, 0x14 },
+ { 0x75, 0x03 },
+ { 0x76, 0x04 },
+ { 0x77, 0x04 },
+ };
+
+ PDEBUG(D_PROBE, "Device custom id %x", reg_r(sd, R51x_SYS_CUST_ID));
+
+ rc = write_regvals(sd, init_511, ARRAY_SIZE(init_511));
+ if (rc < 0)
+ return rc;
+
+ switch (sd->bridge) {
+ case BRIDGE_OV511:
+ rc = write_regvals(sd, norm_511, ARRAY_SIZE(norm_511));
+ if (rc < 0)
+ return rc;
+ break;
+ case BRIDGE_OV511PLUS:
+ rc = write_regvals(sd, norm_511_p, ARRAY_SIZE(norm_511_p));
+ if (rc < 0)
+ return rc;
+ break;
+ }
+
+ /* Init compression */
+ rc = write_regvals(sd, compress_511, ARRAY_SIZE(compress_511));
+ if (rc < 0)
+ return rc;
+
+ rc = ov51x_upload_quan_tables(sd);
+ if (rc < 0) {
+ PDEBUG(D_ERR, "Error uploading quantization tables");
+ return rc;
+ }
+
+ return 0;
+}
+
/* This initializes the OV518/OV518+ and the sensor */
static int ov518_configure(struct gspca_dev *gspca_dev)
{
@@ -1462,7 +1863,7 @@
int rc;
/* For 518 and 518+ */
- static struct ov_regvals init_518[] = {
+ const struct ov_regvals init_518[] = {
{ R51x_SYS_RESET, 0x40 },
{ R51x_SYS_INIT, 0xe1 },
{ R51x_SYS_RESET, 0x3e },
@@ -1473,7 +1874,7 @@
{ 0x5d, 0x03 },
};
- static struct ov_regvals norm_518[] = {
+ const struct ov_regvals norm_518[] = {
{ R51x_SYS_SNAP, 0x02 }, /* Reset */
{ R51x_SYS_SNAP, 0x01 }, /* Enable */
{ 0x31, 0x0f },
@@ -1486,7 +1887,7 @@
{ 0x2f, 0x80 },
};
- static struct ov_regvals norm_518_p[] = {
+ const struct ov_regvals norm_518_p[] = {
{ R51x_SYS_SNAP, 0x02 }, /* Reset */
{ R51x_SYS_SNAP, 0x01 }, /* Enable */
{ 0x31, 0x0f },
@@ -1531,7 +1932,7 @@
break;
}
- rc = ov518_upload_quan_tables(sd);
+ rc = ov51x_upload_quan_tables(sd);
if (rc < 0) {
PDEBUG(D_ERR, "Error uploading quantization tables");
return rc;
@@ -1573,9 +1974,14 @@
struct cam *cam;
int ret = 0;
- sd->bridge = id->driver_info;
+ sd->bridge = id->driver_info & BRIDGE_MASK;
+ sd->invert_led = id->driver_info & BRIDGE_INVERT_LED;
switch (sd->bridge) {
+ case BRIDGE_OV511:
+ case BRIDGE_OV511PLUS:
+ ret = ov511_configure(gspca_dev);
+ break;
case BRIDGE_OV518:
case BRIDGE_OV518PLUS:
ret = ov518_configure(gspca_dev);
@@ -1634,6 +2040,16 @@
cam = &gspca_dev->cam;
switch (sd->bridge) {
+ case BRIDGE_OV511:
+ case BRIDGE_OV511PLUS:
+ if (!sd->sif) {
+ cam->cam_mode = ov511_vga_mode;
+ cam->nmodes = ARRAY_SIZE(ov511_vga_mode);
+ } else {
+ cam->cam_mode = ov511_sif_mode;
+ cam->nmodes = ARRAY_SIZE(ov511_sif_mode);
+ }
+ break;
case BRIDGE_OV518:
case BRIDGE_OV518PLUS:
if (!sd->sif) {
@@ -1655,13 +2071,28 @@
break;
}
sd->brightness = BRIGHTNESS_DEF;
- sd->contrast = CONTRAST_DEF;
+ if (sd->sensor == SEN_OV6630 || sd->sensor == SEN_OV66308AF)
+ sd->contrast = 200; /* The default is too low for the ov6630 */
+ else
+ sd->contrast = CONTRAST_DEF;
sd->colors = COLOR_DEF;
sd->hflip = HFLIP_DEF;
sd->vflip = VFLIP_DEF;
- if (sd->sensor != SEN_OV7670)
- gspca_dev->ctrl_dis = (1 << HFLIP_IDX)
- | (1 << VFLIP_IDX);
+ sd->autobrightness = AUTOBRIGHT_DEF;
+ if (sd->sensor == SEN_OV7670) {
+ sd->freq = OV7670_FREQ_DEF;
+ gspca_dev->ctrl_dis = 1 << FREQ_IDX;
+ } else {
+ sd->freq = FREQ_DEF;
+ gspca_dev->ctrl_dis = (1 << HFLIP_IDX) | (1 << VFLIP_IDX) |
+ (1 << OV7670_FREQ_IDX);
+ }
+ if (sd->sensor == SEN_OV7640 || sd->sensor == SEN_OV7670)
+ gspca_dev->ctrl_dis |= 1 << AUTOBRIGHT_IDX;
+ /* OV8610 Frequency filter control should work but needs testing */
+ if (sd->sensor == SEN_OV8610)
+ gspca_dev->ctrl_dis |= 1 << FREQ_IDX;
+
return 0;
error:
PDEBUG(D_ERR, "OV519 Config failed");
@@ -1680,6 +2111,7 @@
return -EIO;
break;
case SEN_OV6630:
+ case SEN_OV66308AF:
if (write_i2c_regvals(sd, norm_6x30, ARRAY_SIZE(norm_6x30)))
return -EIO;
break;
@@ -1688,6 +2120,8 @@
/* case SEN_OV76BE: */
if (write_i2c_regvals(sd, norm_7610, ARRAY_SIZE(norm_7610)))
return -EIO;
+ if (i2c_w_mask(sd, 0x0e, 0x00, 0x40))
+ return -EIO;
break;
case SEN_OV7620:
if (write_i2c_regvals(sd, norm_7620, ARRAY_SIZE(norm_7620)))
@@ -1709,6 +2143,126 @@
return 0;
}
+/* Set up the OV511/OV511+ with the given image parameters.
+ *
+ * Do not put any sensor-specific code in here (including I2C I/O functions)
+ */
+static int ov511_mode_init_regs(struct sd *sd)
+{
+ int hsegs, vsegs, packet_size, fps, needed;
+ int interlaced = 0;
+ struct usb_host_interface *alt;
+ struct usb_interface *intf;
+
+ intf = usb_ifnum_to_if(sd->gspca_dev.dev, sd->gspca_dev.iface);
+ alt = usb_altnum_to_altsetting(intf, sd->gspca_dev.alt);
+ if (!alt) {
+ PDEBUG(D_ERR, "Couldn't get altsetting");
+ return -EIO;
+ }
+
+ packet_size = le16_to_cpu(alt->endpoint[0].desc.wMaxPacketSize);
+ reg_w(sd, R51x_FIFO_PSIZE, packet_size >> 5);
+
+ reg_w(sd, R511_CAM_UV_EN, 0x01);
+ reg_w(sd, R511_SNAP_UV_EN, 0x01);
+ reg_w(sd, R511_SNAP_OPTS, 0x03);
+
+ /* Here I'm assuming that snapshot size == image size.
+ * I hope that's always true. --claudio
+ */
+ hsegs = (sd->gspca_dev.width >> 3) - 1;
+ vsegs = (sd->gspca_dev.height >> 3) - 1;
+
+ reg_w(sd, R511_CAM_PXCNT, hsegs);
+ reg_w(sd, R511_CAM_LNCNT, vsegs);
+ reg_w(sd, R511_CAM_PXDIV, 0x00);
+ reg_w(sd, R511_CAM_LNDIV, 0x00);
+
+ /* YUV420, low pass filter on */
+ reg_w(sd, R511_CAM_OPTS, 0x03);
+
+ /* Snapshot additions */
+ reg_w(sd, R511_SNAP_PXCNT, hsegs);
+ reg_w(sd, R511_SNAP_LNCNT, vsegs);
+ reg_w(sd, R511_SNAP_PXDIV, 0x00);
+ reg_w(sd, R511_SNAP_LNDIV, 0x00);
+
+ /******** Set the framerate ********/
+ if (frame_rate > 0)
+ sd->frame_rate = frame_rate;
+
+ switch (sd->sensor) {
+ case SEN_OV6620:
+ /* No framerate control, doesn't like higher rates yet */
+ sd->clockdiv = 3;
+ break;
+
+	/* Note: once the FIXMEs in mode_init_ov_sensor_regs() are fixed
+	   for more sensors, we need to do this for them too */
+ case SEN_OV7620:
+ case SEN_OV7640:
+ case SEN_OV76BE:
+ if (sd->gspca_dev.width == 320)
+ interlaced = 1;
+ /* Fall through */
+ case SEN_OV6630:
+ case SEN_OV7610:
+ case SEN_OV7670:
+ switch (sd->frame_rate) {
+ case 30:
+ case 25:
+ /* Not enough bandwidth to do 640x480 @ 30 fps */
+ if (sd->gspca_dev.width != 640) {
+ sd->clockdiv = 0;
+ break;
+ }
+ /* Fall through for 640x480 case */
+ default:
+/* case 20: */
+/* case 15: */
+ sd->clockdiv = 1;
+ break;
+ case 10:
+ sd->clockdiv = 2;
+ break;
+ case 5:
+ sd->clockdiv = 5;
+ break;
+ }
+ if (interlaced) {
+ sd->clockdiv = (sd->clockdiv + 1) * 2 - 1;
+			/* Higher than 10 does not work */
+ if (sd->clockdiv > 10)
+ sd->clockdiv = 10;
+ }
+ break;
+
+ case SEN_OV8610:
+ /* No framerate control ?? */
+ sd->clockdiv = 0;
+ break;
+ }
+
+ /* Check if we have enough bandwidth to disable compression */
+ fps = (interlaced ? 60 : 30) / (sd->clockdiv + 1) + 1;
+ needed = fps * sd->gspca_dev.width * sd->gspca_dev.height * 3 / 2;
+ /* 1400 is a conservative estimate of the max nr of isoc packets/sec */
+ if (needed > 1400 * packet_size) {
+ /* Enable Y and UV quantization and compression */
+ reg_w(sd, R511_COMP_EN, 0x07);
+ reg_w(sd, R511_COMP_LUT_EN, 0x03);
+ } else {
+ reg_w(sd, R511_COMP_EN, 0x06);
+ reg_w(sd, R511_COMP_LUT_EN, 0x00);
+ }
+
+ reg_w(sd, R51x_SYS_RESET, OV511_RESET_OMNICE);
+ reg_w(sd, R51x_SYS_RESET, 0);
+
+ return 0;
+}
+
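A worked example of the bandwidth check above, with illustrative numbers: at clockdiv 0 on a non-interlaced sensor the driver estimates fps = 30 / (0 + 1) + 1 = 31, and a 320x240 YUV420 frame is 320 * 240 * 3 / 2 = 115200 bytes, so streaming uncompressed would need about 3.6 MB/s; with, say, a 961-byte wMaxPacketSize the conservative budget is 1400 * 961, roughly 1.35 MB/s, so compression stays enabled, and only small modes on a large-packet altsetting can run uncompressed.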
/* Sets up the OV518/OV518+ with the given image parameters
*
* OV518 needs a completely different approach, until we can figure out what
@@ -1718,7 +2272,19 @@
*/
static int ov518_mode_init_regs(struct sd *sd)
{
- int hsegs, vsegs;
+ int hsegs, vsegs, packet_size;
+ struct usb_host_interface *alt;
+ struct usb_interface *intf;
+
+ intf = usb_ifnum_to_if(sd->gspca_dev.dev, sd->gspca_dev.iface);
+ alt = usb_altnum_to_altsetting(intf, sd->gspca_dev.alt);
+ if (!alt) {
+ PDEBUG(D_ERR, "Couldn't get altsetting");
+ return -EIO;
+ }
+
+ packet_size = le16_to_cpu(alt->endpoint[0].desc.wMaxPacketSize);
+ ov518_reg_w32(sd, R51x_FIFO_PSIZE, packet_size & ~7, 2);
/******** Set the mode ********/
@@ -1755,20 +2321,30 @@
/* Windows driver does this here; who knows why */
reg_w(sd, 0x2f, 0x80);
- /******** Set the framerate (to 30 FPS) ********/
- if (sd->bridge == BRIDGE_OV518PLUS)
- sd->clockdiv = 1;
- else
- sd->clockdiv = 0;
+ /******** Set the framerate ********/
+ sd->clockdiv = 1;
/* Mode independent, but framerate dependent, regs */
- reg_w(sd, 0x51, 0x04); /* Clock divider; lower==faster */
+ /* 0x51: Clock divider; Only works on some cams which use 2 crystals */
+ reg_w(sd, 0x51, 0x04);
reg_w(sd, 0x22, 0x18);
reg_w(sd, 0x23, 0xff);
- if (sd->bridge == BRIDGE_OV518PLUS)
- reg_w(sd, 0x21, 0x19);
- else
+ if (sd->bridge == BRIDGE_OV518PLUS) {
+ switch (sd->sensor) {
+ case SEN_OV7620:
+ if (sd->gspca_dev.width == 320) {
+ reg_w(sd, 0x20, 0x00);
+ reg_w(sd, 0x21, 0x19);
+ } else {
+ reg_w(sd, 0x20, 0x60);
+ reg_w(sd, 0x21, 0x1f);
+ }
+ break;
+ default:
+ reg_w(sd, 0x21, 0x19);
+ }
+ } else
reg_w(sd, 0x71, 0x17); /* Compression-related? */
/* FIXME: Sensor-specific */
@@ -1879,7 +2455,11 @@
reg_w(sd, OV519_R10_H_SIZE, sd->gspca_dev.width >> 4);
reg_w(sd, OV519_R11_V_SIZE, sd->gspca_dev.height >> 3);
- reg_w(sd, OV519_R12_X_OFFSETL, 0x00);
+ if (sd->sensor == SEN_OV7670 &&
+ sd->gspca_dev.cam.cam_mode[sd->gspca_dev.curr_mode].priv)
+ reg_w(sd, OV519_R12_X_OFFSETL, 0x04);
+ else
+ reg_w(sd, OV519_R12_X_OFFSETL, 0x00);
reg_w(sd, OV519_R13_X_OFFSETH, 0x00);
reg_w(sd, OV519_R14_Y_OFFSETL, 0x00);
reg_w(sd, OV519_R15_Y_OFFSETH, 0x00);
@@ -1971,7 +2551,7 @@
int qvga;
gspca_dev = &sd->gspca_dev;
- qvga = gspca_dev->cam.cam_mode[(int) gspca_dev->curr_mode].priv;
+ qvga = gspca_dev->cam.cam_mode[(int) gspca_dev->curr_mode].priv & 1;
/******** Mode (VGA/QVGA) and sensor specific regs ********/
switch (sd->sensor) {
@@ -1983,21 +2563,16 @@
i2c_w_mask(sd, 0x14, qvga ? 0x20 : 0x00, 0x20);
break;
case SEN_OV7620:
-/* i2c_w(sd, 0x2b, 0x00); */
+ case SEN_OV76BE:
i2c_w_mask(sd, 0x14, qvga ? 0x20 : 0x00, 0x20);
i2c_w_mask(sd, 0x28, qvga ? 0x00 : 0x20, 0x20);
i2c_w(sd, 0x24, qvga ? 0x20 : 0x3a);
i2c_w(sd, 0x25, qvga ? 0x30 : 0x60);
i2c_w_mask(sd, 0x2d, qvga ? 0x40 : 0x00, 0x40);
- i2c_w_mask(sd, 0x67, qvga ? 0xf0 : 0x90, 0xf0);
+ i2c_w_mask(sd, 0x67, qvga ? 0xb0 : 0x90, 0xf0);
i2c_w_mask(sd, 0x74, qvga ? 0x20 : 0x00, 0x20);
break;
- case SEN_OV76BE:
-/* i2c_w(sd, 0x2b, 0x00); */
- i2c_w_mask(sd, 0x14, qvga ? 0x20 : 0x00, 0x20);
- break;
case SEN_OV7640:
-/* i2c_w(sd, 0x2b, 0x00); */
i2c_w_mask(sd, 0x14, qvga ? 0x20 : 0x00, 0x20);
i2c_w_mask(sd, 0x28, qvga ? 0x00 : 0x20, 0x20);
/* i2c_w(sd, 0x24, qvga ? 0x20 : 0x3a); */
@@ -2016,6 +2591,7 @@
break;
case SEN_OV6620:
case SEN_OV6630:
+ case SEN_OV66308AF:
i2c_w_mask(sd, 0x14, qvga ? 0x20 : 0x00, 0x20);
break;
default:
@@ -2023,10 +2599,6 @@
}
/******** Palette-specific regs ********/
- if (sd->sensor == SEN_OV7610 || sd->sensor == SEN_OV76BE) {
- /* not valid on the OV6620/OV7620/6630? */
- i2c_w_mask(sd, 0x0e, 0x00, 0x40);
- }
/* The OV518 needs special treatment. Although both the OV518
* and the OV6630 support a 16-bit video bus, only the 8 bit Y
@@ -2036,25 +2608,12 @@
/* OV7640 is 8-bit only */
- if (sd->sensor != SEN_OV6630 && sd->sensor != SEN_OV7640)
+ if (sd->sensor != SEN_OV6630 && sd->sensor != SEN_OV66308AF &&
+ sd->sensor != SEN_OV7640)
i2c_w_mask(sd, 0x13, 0x00, 0x20);
/******** Clock programming ********/
- /* The OV6620 needs special handling. This prevents the
- * severe banding that normally occurs */
- if (sd->sensor == SEN_OV6620) {
-
- /* Clock down */
- i2c_w(sd, 0x2a, 0x04);
- i2c_w(sd, 0x11, sd->clockdiv);
- i2c_w(sd, 0x2a, 0x84);
- /* This next setting is critical. It seems to improve
- * the gain or the contrast. The "reserved" bits seem
- * to have some effect in this case. */
- i2c_w(sd, 0x2d, 0x85);
- } else {
- i2c_w(sd, 0x11, sd->clockdiv);
- }
+ i2c_w(sd, 0x11, sd->clockdiv);
/******** Special Features ********/
/* no evidence this is possible with OV7670, either */
@@ -2098,13 +2657,14 @@
static int set_ov_sensor_window(struct sd *sd)
{
struct gspca_dev *gspca_dev;
- int qvga;
+ int qvga, crop;
int hwsbase, hwebase, vwsbase, vwebase, hwscale, vwscale;
int ret, hstart, hstop, vstop, vstart;
__u8 v;
gspca_dev = &sd->gspca_dev;
- qvga = gspca_dev->cam.cam_mode[(int) gspca_dev->curr_mode].priv;
+ qvga = gspca_dev->cam.cam_mode[(int) gspca_dev->curr_mode].priv & 1;
+ crop = gspca_dev->cam.cam_mode[(int) gspca_dev->curr_mode].priv & 2;
/* The different sensor ICs handle setting up of window differently.
* IF YOU SET IT WRONG, YOU WILL GET ALL ZERO ISOC DATA FROM OV51x!! */
@@ -2123,14 +2683,19 @@
break;
case SEN_OV6620:
case SEN_OV6630:
+ case SEN_OV66308AF:
hwsbase = 0x38;
hwebase = 0x3a;
vwsbase = 0x05;
vwebase = 0x06;
- if (qvga) {
+ if (sd->sensor == SEN_OV66308AF && qvga)
/* HDG: this fixes U and V getting swapped */
- hwsbase--;
- vwsbase--;
+ hwsbase++;
+ if (crop) {
+ hwsbase += 8;
+ hwebase += 8;
+ vwsbase += 11;
+ vwebase += 11;
}
break;
case SEN_OV7620:
@@ -2155,6 +2720,7 @@
switch (sd->sensor) {
case SEN_OV6620:
case SEN_OV6630:
+ case SEN_OV66308AF:
if (qvga) { /* QCIF */
hwscale = 0;
vwscale = 0;
@@ -2207,7 +2773,7 @@
if (qvga) { /* QVGA from ov7670.c by
* Jonathan Corbet */
hstart = 164;
- hstop = 20;
+ hstop = 28;
vstart = 14;
vstop = 494;
} else { /* VGA */
@@ -2233,7 +2799,6 @@
msleep(10); /* need to sleep between read and write to
* same reg! */
i2c_w(sd, OV7670_REG_VREF, v);
- sethvflip(sd);
} else {
i2c_w(sd, 0x17, hwsbase);
i2c_w(sd, 0x18, hwebase + (sd->gspca_dev.width >> hwscale));
@@ -2250,6 +2815,10 @@
int ret = 0;
switch (sd->bridge) {
+ case BRIDGE_OV511:
+ case BRIDGE_OV511PLUS:
+ ret = ov511_mode_init_regs(sd);
+ break;
case BRIDGE_OV518:
case BRIDGE_OV518PLUS:
ret = ov518_mode_init_regs(sd);
@@ -2268,6 +2837,9 @@
setcontrast(gspca_dev);
setbrightness(gspca_dev);
setcolors(gspca_dev);
+ sethvflip(sd);
+ setautobrightness(sd);
+ setfreq(sd);
ret = ov51x_restart(sd);
if (ret < 0)
@@ -2287,23 +2859,88 @@
ov51x_led_control(sd, 0);
}
+static void ov511_pkt_scan(struct gspca_dev *gspca_dev,
+ struct gspca_frame *frame, /* target */
+ __u8 *in, /* isoc packet */
+ int len) /* iso packet length */
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+
+ /* SOF/EOF packets have 1st to 8th bytes zeroed and the 9th
+ * byte non-zero. The EOF packet has image width/height in the
+ * 10th and 11th bytes. The 9th byte is given as follows:
+ *
+ * bit 7: EOF
+ * 6: compression enabled
+ * 5: 422/420/400 modes
+ * 4: 422/420/400 modes
+ * 3: 1
+ * 2: snapshot button on
+ * 1: snapshot frame
+ * 0: even/odd field
+ */
+ if (!(in[0] | in[1] | in[2] | in[3] | in[4] | in[5] | in[6] | in[7]) &&
+ (in[8] & 0x08)) {
+ if (in[8] & 0x80) {
+ /* Frame end */
+ if ((in[9] + 1) * 8 != gspca_dev->width ||
+ (in[10] + 1) * 8 != gspca_dev->height) {
+ PDEBUG(D_ERR, "Invalid frame size, got: %dx%d,"
+ " requested: %dx%d\n",
+ (in[9] + 1) * 8, (in[10] + 1) * 8,
+ gspca_dev->width, gspca_dev->height);
+ gspca_dev->last_packet_type = DISCARD_PACKET;
+ return;
+ }
+			/* Add 11 byte footer to frame, might be useful */
+ gspca_frame_add(gspca_dev, LAST_PACKET, frame, in, 11);
+ return;
+ } else {
+ /* Frame start */
+ gspca_frame_add(gspca_dev, FIRST_PACKET, frame, in, 0);
+ sd->packet_nr = 0;
+ }
+ }
+
+ /* Ignore the packet number */
+ len--;
+
+ /* intermediate packet */
+ gspca_frame_add(gspca_dev, INTER_PACKET, frame, in, len);
+}
+
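To illustrate the EOF footer handling above: a packet whose first eight bytes are zero and whose 9th byte is, say, 0x88 (bit 7 and bit 3 set) marks a frame end, and with in[9] = 0x27 and in[10] = 0x1d the decoded size is (0x27 + 1) * 8 = 320 by (0x1d + 1) * 8 = 240, which is what gets compared against the requested width and height before the footer is appended.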
static void ov518_pkt_scan(struct gspca_dev *gspca_dev,
struct gspca_frame *frame, /* target */
__u8 *data, /* isoc packet */
int len) /* iso packet length */
{
- PDEBUG(D_STREAM, "ov518_pkt_scan: %d bytes", len);
-
- if (len & 7) {
- len--;
- PDEBUG(D_STREAM, "packet number: %d\n", (int)data[len]);
- }
+ struct sd *sd = (struct sd *) gspca_dev;
/* A false positive here is likely, until OVT gives me
* the definitive SOF/EOF format */
if ((!(data[0] | data[1] | data[2] | data[3] | data[5])) && data[6]) {
gspca_frame_add(gspca_dev, LAST_PACKET, frame, data, 0);
gspca_frame_add(gspca_dev, FIRST_PACKET, frame, data, 0);
+ sd->packet_nr = 0;
+ }
+
+ if (gspca_dev->last_packet_type == DISCARD_PACKET)
+ return;
+
+ /* Does this device use packet numbers ? */
+ if (len & 7) {
+ len--;
+ if (sd->packet_nr == data[len])
+ sd->packet_nr++;
+ /* The last few packets of the frame (which are all 0's
+		   except that they may contain part of the footer) are
+ numbered 0 */
+ else if (sd->packet_nr == 0 || data[len]) {
+ PDEBUG(D_ERR, "Invalid packet nr: %d (expect: %d)",
+ (int)data[len], (int)sd->packet_nr);
+ gspca_dev->last_packet_type = DISCARD_PACKET;
+ return;
+ }
}
/* intermediate packet */
@@ -2364,6 +3001,7 @@
switch (sd->bridge) {
case BRIDGE_OV511:
case BRIDGE_OV511PLUS:
+ ov511_pkt_scan(gspca_dev, frame, data, len);
break;
case BRIDGE_OV518:
case BRIDGE_OV518PLUS:
@@ -2389,13 +3027,13 @@
case SEN_OV76BE:
case SEN_OV6620:
case SEN_OV6630:
+ case SEN_OV66308AF:
case SEN_OV7640:
i2c_w(sd, OV7610_REG_BRT, val);
break;
case SEN_OV7620:
/* 7620 doesn't like manual changes when in auto mode */
-/*fixme
- * if (!sd->auto_brt) */
+ if (!sd->autobrightness)
i2c_w(sd, OV7610_REG_BRT, val);
break;
case SEN_OV7670:
@@ -2418,6 +3056,7 @@
i2c_w(sd, OV7610_REG_CNT, val);
break;
case SEN_OV6630:
+ case SEN_OV66308AF:
i2c_w_mask(sd, OV7610_REG_CNT, val >> 4, 0x0f);
break;
case SEN_OV8610: {
@@ -2462,6 +3101,7 @@
case SEN_OV76BE:
case SEN_OV6620:
case SEN_OV6630:
+ case SEN_OV66308AF:
i2c_w(sd, OV7610_REG_SAT, val);
break;
case SEN_OV7620:
@@ -2482,6 +3122,72 @@
}
}
+static void setautobrightness(struct sd *sd)
+{
+ if (sd->sensor == SEN_OV7640 || sd->sensor == SEN_OV7670)
+ return;
+
+ i2c_w_mask(sd, 0x2d, sd->autobrightness ? 0x10 : 0x00, 0x10);
+}
+
+static void setfreq(struct sd *sd)
+{
+ if (sd->sensor == SEN_OV7670) {
+ switch (sd->freq) {
+ case 0: /* Banding filter disabled */
+ i2c_w_mask(sd, OV7670_REG_COM8, 0, OV7670_COM8_BFILT);
+ break;
+ case 1: /* 50 hz */
+ i2c_w_mask(sd, OV7670_REG_COM8, OV7670_COM8_BFILT,
+ OV7670_COM8_BFILT);
+ i2c_w_mask(sd, OV7670_REG_COM11, 0x08, 0x18);
+ break;
+ case 2: /* 60 hz */
+ i2c_w_mask(sd, OV7670_REG_COM8, OV7670_COM8_BFILT,
+ OV7670_COM8_BFILT);
+ i2c_w_mask(sd, OV7670_REG_COM11, 0x00, 0x18);
+ break;
+ case 3: /* Auto hz */
+ i2c_w_mask(sd, OV7670_REG_COM8, OV7670_COM8_BFILT,
+ OV7670_COM8_BFILT);
+ i2c_w_mask(sd, OV7670_REG_COM11, OV7670_COM11_HZAUTO,
+ 0x18);
+ break;
+ }
+ } else {
+ switch (sd->freq) {
+ case 0: /* Banding filter disabled */
+ i2c_w_mask(sd, 0x2d, 0x00, 0x04);
+ i2c_w_mask(sd, 0x2a, 0x00, 0x80);
+ break;
+ case 1: /* 50 hz (filter on and framerate adj) */
+ i2c_w_mask(sd, 0x2d, 0x04, 0x04);
+ i2c_w_mask(sd, 0x2a, 0x80, 0x80);
+ /* 20 fps -> 16.667 fps */
+ if (sd->sensor == SEN_OV6620 ||
+ sd->sensor == SEN_OV6630 ||
+ sd->sensor == SEN_OV66308AF)
+ i2c_w(sd, 0x2b, 0x5e);
+ else
+ i2c_w(sd, 0x2b, 0xac);
+ break;
+ case 2: /* 60 hz (filter on, ...) */
+ i2c_w_mask(sd, 0x2d, 0x04, 0x04);
+ if (sd->sensor == SEN_OV6620 ||
+ sd->sensor == SEN_OV6630 ||
+ sd->sensor == SEN_OV66308AF) {
+ /* 20 fps -> 15 fps */
+ i2c_w_mask(sd, 0x2a, 0x80, 0x80);
+ i2c_w(sd, 0x2b, 0xa8);
+ } else {
+ /* no framerate adj. */
+ i2c_w_mask(sd, 0x2a, 0x00, 0x80);
+ }
+ break;
+ }
+ }
+}
+
static int sd_setbrightness(struct gspca_dev *gspca_dev, __s32 val)
{
struct sd *sd = (struct sd *) gspca_dev;
@@ -2572,6 +3278,71 @@
return 0;
}
+static int sd_setautobrightness(struct gspca_dev *gspca_dev, __s32 val)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+
+ sd->autobrightness = val;
+ if (gspca_dev->streaming)
+ setautobrightness(sd);
+ return 0;
+}
+
+static int sd_getautobrightness(struct gspca_dev *gspca_dev, __s32 *val)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+
+ *val = sd->autobrightness;
+ return 0;
+}
+
+static int sd_setfreq(struct gspca_dev *gspca_dev, __s32 val)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+
+ sd->freq = val;
+ if (gspca_dev->streaming)
+ setfreq(sd);
+ return 0;
+}
+
+static int sd_getfreq(struct gspca_dev *gspca_dev, __s32 *val)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+
+ *val = sd->freq;
+ return 0;
+}
+
+static int sd_querymenu(struct gspca_dev *gspca_dev,
+ struct v4l2_querymenu *menu)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+
+ switch (menu->id) {
+ case V4L2_CID_POWER_LINE_FREQUENCY:
+ switch (menu->index) {
+ case 0: /* V4L2_CID_POWER_LINE_FREQUENCY_DISABLED */
+ strcpy((char *) menu->name, "NoFliker");
+ return 0;
+ case 1: /* V4L2_CID_POWER_LINE_FREQUENCY_50HZ */
+ strcpy((char *) menu->name, "50 Hz");
+ return 0;
+ case 2: /* V4L2_CID_POWER_LINE_FREQUENCY_60HZ */
+ strcpy((char *) menu->name, "60 Hz");
+ return 0;
+ case 3:
+ if (sd->sensor != SEN_OV7670)
+ return -EINVAL;
+
+ strcpy((char *) menu->name, "Automatic");
+ return 0;
+ }
+ break;
+ }
+ return -EINVAL;
+}
+
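The querymenu handler above is what user space sees when it enumerates V4L2_CID_POWER_LINE_FREQUENCY. A minimal user-space sketch of that enumeration (the /dev/video0 path is an assumption, not something defined by this patch):

	#include <fcntl.h>
	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <linux/videodev2.h>

	int main(void)
	{
		struct v4l2_querymenu qm;
		unsigned int i;
		int fd = open("/dev/video0", O_RDWR);	/* assumed device node */

		if (fd < 0)
			return 1;

		for (i = 0; i <= 3; i++) {
			memset(&qm, 0, sizeof(qm));
			qm.id = V4L2_CID_POWER_LINE_FREQUENCY;
			qm.index = i;
			/* The driver fills in the menu name ("NoFliker",
			   "50 Hz", ...) or rejects unsupported indices. */
			if (ioctl(fd, VIDIOC_QUERYMENU, &qm) == 0)
				printf("%u: %s\n", qm.index, (char *)qm.name);
		}
		close(fd);
		return 0;
	}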
/* sub-driver description */
static const struct sd_desc sd_desc = {
.name = MODULE_NAME,
@@ -2582,6 +3353,7 @@
.start = sd_start,
.stopN = sd_stopN,
.pkt_scan = sd_pkt_scan,
+ .querymenu = sd_querymenu,
};
/* -- module initialisation -- */
@@ -2590,17 +3362,22 @@
{USB_DEVICE(0x041e, 0x405f), .driver_info = BRIDGE_OV519 },
{USB_DEVICE(0x041e, 0x4060), .driver_info = BRIDGE_OV519 },
{USB_DEVICE(0x041e, 0x4061), .driver_info = BRIDGE_OV519 },
- {USB_DEVICE(0x041e, 0x4064), .driver_info = BRIDGE_OV519 },
- {USB_DEVICE(0x041e, 0x4068), .driver_info = BRIDGE_OV519 },
+ {USB_DEVICE(0x041e, 0x4064),
+ .driver_info = BRIDGE_OV519 | BRIDGE_INVERT_LED },
+ {USB_DEVICE(0x041e, 0x4068),
+ .driver_info = BRIDGE_OV519 | BRIDGE_INVERT_LED },
{USB_DEVICE(0x045e, 0x028c), .driver_info = BRIDGE_OV519 },
{USB_DEVICE(0x054c, 0x0154), .driver_info = BRIDGE_OV519 },
{USB_DEVICE(0x054c, 0x0155), .driver_info = BRIDGE_OV519 },
+ {USB_DEVICE(0x05a9, 0x0511), .driver_info = BRIDGE_OV511 },
{USB_DEVICE(0x05a9, 0x0518), .driver_info = BRIDGE_OV518 },
{USB_DEVICE(0x05a9, 0x0519), .driver_info = BRIDGE_OV519 },
{USB_DEVICE(0x05a9, 0x0530), .driver_info = BRIDGE_OV519 },
{USB_DEVICE(0x05a9, 0x4519), .driver_info = BRIDGE_OV519 },
{USB_DEVICE(0x05a9, 0x8519), .driver_info = BRIDGE_OV519 },
+ {USB_DEVICE(0x05a9, 0xa511), .driver_info = BRIDGE_OV511PLUS },
{USB_DEVICE(0x05a9, 0xa518), .driver_info = BRIDGE_OV518PLUS },
+ {USB_DEVICE(0x0813, 0x0002), .driver_info = BRIDGE_OV511PLUS },
{}
};
diff --git a/drivers/media/video/gspca/sonixj.c b/drivers/media/video/gspca/sonixj.c
index dc6a6f1..0d02f41 100644
--- a/drivers/media/video/gspca/sonixj.c
+++ b/drivers/media/video/gspca/sonixj.c
@@ -46,6 +46,7 @@
u8 gamma;
u8 vflip; /* ov7630/ov7648 only */
u8 infrared; /* mt9v111 only */
+ u8 freq; /* ov76xx only */
u8 quality; /* image quality */
#define QUALITY_MIN 60
#define QUALITY_MAX 95
@@ -96,8 +97,11 @@
static int sd_getvflip(struct gspca_dev *gspca_dev, __s32 *val);
static int sd_setinfrared(struct gspca_dev *gspca_dev, __s32 val);
static int sd_getinfrared(struct gspca_dev *gspca_dev, __s32 *val);
+static int sd_setfreq(struct gspca_dev *gspca_dev, __s32 val);
+static int sd_getfreq(struct gspca_dev *gspca_dev, __s32 *val);
static struct ctrl sd_ctrls[] = {
+#define BRIGHTNESS_IDX 0
{
{
.id = V4L2_CID_BRIGHTNESS,
@@ -113,6 +117,7 @@
.set = sd_setbrightness,
.get = sd_getbrightness,
},
+#define CONTRAST_IDX 1
{
{
.id = V4L2_CID_CONTRAST,
@@ -128,20 +133,22 @@
.set = sd_setcontrast,
.get = sd_getcontrast,
},
+#define COLOR_IDX 2
{
{
.id = V4L2_CID_SATURATION,
.type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Color",
+ .name = "Saturation",
.minimum = 0,
.maximum = 40,
.step = 1,
-#define COLOR_DEF 32
+#define COLOR_DEF 25
.default_value = COLOR_DEF,
},
.set = sd_setcolors,
.get = sd_getcolors,
},
+#define BLUE_BALANCE_IDX 3
{
{
.id = V4L2_CID_BLUE_BALANCE,
@@ -156,6 +163,7 @@
.set = sd_setblue_balance,
.get = sd_getblue_balance,
},
+#define RED_BALANCE_IDX 4
{
{
.id = V4L2_CID_RED_BALANCE,
@@ -170,6 +178,7 @@
.set = sd_setred_balance,
.get = sd_getred_balance,
},
+#define GAMMA_IDX 5
{
{
.id = V4L2_CID_GAMMA,
@@ -184,7 +193,7 @@
.set = sd_setgamma,
.get = sd_getgamma,
},
-#define AUTOGAIN_IDX 5
+#define AUTOGAIN_IDX 6
{
{
.id = V4L2_CID_AUTOGAIN,
@@ -200,7 +209,7 @@
.get = sd_getautogain,
},
/* ov7630/ov7648 only */
-#define VFLIP_IDX 6
+#define VFLIP_IDX 7
{
{
.id = V4L2_CID_VFLIP,
@@ -209,14 +218,14 @@
.minimum = 0,
.maximum = 1,
.step = 1,
-#define VFLIP_DEF 0 /* vflip def = 1 for ov7630 */
+#define VFLIP_DEF 0
.default_value = VFLIP_DEF,
},
.set = sd_setvflip,
.get = sd_getvflip,
},
/* mt9v111 only */
-#define INFRARED_IDX 7
+#define INFRARED_IDX 8
{
{
.id = V4L2_CID_INFRARED,
@@ -231,28 +240,44 @@
.set = sd_setinfrared,
.get = sd_getinfrared,
},
+/* ov7630/ov7648/ov7660 only */
+#define FREQ_IDX 9
+ {
+ {
+ .id = V4L2_CID_POWER_LINE_FREQUENCY,
+ .type = V4L2_CTRL_TYPE_MENU,
+ .name = "Light frequency filter",
+ .minimum = 0,
+ .maximum = 2, /* 0: 0, 1: 50Hz, 2:60Hz */
+ .step = 1,
+#define FREQ_DEF 2
+ .default_value = FREQ_DEF,
+ },
+ .set = sd_setfreq,
+ .get = sd_getfreq,
+ },
};
/* table of the disabled controls */
static __u32 ctrl_dis[] = {
- (1 << INFRARED_IDX) | (1 << VFLIP_IDX),
+ (1 << INFRARED_IDX) | (1 << VFLIP_IDX) | (1 << FREQ_IDX),
/* SENSOR_HV7131R 0 */
- (1 << INFRARED_IDX) | (1 << VFLIP_IDX),
+ (1 << INFRARED_IDX) | (1 << VFLIP_IDX) | (1 << FREQ_IDX),
/* SENSOR_MI0360 1 */
- (1 << INFRARED_IDX) | (1 << VFLIP_IDX),
+ (1 << INFRARED_IDX) | (1 << VFLIP_IDX) | (1 << FREQ_IDX),
/* SENSOR_MO4000 2 */
- (1 << VFLIP_IDX),
+ (1 << VFLIP_IDX) | (1 << FREQ_IDX),
/* SENSOR_MT9V111 3 */
- (1 << INFRARED_IDX) | (1 << VFLIP_IDX),
+ (1 << INFRARED_IDX) | (1 << VFLIP_IDX) | (1 << FREQ_IDX),
/* SENSOR_OM6802 4 */
- (1 << AUTOGAIN_IDX) | (1 << INFRARED_IDX),
+ (1 << INFRARED_IDX),
/* SENSOR_OV7630 5 */
(1 << INFRARED_IDX),
/* SENSOR_OV7648 6 */
(1 << AUTOGAIN_IDX) | (1 << INFRARED_IDX) | (1 << VFLIP_IDX),
/* SENSOR_OV7660 7 */
- (1 << AUTOGAIN_IDX) | (1 << INFRARED_IDX) | (1 << VFLIP_IDX),
- /* SENSOR_SP80708 8 */
+ (1 << AUTOGAIN_IDX) | (1 << INFRARED_IDX) | (1 << VFLIP_IDX) |
+ (1 << FREQ_IDX), /* SENSOR_SP80708 8 */
};
static const struct v4l2_pix_format vga_mode[] = {
@@ -268,7 +293,8 @@
.priv = 1},
{640, 480, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE,
.bytesperline = 640,
- .sizeimage = 640 * 480 * 3 / 8 + 590,
+ /* Note 3 / 8 is not large enough, not even 5 / 8 is ?! */
+ .sizeimage = 640 * 480 * 3 / 4 + 590,
.colorspace = V4L2_COLORSPACE_JPEG,
.priv = 0},
};
@@ -604,7 +630,9 @@
/* win: i2c_r from 00 to 80 */
{0xd1, 0x21, 0x03, 0x80, 0x10, 0x20, 0x80, 0x10},
{0xb1, 0x21, 0x0c, 0x20, 0x20, 0x00, 0x00, 0x10},
- {0xd1, 0x21, 0x11, 0x00, 0x48, 0xc0, 0x00, 0x10},
+/* HDG: 0x11 was 0x00, changed to 0x01 for better exposure (15 fps instead of 30);
+	0x13 was 0xc0, changed to 0xc3 for auto gain and exposure */
+ {0xd1, 0x21, 0x11, 0x01, 0x48, 0xc3, 0x00, 0x10},
{0xb1, 0x21, 0x15, 0x80, 0x03, 0x00, 0x00, 0x10},
{0xd1, 0x21, 0x17, 0x1b, 0xbd, 0x05, 0xf6, 0x10},
{0xa1, 0x21, 0x1b, 0x04, 0x00, 0x00, 0x00, 0x10},
@@ -638,9 +666,8 @@
{0xa1, 0x21, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10},
{0xb1, 0x21, 0x01, 0x80, 0x80, 0x00, 0x00, 0x10},
/* */
- {0xa1, 0x21, 0x11, 0x00, 0x00, 0x00, 0x00, 0x10},
- {0xa1, 0x21, 0x2a, 0x88, 0x00, 0x00, 0x00, 0x10},
- {0xa1, 0x21, 0x2b, 0x34, 0x00, 0x00, 0x00, 0x10},
+/* {0xa1, 0x21, 0x2a, 0x88, 0x00, 0x00, 0x00, 0x10}, * set by setfreq */
+/* {0xa1, 0x21, 0x2b, 0x34, 0x00, 0x00, 0x00, 0x10}, * set by setfreq */
/* */
{0xa1, 0x21, 0x10, 0x83, 0x00, 0x00, 0x00, 0x10},
/* {0xb1, 0x21, 0x01, 0x88, 0x70, 0x00, 0x00, 0x10}, */
@@ -673,7 +700,7 @@
{0xd1, 0x21, 0x21, 0x86, 0x00, 0xde, 0xa0, 0x10},
/* {0xd1, 0x21, 0x25, 0x80, 0x32, 0xfe, 0xa0, 0x10}, jfm done */
/* {0xd1, 0x21, 0x29, 0x00, 0x91, 0x00, 0x88, 0x10}, jfm done */
- {0xb1, 0x21, 0x2d, 0x85, 0x00, 0x00, 0x00, 0x10},
+/* {0xb1, 0x21, 0x2d, 0x85, 0x00, 0x00, 0x00, 0x10}, set by setfreq */
/*...*/
/* {0xa1, 0x21, 0x12, 0x08, 0x00, 0x00, 0x00, 0x10}, jfm done */
/* {0xa1, 0x21, 0x75, 0x06, 0x00, 0x00, 0x00, 0x10}, * COMN
@@ -1294,11 +1321,9 @@
sd->gamma = GAMMA_DEF;
sd->autogain = AUTOGAIN_DEF;
sd->ag_cnt = -1;
- if (sd->sensor != SENSOR_OV7630)
- sd->vflip = 0;
- else
- sd->vflip = 1;
+ sd->vflip = VFLIP_DEF;
sd->infrared = INFRARED_DEF;
+ sd->freq = FREQ_DEF;
sd->quality = QUALITY_DEF;
sd->jpegqual = 80;
@@ -1569,7 +1594,7 @@
else
comb = 0xa0;
if (sd->autogain)
- comb |= 0x02;
+ comb |= 0x03;
i2c_w1(&sd->gspca_dev, 0x13, comb);
return;
}
@@ -1585,12 +1610,15 @@
{
u8 comn;
- if (sd->sensor == SENSOR_OV7630)
+ if (sd->sensor == SENSOR_OV7630) {
comn = 0x02;
- else
+ if (!sd->vflip)
+ comn |= 0x80;
+ } else {
comn = 0x06;
- if (sd->vflip)
- comn |= 0x80;
+ if (sd->vflip)
+ comn |= 0x80;
+ }
i2c_w1(&sd->gspca_dev, 0x75, comn);
}
@@ -1602,6 +1630,58 @@
sd->infrared ? 0x66 : 0x64);
}
+static void setfreq(struct gspca_dev *gspca_dev)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+
+ if (sd->sensor == SENSOR_OV7660) {
+ switch (sd->freq) {
+ case 0: /* Banding filter disabled */
+ i2c_w1(gspca_dev, 0x13, 0xdf);
+ break;
+ case 1: /* 50 hz */
+ i2c_w1(gspca_dev, 0x13, 0xff);
+ i2c_w1(gspca_dev, 0x3b, 0x0a);
+ break;
+ case 2: /* 60 hz */
+ i2c_w1(gspca_dev, 0x13, 0xff);
+ i2c_w1(gspca_dev, 0x3b, 0x02);
+ break;
+ }
+ } else {
+ u8 reg2a = 0, reg2b = 0, reg2d = 0;
+
+ /* Get reg2a / reg2d base values */
+ switch (sd->sensor) {
+ case SENSOR_OV7630:
+ reg2a = 0x08;
+ reg2d = 0x01;
+ break;
+ case SENSOR_OV7648:
+ reg2a = 0x11;
+ reg2d = 0x81;
+ break;
+ }
+
+ switch (sd->freq) {
+ case 0: /* Banding filter disabled */
+ break;
+ case 1: /* 50 hz (filter on and framerate adj) */
+ reg2a |= 0x80;
+ reg2b = 0xac;
+ reg2d |= 0x04;
+ break;
+ case 2: /* 60 hz (filter on, no framerate adj) */
+ reg2a |= 0x80;
+ reg2d |= 0x04;
+ break;
+ }
+ i2c_w1(gspca_dev, 0x2a, reg2a);
+ i2c_w1(gspca_dev, 0x2b, reg2b);
+ i2c_w1(gspca_dev, 0x2d, reg2d);
+ }
+}
+
static void setjpegqual(struct gspca_dev *gspca_dev)
{
struct sd *sd = (struct sd *) gspca_dev;
@@ -1828,6 +1908,7 @@
setbrightness(gspca_dev);
setcontrast(gspca_dev);
setautogain(gspca_dev);
+ setfreq(gspca_dev);
return 0;
}
@@ -2131,6 +2212,24 @@
return 0;
}
+static int sd_setfreq(struct gspca_dev *gspca_dev, __s32 val)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+
+ sd->freq = val;
+ if (gspca_dev->streaming)
+ setfreq(gspca_dev);
+ return 0;
+}
+
+static int sd_getfreq(struct gspca_dev *gspca_dev, __s32 *val)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+
+ *val = sd->freq;
+ return 0;
+}
+
static int sd_set_jcomp(struct gspca_dev *gspca_dev,
struct v4l2_jpegcompression *jcomp)
{
@@ -2159,6 +2258,27 @@
return 0;
}
+static int sd_querymenu(struct gspca_dev *gspca_dev,
+ struct v4l2_querymenu *menu)
+{
+ switch (menu->id) {
+ case V4L2_CID_POWER_LINE_FREQUENCY:
+ switch (menu->index) {
+ case 0: /* V4L2_CID_POWER_LINE_FREQUENCY_DISABLED */
+ strcpy((char *) menu->name, "NoFliker");
+ return 0;
+ case 1: /* V4L2_CID_POWER_LINE_FREQUENCY_50HZ */
+ strcpy((char *) menu->name, "50 Hz");
+ return 0;
+ case 2: /* V4L2_CID_POWER_LINE_FREQUENCY_60HZ */
+ strcpy((char *) menu->name, "60 Hz");
+ return 0;
+ }
+ break;
+ }
+ return -EINVAL;
+}
+
/* sub-driver description */
static const struct sd_desc sd_desc = {
.name = MODULE_NAME,
@@ -2173,6 +2293,7 @@
.dq_callback = do_autogain,
.get_jcomp = sd_get_jcomp,
.set_jcomp = sd_set_jcomp,
+ .querymenu = sd_querymenu,
};
/* -- module initialisation -- */
@@ -2233,7 +2354,7 @@
{USB_DEVICE(0x0c45, 0x613b), BSI(SN9C120, OV7660, 0x21)},
#endif
{USB_DEVICE(0x0c45, 0x613c), BSI(SN9C120, HV7131R, 0x11)},
-/* {USB_DEVICE(0x0c45, 0x613e), BSI(SN9C120, OV7630, 0x??)}, */
+ {USB_DEVICE(0x0c45, 0x613e), BSI(SN9C120, OV7630, 0x21)},
{USB_DEVICE(0x0c45, 0x6143), BSI(SN9C120, SP80708, 0x18)},
{}
};
diff --git a/drivers/media/video/gspca/stv06xx/Makefile b/drivers/media/video/gspca/stv06xx/Makefile
index feeaa94..2f3c3a6 100644
--- a/drivers/media/video/gspca/stv06xx/Makefile
+++ b/drivers/media/video/gspca/stv06xx/Makefile
@@ -3,7 +3,8 @@
gspca_stv06xx-objs := stv06xx.o \
stv06xx_vv6410.o \
stv06xx_hdcs.o \
- stv06xx_pb0100.o
+ stv06xx_pb0100.o \
+ stv06xx_st6422.o
EXTRA_CFLAGS += -Idrivers/media/video/gspca
diff --git a/drivers/media/video/gspca/stv06xx/stv06xx.c b/drivers/media/video/gspca/stv06xx/stv06xx.c
index e573c34..0da8e0d 100644
--- a/drivers/media/video/gspca/stv06xx/stv06xx.c
+++ b/drivers/media/video/gspca/stv06xx/stv06xx.c
@@ -92,11 +92,10 @@
{
int err = 0;
- if (IS_850(sd)) {
+ if (sd->bridge == BRIDGE_STV610) {
struct usb_device *udev = sd->gspca_dev.dev;
__u8 *buf = sd->gspca_dev.usb_buf;
- /* Quickam Web needs an extra packet */
buf[0] = 0;
err = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
0x04, 0x40, 0x1704, 0, buf, 1,
@@ -253,7 +252,7 @@
err = sd->sensor->init(sd);
- if (dump_sensor)
+ if (dump_sensor && sd->sensor->dump)
sd->sensor->dump(sd);
return (err < 0) ? err : 0;
@@ -318,6 +317,8 @@
__u8 *data, /* isoc packet */
int len) /* iso packet length */
{
+ struct sd *sd = (struct sd *) gspca_dev;
+
PDEBUG(D_PACK, "Packet of length %d arrived", len);
/* A packet may contain several frames
@@ -343,14 +344,29 @@
if (len < chunk_len) {
PDEBUG(D_ERR, "URB packet length is smaller"
" than the specified chunk length");
+ gspca_dev->last_packet_type = DISCARD_PACKET;
return;
}
+		/* First byte seems to be 02 = data, 2nd byte is unknown??? */
+ if (sd->bridge == BRIDGE_ST6422 && (id & 0xFF00) == 0x0200)
+ goto frame_data;
+
switch (id) {
case 0x0200:
case 0x4200:
+frame_data:
PDEBUG(D_PACK, "Frame data packet detected");
+ if (sd->to_skip) {
+ int skip = (sd->to_skip < chunk_len) ?
+ sd->to_skip : chunk_len;
+ data += skip;
+ len -= skip;
+ chunk_len -= skip;
+ sd->to_skip -= skip;
+ }
+
gspca_frame_add(gspca_dev, INTER_PACKET, frame,
data, chunk_len);
break;
@@ -365,6 +381,9 @@
gspca_frame_add(gspca_dev, FIRST_PACKET,
frame, data, 0);
+ if (sd->bridge == BRIDGE_ST6422)
+ sd->to_skip = gspca_dev->width * 4;
+
if (chunk_len)
PDEBUG(D_ERR, "Chunk length is "
"non-zero on a SOF");
@@ -395,8 +414,12 @@
/* Unknown chunk with 2 bytes of data,
occurs 2-3 times per USB interrupt */
break;
+ case 0x42ff:
+ PDEBUG(D_PACK, "Chunk 0x42ff detected");
+ /* Special chunk seen sometimes on the ST6422 */
+ break;
default:
- PDEBUG(D_PACK, "Unknown chunk %d detected", id);
+ PDEBUG(D_PACK, "Unknown chunk 0x%04x detected", id);
/* Unknown chunk */
}
data += chunk_len;
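A short worked example for the skip logic above: per the comment added to stv06xx.h below, the first four sensor lines of each ST6422 frame are unusable, and since the ST6422 modes are 1 byte per pixel, to_skip = width * 4 is exactly four lines; for the 324-pixel-wide mode that is 1296 bytes, which may be spread over several chunks, hence to_skip is decremented chunk by chunk instead of being subtracted once.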
@@ -428,11 +451,16 @@
cam = &gspca_dev->cam;
sd->desc = sd_desc;
+ sd->bridge = id->driver_info;
gspca_dev->sd_desc = &sd->desc;
if (dump_bridge)
stv06xx_dump_bridge(sd);
+ sd->sensor = &stv06xx_sensor_st6422;
+ if (!sd->sensor->probe(sd))
+ return 0;
+
sd->sensor = &stv06xx_sensor_vv6410;
if (!sd->sensor->probe(sd))
return 0;
@@ -457,9 +485,20 @@
/* -- module initialisation -- */
static const __devinitdata struct usb_device_id device_table[] = {
- {USB_DEVICE(0x046d, 0x0840)}, /* QuickCam Express */
- {USB_DEVICE(0x046d, 0x0850)}, /* LEGO cam / QuickCam Web */
- {USB_DEVICE(0x046d, 0x0870)}, /* Dexxa WebCam USB */
+ /* QuickCam Express */
+ {USB_DEVICE(0x046d, 0x0840), .driver_info = BRIDGE_STV600 },
+ /* LEGO cam / QuickCam Web */
+ {USB_DEVICE(0x046d, 0x0850), .driver_info = BRIDGE_STV610 },
+ /* Dexxa WebCam USB */
+ {USB_DEVICE(0x046d, 0x0870), .driver_info = BRIDGE_STV602 },
+ /* QuickCam Messenger */
+ {USB_DEVICE(0x046D, 0x08F0), .driver_info = BRIDGE_ST6422 },
+ /* QuickCam Communicate */
+ {USB_DEVICE(0x046D, 0x08F5), .driver_info = BRIDGE_ST6422 },
+ /* QuickCam Messenger (new) */
+ {USB_DEVICE(0x046D, 0x08F6), .driver_info = BRIDGE_ST6422 },
+ /* QuickCam Messenger (new) */
+ {USB_DEVICE(0x046D, 0x08DA), .driver_info = BRIDGE_ST6422 },
{}
};
MODULE_DEVICE_TABLE(usb, device_table);
diff --git a/drivers/media/video/gspca/stv06xx/stv06xx.h b/drivers/media/video/gspca/stv06xx/stv06xx.h
index 1207e7d..9df7137 100644
--- a/drivers/media/video/gspca/stv06xx/stv06xx.h
+++ b/drivers/media/video/gspca/stv06xx/stv06xx.h
@@ -93,6 +93,17 @@
/* Sensor private data */
void *sensor_priv;
+
+	/* The first 4 lines produced by the st6422 are no good; this keeps
+	   track of how many bytes we still need to skip during a frame */
+ int to_skip;
+
+ /* Bridge / Camera type */
+ u8 bridge;
+ #define BRIDGE_STV600 0
+ #define BRIDGE_STV602 1
+ #define BRIDGE_STV610 2
+ #define BRIDGE_ST6422 3 /* With integrated sensor */
};
int stv06xx_write_bridge(struct sd *sd, u16 address, u16 i2c_data);
diff --git a/drivers/media/video/gspca/stv06xx/stv06xx_hdcs.c b/drivers/media/video/gspca/stv06xx/stv06xx_hdcs.c
index b169038..3039ec2 100644
--- a/drivers/media/video/gspca/stv06xx/stv06xx_hdcs.c
+++ b/drivers/media/video/gspca/stv06xx/stv06xx_hdcs.c
@@ -434,7 +434,7 @@
hdcs->exp.er = 100;
/*
- * Frame rate on HDCS-1000 0x46D:0x840 depends on PSMP:
+ * Frame rate on HDCS-1000 with STV600 depends on PSMP:
* 4 = doesn't work at all
* 5 = 7.8 fps,
* 6 = 6.9 fps,
@@ -443,7 +443,7 @@
* 15 = 4.4 fps,
* 31 = 2.8 fps
*
- * Frame rate on HDCS-1000 0x46D:0x870 depends on PSMP:
+ * Frame rate on HDCS-1000 with STV602 depends on PSMP:
* 15 = doesn't work at all
* 18 = doesn't work at all
* 19 = 7.3 fps
@@ -453,7 +453,7 @@
* 24 = 6.3 fps
* 30 = 5.4 fps
*/
- hdcs->psmp = IS_870(sd) ? 20 : 5;
+ hdcs->psmp = (sd->bridge == BRIDGE_STV602) ? 20 : 5;
sd->sensor_priv = hdcs;
@@ -530,7 +530,7 @@
int i, err = 0;
/* Set the STV0602AA in STV0600 emulation mode */
- if (IS_870(sd))
+ if (sd->bridge == BRIDGE_STV602)
stv06xx_write_bridge(sd, STV_STV0600_EMULATION, 1);
/* Execute the bridge init */
@@ -558,7 +558,7 @@
return err;
/* Set PGA sample duration
- (was 0x7E for IS_870, but caused slow framerate with HDCS-1020) */
+ (was 0x7E for the STV602, but caused slow framerate with HDCS-1020) */
if (IS_1020(sd))
err = stv06xx_write_sensor(sd, HDCS_TCTRL,
(HDCS_ADC_START_SIG_DUR << 6) | hdcs->psmp);
diff --git a/drivers/media/video/gspca/stv06xx/stv06xx_sensor.h b/drivers/media/video/gspca/stv06xx/stv06xx_sensor.h
index e88c42f..934b9ce 100644
--- a/drivers/media/video/gspca/stv06xx/stv06xx_sensor.h
+++ b/drivers/media/video/gspca/stv06xx/stv06xx_sensor.h
@@ -32,14 +32,13 @@
#include "stv06xx.h"
-#define IS_850(sd) ((sd)->gspca_dev.dev->descriptor.idProduct == 0x850)
-#define IS_870(sd) ((sd)->gspca_dev.dev->descriptor.idProduct == 0x870)
#define IS_1020(sd) ((sd)->sensor == &stv06xx_sensor_hdcs1020)
extern const struct stv06xx_sensor stv06xx_sensor_vv6410;
extern const struct stv06xx_sensor stv06xx_sensor_hdcs1x00;
extern const struct stv06xx_sensor stv06xx_sensor_hdcs1020;
extern const struct stv06xx_sensor stv06xx_sensor_pb0100;
+extern const struct stv06xx_sensor stv06xx_sensor_st6422;
struct stv06xx_sensor {
/* Defines the name of a sensor */
diff --git a/drivers/media/video/gspca/stv06xx/stv06xx_st6422.c b/drivers/media/video/gspca/stv06xx/stv06xx_st6422.c
new file mode 100644
index 0000000..87cb5b9
--- /dev/null
+++ b/drivers/media/video/gspca/stv06xx/stv06xx_st6422.c
@@ -0,0 +1,453 @@
+/*
+ * Support for the sensor part which is integrated (I think) into the
+ * st6422 stv06xx-like bridge; as it is integrated, there are no i2c writes,
+ * only direct bridge writes.
+ *
+ * Copyright (c) 2009 Hans de Goede <hdegoede@redhat.com>
+ *
+ * Strongly based on qc-usb-messenger, which is:
+ * Copyright (c) 2001 Jean-Fredric Clere, Nikolas Zimmermann, Georg Acher
+ * Mark Cave-Ayland, Carlo E Prelz, Dick Streefland
+ * Copyright (c) 2002, 2003 Tuukka Toivonen
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#include "stv06xx_st6422.h"
+
+static struct v4l2_pix_format st6422_mode[] = {
+	/* Note we actually get 124 lines of data, of which we skip the
+	   first 4 as they are garbage */
+ {
+ 162,
+ 120,
+ V4L2_PIX_FMT_SGRBG8,
+ V4L2_FIELD_NONE,
+ .sizeimage = 162 * 120,
+ .bytesperline = 162,
+ .colorspace = V4L2_COLORSPACE_SRGB,
+ .priv = 1
+ },
+	/* Note we actually get 248 lines of data, of which we skip the
+	   first 4 as they are garbage, and we tell the app it only gets the
+ first 240 of the 244 lines it actually gets, so that it ignores
+ the last 4. */
+ {
+ 324,
+ 240,
+ V4L2_PIX_FMT_SGRBG8,
+ V4L2_FIELD_NONE,
+ .sizeimage = 324 * 244,
+ .bytesperline = 324,
+ .colorspace = V4L2_COLORSPACE_SRGB,
+ .priv = 0
+ },
+};
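Byte accounting for the larger mode above: the bridge delivers 248 lines of 324 bytes each, the pkt_scan code skips the first 4, leaving 244 stored lines (hence sizeimage = 324 * 244 = 79056), and the application is told the height is 240 so it simply ignores the final 4 lines.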
+
+static const struct ctrl st6422_ctrl[] = {
+#define BRIGHTNESS_IDX 0
+ {
+ {
+ .id = V4L2_CID_BRIGHTNESS,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "Brightness",
+ .minimum = 0,
+ .maximum = 31,
+ .step = 1,
+ .default_value = 3
+ },
+ .set = st6422_set_brightness,
+ .get = st6422_get_brightness
+ },
+#define CONTRAST_IDX 1
+ {
+ {
+ .id = V4L2_CID_CONTRAST,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "Contrast",
+ .minimum = 0,
+ .maximum = 15,
+ .step = 1,
+ .default_value = 11
+ },
+ .set = st6422_set_contrast,
+ .get = st6422_get_contrast
+ },
+#define GAIN_IDX 2
+ {
+ {
+ .id = V4L2_CID_GAIN,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "Gain",
+ .minimum = 0,
+ .maximum = 255,
+ .step = 1,
+ .default_value = 64
+ },
+ .set = st6422_set_gain,
+ .get = st6422_get_gain
+ },
+#define EXPOSURE_IDX 3
+ {
+ {
+ .id = V4L2_CID_EXPOSURE,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "Exposure",
+ .minimum = 0,
+ .maximum = 1023,
+ .step = 1,
+ .default_value = 256
+ },
+ .set = st6422_set_exposure,
+ .get = st6422_get_exposure
+ },
+};
+
+static int st6422_probe(struct sd *sd)
+{
+ int i;
+ s32 *sensor_settings;
+
+ if (sd->bridge != BRIDGE_ST6422)
+ return -ENODEV;
+
+ info("st6422 sensor detected");
+
+ sensor_settings = kmalloc(ARRAY_SIZE(st6422_ctrl) * sizeof(s32),
+ GFP_KERNEL);
+ if (!sensor_settings)
+ return -ENOMEM;
+
+ sd->gspca_dev.cam.cam_mode = st6422_mode;
+ sd->gspca_dev.cam.nmodes = ARRAY_SIZE(st6422_mode);
+ sd->desc.ctrls = st6422_ctrl;
+ sd->desc.nctrls = ARRAY_SIZE(st6422_ctrl);
+ sd->sensor_priv = sensor_settings;
+
+ for (i = 0; i < sd->desc.nctrls; i++)
+ sensor_settings[i] = st6422_ctrl[i].qctrl.default_value;
+
+ return 0;
+}
+
+static int st6422_init(struct sd *sd)
+{
+ int err = 0, i;
+
+ const u16 st6422_bridge_init[][2] = {
+ { STV_ISO_ENABLE, 0x00 }, /* disable capture */
+ { 0x1436, 0x00 },
+ { 0x1432, 0x03 }, /* 0x00-0x1F brightness */
+ { 0x143a, 0xF9 }, /* 0x00-0x0F contrast */
+ { 0x0509, 0x38 }, /* R */
+ { 0x050a, 0x38 }, /* G */
+ { 0x050b, 0x38 }, /* B */
+ { 0x050c, 0x2A },
+ { 0x050d, 0x01 },
+
+
+ { 0x1431, 0x00 }, /* 0x00-0x07 ??? */
+ { 0x1433, 0x34 }, /* 160x120, 0x00-0x01 night filter */
+ { 0x1438, 0x18 }, /* 640x480 */
+/* 18 bayes */
+/* 10 compressed? */
+
+ { 0x1439, 0x00 },
+/* anti-flicker?? 0xa2 gives a perfect picture on a monitor */
+
+ { 0x143b, 0x05 },
+ { 0x143c, 0x00 }, /* 0x00-0x01 - ??? */
+
+
+/* shutter time 0x0000-0x03FF */
+/* low value gives good pictures of moving objects (but requires much light) */
+/* high value gives good pictures in darkness (but tends to be overexposed) */
+ { 0x143e, 0x01 },
+ { 0x143d, 0x00 },
+
+ { 0x1442, 0xe2 },
+/* write: 1x1x xxxx */
+/* read: 1x1x xxxx */
+/* bit 5 == button pressed and hold if 0 */
+/* write 0xe2,0xea */
+
+/* 0x144a */
+/* 0x00 init */
+/* bit 7 == button has been pressed, but not handled */
+
+/* interrupt */
+/* if(urb->iso_frame_desc[i].status == 0x80) { */
+/* if(urb->iso_frame_desc[i].status == 0x88) { */
+
+ { 0x1500, 0xd0 },
+ { 0x1500, 0xd0 },
+ { 0x1500, 0x50 }, /* 0x00 - 0xFF 0x80 == compr ? */
+
+ { 0x1501, 0xaf },
+/* high val -> bright areas become darker. */
+/* low val -> bright areas become brighter. */
+ { 0x1502, 0xc2 },
+/* high val -> bright areas become darker. */
+/* low val -> bright areas become brighter. */
+ { 0x1503, 0x45 },
+/* high val -> bright areas become darker. */
+/* low val -> bright areas become brighter. */
+
+ { 0x1505, 0x02 },
+/* 2 : 324x248 80352 bytes */
+/* 7 : 248x162 40176 bytes */
+/* c+f: 162*124 20088 bytes */
+
+ { 0x150e, 0x8e },
+ { 0x150f, 0x37 },
+ { 0x15c0, 0x00 },
+ { 0x15c1, 1023 }, /* 160x120, ISOC_PACKET_SIZE */
+ { 0x15c3, 0x08 }, /* 0x04/0x14 ... test pictures ??? */
+
+
+ { 0x143f, 0x01 }, /* commit settings */
+
+ };
+
+ for (i = 0; i < ARRAY_SIZE(st6422_bridge_init) && !err; i++) {
+ err = stv06xx_write_bridge(sd, st6422_bridge_init[i][0],
+ st6422_bridge_init[i][1]);
+ }
+
+ return err;
+}
+
+static void st6422_disconnect(struct sd *sd)
+{
+ sd->sensor = NULL;
+ kfree(sd->sensor_priv);
+}
+
+static int st6422_start(struct sd *sd)
+{
+ int err, packet_size;
+ struct cam *cam = &sd->gspca_dev.cam;
+ s32 *sensor_settings = sd->sensor_priv;
+ struct usb_host_interface *alt;
+ struct usb_interface *intf;
+
+ intf = usb_ifnum_to_if(sd->gspca_dev.dev, sd->gspca_dev.iface);
+ alt = usb_altnum_to_altsetting(intf, sd->gspca_dev.alt);
+ if (!alt) {
+ PDEBUG(D_ERR, "Couldn't get altsetting");
+ return -EIO;
+ }
+
+ packet_size = le16_to_cpu(alt->endpoint[0].desc.wMaxPacketSize);
+ err = stv06xx_write_bridge(sd, 0x15c1, packet_size);
+ if (err < 0)
+ return err;
+
+ if (cam->cam_mode[sd->gspca_dev.curr_mode].priv)
+ err = stv06xx_write_bridge(sd, 0x1505, 0x0f);
+ else
+ err = stv06xx_write_bridge(sd, 0x1505, 0x02);
+ if (err < 0)
+ return err;
+
+ err = st6422_set_brightness(&sd->gspca_dev,
+ sensor_settings[BRIGHTNESS_IDX]);
+ if (err < 0)
+ return err;
+
+ err = st6422_set_contrast(&sd->gspca_dev,
+ sensor_settings[CONTRAST_IDX]);
+ if (err < 0)
+ return err;
+
+ err = st6422_set_exposure(&sd->gspca_dev,
+ sensor_settings[EXPOSURE_IDX]);
+ if (err < 0)
+ return err;
+
+ err = st6422_set_gain(&sd->gspca_dev,
+ sensor_settings[GAIN_IDX]);
+ if (err < 0)
+ return err;
+
+ PDEBUG(D_STREAM, "Starting stream");
+
+ return 0;
+}
+
+static int st6422_stop(struct sd *sd)
+{
+ PDEBUG(D_STREAM, "Halting stream");
+
+ return 0;
+}
+
+static int st6422_get_brightness(struct gspca_dev *gspca_dev, __s32 *val)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+ s32 *sensor_settings = sd->sensor_priv;
+
+ *val = sensor_settings[BRIGHTNESS_IDX];
+
+ PDEBUG(D_V4L2, "Read brightness %d", *val);
+
+ return 0;
+}
+
+static int st6422_set_brightness(struct gspca_dev *gspca_dev, __s32 val)
+{
+ int err;
+ struct sd *sd = (struct sd *) gspca_dev;
+ s32 *sensor_settings = sd->sensor_priv;
+
+ sensor_settings[BRIGHTNESS_IDX] = val;
+
+ if (!gspca_dev->streaming)
+ return 0;
+
+ /* val goes from 0 -> 31 */
+ PDEBUG(D_V4L2, "Set brightness to %d", val);
+ err = stv06xx_write_bridge(sd, 0x1432, val);
+ if (err < 0)
+ return err;
+
+ /* commit settings */
+ err = stv06xx_write_bridge(sd, 0x143f, 0x01);
+ return (err < 0) ? err : 0;
+}
+
+static int st6422_get_contrast(struct gspca_dev *gspca_dev, __s32 *val)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+ s32 *sensor_settings = sd->sensor_priv;
+
+ *val = sensor_settings[CONTRAST_IDX];
+
+ PDEBUG(D_V4L2, "Read contrast %d", *val);
+
+ return 0;
+}
+
+static int st6422_set_contrast(struct gspca_dev *gspca_dev, __s32 val)
+{
+ int err;
+ struct sd *sd = (struct sd *) gspca_dev;
+ s32 *sensor_settings = sd->sensor_priv;
+
+ sensor_settings[CONTRAST_IDX] = val;
+
+ if (!gspca_dev->streaming)
+ return 0;
+
+ /* Val goes from 0 -> 15 */
+ PDEBUG(D_V4L2, "Set contrast to %d\n", val);
+ err = stv06xx_write_bridge(sd, 0x143a, 0xf0 | val);
+ if (err < 0)
+ return err;
+
+ /* commit settings */
+ err = stv06xx_write_bridge(sd, 0x143f, 0x01);
+ return (err < 0) ? err : 0;
+}
+
+static int st6422_get_gain(struct gspca_dev *gspca_dev, __s32 *val)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+ s32 *sensor_settings = sd->sensor_priv;
+
+ *val = sensor_settings[GAIN_IDX];
+
+ PDEBUG(D_V4L2, "Read gain %d", *val);
+
+ return 0;
+}
+
+static int st6422_set_gain(struct gspca_dev *gspca_dev, __s32 val)
+{
+ int err;
+ struct sd *sd = (struct sd *) gspca_dev;
+ s32 *sensor_settings = sd->sensor_priv;
+
+ sensor_settings[GAIN_IDX] = val;
+
+ if (!gspca_dev->streaming)
+ return 0;
+
+ PDEBUG(D_V4L2, "Set gain to %d", val);
+
+ /* Set red, green, blue, gain */
+ err = stv06xx_write_bridge(sd, 0x0509, val);
+ if (err < 0)
+ return err;
+
+ err = stv06xx_write_bridge(sd, 0x050a, val);
+ if (err < 0)
+ return err;
+
+ err = stv06xx_write_bridge(sd, 0x050b, val);
+ if (err < 0)
+ return err;
+
+ /* 2 mystery writes */
+ err = stv06xx_write_bridge(sd, 0x050c, 0x2a);
+ if (err < 0)
+ return err;
+
+ err = stv06xx_write_bridge(sd, 0x050d, 0x01);
+ if (err < 0)
+ return err;
+
+ /* commit settings */
+ err = stv06xx_write_bridge(sd, 0x143f, 0x01);
+ return (err < 0) ? err : 0;
+}
+
+static int st6422_get_exposure(struct gspca_dev *gspca_dev, __s32 *val)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+ s32 *sensor_settings = sd->sensor_priv;
+
+ *val = sensor_settings[EXPOSURE_IDX];
+
+ PDEBUG(D_V4L2, "Read exposure %d", *val);
+
+ return 0;
+}
+
+static int st6422_set_exposure(struct gspca_dev *gspca_dev, __s32 val)
+{
+ int err;
+ struct sd *sd = (struct sd *) gspca_dev;
+ s32 *sensor_settings = sd->sensor_priv;
+
+ sensor_settings[EXPOSURE_IDX] = val;
+
+ if (!gspca_dev->streaming)
+ return 0;
+
+ PDEBUG(D_V4L2, "Set exposure to %d\n", val);
+ err = stv06xx_write_bridge(sd, 0x143d, val & 0xff);
+ if (err < 0)
+ return err;
+
+ err = stv06xx_write_bridge(sd, 0x143e, val >> 8);
+ if (err < 0)
+ return err;
+
+ /* commit settings */
+ err = stv06xx_write_bridge(sd, 0x143f, 0x01);
+ return (err < 0) ? err : 0;
+}
diff --git a/drivers/media/video/gspca/stv06xx/stv06xx_st6422.h b/drivers/media/video/gspca/stv06xx/stv06xx_st6422.h
new file mode 100644
index 0000000..b2d45fe
--- /dev/null
+++ b/drivers/media/video/gspca/stv06xx/stv06xx_st6422.h
@@ -0,0 +1,59 @@
+/*
+ * Support for the sensor part which is integrated (I think) into the
+ * st6422 stv06xx-like bridge; as it is integrated, there are no i2c writes,
+ * only direct bridge writes.
+ *
+ * Copyright (c) 2009 Hans de Goede <hdegoede@redhat.com>
+ *
+ * Strongly based on qc-usb-messenger, which is:
+ * Copyright (c) 2001 Jean-Fredric Clere, Nikolas Zimmermann, Georg Acher
+ * Mark Cave-Ayland, Carlo E Prelz, Dick Streefland
+ * Copyright (c) 2002, 2003 Tuukka Toivonen
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#ifndef STV06XX_ST6422_H_
+#define STV06XX_ST6422_H_
+
+#include "stv06xx_sensor.h"
+
+static int st6422_probe(struct sd *sd);
+static int st6422_start(struct sd *sd);
+static int st6422_init(struct sd *sd);
+static int st6422_stop(struct sd *sd);
+static void st6422_disconnect(struct sd *sd);
+
+/* V4L2 controls supported by the driver */
+static int st6422_get_brightness(struct gspca_dev *gspca_dev, __s32 *val);
+static int st6422_set_brightness(struct gspca_dev *gspca_dev, __s32 val);
+static int st6422_get_contrast(struct gspca_dev *gspca_dev, __s32 *val);
+static int st6422_set_contrast(struct gspca_dev *gspca_dev, __s32 val);
+static int st6422_get_gain(struct gspca_dev *gspca_dev, __s32 *val);
+static int st6422_set_gain(struct gspca_dev *gspca_dev, __s32 val);
+static int st6422_get_exposure(struct gspca_dev *gspca_dev, __s32 *val);
+static int st6422_set_exposure(struct gspca_dev *gspca_dev, __s32 val);
+
+const struct stv06xx_sensor stv06xx_sensor_st6422 = {
+ .name = "ST6422",
+ .init = st6422_init,
+ .probe = st6422_probe,
+ .start = st6422_start,
+ .stop = st6422_stop,
+ .disconnect = st6422_disconnect,
+};
+
+#endif
diff --git a/drivers/media/video/ivtv/ivtv-controls.c b/drivers/media/video/ivtv/ivtv-controls.c
index 84995bc..a3b77ed 100644
--- a/drivers/media/video/ivtv/ivtv-controls.c
+++ b/drivers/media/video/ivtv/ivtv-controls.c
@@ -60,6 +60,8 @@
switch (qctrl->id) {
/* Standard V4L2 controls */
+ case V4L2_CID_USER_CLASS:
+ return v4l2_ctrl_query_fill(qctrl, 0, 0, 0, 0);
case V4L2_CID_BRIGHTNESS:
case V4L2_CID_HUE:
case V4L2_CID_SATURATION:
diff --git a/drivers/media/video/mt9m001.c b/drivers/media/video/mt9m001.c
index 459c04c..4d794b4 100644
--- a/drivers/media/video/mt9m001.c
+++ b/drivers/media/video/mt9m001.c
@@ -280,15 +280,9 @@
{
struct v4l2_pix_format *pix = &f->fmt.pix;
- if (pix->height < 32 + icd->y_skip_top)
- pix->height = 32 + icd->y_skip_top;
- if (pix->height > 1024 + icd->y_skip_top)
- pix->height = 1024 + icd->y_skip_top;
- if (pix->width < 48)
- pix->width = 48;
- if (pix->width > 1280)
- pix->width = 1280;
- pix->width &= ~0x01; /* has to be even, unsure why was ~3 */
+ v4l_bound_align_image(&pix->width, 48, 1280, 1,
+ &pix->height, 32 + icd->y_skip_top,
+ 1024 + icd->y_skip_top, 0, 0);
return 0;
}
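The same conversion is applied to mt9t031 and mt9v022 below; in all three cases v4l_bound_align_image() replaces open-coded clamping, and its alignment arguments are bit counts (the removed width &= ~0x01 corresponds to walign = 1, mt9v022's &= ~0x03 to walign = 2). A hypothetical open-coded equivalent of the mt9m001 call, for illustration only:

	/* Sketch of what the v4l_bound_align_image() call above amounts to. */
	static void mt9m001_bound(struct v4l2_pix_format *pix,
				  unsigned int y_skip_top)
	{
		if (pix->width < 48)
			pix->width = 48;
		if (pix->width > 1280)
			pix->width = 1280;
		pix->width &= ~0x01;		/* walign = 1: multiple of 2 */

		if (pix->height < 32 + y_skip_top)
			pix->height = 32 + y_skip_top;
		if (pix->height > 1024 + y_skip_top)
			pix->height = 1024 + y_skip_top;
		/* halign = 0, salign = 0: no height or sizeimage alignment */
	}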
diff --git a/drivers/media/video/mt9t031.c b/drivers/media/video/mt9t031.c
index f72aeb7c..4207fb34 100644
--- a/drivers/media/video/mt9t031.c
+++ b/drivers/media/video/mt9t031.c
@@ -385,17 +385,9 @@
{
struct v4l2_pix_format *pix = &f->fmt.pix;
- if (pix->height < MT9T031_MIN_HEIGHT)
- pix->height = MT9T031_MIN_HEIGHT;
- if (pix->height > MT9T031_MAX_HEIGHT)
- pix->height = MT9T031_MAX_HEIGHT;
- if (pix->width < MT9T031_MIN_WIDTH)
- pix->width = MT9T031_MIN_WIDTH;
- if (pix->width > MT9T031_MAX_WIDTH)
- pix->width = MT9T031_MAX_WIDTH;
-
- pix->width &= ~0x01; /* has to be even */
- pix->height &= ~0x01; /* has to be even */
+ v4l_bound_align_image(
+ &pix->width, MT9T031_MIN_WIDTH, MT9T031_MAX_WIDTH, 1,
+ &pix->height, MT9T031_MIN_HEIGHT, MT9T031_MAX_HEIGHT, 1, 0);
return 0;
}
diff --git a/drivers/media/video/mt9v022.c b/drivers/media/video/mt9v022.c
index be20d31..dbdcc86 100644
--- a/drivers/media/video/mt9v022.c
+++ b/drivers/media/video/mt9v022.c
@@ -364,15 +364,9 @@
{
struct v4l2_pix_format *pix = &f->fmt.pix;
- if (pix->height < 32 + icd->y_skip_top)
- pix->height = 32 + icd->y_skip_top;
- if (pix->height > 480 + icd->y_skip_top)
- pix->height = 480 + icd->y_skip_top;
- if (pix->width < 48)
- pix->width = 48;
- if (pix->width > 752)
- pix->width = 752;
- pix->width &= ~0x03; /* ? */
+ v4l_bound_align_image(&pix->width, 48, 752, 2 /* ? */,
+ &pix->height, 32 + icd->y_skip_top,
+ 480 + icd->y_skip_top, 0, 0);
return 0;
}
diff --git a/drivers/media/video/ov511.c b/drivers/media/video/ov511.c
index 08cfd3e..0bc2cf5 100644
--- a/drivers/media/video/ov511.c
+++ b/drivers/media/video/ov511.c
@@ -211,8 +211,6 @@
static struct usb_device_id device_table [] = {
{ USB_DEVICE(VEND_OMNIVISION, PROD_OV511) },
{ USB_DEVICE(VEND_OMNIVISION, PROD_OV511PLUS) },
- { USB_DEVICE(VEND_OMNIVISION, PROD_OV518) },
- { USB_DEVICE(VEND_OMNIVISION, PROD_OV518PLUS) },
{ USB_DEVICE(VEND_MATTEL, PROD_ME2CAM) },
{ } /* Terminating entry */
};
diff --git a/drivers/media/video/pvrusb2/pvrusb2-audio.c b/drivers/media/video/pvrusb2/pvrusb2-audio.c
index 10ef1a2..416933c 100644
--- a/drivers/media/video/pvrusb2/pvrusb2-audio.c
+++ b/drivers/media/video/pvrusb2/pvrusb2-audio.c
@@ -48,11 +48,13 @@
MSP_DSP_IN_SCART),
};
-static const struct routing_scheme routing_schemes[] = {
- [PVR2_ROUTING_SCHEME_HAUPPAUGE] = {
- .def = routing_scheme0,
- .cnt = ARRAY_SIZE(routing_scheme0),
- },
+static const struct routing_scheme routing_def0 = {
+ .def = routing_scheme0,
+ .cnt = ARRAY_SIZE(routing_scheme0),
+};
+
+static const struct routing_scheme *routing_schemes[] = {
+ [PVR2_ROUTING_SCHEME_HAUPPAUGE] = &routing_def0,
};
void pvr2_msp3400_subdev_update(struct pvr2_hdw *hdw, struct v4l2_subdev *sd)
@@ -65,7 +67,7 @@
pvr2_trace(PVR2_TRACE_CHIPS, "subdev msp3400 v4l2 set_stereo");
if ((sid < ARRAY_SIZE(routing_schemes)) &&
- ((sp = routing_schemes + sid) != NULL) &&
+ ((sp = routing_schemes[sid]) != NULL) &&
(hdw->input_val >= 0) &&
(hdw->input_val < sp->cnt)) {
input = sp->def[hdw->input_val];
diff --git a/drivers/media/video/pvrusb2/pvrusb2-cs53l32a.c b/drivers/media/video/pvrusb2/pvrusb2-cs53l32a.c
index 9023adf..68980e1 100644
--- a/drivers/media/video/pvrusb2/pvrusb2-cs53l32a.c
+++ b/drivers/media/video/pvrusb2/pvrusb2-cs53l32a.c
@@ -49,11 +49,13 @@
[PVR2_CVAL_INPUT_SVIDEO] = 0,
};
-static const struct routing_scheme routing_schemes[] = {
- [PVR2_ROUTING_SCHEME_ONAIR] = {
- .def = routing_scheme1,
- .cnt = ARRAY_SIZE(routing_scheme1),
- },
+static const struct routing_scheme routing_def1 = {
+ .def = routing_scheme1,
+ .cnt = ARRAY_SIZE(routing_scheme1),
+};
+
+static const struct routing_scheme *routing_schemes[] = {
+ [PVR2_ROUTING_SCHEME_ONAIR] = &routing_def1,
};
@@ -65,12 +67,11 @@
u32 input;
pvr2_trace(PVR2_TRACE_CHIPS, "subdev v4l2 set_input(%d)",
hdw->input_val);
- if ((sid < ARRAY_SIZE(routing_schemes)) &&
- ((sp = routing_schemes + sid) != NULL) &&
- (hdw->input_val >= 0) &&
- (hdw->input_val < sp->cnt)) {
- input = sp->def[hdw->input_val];
- } else {
+ sp = (sid < ARRAY_SIZE(routing_schemes)) ?
+ routing_schemes[sid] : NULL;
+ if ((sp == NULL) ||
+ (hdw->input_val < 0) ||
+ (hdw->input_val >= sp->cnt)) {
pvr2_trace(PVR2_TRACE_ERROR_LEGS,
"*** WARNING *** subdev v4l2 set_input:"
" Invalid routing scheme (%u)"
@@ -78,6 +79,7 @@
sid, hdw->input_val);
return;
}
+ input = sp->def[hdw->input_val];
sd->ops->audio->s_routing(sd, input, 0, 0);
}
}
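The routing-table rework here and in the other pvrusb2 subdev files matters because the PVR2_ROUTING_SCHEME_* indices are sparse designated initializers: with an array of pointers, a scheme a given file does not define shows up as a NULL entry that the bounds-plus-NULL test rejects, rather than as a silently zero-filled struct. A minimal sketch of the pattern (names are illustrative, not from the driver):

	struct scheme {
		const int *def;
		unsigned int cnt;
	};

	static const int tbl0[] = { 10, 20, 30 };
	static const struct scheme def0 = { .def = tbl0, .cnt = 3 };

	/* Sparse designated initializer: slot 1 stays NULL. */
	static const struct scheme *schemes[] = {
		[0] = &def0,
		[2] = &def0,
	};

	static int lookup(unsigned int sid, int input, int *out)
	{
		const struct scheme *sp =
			sid < sizeof(schemes) / sizeof(schemes[0]) ?
			schemes[sid] : NULL;

		if (sp == NULL || input < 0 || input >= (int)sp->cnt)
			return -1;	/* unknown scheme or bad input */
		*out = sp->def[input];
		return 0;
	}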
diff --git a/drivers/media/video/pvrusb2/pvrusb2-cx2584x-v4l.c b/drivers/media/video/pvrusb2/pvrusb2-cx2584x-v4l.c
index 05e5235..82c1358 100644
--- a/drivers/media/video/pvrusb2/pvrusb2-cx2584x-v4l.c
+++ b/drivers/media/video/pvrusb2/pvrusb2-cx2584x-v4l.c
@@ -68,6 +68,11 @@
},
};
+static const struct routing_scheme routing_def0 = {
+ .def = routing_scheme0,
+ .cnt = ARRAY_SIZE(routing_scheme0),
+};
+
/* Specific to gotview device */
static const struct routing_scheme_item routing_schemegv[] = {
[PVR2_CVAL_INPUT_TV] = {
@@ -90,15 +95,14 @@
},
};
-static const struct routing_scheme routing_schemes[] = {
- [PVR2_ROUTING_SCHEME_HAUPPAUGE] = {
- .def = routing_scheme0,
- .cnt = ARRAY_SIZE(routing_scheme0),
- },
- [PVR2_ROUTING_SCHEME_GOTVIEW] = {
- .def = routing_schemegv,
- .cnt = ARRAY_SIZE(routing_schemegv),
- },
+static const struct routing_scheme routing_defgv = {
+ .def = routing_schemegv,
+ .cnt = ARRAY_SIZE(routing_schemegv),
+};
+
+static const struct routing_scheme *routing_schemes[] = {
+ [PVR2_ROUTING_SCHEME_HAUPPAUGE] = &routing_def0,
+ [PVR2_ROUTING_SCHEME_GOTVIEW] = &routing_defgv,
};
void pvr2_cx25840_subdev_update(struct pvr2_hdw *hdw, struct v4l2_subdev *sd)
@@ -110,13 +114,11 @@
const struct routing_scheme *sp;
unsigned int sid = hdw->hdw_desc->signal_routing_scheme;
- if ((sid < ARRAY_SIZE(routing_schemes)) &&
- ((sp = routing_schemes + sid) != NULL) &&
- (hdw->input_val >= 0) &&
- (hdw->input_val < sp->cnt)) {
- vid_input = sp->def[hdw->input_val].vid;
- aud_input = sp->def[hdw->input_val].aud;
- } else {
+ sp = (sid < ARRAY_SIZE(routing_schemes)) ?
+ routing_schemes[sid] : NULL;
+ if ((sp == NULL) ||
+ (hdw->input_val < 0) ||
+ (hdw->input_val >= sp->cnt)) {
pvr2_trace(PVR2_TRACE_ERROR_LEGS,
"*** WARNING *** subdev cx2584x set_input:"
" Invalid routing scheme (%u)"
@@ -124,7 +126,8 @@
sid, hdw->input_val);
return;
}
-
+ vid_input = sp->def[hdw->input_val].vid;
+ aud_input = sp->def[hdw->input_val].aud;
pvr2_trace(PVR2_TRACE_CHIPS,
"subdev cx2584x set_input vid=0x%x aud=0x%x",
vid_input, aud_input);
diff --git a/drivers/media/video/pvrusb2/pvrusb2-hdw.c b/drivers/media/video/pvrusb2/pvrusb2-hdw.c
index 0c745b1..cbc3887 100644
--- a/drivers/media/video/pvrusb2/pvrusb2-hdw.c
+++ b/drivers/media/video/pvrusb2/pvrusb2-hdw.c
@@ -85,8 +85,8 @@
module_param_array(tolerance, int, NULL, 0444);
MODULE_PARM_DESC(tolerance,"specify stream error tolerance");
-/* US Broadcast channel 7 (175.25 MHz) */
-static int default_tv_freq = 175250000L;
+/* US Broadcast channel 3 (61.25 MHz), to help with testing */
+static int default_tv_freq = 61250000L;
/* 104.3 MHz, a usable FM station for my area */
static int default_radio_freq = 104300000L;
@@ -1987,6 +1987,34 @@
}
+static void pvr2_hdw_cx25840_vbi_hack(struct pvr2_hdw *hdw)
+{
+ /*
+ Mike Isely <isely@pobox.com> 19-Nov-2006 - This bit of nuttiness
+ for cx25840 causes that module to correctly set up its video
+ scaling. This is really a problem in the cx25840 module itself,
+ but we work around it here. The problem has not been seen in
+ ivtv because there VBI is supported and set up. We don't do VBI
+ here (at least not yet) and thus we never attempted to even set
+ it up.
+ */
+ struct v4l2_format fmt;
+ if (hdw->decoder_client_id != PVR2_CLIENT_ID_CX25840) {
+ /* We're not using a cx25840 so don't enable the hack */
+ return;
+ }
+
+ pvr2_trace(PVR2_TRACE_INIT,
+ "Module ID %u:"
+ " Executing cx25840 VBI hack",
+ hdw->decoder_client_id);
+ memset(&fmt, 0, sizeof(fmt));
+ fmt.type = V4L2_BUF_TYPE_SLICED_VBI_CAPTURE;
+ v4l2_device_call_all(&hdw->v4l2_dev, hdw->decoder_client_id,
+ video, s_fmt, &fmt);
+}
+
+
static int pvr2_hdw_load_subdev(struct pvr2_hdw *hdw,
const struct pvr2_device_client_desc *cd)
{
@@ -2078,30 +2106,6 @@
/* client-specific setup... */
switch (mid) {
case PVR2_CLIENT_ID_CX25840:
- hdw->decoder_client_id = mid;
- {
- /*
- Mike Isely <isely@pobox.com> 19-Nov-2006 - This
- bit of nuttiness for cx25840 causes that module
- to correctly set up its video scaling. This is
- really a problem in the cx25840 module itself,
- but we work around it here. The problem has not
- been seen in ivtv because there VBI is supported
- and set up. We don't do VBI here (at least not
- yet) and thus we never attempted to even set it
- up.
- */
- struct v4l2_format fmt;
- pvr2_trace(PVR2_TRACE_INIT,
- "Module ID %u:"
- " Executing cx25840 VBI hack",
- mid);
- memset(&fmt, 0, sizeof(fmt));
- fmt.type = V4L2_BUF_TYPE_SLICED_VBI_CAPTURE;
- v4l2_device_call_all(&hdw->v4l2_dev, mid,
- video, s_fmt, &fmt);
- }
- break;
case PVR2_CLIENT_ID_SAA7115:
hdw->decoder_client_id = mid;
break;
@@ -2202,6 +2206,8 @@
cptr->info->set_value(cptr,~0,cptr->info->default_value);
}
+ pvr2_hdw_cx25840_vbi_hack(hdw);
+
/* Set up special default values for the television and radio
frequencies here. It's not really important what these defaults
are, but I set them to something usable in the Chicago area just
@@ -2954,6 +2960,7 @@
vs = hdw->std_mask_cur;
v4l2_device_call_all(&hdw->v4l2_dev, 0,
core, s_std, vs);
+ pvr2_hdw_cx25840_vbi_hack(hdw);
}
hdw->tuner_signal_stale = !0;
hdw->cropcap_stale = !0;
@@ -4076,6 +4083,7 @@
if (hdw->decoder_client_id) {
v4l2_device_call_all(&hdw->v4l2_dev, hdw->decoder_client_id,
core, reset, 0);
+ pvr2_hdw_cx25840_vbi_hack(hdw);
return 0;
}
pvr2_trace(PVR2_TRACE_INIT,
diff --git a/drivers/media/video/pvrusb2/pvrusb2-video-v4l.c b/drivers/media/video/pvrusb2/pvrusb2-video-v4l.c
index d2fe7c8..4c96cf4 100644
--- a/drivers/media/video/pvrusb2/pvrusb2-video-v4l.c
+++ b/drivers/media/video/pvrusb2/pvrusb2-video-v4l.c
@@ -54,6 +54,11 @@
[PVR2_CVAL_INPUT_SVIDEO] = SAA7115_SVIDEO2,
};
+static const struct routing_scheme routing_def0 = {
+ .def = routing_scheme0,
+ .cnt = ARRAY_SIZE(routing_scheme0),
+};
+
static const int routing_scheme1[] = {
[PVR2_CVAL_INPUT_TV] = SAA7115_COMPOSITE4,
[PVR2_CVAL_INPUT_RADIO] = SAA7115_COMPOSITE5,
@@ -61,15 +66,14 @@
[PVR2_CVAL_INPUT_SVIDEO] = SAA7115_SVIDEO2, /* or SVIDEO0, it seems */
};
-static const struct routing_scheme routing_schemes[] = {
- [PVR2_ROUTING_SCHEME_HAUPPAUGE] = {
- .def = routing_scheme0,
- .cnt = ARRAY_SIZE(routing_scheme0),
- },
- [PVR2_ROUTING_SCHEME_ONAIR] = {
- .def = routing_scheme1,
- .cnt = ARRAY_SIZE(routing_scheme1),
- },
+static const struct routing_scheme routing_def1 = {
+ .def = routing_scheme1,
+ .cnt = ARRAY_SIZE(routing_scheme1),
+};
+
+static const struct routing_scheme *routing_schemes[] = {
+ [PVR2_ROUTING_SCHEME_HAUPPAUGE] = &routing_def0,
+ [PVR2_ROUTING_SCHEME_ONAIR] = &routing_def1,
};
void pvr2_saa7115_subdev_update(struct pvr2_hdw *hdw, struct v4l2_subdev *sd)
@@ -81,12 +85,12 @@
pvr2_trace(PVR2_TRACE_CHIPS, "subdev v4l2 set_input(%d)",
hdw->input_val);
- if ((sid < ARRAY_SIZE(routing_schemes)) &&
- ((sp = routing_schemes + sid) != NULL) &&
- (hdw->input_val >= 0) &&
- (hdw->input_val < sp->cnt)) {
- input = sp->def[hdw->input_val];
- } else {
+
+ sp = (sid < ARRAY_SIZE(routing_schemes)) ?
+ routing_schemes[sid] : NULL;
+ if ((sp == NULL) ||
+ (hdw->input_val < 0) ||
+ (hdw->input_val >= sp->cnt)) {
pvr2_trace(PVR2_TRACE_ERROR_LEGS,
"*** WARNING *** subdev v4l2 set_input:"
" Invalid routing scheme (%u)"
@@ -94,6 +98,7 @@
sid, hdw->input_val);
return;
}
+ input = sp->def[hdw->input_val];
sd->ops->video->s_routing(sd, input, 0, 0);
}
}
diff --git a/drivers/media/video/pxa_camera.c b/drivers/media/video/pxa_camera.c
index f60de40..46e0d8a 100644
--- a/drivers/media/video/pxa_camera.c
+++ b/drivers/media/video/pxa_camera.c
@@ -163,13 +163,6 @@
CICR0_EOFM | CICR0_FOM)
/*
- * YUV422P picture size should be a multiple of 16, so the heuristic aligns
- * height, width on 4 byte boundaries to reach the 16 multiple for the size.
- */
-#define YUV422P_X_Y_ALIGN 4
-#define YUV422P_SIZE_ALIGN YUV422P_X_Y_ALIGN * YUV422P_X_Y_ALIGN
-
-/*
* Structures
*/
enum pxa_camera_active_dma {
@@ -1398,28 +1391,15 @@
return -EINVAL;
}
- /* limit to pxa hardware capabilities */
- if (pix->height < 32)
- pix->height = 32;
- if (pix->height > 2048)
- pix->height = 2048;
- if (pix->width < 48)
- pix->width = 48;
- if (pix->width > 2048)
- pix->width = 2048;
- pix->width &= ~0x01;
-
/*
- * YUV422P planar format requires images size to be a 16 bytes
- * multiple. If not, zeros will be inserted between Y and U planes, and
- * U and V planes, and YUV422P standard would be violated.
+ * Limit to pxa hardware capabilities. YUV422P planar format requires
+ * the image size to be a multiple of 16 bytes. If not, zeros will be
+ * inserted between Y and U planes, and U and V planes, which violates
+ * the YUV422P standard.
*/
- if (xlate->host_fmt->fourcc == V4L2_PIX_FMT_YUV422P) {
- if (!IS_ALIGNED(pix->width * pix->height, YUV422P_SIZE_ALIGN))
- pix->height = ALIGN(pix->height, YUV422P_X_Y_ALIGN);
- if (!IS_ALIGNED(pix->width * pix->height, YUV422P_SIZE_ALIGN))
- pix->width = ALIGN(pix->width, YUV422P_X_Y_ALIGN);
- }
+ v4l_bound_align_image(&pix->width, 48, 2048, 1,
+ &pix->height, 32, 2048, 0,
+ xlate->host_fmt->fourcc == V4L2_PIX_FMT_YUV422P ? 4 : 0);
pix->bytesperline = pix->width *
DIV_ROUND_UP(xlate->host_fmt->depth, 8);
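For the YUV422P case the new call requests a width in 48..2048 aligned to a multiple of 2 (walign = 1), a height in 32..2048 with no alignment (halign = 0), and an overall width*height that is a multiple of 2^4 = 16 (salign = 4). As a hand-traced illustration of the helper introduced in the v4l2-common.c hunk further down (the request values are hypothetical, not driver output): a requested 641 x 481 frame is first rounded to 642 x 481; because the product is not yet a multiple of 16, the helper alternately raises the height alignment (482), then the width (644), then the height again (484), stopping at 644 x 484, where 644 * 484 = 311696 = 16 * 19481.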
diff --git a/drivers/media/video/saa7134/saa7134-video.c b/drivers/media/video/saa7134/saa7134-video.c
index e305c16..ba87128 100644
--- a/drivers/media/video/saa7134/saa7134-video.c
+++ b/drivers/media/video/saa7134/saa7134-video.c
@@ -1640,15 +1640,8 @@
}
f->fmt.pix.field = field;
- if (f->fmt.pix.width < 48)
- f->fmt.pix.width = 48;
- if (f->fmt.pix.height < 32)
- f->fmt.pix.height = 32;
- if (f->fmt.pix.width > maxw)
- f->fmt.pix.width = maxw;
- if (f->fmt.pix.height > maxh)
- f->fmt.pix.height = maxh;
- f->fmt.pix.width &= ~0x03;
+ v4l_bound_align_image(&f->fmt.pix.width, 48, maxw, 2,
+ &f->fmt.pix.height, 32, maxh, 0, 0);
f->fmt.pix.bytesperline =
(f->fmt.pix.width * fmt->depth) >> 3;
f->fmt.pix.sizeimage =
diff --git a/drivers/media/video/sh_mobile_ceu_camera.c b/drivers/media/video/sh_mobile_ceu_camera.c
index d369e84..0db88a5 100644
--- a/drivers/media/video/sh_mobile_ceu_camera.c
+++ b/drivers/media/video/sh_mobile_ceu_camera.c
@@ -689,16 +689,8 @@
/* FIXME: calculate using depth and bus width */
- if (f->fmt.pix.height < 4)
- f->fmt.pix.height = 4;
- if (f->fmt.pix.height > 1920)
- f->fmt.pix.height = 1920;
- if (f->fmt.pix.width < 2)
- f->fmt.pix.width = 2;
- if (f->fmt.pix.width > 2560)
- f->fmt.pix.width = 2560;
- f->fmt.pix.width &= ~0x01;
- f->fmt.pix.height &= ~0x03;
+ v4l_bound_align_image(&f->fmt.pix.width, 2, 2560, 1,
+ &f->fmt.pix.height, 4, 1920, 2, 0);
f->fmt.pix.bytesperline = f->fmt.pix.width *
DIV_ROUND_UP(xlate->host_fmt->depth, 8);
diff --git a/drivers/media/video/tcm825x.c b/drivers/media/video/tcm825x.c
index b30c492..b90e9da 100644
--- a/drivers/media/video/tcm825x.c
+++ b/drivers/media/video/tcm825x.c
@@ -878,7 +878,7 @@
return rval;
}
-static int __exit tcm825x_remove(struct i2c_client *client)
+static int tcm825x_remove(struct i2c_client *client)
{
struct tcm825x_sensor *sensor = i2c_get_clientdata(client);
@@ -902,7 +902,7 @@
.name = TCM825X_NAME,
},
.probe = tcm825x_probe,
- .remove = __exit_p(tcm825x_remove),
+ .remove = tcm825x_remove,
.id_table = tcm825x_id,
};
diff --git a/drivers/media/video/usbvideo/Kconfig b/drivers/media/video/usbvideo/Kconfig
index e4cb99c..adb1c04 100644
--- a/drivers/media/video/usbvideo/Kconfig
+++ b/drivers/media/video/usbvideo/Kconfig
@@ -38,10 +38,13 @@
module will be called konicawc.
config USB_QUICKCAM_MESSENGER
- tristate "USB Logitech Quickcam Messenger"
+ tristate "USB Logitech Quickcam Messenger (DEPRECATED)"
depends on VIDEO_V4L1
select VIDEO_USBVIDEO
---help---
+ This driver is DEPRECATED; please use the gspca stv06xx module
+ instead.
+
Say Y or M here to enable support for the USB Logitech Quickcam
Messenger webcam.
diff --git a/drivers/media/video/v4l2-common.c b/drivers/media/video/v4l2-common.c
index f964756..b91d66a 100644
--- a/drivers/media/video/v4l2-common.c
+++ b/drivers/media/video/v4l2-common.c
@@ -802,6 +802,17 @@
/* Decrease the module use count to match the first try_module_get. */
module_put(client->driver->driver.owner);
+ if (sd) {
+ /* We return errors from v4l2_subdev_call only if we have the
+ callback, as .s_config is not mandatory */
+ int err = v4l2_subdev_call(sd, core, s_config, 0, NULL);
+
+ if (err && err != -ENOIOCTLCMD) {
+ v4l2_device_unregister_subdev(sd);
+ sd = NULL;
+ }
+ }
+
error:
/* If we have a client but no subdev, then something went wrong and
we must unregister the client. */
@@ -852,6 +863,17 @@
/* Decrease the module use count to match the first try_module_get. */
module_put(client->driver->driver.owner);
+ if (sd) {
+ /* We return errors from v4l2_subdev_call only if we have the
+ callback, as .s_config is not mandatory */
+ int err = v4l2_subdev_call(sd, core, s_config, 0, NULL);
+
+ if (err && err != -ENOIOCTLCMD) {
+ v4l2_device_unregister_subdev(sd);
+ sd = NULL;
+ }
+ }
+
error:
/* If we have a client but no subdev, then something went wrong and
we must unregister the client. */
@@ -872,6 +894,89 @@
}
EXPORT_SYMBOL_GPL(v4l2_i2c_new_probed_subdev_addr);
+/* Load an i2c sub-device. */
+struct v4l2_subdev *v4l2_i2c_new_subdev_board(struct v4l2_device *v4l2_dev,
+ struct i2c_adapter *adapter, const char *module_name,
+ struct i2c_board_info *info, const unsigned short *probe_addrs)
+{
+ struct v4l2_subdev *sd = NULL;
+ struct i2c_client *client;
+
+ BUG_ON(!v4l2_dev);
+
+ if (module_name)
+ request_module(module_name);
+
+ /* Create the i2c client */
+ if (info->addr == 0 && probe_addrs)
+ client = i2c_new_probed_device(adapter, info, probe_addrs);
+ else
+ client = i2c_new_device(adapter, info);
+
+ /* Note: by loading the module first we are certain that c->driver
+ will be set if the driver was found. If the module was not loaded
+ first, then the i2c core tries to delay-load the module for us,
+ and then c->driver is still NULL until the module is finally
+ loaded. This delay-load mechanism doesn't work if other drivers
+ want to use the i2c device, so explicitly loading the module
+ is the best alternative. */
+ if (client == NULL || client->driver == NULL)
+ goto error;
+
+ /* Lock the module so we can safely get the v4l2_subdev pointer */
+ if (!try_module_get(client->driver->driver.owner))
+ goto error;
+ sd = i2c_get_clientdata(client);
+
+ /* Register with the v4l2_device which increases the module's
+ use count as well. */
+ if (v4l2_device_register_subdev(v4l2_dev, sd))
+ sd = NULL;
+ /* Decrease the module use count to match the first try_module_get. */
+ module_put(client->driver->driver.owner);
+
+ if (sd) {
+ /* We return errors from v4l2_subdev_call only if we have the
+ callback, as .s_config is not mandatory */
+ int err = v4l2_subdev_call(sd, core, s_config,
+ info->irq, info->platform_data);
+
+ if (err && err != -ENOIOCTLCMD) {
+ v4l2_device_unregister_subdev(sd);
+ sd = NULL;
+ }
+ }
+
+error:
+ /* If we have a client but no subdev, then something went wrong and
+ we must unregister the client. */
+ if (client && sd == NULL)
+ i2c_unregister_device(client);
+ return sd;
+}
+EXPORT_SYMBOL_GPL(v4l2_i2c_new_subdev_board);
+
+struct v4l2_subdev *v4l2_i2c_new_subdev_cfg(struct v4l2_device *v4l2_dev,
+ struct i2c_adapter *adapter,
+ const char *module_name, const char *client_type,
+ int irq, void *platform_data,
+ u8 addr, const unsigned short *probe_addrs)
+{
+ struct i2c_board_info info;
+
+ /* Setup the i2c board info with the device type and
+ the device address. */
+ memset(&info, 0, sizeof(info));
+ strlcpy(info.type, client_type, sizeof(info.type));
+ info.addr = addr;
+ info.irq = irq;
+ info.platform_data = platform_data;
+
+ return v4l2_i2c_new_subdev_board(v4l2_dev, adapter, module_name,
+ &info, probe_addrs);
+}
+EXPORT_SYMBOL_GPL(v4l2_i2c_new_subdev_cfg);
+
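v4l2_i2c_new_subdev_board() (and the v4l2_i2c_new_subdev_cfg() wrapper below it) lets a bridge driver hand over a complete i2c_board_info, so the sub-device's optional s_config core op receives the irq and platform_data at load time. A hedged sketch of a caller, assuming the usual kernel headers; the "saa7115" module name, the 0x21 address and the surrounding function are placeholders, not part of this patch:

/* Illustrative fragment only. */
static struct v4l2_subdev *attach_decoder(struct v4l2_device *v4l2_dev,
                                          struct i2c_adapter *adapter)
{
        struct i2c_board_info info;

        memset(&info, 0, sizeof(info));
        strlcpy(info.type, "saa7115", sizeof(info.type));
        info.addr = 0x21;               /* hypothetical slave address */
        info.platform_data = NULL;      /* board-specific setup, if any */

        /* irq and platform_data reach the sub-device via its optional
         * s_config op; a NULL probe list means "use info.addr" directly. */
        return v4l2_i2c_new_subdev_board(v4l2_dev, adapter, "saa7115",
                                         &info, NULL);
}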
/* Return i2c client address of v4l2_subdev. */
unsigned short v4l2_i2c_subdev_addr(struct v4l2_subdev *sd)
{
@@ -916,4 +1021,78 @@
}
EXPORT_SYMBOL_GPL(v4l2_i2c_tuner_addrs);
-#endif
+#endif /* defined(CONFIG_I2C) */
+
+/* Clamp x to be between min and max, aligned to a multiple of 2^align. min
+ * and max don't have to be aligned, but there must be at least one valid
+ * value. E.g., min=17,max=31,align=4 is not allowed as there are no multiples
+ * of 16 between 17 and 31. */
+static unsigned int clamp_align(unsigned int x, unsigned int min,
+ unsigned int max, unsigned int align)
+{
+ /* Bits that must be zero to be aligned */
+ unsigned int mask = ~((1 << align) - 1);
+
+ /* Round to nearest aligned value */
+ if (align)
+ x = (x + (1 << (align - 1))) & mask;
+
+ /* Clamp to aligned value of min and max */
+ if (x < min)
+ x = (min + ~mask) & mask;
+ else if (x > max)
+ x = max & mask;
+
+ return x;
+}
+
+/* Bound an image to have a width between wmin and wmax, and height between
+ * hmin and hmax, inclusive. Additionally, the width will be a multiple of
+ * 2^walign, the height will be a multiple of 2^halign, and the overall size
+ * (width*height) will be a multiple of 2^salign. The image may be shrunk
+ * or enlarged to fit the alignment constraints.
+ *
+ * The width or height maximum must not be smaller than the corresponding
+ * minimum. The alignments must not be so high there are no possible image
+ * sizes within the allowed bounds. wmin and hmin must be at least 1
+ * (don't use 0). If you don't care about a certain alignment, specify 0,
+ * as 2^0 is 1 and one byte alignment is equivalent to no alignment. If
+ * you only want to adjust downward, specify a maximum that's the same as
+ * the initial value.
+ */
+void v4l_bound_align_image(u32 *w, unsigned int wmin, unsigned int wmax,
+ unsigned int walign,
+ u32 *h, unsigned int hmin, unsigned int hmax,
+ unsigned int halign, unsigned int salign)
+{
+ *w = clamp_align(*w, wmin, wmax, walign);
+ *h = clamp_align(*h, hmin, hmax, halign);
+
+ /* Usually we don't need to align the size and are done now. */
+ if (!salign)
+ return;
+
+ /* How much alignment do we have? */
+ walign = __ffs(*w);
+ halign = __ffs(*h);
+ /* Enough to satisfy the image alignment? */
+ if (walign + halign < salign) {
+ /* Max walign where there is still a valid width */
+ unsigned int wmaxa = __fls(wmax ^ (wmin - 1));
+ /* Max halign where there is still a valid height */
+ unsigned int hmaxa = __fls(hmax ^ (hmin - 1));
+
+ /* up the smaller alignment until we have enough */
+ do {
+ if (halign >= hmaxa ||
+ (walign <= halign && walign < wmaxa)) {
+ *w = clamp_align(*w, wmin, wmax, walign + 1);
+ walign = __ffs(*w);
+ } else {
+ *h = clamp_align(*h, hmin, hmax, halign + 1);
+ halign = __ffs(*h);
+ }
+ } while (halign + walign < salign);
+ }
+}
+EXPORT_SYMBOL_GPL(v4l_bound_align_image);
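clamp_align() above does round-to-nearest within [min, max] at a power-of-two alignment, which is what v4l_bound_align_image() builds on. A small user-space port of just that logic (a sketch for experimentation, not kernel code), with the expected results noted in comments:

#include <stdio.h>

/* User-space copy of the clamp_align() logic introduced above. */
static unsigned int clamp_align(unsigned int x, unsigned int min,
                                unsigned int max, unsigned int align)
{
        unsigned int mask = ~((1 << align) - 1);

        /* Round to the nearest aligned value */
        if (align)
                x = (x + (1 << (align - 1))) & mask;

        if (x < min)
                x = (min + ~mask) & mask;       /* lowest aligned value >= min */
        else if (x > max)
                x = max & mask;                 /* highest aligned value <= max */

        return x;
}

int main(void)
{
        /* Bound a width to 48..752, aligned to a multiple of 4 (2^2). */
        printf("%u\n", clamp_align(643, 48, 752, 2));   /* prints 644 */
        printf("%u\n", clamp_align(30, 48, 752, 2));    /* prints 48  */
        printf("%u\n", clamp_align(1000, 48, 752, 2));  /* prints 752 */
        return 0;
}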
diff --git a/drivers/media/video/vivi.c b/drivers/media/video/vivi.c
index fbfefae..cd72668 100644
--- a/drivers/media/video/vivi.c
+++ b/drivers/media/video/vivi.c
@@ -883,15 +883,8 @@
maxh = norm_maxh();
f->fmt.pix.field = field;
- if (f->fmt.pix.height < 32)
- f->fmt.pix.height = 32;
- if (f->fmt.pix.height > maxh)
- f->fmt.pix.height = maxh;
- if (f->fmt.pix.width < 48)
- f->fmt.pix.width = 48;
- if (f->fmt.pix.width > maxw)
- f->fmt.pix.width = maxw;
- f->fmt.pix.width &= ~0x03;
+ v4l_bound_align_image(&f->fmt.pix.width, 48, maxw, 2,
+ &f->fmt.pix.height, 32, maxh, 0, 0);
f->fmt.pix.bytesperline =
(f->fmt.pix.width * fmt->depth) >> 3;
f->fmt.pix.sizeimage =
diff --git a/drivers/media/video/w9968cf.c b/drivers/media/video/w9968cf.c
index f59b2bd..6c3f23e 100644
--- a/drivers/media/video/w9968cf.c
+++ b/drivers/media/video/w9968cf.c
@@ -460,7 +460,7 @@
static int w9968cf_set_window(struct w9968cf_device*, struct video_window);
static int w9968cf_postprocess_frame(struct w9968cf_device*,
struct w9968cf_frame_t*);
-static int w9968cf_adjust_window_size(struct w9968cf_device*, u16* w, u16* h);
+static int w9968cf_adjust_window_size(struct w9968cf_device*, u32 *w, u32 *h);
static void w9968cf_init_framelist(struct w9968cf_device*);
static void w9968cf_push_frame(struct w9968cf_device*, u8 f_num);
static void w9968cf_pop_frame(struct w9968cf_device*,struct w9968cf_frame_t**);
@@ -1763,8 +1763,7 @@
#define UNSC(x) ((x) >> 10)
/* Make sure we are using a supported resolution */
- if ((err = w9968cf_adjust_window_size(cam, (u16*)&win.width,
- (u16*)&win.height)))
+ if ((err = w9968cf_adjust_window_size(cam, &win.width, &win.height)))
goto error;
/* Scaling factors */
@@ -1914,12 +1913,9 @@
Return 0 on success, -1 otherwise.
--------------------------------------------------------------------------*/
static int
-w9968cf_adjust_window_size(struct w9968cf_device* cam, u16* width, u16* height)
+w9968cf_adjust_window_size(struct w9968cf_device *cam, u32 *width, u32 *height)
{
- u16 maxw, maxh;
-
- if ((*width < cam->minwidth) || (*height < cam->minheight))
- return -ERANGE;
+ unsigned int maxw, maxh, align;
maxw = cam->upscaling && !(cam->vpp_flag & VPP_DECOMPRESSION) &&
w9968cf_vpp ? max((u16)W9968CF_MAX_WIDTH, cam->maxwidth)
@@ -1927,16 +1923,10 @@
maxh = cam->upscaling && !(cam->vpp_flag & VPP_DECOMPRESSION) &&
w9968cf_vpp ? max((u16)W9968CF_MAX_HEIGHT, cam->maxheight)
: cam->maxheight;
+ align = (cam->vpp_flag & VPP_DECOMPRESSION) ? 4 : 0;
- if (*width > maxw)
- *width = maxw;
- if (*height > maxh)
- *height = maxh;
-
- if (cam->vpp_flag & VPP_DECOMPRESSION) {
- *width &= ~15L; /* multiple of 16 */
- *height &= ~15L;
- }
+ v4l_bound_align_image(width, cam->minwidth, maxw, align,
+ height, cam->minheight, maxh, align, 0);
PDBGG("Window size adjusted w=%u, h=%u ", *width, *height)
@@ -3043,8 +3033,8 @@
if (win.clipcount != 0 || win.flags != 0)
return -EINVAL;
- if ((err = w9968cf_adjust_window_size(cam, (u16*)&win.width,
- (u16*)&win.height))) {
+ if ((err = w9968cf_adjust_window_size(cam, &win.width,
+ &win.height))) {
DBG(4, "Resolution not supported (%ux%u). "
"VIDIOCSWIN failed", win.width, win.height)
return err;
@@ -3116,6 +3106,7 @@
{
struct video_mmap mmap;
struct w9968cf_frame_t* fr;
+ u32 w, h;
int err = 0;
if (copy_from_user(&mmap, arg, sizeof(mmap)))
@@ -3164,8 +3155,10 @@
}
}
- if ((err = w9968cf_adjust_window_size(cam, (u16*)&mmap.width,
- (u16*)&mmap.height))) {
+ w = mmap.width; h = mmap.height;
+ err = w9968cf_adjust_window_size(cam, &w, &h);
+ mmap.width = w; mmap.height = h;
+ if (err) {
DBG(4, "Resolution not supported (%dx%d). "
"VIDIOCMCAPTURE failed",
mmap.width, mmap.height)
diff --git a/drivers/media/video/zoran/zoran_driver.c b/drivers/media/video/zoran/zoran_driver.c
index 643ccca..3d7df32 100644
--- a/drivers/media/video/zoran/zoran_driver.c
+++ b/drivers/media/video/zoran/zoran_driver.c
@@ -2088,16 +2088,10 @@
return -EINVAL;
}
- bpp = (zoran_formats[i].depth + 7) / 8;
- fmt->fmt.pix.width &= ~((bpp == 2) ? 1 : 3);
- if (fmt->fmt.pix.width > BUZ_MAX_WIDTH)
- fmt->fmt.pix.width = BUZ_MAX_WIDTH;
- if (fmt->fmt.pix.width < BUZ_MIN_WIDTH)
- fmt->fmt.pix.width = BUZ_MIN_WIDTH;
- if (fmt->fmt.pix.height > BUZ_MAX_HEIGHT)
- fmt->fmt.pix.height = BUZ_MAX_HEIGHT;
- if (fmt->fmt.pix.height < BUZ_MIN_HEIGHT)
- fmt->fmt.pix.height = BUZ_MIN_HEIGHT;
+ bpp = DIV_ROUND_UP(zoran_formats[i].depth, 8);
+ v4l_bound_align_image(
+ &fmt->fmt.pix.width, BUZ_MIN_WIDTH, BUZ_MAX_WIDTH, bpp == 2 ? 1 : 2,
+ &fmt->fmt.pix.height, BUZ_MIN_HEIGHT, BUZ_MAX_HEIGHT, 0, 0);
mutex_unlock(&zr->resource_lock);
return 0;
diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
index 20e0b44..55ff252 100644
--- a/drivers/message/fusion/mptsas.c
+++ b/drivers/message/fusion/mptsas.c
@@ -3518,7 +3518,7 @@
} else
mptsas_volume_delete(ioc, sas_info->fw.id);
}
- mutex_lock(&ioc->sas_device_info_mutex);
+ mutex_unlock(&ioc->sas_device_info_mutex);
/* expanders */
mutex_lock(&ioc->sas_topology_mutex);
@@ -3549,7 +3549,7 @@
goto redo_expander_scan;
}
}
- mutex_lock(&ioc->sas_topology_mutex);
+ mutex_unlock(&ioc->sas_topology_mutex);
}
/**
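Both mptsas hunks above fix the same cut-and-paste error: the exit of a list traversal called mutex_lock() where mutex_unlock() was intended. Since kernel mutexes are not recursive, taking sas_device_info_mutex (or sas_topology_mutex in the second hunk) a second time from the same task deadlocks on the spot, and even if it did not, the lock would be left held after the scan and block the next taker forever. The fix simply pairs each lock taken at the top of the walk with an unlock on exit.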
diff --git a/drivers/mfd/twl4030-core.c b/drivers/mfd/twl4030-core.c
index cd1008c..ca54996 100644
--- a/drivers/mfd/twl4030-core.c
+++ b/drivers/mfd/twl4030-core.c
@@ -101,6 +101,12 @@
#define twl_has_usb() false
#endif
+#if defined(CONFIG_TWL4030_WATCHDOG) || \
+ defined(CONFIG_TWL4030_WATCHDOG_MODULE)
+#define twl_has_watchdog() true
+#else
+#define twl_has_watchdog() false
+#endif
/* Triton Core internal information (BEGIN) */
@@ -526,6 +532,12 @@
usb_transceiver = child;
}
+ if (twl_has_watchdog()) {
+ child = add_child(0, "twl4030_wdt", NULL, 0, false, 0, 0);
+ if (IS_ERR(child))
+ return PTR_ERR(child);
+ }
+
if (twl_has_regulator()) {
/*
child = add_regulator(TWL4030_REG_VPLL1, pdata->vpll1);
diff --git a/drivers/mtd/chips/cfi_cmdset_0001.c b/drivers/mtd/chips/cfi_cmdset_0001.c
index c240454..8664fee 100644
--- a/drivers/mtd/chips/cfi_cmdset_0001.c
+++ b/drivers/mtd/chips/cfi_cmdset_0001.c
@@ -46,6 +46,7 @@
#define MANUFACTURER_INTEL 0x0089
#define I82802AB 0x00ad
#define I82802AC 0x00ac
+#define PF38F4476 0x881c
#define MANUFACTURER_ST 0x0020
#define M50LPW080 0x002F
#define M50FLW080A 0x0080
@@ -315,10 +316,20 @@
{ 0, 0, NULL, NULL }
};
+static void cfi_fixup_major_minor(struct cfi_private *cfi,
+ struct cfi_pri_intelext *extp)
+{
+ if (cfi->mfr == MANUFACTURER_INTEL &&
+ cfi->id == PF38F4476 && extp->MinorVersion == '3')
+ extp->MinorVersion = '1';
+}
+
static inline struct cfi_pri_intelext *
read_pri_intelext(struct map_info *map, __u16 adr)
{
+ struct cfi_private *cfi = map->fldrv_priv;
struct cfi_pri_intelext *extp;
+ unsigned int extra_size = 0;
unsigned int extp_size = sizeof(*extp);
again:
@@ -326,6 +337,8 @@
if (!extp)
return NULL;
+ cfi_fixup_major_minor(cfi, extp);
+
if (extp->MajorVersion != '1' ||
(extp->MinorVersion < '0' || extp->MinorVersion > '5')) {
printk(KERN_ERR " Unknown Intel/Sharp Extended Query "
@@ -340,19 +353,24 @@
extp->BlkStatusRegMask = le16_to_cpu(extp->BlkStatusRegMask);
extp->ProtRegAddr = le16_to_cpu(extp->ProtRegAddr);
- if (extp->MajorVersion == '1' && extp->MinorVersion >= '3') {
- unsigned int extra_size = 0;
- int nb_parts, i;
+ if (extp->MinorVersion >= '0') {
+ extra_size = 0;
/* Protection Register info */
extra_size += (extp->NumProtectionFields - 1) *
sizeof(struct cfi_intelext_otpinfo);
+ }
+ if (extp->MinorVersion >= '1') {
/* Burst Read info */
extra_size += 2;
if (extp_size < sizeof(*extp) + extra_size)
goto need_more;
- extra_size += extp->extra[extra_size-1];
+ extra_size += extp->extra[extra_size - 1];
+ }
+
+ if (extp->MinorVersion >= '3') {
+ int nb_parts, i;
/* Number of hardware-partitions */
extra_size += 1;
diff --git a/drivers/mtd/chips/jedec_probe.c b/drivers/mtd/chips/jedec_probe.c
index e824b9b..ccc4cfc 100644
--- a/drivers/mtd/chips/jedec_probe.c
+++ b/drivers/mtd/chips/jedec_probe.c
@@ -166,6 +166,7 @@
#define SST39LF040 0x00D7
#define SST39SF010A 0x00B5
#define SST39SF020A 0x00B6
+#define SST39SF040 0x00B7
#define SST49LF004B 0x0060
#define SST49LF040B 0x0050
#define SST49LF008A 0x005a
@@ -1393,6 +1394,18 @@
}
}, {
.mfr_id = MANUFACTURER_SST,
+ .dev_id = SST39SF040,
+ .name = "SST 39SF040",
+ .devtypes = CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x5555_0x2AAA,
+ .dev_size = SIZE_512KiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 1,
+ .regions = {
+ ERASEINFO(0x01000,128),
+ }
+ }, {
+ .mfr_id = MANUFACTURER_SST,
.dev_id = SST49LF040B,
.name = "SST 49LF040B",
.devtypes = CFI_DEVICETYPE_X8,
diff --git a/drivers/mtd/devices/m25p80.c b/drivers/mtd/devices/m25p80.c
index cc6369e..59c4612 100644
--- a/drivers/mtd/devices/m25p80.c
+++ b/drivers/mtd/devices/m25p80.c
@@ -500,6 +500,9 @@
{ "at26df161a", 0x1f4601, 0, 64 * 1024, 32, SECT_4K, },
{ "at26df321", 0x1f4701, 0, 64 * 1024, 64, SECT_4K, },
+ /* Macronix */
+ { "mx25l12805d", 0xc22018, 0, 64 * 1024, 256, },
+
/* Spansion -- single (large) sector size only, at least
* for the chips listed here (without boot sectors).
*/
@@ -528,6 +531,7 @@
{ "m25p64", 0x202017, 0, 64 * 1024, 128, },
{ "m25p128", 0x202018, 0, 256 * 1024, 64, },
+ { "m45pe10", 0x204011, 0, 64 * 1024, 2, },
{ "m45pe80", 0x204014, 0, 64 * 1024, 16, },
{ "m45pe16", 0x204015, 0, 64 * 1024, 32, },
diff --git a/drivers/mtd/maps/Kconfig b/drivers/mtd/maps/Kconfig
index 82923bd..0b98654 100644
--- a/drivers/mtd/maps/Kconfig
+++ b/drivers/mtd/maps/Kconfig
@@ -105,15 +105,6 @@
default "0x02000000"
depends on MSP_FLASH_MAP_LIMIT_32M
-config MTD_PMC_MSP_RAMROOT
- tristate "Embedded RAM block device for root on PMC-Sierra MSP"
- depends on PMC_MSP_EMBEDDED_ROOTFS && \
- (MTD_BLOCK || MTD_BLOCK_RO) && \
- MTD_RAM
- help
- This provides support for the embedded root file system
- on PMC MSP devices. This memory is mapped as a MTD block device.
-
config MTD_SUN_UFLASH
tristate "Sun Microsystems userflash support"
depends on SPARC && MTD_CFI && PCI
@@ -270,7 +261,7 @@
config MTD_DILNETPC
tristate "CFI Flash device mapped on DIL/Net PC"
- depends on X86 && MTD_CONCAT && MTD_PARTITIONS && MTD_CFI_INTELEXT
+ depends on X86 && MTD_CONCAT && MTD_PARTITIONS && MTD_CFI_INTELEXT && BROKEN
help
MTD map driver for SSV DIL/Net PC Boards "DNP" and "ADNP".
For details, see <http://www.ssv-embedded.de/ssv/pc104/p169.htm>
@@ -501,7 +492,7 @@
If compiled as a module, it will be called bfin-async-flash.
config MTD_UCLINUX
- tristate "Generic uClinux RAM/ROM filesystem support"
+ bool "Generic uClinux RAM/ROM filesystem support"
depends on MTD_PARTITIONS && MTD_RAM && !MMU
help
Map driver to support image based filesystems for uClinux.
diff --git a/drivers/mtd/maps/Makefile b/drivers/mtd/maps/Makefile
index 2dbc1be..8bae7f9 100644
--- a/drivers/mtd/maps/Makefile
+++ b/drivers/mtd/maps/Makefile
@@ -25,7 +25,6 @@
obj-$(CONFIG_MTD_PHYSMAP) += physmap.o
obj-$(CONFIG_MTD_PHYSMAP_OF) += physmap_of.o
obj-$(CONFIG_MTD_PMC_MSP_EVM) += pmcmsp-flash.o
-obj-$(CONFIG_MTD_PMC_MSP_RAMROOT)+= pmcmsp-ramroot.o
obj-$(CONFIG_MTD_PCMCIA) += pcmciamtd.o
obj-$(CONFIG_MTD_RPXLITE) += rpxlite.o
obj-$(CONFIG_MTD_TQM8XXL) += tqm8xxl.o
diff --git a/drivers/mtd/maps/bfin-async-flash.c b/drivers/mtd/maps/bfin-async-flash.c
index 576611f..365c77b 100644
--- a/drivers/mtd/maps/bfin-async-flash.c
+++ b/drivers/mtd/maps/bfin-async-flash.c
@@ -40,6 +40,9 @@
uint32_t flash_ambctl0, flash_ambctl1;
uint32_t save_ambctl0, save_ambctl1;
unsigned long irq_flags;
+#ifdef CONFIG_MTD_PARTITIONS
+ struct mtd_partition *parts;
+#endif
};
static void switch_to_flash(struct async_state *state)
@@ -170,6 +173,7 @@
if (ret > 0) {
pr_devinit(KERN_NOTICE DRIVER_NAME ": Using commandline partition definition\n");
add_mtd_partitions(state->mtd, pdata->parts, ret);
+ state->parts = pdata->parts;
} else if (pdata->nr_parts) {
pr_devinit(KERN_NOTICE DRIVER_NAME ": Using board partition definition\n");
@@ -193,6 +197,7 @@
gpio_free(state->enet_flash_pin);
#ifdef CONFIG_MTD_PARTITIONS
del_mtd_partitions(state->mtd);
+ kfree(state->parts);
#endif
map_destroy(state->mtd);
kfree(state);
diff --git a/drivers/mtd/maps/integrator-flash.c b/drivers/mtd/maps/integrator-flash.c
index c9681a3..b08a798 100644
--- a/drivers/mtd/maps/integrator-flash.c
+++ b/drivers/mtd/maps/integrator-flash.c
@@ -36,27 +36,33 @@
#include <linux/mtd/mtd.h>
#include <linux/mtd/map.h>
#include <linux/mtd/partitions.h>
+#include <linux/mtd/concat.h>
#include <asm/mach/flash.h>
#include <mach/hardware.h>
#include <asm/system.h>
-#ifdef CONFIG_ARCH_P720T
-#define FLASH_BASE (0x04000000)
-#define FLASH_SIZE (64*1024*1024)
-#endif
+#define SUBDEV_NAME_SIZE (BUS_ID_SIZE + 2)
+
+struct armflash_subdev_info {
+ char name[SUBDEV_NAME_SIZE];
+ struct mtd_info *mtd;
+ struct map_info map;
+ struct flash_platform_data *plat;
+};
struct armflash_info {
- struct flash_platform_data *plat;
struct resource *res;
struct mtd_partition *parts;
struct mtd_info *mtd;
- struct map_info map;
+ int nr_subdev;
+ struct armflash_subdev_info subdev[0];
};
static void armflash_set_vpp(struct map_info *map, int on)
{
- struct armflash_info *info = container_of(map, struct armflash_info, map);
+ struct armflash_subdev_info *info =
+ container_of(map, struct armflash_subdev_info, map);
if (info->plat && info->plat->set_vpp)
info->plat->set_vpp(on);
@@ -64,32 +70,17 @@
static const char *probes[] = { "cmdlinepart", "RedBoot", "afs", NULL };
-static int armflash_probe(struct platform_device *dev)
+static int armflash_subdev_probe(struct armflash_subdev_info *subdev,
+ struct resource *res)
{
- struct flash_platform_data *plat = dev->dev.platform_data;
- struct resource *res = dev->resource;
- unsigned int size = res->end - res->start + 1;
- struct armflash_info *info;
- int err;
+ struct flash_platform_data *plat = subdev->plat;
+ resource_size_t size = res->end - res->start + 1;
void __iomem *base;
+ int err = 0;
- info = kzalloc(sizeof(struct armflash_info), GFP_KERNEL);
- if (!info) {
- err = -ENOMEM;
- goto out;
- }
-
- info->plat = plat;
- if (plat && plat->init) {
- err = plat->init();
- if (err)
- goto no_resource;
- }
-
- info->res = request_mem_region(res->start, size, "armflash");
- if (!info->res) {
+ if (!request_mem_region(res->start, size, subdev->name)) {
err = -EBUSY;
- goto no_resource;
+ goto out;
}
base = ioremap(res->start, size);
@@ -101,27 +92,132 @@
/*
* look for CFI based flash parts fitted to this board
*/
- info->map.size = size;
- info->map.bankwidth = plat->width;
- info->map.phys = res->start;
- info->map.virt = base;
- info->map.name = dev_name(&dev->dev);
- info->map.set_vpp = armflash_set_vpp;
+ subdev->map.size = size;
+ subdev->map.bankwidth = plat->width;
+ subdev->map.phys = res->start;
+ subdev->map.virt = base;
+ subdev->map.name = subdev->name;
+ subdev->map.set_vpp = armflash_set_vpp;
- simple_map_init(&info->map);
+ simple_map_init(&subdev->map);
/*
* Also, the CFI layer automatically works out what size
* of chips we have, and does the necessary identification
* for us automatically.
*/
- info->mtd = do_map_probe(plat->map_name, &info->map);
- if (!info->mtd) {
+ subdev->mtd = do_map_probe(plat->map_name, &subdev->map);
+ if (!subdev->mtd) {
err = -ENXIO;
goto no_device;
}
- info->mtd->owner = THIS_MODULE;
+ subdev->mtd->owner = THIS_MODULE;
+
+ /* Successful? */
+ if (err == 0)
+ return err;
+
+ if (subdev->mtd)
+ map_destroy(subdev->mtd);
+ no_device:
+ iounmap(base);
+ no_mem:
+ release_mem_region(res->start, size);
+ out:
+ return err;
+}
+
+static void armflash_subdev_remove(struct armflash_subdev_info *subdev)
+{
+ if (subdev->mtd)
+ map_destroy(subdev->mtd);
+ if (subdev->map.virt)
+ iounmap(subdev->map.virt);
+ release_mem_region(subdev->map.phys, subdev->map.size);
+}
+
+static int armflash_probe(struct platform_device *dev)
+{
+ struct flash_platform_data *plat = dev->dev.platform_data;
+ unsigned int size;
+ struct armflash_info *info;
+ int i, nr, err;
+
+ /* Count the number of devices */
+ for (nr = 0; ; nr++)
+ if (!platform_get_resource(dev, IORESOURCE_MEM, nr))
+ break;
+ if (nr == 0) {
+ err = -ENODEV;
+ goto out;
+ }
+
+ size = sizeof(struct armflash_info) +
+ sizeof(struct armflash_subdev_info) * nr;
+ info = kzalloc(size, GFP_KERNEL);
+ if (!info) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ if (plat && plat->init) {
+ err = plat->init();
+ if (err)
+ goto no_resource;
+ }
+
+ for (i = 0; i < nr; i++) {
+ struct armflash_subdev_info *subdev = &info->subdev[i];
+ struct resource *res;
+
+ res = platform_get_resource(dev, IORESOURCE_MEM, i);
+ if (!res)
+ break;
+
+ if (nr == 1)
+ /* No MTD concatenation, just use the default name */
+ snprintf(subdev->name, SUBDEV_NAME_SIZE, "%s",
+ dev_name(&dev->dev));
+ else
+ snprintf(subdev->name, SUBDEV_NAME_SIZE, "%s-%d",
+ dev_name(&dev->dev), i);
+ subdev->plat = plat;
+
+ err = armflash_subdev_probe(subdev, res);
+ if (err)
+ break;
+ }
+ info->nr_subdev = i;
+
+ if (err)
+ goto subdev_err;
+
+ if (info->nr_subdev == 1)
+ info->mtd = info->subdev[0].mtd;
+ else if (info->nr_subdev > 1) {
+#ifdef CONFIG_MTD_CONCAT
+ struct mtd_info *cdev[info->nr_subdev];
+
+ /*
+ * We detected multiple devices. Concatenate them together.
+ */
+ for (i = 0; i < info->nr_subdev; i++)
+ cdev[i] = info->subdev[i].mtd;
+
+ info->mtd = mtd_concat_create(cdev, info->nr_subdev,
+ dev_name(&dev->dev));
+ if (info->mtd == NULL)
+ err = -ENXIO;
+#else
+ printk(KERN_ERR "armflash: multiple devices found but "
+ "MTD concat support disabled.\n");
+ err = -ENXIO;
+#endif
+ }
+
+ if (err < 0)
+ goto cleanup;
err = parse_mtd_partitions(info->mtd, probes, &info->parts, 0);
if (err > 0) {
@@ -131,28 +227,30 @@
"mtd partition registration failed: %d\n", err);
}
- if (err == 0)
+ if (err == 0) {
platform_set_drvdata(dev, info);
+ return err;
+ }
/*
- * If we got an error, free all resources.
+ * We got an error, free all resources.
*/
- if (err < 0) {
- if (info->mtd) {
- del_mtd_partitions(info->mtd);
- map_destroy(info->mtd);
- }
- kfree(info->parts);
-
- no_device:
- iounmap(base);
- no_mem:
- release_mem_region(res->start, size);
- no_resource:
- if (plat && plat->exit)
- plat->exit();
- kfree(info);
+ cleanup:
+ if (info->mtd) {
+ del_mtd_partitions(info->mtd);
+#ifdef CONFIG_MTD_CONCAT
+ if (info->mtd != info->subdev[0].mtd)
+ mtd_concat_destroy(info->mtd);
+#endif
}
+ kfree(info->parts);
+ subdev_err:
+ for (i = info->nr_subdev - 1; i >= 0; i--)
+ armflash_subdev_remove(&info->subdev[i]);
+ no_resource:
+ if (plat && plat->exit)
+ plat->exit();
+ kfree(info);
out:
return err;
}
@@ -160,22 +258,26 @@
static int armflash_remove(struct platform_device *dev)
{
struct armflash_info *info = platform_get_drvdata(dev);
+ struct flash_platform_data *plat = dev->dev.platform_data;
+ int i;
platform_set_drvdata(dev, NULL);
if (info) {
if (info->mtd) {
del_mtd_partitions(info->mtd);
- map_destroy(info->mtd);
+#ifdef CONFIG_MTD_CONCAT
+ if (info->mtd != info->subdev[0].mtd)
+ mtd_concat_destroy(info->mtd);
+#endif
}
kfree(info->parts);
- iounmap(info->map.virt);
- release_resource(info->res);
- kfree(info->res);
+ for (i = info->nr_subdev - 1; i >= 0; i--)
+ armflash_subdev_remove(&info->subdev[i]);
- if (info->plat && info->plat->exit)
- info->plat->exit();
+ if (plat && plat->exit)
+ plat->exit();
kfree(info);
}
diff --git a/drivers/mtd/maps/physmap.c b/drivers/mtd/maps/physmap.c
index 29a9011..380648e 100644
--- a/drivers/mtd/maps/physmap.c
+++ b/drivers/mtd/maps/physmap.c
@@ -195,42 +195,6 @@
}
#ifdef CONFIG_PM
-static int physmap_flash_suspend(struct platform_device *dev, pm_message_t state)
-{
- struct physmap_flash_info *info = platform_get_drvdata(dev);
- int ret = 0;
- int i;
-
- for (i = 0; i < MAX_RESOURCES && info->mtd[i]; i++)
- if (info->mtd[i]->suspend) {
- ret = info->mtd[i]->suspend(info->mtd[i]);
- if (ret)
- goto fail;
- }
-
- return 0;
-fail:
- for (--i; i >= 0; --i)
- if (info->mtd[i]->suspend) {
- BUG_ON(!info->mtd[i]->resume);
- info->mtd[i]->resume(info->mtd[i]);
- }
-
- return ret;
-}
-
-static int physmap_flash_resume(struct platform_device *dev)
-{
- struct physmap_flash_info *info = platform_get_drvdata(dev);
- int i;
-
- for (i = 0; i < MAX_RESOURCES && info->mtd[i]; i++)
- if (info->mtd[i]->resume)
- info->mtd[i]->resume(info->mtd[i]);
-
- return 0;
-}
-
static void physmap_flash_shutdown(struct platform_device *dev)
{
struct physmap_flash_info *info = platform_get_drvdata(dev);
@@ -242,16 +206,12 @@
info->mtd[i]->resume(info->mtd[i]);
}
#else
-#define physmap_flash_suspend NULL
-#define physmap_flash_resume NULL
#define physmap_flash_shutdown NULL
#endif
static struct platform_driver physmap_flash_driver = {
.probe = physmap_flash_probe,
.remove = physmap_flash_remove,
- .suspend = physmap_flash_suspend,
- .resume = physmap_flash_resume,
.shutdown = physmap_flash_shutdown,
.driver = {
.name = "physmap-flash",
diff --git a/drivers/mtd/maps/physmap_of.c b/drivers/mtd/maps/physmap_of.c
index c83a60f..39d357b 100644
--- a/drivers/mtd/maps/physmap_of.c
+++ b/drivers/mtd/maps/physmap_of.c
@@ -20,16 +20,23 @@
#include <linux/mtd/mtd.h>
#include <linux/mtd/map.h>
#include <linux/mtd/partitions.h>
+#include <linux/mtd/concat.h>
#include <linux/of.h>
#include <linux/of_platform.h>
+struct of_flash_list {
+ struct mtd_info *mtd;
+ struct map_info map;
+ struct resource *res;
+};
+
struct of_flash {
- struct mtd_info *mtd;
- struct map_info map;
- struct resource *res;
+ struct mtd_info *cmtd;
#ifdef CONFIG_MTD_PARTITIONS
struct mtd_partition *parts;
#endif
+ int list_size; /* number of elements in of_flash_list */
+ struct of_flash_list list[0];
};
#ifdef CONFIG_MTD_PARTITIONS
@@ -88,30 +95,44 @@
static int of_flash_remove(struct of_device *dev)
{
struct of_flash *info;
+ int i;
info = dev_get_drvdata(&dev->dev);
if (!info)
return 0;
dev_set_drvdata(&dev->dev, NULL);
- if (info->mtd) {
+#ifdef CONFIG_MTD_CONCAT
+ if (info->cmtd != info->list[0].mtd) {
+ del_mtd_device(info->cmtd);
+ mtd_concat_destroy(info->cmtd);
+ }
+#endif
+
+ if (info->cmtd) {
if (OF_FLASH_PARTS(info)) {
- del_mtd_partitions(info->mtd);
+ del_mtd_partitions(info->cmtd);
kfree(OF_FLASH_PARTS(info));
} else {
- del_mtd_device(info->mtd);
+ del_mtd_device(info->cmtd);
}
- map_destroy(info->mtd);
}
- if (info->map.virt)
- iounmap(info->map.virt);
+ for (i = 0; i < info->list_size; i++) {
+ if (info->list[i].mtd)
+ map_destroy(info->list[i].mtd);
- if (info->res) {
- release_resource(info->res);
- kfree(info->res);
+ if (info->list[i].map.virt)
+ iounmap(info->list[i].map.virt);
+
+ if (info->list[i].res) {
+ release_resource(info->list[i].res);
+ kfree(info->list[i].res);
+ }
}
+ kfree(info);
+
return 0;
}
@@ -164,68 +185,130 @@
const char *probe_type = match->data;
const u32 *width;
int err;
+ int i;
+ int count;
+ const u32 *p;
+ int reg_tuple_size;
+ struct mtd_info **mtd_list = NULL;
- err = -ENXIO;
- if (of_address_to_resource(dp, 0, &res)) {
- dev_err(&dev->dev, "Can't get IO address from device tree\n");
+ reg_tuple_size = (of_n_addr_cells(dp) + of_n_size_cells(dp)) * sizeof(u32);
+
+ /*
+ * Get number of "reg" tuples. Scan for MTD devices on areas
+ * described by each "reg" region. This makes it possible (including
+ * the concat support) to support the Intel P30 48F4400 chips which
+ * consist internally of 2 non-identical NOR chips on one die.
+ */
+ p = of_get_property(dp, "reg", &count);
+ if (count % reg_tuple_size != 0) {
+ dev_err(&dev->dev, "Malformed reg property on %s\n",
+ dev->node->full_name);
+ err = -EINVAL;
goto err_out;
}
-
- dev_dbg(&dev->dev, "of_flash device: %.8llx-%.8llx\n",
- (unsigned long long)res.start, (unsigned long long)res.end);
+ count /= reg_tuple_size;
err = -ENOMEM;
- info = kzalloc(sizeof(*info), GFP_KERNEL);
+ info = kzalloc(sizeof(struct of_flash) +
+ sizeof(struct of_flash_list) * count, GFP_KERNEL);
+ if (!info)
+ goto err_out;
+
+ mtd_list = kzalloc(sizeof(struct mtd_info) * count, GFP_KERNEL);
if (!info)
goto err_out;
dev_set_drvdata(&dev->dev, info);
- err = -EBUSY;
- info->res = request_mem_region(res.start, res.end - res.start + 1,
- dev_name(&dev->dev));
- if (!info->res)
- goto err_out;
+ for (i = 0; i < count; i++) {
+ err = -ENXIO;
+ if (of_address_to_resource(dp, i, &res)) {
+ dev_err(&dev->dev, "Can't get IO address from device"
+ " tree\n");
+ goto err_out;
+ }
- err = -ENXIO;
- width = of_get_property(dp, "bank-width", NULL);
- if (!width) {
- dev_err(&dev->dev, "Can't get bank width from device tree\n");
- goto err_out;
+ dev_dbg(&dev->dev, "of_flash device: %.8llx-%.8llx\n",
+ (unsigned long long)res.start,
+ (unsigned long long)res.end);
+
+ err = -EBUSY;
+ info->list[i].res = request_mem_region(res.start, res.end -
+ res.start + 1,
+ dev_name(&dev->dev));
+ if (!info->list[i].res)
+ goto err_out;
+
+ err = -ENXIO;
+ width = of_get_property(dp, "bank-width", NULL);
+ if (!width) {
+ dev_err(&dev->dev, "Can't get bank width from device"
+ " tree\n");
+ goto err_out;
+ }
+
+ info->list[i].map.name = dev_name(&dev->dev);
+ info->list[i].map.phys = res.start;
+ info->list[i].map.size = res.end - res.start + 1;
+ info->list[i].map.bankwidth = *width;
+
+ err = -ENOMEM;
+ info->list[i].map.virt = ioremap(info->list[i].map.phys,
+ info->list[i].map.size);
+ if (!info->list[i].map.virt) {
+ dev_err(&dev->dev, "Failed to ioremap() flash"
+ " region\n");
+ goto err_out;
+ }
+
+ simple_map_init(&info->list[i].map);
+
+ if (probe_type) {
+ info->list[i].mtd = do_map_probe(probe_type,
+ &info->list[i].map);
+ } else {
+ info->list[i].mtd = obsolete_probe(dev,
+ &info->list[i].map);
+ }
+ mtd_list[i] = info->list[i].mtd;
+
+ err = -ENXIO;
+ if (!info->list[i].mtd) {
+ dev_err(&dev->dev, "do_map_probe() failed\n");
+ goto err_out;
+ } else {
+ info->list_size++;
+ }
+ info->list[i].mtd->owner = THIS_MODULE;
+ info->list[i].mtd->dev.parent = &dev->dev;
}
- info->map.name = dev_name(&dev->dev);
- info->map.phys = res.start;
- info->map.size = res.end - res.start + 1;
- info->map.bankwidth = *width;
-
- err = -ENOMEM;
- info->map.virt = ioremap(info->map.phys, info->map.size);
- if (!info->map.virt) {
- dev_err(&dev->dev, "Failed to ioremap() flash region\n");
- goto err_out;
+ err = 0;
+ if (info->list_size == 1) {
+ info->cmtd = info->list[0].mtd;
+ } else if (info->list_size > 1) {
+ /*
+ * We detected multiple devices. Concatenate them together.
+ */
+#ifdef CONFIG_MTD_CONCAT
+ info->cmtd = mtd_concat_create(mtd_list, info->list_size,
+ dev_name(&dev->dev));
+ if (info->cmtd == NULL)
+ err = -ENXIO;
+#else
+ printk(KERN_ERR "physmap_of: multiple devices "
+ "found but MTD concat support disabled.\n");
+ err = -ENXIO;
+#endif
}
-
- simple_map_init(&info->map);
-
- if (probe_type)
- info->mtd = do_map_probe(probe_type, &info->map);
- else
- info->mtd = obsolete_probe(dev, &info->map);
-
- err = -ENXIO;
- if (!info->mtd) {
- dev_err(&dev->dev, "do_map_probe() failed\n");
+ if (err)
goto err_out;
- }
- info->mtd->owner = THIS_MODULE;
- info->mtd->dev.parent = &dev->dev;
#ifdef CONFIG_MTD_PARTITIONS
/* First look for RedBoot table or partitions on the command
* line, these take precedence over device tree information */
- err = parse_mtd_partitions(info->mtd, part_probe_types,
- &info->parts, 0);
+ err = parse_mtd_partitions(info->cmtd, part_probe_types,
+ &info->parts, 0);
if (err < 0)
return err;
@@ -244,15 +327,19 @@
}
if (err > 0)
- add_mtd_partitions(info->mtd, info->parts, err);
+ add_mtd_partitions(info->cmtd, info->parts, err);
else
#endif
- add_mtd_device(info->mtd);
+ add_mtd_device(info->cmtd);
+
+ kfree(mtd_list);
return 0;
err_out:
+ kfree(mtd_list);
of_flash_remove(dev);
+
return err;
}
diff --git a/drivers/mtd/maps/pmcmsp-ramroot.c b/drivers/mtd/maps/pmcmsp-ramroot.c
deleted file mode 100644
index 30de5c0..0000000
--- a/drivers/mtd/maps/pmcmsp-ramroot.c
+++ /dev/null
@@ -1,104 +0,0 @@
-/*
- * Mapping of the rootfs in a physical region of memory
- *
- * Copyright (C) 2005-2007 PMC-Sierra Inc.
- * Author: Andrew Hughes, Andrew_Hughes@pmc-sierra.com
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
- * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
- * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
- * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
- * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 675 Mass Ave, Cambridge, MA 02139, USA.
- */
-
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/slab.h>
-#include <linux/fs.h>
-#include <linux/root_dev.h>
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/map.h>
-
-#include <asm/io.h>
-
-#include <msp_prom.h>
-
-static struct mtd_info *rr_mtd;
-
-struct map_info rr_map = {
- .name = "ramroot",
- .bankwidth = 4,
-};
-
-static int __init init_rrmap(void)
-{
- void *ramroot_start;
- unsigned long ramroot_size;
-
- /* Check for supported rootfs types */
- if (get_ramroot(&ramroot_start, &ramroot_size)) {
- rr_map.phys = CPHYSADDR(ramroot_start);
- rr_map.size = ramroot_size;
-
- printk(KERN_NOTICE
- "PMC embedded root device: 0x%08lx @ 0x%08lx\n",
- rr_map.size, (unsigned long)rr_map.phys);
- } else {
- printk(KERN_ERR
- "init_rrmap: no supported embedded rootfs detected!\n");
- return -ENXIO;
- }
-
- /* Map rootfs to I/O space for block device driver */
- rr_map.virt = ioremap(rr_map.phys, rr_map.size);
- if (!rr_map.virt) {
- printk(KERN_ERR "Failed to ioremap\n");
- return -EIO;
- }
-
- simple_map_init(&rr_map);
-
- rr_mtd = do_map_probe("map_ram", &rr_map);
- if (rr_mtd) {
- rr_mtd->owner = THIS_MODULE;
-
- add_mtd_device(rr_mtd);
-
- return 0;
- }
-
- iounmap(rr_map.virt);
- return -ENXIO;
-}
-
-static void __exit cleanup_rrmap(void)
-{
- del_mtd_device(rr_mtd);
- map_destroy(rr_mtd);
-
- iounmap(rr_map.virt);
- rr_map.virt = NULL;
-}
-
-MODULE_AUTHOR("PMC-Sierra, Inc");
-MODULE_DESCRIPTION("MTD map driver for embedded PMC-Sierra MSP filesystem");
-MODULE_LICENSE("GPL");
-
-module_init(init_rrmap);
-module_exit(cleanup_rrmap);
diff --git a/drivers/mtd/maps/pxa2xx-flash.c b/drivers/mtd/maps/pxa2xx-flash.c
index 572d32f..643aa06 100644
--- a/drivers/mtd/maps/pxa2xx-flash.c
+++ b/drivers/mtd/maps/pxa2xx-flash.c
@@ -140,24 +140,6 @@
}
#ifdef CONFIG_PM
-static int pxa2xx_flash_suspend(struct platform_device *dev, pm_message_t state)
-{
- struct pxa2xx_flash_info *info = platform_get_drvdata(dev);
- int ret = 0;
-
- if (info->mtd && info->mtd->suspend)
- ret = info->mtd->suspend(info->mtd);
- return ret;
-}
-
-static int pxa2xx_flash_resume(struct platform_device *dev)
-{
- struct pxa2xx_flash_info *info = platform_get_drvdata(dev);
-
- if (info->mtd && info->mtd->resume)
- info->mtd->resume(info->mtd);
- return 0;
-}
static void pxa2xx_flash_shutdown(struct platform_device *dev)
{
struct pxa2xx_flash_info *info = platform_get_drvdata(dev);
@@ -166,8 +148,6 @@
info->mtd->resume(info->mtd);
}
#else
-#define pxa2xx_flash_suspend NULL
-#define pxa2xx_flash_resume NULL
#define pxa2xx_flash_shutdown NULL
#endif
@@ -178,8 +158,6 @@
},
.probe = pxa2xx_flash_probe,
.remove = __devexit_p(pxa2xx_flash_remove),
- .suspend = pxa2xx_flash_suspend,
- .resume = pxa2xx_flash_resume,
.shutdown = pxa2xx_flash_shutdown,
};
diff --git a/drivers/mtd/maps/rbtx4939-flash.c b/drivers/mtd/maps/rbtx4939-flash.c
index d39f0ad..83ed645 100644
--- a/drivers/mtd/maps/rbtx4939-flash.c
+++ b/drivers/mtd/maps/rbtx4939-flash.c
@@ -145,25 +145,6 @@
}
#ifdef CONFIG_PM
-static int rbtx4939_flash_suspend(struct platform_device *dev,
- pm_message_t state)
-{
- struct rbtx4939_flash_info *info = platform_get_drvdata(dev);
-
- if (info->mtd->suspend)
- return info->mtd->suspend(info->mtd);
- return 0;
-}
-
-static int rbtx4939_flash_resume(struct platform_device *dev)
-{
- struct rbtx4939_flash_info *info = platform_get_drvdata(dev);
-
- if (info->mtd->resume)
- info->mtd->resume(info->mtd);
- return 0;
-}
-
static void rbtx4939_flash_shutdown(struct platform_device *dev)
{
struct rbtx4939_flash_info *info = platform_get_drvdata(dev);
@@ -173,16 +154,12 @@
info->mtd->resume(info->mtd);
}
#else
-#define rbtx4939_flash_suspend NULL
-#define rbtx4939_flash_resume NULL
#define rbtx4939_flash_shutdown NULL
#endif
static struct platform_driver rbtx4939_flash_driver = {
.probe = rbtx4939_flash_probe,
.remove = rbtx4939_flash_remove,
- .suspend = rbtx4939_flash_suspend,
- .resume = rbtx4939_flash_resume,
.shutdown = rbtx4939_flash_shutdown,
.driver = {
.name = "rbtx4939-flash",
diff --git a/drivers/mtd/maps/sa1100-flash.c b/drivers/mtd/maps/sa1100-flash.c
index 05e9362..c6210f5 100644
--- a/drivers/mtd/maps/sa1100-flash.c
+++ b/drivers/mtd/maps/sa1100-flash.c
@@ -415,25 +415,6 @@
}
#ifdef CONFIG_PM
-static int sa1100_mtd_suspend(struct platform_device *dev, pm_message_t state)
-{
- struct sa_info *info = platform_get_drvdata(dev);
- int ret = 0;
-
- if (info)
- ret = info->mtd->suspend(info->mtd);
-
- return ret;
-}
-
-static int sa1100_mtd_resume(struct platform_device *dev)
-{
- struct sa_info *info = platform_get_drvdata(dev);
- if (info)
- info->mtd->resume(info->mtd);
- return 0;
-}
-
static void sa1100_mtd_shutdown(struct platform_device *dev)
{
struct sa_info *info = platform_get_drvdata(dev);
@@ -441,16 +422,12 @@
info->mtd->resume(info->mtd);
}
#else
-#define sa1100_mtd_suspend NULL
-#define sa1100_mtd_resume NULL
#define sa1100_mtd_shutdown NULL
#endif
static struct platform_driver sa1100_mtd_driver = {
.probe = sa1100_mtd_probe,
.remove = __exit_p(sa1100_mtd_remove),
- .suspend = sa1100_mtd_suspend,
- .resume = sa1100_mtd_resume,
.shutdown = sa1100_mtd_shutdown,
.driver = {
.name = "sa1100-mtd",
diff --git a/drivers/mtd/maps/uclinux.c b/drivers/mtd/maps/uclinux.c
index 81756e3..d4314fb 100644
--- a/drivers/mtd/maps/uclinux.c
+++ b/drivers/mtd/maps/uclinux.c
@@ -22,15 +22,19 @@
/****************************************************************************/
+extern char _ebss;
+
struct map_info uclinux_ram_map = {
.name = "RAM",
+ .phys = (unsigned long)&_ebss,
+ .size = 0,
};
-struct mtd_info *uclinux_ram_mtdinfo;
+static struct mtd_info *uclinux_ram_mtdinfo;
/****************************************************************************/
-struct mtd_partition uclinux_romfs[] = {
+static struct mtd_partition uclinux_romfs[] = {
{ .name = "ROMfs" }
};
@@ -38,7 +42,7 @@
/****************************************************************************/
-int uclinux_point(struct mtd_info *mtd, loff_t from, size_t len,
+static int uclinux_point(struct mtd_info *mtd, loff_t from, size_t len,
size_t *retlen, void **virt, resource_size_t *phys)
{
struct map_info *map = mtd->priv;
@@ -55,12 +59,10 @@
{
struct mtd_info *mtd;
struct map_info *mapp;
- extern char _ebss;
- unsigned long addr = (unsigned long) &_ebss;
mapp = &uclinux_ram_map;
- mapp->phys = addr;
- mapp->size = PAGE_ALIGN(ntohl(*((unsigned long *)(addr + 8))));
+ if (!mapp->size)
+ mapp->size = PAGE_ALIGN(ntohl(*((unsigned long *)(mapp->phys + 8))));
mapp->bankwidth = 4;
printk("uclinux[mtd]: RAM probe address=0x%x size=0x%x\n",
diff --git a/drivers/mtd/mtd_blkdevs.c b/drivers/mtd/mtd_blkdevs.c
index aaac3b6..c3f6265 100644
--- a/drivers/mtd/mtd_blkdevs.c
+++ b/drivers/mtd/mtd_blkdevs.c
@@ -291,7 +291,7 @@
gd->private_data = new;
new->blkcore_priv = gd;
gd->queue = tr->blkcore_priv->rq;
- gd->driverfs_dev = new->mtd->dev.parent;
+ gd->driverfs_dev = &new->mtd->dev;
if (new->readonly)
set_disk_ro(gd, 1);
diff --git a/drivers/mtd/mtdchar.c b/drivers/mtd/mtdchar.c
index 763d3f0..5b081cb 100644
--- a/drivers/mtd/mtdchar.c
+++ b/drivers/mtd/mtdchar.c
@@ -14,6 +14,7 @@
#include <linux/sched.h>
#include <linux/smp_lock.h>
#include <linux/backing-dev.h>
+#include <linux/compat.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/compatmac.h>
@@ -355,6 +356,100 @@
# define otp_select_filemode(f,m) -EOPNOTSUPP
#endif
+static int mtd_do_writeoob(struct file *file, struct mtd_info *mtd,
+ uint64_t start, uint32_t length, void __user *ptr,
+ uint32_t __user *retp)
+{
+ struct mtd_oob_ops ops;
+ uint32_t retlen;
+ int ret = 0;
+
+ if (!(file->f_mode & FMODE_WRITE))
+ return -EPERM;
+
+ if (length > 4096)
+ return -EINVAL;
+
+ if (!mtd->write_oob)
+ ret = -EOPNOTSUPP;
+ else
+ ret = access_ok(VERIFY_READ, ptr, length) ? 0 : -EFAULT;
+
+ if (ret)
+ return ret;
+
+ ops.ooblen = length;
+ ops.ooboffs = start & (mtd->oobsize - 1);
+ ops.datbuf = NULL;
+ ops.mode = MTD_OOB_PLACE;
+
+ if (ops.ooboffs && ops.ooblen > (mtd->oobsize - ops.ooboffs))
+ return -EINVAL;
+
+ ops.oobbuf = kmalloc(length, GFP_KERNEL);
+ if (!ops.oobbuf)
+ return -ENOMEM;
+
+ if (copy_from_user(ops.oobbuf, ptr, length)) {
+ kfree(ops.oobbuf);
+ return -EFAULT;
+ }
+
+ start &= ~((uint64_t)mtd->oobsize - 1);
+ ret = mtd->write_oob(mtd, start, &ops);
+
+ if (ops.oobretlen > 0xFFFFFFFFU)
+ ret = -EOVERFLOW;
+ retlen = ops.oobretlen;
+ if (copy_to_user(retp, &retlen, sizeof(length)))
+ ret = -EFAULT;
+
+ kfree(ops.oobbuf);
+ return ret;
+}
+
+static int mtd_do_readoob(struct mtd_info *mtd, uint64_t start,
+ uint32_t length, void __user *ptr, uint32_t __user *retp)
+{
+ struct mtd_oob_ops ops;
+ int ret = 0;
+
+ if (length > 4096)
+ return -EINVAL;
+
+ if (!mtd->read_oob)
+ ret = -EOPNOTSUPP;
+ else
+ ret = access_ok(VERIFY_WRITE, ptr,
+ length) ? 0 : -EFAULT;
+ if (ret)
+ return ret;
+
+ ops.ooblen = length;
+ ops.ooboffs = start & (mtd->oobsize - 1);
+ ops.datbuf = NULL;
+ ops.mode = MTD_OOB_PLACE;
+
+ if (ops.ooboffs && ops.ooblen > (mtd->oobsize - ops.ooboffs))
+ return -EINVAL;
+
+ ops.oobbuf = kmalloc(length, GFP_KERNEL);
+ if (!ops.oobbuf)
+ return -ENOMEM;
+
+ start &= ~((uint64_t)mtd->oobsize - 1);
+ ret = mtd->read_oob(mtd, start, &ops);
+
+ if (put_user(ops.oobretlen, retp))
+ ret = -EFAULT;
+ else if (ops.oobretlen && copy_to_user(ptr, ops.oobbuf,
+ ops.oobretlen))
+ ret = -EFAULT;
+
+ kfree(ops.oobbuf);
+ return ret;
+}
+
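Factoring the OOB paths into mtd_do_writeoob()/mtd_do_readoob() lets the classic MEMWRITEOOB/MEMREADOOB ioctls and the new 64-bit variants further down share one implementation. A user-space sketch of the 32-bit read path, assuming an MTD-enabled system with <mtd/mtd-user.h>; the device node and the 64-byte OOB size are placeholders (real code would query MEMGETINFO first):

#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <mtd/mtd-user.h>

int main(void)
{
        unsigned char oob[64];          /* assumed OOB size, see MEMGETINFO */
        struct mtd_oob_buf buf;
        int fd = open("/dev/mtd0", O_RDONLY);   /* placeholder device node */

        if (fd < 0)
                return 1;

        memset(&buf, 0, sizeof(buf));
        buf.start = 0;                  /* flash offset of the page of interest */
        buf.length = sizeof(oob);
        buf.ptr = oob;

        /* As noted in the hunk above, for MEMREADOOB the kernel writes the
         * transferred length back into the user structure's start field. */
        if (ioctl(fd, MEMREADOOB, &buf) < 0)
                perror("MEMREADOOB");

        close(fd);
        return 0;
}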
static int mtd_ioctl(struct inode *inode, struct file *file,
u_int cmd, u_long arg)
{
@@ -417,6 +512,7 @@
break;
case MEMERASE:
+ case MEMERASE64:
{
struct erase_info *erase;
@@ -427,20 +523,32 @@
if (!erase)
ret = -ENOMEM;
else {
- struct erase_info_user einfo;
-
wait_queue_head_t waitq;
DECLARE_WAITQUEUE(wait, current);
init_waitqueue_head(&waitq);
- if (copy_from_user(&einfo, argp,
- sizeof(struct erase_info_user))) {
- kfree(erase);
- return -EFAULT;
+ if (cmd == MEMERASE64) {
+ struct erase_info_user64 einfo64;
+
+ if (copy_from_user(&einfo64, argp,
+ sizeof(struct erase_info_user64))) {
+ kfree(erase);
+ return -EFAULT;
+ }
+ erase->addr = einfo64.start;
+ erase->len = einfo64.length;
+ } else {
+ struct erase_info_user einfo32;
+
+ if (copy_from_user(&einfo32, argp,
+ sizeof(struct erase_info_user))) {
+ kfree(erase);
+ return -EFAULT;
+ }
+ erase->addr = einfo32.start;
+ erase->len = einfo32.length;
}
- erase->addr = einfo.start;
- erase->len = einfo.length;
erase->mtd = mtd;
erase->callback = mtdchar_erase_callback;
erase->priv = (unsigned long)&waitq;
@@ -474,100 +582,56 @@
case MEMWRITEOOB:
{
struct mtd_oob_buf buf;
- struct mtd_oob_ops ops;
- struct mtd_oob_buf __user *user_buf = argp;
- uint32_t retlen;
+ struct mtd_oob_buf __user *buf_user = argp;
- if(!(file->f_mode & FMODE_WRITE))
- return -EPERM;
-
- if (copy_from_user(&buf, argp, sizeof(struct mtd_oob_buf)))
- return -EFAULT;
-
- if (buf.length > 4096)
- return -EINVAL;
-
- if (!mtd->write_oob)
- ret = -EOPNOTSUPP;
- else
- ret = access_ok(VERIFY_READ, buf.ptr,
- buf.length) ? 0 : EFAULT;
-
- if (ret)
- return ret;
-
- ops.ooblen = buf.length;
- ops.ooboffs = buf.start & (mtd->oobsize - 1);
- ops.datbuf = NULL;
- ops.mode = MTD_OOB_PLACE;
-
- if (ops.ooboffs && ops.ooblen > (mtd->oobsize - ops.ooboffs))
- return -EINVAL;
-
- ops.oobbuf = kmalloc(buf.length, GFP_KERNEL);
- if (!ops.oobbuf)
- return -ENOMEM;
-
- if (copy_from_user(ops.oobbuf, buf.ptr, buf.length)) {
- kfree(ops.oobbuf);
- return -EFAULT;
- }
-
- buf.start &= ~(mtd->oobsize - 1);
- ret = mtd->write_oob(mtd, buf.start, &ops);
-
- if (ops.oobretlen > 0xFFFFFFFFU)
- ret = -EOVERFLOW;
- retlen = ops.oobretlen;
- if (copy_to_user(&user_buf->length, &retlen, sizeof(buf.length)))
+ /* NOTE: writes return length to buf_user->length */
+ if (copy_from_user(&buf, argp, sizeof(buf)))
ret = -EFAULT;
-
- kfree(ops.oobbuf);
+ else
+ ret = mtd_do_writeoob(file, mtd, buf.start, buf.length,
+ buf.ptr, &buf_user->length);
break;
-
}
case MEMREADOOB:
{
struct mtd_oob_buf buf;
- struct mtd_oob_ops ops;
+ struct mtd_oob_buf __user *buf_user = argp;
- if (copy_from_user(&buf, argp, sizeof(struct mtd_oob_buf)))
- return -EFAULT;
-
- if (buf.length > 4096)
- return -EINVAL;
-
- if (!mtd->read_oob)
- ret = -EOPNOTSUPP;
+ /* NOTE: writes return length to buf_user->start */
+ if (copy_from_user(&buf, argp, sizeof(buf)))
+ ret = -EFAULT;
else
- ret = access_ok(VERIFY_WRITE, buf.ptr,
- buf.length) ? 0 : -EFAULT;
- if (ret)
- return ret;
+ ret = mtd_do_readoob(mtd, buf.start, buf.length,
+ buf.ptr, &buf_user->start);
+ break;
+ }
- ops.ooblen = buf.length;
- ops.ooboffs = buf.start & (mtd->oobsize - 1);
- ops.datbuf = NULL;
- ops.mode = MTD_OOB_PLACE;
+ case MEMWRITEOOB64:
+ {
+ struct mtd_oob_buf64 buf;
+ struct mtd_oob_buf64 __user *buf_user = argp;
- if (ops.ooboffs && ops.ooblen > (mtd->oobsize - ops.ooboffs))
- return -EINVAL;
-
- ops.oobbuf = kmalloc(buf.length, GFP_KERNEL);
- if (!ops.oobbuf)
- return -ENOMEM;
-
- buf.start &= ~(mtd->oobsize - 1);
- ret = mtd->read_oob(mtd, buf.start, &ops);
-
- if (put_user(ops.oobretlen, (uint32_t __user *)argp))
+ if (copy_from_user(&buf, argp, sizeof(buf)))
ret = -EFAULT;
- else if (ops.oobretlen && copy_to_user(buf.ptr, ops.oobbuf,
- ops.oobretlen))
- ret = -EFAULT;
+ else
+ ret = mtd_do_writeoob(file, mtd, buf.start, buf.length,
+ (void __user *)(uintptr_t)buf.usr_ptr,
+ &buf_user->length);
+ break;
+ }
- kfree(ops.oobbuf);
+ case MEMREADOOB64:
+ {
+ struct mtd_oob_buf64 buf;
+ struct mtd_oob_buf64 __user *buf_user = argp;
+
+ if (copy_from_user(&buf, argp, sizeof(buf)))
+ ret = -EFAULT;
+ else
+ ret = mtd_do_readoob(mtd, buf.start, buf.length,
+ (void __user *)(uintptr_t)buf.usr_ptr,
+ &buf_user->length);
break;
}
@@ -758,6 +822,68 @@
return ret;
} /* memory_ioctl */
+#ifdef CONFIG_COMPAT
+
+struct mtd_oob_buf32 {
+ u_int32_t start;
+ u_int32_t length;
+ compat_caddr_t ptr; /* unsigned char* */
+};
+
+#define MEMWRITEOOB32 _IOWR('M', 3, struct mtd_oob_buf32)
+#define MEMREADOOB32 _IOWR('M', 4, struct mtd_oob_buf32)
+
+static long mtd_compat_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ struct inode *inode = file->f_path.dentry->d_inode;
+ struct mtd_file_info *mfi = file->private_data;
+ struct mtd_info *mtd = mfi->mtd;
+ void __user *argp = compat_ptr(arg);
+ int ret = 0;
+
+ lock_kernel();
+
+ switch (cmd) {
+ case MEMWRITEOOB32:
+ {
+ struct mtd_oob_buf32 buf;
+ struct mtd_oob_buf32 __user *buf_user = argp;
+
+ if (copy_from_user(&buf, argp, sizeof(buf)))
+ ret = -EFAULT;
+ else
+ ret = mtd_do_writeoob(file, mtd, buf.start,
+ buf.length, compat_ptr(buf.ptr),
+ &buf_user->length);
+ break;
+ }
+
+ case MEMREADOOB32:
+ {
+ struct mtd_oob_buf32 buf;
+ struct mtd_oob_buf32 __user *buf_user = argp;
+
+ /* NOTE: writes return length to buf_user->start */
+ if (copy_from_user(&buf, argp, sizeof(buf)))
+ ret = -EFAULT;
+ else
+ ret = mtd_do_readoob(mtd, buf.start,
+ buf.length, compat_ptr(buf.ptr),
+ &buf_user->start);
+ break;
+ }
+ default:
+ ret = mtd_ioctl(inode, file, cmd, (unsigned long)argp);
+ }
+
+ unlock_kernel();
+
+ return ret;
+}
+
+#endif /* CONFIG_COMPAT */
+
/*
* try to determine where a shared mapping can be made
* - only supported for NOMMU at the moment (MMU doesn't copy private
@@ -817,6 +943,9 @@
.read = mtd_read,
.write = mtd_write,
.ioctl = mtd_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = mtd_compat_ioctl,
+#endif
.open = mtd_open,
.release = mtd_close,
.mmap = mtd_mmap,
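
The MEMWRITEOOB64/MEMREADOOB64 ioctls added above take a struct mtd_oob_buf64 whose user pointer travels as a 64-bit integer, so the same layout works for 32-bit and 64-bit callers without a compat shim. A minimal user-space sketch of a read, assuming the companion mtd-abi.h/mtd-user.h update from this series (not shown in this excerpt) provides MEMREADOOB64 and struct mtd_oob_buf64:

    /* Minimal sketch: read up to 64 OOB bytes at offset 0 through the new
     * 64-bit ioctl.  Error handling is intentionally thin. */
    #include <stdio.h>
    #include <stdint.h>
    #include <string.h>
    #include <fcntl.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <mtd/mtd-user.h>

    int main(void)
    {
        unsigned char oob[64];
        struct mtd_oob_buf64 req;
        int fd = open("/dev/mtd0", O_RDONLY);

        if (fd < 0)
            return 1;

        memset(&req, 0, sizeof(req));
        req.start   = 0;                        /* flash offset of the page */
        req.length  = sizeof(oob);              /* the driver caps this at 4096 */
        req.usr_ptr = (uint64_t)(uintptr_t)oob; /* user buffer passed as a u64 */

        if (ioctl(fd, MEMREADOOB64, &req) == 0)
            /* the kernel writes the transferred count back into req.length */
            printf("read %u OOB bytes\n", (unsigned)req.length);

        close(fd);
        return 0;
    }
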
diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c
index bccb4b1..fac54a3 100644
--- a/drivers/mtd/mtdcore.c
+++ b/drivers/mtd/mtdcore.c
@@ -23,8 +23,15 @@
#include "mtdcore.h"
+static int mtd_cls_suspend(struct device *dev, pm_message_t state);
+static int mtd_cls_resume(struct device *dev);
-static struct class *mtd_class;
+static struct class mtd_class = {
+ .name = "mtd",
+ .owner = THIS_MODULE,
+ .suspend = mtd_cls_suspend,
+ .resume = mtd_cls_resume,
+};
/* These are exported solely for the purpose of mtd_blkdevs.c. You
should not use them for _anything_ else */
@@ -52,7 +59,26 @@
/* remove /dev/mtdXro node if needed */
if (index)
- device_destroy(mtd_class, index + 1);
+ device_destroy(&mtd_class, index + 1);
+}
+
+static int mtd_cls_suspend(struct device *dev, pm_message_t state)
+{
+ struct mtd_info *mtd = dev_to_mtd(dev);
+
+ if (mtd->suspend)
+ return mtd->suspend(mtd);
+ else
+ return 0;
+}
+
+static int mtd_cls_resume(struct device *dev)
+{
+ struct mtd_info *mtd = dev_to_mtd(dev);
+
+ if (mtd->resume)
+ mtd->resume(mtd);
+ return 0;
}
static ssize_t mtd_type_show(struct device *dev,
@@ -269,7 +295,7 @@
* physical device.
*/
mtd->dev.type = &mtd_devtype;
- mtd->dev.class = mtd_class;
+ mtd->dev.class = &mtd_class;
mtd->dev.devt = MTD_DEVT(i);
dev_set_name(&mtd->dev, "mtd%d", i);
if (device_register(&mtd->dev) != 0) {
@@ -278,7 +304,7 @@
}
if (MTD_DEVT(i))
- device_create(mtd_class, mtd->dev.parent,
+ device_create(&mtd_class, mtd->dev.parent,
MTD_DEVT(i) + 1,
NULL, "mtd%dro", i);
@@ -604,11 +630,12 @@
static int __init init_mtd(void)
{
- mtd_class = class_create(THIS_MODULE, "mtd");
+ int ret;
+ ret = class_register(&mtd_class);
- if (IS_ERR(mtd_class)) {
- pr_err("Error creating mtd class.\n");
- return PTR_ERR(mtd_class);
+ if (ret) {
+ pr_err("Error registering mtd class: %d\n", ret);
+ return ret;
}
#ifdef CONFIG_PROC_FS
if ((proc_mtd = create_proc_entry( "mtd", 0, NULL )))
@@ -623,7 +650,7 @@
if (proc_mtd)
remove_proc_entry( "mtd", NULL);
#endif /* CONFIG_PROC_FS */
- class_destroy(mtd_class);
+ class_unregister(&mtd_class);
}
module_init(init_mtd);
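
With the class now registered statically, its suspend/resume methods forward system PM transitions to whichever mtd->suspend/mtd->resume hooks a chip driver installed; the matching mtdpart.c change below stops partitions from suspending a class-registered master a second time. A rough driver-side sketch of that contract (the mychip_* names are invented for illustration):

    /* Sketch only: a chip driver that fills in these hooks will have them
     * invoked through mtd_cls_suspend()/mtd_cls_resume() above. */
    #include <linux/mtd/mtd.h>

    static int mychip_suspend(struct mtd_info *mtd)
    {
        /* quiesce the controller, park the chip in a low-power state */
        return 0;
    }

    static void mychip_resume(struct mtd_info *mtd)
    {
        /* redo whatever setup the chip lost across suspend */
    }

    static int mychip_register(struct mtd_info *mtd)
    {
        mtd->suspend = mychip_suspend;
        mtd->resume  = mychip_resume;
        /* ... read/write/erase methods as usual ... */
        return add_mtd_device(mtd);
    }
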
diff --git a/drivers/mtd/mtdpart.c b/drivers/mtd/mtdpart.c
index 29675ed..349fcbe 100644
--- a/drivers/mtd/mtdpart.c
+++ b/drivers/mtd/mtdpart.c
@@ -27,9 +27,7 @@
struct mtd_info mtd;
struct mtd_info *master;
uint64_t offset;
- int index;
struct list_head list;
- int registered;
};
/*
@@ -321,8 +319,7 @@
list_for_each_entry_safe(slave, next, &mtd_partitions, list)
if (slave->master == master) {
list_del(&slave->list);
- if (slave->registered)
- del_mtd_device(&slave->mtd);
+ del_mtd_device(&slave->mtd);
kfree(slave);
}
@@ -395,7 +392,7 @@
slave->mtd.get_fact_prot_info = part_get_fact_prot_info;
if (master->sync)
slave->mtd.sync = part_sync;
- if (!partno && master->suspend && master->resume) {
+ if (!partno && !master->dev.class && master->suspend && master->resume) {
slave->mtd.suspend = part_suspend;
slave->mtd.resume = part_resume;
}
@@ -412,7 +409,6 @@
slave->mtd.erase = part_erase;
slave->master = master;
slave->offset = part->offset;
- slave->index = partno;
if (slave->offset == MTDPART_OFS_APPEND)
slave->offset = cur_offset;
@@ -500,15 +496,9 @@
}
out_register:
- if (part->mtdp) {
- /* store the object pointer (caller may or may not register it*/
- *part->mtdp = &slave->mtd;
- slave->registered = 0;
- } else {
- /* register our partition */
- add_mtd_device(&slave->mtd);
- slave->registered = 1;
- }
+ /* register our partition */
+ add_mtd_device(&slave->mtd);
+
return slave;
}
diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig
index f327689..ce96c09 100644
--- a/drivers/mtd/nand/Kconfig
+++ b/drivers/mtd/nand/Kconfig
@@ -74,6 +74,12 @@
help
Support for NAND flash on Amstrad E3 (Delta).
+config MTD_NAND_OMAP2
+ tristate "NAND Flash device on OMAP2 and OMAP3"
+ depends on ARM && MTD_NAND && (ARCH_OMAP2 || ARCH_OMAP3)
+ help
+ Support for NAND flash on Texas Instruments OMAP2 and OMAP3 platforms.
+
config MTD_NAND_TS7250
tristate "NAND Flash device on TS-7250 board"
depends on MACH_TS72XX
@@ -139,27 +145,27 @@
This enables the NAND flash driver on the PPChameleon EVB Board.
config MTD_NAND_S3C2410
- tristate "NAND Flash support for S3C2410/S3C2440 SoC"
- depends on ARCH_S3C2410
+ tristate "NAND Flash support for Samsung S3C SoCs"
+ depends on ARCH_S3C2410 || ARCH_S3C64XX
help
- This enables the NAND flash controller on the S3C2410 and S3C2440
+ This enables the NAND flash controller on the S3C24xx and S3C64xx
SoCs
No board specific support is done by this driver, each board
must advertise a platform_device for the driver to attach.
config MTD_NAND_S3C2410_DEBUG
- bool "S3C2410 NAND driver debug"
+ bool "Samsung S3C NAND driver debug"
depends on MTD_NAND_S3C2410
help
- Enable debugging of the S3C2410 NAND driver
+ Enable debugging of the S3C NAND driver
config MTD_NAND_S3C2410_HWECC
- bool "S3C2410 NAND Hardware ECC"
+ bool "Samsung S3C NAND Hardware ECC"
depends on MTD_NAND_S3C2410
help
- Enable the use of the S3C2410's internal ECC generator when
- using NAND. Early versions of the chip have had problems with
+ Enable the use of the controller's internal ECC generator when
+ using NAND. Early versions of the chips have had problems with
incorrect ECC generation, and if using these, the default of
software ECC is preferable.
@@ -171,7 +177,7 @@
NDFC Nand Flash Controllers are integrated in IBM/AMCC's 4xx SoCs
config MTD_NAND_S3C2410_CLKSTOP
- bool "S3C2410 NAND IDLE clock stop"
+ bool "Samsung S3C NAND IDLE clock stop"
depends on MTD_NAND_S3C2410
default n
help
diff --git a/drivers/mtd/nand/Makefile b/drivers/mtd/nand/Makefile
index d33860a..f3a786b 100644
--- a/drivers/mtd/nand/Makefile
+++ b/drivers/mtd/nand/Makefile
@@ -25,6 +25,7 @@
obj-$(CONFIG_MTD_NAND_NDFC) += ndfc.o
obj-$(CONFIG_MTD_NAND_ATMEL) += atmel_nand.o
obj-$(CONFIG_MTD_NAND_GPIO) += gpio.o
+obj-$(CONFIG_MTD_NAND_OMAP2) += omap2.o
obj-$(CONFIG_MTD_NAND_CM_X270) += cmx270_nand.o
obj-$(CONFIG_MTD_NAND_BASLER_EXCITE) += excite_nandflash.o
obj-$(CONFIG_MTD_NAND_PXA3xx) += pxa3xx_nand.o
diff --git a/drivers/mtd/nand/atmel_nand.c b/drivers/mtd/nand/atmel_nand.c
index 47a33ce..2802992 100644
--- a/drivers/mtd/nand/atmel_nand.c
+++ b/drivers/mtd/nand/atmel_nand.c
@@ -24,6 +24,7 @@
#include <linux/slab.h>
#include <linux/module.h>
+#include <linux/moduleparam.h>
#include <linux/platform_device.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/nand.h>
@@ -47,6 +48,9 @@
#define no_ecc 0
#endif
+static int on_flash_bbt = 0;
+module_param(on_flash_bbt, int, 0);
+
/* Register access macros */
#define ecc_readl(add, reg) \
__raw_readl(add + ATMEL_ECC_##reg)
@@ -459,12 +463,17 @@
if (host->board->det_pin) {
if (gpio_get_value(host->board->det_pin)) {
- printk("No SmartMedia card inserted.\n");
+ printk(KERN_INFO "No SmartMedia card inserted.\n");
res = ENXIO;
goto err_no_card;
}
}
+ if (on_flash_bbt) {
+ printk(KERN_INFO "atmel_nand: Use On Flash BBT\n");
+ nand_chip->options |= NAND_USE_FLASH_BBT;
+ }
+
/* first scan to find the device and get the page size */
if (nand_scan_ident(mtd, 1)) {
res = -ENXIO;
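
Since on_flash_bbt is an ordinary integer module parameter, the on-flash bad block table can presumably be requested at load time, e.g. "modprobe atmel_nand on_flash_bbt=1", or with "atmel_nand.on_flash_bbt=1" on the kernel command line when the driver is built in (the module name is inferred from the atmel_nand.o Makefile entry, not stated in this hunk).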
diff --git a/drivers/mtd/nand/bf5xx_nand.c b/drivers/mtd/nand/bf5xx_nand.c
index 4c2a67c..8506e7e 100644
--- a/drivers/mtd/nand/bf5xx_nand.c
+++ b/drivers/mtd/nand/bf5xx_nand.c
@@ -458,7 +458,7 @@
return IRQ_HANDLED;
}
-static int bf5xx_nand_dma_rw(struct mtd_info *mtd,
+static void bf5xx_nand_dma_rw(struct mtd_info *mtd,
uint8_t *buf, int is_read)
{
struct bf5xx_nand_info *info = mtd_to_nand_info(mtd);
@@ -496,11 +496,20 @@
/* setup DMA register with Blackfin DMA API */
set_dma_config(CH_NFC, 0x0);
set_dma_start_addr(CH_NFC, (unsigned long) buf);
+
+/* The DMA word size differs between BF52x and BF54x */
+#ifdef CONFIG_BF52x
+ set_dma_x_count(CH_NFC, (page_size >> 1));
+ set_dma_x_modify(CH_NFC, 2);
+ val = DI_EN | WDSIZE_16;
+#endif
+
+#ifdef CONFIG_BF54x
set_dma_x_count(CH_NFC, (page_size >> 2));
set_dma_x_modify(CH_NFC, 4);
-
- /* setup write or read operation */
val = DI_EN | WDSIZE_32;
+#endif
+ /* setup write or read operation */
if (is_read)
val |= WNR;
set_dma_config(CH_NFC, val);
@@ -512,8 +521,6 @@
else
bfin_write_NFC_PGCTL(0x2);
wait_for_completion(&info->dma_completion);
-
- return 0;
}
static void bf5xx_nand_dma_read_buf(struct mtd_info *mtd,
diff --git a/drivers/mtd/nand/davinci_nand.c b/drivers/mtd/nand/davinci_nand.c
index 02700f7..0fad648 100644
--- a/drivers/mtd/nand/davinci_nand.c
+++ b/drivers/mtd/nand/davinci_nand.c
@@ -44,7 +44,7 @@
* and some flavors of secondary chipselect (e.g. based on A12) as used
* with multichip packages.
*
- * The 1-bit ECC hardware is supported, but not yet the newer 4-bit ECC
+ * The 1-bit ECC hardware is supported, as well as the newer 4-bit ECC
* available on chips like the DM355 and OMAP-L137 and needed with the
* more error-prone MLC NAND chips.
*
@@ -54,11 +54,14 @@
struct davinci_nand_info {
struct mtd_info mtd;
struct nand_chip chip;
+ struct nand_ecclayout ecclayout;
struct device *dev;
struct clk *clk;
bool partitioned;
+ bool is_readmode;
+
void __iomem *base;
void __iomem *vaddr;
@@ -73,6 +76,7 @@
};
static DEFINE_SPINLOCK(davinci_nand_lock);
+static bool ecc4_busy;
#define to_davinci_nand(m) container_of(m, struct davinci_nand_info, mtd)
@@ -218,6 +222,192 @@
/*----------------------------------------------------------------------*/
/*
+ * 4-bit hardware ECC ... context maintained over entire AEMIF
+ *
+ * This is a syndrome engine, but we avoid NAND_ECC_HW_SYNDROME
+ * since that forces use of a problematic "infix OOB" layout.
+ * Among other things, it trashes manufacturer bad block markers.
+ * Also, and specific to this hardware, it ECC-protects the "prepad"
+ * in the OOB ... while having ECC protection for parts of OOB would
+ * seem useful, the current MTD stack sometimes wants to update the
+ * OOB without recomputing ECC.
+ */
+
+static void nand_davinci_hwctl_4bit(struct mtd_info *mtd, int mode)
+{
+ struct davinci_nand_info *info = to_davinci_nand(mtd);
+ unsigned long flags;
+ u32 val;
+
+ spin_lock_irqsave(&davinci_nand_lock, flags);
+
+ /* Start 4-bit ECC calculation for read/write */
+ val = davinci_nand_readl(info, NANDFCR_OFFSET);
+ val &= ~(0x03 << 4);
+ val |= (info->core_chipsel << 4) | BIT(12);
+ davinci_nand_writel(info, NANDFCR_OFFSET, val);
+
+ info->is_readmode = (mode == NAND_ECC_READ);
+
+ spin_unlock_irqrestore(&davinci_nand_lock, flags);
+}
+
+/* Read raw ECC code after writing to NAND. */
+static void
+nand_davinci_readecc_4bit(struct davinci_nand_info *info, u32 code[4])
+{
+ const u32 mask = 0x03ff03ff;
+
+ code[0] = davinci_nand_readl(info, NAND_4BIT_ECC1_OFFSET) & mask;
+ code[1] = davinci_nand_readl(info, NAND_4BIT_ECC2_OFFSET) & mask;
+ code[2] = davinci_nand_readl(info, NAND_4BIT_ECC3_OFFSET) & mask;
+ code[3] = davinci_nand_readl(info, NAND_4BIT_ECC4_OFFSET) & mask;
+}
+
+/* Terminate read ECC; or return ECC (as bytes) of data written to NAND. */
+static int nand_davinci_calculate_4bit(struct mtd_info *mtd,
+ const u_char *dat, u_char *ecc_code)
+{
+ struct davinci_nand_info *info = to_davinci_nand(mtd);
+ u32 raw_ecc[4], *p;
+ unsigned i;
+
+ /* After a read, terminate ECC calculation by a dummy read
+ * of some 4-bit ECC register. ECC covers everything that
+ * was read; correct() just uses the hardware state, so
+ * ecc_code is not needed.
+ */
+ if (info->is_readmode) {
+ davinci_nand_readl(info, NAND_4BIT_ECC1_OFFSET);
+ return 0;
+ }
+
+ /* Pack eight raw 10-bit ecc values into ten bytes, making
+ * two passes which each convert four values (in upper and
+ * lower halves of two 32-bit words) into five bytes. The
+ * ROM boot loader uses this same packing scheme.
+ */
+ nand_davinci_readecc_4bit(info, raw_ecc);
+ for (i = 0, p = raw_ecc; i < 2; i++, p += 2) {
+ *ecc_code++ = p[0] & 0xff;
+ *ecc_code++ = ((p[0] >> 8) & 0x03) | ((p[0] >> 14) & 0xfc);
+ *ecc_code++ = ((p[0] >> 22) & 0x0f) | ((p[1] << 4) & 0xf0);
+ *ecc_code++ = ((p[1] >> 4) & 0x3f) | ((p[1] >> 10) & 0xc0);
+ *ecc_code++ = (p[1] >> 18) & 0xff;
+ }
+
+ return 0;
+}
+
+/* Correct up to 4 bits in data we just read, using state left in the
+ * hardware plus the ecc_code computed when it was first written.
+ */
+static int nand_davinci_correct_4bit(struct mtd_info *mtd,
+ u_char *data, u_char *ecc_code, u_char *null)
+{
+ int i;
+ struct davinci_nand_info *info = to_davinci_nand(mtd);
+ unsigned short ecc10[8];
+ unsigned short *ecc16;
+ u32 syndrome[4];
+ unsigned num_errors, corrected;
+
+ /* All bytes 0xff? It's an erased page; ignore its ECC. */
+ for (i = 0; i < 10; i++) {
+ if (ecc_code[i] != 0xff)
+ goto compare;
+ }
+ return 0;
+
+compare:
+ /* Unpack ten bytes into eight 10 bit values. We know we're
+ * little-endian, and use type punning for less shifting/masking.
+ */
+ if (WARN_ON(0x01 & (unsigned) ecc_code))
+ return -EINVAL;
+ ecc16 = (unsigned short *)ecc_code;
+
+ ecc10[0] = (ecc16[0] >> 0) & 0x3ff;
+ ecc10[1] = ((ecc16[0] >> 10) & 0x3f) | ((ecc16[1] << 6) & 0x3c0);
+ ecc10[2] = (ecc16[1] >> 4) & 0x3ff;
+ ecc10[3] = ((ecc16[1] >> 14) & 0x3) | ((ecc16[2] << 2) & 0x3fc);
+ ecc10[4] = (ecc16[2] >> 8) | ((ecc16[3] << 8) & 0x300);
+ ecc10[5] = (ecc16[3] >> 2) & 0x3ff;
+ ecc10[6] = ((ecc16[3] >> 12) & 0xf) | ((ecc16[4] << 4) & 0x3f0);
+ ecc10[7] = (ecc16[4] >> 6) & 0x3ff;
+
+ /* Tell ECC controller about the expected ECC codes. */
+ for (i = 7; i >= 0; i--)
+ davinci_nand_writel(info, NAND_4BIT_ECC_LOAD_OFFSET, ecc10[i]);
+
+ /* Allow time for syndrome calculation ... then read it.
+ * A syndrome of all zeroes means no detected errors.
+ */
+ davinci_nand_readl(info, NANDFSR_OFFSET);
+ nand_davinci_readecc_4bit(info, syndrome);
+ if (!(syndrome[0] | syndrome[1] | syndrome[2] | syndrome[3]))
+ return 0;
+
+ /* Start address calculation, and wait for it to complete.
+ * We _could_ start reading more data while this is working,
+ * to speed up the overall page read.
+ */
+ davinci_nand_writel(info, NANDFCR_OFFSET,
+ davinci_nand_readl(info, NANDFCR_OFFSET) | BIT(13));
+ for (;;) {
+ u32 fsr = davinci_nand_readl(info, NANDFSR_OFFSET);
+
+ switch ((fsr >> 8) & 0x0f) {
+ case 0: /* no error, should not happen */
+ return 0;
+ case 1: /* five or more errors detected */
+ return -EIO;
+ case 2: /* error addresses computed */
+ case 3:
+ num_errors = 1 + ((fsr >> 16) & 0x03);
+ goto correct;
+ default: /* still working on it */
+ cpu_relax();
+ continue;
+ }
+ }
+
+correct:
+ /* correct each error */
+ for (i = 0, corrected = 0; i < num_errors; i++) {
+ int error_address, error_value;
+
+ if (i > 1) {
+ error_address = davinci_nand_readl(info,
+ NAND_ERR_ADD2_OFFSET);
+ error_value = davinci_nand_readl(info,
+ NAND_ERR_ERRVAL2_OFFSET);
+ } else {
+ error_address = davinci_nand_readl(info,
+ NAND_ERR_ADD1_OFFSET);
+ error_value = davinci_nand_readl(info,
+ NAND_ERR_ERRVAL1_OFFSET);
+ }
+
+ if (i & 1) {
+ error_address >>= 16;
+ error_value >>= 16;
+ }
+ error_address &= 0x3ff;
+ error_address = (512 + 7) - error_address;
+
+ if (error_address < 512) {
+ data[error_address] ^= error_value;
+ corrected++;
+ }
+ }
+
+ return corrected;
+}
+
+/*----------------------------------------------------------------------*/
+
+/*
* NOTE: NAND boot requires ALE == EM_A[1], CLE == EM_A[2], so that's
* how these chips are normally wired. This translates to both 8 and 16
* bit busses using ALE == BIT(3) in byte addresses, and CLE == BIT(4).
@@ -294,6 +484,23 @@
/*----------------------------------------------------------------------*/
+/* An ECC layout for using 4-bit ECC with small-page flash, storing
+ * ten ECC bytes plus the manufacturer's bad block marker byte, and
+ * not overlapping the default BBT markers.
+ */
+static struct nand_ecclayout hwecc4_small __initconst = {
+ .eccbytes = 10,
+ .eccpos = { 0, 1, 2, 3, 4,
+ /* offset 5 holds the badblock marker */
+ 6, 7,
+ 13, 14, 15, },
+ .oobfree = {
+ {.offset = 8, .length = 5, },
+ {.offset = 16, },
+ },
+};
+
+
static int __init nand_davinci_probe(struct platform_device *pdev)
{
struct davinci_nand_pdata *pdata = pdev->dev.platform_data;
@@ -306,6 +513,10 @@
uint32_t val;
nand_ecc_modes_t ecc_mode;
+ /* insist on board-specific configuration */
+ if (!pdata)
+ return -ENODEV;
+
/* which external chipselect will we be managing? */
if (pdev->id < 0 || pdev->id > 3)
return -ENODEV;
@@ -351,7 +562,7 @@
info->chip.select_chip = nand_davinci_select_chip;
/* options such as NAND_USE_FLASH_BBT or 16-bit widths */
- info->chip.options = pdata ? pdata->options : 0;
+ info->chip.options = pdata->options;
info->ioaddr = (uint32_t __force) vaddr;
@@ -360,14 +571,8 @@
info->mask_chipsel = pdata->mask_chipsel;
/* use nandboot-capable ALE/CLE masks by default */
- if (pdata && pdata->mask_ale)
- info->mask_ale = pdata->mask_cle;
- else
- info->mask_ale = MASK_ALE;
- if (pdata && pdata->mask_cle)
- info->mask_cle = pdata->mask_cle;
- else
- info->mask_cle = MASK_CLE;
+ info->mask_ale = pdata->mask_ale ? : MASK_ALE;
+ info->mask_cle = pdata->mask_cle ? : MASK_CLE;
/* Set address of hardware control function */
info->chip.cmd_ctrl = nand_davinci_hwcontrol;
@@ -377,30 +582,44 @@
info->chip.read_buf = nand_davinci_read_buf;
info->chip.write_buf = nand_davinci_write_buf;
- /* use board-specific ECC config; else, the best available */
- if (pdata)
- ecc_mode = pdata->ecc_mode;
- else
- ecc_mode = NAND_ECC_HW;
+ /* Use board-specific ECC config */
+ ecc_mode = pdata->ecc_mode;
+ ret = -EINVAL;
switch (ecc_mode) {
case NAND_ECC_NONE:
case NAND_ECC_SOFT:
+ pdata->ecc_bits = 0;
break;
case NAND_ECC_HW:
- info->chip.ecc.calculate = nand_davinci_calculate_1bit;
- info->chip.ecc.correct = nand_davinci_correct_1bit;
- info->chip.ecc.hwctl = nand_davinci_hwctl_1bit;
- info->chip.ecc.size = 512;
- info->chip.ecc.bytes = 3;
- break;
- case NAND_ECC_HW_SYNDROME:
- /* FIXME implement */
- info->chip.ecc.size = 512;
- info->chip.ecc.bytes = 10;
+ if (pdata->ecc_bits == 4) {
+ /* No sanity checks: CPUs must support this,
+ * and the chips may not use NAND_BUSWIDTH_16.
+ */
- dev_warn(&pdev->dev, "4-bit ECC nyet supported\n");
- /* FALL THROUGH */
+ /* No sharing 4-bit hardware between chipselects yet */
+ spin_lock_irq(&davinci_nand_lock);
+ if (ecc4_busy)
+ ret = -EBUSY;
+ else
+ ecc4_busy = true;
+ spin_unlock_irq(&davinci_nand_lock);
+
+ if (ret == -EBUSY)
+ goto err_ecc;
+
+ info->chip.ecc.calculate = nand_davinci_calculate_4bit;
+ info->chip.ecc.correct = nand_davinci_correct_4bit;
+ info->chip.ecc.hwctl = nand_davinci_hwctl_4bit;
+ info->chip.ecc.bytes = 10;
+ } else {
+ info->chip.ecc.calculate = nand_davinci_calculate_1bit;
+ info->chip.ecc.correct = nand_davinci_correct_1bit;
+ info->chip.ecc.hwctl = nand_davinci_hwctl_1bit;
+ info->chip.ecc.bytes = 3;
+ }
+ info->chip.ecc.size = 512;
+ break;
default:
ret = -EINVAL;
goto err_ecc;
@@ -441,12 +660,56 @@
spin_unlock_irq(&davinci_nand_lock);
/* Scan to find existence of the device(s) */
- ret = nand_scan(&info->mtd, pdata->mask_chipsel ? 2 : 1);
+ ret = nand_scan_ident(&info->mtd, pdata->mask_chipsel ? 2 : 1);
if (ret < 0) {
dev_dbg(&pdev->dev, "no NAND chip(s) found\n");
goto err_scan;
}
+ /* Update ECC layout if needed ... for 1-bit HW ECC, the default
+ * is OK, but it allocates 6 bytes when only 3 are needed (for
+ * each 512 bytes). For the 4-bit HW ECC, that default is not
+ * usable: 10 bytes are needed, not 6.
+ */
+ if (pdata->ecc_bits == 4) {
+ int chunks = info->mtd.writesize / 512;
+
+ if (!chunks || info->mtd.oobsize < 16) {
+ dev_dbg(&pdev->dev, "too small\n");
+ ret = -EINVAL;
+ goto err_scan;
+ }
+
+ /* For small page chips, preserve the manufacturer's
+ * badblock marking data ... and make sure a flash BBT
+ * table marker fits in the free bytes.
+ */
+ if (chunks == 1) {
+ info->ecclayout = hwecc4_small;
+ info->ecclayout.oobfree[1].length =
+ info->mtd.oobsize - 16;
+ goto syndrome_done;
+ }
+
+ /* For large page chips we'll be wanting to use a
+ * not-yet-implemented mode that reads OOB data
+ * before reading the body of the page, to avoid
+ * the "infix OOB" model of NAND_ECC_HW_SYNDROME
+ * (and preserve manufacturer badblock markings).
+ */
+ dev_warn(&pdev->dev, "no 4-bit ECC support yet "
+ "for large page NAND\n");
+ ret = -EIO;
+ goto err_scan;
+
+syndrome_done:
+ info->chip.ecc.layout = &info->ecclayout;
+ }
+
+ ret = nand_scan_tail(&info->mtd);
+ if (ret < 0)
+ goto err_scan;
+
if (mtd_has_partitions()) {
struct mtd_partition *mtd_parts = NULL;
int mtd_parts_nb = 0;
@@ -455,22 +718,11 @@
static const char *probes[] __initconst =
{ "cmdlinepart", NULL };
- const char *master_name;
-
- /* Set info->mtd.name = 0 temporarily */
- master_name = info->mtd.name;
- info->mtd.name = (char *)0;
-
- /* info->mtd.name == 0, means: don't bother checking
- <mtd-id> */
mtd_parts_nb = parse_mtd_partitions(&info->mtd, probes,
&mtd_parts, 0);
-
- /* Restore info->mtd.name */
- info->mtd.name = master_name;
}
- if (mtd_parts_nb <= 0 && pdata) {
+ if (mtd_parts_nb <= 0) {
mtd_parts = pdata->parts;
mtd_parts_nb = pdata->nr_parts;
}
@@ -483,7 +735,7 @@
info->partitioned = true;
}
- } else if (pdata && pdata->nr_parts) {
+ } else if (pdata->nr_parts) {
dev_warn(&pdev->dev, "ignoring %d default partitions on %s\n",
pdata->nr_parts, info->mtd.name);
}
@@ -509,6 +761,11 @@
err_clk_enable:
clk_put(info->clk);
+ spin_lock_irq(&davinci_nand_lock);
+ if (ecc_mode == NAND_ECC_HW_SYNDROME)
+ ecc4_busy = false;
+ spin_unlock_irq(&davinci_nand_lock);
+
err_ecc:
err_clk:
err_ioremap:
@@ -532,6 +789,11 @@
else
status = del_mtd_device(&info->mtd);
+ spin_lock_irq(&davinci_nand_lock);
+ if (info->chip.ecc.mode == NAND_ECC_HW_SYNDROME)
+ ecc4_busy = false;
+ spin_unlock_irq(&davinci_nand_lock);
+
iounmap(info->base);
iounmap(info->vaddr);
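
The pack and unpack halves of the 4-bit ECC path must agree bit for bit: calculate() squeezes eight 10-bit syndrome values into ten OOB bytes, and correct() later reverses that through a little-endian 16-bit view. A small host-side round-trip check of just that packing, with the ECC register words simulated (an illustration only, not driver code):

    /* Round-trip check of the 10-bit packing used by the 4-bit ECC code
     * above.  Assumes a little-endian host, as the driver's comment does. */
    #include <stdio.h>
    #include <stdint.h>
    #include <string.h>

    int main(void)
    {
        uint16_t v[8], out[8], ecc16[5];
        uint32_t raw[4], *p;
        uint8_t packed[10], *e = packed;
        int i;

        for (i = 0; i < 8; i++)                 /* arbitrary 10-bit values */
            v[i] = (0x155 * (i + 1)) & 0x3ff;

        for (i = 0; i < 4; i++)                 /* two values per ECC word */
            raw[i] = v[2 * i] | ((uint32_t)v[2 * i + 1] << 16);

        for (i = 0, p = raw; i < 2; i++, p += 2) {   /* pack: calculate_4bit() */
            *e++ = p[0] & 0xff;
            *e++ = ((p[0] >> 8) & 0x03) | ((p[0] >> 14) & 0xfc);
            *e++ = ((p[0] >> 22) & 0x0f) | ((p[1] << 4) & 0xf0);
            *e++ = ((p[1] >> 4) & 0x3f) | ((p[1] >> 10) & 0xc0);
            *e++ = (p[1] >> 18) & 0xff;
        }

        memcpy(ecc16, packed, sizeof(packed));  /* unpack: correct_4bit() */
        out[0] = (ecc16[0] >> 0) & 0x3ff;
        out[1] = ((ecc16[0] >> 10) & 0x3f) | ((ecc16[1] << 6) & 0x3c0);
        out[2] = (ecc16[1] >> 4) & 0x3ff;
        out[3] = ((ecc16[1] >> 14) & 0x3) | ((ecc16[2] << 2) & 0x3fc);
        out[4] = (ecc16[2] >> 8) | ((ecc16[3] << 8) & 0x300);
        out[5] = (ecc16[3] >> 2) & 0x3ff;
        out[6] = ((ecc16[3] >> 12) & 0xf) | ((ecc16[4] << 4) & 0x3f0);
        out[7] = (ecc16[4] >> 6) & 0x3ff;

        printf("round trip %s\n", memcmp(v, out, sizeof(v)) ? "FAILED" : "ok");
        return 0;
    }
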
diff --git a/drivers/mtd/nand/mxc_nand.c b/drivers/mtd/nand/mxc_nand.c
index 40c2608..76beea4 100644
--- a/drivers/mtd/nand/mxc_nand.c
+++ b/drivers/mtd/nand/mxc_nand.c
@@ -138,7 +138,14 @@
static struct nand_ecclayout nand_hw_eccoob_16 = {
.eccbytes = 5,
.eccpos = {6, 7, 8, 9, 10},
- .oobfree = {{0, 6}, {12, 4}, }
+ .oobfree = {{0, 5}, {11, 5}, }
+};
+
+static struct nand_ecclayout nand_hw_eccoob_64 = {
+ .eccbytes = 20,
+ .eccpos = {6, 7, 8, 9, 10, 22, 23, 24, 25, 26,
+ 38, 39, 40, 41, 42, 54, 55, 56, 57, 58},
+ .oobfree = {{2, 4}, {11, 10}, {27, 10}, {43, 10}, {59, 5}, }
};
#ifdef CONFIG_MTD_PARTITIONS
@@ -192,7 +199,7 @@
}
udelay(1);
}
- if (max_retries <= 0)
+ if (max_retries < 0)
DEBUG(MTD_DEBUG_LEVEL0, "%s(%d): INT not set\n",
__func__, param);
}
@@ -795,9 +802,13 @@
send_addr(host, (page_addr & 0xff), false);
if (host->pagesize_2k) {
- send_addr(host, (page_addr >> 8) & 0xFF, false);
- if (mtd->size >= 0x40000000)
+ if (mtd->size >= 0x10000000) {
+ /* paddr_8 - paddr_15 */
+ send_addr(host, (page_addr >> 8) & 0xff, false);
send_addr(host, (page_addr >> 16) & 0xff, true);
+ } else
+ /* paddr_8 - paddr_15 */
+ send_addr(host, (page_addr >> 8) & 0xff, true);
} else {
/* One more address cycle for higher density devices */
if (mtd->size >= 0x4000000) {
@@ -923,7 +934,6 @@
this->ecc.mode = NAND_ECC_HW;
this->ecc.size = 512;
this->ecc.bytes = 3;
- this->ecc.layout = &nand_hw_eccoob_8;
tmp = readw(host->regs + NFC_CONFIG1);
tmp |= NFC_ECC_EN;
writew(tmp, host->regs + NFC_CONFIG1);
@@ -957,12 +967,44 @@
this->ecc.layout = &nand_hw_eccoob_16;
}
- host->pagesize_2k = 0;
+ /* first scan to find the device and get the page size */
+ if (nand_scan_ident(mtd, 1)) {
+ err = -ENXIO;
+ goto escan;
+ }
- /* Scan to find existence of the device */
- if (nand_scan(mtd, 1)) {
- DEBUG(MTD_DEBUG_LEVEL0,
- "MXC_ND: Unable to find any NAND device.\n");
+ host->pagesize_2k = (mtd->writesize == 2048) ? 1 : 0;
+
+ if (this->ecc.mode == NAND_ECC_HW) {
+ switch (mtd->oobsize) {
+ case 8:
+ this->ecc.layout = &nand_hw_eccoob_8;
+ break;
+ case 16:
+ this->ecc.layout = &nand_hw_eccoob_16;
+ break;
+ case 64:
+ this->ecc.layout = &nand_hw_eccoob_64;
+ break;
+ default:
+ /* page size not handled by HW ECC */
+ /* switching back to soft ECC */
+ this->ecc.size = 512;
+ this->ecc.bytes = 3;
+ this->ecc.layout = &nand_hw_eccoob_8;
+ this->ecc.mode = NAND_ECC_SOFT;
+ this->ecc.calculate = NULL;
+ this->ecc.correct = NULL;
+ this->ecc.hwctl = NULL;
+ tmp = readw(host->regs + NFC_CONFIG1);
+ tmp &= ~NFC_ECC_EN;
+ writew(tmp, host->regs + NFC_CONFIG1);
+ break;
+ }
+ }
+
+ /* second phase scan */
+ if (nand_scan_tail(mtd)) {
err = -ENXIO;
goto escan;
}
@@ -985,7 +1027,7 @@
return 0;
escan:
- free_irq(host->irq, NULL);
+ free_irq(host->irq, host);
eirq:
iounmap(host->regs);
eres:
@@ -1005,7 +1047,7 @@
platform_set_drvdata(pdev, NULL);
nand_release(&host->mtd);
- free_irq(host->irq, NULL);
+ free_irq(host->irq, host);
iounmap(host->regs);
kfree(host);
diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
index 3d7ed43..8c21b89 100644
--- a/drivers/mtd/nand/nand_base.c
+++ b/drivers/mtd/nand/nand_base.c
@@ -2756,7 +2756,8 @@
* the out of band area
*/
chip->ecc.layout->oobavail = 0;
- for (i = 0; chip->ecc.layout->oobfree[i].length; i++)
+ for (i = 0; i < ARRAY_SIZE(chip->ecc.layout->oobfree)
+ && chip->ecc.layout->oobfree[i].length; i++)
chip->ecc.layout->oobavail +=
chip->ecc.layout->oobfree[i].length;
mtd->oobavail = chip->ecc.layout->oobavail;
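
With that bound in place, oobavail is simply the sum of the oobfree lengths; for instance, the nand_hw_eccoob_64 layout added to mxc_nand.c above works out to 4 + 10 + 10 + 10 + 5 = 39 spare bytes out of a 64-byte OOB area.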
diff --git a/drivers/mtd/nand/nand_ecc.c b/drivers/mtd/nand/nand_ecc.c
index 868147a..c0cb87d 100644
--- a/drivers/mtd/nand/nand_ecc.c
+++ b/drivers/mtd/nand/nand_ecc.c
@@ -428,8 +428,8 @@
int nand_correct_data(struct mtd_info *mtd, unsigned char *buf,
unsigned char *read_ecc, unsigned char *calc_ecc)
{
- unsigned char b0, b1, b2;
- unsigned char byte_addr, bit_addr;
+ unsigned char b0, b1, b2, bit_addr;
+ unsigned int byte_addr;
/* 256 or 512 bytes/ecc */
const uint32_t eccsize_mult =
(((struct nand_chip *)mtd->priv)->ecc.size) >> 8;
diff --git a/drivers/mtd/nand/omap2.c b/drivers/mtd/nand/omap2.c
new file mode 100644
index 0000000..0cd76f8
--- /dev/null
+++ b/drivers/mtd/nand/omap2.c
@@ -0,0 +1,776 @@
+/*
+ * Copyright © 2004 Texas Instruments, Jian Zhang <jzhang@ti.com>
+ * Copyright © 2004 Micron Technology Inc.
+ * Copyright © 2004 David Brownell
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <linux/delay.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/nand.h>
+#include <linux/mtd/partitions.h>
+#include <linux/io.h>
+
+#include <asm/dma.h>
+
+#include <mach/gpmc.h>
+#include <mach/nand.h>
+
+#define GPMC_IRQ_STATUS 0x18
+#define GPMC_ECC_CONFIG 0x1F4
+#define GPMC_ECC_CONTROL 0x1F8
+#define GPMC_ECC_SIZE_CONFIG 0x1FC
+#define GPMC_ECC1_RESULT 0x200
+
+#define DRIVER_NAME "omap2-nand"
+
+/* size (4 KiB) for IO mapping */
+#define NAND_IO_SIZE SZ_4K
+
+#define NAND_WP_OFF 0
+#define NAND_WP_BIT 0x00000010
+#define WR_RD_PIN_MONITORING 0x00600000
+
+#define GPMC_BUF_FULL 0x00000001
+#define GPMC_BUF_EMPTY 0x00000000
+
+#define NAND_Ecc_P1e (1 << 0)
+#define NAND_Ecc_P2e (1 << 1)
+#define NAND_Ecc_P4e (1 << 2)
+#define NAND_Ecc_P8e (1 << 3)
+#define NAND_Ecc_P16e (1 << 4)
+#define NAND_Ecc_P32e (1 << 5)
+#define NAND_Ecc_P64e (1 << 6)
+#define NAND_Ecc_P128e (1 << 7)
+#define NAND_Ecc_P256e (1 << 8)
+#define NAND_Ecc_P512e (1 << 9)
+#define NAND_Ecc_P1024e (1 << 10)
+#define NAND_Ecc_P2048e (1 << 11)
+
+#define NAND_Ecc_P1o (1 << 16)
+#define NAND_Ecc_P2o (1 << 17)
+#define NAND_Ecc_P4o (1 << 18)
+#define NAND_Ecc_P8o (1 << 19)
+#define NAND_Ecc_P16o (1 << 20)
+#define NAND_Ecc_P32o (1 << 21)
+#define NAND_Ecc_P64o (1 << 22)
+#define NAND_Ecc_P128o (1 << 23)
+#define NAND_Ecc_P256o (1 << 24)
+#define NAND_Ecc_P512o (1 << 25)
+#define NAND_Ecc_P1024o (1 << 26)
+#define NAND_Ecc_P2048o (1 << 27)
+
+#define TF(value) (value ? 1 : 0)
+
+#define P2048e(a) (TF(a & NAND_Ecc_P2048e) << 0)
+#define P2048o(a) (TF(a & NAND_Ecc_P2048o) << 1)
+#define P1e(a) (TF(a & NAND_Ecc_P1e) << 2)
+#define P1o(a) (TF(a & NAND_Ecc_P1o) << 3)
+#define P2e(a) (TF(a & NAND_Ecc_P2e) << 4)
+#define P2o(a) (TF(a & NAND_Ecc_P2o) << 5)
+#define P4e(a) (TF(a & NAND_Ecc_P4e) << 6)
+#define P4o(a) (TF(a & NAND_Ecc_P4o) << 7)
+
+#define P8e(a) (TF(a & NAND_Ecc_P8e) << 0)
+#define P8o(a) (TF(a & NAND_Ecc_P8o) << 1)
+#define P16e(a) (TF(a & NAND_Ecc_P16e) << 2)
+#define P16o(a) (TF(a & NAND_Ecc_P16o) << 3)
+#define P32e(a) (TF(a & NAND_Ecc_P32e) << 4)
+#define P32o(a) (TF(a & NAND_Ecc_P32o) << 5)
+#define P64e(a) (TF(a & NAND_Ecc_P64e) << 6)
+#define P64o(a) (TF(a & NAND_Ecc_P64o) << 7)
+
+#define P128e(a) (TF(a & NAND_Ecc_P128e) << 0)
+#define P128o(a) (TF(a & NAND_Ecc_P128o) << 1)
+#define P256e(a) (TF(a & NAND_Ecc_P256e) << 2)
+#define P256o(a) (TF(a & NAND_Ecc_P256o) << 3)
+#define P512e(a) (TF(a & NAND_Ecc_P512e) << 4)
+#define P512o(a) (TF(a & NAND_Ecc_P512o) << 5)
+#define P1024e(a) (TF(a & NAND_Ecc_P1024e) << 6)
+#define P1024o(a) (TF(a & NAND_Ecc_P1024o) << 7)
+
+#define P8e_s(a) (TF(a & NAND_Ecc_P8e) << 0)
+#define P8o_s(a) (TF(a & NAND_Ecc_P8o) << 1)
+#define P16e_s(a) (TF(a & NAND_Ecc_P16e) << 2)
+#define P16o_s(a) (TF(a & NAND_Ecc_P16o) << 3)
+#define P1e_s(a) (TF(a & NAND_Ecc_P1e) << 4)
+#define P1o_s(a) (TF(a & NAND_Ecc_P1o) << 5)
+#define P2e_s(a) (TF(a & NAND_Ecc_P2e) << 6)
+#define P2o_s(a) (TF(a & NAND_Ecc_P2o) << 7)
+
+#define P4e_s(a) (TF(a & NAND_Ecc_P4e) << 0)
+#define P4o_s(a) (TF(a & NAND_Ecc_P4o) << 1)
+
+#ifdef CONFIG_MTD_PARTITIONS
+static const char *part_probes[] = { "cmdlinepart", NULL };
+#endif
+
+struct omap_nand_info {
+ struct nand_hw_control controller;
+ struct omap_nand_platform_data *pdata;
+ struct mtd_info mtd;
+ struct mtd_partition *parts;
+ struct nand_chip nand;
+ struct platform_device *pdev;
+
+ int gpmc_cs;
+ unsigned long phys_base;
+ void __iomem *gpmc_cs_baseaddr;
+ void __iomem *gpmc_baseaddr;
+};
+
+/**
+ * omap_nand_wp - This function enables or disables the Write Protect feature
+ * @mtd: MTD device structure
+ * @mode: WP ON/OFF
+ */
+static void omap_nand_wp(struct mtd_info *mtd, int mode)
+{
+ struct omap_nand_info *info = container_of(mtd,
+ struct omap_nand_info, mtd);
+
+ unsigned long config = __raw_readl(info->gpmc_baseaddr + GPMC_CONFIG);
+
+ if (mode)
+ config &= ~(NAND_WP_BIT); /* WP is ON */
+ else
+ config |= (NAND_WP_BIT); /* WP is OFF */
+
+ __raw_writel(config, (info->gpmc_baseaddr + GPMC_CONFIG));
+}
+
+/**
+ * omap_hwcontrol - hardware specific access to control-lines
+ * @mtd: MTD device structure
+ * @cmd: command to device
+ * @ctrl:
+ * NAND_NCE: bit 0 -> don't care
+ * NAND_CLE: bit 1 -> Command Latch
+ * NAND_ALE: bit 2 -> Address Latch
+ *
+ * NOTE: boards may use different bits for these!!
+ */
+static void omap_hwcontrol(struct mtd_info *mtd, int cmd, unsigned int ctrl)
+{
+ struct omap_nand_info *info = container_of(mtd,
+ struct omap_nand_info, mtd);
+ switch (ctrl) {
+ case NAND_CTRL_CHANGE | NAND_CTRL_CLE:
+ info->nand.IO_ADDR_W = info->gpmc_cs_baseaddr +
+ GPMC_CS_NAND_COMMAND;
+ info->nand.IO_ADDR_R = info->gpmc_cs_baseaddr +
+ GPMC_CS_NAND_DATA;
+ break;
+
+ case NAND_CTRL_CHANGE | NAND_CTRL_ALE:
+ info->nand.IO_ADDR_W = info->gpmc_cs_baseaddr +
+ GPMC_CS_NAND_ADDRESS;
+ info->nand.IO_ADDR_R = info->gpmc_cs_baseaddr +
+ GPMC_CS_NAND_DATA;
+ break;
+
+ case NAND_CTRL_CHANGE | NAND_NCE:
+ info->nand.IO_ADDR_W = info->gpmc_cs_baseaddr +
+ GPMC_CS_NAND_DATA;
+ info->nand.IO_ADDR_R = info->gpmc_cs_baseaddr +
+ GPMC_CS_NAND_DATA;
+ break;
+ }
+
+ if (cmd != NAND_CMD_NONE)
+ __raw_writeb(cmd, info->nand.IO_ADDR_W);
+}
+
+/**
+ * omap_read_buf16 - read data from NAND controller into buffer
+ * @mtd: MTD device structure
+ * @buf: buffer to store data
+ * @len: number of bytes to read
+ */
+static void omap_read_buf16(struct mtd_info *mtd, u_char *buf, int len)
+{
+ struct nand_chip *nand = mtd->priv;
+
+ __raw_readsw(nand->IO_ADDR_R, buf, len / 2);
+}
+
+/**
+ * omap_write_buf16 - write buffer to NAND controller
+ * @mtd: MTD device structure
+ * @buf: data buffer
+ * @len: number of bytes to write
+ */
+static void omap_write_buf16(struct mtd_info *mtd, const u_char * buf, int len)
+{
+ struct omap_nand_info *info = container_of(mtd,
+ struct omap_nand_info, mtd);
+ u16 *p = (u16 *) buf;
+
+ /* FIXME try bursts of writesw() or DMA ... */
+ len >>= 1;
+
+ while (len--) {
+ writew(*p++, info->nand.IO_ADDR_W);
+
+ while (GPMC_BUF_EMPTY == (readl(info->gpmc_baseaddr +
+ GPMC_STATUS) & GPMC_BUF_FULL))
+ ;
+ }
+}
+/**
+ * omap_verify_buf - Verify chip data against buffer
+ * @mtd: MTD device structure
+ * @buf: buffer containing the data to compare
+ * @len: number of bytes to compare
+ */
+static int omap_verify_buf(struct mtd_info *mtd, const u_char * buf, int len)
+{
+ struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
+ mtd);
+ u16 *p = (u16 *) buf;
+
+ len >>= 1;
+ while (len--) {
+ if (*p++ != cpu_to_le16(readw(info->nand.IO_ADDR_R)))
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+#ifdef CONFIG_MTD_NAND_OMAP_HWECC
+/**
+ * omap_hwecc_init - Initialize the HW ECC for NAND flash in GPMC controller
+ * @mtd: MTD device structure
+ */
+static void omap_hwecc_init(struct mtd_info *mtd)
+{
+ struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
+ mtd);
+ struct nand_chip *chip = mtd->priv;
+ unsigned long val = 0x0;
+
+ /* Read from ECC Control Register */
+ val = __raw_readl(info->gpmc_baseaddr + GPMC_ECC_CONTROL);
+ /* Clear all ECC | Enable Reg1 */
+ val = ((0x00000001<<8) | 0x00000001);
+ __raw_writel(val, info->gpmc_baseaddr + GPMC_ECC_CONTROL);
+
+ /* Read from ECC Size Config Register */
+ val = __raw_readl(info->gpmc_baseaddr + GPMC_ECC_SIZE_CONFIG);
+ /* ECCSIZE1=512 | Select eccResultsize[0-3] */
+ val = ((((chip->ecc.size >> 1) - 1) << 22) | (0x0000000F));
+ __raw_writel(val, info->gpmc_baseaddr + GPMC_ECC_SIZE_CONFIG);
+}
+
+/**
+ * gen_true_ecc - This function will generate true ECC value
+ * @ecc_buf: buffer to store ecc code
+ *
+ * This generated true ECC value can be used when correcting
+ * data read from NAND flash memory core
+ */
+static void gen_true_ecc(u8 *ecc_buf)
+{
+ u32 tmp = ecc_buf[0] | (ecc_buf[1] << 16) |
+ ((ecc_buf[2] & 0xF0) << 20) | ((ecc_buf[2] & 0x0F) << 8);
+
+ ecc_buf[0] = ~(P64o(tmp) | P64e(tmp) | P32o(tmp) | P32e(tmp) |
+ P16o(tmp) | P16e(tmp) | P8o(tmp) | P8e(tmp));
+ ecc_buf[1] = ~(P1024o(tmp) | P1024e(tmp) | P512o(tmp) | P512e(tmp) |
+ P256o(tmp) | P256e(tmp) | P128o(tmp) | P128e(tmp));
+ ecc_buf[2] = ~(P4o(tmp) | P4e(tmp) | P2o(tmp) | P2e(tmp) | P1o(tmp) |
+ P1e(tmp) | P2048o(tmp) | P2048e(tmp));
+}
+
+/**
+ * omap_compare_ecc - Detect (2 bits) and correct (1 bit) error in data
+ * @ecc_data1: ecc code from nand spare area
+ * @ecc_data2: ecc code from hardware register obtained from hardware ecc
+ * @page_data: page data
+ *
+ * This function compares two ECCs and indicates whether there is an error.
+ * If the error can be corrected, the fix is applied to the page buffer.
+ */
+static int omap_compare_ecc(u8 *ecc_data1, /* read from NAND memory */
+ u8 *ecc_data2, /* read from register */
+ u8 *page_data)
+{
+ uint i;
+ u8 tmp0_bit[8], tmp1_bit[8], tmp2_bit[8];
+ u8 comp0_bit[8], comp1_bit[8], comp2_bit[8];
+ u8 ecc_bit[24];
+ u8 ecc_sum = 0;
+ u8 find_bit = 0;
+ uint find_byte = 0;
+ int isEccFF;
+
+ isEccFF = ((*(u32 *)ecc_data1 & 0xFFFFFF) == 0xFFFFFF);
+
+ gen_true_ecc(ecc_data1);
+ gen_true_ecc(ecc_data2);
+
+ for (i = 0; i <= 2; i++) {
+ *(ecc_data1 + i) = ~(*(ecc_data1 + i));
+ *(ecc_data2 + i) = ~(*(ecc_data2 + i));
+ }
+
+ for (i = 0; i < 8; i++) {
+ tmp0_bit[i] = *ecc_data1 % 2;
+ *ecc_data1 = *ecc_data1 / 2;
+ }
+
+ for (i = 0; i < 8; i++) {
+ tmp1_bit[i] = *(ecc_data1 + 1) % 2;
+ *(ecc_data1 + 1) = *(ecc_data1 + 1) / 2;
+ }
+
+ for (i = 0; i < 8; i++) {
+ tmp2_bit[i] = *(ecc_data1 + 2) % 2;
+ *(ecc_data1 + 2) = *(ecc_data1 + 2) / 2;
+ }
+
+ for (i = 0; i < 8; i++) {
+ comp0_bit[i] = *ecc_data2 % 2;
+ *ecc_data2 = *ecc_data2 / 2;
+ }
+
+ for (i = 0; i < 8; i++) {
+ comp1_bit[i] = *(ecc_data2 + 1) % 2;
+ *(ecc_data2 + 1) = *(ecc_data2 + 1) / 2;
+ }
+
+ for (i = 0; i < 8; i++) {
+ comp2_bit[i] = *(ecc_data2 + 2) % 2;
+ *(ecc_data2 + 2) = *(ecc_data2 + 2) / 2;
+ }
+
+ for (i = 0; i < 6; i++)
+ ecc_bit[i] = tmp2_bit[i + 2] ^ comp2_bit[i + 2];
+
+ for (i = 0; i < 8; i++)
+ ecc_bit[i + 6] = tmp0_bit[i] ^ comp0_bit[i];
+
+ for (i = 0; i < 8; i++)
+ ecc_bit[i + 14] = tmp1_bit[i] ^ comp1_bit[i];
+
+ ecc_bit[22] = tmp2_bit[0] ^ comp2_bit[0];
+ ecc_bit[23] = tmp2_bit[1] ^ comp2_bit[1];
+
+ for (i = 0; i < 24; i++)
+ ecc_sum += ecc_bit[i];
+
+ switch (ecc_sum) {
+ case 0:
+ /* Not reached because this function is not called if
+ * ECC values are equal
+ */
+ return 0;
+
+ case 1:
+ /* Uncorrectable error */
+ DEBUG(MTD_DEBUG_LEVEL0, "ECC UNCORRECTED_ERROR 1\n");
+ return -1;
+
+ case 11:
+ /* Uncorrectable error */
+ DEBUG(MTD_DEBUG_LEVEL0, "ECC UNCORRECTED_ERROR B\n");
+ return -1;
+
+ case 12:
+ /* Correctable error */
+ find_byte = (ecc_bit[23] << 8) +
+ (ecc_bit[21] << 7) +
+ (ecc_bit[19] << 6) +
+ (ecc_bit[17] << 5) +
+ (ecc_bit[15] << 4) +
+ (ecc_bit[13] << 3) +
+ (ecc_bit[11] << 2) +
+ (ecc_bit[9] << 1) +
+ ecc_bit[7];
+
+ find_bit = (ecc_bit[5] << 2) + (ecc_bit[3] << 1) + ecc_bit[1];
+
+ DEBUG(MTD_DEBUG_LEVEL0, "Correcting single bit ECC error at "
+ "offset: %d, bit: %d\n", find_byte, find_bit);
+
+ page_data[find_byte] ^= (1 << find_bit);
+
+ return 0;
+ default:
+ if (isEccFF) {
+ if (ecc_data2[0] == 0 &&
+ ecc_data2[1] == 0 &&
+ ecc_data2[2] == 0)
+ return 0;
+ }
+ DEBUG(MTD_DEBUG_LEVEL0, "UNCORRECTED_ERROR default\n");
+ return -1;
+ }
+}
+
+/**
+ * omap_correct_data - Compares the ECC read with HW generated ECC
+ * @mtd: MTD device structure
+ * @dat: page data
+ * @read_ecc: ecc read from nand flash
+ * @calc_ecc: ecc read from HW ECC registers
+ *
+ * Compares the ECC read from the NAND spare area with the ECC register
+ * values and, if they mismatch, calls omap_compare_ecc() for error
+ * detection and correction.
+ */
+static int omap_correct_data(struct mtd_info *mtd, u_char *dat,
+ u_char *read_ecc, u_char *calc_ecc)
+{
+ struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
+ mtd);
+ int blockCnt = 0, i = 0, ret = 0;
+
+ /* Ex NAND_ECC_HW12_2048 */
+ if ((info->nand.ecc.mode == NAND_ECC_HW) &&
+ (info->nand.ecc.size == 2048))
+ blockCnt = 4;
+ else
+ blockCnt = 1;
+
+ for (i = 0; i < blockCnt; i++) {
+ if (memcmp(read_ecc, calc_ecc, 3) != 0) {
+ ret = omap_compare_ecc(read_ecc, calc_ecc, dat);
+ if (ret < 0)
+ return ret;
+ }
+ read_ecc += 3;
+ calc_ecc += 3;
+ dat += 512;
+ }
+ return 0;
+}
+
+/**
+ * omap_calculate_ecc - Generate non-inverted ECC bytes.
+ * @mtd: MTD device structure
+ * @dat: The pointer to data on which ecc is computed
+ * @ecc_code: The ecc_code buffer
+ *
+ * Using noninverted ECC can be considered ugly since writing a blank
+ * page, i.e. padding, will clear the ECC bytes. This is no problem as long as
+ * nobody is trying to write data on the seemingly unused page. Reading
+ * an erased page will produce an ECC mismatch between generated and read
+ * ECC bytes that has to be dealt with separately.
+ */
+static int omap_calculate_ecc(struct mtd_info *mtd, const u_char *dat,
+ u_char *ecc_code)
+{
+ struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
+ mtd);
+ unsigned long val = 0x0;
+ unsigned long reg;
+
+ /* Start Reading from HW ECC1_Result = 0x200 */
+ reg = (unsigned long)(info->gpmc_baseaddr + GPMC_ECC1_RESULT);
+ val = __raw_readl(reg);
+ *ecc_code++ = val; /* P128e, ..., P1e */
+ *ecc_code++ = val >> 16; /* P128o, ..., P1o */
+ /* P2048o, P1024o, P512o, P256o, P2048e, P1024e, P512e, P256e */
+ *ecc_code++ = ((val >> 8) & 0x0f) | ((val >> 20) & 0xf0);
+ reg += 4;
+
+ return 0;
+}
+
+/**
+ * omap_enable_hwecc - This function enables the hardware ecc functionality
+ * @mtd: MTD device structure
+ * @mode: Read/Write mode
+ */
+static void omap_enable_hwecc(struct mtd_info *mtd, int mode)
+{
+ struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
+ mtd);
+ struct nand_chip *chip = mtd->priv;
+ unsigned int dev_width = (chip->options & NAND_BUSWIDTH_16) ? 1 : 0;
+ unsigned long val = __raw_readl(info->gpmc_baseaddr + GPMC_ECC_CONFIG);
+
+ switch (mode) {
+ case NAND_ECC_READ:
+ __raw_writel(0x101, info->gpmc_baseaddr + GPMC_ECC_CONTROL);
+ /* (ECC 16 or 8 bit col) | ( CS ) | ECC Enable */
+ val = (dev_width << 7) | (info->gpmc_cs << 1) | (0x1);
+ break;
+ case NAND_ECC_READSYN:
+ __raw_writel(0x100, info->gpmc_baseaddr + GPMC_ECC_CONTROL);
+ /* (ECC 16 or 8 bit col) | ( CS ) | ECC Enable */
+ val = (dev_width << 7) | (info->gpmc_cs << 1) | (0x1);
+ break;
+ case NAND_ECC_WRITE:
+ __raw_writel(0x101, info->gpmc_baseaddr + GPMC_ECC_CONTROL);
+ /* (ECC 16 or 8 bit col) | ( CS ) | ECC Enable */
+ val = (dev_width << 7) | (info->gpmc_cs << 1) | (0x1);
+ break;
+ default:
+ DEBUG(MTD_DEBUG_LEVEL0, "Error: Unrecognized Mode[%d]!\n",
+ mode);
+ break;
+ }
+
+ __raw_writel(val, info->gpmc_baseaddr + GPMC_ECC_CONFIG);
+}
+#endif
+
+/**
+ * omap_wait - wait until the command is done
+ * @mtd: MTD device structure
+ * @chip: NAND Chip structure
+ *
+ * The wait function is called during program and erase operations; given
+ * the way the MTD layer calls it, we should wait until the NAND chip is
+ * ready after the programming/erase operation has completed.
+ *
+ * Erase can take up to 400ms and program up to 20ms according to
+ * general NAND and SmartMedia specs
+ */
+static int omap_wait(struct mtd_info *mtd, struct nand_chip *chip)
+{
+ struct nand_chip *this = mtd->priv;
+ struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
+ mtd);
+ unsigned long timeo = jiffies;
+ int status, state = this->state;
+
+ if (state == FL_ERASING)
+ timeo += (HZ * 400) / 1000;
+ else
+ timeo += (HZ * 20) / 1000;
+
+ this->IO_ADDR_W = (void *) info->gpmc_cs_baseaddr +
+ GPMC_CS_NAND_COMMAND;
+ this->IO_ADDR_R = (void *) info->gpmc_cs_baseaddr + GPMC_CS_NAND_DATA;
+
+ __raw_writeb(NAND_CMD_STATUS & 0xFF, this->IO_ADDR_W);
+
+ while (time_before(jiffies, timeo)) {
+ status = __raw_readb(this->IO_ADDR_R);
+ if (!(status & 0x40))
+ break;
+ }
+ return status;
+}
+
+/**
+ * omap_dev_ready - calls the platform specific dev_ready function
+ * @mtd: MTD device structure
+ */
+static int omap_dev_ready(struct mtd_info *mtd)
+{
+ struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
+ mtd);
+ unsigned int val = __raw_readl(info->gpmc_baseaddr + GPMC_IRQ_STATUS);
+
+ if ((val & 0x100) == 0x100) {
+ /* Clear IRQ Interrupt */
+ val |= 0x100;
+ val &= ~(0x0);
+ __raw_writel(val, info->gpmc_baseaddr + GPMC_IRQ_STATUS);
+ } else {
+ unsigned int cnt = 0;
+ while (cnt++ < 0x1FF) {
+ if ((val & 0x100) == 0x100)
+ return 0;
+ val = __raw_readl(info->gpmc_baseaddr +
+ GPMC_IRQ_STATUS);
+ }
+ }
+
+ return 1;
+}
+
+static int __devinit omap_nand_probe(struct platform_device *pdev)
+{
+ struct omap_nand_info *info;
+ struct omap_nand_platform_data *pdata;
+ int err;
+ unsigned long val;
+
+
+ pdata = pdev->dev.platform_data;
+ if (pdata == NULL) {
+ dev_err(&pdev->dev, "platform data missing\n");
+ return -ENODEV;
+ }
+
+ info = kzalloc(sizeof(struct omap_nand_info), GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, info);
+
+ spin_lock_init(&info->controller.lock);
+ init_waitqueue_head(&info->controller.wq);
+
+ info->pdev = pdev;
+
+ info->gpmc_cs = pdata->cs;
+ info->gpmc_baseaddr = pdata->gpmc_baseaddr;
+ info->gpmc_cs_baseaddr = pdata->gpmc_cs_baseaddr;
+
+ info->mtd.priv = &info->nand;
+ info->mtd.name = dev_name(&pdev->dev);
+ info->mtd.owner = THIS_MODULE;
+
+ err = gpmc_cs_request(info->gpmc_cs, NAND_IO_SIZE, &info->phys_base);
+ if (err < 0) {
+ dev_err(&pdev->dev, "Cannot request GPMC CS\n");
+ goto out_free_info;
+ }
+
+ /* Enable RD PIN Monitoring Reg */
+ if (pdata->dev_ready) {
+ val = gpmc_cs_read_reg(info->gpmc_cs, GPMC_CS_CONFIG1);
+ val |= WR_RD_PIN_MONITORING;
+ gpmc_cs_write_reg(info->gpmc_cs, GPMC_CS_CONFIG1, val);
+ }
+
+ val = gpmc_cs_read_reg(info->gpmc_cs, GPMC_CS_CONFIG7);
+ val &= ~(0xf << 8);
+ val |= (0xc & 0xf) << 8;
+ gpmc_cs_write_reg(info->gpmc_cs, GPMC_CS_CONFIG7, val);
+
+ /* NAND write protect off */
+ omap_nand_wp(&info->mtd, NAND_WP_OFF);
+
+ if (!request_mem_region(info->phys_base, NAND_IO_SIZE,
+ pdev->dev.driver->name)) {
+ err = -EBUSY;
+ goto out_free_cs;
+ }
+
+ info->nand.IO_ADDR_R = ioremap(info->phys_base, NAND_IO_SIZE);
+ if (!info->nand.IO_ADDR_R) {
+ err = -ENOMEM;
+ goto out_release_mem_region;
+ }
+ info->nand.controller = &info->controller;
+
+ info->nand.IO_ADDR_W = info->nand.IO_ADDR_R;
+ info->nand.cmd_ctrl = omap_hwcontrol;
+
+ /* REVISIT: only supports 16-bit NAND flash */
+
+ info->nand.read_buf = omap_read_buf16;
+ info->nand.write_buf = omap_write_buf16;
+ info->nand.verify_buf = omap_verify_buf;
+
+ /*
+ * If RDY/BSY line is connected to OMAP then use the omap ready
+ * function and the generic nand_wait function, which reads the status
+ * register after monitoring the RDY/BSY line. Otherwise use a standard
+ * chip delay, which is slightly more than tR (AC timing) of the NAND
+ * device, and read the status register until you get a failure or success.
+ */
+ if (pdata->dev_ready) {
+ info->nand.dev_ready = omap_dev_ready;
+ info->nand.chip_delay = 0;
+ } else {
+ info->nand.waitfunc = omap_wait;
+ info->nand.chip_delay = 50;
+ }
+
+ info->nand.options |= NAND_SKIP_BBTSCAN;
+ if ((gpmc_cs_read_reg(info->gpmc_cs, GPMC_CS_CONFIG1) & 0x3000)
+ == 0x1000)
+ info->nand.options |= NAND_BUSWIDTH_16;
+
+#ifdef CONFIG_MTD_NAND_OMAP_HWECC
+ info->nand.ecc.bytes = 3;
+ info->nand.ecc.size = 512;
+ info->nand.ecc.calculate = omap_calculate_ecc;
+ info->nand.ecc.hwctl = omap_enable_hwecc;
+ info->nand.ecc.correct = omap_correct_data;
+ info->nand.ecc.mode = NAND_ECC_HW;
+
+ /* init HW ECC */
+ omap_hwecc_init(&info->mtd);
+#else
+ info->nand.ecc.mode = NAND_ECC_SOFT;
+#endif
+
+ /* DIP switches on some boards change between 8 and 16 bit
+ * bus widths for flash. Try the other width if the first try fails.
+ */
+ if (nand_scan(&info->mtd, 1)) {
+ info->nand.options ^= NAND_BUSWIDTH_16;
+ if (nand_scan(&info->mtd, 1)) {
+ err = -ENXIO;
+ goto out_release_mem_region;
+ }
+ }
+
+#ifdef CONFIG_MTD_PARTITIONS
+ err = parse_mtd_partitions(&info->mtd, part_probes, &info->parts, 0);
+ if (err > 0)
+ add_mtd_partitions(&info->mtd, info->parts, err);
+ else if (pdata->parts)
+ add_mtd_partitions(&info->mtd, pdata->parts, pdata->nr_parts);
+ else
+#endif
+ add_mtd_device(&info->mtd);
+
+ platform_set_drvdata(pdev, &info->mtd);
+
+ return 0;
+
+out_release_mem_region:
+ release_mem_region(info->phys_base, NAND_IO_SIZE);
+out_free_cs:
+ gpmc_cs_free(info->gpmc_cs);
+out_free_info:
+ kfree(info);
+
+ return err;
+}
+
+static int omap_nand_remove(struct platform_device *pdev)
+{
+ struct mtd_info *mtd = platform_get_drvdata(pdev);
+ struct omap_nand_info *info = container_of(mtd, struct omap_nand_info, mtd);
+
+ platform_set_drvdata(pdev, NULL);
+ /* Release NAND device, its internal structures and partitions */
+ nand_release(&info->mtd);
+ iounmap(info->nand.IO_ADDR_R);
+ kfree(info);
+ return 0;
+}
+
+static struct platform_driver omap_nand_driver = {
+ .probe = omap_nand_probe,
+ .remove = omap_nand_remove,
+ .driver = {
+ .name = DRIVER_NAME,
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init omap_nand_init(void)
+{
+ printk(KERN_INFO "%s driver initializing\n", DRIVER_NAME);
+ return platform_driver_register(&omap_nand_driver);
+}
+
+static void __exit omap_nand_exit(void)
+{
+ platform_driver_unregister(&omap_nand_driver);
+}
+
+module_init(omap_nand_init);
+module_exit(omap_nand_exit);
+
+MODULE_ALIAS(DRIVER_NAME);
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Glue layer for NAND flash on TI OMAP boards");
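
For the 1-bit hardware ECC path, the whole per-512-byte result lives in one GPMC register and only has to be split into the three bytes kept in OOB. A tiny host-side illustration of that split, mirroring omap_calculate_ecc() above with the register value passed in as a plain number instead of being read from hardware:

    /* Illustration only: split a GPMC_ECC1_RESULT value into the three
     * OOB ECC bytes the way omap_calculate_ecc() does. */
    #include <stdio.h>
    #include <stdint.h>

    static void split_gpmc_ecc1(uint32_t val, uint8_t ecc[3])
    {
        ecc[0] = val & 0xff;                         /* P128e ... P1e */
        ecc[1] = (val >> 16) & 0xff;                 /* P128o ... P1o */
        /* odd parity bits in the high nibble, even parity in the low one */
        ecc[2] = ((val >> 8) & 0x0f) | ((val >> 20) & 0xf0);
    }

    int main(void)
    {
        uint8_t ecc[3];

        split_gpmc_ecc1(0x03aa00cc, ecc);            /* arbitrary sample value */
        printf("%02x %02x %02x\n", ecc[0], ecc[1], ecc[2]);
        return 0;
    }
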
diff --git a/drivers/mtd/nand/orion_nand.c b/drivers/mtd/nand/orion_nand.c
index c2dfd3e..7ad9722 100644
--- a/drivers/mtd/nand/orion_nand.c
+++ b/drivers/mtd/nand/orion_nand.c
@@ -47,6 +47,28 @@
writeb(cmd, nc->IO_ADDR_W + offs);
}
+static void orion_nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
+{
+ struct nand_chip *chip = mtd->priv;
+ void __iomem *io_base = chip->IO_ADDR_R;
+ uint64_t *buf64;
+ int i = 0;
+
+ while (len && (unsigned long)buf & 7) {
+ *buf++ = readb(io_base);
+ len--;
+ }
+ buf64 = (uint64_t *)buf;
+ while (i < len/8) {
+ uint64_t x;
+ asm ("ldrd\t%0, [%1]" : "=r" (x) : "r" (io_base));
+ buf64[i++] = x;
+ }
+ i *= 8;
+ while (i < len)
+ buf[i++] = readb(io_base);
+}
+
static int __init orion_nand_probe(struct platform_device *pdev)
{
struct mtd_info *mtd;
@@ -83,6 +105,7 @@
nc->priv = board;
nc->IO_ADDR_R = nc->IO_ADDR_W = io_base;
nc->cmd_ctrl = orion_nand_cmd_ctrl;
+ nc->read_buf = orion_nand_read_buf;
nc->ecc.mode = NAND_ECC_SOFT;
if (board->chip_delay)
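
orion_nand_read_buf() speeds up page reads by draining the data register in 64-bit ldrd chunks once the destination pointer is 8-byte aligned, with byte-wide reads covering the unaligned head and the sub-8-byte tail. A host-side sketch of the same three-phase loop against an ordinary memory source, demonstrating the loop structure only, not the MMIO access:

    /* Sketch of the head / 64-bit bulk / tail split used above, with a
     * plain byte stream standing in for the NAND data register. */
    #include <stdio.h>
    #include <stdint.h>
    #include <string.h>

    static void read_buf_sketch(const uint8_t *src, uint8_t *buf, int len)
    {
        uint64_t *buf64;
        int i = 0;

        while (len && ((uintptr_t)buf & 7)) {   /* unaligned head bytes */
            *buf++ = *src++;
            len--;
        }
        buf64 = (uint64_t *)buf;
        while (i < len / 8) {                   /* aligned 64-bit chunks */
            uint64_t x;
            memcpy(&x, src, 8);                 /* stands in for the ldrd */
            src += 8;
            buf64[i++] = x;
        }
        i *= 8;                                 /* back to byte indexing */
        while (i < len)                         /* leftover tail bytes */
            buf[i++] = *src++;
    }

    int main(void)
    {
        uint8_t src[32], dst[40];
        int i;

        for (i = 0; i < 32; i++)
            src[i] = (uint8_t)i;
        read_buf_sketch(src, dst + 1, 31);      /* deliberately misaligned */
        printf("%s\n", memcmp(src, dst + 1, 31) ? "mismatch" : "ok");
        return 0;
    }
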
diff --git a/drivers/mtd/nand/plat_nand.c b/drivers/mtd/nand/plat_nand.c
index 86e1d08..4e16c6f 100644
--- a/drivers/mtd/nand/plat_nand.c
+++ b/drivers/mtd/nand/plat_nand.c
@@ -61,6 +61,8 @@
data->chip.cmd_ctrl = pdata->ctrl.cmd_ctrl;
data->chip.dev_ready = pdata->ctrl.dev_ready;
data->chip.select_chip = pdata->ctrl.select_chip;
+ data->chip.write_buf = pdata->ctrl.write_buf;
+ data->chip.read_buf = pdata->ctrl.read_buf;
data->chip.chip_delay = pdata->chip.chip_delay;
data->chip.options |= pdata->chip.options;
@@ -70,6 +72,13 @@
platform_set_drvdata(pdev, data);
+ /* Handle any platform specific setup */
+ if (pdata->ctrl.probe) {
+ res = pdata->ctrl.probe(pdev);
+ if (res)
+ goto out;
+ }
+
/* Scan to find existence of the device */
if (nand_scan(&data->mtd, 1)) {
res = -ENXIO;
@@ -86,6 +95,8 @@
return 0;
}
}
+ if (pdata->chip.set_parts)
+ pdata->chip.set_parts(data->mtd.size, &pdata->chip);
if (pdata->chip.partitions) {
data->parts = pdata->chip.partitions;
res = add_mtd_partitions(&data->mtd, data->parts,
@@ -99,6 +110,8 @@
nand_release(&data->mtd);
out:
+ if (pdata->ctrl.remove)
+ pdata->ctrl.remove(pdev);
platform_set_drvdata(pdev, NULL);
iounmap(data->io_base);
kfree(data);
@@ -111,15 +124,15 @@
static int __devexit plat_nand_remove(struct platform_device *pdev)
{
struct plat_nand_data *data = platform_get_drvdata(pdev);
-#ifdef CONFIG_MTD_PARTITIONS
struct platform_nand_data *pdata = pdev->dev.platform_data;
-#endif
nand_release(&data->mtd);
#ifdef CONFIG_MTD_PARTITIONS
if (data->parts && data->parts != pdata->chip.partitions)
kfree(data->parts);
#endif
+ if (pdata->ctrl.remove)
+ pdata->ctrl.remove(pdev);
iounmap(data->io_base);
kfree(data);
@@ -128,7 +141,7 @@
static struct platform_driver plat_nand_driver = {
.probe = plat_nand_probe,
- .remove = plat_nand_remove,
+ .remove = __devexit_p(plat_nand_remove),
.driver = {
.name = "gen_nand",
.owner = THIS_MODULE,
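
For board code, the new hooks mean gen_nand platform data can now carry controller setup/teardown (ctrl.probe/ctrl.remove), raw buffer accessors (ctrl.read_buf/ctrl.write_buf) and a set_parts() callback that sizes partitions once the chip has been probed. A hypothetical board-file sketch of how those fields might be wired up; the my_* names are invented, and the probe/remove/set_parts prototypes are inferred from the call sites above rather than taken from the header:

    /* Hypothetical board support sketch; not part of this patch. */
    #include <linux/kernel.h>
    #include <linux/platform_device.h>
    #include <linux/mtd/mtd.h>
    #include <linux/mtd/nand.h>
    #include <linux/mtd/partitions.h>

    static struct mtd_partition my_parts[] = {
        { .name = "boot", .offset = 0, .size = 1024 * 1024 },
        { .name = "root", .offset = MTDPART_OFS_APPEND, .size = 0 },
    };

    /* Grow the last partition to cover whatever size the probe reported. */
    static void my_set_parts(uint64_t size, struct platform_nand_chip *chip)
    {
        chip->partitions[1].size = size - my_parts[0].size;
    }

    /* Claim/release board-specific resources around the generic driver. */
    static int my_nand_probe(struct platform_device *pdev)
    {
        /* e.g. enable an external bus buffer or a chip-select GPIO here */
        return 0;
    }

    static void my_nand_remove(struct platform_device *pdev)
    {
        /* undo whatever my_nand_probe() set up */
    }

    static struct platform_nand_data my_nand_data = {
        .chip = {
            .chip_delay    = 25,
            .partitions    = my_parts,
            .nr_partitions = ARRAY_SIZE(my_parts),
            .set_parts     = my_set_parts,
        },
        .ctrl = {
            .probe  = my_nand_probe,
            .remove = my_nand_remove,
            /* plus cmd_ctrl / dev_ready / read_buf / write_buf as needed */
        },
    };
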
diff --git a/drivers/mtd/nand/s3c2410.c b/drivers/mtd/nand/s3c2410.c
index 8e375d5..11dc7e6 100644
--- a/drivers/mtd/nand/s3c2410.c
+++ b/drivers/mtd/nand/s3c2410.c
@@ -74,6 +74,14 @@
struct s3c2410_nand_info;
+/**
+ * struct s3c2410_nand_mtd - driver MTD structure
+ * @mtd: The MTD instance to pass to the MTD layer.
+ * @chip: The NAND chip information.
+ * @set: The platform information supplied for this set of NAND chips.
+ * @info: Link back to the hardware information.
+ * @scan_res: The result from calling nand_scan_ident().
+*/
struct s3c2410_nand_mtd {
struct mtd_info mtd;
struct nand_chip chip;
@@ -90,6 +98,21 @@
/* overview of the s3c2410 nand state */
+/**
+ * struct s3c2410_nand_info - NAND controller state.
+ * @mtds: An array of MTD instances on this controller.
+ * @platform: The platform data for this board.
+ * @device: The platform device we bound to.
+ * @area: The IO area resource that came from request_mem_region().
+ * @clk: The clock resource for this controller.
+ * @regs: The area mapped for the hardware registers described by @area.
+ * @sel_reg: Pointer to the register controlling the NAND selection.
+ * @sel_bit: The bit in @sel_reg to select the NAND chip.
+ * @mtd_count: The number of MTDs created from this controller.
+ * @save_sel: The contents of @sel_reg to be saved over suspend.
+ * @clk_rate: The clock rate from @clk.
+ * @cpu_type: The exact type of this controller.
+ */
struct s3c2410_nand_info {
/* mtd info */
struct nand_hw_control controller;
@@ -145,12 +168,19 @@
#define NS_IN_KHZ 1000000
+/**
+ * s3c_nand_calc_rate - calculate timing data.
+ * @wanted: The cycle time in nanoseconds.
+ * @clk: The clock rate in kHz.
+ * @max: The maximum divider value.
+ *
+ * Calculate the timing value from the given parameters.
+ */
static int s3c_nand_calc_rate(int wanted, unsigned long clk, int max)
{
int result;
- result = (wanted * clk) / NS_IN_KHZ;
- result++;
+ result = DIV_ROUND_UP((wanted * clk), NS_IN_KHZ);
pr_debug("result %d from %ld, %d\n", result, clk, wanted);
@@ -169,13 +199,21 @@
/* controller setup */
+/**
+ * s3c2410_nand_setrate - setup controller timing information.
+ * @info: The controller instance.
+ *
+ * Given the information supplied by the platform, calculate and set
+ * the necessary timing registers in the hardware to generate the
+ * required timing cycles on the bus.
+ */
static int s3c2410_nand_setrate(struct s3c2410_nand_info *info)
{
struct s3c2410_platform_nand *plat = info->platform;
int tacls_max = (info->cpu_type == TYPE_S3C2412) ? 8 : 4;
int tacls, twrph0, twrph1;
unsigned long clkrate = clk_get_rate(info->clk);
- unsigned long set, cfg, mask;
+ unsigned long uninitialized_var(set), cfg, uninitialized_var(mask);
unsigned long flags;
/* calculate the timing information for the controller */
@@ -215,9 +253,9 @@
case TYPE_S3C2440:
case TYPE_S3C2412:
- mask = (S3C2410_NFCONF_TACLS(tacls_max - 1) |
- S3C2410_NFCONF_TWRPH0(7) |
- S3C2410_NFCONF_TWRPH1(7));
+ mask = (S3C2440_NFCONF_TACLS(tacls_max - 1) |
+ S3C2440_NFCONF_TWRPH0(7) |
+ S3C2440_NFCONF_TWRPH1(7));
set = S3C2440_NFCONF_TACLS(tacls - 1);
set |= S3C2440_NFCONF_TWRPH0(twrph0 - 1);
@@ -225,14 +263,9 @@
break;
default:
- /* keep compiler happy */
- mask = 0;
- set = 0;
BUG();
}
- dev_dbg(info->device, "NF_CONF is 0x%lx\n", cfg);
-
local_irq_save(flags);
cfg = readl(info->regs + S3C2410_NFCONF);
@@ -242,9 +275,18 @@
local_irq_restore(flags);
+ dev_dbg(info->device, "NF_CONF is 0x%lx\n", cfg);
+
return 0;
}
+/**
+ * s3c2410_nand_inithw - basic hardware initialisation
+ * @info: The hardware state.
+ *
+ * Do the basic initialisation of the hardware, using s3c2410_nand_setrate()
+ * to setup the hardware access speeds and set the controller to be enabled.
+*/
static int s3c2410_nand_inithw(struct s3c2410_nand_info *info)
{
int ret;
@@ -268,8 +310,19 @@
return 0;
}
-/* select chip */
-
+/**
+ * s3c2410_nand_select_chip - select the given nand chip
+ * @mtd: The MTD instance for this chip.
+ * @chip: The chip number.
+ *
+ * This is called by the MTD layer to either select a given chip for the
+ * @mtd instance, or to indicate that the access has finished and the
+ * chip can be de-selected.
+ *
+ * The routine ensures that the nFCE line is correctly set up, and any
+ * platform specific selection code is called to route nFCE to the specific
+ * chip.
+ */
static void s3c2410_nand_select_chip(struct mtd_info *mtd, int chip)
{
struct s3c2410_nand_info *info;
@@ -530,7 +583,16 @@
static void s3c2440_nand_read_buf(struct mtd_info *mtd, u_char *buf, int len)
{
struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd);
- readsl(info->regs + S3C2440_NFDATA, buf, len / 4);
+
+ readsl(info->regs + S3C2440_NFDATA, buf, len >> 2);
+
+ /* cleanup if we've got less than a word to do */
+ if (len & 3) {
+ buf += len & ~3;
+
+ for (; len & 3; len--)
+ *buf++ = readb(info->regs + S3C2440_NFDATA);
+ }
}
static void s3c2410_nand_write_buf(struct mtd_info *mtd, const u_char *buf, int len)
@@ -542,7 +604,16 @@
static void s3c2440_nand_write_buf(struct mtd_info *mtd, const u_char *buf, int len)
{
struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd);
- writesl(info->regs + S3C2440_NFDATA, buf, len / 4);
+
+ writesl(info->regs + S3C2440_NFDATA, buf, len >> 2);
+
+ /* cleanup any fractional write */
+ if (len & 3) {
+ buf += len & ~3;
+
+ for (; len & 3; len--, buf++)
+ writeb(*buf, info->regs + S3C2440_NFDATA);
+ }
}
/* cpufreq driver support */
@@ -593,7 +664,7 @@
/* device management functions */
-static int s3c2410_nand_remove(struct platform_device *pdev)
+static int s3c24xx_nand_remove(struct platform_device *pdev)
{
struct s3c2410_nand_info *info = to_nand_info(pdev);
@@ -645,17 +716,31 @@
}
#ifdef CONFIG_MTD_PARTITIONS
+static const char *part_probes[] = { "cmdlinepart", NULL };
static int s3c2410_nand_add_partition(struct s3c2410_nand_info *info,
struct s3c2410_nand_mtd *mtd,
struct s3c2410_nand_set *set)
{
+ struct mtd_partition *part_info;
+ int nr_part = 0;
+
if (set == NULL)
return add_mtd_device(&mtd->mtd);
- if (set->nr_partitions > 0 && set->partitions != NULL) {
- return add_mtd_partitions(&mtd->mtd, set->partitions, set->nr_partitions);
+ if (set->nr_partitions == 0) {
+ mtd->mtd.name = set->name;
+ nr_part = parse_mtd_partitions(&mtd->mtd, part_probes,
+ &part_info, 0);
+ } else {
+ if (set->nr_partitions > 0 && set->partitions != NULL) {
+ nr_part = set->nr_partitions;
+ part_info = set->partitions;
+ }
}
+ if (nr_part > 0 && part_info)
+ return add_mtd_partitions(&mtd->mtd, part_info, nr_part);
+
return add_mtd_device(&mtd->mtd);
}
#else
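With the change above, an s3c2410_nand_set that leaves nr_partitions at 0 now gets its layout from the "cmdlinepart" parser, with set->name used as the mtd-id. A hedged sketch of a board's set (the structure comes from the S3C platform NAND header; the name and mtdparts= string are illustrative):

static struct s3c2410_nand_set board_nand_sets[] = {
	[0] = {
		.name		= "nand",
		.nr_chips	= 1,
		/* nr_partitions == 0: partitions come from the boot command
		 * line, e.g. mtdparts=nand:256k(boot),-(rootfs)  (illustrative) */
	},
};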
@@ -667,11 +752,16 @@
}
#endif
-/* s3c2410_nand_init_chip
+/**
+ * s3c2410_nand_init_chip - initialise a single instance of a chip
+ * @info: The base NAND controller the chip is on.
+ * @nmtd: The new controller MTD instance to fill in.
+ * @set: The information passed from the board specific platform data.
*
- * init a single instance of an chip
-*/
-
+ * Initialise the given @nmtd from the information in @info and @set. This
+ * readies the structure for use with the MTD layer functions by ensuring
+ * all pointers are set up and the necessary control routines selected.
+ */
static void s3c2410_nand_init_chip(struct s3c2410_nand_info *info,
struct s3c2410_nand_mtd *nmtd,
struct s3c2410_nand_set *set)
@@ -757,14 +847,40 @@
if (set->disable_ecc)
chip->ecc.mode = NAND_ECC_NONE;
+
+ switch (chip->ecc.mode) {
+ case NAND_ECC_NONE:
+ dev_info(info->device, "NAND ECC disabled\n");
+ break;
+ case NAND_ECC_SOFT:
+ dev_info(info->device, "NAND soft ECC\n");
+ break;
+ case NAND_ECC_HW:
+ dev_info(info->device, "NAND hardware ECC\n");
+ break;
+ default:
+ dev_info(info->device, "NAND ECC UNKNOWN\n");
+ break;
+ }
+
+ /* If you use u-boot BBT creation code, specifying this flag will
+ * let the kernel fish out the BBT from the NAND, and also skip the
+ * full NAND scan that can take 1/2s or so. Little things... */
+ if (set->flash_bbt)
+ chip->options |= NAND_USE_FLASH_BBT | NAND_SKIP_BBTSCAN;
}
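Extending the board sketch shown after the partition-parsing hunk above: a board whose bootloader has already written a flash-based BBT would additionally set the new flash_bbt flag, which the code above turns into NAND_USE_FLASH_BBT | NAND_SKIP_BBTSCAN:

	.flash_bbt	= 1,	/* reuse the existing BBT, skip the full NAND scan */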
-/* s3c2410_nand_update_chip
+/**
+ * s3c2410_nand_update_chip - post probe update
+ * @info: The controller instance.
+ * @nmtd: The driver version of the MTD instance.
*
- * post-probe chip update, to change any items, such as the
- * layout for large page nand
- */
-
+ * This routine is called after the chip probe has successfully completed
+ * and the relevant per-chip information updated. This call ensures that
+ * we update the internal state accordingly.
+ *
+ * The internal state is currently limited to the ECC state information.
+*/
static void s3c2410_nand_update_chip(struct s3c2410_nand_info *info,
struct s3c2410_nand_mtd *nmtd)
{
@@ -773,33 +889,33 @@
dev_dbg(info->device, "chip %p => page shift %d\n",
chip, chip->page_shift);
- if (hardware_ecc) {
+ if (chip->ecc.mode != NAND_ECC_HW)
+ return;
+
/* change the behaviour depending on whether we are using
* the large or small page nand device */
- if (chip->page_shift > 10) {
- chip->ecc.size = 256;
- chip->ecc.bytes = 3;
- } else {
- chip->ecc.size = 512;
- chip->ecc.bytes = 3;
- chip->ecc.layout = &nand_hw_eccoob;
- }
+ if (chip->page_shift > 10) {
+ chip->ecc.size = 256;
+ chip->ecc.bytes = 3;
+ } else {
+ chip->ecc.size = 512;
+ chip->ecc.bytes = 3;
+ chip->ecc.layout = &nand_hw_eccoob;
}
}
-/* s3c2410_nand_probe
+/* s3c24xx_nand_probe
*
* called by device layer when it finds a device matching
* one our driver can handled. This code checks to see if
* it can allocate all necessary resources then calls the
* nand layer to look for devices
*/
-
-static int s3c24xx_nand_probe(struct platform_device *pdev,
- enum s3c_cpu_type cpu_type)
+static int s3c24xx_nand_probe(struct platform_device *pdev)
{
struct s3c2410_platform_nand *plat = to_nand_plat(pdev);
+ enum s3c_cpu_type cpu_type;
struct s3c2410_nand_info *info;
struct s3c2410_nand_mtd *nmtd;
struct s3c2410_nand_set *sets;
@@ -809,6 +925,8 @@
int nr_sets;
int setno;
+ cpu_type = platform_get_device_id(pdev)->driver_data;
+
pr_debug("s3c2410_nand_probe(%p)\n", pdev);
info = kmalloc(sizeof(*info), GFP_KERNEL);
@@ -922,7 +1040,7 @@
return 0;
exit_error:
- s3c2410_nand_remove(pdev);
+ s3c24xx_nand_remove(pdev);
if (err == 0)
err = -EINVAL;
@@ -983,50 +1101,33 @@
/* driver device registration */
-static int s3c2410_nand_probe(struct platform_device *dev)
-{
- return s3c24xx_nand_probe(dev, TYPE_S3C2410);
-}
-
-static int s3c2440_nand_probe(struct platform_device *dev)
-{
- return s3c24xx_nand_probe(dev, TYPE_S3C2440);
-}
-
-static int s3c2412_nand_probe(struct platform_device *dev)
-{
- return s3c24xx_nand_probe(dev, TYPE_S3C2412);
-}
-
-static struct platform_driver s3c2410_nand_driver = {
- .probe = s3c2410_nand_probe,
- .remove = s3c2410_nand_remove,
- .suspend = s3c24xx_nand_suspend,
- .resume = s3c24xx_nand_resume,
- .driver = {
- .name = "s3c2410-nand",
- .owner = THIS_MODULE,
+static struct platform_device_id s3c24xx_driver_ids[] = {
+ {
+ .name = "s3c2410-nand",
+ .driver_data = TYPE_S3C2410,
+ }, {
+ .name = "s3c2440-nand",
+ .driver_data = TYPE_S3C2440,
+ }, {
+ .name = "s3c2412-nand",
+ .driver_data = TYPE_S3C2412,
+ }, {
+ .name = "s3c6400-nand",
+ .driver_data = TYPE_S3C2412, /* compatible with 2412 */
},
+ { }
};
-static struct platform_driver s3c2440_nand_driver = {
- .probe = s3c2440_nand_probe,
- .remove = s3c2410_nand_remove,
- .suspend = s3c24xx_nand_suspend,
- .resume = s3c24xx_nand_resume,
- .driver = {
- .name = "s3c2440-nand",
- .owner = THIS_MODULE,
- },
-};
+MODULE_DEVICE_TABLE(platform, s3c24xx_driver_ids);
-static struct platform_driver s3c2412_nand_driver = {
- .probe = s3c2412_nand_probe,
- .remove = s3c2410_nand_remove,
+static struct platform_driver s3c24xx_nand_driver = {
+ .probe = s3c24xx_nand_probe,
+ .remove = s3c24xx_nand_remove,
.suspend = s3c24xx_nand_suspend,
.resume = s3c24xx_nand_resume,
+ .id_table = s3c24xx_driver_ids,
.driver = {
- .name = "s3c2412-nand",
+ .name = "s3c24xx-nand",
.owner = THIS_MODULE,
},
};
@@ -1035,16 +1136,12 @@
{
printk("S3C24XX NAND Driver, (c) 2004 Simtec Electronics\n");
- platform_driver_register(&s3c2412_nand_driver);
- platform_driver_register(&s3c2440_nand_driver);
- return platform_driver_register(&s3c2410_nand_driver);
+ return platform_driver_register(&s3c24xx_nand_driver);
}
static void __exit s3c2410_nand_exit(void)
{
- platform_driver_unregister(&s3c2412_nand_driver);
- platform_driver_unregister(&s3c2440_nand_driver);
- platform_driver_unregister(&s3c2410_nand_driver);
+ platform_driver_unregister(&s3c24xx_nand_driver);
}
module_init(s3c2410_nand_init);
@@ -1053,6 +1150,3 @@
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>");
MODULE_DESCRIPTION("S3C24XX MTD NAND driver");
-MODULE_ALIAS("platform:s3c2410-nand");
-MODULE_ALIAS("platform:s3c2412-nand");
-MODULE_ALIAS("platform:s3c2440-nand");
diff --git a/drivers/mtd/nand/txx9ndfmc.c b/drivers/mtd/nand/txx9ndfmc.c
index 8124792..488088e 100644
--- a/drivers/mtd/nand/txx9ndfmc.c
+++ b/drivers/mtd/nand/txx9ndfmc.c
@@ -64,7 +64,7 @@
struct nand_chip chip;
struct mtd_info mtd;
int cs;
- char mtdname[BUS_ID_SIZE + 2];
+ const char *mtdname;
};
#define MAX_TXX9NDFMC_DEV 4
@@ -334,16 +334,23 @@
if (plat->ch_mask != 1) {
txx9_priv->cs = i;
- sprintf(txx9_priv->mtdname, "%s.%u",
- dev_name(&dev->dev), i);
+ txx9_priv->mtdname = kasprintf(GFP_KERNEL, "%s.%u",
+ dev_name(&dev->dev), i);
} else {
txx9_priv->cs = -1;
- strcpy(txx9_priv->mtdname, dev_name(&dev->dev));
+ txx9_priv->mtdname = kstrdup(dev_name(&dev->dev),
+ GFP_KERNEL);
+ }
+ if (!txx9_priv->mtdname) {
+ kfree(txx9_priv);
+ dev_err(&dev->dev, "Unable to allocate MTD name.\n");
+ continue;
}
if (plat->wide_mask & (1 << i))
chip->options |= NAND_BUSWIDTH_16;
if (nand_scan(mtd, 1)) {
+ kfree(txx9_priv->mtdname);
kfree(txx9_priv);
continue;
}
@@ -385,6 +392,7 @@
kfree(drvdata->parts[i]);
#endif
del_mtd_device(mtd);
+ kfree(txx9_priv->mtdname);
kfree(txx9_priv);
}
return 0;
diff --git a/drivers/mtd/onenand/omap2.c b/drivers/mtd/onenand/omap2.c
index 6391e3d..38d656b 100644
--- a/drivers/mtd/onenand/omap2.c
+++ b/drivers/mtd/onenand/omap2.c
@@ -565,7 +565,7 @@
NULL, __adjust_timing);
}
-static void __devexit omap2_onenand_shutdown(struct platform_device *pdev)
+static void omap2_onenand_shutdown(struct platform_device *pdev)
{
struct omap2_onenand *c = dev_get_drvdata(&pdev->dev);
@@ -777,7 +777,7 @@
static struct platform_driver omap2_onenand_driver = {
.probe = omap2_onenand_probe,
- .remove = omap2_onenand_remove,
+ .remove = __devexit_p(omap2_onenand_remove),
.shutdown = omap2_onenand_shutdown,
.driver = {
.name = DRIVER_NAME,
diff --git a/drivers/mtd/onenand/onenand_base.c b/drivers/mtd/onenand/onenand_base.c
index 30d6999..6e82909 100644
--- a/drivers/mtd/onenand/onenand_base.c
+++ b/drivers/mtd/onenand/onenand_base.c
@@ -9,6 +9,10 @@
* auto-placement support, read-while load support, various fixes
* Copyright (C) Nokia Corporation, 2007
*
+ * Vishak G <vishak.g at samsung.com>, Rohit Hagargundgi <h.rohit at samsung.com>
+ * Flex-OneNAND support
+ * Copyright (C) Samsung Electronics, 2008
+ *
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
@@ -16,6 +20,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/delay.h>
@@ -27,6 +32,38 @@
#include <asm/io.h>
+/* Default Flex-OneNAND boundary and lock respectively */
+static int flex_bdry[MAX_DIES * 2] = { -1, 0, -1, 0 };
+
+module_param_array(flex_bdry, int, NULL, 0400);
+MODULE_PARM_DESC(flex_bdry, "SLC Boundary information for Flex-OneNAND"
+ "Syntax:flex_bdry=DIE_BDRY,LOCK,..."
+ "DIE_BDRY: SLC boundary of the die"
+ "LOCK: Locking information for SLC boundary"
+ " : 0->Set boundary in unlocked status"
+ " : 1->Set boundary in locked status");
+
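The flex_bdry array above packs two values per die: a boundary block number and a lock flag, so a hypothetical boot argument of flex_bdry=2,0,5,1 would request die 0's SLC boundary at block 2 (unlocked) and die 1's at block 5 (locked), while the default of -1 leaves a die's boundary unchanged. Later in this patch, onenand_scan() hands these values to the boundary-setting code, one die at a time:

	flexonenand_set_boundary(mtd, i, flex_bdry[2 * i], flex_bdry[(2 * i) + 1]);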
+/**
+ * onenand_oob_128 - oob info for Flex-OneNAND with 4KB page
+ * For now, we expose only 64 out of 80 ecc bytes
+ */
+static struct nand_ecclayout onenand_oob_128 = {
+ .eccbytes = 64,
+ .eccpos = {
+ 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
+ 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+ 38, 39, 40, 41, 42, 43, 44, 45, 46, 47,
+ 54, 55, 56, 57, 58, 59, 60, 61, 62, 63,
+ 70, 71, 72, 73, 74, 75, 76, 77, 78, 79,
+ 86, 87, 88, 89, 90, 91, 92, 93, 94, 95,
+ 102, 103, 104, 105
+ },
+ .oobfree = {
+ {2, 4}, {18, 4}, {34, 4}, {50, 4},
+ {66, 4}, {82, 4}, {98, 4}, {114, 4}
+ }
+};
+
/**
* onenand_oob_64 - oob info for large (2KB) page
*/
@@ -65,6 +102,14 @@
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 48 */
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 64 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 80 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 96 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 112 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 128 */
};
/**
@@ -171,6 +216,70 @@
}
/**
+ * flexonenand_block - For a given address return the block number
+ * @param this - OneNAND device structure
+ * @param addr - Address for which block number is needed
+ */
+static unsigned flexonenand_block(struct onenand_chip *this, loff_t addr)
+{
+ unsigned boundary, blk, die = 0;
+
+ if (ONENAND_IS_DDP(this) && addr >= this->diesize[0]) {
+ die = 1;
+ addr -= this->diesize[0];
+ }
+
+ boundary = this->boundary[die];
+
+ blk = addr >> (this->erase_shift - 1);
+ if (blk > boundary)
+ blk = (blk + boundary + 1) >> 1;
+
+ blk += die ? this->density_mask : 0;
+ return blk;
+}
+
+inline unsigned onenand_block(struct onenand_chip *this, loff_t addr)
+{
+ if (!FLEXONENAND(this))
+ return addr >> this->erase_shift;
+ return flexonenand_block(this, addr);
+}
+
+/**
+ * flexonenand_addr - Return address of the block
+ * @this: OneNAND device structure
+ * @block: Block number on Flex-OneNAND
+ *
+ * Return address of the block
+ */
+static loff_t flexonenand_addr(struct onenand_chip *this, int block)
+{
+ loff_t ofs = 0;
+ int die = 0, boundary;
+
+ if (ONENAND_IS_DDP(this) && block >= this->density_mask) {
+ block -= this->density_mask;
+ die = 1;
+ ofs = this->diesize[0];
+ }
+
+ boundary = this->boundary[die];
+ ofs += (loff_t)block << (this->erase_shift - 1);
+ if (block > (boundary + 1))
+ ofs += (loff_t)(block - boundary - 1) << (this->erase_shift - 1);
+ return ofs;
+}
+
+loff_t onenand_addr(struct onenand_chip *this, int block)
+{
+ if (!FLEXONENAND(this))
+ return (loff_t)block << this->erase_shift;
+ return flexonenand_addr(this, block);
+}
+EXPORT_SYMBOL(onenand_addr);
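A worked example of the block/address mapping above, using illustrative numbers (a 512 KiB MLC erase block, so erase_shift = 19 and 256 KiB SLC blocks, with boundary[0] = 3, i.e. blocks 0-3 are SLC):

	/* illustrative only */
	addr = 3 MiB:  addr >> 18 = 12;  12 > 3, so blk = (12 + 3 + 1) >> 1 = 8
	flexonenand_addr(this, 8) = (8 << 18) + ((8 - 3 - 1) << 18) = 2 MiB + 1 MiB = 3 MiB

i.e. the four SLC blocks cover the first 1 MiB, blocks 4-7 are MLC blocks covering 1-3 MiB, and block 8 starts at 3 MiB.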
+
+/**
* onenand_get_density - [DEFAULT] Get OneNAND density
* @param dev_id OneNAND device ID
*
@@ -183,6 +292,22 @@
}
/**
+ * flexonenand_region - [Flex-OneNAND] Return erase region of addr
+ * @param mtd MTD device structure
+ * @param addr address whose erase region needs to be identified
+ */
+int flexonenand_region(struct mtd_info *mtd, loff_t addr)
+{
+ int i;
+
+ for (i = 0; i < mtd->numeraseregions; i++)
+ if (addr < mtd->eraseregions[i].offset)
+ break;
+ return i - 1;
+}
+EXPORT_SYMBOL(flexonenand_region);
+
+/**
* onenand_command - [DEFAULT] Send command to OneNAND device
* @param mtd MTD device structure
* @param cmd the command to be sent
@@ -207,16 +332,28 @@
page = -1;
break;
- case ONENAND_CMD_ERASE:
- case ONENAND_CMD_BUFFERRAM:
- case ONENAND_CMD_OTP_ACCESS:
- block = (int) (addr >> this->erase_shift);
+ case FLEXONENAND_CMD_PI_ACCESS:
+ /* addr contains die index */
+ block = addr * this->density_mask;
page = -1;
break;
+ case ONENAND_CMD_ERASE:
+ case ONENAND_CMD_BUFFERRAM:
+ case ONENAND_CMD_OTP_ACCESS:
+ block = onenand_block(this, addr);
+ page = -1;
+ break;
+
+ case FLEXONENAND_CMD_READ_PI:
+ cmd = ONENAND_CMD_READ;
+ block = addr * this->density_mask;
+ page = 0;
+ break;
+
default:
- block = (int) (addr >> this->erase_shift);
- page = (int) (addr >> this->page_shift);
+ block = onenand_block(this, addr);
+ page = (int) (addr - onenand_addr(this, block)) >> this->page_shift;
if (ONENAND_IS_2PLANE(this)) {
/* Make the even block number */
@@ -236,7 +373,7 @@
value = onenand_bufferram_address(this, block);
this->write_word(value, this->base + ONENAND_REG_START_ADDRESS2);
- if (ONENAND_IS_2PLANE(this))
+ if (ONENAND_IS_MLC(this) || ONENAND_IS_2PLANE(this))
/* It is always BufferRAM0 */
ONENAND_SET_BUFFERRAM0(this);
else
@@ -258,13 +395,18 @@
if (page != -1) {
/* Now we use page size operation */
- int sectors = 4, count = 4;
+ int sectors = 0, count = 0;
int dataram;
switch (cmd) {
+ case FLEXONENAND_CMD_RECOVER_LSB:
case ONENAND_CMD_READ:
case ONENAND_CMD_READOOB:
- dataram = ONENAND_SET_NEXT_BUFFERRAM(this);
+ if (ONENAND_IS_MLC(this))
+ /* It is always BufferRAM0 */
+ dataram = ONENAND_SET_BUFFERRAM0(this);
+ else
+ dataram = ONENAND_SET_NEXT_BUFFERRAM(this);
break;
default:
@@ -293,6 +435,30 @@
}
/**
+ * onenand_read_ecc - return ecc status
+ * @param this onenand chip structure
+ */
+static inline int onenand_read_ecc(struct onenand_chip *this)
+{
+ int ecc, i, result = 0;
+
+ if (!FLEXONENAND(this))
+ return this->read_word(this->base + ONENAND_REG_ECC_STATUS);
+
+ for (i = 0; i < 4; i++) {
+ ecc = this->read_word(this->base + ONENAND_REG_ECC_STATUS + i);
+ if (likely(!ecc))
+ continue;
+ if (ecc & FLEXONENAND_UNCORRECTABLE_ERROR)
+ return ONENAND_ECC_2BIT_ALL;
+ else
+ result = ONENAND_ECC_1BIT_ALL;
+ }
+
+ return result;
+}
+
+/**
* onenand_wait - [DEFAULT] wait until the command is done
* @param mtd MTD device structure
* @param state state to select the max. timeout value
@@ -331,14 +497,14 @@
* power off recovery (POR) test, it should read ECC status first
*/
if (interrupt & ONENAND_INT_READ) {
- int ecc = this->read_word(this->base + ONENAND_REG_ECC_STATUS);
+ int ecc = onenand_read_ecc(this);
if (ecc) {
if (ecc & ONENAND_ECC_2BIT_ALL) {
printk(KERN_ERR "onenand_wait: ECC error = 0x%04x\n", ecc);
mtd->ecc_stats.failed++;
return -EBADMSG;
} else if (ecc & ONENAND_ECC_1BIT_ALL) {
- printk(KERN_INFO "onenand_wait: correctable ECC error = 0x%04x\n", ecc);
+ printk(KERN_DEBUG "onenand_wait: correctable ECC error = 0x%04x\n", ecc);
mtd->ecc_stats.corrected++;
}
}
@@ -656,7 +822,7 @@
if (found && ONENAND_IS_DDP(this)) {
/* Select DataRAM for DDP */
- int block = (int) (addr >> this->erase_shift);
+ int block = onenand_block(this, addr);
int value = onenand_bufferram_address(this, block);
this->write_word(value, this->base + ONENAND_REG_START_ADDRESS2);
}
@@ -816,6 +982,149 @@
}
/**
+ * onenand_recover_lsb - [Flex-OneNAND] Recover LSB page data
+ * @param mtd MTD device structure
+ * @param addr address to recover
+ * @param status return value from onenand_wait / onenand_bbt_wait
+ *
+ * MLC NAND Flash cell has paired pages - LSB page and MSB page. LSB page has
+ * lower page address and MSB page has higher page address in paired pages.
+ * If power off occurs during MSB page program, the paired LSB page data can
+ * become corrupt. An LSB page recovery read is a way to read the LSB page even
+ * though its data is corrupted. When an uncorrectable error occurs as a result
+ * of an LSB page read after power up, issue an LSB page recovery read.
+ */
+static int onenand_recover_lsb(struct mtd_info *mtd, loff_t addr, int status)
+{
+ struct onenand_chip *this = mtd->priv;
+ int i;
+
+ /* Recovery is only for Flex-OneNAND */
+ if (!FLEXONENAND(this))
+ return status;
+
+ /* check if we failed due to uncorrectable error */
+ if (status != -EBADMSG && status != ONENAND_BBT_READ_ECC_ERROR)
+ return status;
+
+ /* check if address lies in MLC region */
+ i = flexonenand_region(mtd, addr);
+ if (mtd->eraseregions[i].erasesize < (1 << this->erase_shift))
+ return status;
+
+ /* We are attempting to reread, so decrement stats.failed
+ * which was incremented by onenand_wait due to read failure
+ */
+ printk(KERN_INFO "onenand_recover_lsb: Attempting to recover from uncorrectable read\n");
+ mtd->ecc_stats.failed--;
+
+ /* Issue the LSB page recovery command */
+ this->command(mtd, FLEXONENAND_CMD_RECOVER_LSB, addr, this->writesize);
+ return this->wait(mtd, FL_READING);
+}
+
+/**
+ * onenand_mlc_read_ops_nolock - MLC OneNAND read main and/or out-of-band
+ * @param mtd MTD device structure
+ * @param from offset to read from
+ * @param ops: oob operation description structure
+ *
+ * MLC OneNAND / Flex-OneNAND has 4KB page size and 4KB dataram.
+ * So, read-while-load is not present.
+ */
+static int onenand_mlc_read_ops_nolock(struct mtd_info *mtd, loff_t from,
+ struct mtd_oob_ops *ops)
+{
+ struct onenand_chip *this = mtd->priv;
+ struct mtd_ecc_stats stats;
+ size_t len = ops->len;
+ size_t ooblen = ops->ooblen;
+ u_char *buf = ops->datbuf;
+ u_char *oobbuf = ops->oobbuf;
+ int read = 0, column, thislen;
+ int oobread = 0, oobcolumn, thisooblen, oobsize;
+ int ret = 0;
+ int writesize = this->writesize;
+
+ DEBUG(MTD_DEBUG_LEVEL3, "onenand_mlc_read_ops_nolock: from = 0x%08x, len = %i\n", (unsigned int) from, (int) len);
+
+ if (ops->mode == MTD_OOB_AUTO)
+ oobsize = this->ecclayout->oobavail;
+ else
+ oobsize = mtd->oobsize;
+
+ oobcolumn = from & (mtd->oobsize - 1);
+
+ /* Do not allow reads past end of device */
+ if (from + len > mtd->size) {
+ printk(KERN_ERR "onenand_mlc_read_ops_nolock: Attempt read beyond end of device\n");
+ ops->retlen = 0;
+ ops->oobretlen = 0;
+ return -EINVAL;
+ }
+
+ stats = mtd->ecc_stats;
+
+ while (read < len) {
+ cond_resched();
+
+ thislen = min_t(int, writesize, len - read);
+
+ column = from & (writesize - 1);
+ if (column + thislen > writesize)
+ thislen = writesize - column;
+
+ if (!onenand_check_bufferram(mtd, from)) {
+ this->command(mtd, ONENAND_CMD_READ, from, writesize);
+
+ ret = this->wait(mtd, FL_READING);
+ if (unlikely(ret))
+ ret = onenand_recover_lsb(mtd, from, ret);
+ onenand_update_bufferram(mtd, from, !ret);
+ if (ret == -EBADMSG)
+ ret = 0;
+ }
+
+ this->read_bufferram(mtd, ONENAND_DATARAM, buf, column, thislen);
+ if (oobbuf) {
+ thisooblen = oobsize - oobcolumn;
+ thisooblen = min_t(int, thisooblen, ooblen - oobread);
+
+ if (ops->mode == MTD_OOB_AUTO)
+ onenand_transfer_auto_oob(mtd, oobbuf, oobcolumn, thisooblen);
+ else
+ this->read_bufferram(mtd, ONENAND_SPARERAM, oobbuf, oobcolumn, thisooblen);
+ oobread += thisooblen;
+ oobbuf += thisooblen;
+ oobcolumn = 0;
+ }
+
+ read += thislen;
+ if (read == len)
+ break;
+
+ from += thislen;
+ buf += thislen;
+ }
+
+ /*
+ * Return success, if no ECC failures, else -EBADMSG
+ * fs driver will take care of that, because
+ * retlen == desired len and result == -EBADMSG
+ */
+ ops->retlen = read;
+ ops->oobretlen = oobread;
+
+ if (ret)
+ return ret;
+
+ if (mtd->ecc_stats.failed - stats.failed)
+ return -EBADMSG;
+
+ return mtd->ecc_stats.corrected - stats.corrected ? -EUCLEAN : 0;
+}
+
+/**
* onenand_read_ops_nolock - [OneNAND Interface] OneNAND read main and/or out-of-band
* @param mtd MTD device structure
* @param from offset to read from
@@ -962,7 +1271,7 @@
size_t len = ops->ooblen;
mtd_oob_mode_t mode = ops->mode;
u_char *buf = ops->oobbuf;
- int ret = 0;
+ int ret = 0, readcmd;
from += ops->ooboffs;
@@ -993,17 +1302,22 @@
stats = mtd->ecc_stats;
+ readcmd = ONENAND_IS_MLC(this) ? ONENAND_CMD_READ : ONENAND_CMD_READOOB;
+
while (read < len) {
cond_resched();
thislen = oobsize - column;
thislen = min_t(int, thislen, len);
- this->command(mtd, ONENAND_CMD_READOOB, from, mtd->oobsize);
+ this->command(mtd, readcmd, from, mtd->oobsize);
onenand_update_bufferram(mtd, from, 0);
ret = this->wait(mtd, FL_READING);
+ if (unlikely(ret))
+ ret = onenand_recover_lsb(mtd, from, ret);
+
if (ret && ret != -EBADMSG) {
printk(KERN_ERR "onenand_read_oob_nolock: read failed = 0x%x\n", ret);
break;
@@ -1053,6 +1367,7 @@
static int onenand_read(struct mtd_info *mtd, loff_t from, size_t len,
size_t *retlen, u_char *buf)
{
+ struct onenand_chip *this = mtd->priv;
struct mtd_oob_ops ops = {
.len = len,
.ooblen = 0,
@@ -1062,7 +1377,9 @@
int ret;
onenand_get_device(mtd, FL_READING);
- ret = onenand_read_ops_nolock(mtd, from, &ops);
+ ret = ONENAND_IS_MLC(this) ?
+ onenand_mlc_read_ops_nolock(mtd, from, &ops) :
+ onenand_read_ops_nolock(mtd, from, &ops);
onenand_release_device(mtd);
*retlen = ops.retlen;
@@ -1080,6 +1397,7 @@
static int onenand_read_oob(struct mtd_info *mtd, loff_t from,
struct mtd_oob_ops *ops)
{
+ struct onenand_chip *this = mtd->priv;
int ret;
switch (ops->mode) {
@@ -1094,7 +1412,9 @@
onenand_get_device(mtd, FL_READING);
if (ops->datbuf)
- ret = onenand_read_ops_nolock(mtd, from, ops);
+ ret = ONENAND_IS_MLC(this) ?
+ onenand_mlc_read_ops_nolock(mtd, from, ops) :
+ onenand_read_ops_nolock(mtd, from, ops);
else
ret = onenand_read_oob_nolock(mtd, from, ops);
onenand_release_device(mtd);
@@ -1128,11 +1448,11 @@
ctrl = this->read_word(this->base + ONENAND_REG_CTRL_STATUS);
if (interrupt & ONENAND_INT_READ) {
- int ecc = this->read_word(this->base + ONENAND_REG_ECC_STATUS);
+ int ecc = onenand_read_ecc(this);
if (ecc & ONENAND_ECC_2BIT_ALL) {
printk(KERN_INFO "onenand_bbt_wait: ecc error = 0x%04x"
", controller error 0x%04x\n", ecc, ctrl);
- return ONENAND_BBT_READ_ERROR;
+ return ONENAND_BBT_READ_ECC_ERROR;
}
} else {
printk(KERN_ERR "onenand_bbt_wait: read timeout!"
@@ -1163,7 +1483,7 @@
{
struct onenand_chip *this = mtd->priv;
int read = 0, thislen, column;
- int ret = 0;
+ int ret = 0, readcmd;
size_t len = ops->ooblen;
u_char *buf = ops->oobbuf;
@@ -1183,17 +1503,22 @@
column = from & (mtd->oobsize - 1);
+ readcmd = ONENAND_IS_MLC(this) ? ONENAND_CMD_READ : ONENAND_CMD_READOOB;
+
while (read < len) {
cond_resched();
thislen = mtd->oobsize - column;
thislen = min_t(int, thislen, len);
- this->command(mtd, ONENAND_CMD_READOOB, from, mtd->oobsize);
+ this->command(mtd, readcmd, from, mtd->oobsize);
onenand_update_bufferram(mtd, from, 0);
- ret = onenand_bbt_wait(mtd, FL_READING);
+ ret = this->bbt_wait(mtd, FL_READING);
+ if (unlikely(ret))
+ ret = onenand_recover_lsb(mtd, from, ret);
+
if (ret)
break;
@@ -1230,9 +1555,11 @@
{
struct onenand_chip *this = mtd->priv;
u_char *oob_buf = this->oob_buf;
- int status, i;
+ int status, i, readcmd;
- this->command(mtd, ONENAND_CMD_READOOB, to, mtd->oobsize);
+ readcmd = ONENAND_IS_MLC(this) ? ONENAND_CMD_READ : ONENAND_CMD_READOOB;
+
+ this->command(mtd, readcmd, to, mtd->oobsize);
onenand_update_bufferram(mtd, to, 0);
status = this->wait(mtd, FL_READING);
if (status)
@@ -1633,7 +1960,7 @@
{
struct onenand_chip *this = mtd->priv;
int column, ret = 0, oobsize;
- int written = 0;
+ int written = 0, oobcmd;
u_char *oobbuf;
size_t len = ops->ooblen;
const u_char *buf = ops->oobbuf;
@@ -1675,6 +2002,8 @@
oobbuf = this->oob_buf;
+ oobcmd = ONENAND_IS_MLC(this) ? ONENAND_CMD_PROG : ONENAND_CMD_PROGOOB;
+
/* Loop until all data write */
while (written < len) {
int thislen = min_t(int, oobsize, len - written);
@@ -1692,7 +2021,14 @@
memcpy(oobbuf + column, buf, thislen);
this->write_bufferram(mtd, ONENAND_SPARERAM, oobbuf, 0, mtd->oobsize);
- this->command(mtd, ONENAND_CMD_PROGOOB, to, mtd->oobsize);
+ if (ONENAND_IS_MLC(this)) {
+ /* Set main area of DataRAM to 0xff*/
+ memset(this->page_buf, 0xff, mtd->writesize);
+ this->write_bufferram(mtd, ONENAND_DATARAM,
+ this->page_buf, 0, mtd->writesize);
+ }
+
+ this->command(mtd, oobcmd, to, mtd->oobsize);
onenand_update_bufferram(mtd, to, 0);
if (ONENAND_IS_2PLANE(this)) {
@@ -1815,41 +2151,57 @@
{
struct onenand_chip *this = mtd->priv;
unsigned int block_size;
- loff_t addr;
- int len;
- int ret = 0;
+ loff_t addr = instr->addr;
+ loff_t len = instr->len;
+ int ret = 0, i;
+ struct mtd_erase_region_info *region = NULL;
+ loff_t region_end = 0;
DEBUG(MTD_DEBUG_LEVEL3, "onenand_erase: start = 0x%012llx, len = %llu\n", (unsigned long long) instr->addr, (unsigned long long) instr->len);
- block_size = (1 << this->erase_shift);
-
- /* Start address must align on block boundary */
- if (unlikely(instr->addr & (block_size - 1))) {
- printk(KERN_ERR "onenand_erase: Unaligned address\n");
+ /* Do not allow erase past end of device */
+ if (unlikely((len + addr) > mtd->size)) {
+ printk(KERN_ERR "onenand_erase: Erase past end of device\n");
return -EINVAL;
}
+ if (FLEXONENAND(this)) {
+ /* Find the eraseregion of this address */
+ i = flexonenand_region(mtd, addr);
+ region = &mtd->eraseregions[i];
+
+ block_size = region->erasesize;
+ region_end = region->offset + region->erasesize * region->numblocks;
+
+ /* Start address within region must align on block boundary.
+ * Erase region's start offset is always block start address.
+ */
+ if (unlikely((addr - region->offset) & (block_size - 1))) {
+ printk(KERN_ERR "onenand_erase: Unaligned address\n");
+ return -EINVAL;
+ }
+ } else {
+ block_size = 1 << this->erase_shift;
+
+ /* Start address must align on block boundary */
+ if (unlikely(addr & (block_size - 1))) {
+ printk(KERN_ERR "onenand_erase: Unaligned address\n");
+ return -EINVAL;
+ }
+ }
+
/* Length must align on block boundary */
- if (unlikely(instr->len & (block_size - 1))) {
+ if (unlikely(len & (block_size - 1))) {
printk(KERN_ERR "onenand_erase: Length not block aligned\n");
return -EINVAL;
}
- /* Do not allow erase past end of device */
- if (unlikely((instr->len + instr->addr) > mtd->size)) {
- printk(KERN_ERR "onenand_erase: Erase past end of device\n");
- return -EINVAL;
- }
-
instr->fail_addr = MTD_FAIL_ADDR_UNKNOWN;
/* Grab the lock and see if the device is available */
onenand_get_device(mtd, FL_ERASING);
/* Loop throught the pages */
- len = instr->len;
- addr = instr->addr;
-
instr->state = MTD_ERASING;
while (len) {
@@ -1869,7 +2221,8 @@
ret = this->wait(mtd, FL_ERASING);
/* Check, if it is write protected */
if (ret) {
- printk(KERN_ERR "onenand_erase: Failed erase, block %d\n", (unsigned) (addr >> this->erase_shift));
+ printk(KERN_ERR "onenand_erase: Failed erase, block %d\n",
+ onenand_block(this, addr));
instr->state = MTD_ERASE_FAILED;
instr->fail_addr = addr;
goto erase_exit;
@@ -1877,6 +2230,22 @@
len -= block_size;
addr += block_size;
+
+ if (addr == region_end) {
+ if (!len)
+ break;
+ region++;
+
+ block_size = region->erasesize;
+ region_end = region->offset + region->erasesize * region->numblocks;
+
+ if (len & (block_size - 1)) {
+ /* FIXME: This should be handled at MTD partitioning level. */
+ printk(KERN_ERR "onenand_erase: Unaligned address\n");
+ goto erase_exit;
+ }
+ }
+
}
instr->state = MTD_ERASE_DONE;
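A worked pass through the region handling above, with the same illustrative Flex-OneNAND geometry used earlier (four 256 KiB SLC blocks from offset 0, then 512 KiB MLC blocks): erasing addr = 512 KiB, len = 2.5 MiB first removes the two SLC blocks at 512 KiB and 768 KiB; when addr reaches region_end (1 MiB) the loop advances to the MLC region, re-checks that the remaining 2 MiB is aligned to the new 512 KiB block_size, and then erases four MLC blocks, finishing at 3 MiB.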
@@ -1955,13 +2324,17 @@
int block;
/* Get block number */
- block = ((int) ofs) >> bbm->bbt_erase_shift;
+ block = onenand_block(this, ofs);
if (bbm->bbt)
bbm->bbt[block >> 2] |= 0x01 << ((block & 0x03) << 1);
/* We write two bytes, so we dont have to mess with 16 bit access */
ofs += mtd->oobsize + (bbm->badblockpos & ~0x01);
- return onenand_write_oob_nolock(mtd, ofs, &ops);
+ /* FIXME : What to do when marking SLC block in partition
+ * with MLC erasesize? For now, it is not advisable to
+ * create partitions containing both SLC and MLC regions.
+ */
+ return onenand_write_oob_nolock(mtd, ofs, &ops);
}
/**
@@ -2005,8 +2378,8 @@
int start, end, block, value, status;
int wp_status_mask;
- start = ofs >> this->erase_shift;
- end = len >> this->erase_shift;
+ start = onenand_block(this, ofs);
+ end = onenand_block(this, ofs + len) - 1;
if (cmd == ONENAND_CMD_LOCK)
wp_status_mask = ONENAND_WP_LS;
@@ -2018,7 +2391,7 @@
/* Set start block address */
this->write_word(start, this->base + ONENAND_REG_START_BLOCK_ADDRESS);
/* Set end block address */
- this->write_word(start + end - 1, this->base + ONENAND_REG_END_BLOCK_ADDRESS);
+ this->write_word(end, this->base + ONENAND_REG_END_BLOCK_ADDRESS);
/* Write lock command */
this->command(mtd, cmd, 0, 0);
@@ -2039,7 +2412,7 @@
}
/* Block lock scheme */
- for (block = start; block < start + end; block++) {
+ for (block = start; block < end + 1; block++) {
/* Set block address */
value = onenand_block_address(this, block);
this->write_word(value, this->base + ONENAND_REG_START_ADDRESS1);
@@ -2147,7 +2520,7 @@
{
struct onenand_chip *this = mtd->priv;
loff_t ofs = 0;
- size_t len = this->chipsize;
+ loff_t len = mtd->size;
if (this->options & ONENAND_HAS_UNLOCK_ALL) {
/* Set start block address */
@@ -2163,12 +2536,16 @@
& ONENAND_CTRL_ONGO)
continue;
+ /* Don't check lock status */
+ if (this->options & ONENAND_SKIP_UNLOCK_CHECK)
+ return;
+
/* Check lock status */
if (onenand_check_lock_status(this))
return;
/* Workaround for all block unlock in DDP */
- if (ONENAND_IS_DDP(this)) {
+ if (ONENAND_IS_DDP(this) && !FLEXONENAND(this)) {
/* All blocks on another chip */
ofs = this->chipsize >> 1;
len = this->chipsize >> 1;
@@ -2210,7 +2587,9 @@
this->command(mtd, ONENAND_CMD_OTP_ACCESS, 0, 0);
this->wait(mtd, FL_OTPING);
- ret = onenand_read_ops_nolock(mtd, from, &ops);
+ ret = ONENAND_IS_MLC(this) ?
+ onenand_mlc_read_ops_nolock(mtd, from, &ops) :
+ onenand_read_ops_nolock(mtd, from, &ops);
/* Exit OTP access mode */
this->command(mtd, ONENAND_CMD_RESET, 0, 0);
@@ -2277,21 +2656,32 @@
size_t *retlen, u_char *buf)
{
struct onenand_chip *this = mtd->priv;
- struct mtd_oob_ops ops = {
- .mode = MTD_OOB_PLACE,
- .ooblen = len,
- .oobbuf = buf,
- .ooboffs = 0,
- };
+ struct mtd_oob_ops ops;
int ret;
/* Enter OTP access mode */
this->command(mtd, ONENAND_CMD_OTP_ACCESS, 0, 0);
this->wait(mtd, FL_OTPING);
- ret = onenand_write_oob_nolock(mtd, from, &ops);
-
- *retlen = ops.oobretlen;
+ if (FLEXONENAND(this)) {
+ /*
+ * For Flex-OneNAND, we write lock mark to 1st word of sector 4 of
+ * main area of page 49.
+ */
+ ops.len = mtd->writesize;
+ ops.ooblen = 0;
+ ops.datbuf = buf;
+ ops.oobbuf = NULL;
+ ret = onenand_write_ops_nolock(mtd, mtd->writesize * 49, &ops);
+ *retlen = ops.retlen;
+ } else {
+ ops.mode = MTD_OOB_PLACE;
+ ops.ooblen = len;
+ ops.oobbuf = buf;
+ ops.ooboffs = 0;
+ ret = onenand_write_oob_nolock(mtd, from, &ops);
+ *retlen = ops.oobretlen;
+ }
/* Exit OTP access mode */
this->command(mtd, ONENAND_CMD_RESET, 0, 0);
@@ -2475,27 +2865,34 @@
size_t len)
{
struct onenand_chip *this = mtd->priv;
- u_char *oob_buf = this->oob_buf;
+ u_char *buf = FLEXONENAND(this) ? this->page_buf : this->oob_buf;
size_t retlen;
int ret;
- memset(oob_buf, 0xff, mtd->oobsize);
+ memset(buf, 0xff, FLEXONENAND(this) ? this->writesize
+ : mtd->oobsize);
/*
* Note: OTP lock operation
* OTP block : 0xXXFC
* 1st block : 0xXXF3 (If chip support)
* Both : 0xXXF0 (If chip support)
*/
- oob_buf[ONENAND_OTP_LOCK_OFFSET] = 0xFC;
+ if (FLEXONENAND(this))
+ buf[FLEXONENAND_OTP_LOCK_OFFSET] = 0xFC;
+ else
+ buf[ONENAND_OTP_LOCK_OFFSET] = 0xFC;
/*
* Write lock mark to 8th word of sector0 of page0 of the spare0.
* We write 16 bytes spare area instead of 2 bytes.
+ * For Flex-OneNAND, we write lock mark to 1st word of sector 4 of
+ * main area of page 49.
*/
- from = 0;
- len = 16;
- ret = onenand_otp_walk(mtd, from, len, &retlen, oob_buf, do_otp_lock, MTD_OTP_USER);
+ from = 0;
+ len = FLEXONENAND(this) ? mtd->writesize : 16;
+
+ ret = onenand_otp_walk(mtd, from, len, &retlen, buf, do_otp_lock, MTD_OTP_USER);
return ret ? : retlen;
}
@@ -2542,6 +2939,14 @@
break;
}
+ if (ONENAND_IS_MLC(this))
+ this->options &= ~ONENAND_HAS_2PLANE;
+
+ if (FLEXONENAND(this)) {
+ this->options &= ~ONENAND_HAS_CONT_LOCK;
+ this->options |= ONENAND_HAS_UNLOCK_ALL;
+ }
+
if (this->options & ONENAND_HAS_CONT_LOCK)
printk(KERN_DEBUG "Lock scheme is Continuous Lock\n");
if (this->options & ONENAND_HAS_UNLOCK_ALL)
@@ -2559,14 +2964,16 @@
*/
static void onenand_print_device_info(int device, int version)
{
- int vcc, demuxed, ddp, density;
+ int vcc, demuxed, ddp, density, flexonenand;
vcc = device & ONENAND_DEVICE_VCC_MASK;
demuxed = device & ONENAND_DEVICE_IS_DEMUX;
ddp = device & ONENAND_DEVICE_IS_DDP;
density = onenand_get_density(device);
- printk(KERN_INFO "%sOneNAND%s %dMB %sV 16-bit (0x%02x)\n",
- demuxed ? "" : "Muxed ",
+ flexonenand = device & DEVICE_IS_FLEXONENAND;
+ printk(KERN_INFO "%s%sOneNAND%s %dMB %sV 16-bit (0x%02x)\n",
+ demuxed ? "" : "Muxed ",
+ flexonenand ? "Flex-" : "",
ddp ? "(DDP)" : "",
(16 << density),
vcc ? "2.65/3.3" : "1.8",
@@ -2576,6 +2983,7 @@
static const struct onenand_manufacturers onenand_manuf_ids[] = {
{ONENAND_MFR_SAMSUNG, "Samsung"},
+ {ONENAND_MFR_NUMONYX, "Numonyx"},
};
/**
@@ -2605,6 +3013,261 @@
}
/**
+ * flexonenand_get_boundary - Reads the SLC boundary
+ * @param mtd - MTD device structure
+ */
+static int flexonenand_get_boundary(struct mtd_info *mtd)
+{
+ struct onenand_chip *this = mtd->priv;
+ unsigned die, bdry;
+ int ret, syscfg, locked;
+
+ /* Disable ECC */
+ syscfg = this->read_word(this->base + ONENAND_REG_SYS_CFG1);
+ this->write_word((syscfg | 0x0100), this->base + ONENAND_REG_SYS_CFG1);
+
+ for (die = 0; die < this->dies; die++) {
+ this->command(mtd, FLEXONENAND_CMD_PI_ACCESS, die, 0);
+ this->wait(mtd, FL_SYNCING);
+
+ this->command(mtd, FLEXONENAND_CMD_READ_PI, die, 0);
+ ret = this->wait(mtd, FL_READING);
+
+ bdry = this->read_word(this->base + ONENAND_DATARAM);
+ if ((bdry >> FLEXONENAND_PI_UNLOCK_SHIFT) == 3)
+ locked = 0;
+ else
+ locked = 1;
+ this->boundary[die] = bdry & FLEXONENAND_PI_MASK;
+
+ this->command(mtd, ONENAND_CMD_RESET, 0, 0);
+ ret = this->wait(mtd, FL_RESETING);
+
+ printk(KERN_INFO "Die %d boundary: %d%s\n", die,
+ this->boundary[die], locked ? "(Locked)" : "(Unlocked)");
+ }
+
+ /* Enable ECC */
+ this->write_word(syscfg, this->base + ONENAND_REG_SYS_CFG1);
+ return 0;
+}
+
+/**
+ * flexonenand_get_size - Fill up fields in onenand_chip and mtd_info
+ * boundary[], diesize[], mtd->size, mtd->erasesize
+ * @param mtd - MTD device structure
+ */
+static void flexonenand_get_size(struct mtd_info *mtd)
+{
+ struct onenand_chip *this = mtd->priv;
+ int die, i, eraseshift, density;
+ int blksperdie, maxbdry;
+ loff_t ofs;
+
+ density = onenand_get_density(this->device_id);
+ blksperdie = ((loff_t)(16 << density) << 20) >> (this->erase_shift);
+ blksperdie >>= ONENAND_IS_DDP(this) ? 1 : 0;
+ maxbdry = blksperdie - 1;
+ eraseshift = this->erase_shift - 1;
+
+ mtd->numeraseregions = this->dies << 1;
+
+ /* This fills up the device boundary */
+ flexonenand_get_boundary(mtd);
+ die = ofs = 0;
+ i = -1;
+ for (; die < this->dies; die++) {
+ if (!die || this->boundary[die-1] != maxbdry) {
+ i++;
+ mtd->eraseregions[i].offset = ofs;
+ mtd->eraseregions[i].erasesize = 1 << eraseshift;
+ mtd->eraseregions[i].numblocks =
+ this->boundary[die] + 1;
+ ofs += mtd->eraseregions[i].numblocks << eraseshift;
+ eraseshift++;
+ } else {
+ mtd->numeraseregions -= 1;
+ mtd->eraseregions[i].numblocks +=
+ this->boundary[die] + 1;
+ ofs += (this->boundary[die] + 1) << (eraseshift - 1);
+ }
+ if (this->boundary[die] != maxbdry) {
+ i++;
+ mtd->eraseregions[i].offset = ofs;
+ mtd->eraseregions[i].erasesize = 1 << eraseshift;
+ mtd->eraseregions[i].numblocks = maxbdry ^
+ this->boundary[die];
+ ofs += mtd->eraseregions[i].numblocks << eraseshift;
+ eraseshift--;
+ } else
+ mtd->numeraseregions -= 1;
+ }
+
+ /* Expose MLC erase size except when all blocks are SLC */
+ mtd->erasesize = 1 << this->erase_shift;
+ if (mtd->numeraseregions == 1)
+ mtd->erasesize >>= 1;
+
+ printk(KERN_INFO "Device has %d eraseregions\n", mtd->numeraseregions);
+ for (i = 0; i < mtd->numeraseregions; i++)
+ printk(KERN_INFO "[offset: 0x%08x, erasesize: 0x%05x,"
+ " numblocks: %04u]\n",
+ (unsigned int) mtd->eraseregions[i].offset,
+ mtd->eraseregions[i].erasesize,
+ mtd->eraseregions[i].numblocks);
+
+ for (die = 0, mtd->size = 0; die < this->dies; die++) {
+ this->diesize[die] = (loff_t)blksperdie << this->erase_shift;
+ this->diesize[die] -= (loff_t)(this->boundary[die] + 1)
+ << (this->erase_shift - 1);
+ mtd->size += this->diesize[die];
+ }
+}
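To make the size arithmetic above concrete with illustrative numbers: a die of blksperdie = 1024 MLC blocks of 512 KiB has a maximum capacity of 512 MiB; with boundary = 3 (four 256 KiB SLC blocks) the loop yields diesize = 512 MiB - 4 x 256 KiB = 511 MiB, exposed as two erase regions: {offset 0, erasesize 256 KiB, numblocks 4} and {offset 1 MiB, erasesize 512 KiB, numblocks = maxbdry ^ boundary = 1023 ^ 3 = 1020}. The XOR works because maxbdry is all ones, so it equals maxbdry - boundary.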
+
+/**
+ * flexonenand_check_blocks_erased - Check if blocks are erased
+ * @param mtd - MTD device structure
+ * @param start - first erase block to check
+ * @param end - last erase block to check
+ *
+ * Converting an unerased block from MLC to SLC
+ * causes byte values to change. Since both data and its ECC
+ * have changed, reads on the block give uncorrectable error.
+ * This might lead to the block being detected as bad.
+ *
+ * Avoid this by ensuring that the block to be converted is
+ * erased.
+ */
+static int flexonenand_check_blocks_erased(struct mtd_info *mtd, int start, int end)
+{
+ struct onenand_chip *this = mtd->priv;
+ int i, ret;
+ int block;
+ struct mtd_oob_ops ops = {
+ .mode = MTD_OOB_PLACE,
+ .ooboffs = 0,
+ .ooblen = mtd->oobsize,
+ .datbuf = NULL,
+ .oobbuf = this->oob_buf,
+ };
+ loff_t addr;
+
+ printk(KERN_DEBUG "Check blocks from %d to %d\n", start, end);
+
+ for (block = start; block <= end; block++) {
+ addr = flexonenand_addr(this, block);
+ if (onenand_block_isbad_nolock(mtd, addr, 0))
+ continue;
+
+ /*
+ * Since main area write results in ECC write to spare,
+ * it is sufficient to check only ECC bytes for change.
+ */
+ ret = onenand_read_oob_nolock(mtd, addr, &ops);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < mtd->oobsize; i++)
+ if (this->oob_buf[i] != 0xff)
+ break;
+
+ if (i != mtd->oobsize) {
+ printk(KERN_WARNING "Block %d not erased.\n", block);
+ return 1;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * flexonenand_set_boundary - Writes the SLC boundary
+ * @param mtd - mtd info structure
+ */
+int flexonenand_set_boundary(struct mtd_info *mtd, int die,
+ int boundary, int lock)
+{
+ struct onenand_chip *this = mtd->priv;
+ int ret, density, blksperdie, old, new, thisboundary;
+ loff_t addr;
+
+ /* Change only once for SDP Flex-OneNAND */
+ if (die && (!ONENAND_IS_DDP(this)))
+ return 0;
+
+ /* boundary value of -1 indicates no required change */
+ if (boundary < 0 || boundary == this->boundary[die])
+ return 0;
+
+ density = onenand_get_density(this->device_id);
+ blksperdie = ((16 << density) << 20) >> this->erase_shift;
+ blksperdie >>= ONENAND_IS_DDP(this) ? 1 : 0;
+
+ if (boundary >= blksperdie) {
+ printk(KERN_ERR "flexonenand_set_boundary: Invalid boundary value. "
+ "Boundary not changed.\n");
+ return -EINVAL;
+ }
+
+ /* Check if converting blocks are erased */
+ old = this->boundary[die] + (die * this->density_mask);
+ new = boundary + (die * this->density_mask);
+ ret = flexonenand_check_blocks_erased(mtd, min(old, new) + 1, max(old, new));
+ if (ret) {
+ printk(KERN_ERR "flexonenand_set_boundary: Please erase blocks before boundary change\n");
+ return ret;
+ }
+
+ this->command(mtd, FLEXONENAND_CMD_PI_ACCESS, die, 0);
+ this->wait(mtd, FL_SYNCING);
+
+ /* Check if boundary is locked */
+ this->command(mtd, FLEXONENAND_CMD_READ_PI, die, 0);
+ ret = this->wait(mtd, FL_READING);
+
+ thisboundary = this->read_word(this->base + ONENAND_DATARAM);
+ if ((thisboundary >> FLEXONENAND_PI_UNLOCK_SHIFT) != 3) {
+ printk(KERN_ERR "flexonenand_set_boundary: boundary locked\n");
+ ret = 1;
+ goto out;
+ }
+
+ printk(KERN_INFO "flexonenand_set_boundary: Changing die %d boundary: %d%s\n",
+ die, boundary, lock ? "(Locked)" : "(Unlocked)");
+
+ addr = die ? this->diesize[0] : 0;
+
+ boundary &= FLEXONENAND_PI_MASK;
+ boundary |= lock ? 0 : (3 << FLEXONENAND_PI_UNLOCK_SHIFT);
+
+ this->command(mtd, ONENAND_CMD_ERASE, addr, 0);
+ ret = this->wait(mtd, FL_ERASING);
+ if (ret) {
+ printk(KERN_ERR "flexonenand_set_boundary: Failed PI erase for Die %d\n", die);
+ goto out;
+ }
+
+ this->write_word(boundary, this->base + ONENAND_DATARAM);
+ this->command(mtd, ONENAND_CMD_PROG, addr, 0);
+ ret = this->wait(mtd, FL_WRITING);
+ if (ret) {
+ printk(KERN_ERR "flexonenand_set_boundary: Failed PI write for Die %d\n", die);
+ goto out;
+ }
+
+ this->command(mtd, FLEXONENAND_CMD_PI_UPDATE, die, 0);
+ ret = this->wait(mtd, FL_WRITING);
+out:
+ this->write_word(ONENAND_CMD_RESET, this->base + ONENAND_REG_COMMAND);
+ this->wait(mtd, FL_RESETING);
+ if (!ret)
+ /* Recalculate device size on boundary change */
+ flexonenand_get_size(mtd);
+
+ return ret;
+}
+
+/**
* onenand_probe - [OneNAND Interface] Probe the OneNAND device
* @param mtd MTD device structure
*
@@ -2621,7 +3284,7 @@
/* Save system configuration 1 */
syscfg = this->read_word(this->base + ONENAND_REG_SYS_CFG1);
/* Clear Sync. Burst Read mode to read BootRAM */
- this->write_word((syscfg & ~ONENAND_SYS_CFG1_SYNC_READ), this->base + ONENAND_REG_SYS_CFG1);
+ this->write_word((syscfg & ~ONENAND_SYS_CFG1_SYNC_READ & ~ONENAND_SYS_CFG1_SYNC_WRITE), this->base + ONENAND_REG_SYS_CFG1);
/* Send the command for reading device ID from BootRAM */
this->write_word(ONENAND_CMD_READID, this->base + ONENAND_BOOTRAM);
@@ -2646,6 +3309,7 @@
maf_id = this->read_word(this->base + ONENAND_REG_MANUFACTURER_ID);
dev_id = this->read_word(this->base + ONENAND_REG_DEVICE_ID);
ver_id = this->read_word(this->base + ONENAND_REG_VERSION_ID);
+ this->technology = this->read_word(this->base + ONENAND_REG_TECHNOLOGY);
/* Check OneNAND device */
if (maf_id != bram_maf_id || dev_id != bram_dev_id)
@@ -2657,29 +3321,55 @@
this->version_id = ver_id;
density = onenand_get_density(dev_id);
+ if (FLEXONENAND(this)) {
+ this->dies = ONENAND_IS_DDP(this) ? 2 : 1;
+ /* Maximum possible erase regions */
+ mtd->numeraseregions = this->dies << 1;
+ mtd->eraseregions = kzalloc(sizeof(struct mtd_erase_region_info)
+ * (this->dies << 1), GFP_KERNEL);
+ if (!mtd->eraseregions)
+ return -ENOMEM;
+ }
+
+ /*
+ * For Flex-OneNAND, chipsize represents maximum possible device size.
+ * mtd->size represents the actual device size.
+ */
this->chipsize = (16 << density) << 20;
- /* Set density mask. it is used for DDP */
- if (ONENAND_IS_DDP(this))
- this->density_mask = (1 << (density + 6));
- else
- this->density_mask = 0;
/* OneNAND page size & block size */
/* The data buffer size is equal to page size */
mtd->writesize = this->read_word(this->base + ONENAND_REG_DATA_BUFFER_SIZE);
+ /* We use the full BufferRAM */
+ if (ONENAND_IS_MLC(this))
+ mtd->writesize <<= 1;
+
mtd->oobsize = mtd->writesize >> 5;
/* Pages per a block are always 64 in OneNAND */
mtd->erasesize = mtd->writesize << 6;
+ /*
+ * Flex-OneNAND SLC area has 64 pages per block.
+ * Flex-OneNAND MLC area has 128 pages per block.
+ * Expose MLC erase size to find erase_shift and page_mask.
+ */
+ if (FLEXONENAND(this))
+ mtd->erasesize <<= 1;
this->erase_shift = ffs(mtd->erasesize) - 1;
this->page_shift = ffs(mtd->writesize) - 1;
this->page_mask = (1 << (this->erase_shift - this->page_shift)) - 1;
+ /* Set density mask. it is used for DDP */
+ if (ONENAND_IS_DDP(this))
+ this->density_mask = this->chipsize >> (this->erase_shift + 1);
/* It's real page size */
this->writesize = mtd->writesize;
/* REVISIT: Multichip handling */
- mtd->size = this->chipsize;
+ if (FLEXONENAND(this))
+ flexonenand_get_size(mtd);
+ else
+ mtd->size = this->chipsize;
/* Check OneNAND features */
onenand_check_features(mtd);
@@ -2734,7 +3424,7 @@
*/
int onenand_scan(struct mtd_info *mtd, int maxchips)
{
- int i;
+ int i, ret;
struct onenand_chip *this = mtd->priv;
if (!this->read_word)
@@ -2746,6 +3436,10 @@
this->command = onenand_command;
if (!this->wait)
onenand_setup_wait(mtd);
+ if (!this->bbt_wait)
+ this->bbt_wait = onenand_bbt_wait;
+ if (!this->unlock_all)
+ this->unlock_all = onenand_unlock_all;
if (!this->read_bufferram)
this->read_bufferram = onenand_read_bufferram;
@@ -2796,6 +3490,10 @@
* Allow subpage writes up to oobsize.
*/
switch (mtd->oobsize) {
+ case 128:
+ this->ecclayout = &onenand_oob_128;
+ mtd->subpage_sft = 0;
+ break;
case 64:
this->ecclayout = &onenand_oob_64;
mtd->subpage_sft = 2;
@@ -2859,9 +3557,18 @@
mtd->owner = THIS_MODULE;
/* Unlock whole block */
- onenand_unlock_all(mtd);
+ this->unlock_all(mtd);
- return this->scan_bbt(mtd);
+ ret = this->scan_bbt(mtd);
+ if ((!FLEXONENAND(this)) || ret)
+ return ret;
+
+ /* Change Flex-OneNAND boundaries if required */
+ for (i = 0; i < MAX_DIES; i++)
+ flexonenand_set_boundary(mtd, i, flex_bdry[2 * i],
+ flex_bdry[(2 * i) + 1]);
+
+ return 0;
}
/**
@@ -2890,6 +3597,7 @@
kfree(this->page_buf);
if (this->options & ONENAND_OOBBUF_ALLOC)
kfree(this->oob_buf);
+ kfree(mtd->eraseregions);
}
EXPORT_SYMBOL_GPL(onenand_scan);
diff --git a/drivers/mtd/onenand/onenand_bbt.c b/drivers/mtd/onenand/onenand_bbt.c
index 2f53b51..a91fcac 100644
--- a/drivers/mtd/onenand/onenand_bbt.c
+++ b/drivers/mtd/onenand/onenand_bbt.c
@@ -63,6 +63,7 @@
loff_t from;
size_t readlen, ooblen;
struct mtd_oob_ops ops;
+ int rgn;
printk(KERN_INFO "Scanning device for bad blocks\n");
@@ -76,7 +77,7 @@
/* Note that numblocks is 2 * (real numblocks) here;
* see i += 2 below as it makes shifting and masking less painful
*/
- numblocks = mtd->size >> (bbm->bbt_erase_shift - 1);
+ numblocks = this->chipsize >> (bbm->bbt_erase_shift - 1);
startblock = 0;
from = 0;
@@ -106,7 +107,12 @@
}
}
i += 2;
- from += (1 << bbm->bbt_erase_shift);
+
+ if (FLEXONENAND(this)) {
+ rgn = flexonenand_region(mtd, from);
+ from += mtd->eraseregions[rgn].erasesize;
+ } else
+ from += (1 << bbm->bbt_erase_shift);
}
return 0;
@@ -143,7 +149,7 @@
uint8_t res;
/* Get block number * 2 */
- block = (int) (offs >> (bbm->bbt_erase_shift - 1));
+ block = (int) (onenand_block(this, offs) << 1);
res = (bbm->bbt[block >> 3] >> (block & 0x06)) & 0x03;
DEBUG(MTD_DEBUG_LEVEL2, "onenand_isbad_bbt: bbt info for offs 0x%08x: (block %d) 0x%02x\n",
@@ -178,7 +184,7 @@
struct bbm_info *bbm = this->bbm;
int len, ret = 0;
- len = mtd->size >> (this->erase_shift + 2);
+ len = this->chipsize >> (this->erase_shift + 2);
/* Allocate memory (2bit per block) and clear the memory bad block table */
bbm->bbt = kzalloc(len, GFP_KERNEL);
if (!bbm->bbt) {
diff --git a/drivers/mtd/onenand/onenand_sim.c b/drivers/mtd/onenand/onenand_sim.c
index d64200b..f6e3c8a 100644
--- a/drivers/mtd/onenand/onenand_sim.c
+++ b/drivers/mtd/onenand/onenand_sim.c
@@ -6,6 +6,10 @@
* Copyright © 2005-2007 Samsung Electronics
* Kyungmin Park <kyungmin.park@samsung.com>
*
+ * Vishak G <vishak.g at samsung.com>, Rohit Hagargundgi <h.rohit at samsung.com>
+ * Flex-OneNAND simulator support
+ * Copyright (C) Samsung Electronics, 2008
+ *
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
@@ -24,16 +28,38 @@
#ifndef CONFIG_ONENAND_SIM_MANUFACTURER
#define CONFIG_ONENAND_SIM_MANUFACTURER 0xec
#endif
+
#ifndef CONFIG_ONENAND_SIM_DEVICE_ID
#define CONFIG_ONENAND_SIM_DEVICE_ID 0x04
#endif
+
+#define CONFIG_FLEXONENAND ((CONFIG_ONENAND_SIM_DEVICE_ID >> 9) & 1)
+
#ifndef CONFIG_ONENAND_SIM_VERSION_ID
#define CONFIG_ONENAND_SIM_VERSION_ID 0x1e
#endif
+#ifndef CONFIG_ONENAND_SIM_TECHNOLOGY_ID
+#define CONFIG_ONENAND_SIM_TECHNOLOGY_ID CONFIG_FLEXONENAND
+#endif
+
+/* Initial boundary values for Flex-OneNAND Simulator */
+#ifndef CONFIG_FLEXONENAND_SIM_DIE0_BOUNDARY
+#define CONFIG_FLEXONENAND_SIM_DIE0_BOUNDARY 0x01
+#endif
+
+#ifndef CONFIG_FLEXONENAND_SIM_DIE1_BOUNDARY
+#define CONFIG_FLEXONENAND_SIM_DIE1_BOUNDARY 0x01
+#endif
+
static int manuf_id = CONFIG_ONENAND_SIM_MANUFACTURER;
static int device_id = CONFIG_ONENAND_SIM_DEVICE_ID;
static int version_id = CONFIG_ONENAND_SIM_VERSION_ID;
+static int technology_id = CONFIG_ONENAND_SIM_TECHNOLOGY_ID;
+static int boundary[] = {
+ CONFIG_FLEXONENAND_SIM_DIE0_BOUNDARY,
+ CONFIG_FLEXONENAND_SIM_DIE1_BOUNDARY,
+};
struct onenand_flash {
void __iomem *base;
@@ -57,12 +83,18 @@
(writew(v, this->base + ONENAND_REG_WP_STATUS))
/* It has all 0xff chars */
-#define MAX_ONENAND_PAGESIZE (2048 + 64)
+#define MAX_ONENAND_PAGESIZE (4096 + 128)
static unsigned char *ffchars;
+#if CONFIG_FLEXONENAND
+#define PARTITION_NAME "Flex-OneNAND simulator partition"
+#else
+#define PARTITION_NAME "OneNAND simulator partition"
+#endif
+
static struct mtd_partition os_partitions[] = {
{
- .name = "OneNAND simulator partition",
+ .name = PARTITION_NAME,
.offset = 0,
.size = MTDPART_SIZ_FULL,
},
@@ -104,6 +136,7 @@
switch (cmd) {
case ONENAND_CMD_UNLOCK:
+ case ONENAND_CMD_UNLOCK_ALL:
if (block_lock_scheme)
ONENAND_SET_WP_STATUS(ONENAND_WP_US, this);
else
@@ -228,10 +261,12 @@
{
struct mtd_info *mtd = &info->mtd;
struct onenand_flash *flash = this->priv;
- int main_offset, spare_offset;
+ int main_offset, spare_offset, die = 0;
void __iomem *src;
void __iomem *dest;
unsigned int i;
+ static int pi_operation;
+ int erasesize, rgn;
if (dataram) {
main_offset = mtd->writesize;
@@ -241,10 +276,27 @@
spare_offset = 0;
}
+ if (pi_operation) {
+ die = readw(this->base + ONENAND_REG_START_ADDRESS2);
+ die >>= ONENAND_DDP_SHIFT;
+ }
+
switch (cmd) {
+ case FLEXONENAND_CMD_PI_ACCESS:
+ pi_operation = 1;
+ break;
+
+ case ONENAND_CMD_RESET:
+ pi_operation = 0;
+ break;
+
case ONENAND_CMD_READ:
src = ONENAND_CORE(flash) + offset;
dest = ONENAND_MAIN_AREA(this, main_offset);
+ if (pi_operation) {
+ writew(boundary[die], this->base + ONENAND_DATARAM);
+ break;
+ }
memcpy(dest, src, mtd->writesize);
/* Fall through */
@@ -257,6 +309,10 @@
case ONENAND_CMD_PROG:
src = ONENAND_MAIN_AREA(this, main_offset);
dest = ONENAND_CORE(flash) + offset;
+ if (pi_operation) {
+ boundary[die] = readw(this->base + ONENAND_DATARAM);
+ break;
+ }
/* To handle partial write */
for (i = 0; i < (1 << mtd->subpage_sft); i++) {
int off = i * this->subpagesize;
@@ -284,9 +340,18 @@
break;
case ONENAND_CMD_ERASE:
- memset(ONENAND_CORE(flash) + offset, 0xff, mtd->erasesize);
+ if (pi_operation)
+ break;
+
+ if (FLEXONENAND(this)) {
+ rgn = flexonenand_region(mtd, offset);
+ erasesize = mtd->eraseregions[rgn].erasesize;
+ } else
+ erasesize = mtd->erasesize;
+
+ memset(ONENAND_CORE(flash) + offset, 0xff, erasesize);
memset(ONENAND_CORE_SPARE(flash, this, offset), 0xff,
- (mtd->erasesize >> 5));
+ (erasesize >> 5));
break;
default:
@@ -339,7 +404,7 @@
}
if (block != -1)
- offset += block << this->erase_shift;
+ offset = onenand_addr(this, block);
if (page != -1)
offset += page << this->page_shift;
@@ -390,6 +455,7 @@
}
density = device_id >> ONENAND_DEVICE_DENSITY_SHIFT;
+ density &= ONENAND_DEVICE_DENSITY_MASK;
size = ((16 << 20) << density);
ONENAND_CORE(flash) = vmalloc(size + (size >> 5));
@@ -405,8 +471,9 @@
writew(manuf_id, flash->base + ONENAND_REG_MANUFACTURER_ID);
writew(device_id, flash->base + ONENAND_REG_DEVICE_ID);
writew(version_id, flash->base + ONENAND_REG_VERSION_ID);
+ writew(technology_id, flash->base + ONENAND_REG_TECHNOLOGY);
- if (density < 2)
+ if (density < 2 && (!CONFIG_FLEXONENAND))
buffer_size = 0x0400; /* 1KiB page */
else
buffer_size = 0x0800; /* 2KiB page */
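The simulator now decodes more of the device ID: bit 9 selects Flex-OneNAND (the CONFIG_FLEXONENAND macro above) and the masked density field scales the simulated 16 MiB base array. A standalone sketch of that decoding follows; only the bit-9 test is taken from the patch, while the density shift and mask values are assumptions used for illustration.

/* Sketch: interpreting a OneNAND device ID the way the simulator does.
 * The density shift/mask below are illustrative assumptions. */
#include <stdio.h>
#include <stdint.h>

#define DEVICE_DENSITY_SHIFT 4          /* assumed field position */
#define DEVICE_DENSITY_MASK  0xf        /* assumed field width */
#define FLEXONENAND_BIT      9          /* matches CONFIG_FLEXONENAND above */

int main(void)
{
        uint16_t device_id = 0x0240;    /* hypothetical ID with bit 9 set */
        int flex = (device_id >> FLEXONENAND_BIT) & 1;
        unsigned int density = (device_id >> DEVICE_DENSITY_SHIFT) & DEVICE_DENSITY_MASK;
        size_t core_size = (size_t)(16 << 20) << density;

        printf("flex=%d density=%u core=%zu MiB\n", flex, density, core_size >> 20);
        return 0;
}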
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 892a9e4..c155bd3 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -1725,6 +1725,7 @@
config KS8842
tristate "Micrel KSZ8842"
+ depends on HAS_IOMEM
help
This platform driver is for Micrel KSZ8842 chip.
@@ -2443,6 +2444,17 @@
To compile this driver as a module, choose M here. The module
will be called jme.
+config S6GMAC
+ tristate "S6105 GMAC ethernet support"
+ depends on XTENSA_VARIANT_S6000
+ select PHYLIB
+ help
+ This driver supports the on chip ethernet device on the
+ S6105 xtensa processor.
+
+ To compile this driver as a module, choose M here. The module
+ will be called s6gmac.
+
endif # NETDEV_1000
#
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index d366fb2..4b58a59 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -245,6 +245,7 @@
obj-$(CONFIG_DNET) += dnet.o
obj-$(CONFIG_MACB) += macb.o
+obj-$(CONFIG_S6GMAC) += s6gmac.o
obj-$(CONFIG_ARM) += arm/
obj-$(CONFIG_DEV_APPLETALK) += appletalk/
diff --git a/drivers/net/benet/be.h b/drivers/net/benet/be.h
index b4bb06f..f703758 100644
--- a/drivers/net/benet/be.h
+++ b/drivers/net/benet/be.h
@@ -65,7 +65,7 @@
#define TX_CQ_LEN 1024
#define RX_Q_LEN 1024 /* Does not support any other value */
#define RX_CQ_LEN 1024
-#define MCC_Q_LEN 64 /* total size not to exceed 8 pages */
+#define MCC_Q_LEN 128 /* total size not to exceed 8 pages */
#define MCC_CQ_LEN 256
#define BE_NAPI_WEIGHT 64
@@ -91,6 +91,61 @@
atomic_t used; /* Number of valid elements in the queue */
};
+static inline u32 MODULO(u16 val, u16 limit)
+{
+ BUG_ON(limit & (limit - 1));
+ return val & (limit - 1);
+}
+
+static inline void index_adv(u16 *index, u16 val, u16 limit)
+{
+ *index = MODULO((*index + val), limit);
+}
+
+static inline void index_inc(u16 *index, u16 limit)
+{
+ *index = MODULO((*index + 1), limit);
+}
+
+static inline void *queue_head_node(struct be_queue_info *q)
+{
+ return q->dma_mem.va + q->head * q->entry_size;
+}
+
+static inline void *queue_tail_node(struct be_queue_info *q)
+{
+ return q->dma_mem.va + q->tail * q->entry_size;
+}
+
+static inline void queue_head_inc(struct be_queue_info *q)
+{
+ index_inc(&q->head, q->len);
+}
+
+static inline void queue_tail_inc(struct be_queue_info *q)
+{
+ index_inc(&q->tail, q->len);
+}
+
+
+struct be_eq_obj {
+ struct be_queue_info q;
+ char desc[32];
+
+ /* Adaptive interrupt coalescing (AIC) info */
+ bool enable_aic;
+ u16 min_eqd; /* in usecs */
+ u16 max_eqd; /* in usecs */
+ u16 cur_eqd; /* in usecs */
+
+ struct napi_struct napi;
+};
+
+struct be_mcc_obj {
+ struct be_queue_info q;
+ struct be_queue_info cq;
+};
+
struct be_ctrl_info {
u8 __iomem *csr;
u8 __iomem *db; /* Door Bell */
@@ -98,11 +153,20 @@
int pci_func;
/* Mbox used for cmd request/response */
- spinlock_t cmd_lock; /* For serializing cmds to BE card */
+ spinlock_t mbox_lock; /* For serializing mbox cmds to BE card */
struct be_dma_mem mbox_mem;
/* Mbox mem is adjusted to align to 16 bytes. The allocated addr
* is stored for freeing purpose */
struct be_dma_mem mbox_mem_alloced;
+
+ /* MCC Rings */
+ struct be_mcc_obj mcc_obj;
+ spinlock_t mcc_lock; /* For serializing mcc cmds to BE card */
+ spinlock_t mcc_cq_lock;
+
+ /* MCC Async callback */
+ void (*async_cb)(void *adapter, bool link_up);
+ void *adapter_ctxt;
};
#include "be_cmds.h"
@@ -150,19 +214,6 @@
struct be_dma_mem cmd;
};
-struct be_eq_obj {
- struct be_queue_info q;
- char desc[32];
-
- /* Adaptive interrupt coalescing (AIC) info */
- bool enable_aic;
- u16 min_eqd; /* in usecs */
- u16 max_eqd; /* in usecs */
- u16 cur_eqd; /* in usecs */
-
- struct napi_struct napi;
-};
-
struct be_tx_obj {
struct be_queue_info q;
struct be_queue_info cq;
@@ -225,8 +276,9 @@
u32 if_handle; /* Used to configure filtering */
u32 pmac_id; /* MAC addr handle used by BE card */
- struct be_link_info link;
+ bool link_up;
u32 port_num;
+ bool promiscuous;
};
extern struct ethtool_ops be_ethtool_ops;
@@ -235,22 +287,6 @@
#define BE_SET_NETDEV_OPS(netdev, ops) (netdev->netdev_ops = ops)
-static inline u32 MODULO(u16 val, u16 limit)
-{
- BUG_ON(limit & (limit - 1));
- return val & (limit - 1);
-}
-
-static inline void index_adv(u16 *index, u16 val, u16 limit)
-{
- *index = MODULO((*index + val), limit);
-}
-
-static inline void index_inc(u16 *index, u16 limit)
-{
- *index = MODULO((*index + 1), limit);
-}
-
#define PAGE_SHIFT_4K 12
#define PAGE_SIZE_4K (1 << PAGE_SHIFT_4K)
@@ -339,4 +375,6 @@
return val;
}
+extern void be_cq_notify(struct be_ctrl_info *ctrl, u16 qid, bool arm,
+ u16 num_popped);
#endif /* BE_H */
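The queue helpers that moved into be.h wrap ring indices with a bitwise AND, which only equals a true modulo when the ring length is a power of two; that is what the BUG_ON(limit & (limit - 1)) enforces. A minimal standalone sketch of the same idiom:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t ring_modulo(uint16_t val, uint16_t limit)
{
        assert((limit & (limit - 1)) == 0);     /* limit must be a power of two */
        return val & (limit - 1);               /* same result as val % limit */
}

static void ring_index_inc(uint16_t *index, uint16_t limit)
{
        *index = ring_modulo(*index + 1, limit);
}

int main(void)
{
        uint16_t head = 0;
        int i;

        for (i = 0; i < 130; i++)
                ring_index_inc(&head, 128);     /* MCC_Q_LEN above is 128 */
        printf("head after 130 increments on a 128-entry ring: %u\n", head);
        return 0;
}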
diff --git a/drivers/net/benet/be_cmds.c b/drivers/net/benet/be_cmds.c
index d444aed..583517e 100644
--- a/drivers/net/benet/be_cmds.c
+++ b/drivers/net/benet/be_cmds.c
@@ -17,6 +17,133 @@
#include "be.h"
+static void be_mcc_notify(struct be_ctrl_info *ctrl)
+{
+ struct be_queue_info *mccq = &ctrl->mcc_obj.q;
+ u32 val = 0;
+
+ val |= mccq->id & DB_MCCQ_RING_ID_MASK;
+ val |= 1 << DB_MCCQ_NUM_POSTED_SHIFT;
+ iowrite32(val, ctrl->db + DB_MCCQ_OFFSET);
+}
+
+/* To check if valid bit is set, check the entire word as we don't know
+ * the endianness of the data (old entry is host endian while a new entry is
+ * little endian) */
+static inline bool be_mcc_compl_is_new(struct be_mcc_cq_entry *compl)
+{
+ if (compl->flags != 0) {
+ compl->flags = le32_to_cpu(compl->flags);
+ BUG_ON((compl->flags & CQE_FLAGS_VALID_MASK) == 0);
+ return true;
+ } else {
+ return false;
+ }
+}
+
+/* Need to reset the entire word that houses the valid bit */
+static inline void be_mcc_compl_use(struct be_mcc_cq_entry *compl)
+{
+ compl->flags = 0;
+}
+
+static int be_mcc_compl_process(struct be_ctrl_info *ctrl,
+ struct be_mcc_cq_entry *compl)
+{
+ u16 compl_status, extd_status;
+
+ /* Just swap the status to host endian; mcc tag is opaquely copied
+ * from mcc_wrb */
+ be_dws_le_to_cpu(compl, 4);
+
+ compl_status = (compl->status >> CQE_STATUS_COMPL_SHIFT) &
+ CQE_STATUS_COMPL_MASK;
+ if (compl_status != MCC_STATUS_SUCCESS) {
+ extd_status = (compl->status >> CQE_STATUS_EXTD_SHIFT) &
+ CQE_STATUS_EXTD_MASK;
+ printk(KERN_WARNING DRV_NAME
+ " error in cmd completion: status(compl/extd)=%d/%d\n",
+ compl_status, extd_status);
+ return -1;
+ }
+ return 0;
+}
+
+/* Link state evt is a string of bytes; no need for endian swapping */
+static void be_async_link_state_process(struct be_ctrl_info *ctrl,
+ struct be_async_event_link_state *evt)
+{
+ ctrl->async_cb(ctrl->adapter_ctxt,
+ evt->port_link_status == ASYNC_EVENT_LINK_UP ? true : false);
+}
+
+static inline bool is_link_state_evt(u32 trailer)
+{
+ return (((trailer >> ASYNC_TRAILER_EVENT_CODE_SHIFT) &
+ ASYNC_TRAILER_EVENT_CODE_MASK) ==
+ ASYNC_EVENT_CODE_LINK_STATE);
+}
+
+static struct be_mcc_cq_entry *be_mcc_compl_get(struct be_ctrl_info *ctrl)
+{
+ struct be_queue_info *mcc_cq = &ctrl->mcc_obj.cq;
+ struct be_mcc_cq_entry *compl = queue_tail_node(mcc_cq);
+
+ if (be_mcc_compl_is_new(compl)) {
+ queue_tail_inc(mcc_cq);
+ return compl;
+ }
+ return NULL;
+}
+
+void be_process_mcc(struct be_ctrl_info *ctrl)
+{
+ struct be_mcc_cq_entry *compl;
+ int num = 0;
+
+ spin_lock_bh(&ctrl->mcc_cq_lock);
+ while ((compl = be_mcc_compl_get(ctrl))) {
+ if (compl->flags & CQE_FLAGS_ASYNC_MASK) {
+ /* Interpret flags as an async trailer */
+ BUG_ON(!is_link_state_evt(compl->flags));
+
+ /* Interpret compl as an async link evt */

+ be_async_link_state_process(ctrl,
+ (struct be_async_event_link_state *) compl);
+ } else {
+ be_mcc_compl_process(ctrl, compl);
+ atomic_dec(&ctrl->mcc_obj.q.used);
+ }
+ be_mcc_compl_use(compl);
+ num++;
+ }
+ if (num)
+ be_cq_notify(ctrl, ctrl->mcc_obj.cq.id, true, num);
+ spin_unlock_bh(&ctrl->mcc_cq_lock);
+}
+
+/* Wait till no more pending mcc requests are present */
+static void be_mcc_wait_compl(struct be_ctrl_info *ctrl)
+{
+#define mcc_timeout 50000 /* 5s timeout */
+ int i;
+ for (i = 0; i < mcc_timeout; i++) {
+ be_process_mcc(ctrl);
+ if (atomic_read(&ctrl->mcc_obj.q.used) == 0)
+ break;
+ udelay(100);
+ }
+ if (i == mcc_timeout)
+ printk(KERN_WARNING DRV_NAME "mcc poll timed out\n");
+}
+
+/* Notify MCC requests and wait for completion */
+static void be_mcc_notify_wait(struct be_ctrl_info *ctrl)
+{
+ be_mcc_notify(ctrl);
+ be_mcc_wait_compl(ctrl);
+}
+
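be_mcc_wait_compl() above is a bounded busy-poll: up to 50000 iterations with a 100us delay each, roughly five seconds, before it gives up with a warning. Reduced to a standalone sketch with stand-in state (the names here are illustrative, not driver symbols):

#include <stdbool.h>
#include <stdio.h>

#define POLL_BUDGET 50000                       /* 50000 * 100us ~= 5s, as in the patch */

static int pending = 3;                         /* stand-in for atomic_read(&q->used) */

static void process_completions(void)
{
        if (pending)
                pending--;                      /* stand-in for be_process_mcc() */
}

static bool wait_for_drain(void)
{
        int i;

        for (i = 0; i < POLL_BUDGET; i++) {
                process_completions();
                if (pending == 0)
                        return true;
                /* the driver does udelay(100) here */
        }
        return false;                           /* timed out; the driver logs a warning */
}

int main(void)
{
        printf("drained: %d\n", wait_for_drain());
        return 0;
}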
static int be_mbox_db_ready_wait(void __iomem *db)
{
int cnt = 0, wait = 5;
@@ -44,11 +171,11 @@
/*
* Insert the mailbox address into the doorbell in two steps
+ * Polls on the mbox doorbell till a command completion (or a timeout) occurs
*/
static int be_mbox_db_ring(struct be_ctrl_info *ctrl)
{
int status;
- u16 compl_status, extd_status;
u32 val = 0;
void __iomem *db = ctrl->db + MPU_MAILBOX_DB_OFFSET;
struct be_dma_mem *mbox_mem = &ctrl->mbox_mem;
@@ -79,24 +206,17 @@
if (status != 0)
return status;
- /* compl entry has been made now */
- be_dws_le_to_cpu(cqe, sizeof(*cqe));
- if (!(cqe->flags & CQE_FLAGS_VALID_MASK)) {
- printk(KERN_WARNING DRV_NAME ": ERROR invalid mbox compl\n");
+ /* A cq entry has been made now */
+ if (be_mcc_compl_is_new(cqe)) {
+ status = be_mcc_compl_process(ctrl, &mbox->cqe);
+ be_mcc_compl_use(cqe);
+ if (status)
+ return status;
+ } else {
+ printk(KERN_WARNING DRV_NAME "invalid mailbox completion\n");
return -1;
}
-
- compl_status = (cqe->status >> CQE_STATUS_COMPL_SHIFT) &
- CQE_STATUS_COMPL_MASK;
- if (compl_status != MCC_STATUS_SUCCESS) {
- extd_status = (cqe->status >> CQE_STATUS_EXTD_SHIFT) &
- CQE_STATUS_EXTD_MASK;
- printk(KERN_WARNING DRV_NAME
- ": ERROR in cmd compl. status(compl/extd)=%d/%d\n",
- compl_status, extd_status);
- }
-
- return compl_status;
+ return 0;
}
static int be_POST_stage_get(struct be_ctrl_info *ctrl, u16 *stage)
@@ -235,6 +355,18 @@
return &((struct be_mcc_mailbox *)(mbox_mem->va))->wrb;
}
+static inline struct be_mcc_wrb *wrb_from_mcc(struct be_queue_info *mccq)
+{
+ struct be_mcc_wrb *wrb = NULL;
+ if (atomic_read(&mccq->used) < mccq->len) {
+ wrb = queue_head_node(mccq);
+ queue_head_inc(mccq);
+ atomic_inc(&mccq->used);
+ memset(wrb, 0, sizeof(*wrb));
+ }
+ return wrb;
+}
+
int be_cmd_eq_create(struct be_ctrl_info *ctrl,
struct be_queue_info *eq, int eq_delay)
{
@@ -244,7 +376,7 @@
struct be_dma_mem *q_mem = &eq->dma_mem;
int status;
- spin_lock(&ctrl->cmd_lock);
+ spin_lock(&ctrl->mbox_lock);
memset(wrb, 0, sizeof(*wrb));
be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
@@ -272,7 +404,7 @@
eq->id = le16_to_cpu(resp->eq_id);
eq->created = true;
}
- spin_unlock(&ctrl->cmd_lock);
+ spin_unlock(&ctrl->mbox_lock);
return status;
}
@@ -284,7 +416,7 @@
struct be_cmd_resp_mac_query *resp = embedded_payload(wrb);
int status;
- spin_lock(&ctrl->cmd_lock);
+ spin_lock(&ctrl->mbox_lock);
memset(wrb, 0, sizeof(*wrb));
be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
@@ -304,7 +436,7 @@
if (!status)
memcpy(mac_addr, resp->mac.addr, ETH_ALEN);
- spin_unlock(&ctrl->cmd_lock);
+ spin_unlock(&ctrl->mbox_lock);
return status;
}
@@ -315,7 +447,7 @@
struct be_cmd_req_pmac_add *req = embedded_payload(wrb);
int status;
- spin_lock(&ctrl->cmd_lock);
+ spin_lock(&ctrl->mbox_lock);
memset(wrb, 0, sizeof(*wrb));
be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
@@ -332,7 +464,7 @@
*pmac_id = le32_to_cpu(resp->pmac_id);
}
- spin_unlock(&ctrl->cmd_lock);
+ spin_unlock(&ctrl->mbox_lock);
return status;
}
@@ -342,7 +474,7 @@
struct be_cmd_req_pmac_del *req = embedded_payload(wrb);
int status;
- spin_lock(&ctrl->cmd_lock);
+ spin_lock(&ctrl->mbox_lock);
memset(wrb, 0, sizeof(*wrb));
be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
@@ -354,7 +486,7 @@
req->pmac_id = cpu_to_le32(pmac_id);
status = be_mbox_db_ring(ctrl);
- spin_unlock(&ctrl->cmd_lock);
+ spin_unlock(&ctrl->mbox_lock);
return status;
}
@@ -370,7 +502,7 @@
void *ctxt = &req->context;
int status;
- spin_lock(&ctrl->cmd_lock);
+ spin_lock(&ctrl->mbox_lock);
memset(wrb, 0, sizeof(*wrb));
be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
@@ -388,7 +520,7 @@
AMAP_SET_BITS(struct amap_cq_context, solevent, ctxt, sol_evts);
AMAP_SET_BITS(struct amap_cq_context, eventable, ctxt, 1);
AMAP_SET_BITS(struct amap_cq_context, eqid, ctxt, eq->id);
- AMAP_SET_BITS(struct amap_cq_context, armed, ctxt, 0);
+ AMAP_SET_BITS(struct amap_cq_context, armed, ctxt, 1);
AMAP_SET_BITS(struct amap_cq_context, func, ctxt, ctrl->pci_func);
be_dws_cpu_to_le(ctxt, sizeof(req->context));
@@ -399,7 +531,56 @@
cq->id = le16_to_cpu(resp->cq_id);
cq->created = true;
}
- spin_unlock(&ctrl->cmd_lock);
+ spin_unlock(&ctrl->mbox_lock);
+
+ return status;
+}
+
+static u32 be_encoded_q_len(int q_len)
+{
+ u32 len_encoded = fls(q_len); /* log2(len) + 1 */
+ if (len_encoded == 16)
+ len_encoded = 0;
+ return len_encoded;
+}
+
+int be_cmd_mccq_create(struct be_ctrl_info *ctrl,
+ struct be_queue_info *mccq,
+ struct be_queue_info *cq)
+{
+ struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
+ struct be_cmd_req_mcc_create *req = embedded_payload(wrb);
+ struct be_dma_mem *q_mem = &mccq->dma_mem;
+ void *ctxt = &req->context;
+ int status;
+
+ spin_lock(&ctrl->mbox_lock);
+ memset(wrb, 0, sizeof(*wrb));
+
+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
+
+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+ OPCODE_COMMON_MCC_CREATE, sizeof(*req));
+
+ req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
+
+ AMAP_SET_BITS(struct amap_mcc_context, fid, ctxt, ctrl->pci_func);
+ AMAP_SET_BITS(struct amap_mcc_context, valid, ctxt, 1);
+ AMAP_SET_BITS(struct amap_mcc_context, ring_size, ctxt,
+ be_encoded_q_len(mccq->len));
+ AMAP_SET_BITS(struct amap_mcc_context, cq_id, ctxt, cq->id);
+
+ be_dws_cpu_to_le(ctxt, sizeof(req->context));
+
+ be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
+
+ status = be_mbox_db_ring(ctrl);
+ if (!status) {
+ struct be_cmd_resp_mcc_create *resp = embedded_payload(wrb);
+ mccq->id = le16_to_cpu(resp->id);
+ mccq->created = true;
+ }
+ spin_unlock(&ctrl->mbox_lock);
return status;
}
@@ -415,7 +596,7 @@
int status;
u32 len_encoded;
- spin_lock(&ctrl->cmd_lock);
+ spin_lock(&ctrl->mbox_lock);
memset(wrb, 0, sizeof(*wrb));
be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
@@ -446,7 +627,7 @@
txq->id = le16_to_cpu(resp->cid);
txq->created = true;
}
- spin_unlock(&ctrl->cmd_lock);
+ spin_unlock(&ctrl->mbox_lock);
return status;
}
@@ -460,7 +641,7 @@
struct be_dma_mem *q_mem = &rxq->dma_mem;
int status;
- spin_lock(&ctrl->cmd_lock);
+ spin_lock(&ctrl->mbox_lock);
memset(wrb, 0, sizeof(*wrb));
be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
@@ -482,7 +663,7 @@
rxq->id = le16_to_cpu(resp->id);
rxq->created = true;
}
- spin_unlock(&ctrl->cmd_lock);
+ spin_unlock(&ctrl->mbox_lock);
return status;
}
@@ -496,7 +677,7 @@
u8 subsys = 0, opcode = 0;
int status;
- spin_lock(&ctrl->cmd_lock);
+ spin_lock(&ctrl->mbox_lock);
memset(wrb, 0, sizeof(*wrb));
be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
@@ -518,6 +699,10 @@
subsys = CMD_SUBSYSTEM_ETH;
opcode = OPCODE_ETH_RX_DESTROY;
break;
+ case QTYPE_MCCQ:
+ subsys = CMD_SUBSYSTEM_COMMON;
+ opcode = OPCODE_COMMON_MCC_DESTROY;
+ break;
default:
printk(KERN_WARNING DRV_NAME ":bad Q type in Q destroy cmd\n");
status = -1;
@@ -528,7 +713,7 @@
status = be_mbox_db_ring(ctrl);
err:
- spin_unlock(&ctrl->cmd_lock);
+ spin_unlock(&ctrl->mbox_lock);
return status;
}
@@ -541,7 +726,7 @@
struct be_cmd_req_if_create *req = embedded_payload(wrb);
int status;
- spin_lock(&ctrl->cmd_lock);
+ spin_lock(&ctrl->mbox_lock);
memset(wrb, 0, sizeof(*wrb));
be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
@@ -562,7 +747,7 @@
*pmac_id = le32_to_cpu(resp->pmac_id);
}
- spin_unlock(&ctrl->cmd_lock);
+ spin_unlock(&ctrl->mbox_lock);
return status;
}
@@ -572,7 +757,7 @@
struct be_cmd_req_if_destroy *req = embedded_payload(wrb);
int status;
- spin_lock(&ctrl->cmd_lock);
+ spin_lock(&ctrl->mbox_lock);
memset(wrb, 0, sizeof(*wrb));
be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
@@ -583,7 +768,7 @@
req->interface_id = cpu_to_le32(interface_id);
status = be_mbox_db_ring(ctrl);
- spin_unlock(&ctrl->cmd_lock);
+ spin_unlock(&ctrl->mbox_lock);
return status;
}
@@ -598,7 +783,7 @@
struct be_sge *sge = nonembedded_sgl(wrb);
int status;
- spin_lock(&ctrl->cmd_lock);
+ spin_lock(&ctrl->mbox_lock);
memset(wrb, 0, sizeof(*wrb));
memset(req, 0, sizeof(*req));
@@ -617,18 +802,20 @@
be_dws_le_to_cpu(&resp->hw_stats, sizeof(resp->hw_stats));
}
- spin_unlock(&ctrl->cmd_lock);
+ spin_unlock(&ctrl->mbox_lock);
return status;
}
int be_cmd_link_status_query(struct be_ctrl_info *ctrl,
- struct be_link_info *link)
+ bool *link_up)
{
struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
struct be_cmd_req_link_status *req = embedded_payload(wrb);
int status;
- spin_lock(&ctrl->cmd_lock);
+ spin_lock(&ctrl->mbox_lock);
+
+ *link_up = false;
memset(wrb, 0, sizeof(*wrb));
be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
@@ -639,14 +826,11 @@
status = be_mbox_db_ring(ctrl);
if (!status) {
struct be_cmd_resp_link_status *resp = embedded_payload(wrb);
- link->speed = resp->mac_speed;
- link->duplex = resp->mac_duplex;
- link->fault = resp->mac_fault;
- } else {
- link->speed = PHY_LINK_SPEED_ZERO;
+ if (resp->mac_speed != PHY_LINK_SPEED_ZERO)
+ *link_up = true;
}
- spin_unlock(&ctrl->cmd_lock);
+ spin_unlock(&ctrl->mbox_lock);
return status;
}
@@ -656,7 +840,7 @@
struct be_cmd_req_get_fw_version *req = embedded_payload(wrb);
int status;
- spin_lock(&ctrl->cmd_lock);
+ spin_lock(&ctrl->mbox_lock);
memset(wrb, 0, sizeof(*wrb));
be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
@@ -670,7 +854,7 @@
strncpy(fw_ver, resp->firmware_version_string, FW_VER_LEN);
}
- spin_unlock(&ctrl->cmd_lock);
+ spin_unlock(&ctrl->mbox_lock);
return status;
}
@@ -681,7 +865,7 @@
struct be_cmd_req_modify_eq_delay *req = embedded_payload(wrb);
int status;
- spin_lock(&ctrl->cmd_lock);
+ spin_lock(&ctrl->mbox_lock);
memset(wrb, 0, sizeof(*wrb));
be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
@@ -696,7 +880,7 @@
status = be_mbox_db_ring(ctrl);
- spin_unlock(&ctrl->cmd_lock);
+ spin_unlock(&ctrl->mbox_lock);
return status;
}
@@ -707,7 +891,7 @@
struct be_cmd_req_vlan_config *req = embedded_payload(wrb);
int status;
- spin_lock(&ctrl->cmd_lock);
+ spin_lock(&ctrl->mbox_lock);
memset(wrb, 0, sizeof(*wrb));
be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
@@ -726,18 +910,22 @@
status = be_mbox_db_ring(ctrl);
- spin_unlock(&ctrl->cmd_lock);
+ spin_unlock(&ctrl->mbox_lock);
return status;
}
+/* Use MCC for this command as it may be called in BH context */
int be_cmd_promiscuous_config(struct be_ctrl_info *ctrl, u8 port_num, bool en)
{
- struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
- struct be_cmd_req_promiscuous_config *req = embedded_payload(wrb);
- int status;
+ struct be_mcc_wrb *wrb;
+ struct be_cmd_req_promiscuous_config *req;
- spin_lock(&ctrl->cmd_lock);
- memset(wrb, 0, sizeof(*wrb));
+ spin_lock_bh(&ctrl->mcc_lock);
+
+ wrb = wrb_from_mcc(&ctrl->mcc_obj.q);
+ BUG_ON(!wrb);
+
+ req = embedded_payload(wrb);
be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
@@ -749,21 +937,29 @@
else
req->port0_promiscuous = en;
- status = be_mbox_db_ring(ctrl);
+ be_mcc_notify_wait(ctrl);
- spin_unlock(&ctrl->cmd_lock);
- return status;
+ spin_unlock_bh(&ctrl->mcc_lock);
+ return 0;
}
-int be_cmd_mcast_mac_set(struct be_ctrl_info *ctrl, u32 if_id, u8 *mac_table,
- u32 num, bool promiscuous)
+/*
+ * Use MCC for this command as it may be called in BH context
+ * (mc == NULL) => multicast promiscuous
+ */
+int be_cmd_multicast_set(struct be_ctrl_info *ctrl, u32 if_id,
+ struct dev_mc_list *mc_list, u32 mc_count)
{
- struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
- struct be_cmd_req_mcast_mac_config *req = embedded_payload(wrb);
- int status;
+#define BE_MAX_MC 32 /* set mcast promisc if > 32 */
+ struct be_mcc_wrb *wrb;
+ struct be_cmd_req_mcast_mac_config *req;
- spin_lock(&ctrl->cmd_lock);
- memset(wrb, 0, sizeof(*wrb));
+ spin_lock_bh(&ctrl->mcc_lock);
+
+ wrb = wrb_from_mcc(&ctrl->mcc_obj.q);
+ BUG_ON(!wrb);
+
+ req = embedded_payload(wrb);
be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
@@ -771,17 +967,23 @@
OPCODE_COMMON_NTWK_MULTICAST_SET, sizeof(*req));
req->interface_id = if_id;
- req->promiscuous = promiscuous;
- if (!promiscuous) {
- req->num_mac = cpu_to_le16(num);
- if (num)
- memcpy(req->mac, mac_table, ETH_ALEN * num);
+ if (mc_list && mc_count <= BE_MAX_MC) {
+ int i;
+ struct dev_mc_list *mc;
+
+ req->num_mac = cpu_to_le16(mc_count);
+
+ for (mc = mc_list, i = 0; mc; mc = mc->next, i++)
+ memcpy(req->mac[i].byte, mc->dmi_addr, ETH_ALEN);
+ } else {
+ req->promiscuous = 1;
}
- status = be_mbox_db_ring(ctrl);
+ be_mcc_notify_wait(ctrl);
- spin_unlock(&ctrl->cmd_lock);
- return status;
+ spin_unlock_bh(&ctrl->mcc_lock);
+
+ return 0;
}
int be_cmd_set_flow_control(struct be_ctrl_info *ctrl, u32 tx_fc, u32 rx_fc)
@@ -790,7 +992,7 @@
struct be_cmd_req_set_flow_control *req = embedded_payload(wrb);
int status;
- spin_lock(&ctrl->cmd_lock);
+ spin_lock(&ctrl->mbox_lock);
memset(wrb, 0, sizeof(*wrb));
@@ -804,7 +1006,7 @@
status = be_mbox_db_ring(ctrl);
- spin_unlock(&ctrl->cmd_lock);
+ spin_unlock(&ctrl->mbox_lock);
return status;
}
@@ -814,7 +1016,7 @@
struct be_cmd_req_get_flow_control *req = embedded_payload(wrb);
int status;
- spin_lock(&ctrl->cmd_lock);
+ spin_lock(&ctrl->mbox_lock);
memset(wrb, 0, sizeof(*wrb));
@@ -831,7 +1033,7 @@
*rx_fc = le16_to_cpu(resp->rx_flow_control);
}
- spin_unlock(&ctrl->cmd_lock);
+ spin_unlock(&ctrl->mbox_lock);
return status;
}
@@ -841,7 +1043,7 @@
struct be_cmd_req_query_fw_cfg *req = embedded_payload(wrb);
int status;
- spin_lock(&ctrl->cmd_lock);
+ spin_lock(&ctrl->mbox_lock);
memset(wrb, 0, sizeof(*wrb));
@@ -856,6 +1058,6 @@
*port_num = le32_to_cpu(resp->phys_port);
}
- spin_unlock(&ctrl->cmd_lock);
+ spin_unlock(&ctrl->mbox_lock);
return status;
}
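This file now carries two command paths with different locking: mailbox commands still serialize on mbox_lock in process context, while the new MCC path takes mcc_lock with the _bh variants because commands such as the promiscuous and multicast configuration above can be issued from bottom-half context. A rough sketch of the split, reusing the helpers introduced in this patch (not literal driver code):

/* Sketch only: shows which lock guards which command path. */
#include "be.h"

static int issue_mbox_cmd(struct be_ctrl_info *ctrl)
{
        int status;

        spin_lock(&ctrl->mbox_lock);            /* process context only */
        /* ... prepare the embedded WRB in the mailbox ... */
        status = be_mbox_db_ring(ctrl);         /* poll the mailbox doorbell */
        spin_unlock(&ctrl->mbox_lock);
        return status;
}

static void issue_mcc_cmd(struct be_ctrl_info *ctrl)
{
        struct be_mcc_wrb *wrb;

        spin_lock_bh(&ctrl->mcc_lock);          /* safe from BH context */
        wrb = wrb_from_mcc(&ctrl->mcc_obj.q);   /* grab a free MCC WRB slot */
        /* ... fill the WRB ... */
        be_mcc_notify_wait(ctrl);               /* ring doorbell, poll the compl queue */
        spin_unlock_bh(&ctrl->mcc_lock);
}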
diff --git a/drivers/net/benet/be_cmds.h b/drivers/net/benet/be_cmds.h
index e499e2d..747626d 100644
--- a/drivers/net/benet/be_cmds.h
+++ b/drivers/net/benet/be_cmds.h
@@ -76,6 +76,34 @@
u32 flags; /* dword 3 */
};
+/* When the async bit of mcc_compl is set, the last 4 bytes of
+ * mcc_compl is interpreted as follows:
+ */
+#define ASYNC_TRAILER_EVENT_CODE_SHIFT 8 /* bits 8 - 15 */
+#define ASYNC_TRAILER_EVENT_CODE_MASK 0xFF
+#define ASYNC_EVENT_CODE_LINK_STATE 0x1
+struct be_async_event_trailer {
+ u32 code;
+};
+
+enum {
+ ASYNC_EVENT_LINK_DOWN = 0x0,
+ ASYNC_EVENT_LINK_UP = 0x1
+};
+
+/* When the event code of an async trailer is link-state, the mcc_compl
+ * must be interpreted as follows
+ */
+struct be_async_event_link_state {
+ u8 physical_port;
+ u8 port_link_status;
+ u8 port_duplex;
+ u8 port_speed;
+ u8 port_fault;
+ u8 rsvd0[7];
+ struct be_async_event_trailer trailer;
+} __packed;
+
struct be_mcc_mailbox {
struct be_mcc_wrb wrb;
struct be_mcc_cq_entry cqe;
@@ -101,6 +129,7 @@
#define OPCODE_COMMON_FIRMWARE_CONFIG 42
#define OPCODE_COMMON_NTWK_INTERFACE_CREATE 50
#define OPCODE_COMMON_NTWK_INTERFACE_DESTROY 51
+#define OPCODE_COMMON_MCC_DESTROY 53
#define OPCODE_COMMON_CQ_DESTROY 54
#define OPCODE_COMMON_EQ_DESTROY 55
#define OPCODE_COMMON_QUERY_FIRMWARE_CONFIG 58
@@ -269,6 +298,38 @@
u16 rsvd0;
} __packed;
+/******************** Create MCCQ ***************************/
+/* Pseudo amap definition in which each bit of the actual structure is defined
+ * as a byte: used to calculate offset/shift/mask of each field */
+struct amap_mcc_context {
+ u8 con_index[14];
+ u8 rsvd0[2];
+ u8 ring_size[4];
+ u8 fetch_wrb;
+ u8 fetch_r2t;
+ u8 cq_id[10];
+ u8 prod_index[14];
+ u8 fid[8];
+ u8 pdid[9];
+ u8 valid;
+ u8 rsvd1[32];
+ u8 rsvd2[32];
+} __packed;
+
+struct be_cmd_req_mcc_create {
+ struct be_cmd_req_hdr hdr;
+ u16 num_pages;
+ u16 rsvd0;
+ u8 context[sizeof(struct amap_mcc_context) / 8];
+ struct phys_addr pages[8];
+} __packed;
+
+struct be_cmd_resp_mcc_create {
+ struct be_cmd_resp_hdr hdr;
+ u16 id;
+ u16 rsvd0;
+} __packed;
+
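amap_mcc_context above uses the "one byte per bit" convention the comment describes: offsetof() on the pseudo struct yields a field's bit position in the real hardware context and sizeof() its width, from which the AMAP macros derive dword index, shift and mask. A standalone illustration of that arithmetic (the struct is copied from above; the printf shows what AMAP_SET_BITS effectively computes for cq_id):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct amap_mcc_context {                       /* byte-per-bit pseudo layout */
        uint8_t con_index[14];
        uint8_t rsvd0[2];
        uint8_t ring_size[4];
        uint8_t fetch_wrb;
        uint8_t fetch_r2t;
        uint8_t cq_id[10];
        uint8_t prod_index[14];
        uint8_t fid[8];
        uint8_t pdid[9];
        uint8_t valid;
        uint8_t rsvd1[32];
        uint8_t rsvd2[32];
} __attribute__((packed));

int main(void)
{
        size_t bit_off = offsetof(struct amap_mcc_context, cq_id);
        size_t width = sizeof(((struct amap_mcc_context *)0)->cq_id);

        /* prints: cq_id: dword 0, shift 22, mask 0x3ff */
        printf("cq_id: dword %zu, shift %zu, mask 0x%x\n",
               bit_off / 32, bit_off % 32, (1u << width) - 1);
        return 0;
}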
/******************** Create TxQ ***************************/
#define BE_ETH_TX_RING_TYPE_STANDARD 2
#define BE_ULP1_NUM 1
@@ -341,7 +402,8 @@
QTYPE_EQ = 1,
QTYPE_CQ,
QTYPE_TXQ,
- QTYPE_RXQ
+ QTYPE_RXQ,
+ QTYPE_MCCQ
};
struct be_cmd_req_q_destroy {
@@ -546,12 +608,6 @@
u32 rsvd;
};
-struct be_link_info {
- u8 duplex;
- u8 speed;
- u8 fault;
-};
-
enum {
PHY_LINK_DUPLEX_NONE = 0x0,
PHY_LINK_DUPLEX_HALF = 0x1,
@@ -657,6 +713,9 @@
struct be_queue_info *cq, struct be_queue_info *eq,
bool sol_evts, bool no_delay,
int num_cqe_dma_coalesce);
+extern int be_cmd_mccq_create(struct be_ctrl_info *ctrl,
+ struct be_queue_info *mccq,
+ struct be_queue_info *cq);
extern int be_cmd_txq_create(struct be_ctrl_info *ctrl,
struct be_queue_info *txq,
struct be_queue_info *cq);
@@ -667,7 +726,7 @@
extern int be_cmd_q_destroy(struct be_ctrl_info *ctrl, struct be_queue_info *q,
int type);
extern int be_cmd_link_status_query(struct be_ctrl_info *ctrl,
- struct be_link_info *link);
+ bool *link_up);
extern int be_cmd_reset(struct be_ctrl_info *ctrl);
extern int be_cmd_get_stats(struct be_ctrl_info *ctrl,
struct be_dma_mem *nonemb_cmd);
@@ -679,10 +738,11 @@
bool promiscuous);
extern int be_cmd_promiscuous_config(struct be_ctrl_info *ctrl,
u8 port_num, bool en);
-extern int be_cmd_mcast_mac_set(struct be_ctrl_info *ctrl, u32 if_id,
- u8 *mac_table, u32 num, bool promiscuous);
+extern int be_cmd_multicast_set(struct be_ctrl_info *ctrl, u32 if_id,
+ struct dev_mc_list *mc_list, u32 mc_count);
extern int be_cmd_set_flow_control(struct be_ctrl_info *ctrl,
u32 tx_fc, u32 rx_fc);
extern int be_cmd_get_flow_control(struct be_ctrl_info *ctrl,
u32 *tx_fc, u32 *rx_fc);
extern int be_cmd_query_fw_cfg(struct be_ctrl_info *ctrl, u32 *port_num);
+extern void be_process_mcc(struct be_ctrl_info *ctrl);
diff --git a/drivers/net/benet/be_hw.h b/drivers/net/benet/be_hw.h
index b132aa4..b02e805 100644
--- a/drivers/net/benet/be_hw.h
+++ b/drivers/net/benet/be_hw.h
@@ -61,7 +61,7 @@
/* Clear the interrupt for this eq */
#define DB_EQ_CLR_SHIFT (9) /* bit 9 */
/* Must be 1 */
-#define DB_EQ_EVNT_SHIFT (10) /* bit 10 */
+#define DB_EQ_EVNT_SHIFT (10) /* bit 10 */
/* Number of event entries processed */
#define DB_EQ_NUM_POPPED_SHIFT (16) /* bits 16 - 28 */
/* Rearm bit */
@@ -88,6 +88,12 @@
/* Number of rx frags posted */
#define DB_RQ_NUM_POSTED_SHIFT (24) /* bits 24 - 31 */
+/********** MCC door bell ************/
+#define DB_MCCQ_OFFSET 0x140
+#define DB_MCCQ_RING_ID_MASK 0x7FF /* bits 0 - 10 */
+/* Number of entries posted */
+#define DB_MCCQ_NUM_POSTED_SHIFT (16) /* bits 16 - 29 */
+
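be_mcc_notify() in be_cmds.c builds the MCC doorbell from these definitions: the ring id occupies the low bits and the count of newly posted entries sits above bit 16. A standalone sketch of the packing:

#include <stdint.h>
#include <stdio.h>

#define DB_MCCQ_RING_ID_MASK     0x7FF          /* bits 0 - 10, as above */
#define DB_MCCQ_NUM_POSTED_SHIFT 16             /* bits 16 - 29 */

static uint32_t mccq_doorbell(uint16_t ring_id, uint16_t num_posted)
{
        uint32_t val = 0;

        val |= ring_id & DB_MCCQ_RING_ID_MASK;
        val |= (uint32_t)num_posted << DB_MCCQ_NUM_POSTED_SHIFT;
        return val;                             /* the driver iowrite32()s this at offset 0x140 */
}

int main(void)
{
        printf("doorbell for ring 5, 1 entry posted: 0x%08x\n", mccq_doorbell(5, 1));
        return 0;
}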
/*
* BE descriptors: host memory data structures whose formats
* are hardwired in BE silicon.
diff --git a/drivers/net/benet/be_main.c b/drivers/net/benet/be_main.c
index 66bb568..66c10c8 100644
--- a/drivers/net/benet/be_main.c
+++ b/drivers/net/benet/be_main.c
@@ -60,26 +60,6 @@
return 0;
}
-static inline void *queue_head_node(struct be_queue_info *q)
-{
- return q->dma_mem.va + q->head * q->entry_size;
-}
-
-static inline void *queue_tail_node(struct be_queue_info *q)
-{
- return q->dma_mem.va + q->tail * q->entry_size;
-}
-
-static inline void queue_head_inc(struct be_queue_info *q)
-{
- index_inc(&q->head, q->len);
-}
-
-static inline void queue_tail_inc(struct be_queue_info *q)
-{
- index_inc(&q->tail, q->len);
-}
-
static void be_intr_set(struct be_ctrl_info *ctrl, bool enable)
{
u8 __iomem *addr = ctrl->pcicfg + PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET;
@@ -127,7 +107,7 @@
iowrite32(val, ctrl->db + DB_EQ_OFFSET);
}
-static void be_cq_notify(struct be_ctrl_info *ctrl, u16 qid,
+void be_cq_notify(struct be_ctrl_info *ctrl, u16 qid,
bool arm, u16 num_popped)
{
u32 val = 0;
@@ -234,28 +214,24 @@
dev_stats->tx_window_errors = 0;
}
-static void be_link_status_update(struct be_adapter *adapter)
+void be_link_status_update(void *ctxt, bool link_up)
{
- struct be_link_info *prev = &adapter->link;
- struct be_link_info now = { 0 };
+ struct be_adapter *adapter = ctxt;
struct net_device *netdev = adapter->netdev;
- be_cmd_link_status_query(&adapter->ctrl, &now);
-
/* If link came up or went down */
- if (now.speed != prev->speed && (now.speed == PHY_LINK_SPEED_ZERO ||
- prev->speed == PHY_LINK_SPEED_ZERO)) {
- if (now.speed == PHY_LINK_SPEED_ZERO) {
- netif_stop_queue(netdev);
- netif_carrier_off(netdev);
- printk(KERN_INFO "%s: Link down\n", netdev->name);
- } else {
+ if (adapter->link_up != link_up) {
+ if (link_up) {
netif_start_queue(netdev);
netif_carrier_on(netdev);
printk(KERN_INFO "%s: Link up\n", netdev->name);
+ } else {
+ netif_stop_queue(netdev);
+ netif_carrier_off(netdev);
+ printk(KERN_INFO "%s: Link down\n", netdev->name);
}
+ adapter->link_up = link_up;
}
- *prev = now;
}
/* Update the EQ delay n BE based on the RX frags consumed / sec */
@@ -569,47 +545,32 @@
be_vid_config(netdev);
}
-static void be_set_multicast_filter(struct net_device *netdev)
-{
- struct be_adapter *adapter = netdev_priv(netdev);
- struct dev_mc_list *mc_ptr;
- u8 mac_addr[32][ETH_ALEN];
- int i = 0;
-
- if (netdev->flags & IFF_ALLMULTI) {
- /* set BE in Multicast promiscuous */
- be_cmd_mcast_mac_set(&adapter->ctrl,
- adapter->if_handle, NULL, 0, true);
- return;
- }
-
- for (mc_ptr = netdev->mc_list; mc_ptr; mc_ptr = mc_ptr->next) {
- memcpy(&mac_addr[i][0], mc_ptr->dmi_addr, ETH_ALEN);
- if (++i >= 32) {
- be_cmd_mcast_mac_set(&adapter->ctrl,
- adapter->if_handle, &mac_addr[0][0], i, false);
- i = 0;
- }
-
- }
-
- if (i) {
- /* reset the promiscuous mode also. */
- be_cmd_mcast_mac_set(&adapter->ctrl,
- adapter->if_handle, &mac_addr[0][0], i, false);
- }
-}
-
static void be_set_multicast_list(struct net_device *netdev)
{
struct be_adapter *adapter = netdev_priv(netdev);
+ struct be_ctrl_info *ctrl = &adapter->ctrl;
if (netdev->flags & IFF_PROMISC) {
- be_cmd_promiscuous_config(&adapter->ctrl, adapter->port_num, 1);
- } else {
- be_cmd_promiscuous_config(&adapter->ctrl, adapter->port_num, 0);
- be_set_multicast_filter(netdev);
+ be_cmd_promiscuous_config(ctrl, adapter->port_num, 1);
+ adapter->promiscuous = true;
+ goto done;
}
+
+ /* BE was previously in promiscuous mode; disable it */
+ if (adapter->promiscuous) {
+ adapter->promiscuous = false;
+ be_cmd_promiscuous_config(ctrl, adapter->port_num, 0);
+ }
+
+ if (netdev->flags & IFF_ALLMULTI) {
+ be_cmd_multicast_set(ctrl, adapter->if_handle, NULL, 0);
+ goto done;
+ }
+
+ be_cmd_multicast_set(ctrl, adapter->if_handle, netdev->mc_list,
+ netdev->mc_count);
+done:
+ return;
}
static void be_rx_rate_update(struct be_adapter *adapter)
@@ -960,10 +921,8 @@
return;
}
-static struct be_eth_tx_compl *
-be_tx_compl_get(struct be_adapter *adapter)
+static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
{
- struct be_queue_info *tx_cq = &adapter->tx_obj.cq;
struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);
if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
@@ -1051,6 +1010,59 @@
}
}
+static void be_mcc_queues_destroy(struct be_adapter *adapter)
+{
+ struct be_queue_info *q;
+ struct be_ctrl_info *ctrl = &adapter->ctrl;
+
+ q = &ctrl->mcc_obj.q;
+ if (q->created)
+ be_cmd_q_destroy(ctrl, q, QTYPE_MCCQ);
+ be_queue_free(adapter, q);
+
+ q = &ctrl->mcc_obj.cq;
+ if (q->created)
+ be_cmd_q_destroy(ctrl, q, QTYPE_CQ);
+ be_queue_free(adapter, q);
+}
+
+/* Must be called only after TX qs are created as MCC shares TX EQ */
+static int be_mcc_queues_create(struct be_adapter *adapter)
+{
+ struct be_queue_info *q, *cq;
+ struct be_ctrl_info *ctrl = &adapter->ctrl;
+
+ /* Alloc MCC compl queue */
+ cq = &ctrl->mcc_obj.cq;
+ if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
+ sizeof(struct be_mcc_cq_entry)))
+ goto err;
+
+ /* Ask BE to create MCC compl queue; share TX's eq */
+ if (be_cmd_cq_create(ctrl, cq, &adapter->tx_eq.q, false, true, 0))
+ goto mcc_cq_free;
+
+ /* Alloc MCC queue */
+ q = &ctrl->mcc_obj.q;
+ if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
+ goto mcc_cq_destroy;
+
+ /* Ask BE to create MCC queue */
+ if (be_cmd_mccq_create(ctrl, q, cq))
+ goto mcc_q_free;
+
+ return 0;
+
+mcc_q_free:
+ be_queue_free(adapter, q);
+mcc_cq_destroy:
+ be_cmd_q_destroy(ctrl, cq, QTYPE_CQ);
+mcc_cq_free:
+ be_queue_free(adapter, cq);
+err:
+ return -1;
+}
+
static void be_tx_queues_destroy(struct be_adapter *adapter)
{
struct be_queue_info *q;
@@ -1263,7 +1275,7 @@
return IRQ_HANDLED;
}
-static irqreturn_t be_msix_tx(int irq, void *dev)
+static irqreturn_t be_msix_tx_mcc(int irq, void *dev)
{
struct be_adapter *adapter = dev;
@@ -1324,40 +1336,51 @@
return work_done;
}
-/* For TX we don't honour budget; consume everything */
-int be_poll_tx(struct napi_struct *napi, int budget)
+void be_process_tx(struct be_adapter *adapter)
{
- struct be_eq_obj *tx_eq = container_of(napi, struct be_eq_obj, napi);
- struct be_adapter *adapter =
- container_of(tx_eq, struct be_adapter, tx_eq);
- struct be_tx_obj *tx_obj = &adapter->tx_obj;
- struct be_queue_info *tx_cq = &tx_obj->cq;
- struct be_queue_info *txq = &tx_obj->q;
+ struct be_queue_info *txq = &adapter->tx_obj.q;
+ struct be_queue_info *tx_cq = &adapter->tx_obj.cq;
struct be_eth_tx_compl *txcp;
u32 num_cmpl = 0;
u16 end_idx;
- while ((txcp = be_tx_compl_get(adapter))) {
+ while ((txcp = be_tx_compl_get(tx_cq))) {
end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
wrb_index, txcp);
be_tx_compl_process(adapter, end_idx);
num_cmpl++;
}
- /* As Tx wrbs have been freed up, wake up netdev queue if
- * it was stopped due to lack of tx wrbs.
- */
- if (netif_queue_stopped(adapter->netdev) &&
+ if (num_cmpl) {
+ be_cq_notify(&adapter->ctrl, tx_cq->id, true, num_cmpl);
+
+ /* As Tx wrbs have been freed up, wake up netdev queue if
+ * it was stopped due to lack of tx wrbs.
+ */
+ if (netif_queue_stopped(adapter->netdev) &&
atomic_read(&txq->used) < txq->len / 2) {
- netif_wake_queue(adapter->netdev);
+ netif_wake_queue(adapter->netdev);
+ }
+
+ drvr_stats(adapter)->be_tx_events++;
+ drvr_stats(adapter)->be_tx_compl += num_cmpl;
}
+}
+
+/* As TX and MCC share the same EQ check for both TX and MCC completions.
+ * For TX/MCC we don't honour budget; consume everything
+ */
+static int be_poll_tx_mcc(struct napi_struct *napi, int budget)
+{
+ struct be_eq_obj *tx_eq = container_of(napi, struct be_eq_obj, napi);
+ struct be_adapter *adapter =
+ container_of(tx_eq, struct be_adapter, tx_eq);
napi_complete(napi);
- be_cq_notify(&adapter->ctrl, tx_cq->id, true, num_cmpl);
+ be_process_tx(adapter);
- drvr_stats(adapter)->be_tx_events++;
- drvr_stats(adapter)->be_tx_compl += num_cmpl;
+ be_process_mcc(&adapter->ctrl);
return 1;
}
@@ -1368,9 +1391,6 @@
container_of(work, struct be_adapter, work.work);
int status;
- /* Check link */
- be_link_status_update(adapter);
-
/* Get Stats */
status = be_cmd_get_stats(&adapter->ctrl, &adapter->stats.cmd);
if (!status)
@@ -1419,7 +1439,7 @@
sprintf(tx_eq->desc, "%s-tx", netdev->name);
vec = be_msix_vec_get(adapter, tx_eq->q.id);
- status = request_irq(vec, be_msix_tx, 0, tx_eq->desc, adapter);
+ status = request_irq(vec, be_msix_tx_mcc, 0, tx_eq->desc, adapter);
if (status)
goto err;
@@ -1495,6 +1515,39 @@
struct be_ctrl_info *ctrl = &adapter->ctrl;
struct be_eq_obj *rx_eq = &adapter->rx_eq;
struct be_eq_obj *tx_eq = &adapter->tx_eq;
+ bool link_up;
+ int status;
+
+ /* First time posting */
+ be_post_rx_frags(adapter);
+
+ napi_enable(&rx_eq->napi);
+ napi_enable(&tx_eq->napi);
+
+ be_irq_register(adapter);
+
+ be_intr_set(ctrl, true);
+
+ /* The evt queues are created in unarmed state; arm them */
+ be_eq_notify(ctrl, rx_eq->q.id, true, false, 0);
+ be_eq_notify(ctrl, tx_eq->q.id, true, false, 0);
+
+ /* Rx compl queue may be in unarmed state; rearm it */
+ be_cq_notify(ctrl, adapter->rx_obj.cq.id, true, 0);
+
+ status = be_cmd_link_status_query(ctrl, &link_up);
+ if (status)
+ return status;
+ be_link_status_update(adapter, link_up);
+
+ schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
+ return 0;
+}
+
+static int be_setup(struct be_adapter *adapter)
+{
+ struct be_ctrl_info *ctrl = &adapter->ctrl;
+ struct net_device *netdev = adapter->netdev;
u32 if_flags;
int status;
@@ -1521,29 +1574,14 @@
if (status != 0)
goto tx_qs_destroy;
- /* First time posting */
- be_post_rx_frags(adapter);
+ status = be_mcc_queues_create(adapter);
+ if (status != 0)
+ goto rx_qs_destroy;
- napi_enable(&rx_eq->napi);
- napi_enable(&tx_eq->napi);
-
- be_irq_register(adapter);
-
- be_intr_set(ctrl, true);
-
- /* The evt queues are created in the unarmed state; arm them */
- be_eq_notify(ctrl, rx_eq->q.id, true, false, 0);
- be_eq_notify(ctrl, tx_eq->q.id, true, false, 0);
-
- /* The compl queues are created in the unarmed state; arm them */
- be_cq_notify(ctrl, adapter->rx_obj.cq.id, true, 0);
- be_cq_notify(ctrl, adapter->tx_obj.cq.id, true, 0);
-
- be_link_status_update(adapter);
-
- schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
return 0;
+rx_qs_destroy:
+ be_rx_queues_destroy(adapter);
tx_qs_destroy:
be_tx_queues_destroy(adapter);
if_destroy:
@@ -1552,6 +1590,19 @@
return status;
}
+static int be_clear(struct be_adapter *adapter)
+{
+ struct be_ctrl_info *ctrl = &adapter->ctrl;
+
+ be_rx_queues_destroy(adapter);
+ be_tx_queues_destroy(adapter);
+
+ be_cmd_if_destroy(ctrl, adapter->if_handle);
+
+ be_mcc_queues_destroy(adapter);
+ return 0;
+}
+
static int be_close(struct net_device *netdev)
{
struct be_adapter *adapter = netdev_priv(netdev);
@@ -1564,7 +1615,7 @@
netif_stop_queue(netdev);
netif_carrier_off(netdev);
- adapter->link.speed = PHY_LINK_SPEED_ZERO;
+ adapter->link_up = false;
be_intr_set(ctrl, false);
@@ -1581,10 +1632,6 @@
napi_disable(&rx_eq->napi);
napi_disable(&tx_eq->napi);
- be_rx_queues_destroy(adapter);
- be_tx_queues_destroy(adapter);
-
- be_cmd_if_destroy(ctrl, adapter->if_handle);
return 0;
}
@@ -1673,7 +1720,7 @@
netif_napi_add(netdev, &adapter->rx_eq.napi, be_poll_rx,
BE_NAPI_WEIGHT);
- netif_napi_add(netdev, &adapter->tx_eq.napi, be_poll_tx,
+ netif_napi_add(netdev, &adapter->tx_eq.napi, be_poll_tx_mcc,
BE_NAPI_WEIGHT);
netif_carrier_off(netdev);
@@ -1755,7 +1802,12 @@
mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
- spin_lock_init(&ctrl->cmd_lock);
+ spin_lock_init(&ctrl->mbox_lock);
+ spin_lock_init(&ctrl->mcc_lock);
+ spin_lock_init(&ctrl->mcc_cq_lock);
+
+ ctrl->async_cb = be_link_status_update;
+ ctrl->adapter_ctxt = adapter;
val = ioread32(ctrl->pcicfg + PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET);
ctrl->pci_func = (val >> MEMBAR_CTRL_INT_CTRL_PFUNC_SHIFT) &
@@ -1793,6 +1845,8 @@
unregister_netdev(adapter->netdev);
+ be_clear(adapter);
+
be_stats_cleanup(adapter);
be_ctrl_cleanup(adapter);
@@ -1890,13 +1944,18 @@
be_netdev_init(netdev);
SET_NETDEV_DEV(netdev, &adapter->pdev->dev);
+ status = be_setup(adapter);
+ if (status)
+ goto stats_clean;
status = register_netdev(netdev);
if (status != 0)
- goto stats_clean;
+ goto unsetup;
dev_info(&pdev->dev, "%s port %d\n", nic_name(pdev), adapter->port_num);
return 0;
+unsetup:
+ be_clear(adapter);
stats_clean:
be_stats_cleanup(adapter);
ctrl_clean:
@@ -1921,6 +1980,7 @@
if (netif_running(netdev)) {
rtnl_lock();
be_close(netdev);
+ be_clear(adapter);
rtnl_unlock();
}
@@ -1947,6 +2007,7 @@
if (netif_running(netdev)) {
rtnl_lock();
+ be_setup(adapter);
be_open(netdev);
rtnl_unlock();
}
diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
index 38f1c33..b70cc99 100644
--- a/drivers/net/bnx2.c
+++ b/drivers/net/bnx2.c
@@ -6825,6 +6825,14 @@
return 0;
}
+static u32
+bnx2_get_link(struct net_device *dev)
+{
+ struct bnx2 *bp = netdev_priv(dev);
+
+ return bp->link_up;
+}
+
static int
bnx2_get_eeprom_len(struct net_device *dev)
{
@@ -7392,7 +7400,7 @@
.get_wol = bnx2_get_wol,
.set_wol = bnx2_set_wol,
.nway_reset = bnx2_nway_reset,
- .get_link = ethtool_op_get_link,
+ .get_link = bnx2_get_link,
.get_eeprom_len = bnx2_get_eeprom_len,
.get_eeprom = bnx2_get_eeprom,
.set_eeprom = bnx2_set_eeprom,
diff --git a/drivers/net/can/Kconfig b/drivers/net/can/Kconfig
index d5e18812..33821a8 100644
--- a/drivers/net/can/Kconfig
+++ b/drivers/net/can/Kconfig
@@ -36,7 +36,7 @@
If unsure, say Y.
config CAN_SJA1000
- depends on CAN_DEV
+ depends on CAN_DEV && HAS_IOMEM
tristate "Philips SJA1000"
---help---
Driver for the SJA1000 CAN controllers from Philips or NXP
diff --git a/drivers/net/cnic.c b/drivers/net/cnic.c
index 44f77eb..4d1515f 100644
--- a/drivers/net/cnic.c
+++ b/drivers/net/cnic.c
@@ -25,8 +25,6 @@
#include <linux/delay.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
-#include <linux/module.h>
-
#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
#define BCM_VLAN 1
#endif
@@ -2521,9 +2519,9 @@
struct cnic_dev *cdev;
struct cnic_local *cp;
struct cnic_eth_dev *ethdev = NULL;
- struct cnic_eth_dev *(*probe)(void *) = NULL;
+ struct cnic_eth_dev *(*probe)(struct net_device *) = NULL;
- probe = __symbol_get("bnx2_cnic_probe");
+ probe = symbol_get(bnx2_cnic_probe);
if (probe) {
ethdev = (*probe)(dev);
symbol_put_addr(probe);
diff --git a/drivers/net/cnic_if.h b/drivers/net/cnic_if.h
index 0638096..d1bce27 100644
--- a/drivers/net/cnic_if.h
+++ b/drivers/net/cnic_if.h
@@ -296,4 +296,6 @@
extern int cnic_unregister_driver(int ulp_type);
+extern struct cnic_eth_dev *bnx2_cnic_probe(struct net_device *dev);
+
#endif
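With bnx2_cnic_probe declared here, cnic can drop the string-based __symbol_get() lookup in favour of the type-checked symbol_get() macro. A sketch of the optional-dependency idiom the cnic hunk switches to, assuming the usual module.h semantics (symbol_get() returns NULL when the exporting module is absent and pins it otherwise; symbol_put_addr() drops that reference):

/* Sketch only; mirrors the cnic change rather than adding new behaviour. */
#include <linux/module.h>
#include <linux/netdevice.h>
#include "cnic_if.h"

static struct cnic_eth_dev *probe_backing_nic(struct net_device *dev)
{
        struct cnic_eth_dev *(*probe)(struct net_device *);
        struct cnic_eth_dev *ethdev = NULL;

        probe = symbol_get(bnx2_cnic_probe);    /* NULL if bnx2 is not loaded */
        if (probe) {
                ethdev = probe(dev);
                symbol_put_addr(probe);         /* release the module reference */
        }
        return ethdev;
}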
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c
index 677f604..679885a 100644
--- a/drivers/net/e1000e/netdev.c
+++ b/drivers/net/e1000e/netdev.c
@@ -1997,7 +1997,7 @@
struct e1000_adapter *adapter = container_of(napi, struct e1000_adapter, napi);
struct e1000_hw *hw = &adapter->hw;
struct net_device *poll_dev = adapter->netdev;
- int tx_cleaned = 0, work_done = 0;
+ int tx_cleaned = 1, work_done = 0;
adapter = netdev_priv(poll_dev);
diff --git a/drivers/net/mlx4/en_netdev.c b/drivers/net/mlx4/en_netdev.c
index e02bafd..93f4abd 100644
--- a/drivers/net/mlx4/en_netdev.c
+++ b/drivers/net/mlx4/en_netdev.c
@@ -668,7 +668,7 @@
queue_work(mdev->workqueue, &priv->mcast_task);
priv->port_up = true;
- netif_start_queue(dev);
+ netif_tx_start_all_queues(dev);
return 0;
mac_err:
@@ -700,14 +700,14 @@
en_dbg(DRV, priv, "stop port called while port already down\n");
return;
}
- netif_stop_queue(dev);
/* Synchronize with tx routine */
netif_tx_lock_bh(dev);
- priv->port_up = false;
+ netif_tx_stop_all_queues(dev);
netif_tx_unlock_bh(dev);
/* close port*/
+ priv->port_up = false;
mlx4_CLOSE_PORT(mdev->dev, priv->port);
/* Unregister Mac address for the port */
@@ -881,7 +881,6 @@
mlx4_free_hwq_res(mdev->dev, &priv->res, MLX4_EN_PAGE_SIZE);
cancel_delayed_work(&priv->stats_task);
- cancel_delayed_work(&priv->refill_task);
/* flush any pending task for this netdev */
flush_workqueue(mdev->workqueue);
@@ -986,7 +985,6 @@
spin_lock_init(&priv->stats_lock);
INIT_WORK(&priv->mcast_task, mlx4_en_do_set_multicast);
INIT_WORK(&priv->mac_task, mlx4_en_do_set_mac);
- INIT_DELAYED_WORK(&priv->refill_task, mlx4_en_rx_refill);
INIT_WORK(&priv->watchdog_task, mlx4_en_restart);
INIT_WORK(&priv->linkstate_task, mlx4_en_linkstate);
INIT_DELAYED_WORK(&priv->stats_task, mlx4_en_do_get_stats);
diff --git a/drivers/net/mlx4/en_rx.c b/drivers/net/mlx4/en_rx.c
index 5a14899..91bdfdf 100644
--- a/drivers/net/mlx4/en_rx.c
+++ b/drivers/net/mlx4/en_rx.c
@@ -269,31 +269,6 @@
return 0;
}
-static int mlx4_en_fill_rx_buf(struct net_device *dev,
- struct mlx4_en_rx_ring *ring)
-{
- struct mlx4_en_priv *priv = netdev_priv(dev);
- int num = 0;
- int err;
-
- while ((u32) (ring->prod - ring->cons) < ring->actual_size) {
- err = mlx4_en_prepare_rx_desc(priv, ring, ring->prod &
- ring->size_mask);
- if (err) {
- if (netif_msg_rx_err(priv))
- en_warn(priv, "Failed preparing rx descriptor\n");
- priv->port_stats.rx_alloc_failed++;
- break;
- }
- ++num;
- ++ring->prod;
- }
- if ((u32) (ring->prod - ring->cons) == ring->actual_size)
- ring->full = 1;
-
- return num;
-}
-
static void mlx4_en_free_rx_buf(struct mlx4_en_priv *priv,
struct mlx4_en_rx_ring *ring)
{
@@ -312,42 +287,6 @@
}
}
-
-void mlx4_en_rx_refill(struct work_struct *work)
-{
- struct delayed_work *delay = to_delayed_work(work);
- struct mlx4_en_priv *priv = container_of(delay, struct mlx4_en_priv,
- refill_task);
- struct mlx4_en_dev *mdev = priv->mdev;
- struct net_device *dev = priv->dev;
- struct mlx4_en_rx_ring *ring;
- int need_refill = 0;
- int i;
-
- mutex_lock(&mdev->state_lock);
- if (!mdev->device_up || !priv->port_up)
- goto out;
-
- /* We only get here if there are no receive buffers, so we can't race
- * with Rx interrupts while filling buffers */
- for (i = 0; i < priv->rx_ring_num; i++) {
- ring = &priv->rx_ring[i];
- if (ring->need_refill) {
- if (mlx4_en_fill_rx_buf(dev, ring)) {
- ring->need_refill = 0;
- mlx4_en_update_rx_prod_db(ring);
- } else
- need_refill = 1;
- }
- }
- if (need_refill)
- queue_delayed_work(mdev->workqueue, &priv->refill_task, HZ);
-
-out:
- mutex_unlock(&mdev->state_lock);
-}
-
-
int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
struct mlx4_en_rx_ring *ring, u32 size, u16 stride)
{
@@ -457,9 +396,6 @@
ring_ind--;
goto err_allocator;
}
-
- /* Fill Rx buffers */
- ring->full = 0;
}
err = mlx4_en_fill_rx_buffers(priv);
if (err)
@@ -647,33 +583,6 @@
return skb;
}
-static void mlx4_en_copy_desc(struct mlx4_en_priv *priv,
- struct mlx4_en_rx_ring *ring,
- int from, int to, int num)
-{
- struct skb_frag_struct *skb_frags_from;
- struct skb_frag_struct *skb_frags_to;
- struct mlx4_en_rx_desc *rx_desc_from;
- struct mlx4_en_rx_desc *rx_desc_to;
- int from_index, to_index;
- int nr, i;
-
- for (i = 0; i < num; i++) {
- from_index = (from + i) & ring->size_mask;
- to_index = (to + i) & ring->size_mask;
- skb_frags_from = ring->rx_info + (from_index << priv->log_rx_info);
- skb_frags_to = ring->rx_info + (to_index << priv->log_rx_info);
- rx_desc_from = ring->buf + (from_index << ring->log_stride);
- rx_desc_to = ring->buf + (to_index << ring->log_stride);
-
- for (nr = 0; nr < priv->num_frags; nr++) {
- skb_frags_to[nr].page = skb_frags_from[nr].page;
- skb_frags_to[nr].page_offset = skb_frags_from[nr].page_offset;
- rx_desc_to->data[nr].addr = rx_desc_from->data[nr].addr;
- }
- }
-}
-
int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int budget)
{
@@ -821,11 +730,6 @@
wmb(); /* ensure HW sees CQ consumer before we post new buffers */
ring->cons = cq->mcq.cons_index;
ring->prod += polled; /* Polled descriptors were realocated in place */
- if (unlikely(!ring->full)) {
- mlx4_en_copy_desc(priv, ring, ring->cons - polled,
- ring->prod - polled, polled);
- mlx4_en_fill_rx_buf(dev, ring);
- }
mlx4_en_update_rx_prod_db(ring);
return polled;
}
diff --git a/drivers/net/mlx4/en_tx.c b/drivers/net/mlx4/en_tx.c
index 5dc7466..08c43f2 100644
--- a/drivers/net/mlx4/en_tx.c
+++ b/drivers/net/mlx4/en_tx.c
@@ -515,16 +515,9 @@
else {
if (netif_msg_tx_err(priv))
en_warn(priv, "Non-linear headers\n");
- dev_kfree_skb_any(skb);
return 0;
}
}
- if (unlikely(*lso_header_size > MAX_LSO_HDR_SIZE)) {
- if (netif_msg_tx_err(priv))
- en_warn(priv, "LSO header size too big\n");
- dev_kfree_skb_any(skb);
- return 0;
- }
} else {
*lso_header_size = 0;
if (!is_inline(skb, NULL))
@@ -616,13 +609,9 @@
int lso_header_size;
void *fragptr;
- if (unlikely(!skb->len)) {
- dev_kfree_skb_any(skb);
- return NETDEV_TX_OK;
- }
real_size = get_real_size(skb, dev, &lso_header_size);
if (unlikely(!real_size))
- return NETDEV_TX_OK;
+ goto tx_drop;
/* Allign descriptor to TXBB size */
desc_size = ALIGN(real_size, TXBB_SIZE);
@@ -630,8 +619,7 @@
if (unlikely(nr_txbb > MAX_DESC_TXBBS)) {
if (netif_msg_tx_err(priv))
en_warn(priv, "Oversized header or SG list\n");
- dev_kfree_skb_any(skb);
- return NETDEV_TX_OK;
+ goto tx_drop;
}
tx_ind = skb->queue_mapping;
@@ -653,14 +641,6 @@
return NETDEV_TX_BUSY;
}
- /* Now that we know what Tx ring to use */
- if (unlikely(!priv->port_up)) {
- if (netif_msg_tx_err(priv))
- en_warn(priv, "xmit: port down!\n");
- dev_kfree_skb_any(skb);
- return NETDEV_TX_OK;
- }
-
/* Track current inflight packets for performance analysis */
AVG_PERF_COUNTER(priv->pstats.inflight_avg,
(u32) (ring->prod - ring->cons - 1));
@@ -785,5 +765,10 @@
mlx4_en_xmit_poll(priv, tx_ind);
return 0;
+
+tx_drop:
+ dev_kfree_skb_any(skb);
+ priv->stats.tx_dropped++;
+ return NETDEV_TX_OK;
}
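The en_tx.c hunks funnel every early-exit case into one tx_drop label that frees the skb, counts it in tx_dropped, and still returns NETDEV_TX_OK so the stack treats the packet as consumed rather than requeuing it. A generic sketch of that pattern (example_priv and packet_fits_hw_limits are hypothetical placeholders, not mlx4 symbols):

static int example_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct example_priv *priv = netdev_priv(dev);   /* hypothetical private struct */

        if (unlikely(!packet_fits_hw_limits(skb)))      /* hypothetical validity check */
                goto tx_drop;

        /* ... build descriptors and ring the doorbell ... */
        return NETDEV_TX_OK;

tx_drop:
        dev_kfree_skb_any(skb);                         /* safe in any context */
        priv->stats.tx_dropped++;
        return NETDEV_TX_OK;                            /* packet consumed, just not sent */
}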
diff --git a/drivers/net/mlx4/mlx4_en.h b/drivers/net/mlx4/mlx4_en.h
index d43a9e4..c7c5e86 100644
--- a/drivers/net/mlx4/mlx4_en.h
+++ b/drivers/net/mlx4/mlx4_en.h
@@ -99,7 +99,6 @@
#define RSS_FACTOR 2
#define TXBB_SIZE 64
#define HEADROOM (2048 / TXBB_SIZE + 1)
-#define MAX_LSO_HDR_SIZE 92
#define STAMP_STRIDE 64
#define STAMP_DWORDS (STAMP_STRIDE / 4)
#define STAMP_SHIFT 31
@@ -296,8 +295,6 @@
u32 prod;
u32 cons;
u32 buf_size;
- int need_refill;
- int full;
void *buf;
void *rx_info;
unsigned long bytes;
@@ -495,7 +492,6 @@
struct mlx4_en_cq rx_cq[MAX_RX_RINGS];
struct work_struct mcast_task;
struct work_struct mac_task;
- struct delayed_work refill_task;
struct work_struct watchdog_task;
struct work_struct linkstate_task;
struct delayed_work stats_task;
@@ -565,7 +561,6 @@
int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv);
void mlx4_en_release_rss_steer(struct mlx4_en_priv *priv);
int mlx4_en_free_tx_buf(struct net_device *dev, struct mlx4_en_tx_ring *ring);
-void mlx4_en_rx_refill(struct work_struct *work);
void mlx4_en_rx_irq(struct mlx4_cq *mcq);
int mlx4_SET_MCAST_FLTR(struct mlx4_dev *dev, u8 port, u64 mac, u64 clear, u8 mode);
diff --git a/drivers/net/mlx4/mr.c b/drivers/net/mlx4/mr.c
index 5887e47..f96948b 100644
--- a/drivers/net/mlx4/mr.c
+++ b/drivers/net/mlx4/mr.c
@@ -399,11 +399,14 @@
if (!mtts)
return -ENOMEM;
+ dma_sync_single_for_cpu(&dev->pdev->dev, dma_handle,
+ npages * sizeof (u64), DMA_TO_DEVICE);
+
for (i = 0; i < npages; ++i)
mtts[i] = cpu_to_be64(page_list[i] | MLX4_MTT_FLAG_PRESENT);
- dma_sync_single_for_cpu(&dev->pdev->dev, dma_handle,
- npages * sizeof (u64), DMA_TO_DEVICE);
+ dma_sync_single_for_device(&dev->pdev->dev, dma_handle,
+ npages * sizeof (u64), DMA_TO_DEVICE);
return 0;
}
@@ -547,11 +550,14 @@
/* Make sure MPT status is visible before writing MTT entries */
wmb();
+ dma_sync_single_for_cpu(&dev->pdev->dev, fmr->dma_handle,
+ npages * sizeof(u64), DMA_TO_DEVICE);
+
for (i = 0; i < npages; ++i)
fmr->mtts[i] = cpu_to_be64(page_list[i] | MLX4_MTT_FLAG_PRESENT);
- dma_sync_single_for_cpu(&dev->pdev->dev, fmr->dma_handle,
- npages * sizeof(u64), DMA_TO_DEVICE);
+ dma_sync_single_for_device(&dev->pdev->dev, fmr->dma_handle,
+ npages * sizeof(u64), DMA_TO_DEVICE);
fmr->mpt->key = cpu_to_be32(key);
fmr->mpt->lkey = cpu_to_be32(key);
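Both mr.c hunks reorder the streaming-DMA syncs so the CPU claims the MTT buffer before writing it and hands it back to the device afterwards, rather than syncing for the CPU only after the writes. A minimal sketch of that ownership protocol, with illustrative names:

#include <linux/types.h>
#include <linux/dma-mapping.h>

/* Sketch only: fill a device-visible table of big-endian addresses. */
static void fill_dma_table(struct device *dev, dma_addr_t handle,
                           __be64 *table, u64 *pages, int npages)
{
        int i;

        dma_sync_single_for_cpu(dev, handle,
                                npages * sizeof(u64), DMA_TO_DEVICE);

        for (i = 0; i < npages; i++)            /* CPU owns the buffer here */
                table[i] = cpu_to_be64(pages[i]);

        dma_sync_single_for_device(dev, handle,
                                   npages * sizeof(u64), DMA_TO_DEVICE);
}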
diff --git a/drivers/net/mv643xx_eth.c b/drivers/net/mv643xx_eth.c
index 745ae8b4..0f32db3 100644
--- a/drivers/net/mv643xx_eth.c
+++ b/drivers/net/mv643xx_eth.c
@@ -1750,12 +1750,12 @@
uc_addr_set(mp, dev->dev_addr);
- port_config = rdlp(mp, PORT_CONFIG);
+ port_config = rdlp(mp, PORT_CONFIG) & ~UNICAST_PROMISCUOUS_MODE;
+
nibbles = uc_addr_filter_mask(dev);
if (!nibbles) {
port_config |= UNICAST_PROMISCUOUS_MODE;
- wrlp(mp, PORT_CONFIG, port_config);
- return;
+ nibbles = 0xffff;
}
for (i = 0; i < 16; i += 4) {
@@ -1776,7 +1776,6 @@
wrl(mp, off, v);
}
- port_config &= ~UNICAST_PROMISCUOUS_MODE;
wrlp(mp, PORT_CONFIG, port_config);
}
diff --git a/drivers/net/netxen/netxen_nic_init.c b/drivers/net/netxen/netxen_nic_init.c
index bdb143d..055bb61 100644
--- a/drivers/net/netxen/netxen_nic_init.c
+++ b/drivers/net/netxen/netxen_nic_init.c
@@ -944,28 +944,31 @@
u32 val = 0;
int retries = 60;
- if (!pegtune_val) {
- do {
- val = NXRD32(adapter, CRB_CMDPEG_STATE);
+ if (pegtune_val)
+ return 0;
- if (val == PHAN_INITIALIZE_COMPLETE ||
- val == PHAN_INITIALIZE_ACK)
- return 0;
+ do {
+ val = NXRD32(adapter, CRB_CMDPEG_STATE);
- msleep(500);
-
- } while (--retries);
-
- if (!retries) {
- pegtune_val = NXRD32(adapter,
- NETXEN_ROMUSB_GLB_PEGTUNE_DONE);
- printk(KERN_WARNING "netxen_phantom_init: init failed, "
- "pegtune_val=%x\n", pegtune_val);
- return -1;
+ switch (val) {
+ case PHAN_INITIALIZE_COMPLETE:
+ case PHAN_INITIALIZE_ACK:
+ return 0;
+ case PHAN_INITIALIZE_FAILED:
+ goto out_err;
+ default:
+ break;
}
- }
- return 0;
+ msleep(500);
+
+ } while (--retries);
+
+ NXWR32(adapter, CRB_CMDPEG_STATE, PHAN_INITIALIZE_FAILED);
+
+out_err:
+ dev_warn(&adapter->pdev->dev, "firmware init failed\n");
+ return -EIO;
}
static int
diff --git a/drivers/net/netxen/netxen_nic_main.c b/drivers/net/netxen/netxen_nic_main.c
index 71daa3d..2919a2d 100644
--- a/drivers/net/netxen/netxen_nic_main.c
+++ b/drivers/net/netxen/netxen_nic_main.c
@@ -705,7 +705,7 @@
first_driver = (adapter->ahw.pci_func == 0);
if (!first_driver)
- return 0;
+ goto wait_init;
first_boot = NXRD32(adapter, NETXEN_CAM_RAM(0x1fc));
@@ -752,6 +752,7 @@
| (_NETXEN_NIC_LINUX_SUBVERSION);
NXWR32(adapter, CRB_DRIVER_VERSION, val);
+wait_init:
/* Handshake with the card before we register the devices. */
err = netxen_phantom_init(adapter, NETXEN_NIC_PEG_TUNE);
if (err) {
@@ -1178,6 +1179,7 @@
free_netdev(netdev);
}
+#ifdef CONFIG_PM
static int
netxen_nic_suspend(struct pci_dev *pdev, pm_message_t state)
{
@@ -1242,6 +1244,7 @@
return 0;
}
+#endif
static int netxen_nic_open(struct net_device *netdev)
{
@@ -1771,8 +1774,10 @@
.id_table = netxen_pci_tbl,
.probe = netxen_nic_probe,
.remove = __devexit_p(netxen_nic_remove),
+#ifdef CONFIG_PM
.suspend = netxen_nic_suspend,
.resume = netxen_nic_resume
+#endif
};
/* Driver Registration on NetXen card */
diff --git a/drivers/net/ppp_async.c b/drivers/net/ppp_async.c
index 6de8399..17c116b 100644
--- a/drivers/net/ppp_async.c
+++ b/drivers/net/ppp_async.c
@@ -356,7 +356,6 @@
if (!skb_queue_empty(&ap->rqueue))
tasklet_schedule(&ap->tsk);
ap_put(ap);
- tty_unthrottle(tty);
}
static void
diff --git a/drivers/net/ppp_synctty.c b/drivers/net/ppp_synctty.c
index d2fa2db..aa3d39f 100644
--- a/drivers/net/ppp_synctty.c
+++ b/drivers/net/ppp_synctty.c
@@ -397,7 +397,6 @@
if (!skb_queue_empty(&ap->rqueue))
tasklet_schedule(&ap->tsk);
sp_put(ap);
- tty_unthrottle(tty);
}
static void
diff --git a/drivers/net/qla3xxx.c b/drivers/net/qla3xxx.c
index 8a823ec..3e4b67a 100644
--- a/drivers/net/qla3xxx.c
+++ b/drivers/net/qla3xxx.c
@@ -3142,6 +3142,7 @@
(void __iomem *)port_regs;
u32 delay = 10;
int status = 0;
+ unsigned long hw_flags = 0;
if(ql_mii_setup(qdev))
return -1;
@@ -3150,7 +3151,8 @@
ql_write_common_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg,
(ISP_SERIAL_PORT_IF_WE |
(ISP_SERIAL_PORT_IF_WE << 16)));
-
+ /* Give the PHY time to come out of reset. */
+ mdelay(100);
qdev->port_link_state = LS_DOWN;
netif_carrier_off(qdev->ndev);
@@ -3350,7 +3352,9 @@
value = ql_read_page0_reg(qdev, &port_regs->portStatus);
if (value & PORT_STATUS_IC)
break;
+ spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
msleep(500);
+ spin_lock_irqsave(&qdev->hw_lock, hw_flags);
} while (--delay);
if (delay == 0) {
@@ -3837,7 +3841,9 @@
16) | ISP_CONTROL_RI));
}
+ spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
ssleep(1);
+ spin_lock_irqsave(&qdev->hw_lock, hw_flags);
} while (--max_wait_time);
spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
index 4e22462..4b53b58 100644
--- a/drivers/net/r8169.c
+++ b/drivers/net/r8169.c
@@ -51,9 +51,6 @@
#define TX_BUFFS_AVAIL(tp) \
(tp->dirty_tx + NUM_TX_DESC - tp->cur_tx - 1)
-/* Maximum events (Rx packets, etc.) to handle at each interrupt. */
-static const int max_interrupt_work = 20;
-
/* Maximum number of multicast addresses to filter (vs. Rx-all-multicast).
The RTL chips use a 64 element hash table based on the Ethernet CRC. */
static const int multicast_filter_limit = 32;
diff --git a/drivers/net/s6gmac.c b/drivers/net/s6gmac.c
new file mode 100644
index 0000000..5345e47
--- /dev/null
+++ b/drivers/net/s6gmac.c
@@ -0,0 +1,1073 @@
+/*
+ * Ethernet driver for S6105 on chip network device
+ * (c)2008 emlix GmbH http://www.emlix.com
+ * Authors: Oskar Schirmer <os@emlix.com>
+ * Daniel Gloeckner <dg@emlix.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/types.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/spinlock.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/if.h>
+#include <linux/stddef.h>
+#include <linux/mii.h>
+#include <linux/phy.h>
+#include <linux/platform_device.h>
+#include <variant/hardware.h>
+#include <variant/dmac.h>
+
+#define DRV_NAME "s6gmac"
+#define DRV_PRMT DRV_NAME ": "
+
+
+/* register declarations */
+
+#define S6_GMAC_MACCONF1 0x000
+#define S6_GMAC_MACCONF1_TXENA 0
+#define S6_GMAC_MACCONF1_SYNCTX 1
+#define S6_GMAC_MACCONF1_RXENA 2
+#define S6_GMAC_MACCONF1_SYNCRX 3
+#define S6_GMAC_MACCONF1_TXFLOWCTRL 4
+#define S6_GMAC_MACCONF1_RXFLOWCTRL 5
+#define S6_GMAC_MACCONF1_LOOPBACK 8
+#define S6_GMAC_MACCONF1_RESTXFUNC 16
+#define S6_GMAC_MACCONF1_RESRXFUNC 17
+#define S6_GMAC_MACCONF1_RESTXMACCTRL 18
+#define S6_GMAC_MACCONF1_RESRXMACCTRL 19
+#define S6_GMAC_MACCONF1_SIMULRES 30
+#define S6_GMAC_MACCONF1_SOFTRES 31
+#define S6_GMAC_MACCONF2 0x004
+#define S6_GMAC_MACCONF2_FULL 0
+#define S6_GMAC_MACCONF2_CRCENA 1
+#define S6_GMAC_MACCONF2_PADCRCENA 2
+#define S6_GMAC_MACCONF2_LENGTHFCHK 4
+#define S6_GMAC_MACCONF2_HUGEFRAMENA 5
+#define S6_GMAC_MACCONF2_IFMODE 8
+#define S6_GMAC_MACCONF2_IFMODE_NIBBLE 1
+#define S6_GMAC_MACCONF2_IFMODE_BYTE 2
+#define S6_GMAC_MACCONF2_IFMODE_MASK 3
+#define S6_GMAC_MACCONF2_PREAMBLELEN 12
+#define S6_GMAC_MACCONF2_PREAMBLELEN_MASK 0x0F
+#define S6_GMAC_MACIPGIFG 0x008
+#define S6_GMAC_MACIPGIFG_B2BINTERPGAP 0
+#define S6_GMAC_MACIPGIFG_B2BINTERPGAP_MASK 0x7F
+#define S6_GMAC_MACIPGIFG_MINIFGENFORCE 8
+#define S6_GMAC_MACIPGIFG_B2BINTERPGAP2 16
+#define S6_GMAC_MACIPGIFG_B2BINTERPGAP1 24
+#define S6_GMAC_MACHALFDUPLEX 0x00C
+#define S6_GMAC_MACHALFDUPLEX_COLLISWIN 0
+#define S6_GMAC_MACHALFDUPLEX_COLLISWIN_MASK 0x3F
+#define S6_GMAC_MACHALFDUPLEX_RETXMAX 12
+#define S6_GMAC_MACHALFDUPLEX_RETXMAX_MASK 0x0F
+#define S6_GMAC_MACHALFDUPLEX_EXCESSDEF 16
+#define S6_GMAC_MACHALFDUPLEX_NOBACKOFF 17
+#define S6_GMAC_MACHALFDUPLEX_BPNOBCKOF 18
+#define S6_GMAC_MACHALFDUPLEX_ALTBEBENA 19
+#define S6_GMAC_MACHALFDUPLEX_ALTBEBTRN 20
+#define S6_GMAC_MACHALFDUPLEX_ALTBEBTR_MASK 0x0F
+#define S6_GMAC_MACMAXFRAMELEN 0x010
+#define S6_GMAC_MACMIICONF 0x020
+#define S6_GMAC_MACMIICONF_CSEL 0
+#define S6_GMAC_MACMIICONF_CSEL_DIV10 0
+#define S6_GMAC_MACMIICONF_CSEL_DIV12 1
+#define S6_GMAC_MACMIICONF_CSEL_DIV14 2
+#define S6_GMAC_MACMIICONF_CSEL_DIV18 3
+#define S6_GMAC_MACMIICONF_CSEL_DIV24 4
+#define S6_GMAC_MACMIICONF_CSEL_DIV34 5
+#define S6_GMAC_MACMIICONF_CSEL_DIV68 6
+#define S6_GMAC_MACMIICONF_CSEL_DIV168 7
+#define S6_GMAC_MACMIICONF_CSEL_MASK 7
+#define S6_GMAC_MACMIICONF_PREAMBLESUPR 4
+#define S6_GMAC_MACMIICONF_SCANAUTOINCR 5
+#define S6_GMAC_MACMIICMD 0x024
+#define S6_GMAC_MACMIICMD_READ 0
+#define S6_GMAC_MACMIICMD_SCAN 1
+#define S6_GMAC_MACMIIADDR 0x028
+#define S6_GMAC_MACMIIADDR_REG 0
+#define S6_GMAC_MACMIIADDR_REG_MASK 0x1F
+#define S6_GMAC_MACMIIADDR_PHY 8
+#define S6_GMAC_MACMIIADDR_PHY_MASK 0x1F
+#define S6_GMAC_MACMIICTRL 0x02C
+#define S6_GMAC_MACMIISTAT 0x030
+#define S6_GMAC_MACMIIINDI 0x034
+#define S6_GMAC_MACMIIINDI_BUSY 0
+#define S6_GMAC_MACMIIINDI_SCAN 1
+#define S6_GMAC_MACMIIINDI_INVAL 2
+#define S6_GMAC_MACINTERFSTAT 0x03C
+#define S6_GMAC_MACINTERFSTAT_LINKFAIL 3
+#define S6_GMAC_MACINTERFSTAT_EXCESSDEF 9
+#define S6_GMAC_MACSTATADDR1 0x040
+#define S6_GMAC_MACSTATADDR2 0x044
+
+#define S6_GMAC_FIFOCONF0 0x048
+#define S6_GMAC_FIFOCONF0_HSTRSTWT 0
+#define S6_GMAC_FIFOCONF0_HSTRSTSR 1
+#define S6_GMAC_FIFOCONF0_HSTRSTFR 2
+#define S6_GMAC_FIFOCONF0_HSTRSTST 3
+#define S6_GMAC_FIFOCONF0_HSTRSTFT 4
+#define S6_GMAC_FIFOCONF0_WTMENREQ 8
+#define S6_GMAC_FIFOCONF0_SRFENREQ 9
+#define S6_GMAC_FIFOCONF0_FRFENREQ 10
+#define S6_GMAC_FIFOCONF0_STFENREQ 11
+#define S6_GMAC_FIFOCONF0_FTFENREQ 12
+#define S6_GMAC_FIFOCONF0_WTMENRPLY 16
+#define S6_GMAC_FIFOCONF0_SRFENRPLY 17
+#define S6_GMAC_FIFOCONF0_FRFENRPLY 18
+#define S6_GMAC_FIFOCONF0_STFENRPLY 19
+#define S6_GMAC_FIFOCONF0_FTFENRPLY 20
+#define S6_GMAC_FIFOCONF1 0x04C
+#define S6_GMAC_FIFOCONF2 0x050
+#define S6_GMAC_FIFOCONF2_CFGLWM 0
+#define S6_GMAC_FIFOCONF2_CFGHWM 16
+#define S6_GMAC_FIFOCONF3 0x054
+#define S6_GMAC_FIFOCONF3_CFGFTTH 0
+#define S6_GMAC_FIFOCONF3_CFGHWMFT 16
+#define S6_GMAC_FIFOCONF4 0x058
+#define S6_GMAC_FIFOCONF_RSV_PREVDROP 0
+#define S6_GMAC_FIFOCONF_RSV_RUNT 1
+#define S6_GMAC_FIFOCONF_RSV_FALSECAR 2
+#define S6_GMAC_FIFOCONF_RSV_CODEERR 3
+#define S6_GMAC_FIFOCONF_RSV_CRCERR 4
+#define S6_GMAC_FIFOCONF_RSV_LENGTHERR 5
+#define S6_GMAC_FIFOCONF_RSV_LENRANGE 6
+#define S6_GMAC_FIFOCONF_RSV_OK 7
+#define S6_GMAC_FIFOCONF_RSV_MULTICAST 8
+#define S6_GMAC_FIFOCONF_RSV_BROADCAST 9
+#define S6_GMAC_FIFOCONF_RSV_DRIBBLE 10
+#define S6_GMAC_FIFOCONF_RSV_CTRLFRAME 11
+#define S6_GMAC_FIFOCONF_RSV_PAUSECTRL 12
+#define S6_GMAC_FIFOCONF_RSV_UNOPCODE 13
+#define S6_GMAC_FIFOCONF_RSV_VLANTAG 14
+#define S6_GMAC_FIFOCONF_RSV_LONGEVENT 15
+#define S6_GMAC_FIFOCONF_RSV_TRUNCATED 16
+#define S6_GMAC_FIFOCONF_RSV_MASK 0x3FFFF
+#define S6_GMAC_FIFOCONF5 0x05C
+#define S6_GMAC_FIFOCONF5_DROPLT64 18
+#define S6_GMAC_FIFOCONF5_CFGBYTM 19
+#define S6_GMAC_FIFOCONF5_RXDROPSIZE 20
+#define S6_GMAC_FIFOCONF5_RXDROPSIZE_MASK 0xF
+
+#define S6_GMAC_STAT_REGS 0x080
+#define S6_GMAC_STAT_SIZE_MIN 12
+#define S6_GMAC_STATTR64 0x080
+#define S6_GMAC_STATTR64_SIZE 18
+#define S6_GMAC_STATTR127 0x084
+#define S6_GMAC_STATTR127_SIZE 18
+#define S6_GMAC_STATTR255 0x088
+#define S6_GMAC_STATTR255_SIZE 18
+#define S6_GMAC_STATTR511 0x08C
+#define S6_GMAC_STATTR511_SIZE 18
+#define S6_GMAC_STATTR1K 0x090
+#define S6_GMAC_STATTR1K_SIZE 18
+#define S6_GMAC_STATTRMAX 0x094
+#define S6_GMAC_STATTRMAX_SIZE 18
+#define S6_GMAC_STATTRMGV 0x098
+#define S6_GMAC_STATTRMGV_SIZE 18
+#define S6_GMAC_STATRBYT 0x09C
+#define S6_GMAC_STATRBYT_SIZE 24
+#define S6_GMAC_STATRPKT 0x0A0
+#define S6_GMAC_STATRPKT_SIZE 18
+#define S6_GMAC_STATRFCS 0x0A4
+#define S6_GMAC_STATRFCS_SIZE 12
+#define S6_GMAC_STATRMCA 0x0A8
+#define S6_GMAC_STATRMCA_SIZE 18
+#define S6_GMAC_STATRBCA 0x0AC
+#define S6_GMAC_STATRBCA_SIZE 22
+#define S6_GMAC_STATRXCF 0x0B0
+#define S6_GMAC_STATRXCF_SIZE 18
+#define S6_GMAC_STATRXPF 0x0B4
+#define S6_GMAC_STATRXPF_SIZE 12
+#define S6_GMAC_STATRXUO 0x0B8
+#define S6_GMAC_STATRXUO_SIZE 12
+#define S6_GMAC_STATRALN 0x0BC
+#define S6_GMAC_STATRALN_SIZE 12
+#define S6_GMAC_STATRFLR 0x0C0
+#define S6_GMAC_STATRFLR_SIZE 16
+#define S6_GMAC_STATRCDE 0x0C4
+#define S6_GMAC_STATRCDE_SIZE 12
+#define S6_GMAC_STATRCSE 0x0C8
+#define S6_GMAC_STATRCSE_SIZE 12
+#define S6_GMAC_STATRUND 0x0CC
+#define S6_GMAC_STATRUND_SIZE 12
+#define S6_GMAC_STATROVR 0x0D0
+#define S6_GMAC_STATROVR_SIZE 12
+#define S6_GMAC_STATRFRG 0x0D4
+#define S6_GMAC_STATRFRG_SIZE 12
+#define S6_GMAC_STATRJBR 0x0D8
+#define S6_GMAC_STATRJBR_SIZE 12
+#define S6_GMAC_STATRDRP 0x0DC
+#define S6_GMAC_STATRDRP_SIZE 12
+#define S6_GMAC_STATTBYT 0x0E0
+#define S6_GMAC_STATTBYT_SIZE 24
+#define S6_GMAC_STATTPKT 0x0E4
+#define S6_GMAC_STATTPKT_SIZE 18
+#define S6_GMAC_STATTMCA 0x0E8
+#define S6_GMAC_STATTMCA_SIZE 18
+#define S6_GMAC_STATTBCA 0x0EC
+#define S6_GMAC_STATTBCA_SIZE 18
+#define S6_GMAC_STATTXPF 0x0F0
+#define S6_GMAC_STATTXPF_SIZE 12
+#define S6_GMAC_STATTDFR 0x0F4
+#define S6_GMAC_STATTDFR_SIZE 12
+#define S6_GMAC_STATTEDF 0x0F8
+#define S6_GMAC_STATTEDF_SIZE 12
+#define S6_GMAC_STATTSCL 0x0FC
+#define S6_GMAC_STATTSCL_SIZE 12
+#define S6_GMAC_STATTMCL 0x100
+#define S6_GMAC_STATTMCL_SIZE 12
+#define S6_GMAC_STATTLCL 0x104
+#define S6_GMAC_STATTLCL_SIZE 12
+#define S6_GMAC_STATTXCL 0x108
+#define S6_GMAC_STATTXCL_SIZE 12
+#define S6_GMAC_STATTNCL 0x10C
+#define S6_GMAC_STATTNCL_SIZE 13
+#define S6_GMAC_STATTPFH 0x110
+#define S6_GMAC_STATTPFH_SIZE 12
+#define S6_GMAC_STATTDRP 0x114
+#define S6_GMAC_STATTDRP_SIZE 12
+#define S6_GMAC_STATTJBR 0x118
+#define S6_GMAC_STATTJBR_SIZE 12
+#define S6_GMAC_STATTFCS 0x11C
+#define S6_GMAC_STATTFCS_SIZE 12
+#define S6_GMAC_STATTXCF 0x120
+#define S6_GMAC_STATTXCF_SIZE 12
+#define S6_GMAC_STATTOVR 0x124
+#define S6_GMAC_STATTOVR_SIZE 12
+#define S6_GMAC_STATTUND 0x128
+#define S6_GMAC_STATTUND_SIZE 12
+#define S6_GMAC_STATTFRG 0x12C
+#define S6_GMAC_STATTFRG_SIZE 12
+#define S6_GMAC_STATCARRY(n) (0x130 + 4*(n))
+#define S6_GMAC_STATCARRYMSK(n) (0x138 + 4*(n))
+#define S6_GMAC_STATCARRY1_RDRP 0
+#define S6_GMAC_STATCARRY1_RJBR 1
+#define S6_GMAC_STATCARRY1_RFRG 2
+#define S6_GMAC_STATCARRY1_ROVR 3
+#define S6_GMAC_STATCARRY1_RUND 4
+#define S6_GMAC_STATCARRY1_RCSE 5
+#define S6_GMAC_STATCARRY1_RCDE 6
+#define S6_GMAC_STATCARRY1_RFLR 7
+#define S6_GMAC_STATCARRY1_RALN 8
+#define S6_GMAC_STATCARRY1_RXUO 9
+#define S6_GMAC_STATCARRY1_RXPF 10
+#define S6_GMAC_STATCARRY1_RXCF 11
+#define S6_GMAC_STATCARRY1_RBCA 12
+#define S6_GMAC_STATCARRY1_RMCA 13
+#define S6_GMAC_STATCARRY1_RFCS 14
+#define S6_GMAC_STATCARRY1_RPKT 15
+#define S6_GMAC_STATCARRY1_RBYT 16
+#define S6_GMAC_STATCARRY1_TRMGV 25
+#define S6_GMAC_STATCARRY1_TRMAX 26
+#define S6_GMAC_STATCARRY1_TR1K 27
+#define S6_GMAC_STATCARRY1_TR511 28
+#define S6_GMAC_STATCARRY1_TR255 29
+#define S6_GMAC_STATCARRY1_TR127 30
+#define S6_GMAC_STATCARRY1_TR64 31
+#define S6_GMAC_STATCARRY2_TDRP 0
+#define S6_GMAC_STATCARRY2_TPFH 1
+#define S6_GMAC_STATCARRY2_TNCL 2
+#define S6_GMAC_STATCARRY2_TXCL 3
+#define S6_GMAC_STATCARRY2_TLCL 4
+#define S6_GMAC_STATCARRY2_TMCL 5
+#define S6_GMAC_STATCARRY2_TSCL 6
+#define S6_GMAC_STATCARRY2_TEDF 7
+#define S6_GMAC_STATCARRY2_TDFR 8
+#define S6_GMAC_STATCARRY2_TXPF 9
+#define S6_GMAC_STATCARRY2_TBCA 10
+#define S6_GMAC_STATCARRY2_TMCA 11
+#define S6_GMAC_STATCARRY2_TPKT 12
+#define S6_GMAC_STATCARRY2_TBYT 13
+#define S6_GMAC_STATCARRY2_TFRG 14
+#define S6_GMAC_STATCARRY2_TUND 15
+#define S6_GMAC_STATCARRY2_TOVR 16
+#define S6_GMAC_STATCARRY2_TXCF 17
+#define S6_GMAC_STATCARRY2_TFCS 18
+#define S6_GMAC_STATCARRY2_TJBR 19
+
+#define S6_GMAC_HOST_PBLKCTRL 0x140
+#define S6_GMAC_HOST_PBLKCTRL_TXENA 0
+#define S6_GMAC_HOST_PBLKCTRL_RXENA 1
+#define S6_GMAC_HOST_PBLKCTRL_TXSRES 2
+#define S6_GMAC_HOST_PBLKCTRL_RXSRES 3
+#define S6_GMAC_HOST_PBLKCTRL_TXBSIZ 8
+#define S6_GMAC_HOST_PBLKCTRL_RXBSIZ 12
+#define S6_GMAC_HOST_PBLKCTRL_SIZ_16 4
+#define S6_GMAC_HOST_PBLKCTRL_SIZ_32 5
+#define S6_GMAC_HOST_PBLKCTRL_SIZ_64 6
+#define S6_GMAC_HOST_PBLKCTRL_SIZ_128 7
+#define S6_GMAC_HOST_PBLKCTRL_SIZ_MASK 0xF
+#define S6_GMAC_HOST_PBLKCTRL_STATENA 16
+#define S6_GMAC_HOST_PBLKCTRL_STATAUTOZ 17
+#define S6_GMAC_HOST_PBLKCTRL_STATCLEAR 18
+#define S6_GMAC_HOST_PBLKCTRL_RGMII 19
+#define S6_GMAC_HOST_INTMASK 0x144
+#define S6_GMAC_HOST_INTSTAT 0x148
+#define S6_GMAC_HOST_INT_TXBURSTOVER 3
+#define S6_GMAC_HOST_INT_TXPREWOVER 4
+#define S6_GMAC_HOST_INT_RXBURSTUNDER 5
+#define S6_GMAC_HOST_INT_RXPOSTRFULL 6
+#define S6_GMAC_HOST_INT_RXPOSTRUNDER 7
+#define S6_GMAC_HOST_RXFIFOHWM 0x14C
+#define S6_GMAC_HOST_CTRLFRAMXP 0x150
+#define S6_GMAC_HOST_DSTADDRLO(n) (0x160 + 8*(n))
+#define S6_GMAC_HOST_DSTADDRHI(n) (0x164 + 8*(n))
+#define S6_GMAC_HOST_DSTMASKLO(n) (0x180 + 8*(n))
+#define S6_GMAC_HOST_DSTMASKHI(n) (0x184 + 8*(n))
+
+#define S6_GMAC_BURST_PREWR 0x1B0
+#define S6_GMAC_BURST_PREWR_LEN 0
+#define S6_GMAC_BURST_PREWR_LEN_MASK ((1 << 20) - 1)
+#define S6_GMAC_BURST_PREWR_CFE 20
+#define S6_GMAC_BURST_PREWR_PPE 21
+#define S6_GMAC_BURST_PREWR_FCS 22
+#define S6_GMAC_BURST_PREWR_PAD 23
+#define S6_GMAC_BURST_POSTRD 0x1D0
+#define S6_GMAC_BURST_POSTRD_LEN 0
+#define S6_GMAC_BURST_POSTRD_LEN_MASK ((1 << 20) - 1)
+#define S6_GMAC_BURST_POSTRD_DROP 20
+
+
+/* data handling */
+
+#define S6_NUM_TX_SKB 8 /* must be larger than TX fifo size */
+#define S6_NUM_RX_SKB 16
+#define S6_MAX_FRLEN 1536
+
+struct s6gmac {
+ u32 reg;
+ u32 tx_dma;
+ u32 rx_dma;
+ u32 io;
+ u8 tx_chan;
+ u8 rx_chan;
+ spinlock_t lock;
+ u8 tx_skb_i, tx_skb_o;
+ u8 rx_skb_i, rx_skb_o;
+ struct sk_buff *tx_skb[S6_NUM_TX_SKB];
+ struct sk_buff *rx_skb[S6_NUM_RX_SKB];
+ unsigned long carry[sizeof(struct net_device_stats) / sizeof(long)];
+ unsigned long stats[sizeof(struct net_device_stats) / sizeof(long)];
+ struct phy_device *phydev;
+ struct {
+ struct mii_bus *bus;
+ int irq[PHY_MAX_ADDR];
+ } mii;
+ struct {
+ unsigned int mbit;
+ u8 giga;
+ u8 isup;
+ u8 full;
+ } link;
+};
+
+static void s6gmac_rx_fillfifo(struct s6gmac *pd)
+{
+ struct sk_buff *skb;
+ while ((((u8)(pd->rx_skb_i - pd->rx_skb_o)) < S6_NUM_RX_SKB)
+ && (!s6dmac_fifo_full(pd->rx_dma, pd->rx_chan))
+ && (skb = dev_alloc_skb(S6_MAX_FRLEN + 2))) {
+ pd->rx_skb[(pd->rx_skb_i++) % S6_NUM_RX_SKB] = skb;
+ s6dmac_put_fifo_cache(pd->rx_dma, pd->rx_chan,
+ pd->io, (u32)skb->data, S6_MAX_FRLEN);
+ }
+}
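Editor's note: the rx_skb_i/rx_skb_o (and tx_skb_i/tx_skb_o) indices used above are free-running u8 counters; the number of buffers in flight is computed as (u8)(i - o), which stays correct across 8-bit wraparound as long as the ring sizes never exceed 256. A standalone illustration of that arithmetic (the values are arbitrary):

    #include <stdio.h>
    #include <stdint.h>

    #define S6_NUM_RX_SKB 16

    int main(void)
    {
        uint8_t in = 250, out = 250;
        int k;

        for (k = 0; k < 10; k++)
            in++;               /* producer wraps past 255 down to 4 */
        for (k = 0; k < 3; k++)
            out++;              /* consumer at 253 */
        /* (u8)(in - out) still reports 7 buffers in flight */
        printf("in=%u out=%u pending=%u slot=%u\n",
               (unsigned)in, (unsigned)out,
               (unsigned)(uint8_t)(in - out),
               (unsigned)(in % S6_NUM_RX_SKB));
        return 0;
    }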
+
+static void s6gmac_rx_interrupt(struct net_device *dev)
+{
+ struct s6gmac *pd = netdev_priv(dev);
+ u32 pfx;
+ struct sk_buff *skb;
+ while (((u8)(pd->rx_skb_i - pd->rx_skb_o)) >
+ s6dmac_pending_count(pd->rx_dma, pd->rx_chan)) {
+ skb = pd->rx_skb[(pd->rx_skb_o++) % S6_NUM_RX_SKB];
+ pfx = readl(pd->reg + S6_GMAC_BURST_POSTRD);
+ if (pfx & (1 << S6_GMAC_BURST_POSTRD_DROP)) {
+ dev_kfree_skb_irq(skb);
+ } else {
+ skb_put(skb, (pfx >> S6_GMAC_BURST_POSTRD_LEN)
+ & S6_GMAC_BURST_POSTRD_LEN_MASK);
+ skb->dev = dev;
+ skb->protocol = eth_type_trans(skb, dev);
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ netif_rx(skb);
+ }
+ }
+}
+
+static void s6gmac_tx_interrupt(struct net_device *dev)
+{
+ struct s6gmac *pd = netdev_priv(dev);
+ while (((u8)(pd->tx_skb_i - pd->tx_skb_o)) >
+ s6dmac_pending_count(pd->tx_dma, pd->tx_chan)) {
+ dev_kfree_skb_irq(pd->tx_skb[(pd->tx_skb_o++) % S6_NUM_TX_SKB]);
+ }
+ if (!s6dmac_fifo_full(pd->tx_dma, pd->tx_chan))
+ netif_wake_queue(dev);
+}
+
+struct s6gmac_statinf {
+ unsigned reg_size : 4; /* 0: unused */
+ unsigned reg_off : 6;
+ unsigned net_index : 6;
+};
+
+#define S6_STATS_B (8 * sizeof(u32))
+#define S6_STATS_C(b, r, f) [b] = { \
+ BUILD_BUG_ON_ZERO(r##_SIZE < S6_GMAC_STAT_SIZE_MIN) + \
+ BUILD_BUG_ON_ZERO((r##_SIZE - (S6_GMAC_STAT_SIZE_MIN - 1)) \
+ >= (1<<4)) + \
+ r##_SIZE - (S6_GMAC_STAT_SIZE_MIN - 1), \
+ BUILD_BUG_ON_ZERO(((unsigned)((r - S6_GMAC_STAT_REGS) / sizeof(u32))) \
+ >= ((1<<6)-1)) + \
+ (r - S6_GMAC_STAT_REGS) / sizeof(u32), \
+ BUILD_BUG_ON_ZERO((offsetof(struct net_device_stats, f)) \
+ % sizeof(unsigned long)) + \
+ BUILD_BUG_ON_ZERO((((unsigned)(offsetof(struct net_device_stats, f)) \
+ / sizeof(unsigned long)) >= (1<<6))) + \
+ BUILD_BUG_ON_ZERO((sizeof(((struct net_device_stats *)0)->f) \
+ != sizeof(unsigned long))) + \
+ (offsetof(struct net_device_stats, f)) / sizeof(unsigned long)},
+
+static const struct s6gmac_statinf statinf[2][S6_STATS_B] = { {
+ S6_STATS_C(S6_GMAC_STATCARRY1_RBYT, S6_GMAC_STATRBYT, rx_bytes)
+ S6_STATS_C(S6_GMAC_STATCARRY1_RPKT, S6_GMAC_STATRPKT, rx_packets)
+ S6_STATS_C(S6_GMAC_STATCARRY1_RFCS, S6_GMAC_STATRFCS, rx_crc_errors)
+ S6_STATS_C(S6_GMAC_STATCARRY1_RMCA, S6_GMAC_STATRMCA, multicast)
+ S6_STATS_C(S6_GMAC_STATCARRY1_RALN, S6_GMAC_STATRALN, rx_frame_errors)
+ S6_STATS_C(S6_GMAC_STATCARRY1_RFLR, S6_GMAC_STATRFLR, rx_length_errors)
+ S6_STATS_C(S6_GMAC_STATCARRY1_RCDE, S6_GMAC_STATRCDE, rx_missed_errors)
+ S6_STATS_C(S6_GMAC_STATCARRY1_RUND, S6_GMAC_STATRUND, rx_length_errors)
+ S6_STATS_C(S6_GMAC_STATCARRY1_ROVR, S6_GMAC_STATROVR, rx_length_errors)
+ S6_STATS_C(S6_GMAC_STATCARRY1_RFRG, S6_GMAC_STATRFRG, rx_crc_errors)
+ S6_STATS_C(S6_GMAC_STATCARRY1_RJBR, S6_GMAC_STATRJBR, rx_crc_errors)
+ S6_STATS_C(S6_GMAC_STATCARRY1_RDRP, S6_GMAC_STATRDRP, rx_dropped)
+}, {
+ S6_STATS_C(S6_GMAC_STATCARRY2_TBYT, S6_GMAC_STATTBYT, tx_bytes)
+ S6_STATS_C(S6_GMAC_STATCARRY2_TPKT, S6_GMAC_STATTPKT, tx_packets)
+ S6_STATS_C(S6_GMAC_STATCARRY2_TEDF, S6_GMAC_STATTEDF, tx_aborted_errors)
+ S6_STATS_C(S6_GMAC_STATCARRY2_TXCL, S6_GMAC_STATTXCL, tx_aborted_errors)
+ S6_STATS_C(S6_GMAC_STATCARRY2_TNCL, S6_GMAC_STATTNCL, collisions)
+ S6_STATS_C(S6_GMAC_STATCARRY2_TDRP, S6_GMAC_STATTDRP, tx_dropped)
+ S6_STATS_C(S6_GMAC_STATCARRY2_TJBR, S6_GMAC_STATTJBR, tx_errors)
+ S6_STATS_C(S6_GMAC_STATCARRY2_TFCS, S6_GMAC_STATTFCS, tx_errors)
+ S6_STATS_C(S6_GMAC_STATCARRY2_TOVR, S6_GMAC_STATTOVR, tx_errors)
+ S6_STATS_C(S6_GMAC_STATCARRY2_TUND, S6_GMAC_STATTUND, tx_errors)
+ S6_STATS_C(S6_GMAC_STATCARRY2_TFRG, S6_GMAC_STATTFRG, tx_errors)
+} };
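Editor's note: the S6_STATS_C() initializer above folds several BUILD_BUG_ON_ZERO() checks into each table entry, so a register that is too narrow, lies outside the statistics block, or maps onto a mis-sized net_device_stats field breaks the build instead of corrupting counters at run time; every passing check contributes 0 to the stored value. A standalone re-spelling of the idiom (the macro below is my own stand-in, not the kernel header's definition, and the checked condition is only an example):

    #include <stdio.h>
    #include <stddef.h>

    /* an invalid (negative) array size turns a true condition into a
     * compile-time error; otherwise the expression evaluates to 0 */
    #define MY_BUG_ON_ZERO(e) (sizeof(char[1 - 2 * !!(e)]) - 1)

    struct demo_stats { unsigned long rx_bytes; };

    int main(void)
    {
        /* 0 is silently added while the check holds ... */
        size_t off = MY_BUG_ON_ZERO(sizeof(unsigned long) < 4) +
                     offsetof(struct demo_stats, rx_bytes);
        printf("offset=%zu\n", off);
        /* ... whereas MY_BUG_ON_ZERO(1) here would not compile */
        return 0;
    }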
+
+static void s6gmac_stats_collect(struct s6gmac *pd,
+ const struct s6gmac_statinf *inf)
+{
+ int b;
+ for (b = 0; b < S6_STATS_B; b++) {
+ if (inf[b].reg_size) {
+ pd->stats[inf[b].net_index] +=
+ readl(pd->reg + S6_GMAC_STAT_REGS
+ + sizeof(u32) * inf[b].reg_off);
+ }
+ }
+}
+
+static void s6gmac_stats_carry(struct s6gmac *pd,
+ const struct s6gmac_statinf *inf, u32 mask)
+{
+ int b;
+ while (mask) {
+ b = fls(mask) - 1;
+ mask &= ~(1 << b);
+ pd->carry[inf[b].net_index] += (1 << inf[b].reg_size);
+ }
+}
+
+static inline u32 s6gmac_stats_pending(struct s6gmac *pd, int carry)
+{
+ int r = readl(pd->reg + S6_GMAC_STATCARRY(carry)) &
+ ~readl(pd->reg + S6_GMAC_STATCARRYMSK(carry));
+ return r;
+}
+
+static inline void s6gmac_stats_interrupt(struct s6gmac *pd, int carry)
+{
+ u32 mask;
+ mask = s6gmac_stats_pending(pd, carry);
+ if (mask) {
+ writel(mask, pd->reg + S6_GMAC_STATCARRY(carry));
+ s6gmac_stats_carry(pd, &statinf[carry][0], mask);
+ }
+}
+
+static irqreturn_t s6gmac_interrupt(int irq, void *dev_id)
+{
+ struct net_device *dev = (struct net_device *)dev_id;
+ struct s6gmac *pd;
+ if (!dev)
+ return IRQ_NONE;
+ pd = netdev_priv(dev);
+ spin_lock(&pd->lock);
+ if (s6dmac_termcnt_irq(pd->rx_dma, pd->rx_chan))
+ s6gmac_rx_interrupt(dev);
+ s6gmac_rx_fillfifo(pd);
+ if (s6dmac_termcnt_irq(pd->tx_dma, pd->tx_chan))
+ s6gmac_tx_interrupt(dev);
+ s6gmac_stats_interrupt(pd, 0);
+ s6gmac_stats_interrupt(pd, 1);
+ spin_unlock(&pd->lock);
+ return IRQ_HANDLED;
+}
+
+static inline void s6gmac_set_dstaddr(struct s6gmac *pd, int n,
+ u32 addrlo, u32 addrhi, u32 masklo, u32 maskhi)
+{
+ writel(addrlo, pd->reg + S6_GMAC_HOST_DSTADDRLO(n));
+ writel(addrhi, pd->reg + S6_GMAC_HOST_DSTADDRHI(n));
+ writel(masklo, pd->reg + S6_GMAC_HOST_DSTMASKLO(n));
+ writel(maskhi, pd->reg + S6_GMAC_HOST_DSTMASKHI(n));
+}
+
+static inline void s6gmac_stop_device(struct net_device *dev)
+{
+ struct s6gmac *pd = netdev_priv(dev);
+ writel(0, pd->reg + S6_GMAC_MACCONF1);
+}
+
+static inline void s6gmac_init_device(struct net_device *dev)
+{
+ struct s6gmac *pd = netdev_priv(dev);
+ int is_rgmii = !!(pd->phydev->supported
+ & (SUPPORTED_1000baseT_Full | SUPPORTED_1000baseT_Half));
+#if 0
+ writel(1 << S6_GMAC_MACCONF1_SYNCTX |
+ 1 << S6_GMAC_MACCONF1_SYNCRX |
+ 1 << S6_GMAC_MACCONF1_TXFLOWCTRL |
+ 1 << S6_GMAC_MACCONF1_RXFLOWCTRL |
+ 1 << S6_GMAC_MACCONF1_RESTXFUNC |
+ 1 << S6_GMAC_MACCONF1_RESRXFUNC |
+ 1 << S6_GMAC_MACCONF1_RESTXMACCTRL |
+ 1 << S6_GMAC_MACCONF1_RESRXMACCTRL,
+ pd->reg + S6_GMAC_MACCONF1);
+#endif
+ writel(1 << S6_GMAC_MACCONF1_SOFTRES, pd->reg + S6_GMAC_MACCONF1);
+ udelay(1000);
+ writel(1 << S6_GMAC_MACCONF1_TXENA | 1 << S6_GMAC_MACCONF1_RXENA,
+ pd->reg + S6_GMAC_MACCONF1);
+ writel(1 << S6_GMAC_HOST_PBLKCTRL_TXSRES |
+ 1 << S6_GMAC_HOST_PBLKCTRL_RXSRES,
+ pd->reg + S6_GMAC_HOST_PBLKCTRL);
+ writel(S6_GMAC_HOST_PBLKCTRL_SIZ_128 << S6_GMAC_HOST_PBLKCTRL_TXBSIZ |
+ S6_GMAC_HOST_PBLKCTRL_SIZ_128 << S6_GMAC_HOST_PBLKCTRL_RXBSIZ |
+ 1 << S6_GMAC_HOST_PBLKCTRL_STATENA |
+ 1 << S6_GMAC_HOST_PBLKCTRL_STATCLEAR |
+ is_rgmii << S6_GMAC_HOST_PBLKCTRL_RGMII,
+ pd->reg + S6_GMAC_HOST_PBLKCTRL);
+ writel(1 << S6_GMAC_MACCONF1_TXENA |
+ 1 << S6_GMAC_MACCONF1_RXENA |
+ (dev->flags & IFF_LOOPBACK ? 1 : 0)
+ << S6_GMAC_MACCONF1_LOOPBACK,
+ pd->reg + S6_GMAC_MACCONF1);
+ writel(dev->mtu && (dev->mtu < (S6_MAX_FRLEN - ETH_HLEN-ETH_FCS_LEN)) ?
+ dev->mtu+ETH_HLEN+ETH_FCS_LEN : S6_MAX_FRLEN,
+ pd->reg + S6_GMAC_MACMAXFRAMELEN);
+ writel((pd->link.full ? 1 : 0) << S6_GMAC_MACCONF2_FULL |
+ 1 << S6_GMAC_MACCONF2_PADCRCENA |
+ 1 << S6_GMAC_MACCONF2_LENGTHFCHK |
+ (pd->link.giga ?
+ S6_GMAC_MACCONF2_IFMODE_BYTE :
+ S6_GMAC_MACCONF2_IFMODE_NIBBLE)
+ << S6_GMAC_MACCONF2_IFMODE |
+ 7 << S6_GMAC_MACCONF2_PREAMBLELEN,
+ pd->reg + S6_GMAC_MACCONF2);
+ writel(0, pd->reg + S6_GMAC_MACSTATADDR1);
+ writel(0, pd->reg + S6_GMAC_MACSTATADDR2);
+ writel(1 << S6_GMAC_FIFOCONF0_WTMENREQ |
+ 1 << S6_GMAC_FIFOCONF0_SRFENREQ |
+ 1 << S6_GMAC_FIFOCONF0_FRFENREQ |
+ 1 << S6_GMAC_FIFOCONF0_STFENREQ |
+ 1 << S6_GMAC_FIFOCONF0_FTFENREQ,
+ pd->reg + S6_GMAC_FIFOCONF0);
+ writel(128 << S6_GMAC_FIFOCONF3_CFGFTTH |
+ 128 << S6_GMAC_FIFOCONF3_CFGHWMFT,
+ pd->reg + S6_GMAC_FIFOCONF3);
+ writel((S6_GMAC_FIFOCONF_RSV_MASK & ~(
+ 1 << S6_GMAC_FIFOCONF_RSV_RUNT |
+ 1 << S6_GMAC_FIFOCONF_RSV_CRCERR |
+ 1 << S6_GMAC_FIFOCONF_RSV_OK |
+ 1 << S6_GMAC_FIFOCONF_RSV_DRIBBLE |
+ 1 << S6_GMAC_FIFOCONF_RSV_CTRLFRAME |
+ 1 << S6_GMAC_FIFOCONF_RSV_PAUSECTRL |
+ 1 << S6_GMAC_FIFOCONF_RSV_UNOPCODE |
+ 1 << S6_GMAC_FIFOCONF_RSV_TRUNCATED)) |
+ 1 << S6_GMAC_FIFOCONF5_DROPLT64 |
+ pd->link.giga << S6_GMAC_FIFOCONF5_CFGBYTM |
+ 1 << S6_GMAC_FIFOCONF5_RXDROPSIZE,
+ pd->reg + S6_GMAC_FIFOCONF5);
+ writel(1 << S6_GMAC_FIFOCONF_RSV_RUNT |
+ 1 << S6_GMAC_FIFOCONF_RSV_CRCERR |
+ 1 << S6_GMAC_FIFOCONF_RSV_DRIBBLE |
+ 1 << S6_GMAC_FIFOCONF_RSV_CTRLFRAME |
+ 1 << S6_GMAC_FIFOCONF_RSV_PAUSECTRL |
+ 1 << S6_GMAC_FIFOCONF_RSV_UNOPCODE |
+ 1 << S6_GMAC_FIFOCONF_RSV_TRUNCATED,
+ pd->reg + S6_GMAC_FIFOCONF4);
+ s6gmac_set_dstaddr(pd, 0,
+ 0xFFFFFFFF, 0x0000FFFF, 0xFFFFFFFF, 0x0000FFFF);
+ s6gmac_set_dstaddr(pd, 1,
+ dev->dev_addr[5] |
+ dev->dev_addr[4] << 8 |
+ dev->dev_addr[3] << 16 |
+ dev->dev_addr[2] << 24,
+ dev->dev_addr[1] |
+ dev->dev_addr[0] << 8,
+ 0xFFFFFFFF, 0x0000FFFF);
+ s6gmac_set_dstaddr(pd, 2,
+ 0x00000000, 0x00000100, 0x00000000, 0x00000100);
+ s6gmac_set_dstaddr(pd, 3,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000);
+ writel(1 << S6_GMAC_HOST_PBLKCTRL_TXENA |
+ 1 << S6_GMAC_HOST_PBLKCTRL_RXENA |
+ S6_GMAC_HOST_PBLKCTRL_SIZ_128 << S6_GMAC_HOST_PBLKCTRL_TXBSIZ |
+ S6_GMAC_HOST_PBLKCTRL_SIZ_128 << S6_GMAC_HOST_PBLKCTRL_RXBSIZ |
+ 1 << S6_GMAC_HOST_PBLKCTRL_STATENA |
+ 1 << S6_GMAC_HOST_PBLKCTRL_STATCLEAR |
+ is_rgmii << S6_GMAC_HOST_PBLKCTRL_RGMII,
+ pd->reg + S6_GMAC_HOST_PBLKCTRL);
+}
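Editor's note: s6gmac_set_dstaddr() above programs four address/mask filter slots — slot 0 matches broadcast, slot 1 the station's own MAC, slot 2 (mask on the multicast bit only) any multicast address, and slot 3 (zero mask) matches everything. A standalone sketch of the byte packing used for slot 1 (the MAC value is arbitrary):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint8_t mac[6] = { 0x00, 0x16, 0x3e, 0x12, 0x34, 0x56 };

        /* same layout as the slot-1 call: the last four MAC bytes go
         * into the low register, the first two into the high one */
        uint32_t addrlo = mac[5] | mac[4] << 8 |
                          mac[3] << 16 | mac[2] << 24;
        uint32_t addrhi = mac[1] | mac[0] << 8;

        printf("addrlo=%08x addrhi=%08x\n", addrlo, addrhi);
        return 0;
    }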
+
+static void s6mii_enable(struct s6gmac *pd)
+{
+ writel(readl(pd->reg + S6_GMAC_MACCONF1) &
+ ~(1 << S6_GMAC_MACCONF1_SOFTRES),
+ pd->reg + S6_GMAC_MACCONF1);
+ writel((readl(pd->reg + S6_GMAC_MACMIICONF)
+ & ~(S6_GMAC_MACMIICONF_CSEL_MASK << S6_GMAC_MACMIICONF_CSEL))
+ | (S6_GMAC_MACMIICONF_CSEL_DIV168 << S6_GMAC_MACMIICONF_CSEL),
+ pd->reg + S6_GMAC_MACMIICONF);
+}
+
+static int s6mii_busy(struct s6gmac *pd, int tmo)
+{
+ while (readl(pd->reg + S6_GMAC_MACMIIINDI)) {
+ if (--tmo == 0)
+ return -ETIME;
+ udelay(64);
+ }
+ return 0;
+}
+
+static int s6mii_read(struct mii_bus *bus, int phy_addr, int regnum)
+{
+ struct s6gmac *pd = bus->priv;
+ s6mii_enable(pd);
+ if (s6mii_busy(pd, 256))
+ return -ETIME;
+ writel(phy_addr << S6_GMAC_MACMIIADDR_PHY |
+ regnum << S6_GMAC_MACMIIADDR_REG,
+ pd->reg + S6_GMAC_MACMIIADDR);
+ writel(1 << S6_GMAC_MACMIICMD_READ, pd->reg + S6_GMAC_MACMIICMD);
+ writel(0, pd->reg + S6_GMAC_MACMIICMD);
+ if (s6mii_busy(pd, 256))
+ return -ETIME;
+ return (u16)readl(pd->reg + S6_GMAC_MACMIISTAT);
+}
+
+static int s6mii_write(struct mii_bus *bus, int phy_addr, int regnum, u16 value)
+{
+ struct s6gmac *pd = bus->priv;
+ s6mii_enable(pd);
+ if (s6mii_busy(pd, 256))
+ return -ETIME;
+ writel(phy_addr << S6_GMAC_MACMIIADDR_PHY |
+ regnum << S6_GMAC_MACMIIADDR_REG,
+ pd->reg + S6_GMAC_MACMIIADDR);
+ writel(value, pd->reg + S6_GMAC_MACMIICTRL);
+ if (s6mii_busy(pd, 256))
+ return -ETIME;
+ return 0;
+}
+
+static int s6mii_reset(struct mii_bus *bus)
+{
+ struct s6gmac *pd = bus->priv;
+ s6mii_enable(pd);
+ if (s6mii_busy(pd, PHY_INIT_TIMEOUT))
+ return -ETIME;
+ return 0;
+}
+
+static void s6gmac_set_rgmii_txclock(struct s6gmac *pd)
+{
+ u32 pllsel = readl(S6_REG_GREG1 + S6_GREG1_PLLSEL);
+ pllsel &= ~(S6_GREG1_PLLSEL_GMAC_MASK << S6_GREG1_PLLSEL_GMAC);
+ switch (pd->link.mbit) {
+ case 10:
+ pllsel |= S6_GREG1_PLLSEL_GMAC_2500KHZ << S6_GREG1_PLLSEL_GMAC;
+ break;
+ case 100:
+ pllsel |= S6_GREG1_PLLSEL_GMAC_25MHZ << S6_GREG1_PLLSEL_GMAC;
+ break;
+ case 1000:
+ pllsel |= S6_GREG1_PLLSEL_GMAC_125MHZ << S6_GREG1_PLLSEL_GMAC;
+ break;
+ default:
+ return;
+ }
+ writel(pllsel, S6_REG_GREG1 + S6_GREG1_PLLSEL);
+}
+
+static inline void s6gmac_linkisup(struct net_device *dev, int isup)
+{
+ struct s6gmac *pd = netdev_priv(dev);
+ struct phy_device *phydev = pd->phydev;
+
+ pd->link.full = phydev->duplex;
+ pd->link.giga = (phydev->speed == 1000);
+ if (pd->link.mbit != phydev->speed) {
+ pd->link.mbit = phydev->speed;
+ s6gmac_set_rgmii_txclock(pd);
+ }
+ pd->link.isup = isup;
+ if (isup)
+ netif_carrier_on(dev);
+ phy_print_status(phydev);
+}
+
+static void s6gmac_adjust_link(struct net_device *dev)
+{
+ struct s6gmac *pd = netdev_priv(dev);
+ struct phy_device *phydev = pd->phydev;
+ if (pd->link.isup &&
+ (!phydev->link ||
+ (pd->link.mbit != phydev->speed) ||
+ (pd->link.full != phydev->duplex))) {
+ pd->link.isup = 0;
+ netif_tx_disable(dev);
+ if (!phydev->link) {
+ netif_carrier_off(dev);
+ phy_print_status(phydev);
+ }
+ }
+ if (!pd->link.isup && phydev->link) {
+ if (pd->link.full != phydev->duplex) {
+ u32 maccfg = readl(pd->reg + S6_GMAC_MACCONF2);
+ if (phydev->duplex)
+ maccfg |= 1 << S6_GMAC_MACCONF2_FULL;
+ else
+ maccfg &= ~(1 << S6_GMAC_MACCONF2_FULL);
+ writel(maccfg, pd->reg + S6_GMAC_MACCONF2);
+ }
+
+ if (pd->link.giga != (phydev->speed == 1000)) {
+ u32 fifocfg = readl(pd->reg + S6_GMAC_FIFOCONF5);
+ u32 maccfg = readl(pd->reg + S6_GMAC_MACCONF2);
+ maccfg &= ~(S6_GMAC_MACCONF2_IFMODE_MASK
+ << S6_GMAC_MACCONF2_IFMODE);
+ if (phydev->speed == 1000) {
+ fifocfg |= 1 << S6_GMAC_FIFOCONF5_CFGBYTM;
+ maccfg |= S6_GMAC_MACCONF2_IFMODE_BYTE
+ << S6_GMAC_MACCONF2_IFMODE;
+ } else {
+ fifocfg &= ~(1 << S6_GMAC_FIFOCONF5_CFGBYTM);
+ maccfg |= S6_GMAC_MACCONF2_IFMODE_NIBBLE
+ << S6_GMAC_MACCONF2_IFMODE;
+ }
+ writel(fifocfg, pd->reg + S6_GMAC_FIFOCONF5);
+ writel(maccfg, pd->reg + S6_GMAC_MACCONF2);
+ }
+
+ if (!s6dmac_fifo_full(pd->tx_dma, pd->tx_chan))
+ netif_wake_queue(dev);
+ s6gmac_linkisup(dev, 1);
+ }
+}
+
+static inline int s6gmac_phy_start(struct net_device *dev)
+{
+ struct s6gmac *pd = netdev_priv(dev);
+ int i = 0;
+ struct phy_device *p = NULL;
+ while ((i < PHY_MAX_ADDR) && !(p = pd->mii.bus->phy_map[i]))
+ i++;
+ if (!p) {
+ printk(KERN_ERR "%s: no PHY found\n", dev->name);
+ return -ENODEV;
+ }
+ p = phy_connect(dev, dev_name(&p->dev), &s6gmac_adjust_link, 0,
+ PHY_INTERFACE_MODE_RGMII);
+ if (IS_ERR(p)) {
+ printk(KERN_ERR "%s: Could not attach to PHY\n", dev->name);
+ return PTR_ERR(p);
+ }
+ p->supported &= PHY_GBIT_FEATURES;
+ p->advertising = p->supported;
+ pd->phydev = p;
+ return 0;
+}
+
+static inline void s6gmac_init_stats(struct net_device *dev)
+{
+ struct s6gmac *pd = netdev_priv(dev);
+ u32 mask;
+ mask = 1 << S6_GMAC_STATCARRY1_RDRP |
+ 1 << S6_GMAC_STATCARRY1_RJBR |
+ 1 << S6_GMAC_STATCARRY1_RFRG |
+ 1 << S6_GMAC_STATCARRY1_ROVR |
+ 1 << S6_GMAC_STATCARRY1_RUND |
+ 1 << S6_GMAC_STATCARRY1_RCDE |
+ 1 << S6_GMAC_STATCARRY1_RFLR |
+ 1 << S6_GMAC_STATCARRY1_RALN |
+ 1 << S6_GMAC_STATCARRY1_RMCA |
+ 1 << S6_GMAC_STATCARRY1_RFCS |
+ 1 << S6_GMAC_STATCARRY1_RPKT |
+ 1 << S6_GMAC_STATCARRY1_RBYT;
+ writel(mask, pd->reg + S6_GMAC_STATCARRY(0));
+ writel(~mask, pd->reg + S6_GMAC_STATCARRYMSK(0));
+ mask = 1 << S6_GMAC_STATCARRY2_TDRP |
+ 1 << S6_GMAC_STATCARRY2_TNCL |
+ 1 << S6_GMAC_STATCARRY2_TXCL |
+ 1 << S6_GMAC_STATCARRY2_TEDF |
+ 1 << S6_GMAC_STATCARRY2_TPKT |
+ 1 << S6_GMAC_STATCARRY2_TBYT |
+ 1 << S6_GMAC_STATCARRY2_TFRG |
+ 1 << S6_GMAC_STATCARRY2_TUND |
+ 1 << S6_GMAC_STATCARRY2_TOVR |
+ 1 << S6_GMAC_STATCARRY2_TFCS |
+ 1 << S6_GMAC_STATCARRY2_TJBR;
+ writel(mask, pd->reg + S6_GMAC_STATCARRY(1));
+ writel(~mask, pd->reg + S6_GMAC_STATCARRYMSK(1));
+}
+
+static inline void s6gmac_init_dmac(struct net_device *dev)
+{
+ struct s6gmac *pd = netdev_priv(dev);
+ s6dmac_disable_chan(pd->tx_dma, pd->tx_chan);
+ s6dmac_disable_chan(pd->rx_dma, pd->rx_chan);
+ s6dmac_disable_error_irqs(pd->tx_dma, 1 << S6_HIFDMA_GMACTX);
+ s6dmac_disable_error_irqs(pd->rx_dma, 1 << S6_HIFDMA_GMACRX);
+}
+
+static int s6gmac_tx(struct sk_buff *skb, struct net_device *dev)
+{
+ struct s6gmac *pd = netdev_priv(dev);
+ unsigned long flags;
+ spin_lock_irqsave(&pd->lock, flags);
+ dev->trans_start = jiffies;
+ writel(skb->len << S6_GMAC_BURST_PREWR_LEN |
+ 0 << S6_GMAC_BURST_PREWR_CFE |
+ 1 << S6_GMAC_BURST_PREWR_PPE |
+ 1 << S6_GMAC_BURST_PREWR_FCS |
+ ((skb->len < ETH_ZLEN) ? 1 : 0) << S6_GMAC_BURST_PREWR_PAD,
+ pd->reg + S6_GMAC_BURST_PREWR);
+ s6dmac_put_fifo_cache(pd->tx_dma, pd->tx_chan,
+ (u32)skb->data, pd->io, skb->len);
+ if (s6dmac_fifo_full(pd->tx_dma, pd->tx_chan))
+ netif_stop_queue(dev);
+ if (((u8)(pd->tx_skb_i - pd->tx_skb_o)) >= S6_NUM_TX_SKB) {
+ printk(KERN_ERR "GMAC BUG: skb tx ring overflow [%x, %x]\n",
+ pd->tx_skb_o, pd->tx_skb_i);
+ BUG();
+ }
+ pd->tx_skb[(pd->tx_skb_i++) % S6_NUM_TX_SKB] = skb;
+ spin_unlock_irqrestore(&pd->lock, flags);
+ return 0;
+}
+
+static void s6gmac_tx_timeout(struct net_device *dev)
+{
+ struct s6gmac *pd = netdev_priv(dev);
+ unsigned long flags;
+ spin_lock_irqsave(&pd->lock, flags);
+ s6gmac_tx_interrupt(dev);
+ spin_unlock_irqrestore(&pd->lock, flags);
+}
+
+static int s6gmac_open(struct net_device *dev)
+{
+ struct s6gmac *pd = netdev_priv(dev);
+ unsigned long flags;
+ phy_read_status(pd->phydev);
+ spin_lock_irqsave(&pd->lock, flags);
+ pd->link.mbit = 0;
+ s6gmac_linkisup(dev, pd->phydev->link);
+ s6gmac_init_device(dev);
+ s6gmac_init_stats(dev);
+ s6gmac_init_dmac(dev);
+ s6gmac_rx_fillfifo(pd);
+ s6dmac_enable_chan(pd->rx_dma, pd->rx_chan,
+ 2, 1, 0, 1, 0, 0, 0, 7, -1, 2, 0, 1);
+ s6dmac_enable_chan(pd->tx_dma, pd->tx_chan,
+ 2, 0, 1, 0, 0, 0, 0, 7, -1, 2, 0, 1);
+ writel(0 << S6_GMAC_HOST_INT_TXBURSTOVER |
+ 0 << S6_GMAC_HOST_INT_TXPREWOVER |
+ 0 << S6_GMAC_HOST_INT_RXBURSTUNDER |
+ 0 << S6_GMAC_HOST_INT_RXPOSTRFULL |
+ 0 << S6_GMAC_HOST_INT_RXPOSTRUNDER,
+ pd->reg + S6_GMAC_HOST_INTMASK);
+ spin_unlock_irqrestore(&pd->lock, flags);
+ phy_start(pd->phydev);
+ netif_start_queue(dev);
+ return 0;
+}
+
+static int s6gmac_stop(struct net_device *dev)
+{
+ struct s6gmac *pd = netdev_priv(dev);
+ unsigned long flags;
+ netif_stop_queue(dev);
+ phy_stop(pd->phydev);
+ spin_lock_irqsave(&pd->lock, flags);
+ s6gmac_init_dmac(dev);
+ s6gmac_stop_device(dev);
+ while (pd->tx_skb_i != pd->tx_skb_o)
+ dev_kfree_skb(pd->tx_skb[(pd->tx_skb_o++) % S6_NUM_TX_SKB]);
+ while (pd->rx_skb_i != pd->rx_skb_o)
+ dev_kfree_skb(pd->rx_skb[(pd->rx_skb_o++) % S6_NUM_RX_SKB]);
+ spin_unlock_irqrestore(&pd->lock, flags);
+ return 0;
+}
+
+static struct net_device_stats *s6gmac_stats(struct net_device *dev)
+{
+ struct s6gmac *pd = netdev_priv(dev);
+ struct net_device_stats *st = (struct net_device_stats *)&pd->stats;
+ int i;
+ do {
+ unsigned long flags;
+ spin_lock_irqsave(&pd->lock, flags);
+ for (i = 0; i < sizeof(pd->stats) / sizeof(unsigned long); i++)
+ pd->stats[i] =
+ pd->carry[i] << (S6_GMAC_STAT_SIZE_MIN - 1);
+ s6gmac_stats_collect(pd, &statinf[0][0]);
+ s6gmac_stats_collect(pd, &statinf[1][0]);
+ i = s6gmac_stats_pending(pd, 0) |
+ s6gmac_stats_pending(pd, 1);
+ spin_unlock_irqrestore(&pd->lock, flags);
+ } while (i);
+ st->rx_errors = st->rx_crc_errors +
+ st->rx_frame_errors +
+ st->rx_length_errors +
+ st->rx_missed_errors;
+ st->tx_errors += st->tx_aborted_errors;
+ return st;
+}
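Editor's note: the statistics path above copes with narrow hardware counters — each overflow latches a STATCARRY bit, s6gmac_stats_carry() credits 1 << reg_size per overflow (reg_size being the counter width minus S6_GMAC_STAT_SIZE_MIN - 1), and s6gmac_stats() rebuilds the totals as carry << (S6_GMAC_STAT_SIZE_MIN - 1) plus the live register values. A standalone check of that scaling (the width and readings are made up):

    #include <stdio.h>
    #include <stdint.h>

    #define STAT_SIZE_MIN 12                    /* narrowest counter */

    int main(void)
    {
        unsigned width = 18;                    /* e.g. a packet counter */
        unsigned reg_size = width - (STAT_SIZE_MIN - 1);
        uint64_t carry = 0, hw = 0x1234;        /* current register value */
        int overflows;

        for (overflows = 0; overflows < 2; overflows++)
            carry += 1ull << reg_size;          /* as in stats_carry() */

        /* as in s6gmac_stats(): scale the carry back up, add the register */
        {
            uint64_t total = (carry << (STAT_SIZE_MIN - 1)) + hw;
            printf("total=%llu expected=%llu\n",
                   (unsigned long long)total,
                   (unsigned long long)((2ull << width) + 0x1234));
        }
        return 0;
    }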
+
+static int __devinit s6gmac_probe(struct platform_device *pdev)
+{
+ struct net_device *dev;
+ struct s6gmac *pd;
+ int res;
+ unsigned long i;
+ struct mii_bus *mb;
+ dev = alloc_etherdev(sizeof(*pd));
+ if (!dev) {
+ printk(KERN_ERR DRV_PRMT "etherdev alloc failed, aborting.\n");
+ return -ENOMEM;
+ }
+ dev->open = s6gmac_open;
+ dev->stop = s6gmac_stop;
+ dev->hard_start_xmit = s6gmac_tx;
+ dev->tx_timeout = s6gmac_tx_timeout;
+ dev->watchdog_timeo = HZ;
+ dev->get_stats = s6gmac_stats;
+ dev->irq = platform_get_irq(pdev, 0);
+ pd = netdev_priv(dev);
+ memset(pd, 0, sizeof(*pd));
+ spin_lock_init(&pd->lock);
+ pd->reg = platform_get_resource(pdev, IORESOURCE_MEM, 0)->start;
+ i = platform_get_resource(pdev, IORESOURCE_DMA, 0)->start;
+ pd->tx_dma = DMA_MASK_DMAC(i);
+ pd->tx_chan = DMA_INDEX_CHNL(i);
+ i = platform_get_resource(pdev, IORESOURCE_DMA, 1)->start;
+ pd->rx_dma = DMA_MASK_DMAC(i);
+ pd->rx_chan = DMA_INDEX_CHNL(i);
+ pd->io = platform_get_resource(pdev, IORESOURCE_IO, 0)->start;
+ res = request_irq(dev->irq, &s6gmac_interrupt, 0, dev->name, dev);
+ if (res) {
+ printk(KERN_ERR DRV_PRMT "irq request failed: %d\n", dev->irq);
+ goto errirq;
+ }
+ res = register_netdev(dev);
+ if (res) {
+ printk(KERN_ERR DRV_PRMT "error registering device %s\n",
+ dev->name);
+ goto errdev;
+ }
+ mb = mdiobus_alloc();
+ if (!mb) {
+ printk(KERN_ERR DRV_PRMT "error allocating mii bus\n");
+ goto errmii;
+ }
+ mb->name = "s6gmac_mii";
+ mb->read = s6mii_read;
+ mb->write = s6mii_write;
+ mb->reset = s6mii_reset;
+ mb->priv = pd;
+ snprintf(mb->id, MII_BUS_ID_SIZE, "0");
+ mb->phy_mask = ~(1 << 0);
+ mb->irq = &pd->mii.irq[0];
+ for (i = 0; i < PHY_MAX_ADDR; i++) {
+ int n = platform_get_irq(pdev, i + 1);
+ if (n < 0)
+ n = PHY_POLL;
+ pd->mii.irq[i] = n;
+ }
+ mdiobus_register(mb);
+ pd->mii.bus = mb;
+ res = s6gmac_phy_start(dev);
+ if (res)
+ return res;
+ platform_set_drvdata(pdev, dev);
+ return 0;
+errmii:
+ unregister_netdev(dev);
+errdev:
+ free_irq(dev->irq, dev);
+errirq:
+ free_netdev(dev);
+ return res;
+}
+
+static int __devexit s6gmac_remove(struct platform_device *pdev)
+{
+ struct net_device *dev = platform_get_drvdata(pdev);
+ if (dev) {
+ struct s6gmac *pd = netdev_priv(dev);
+ mdiobus_unregister(pd->mii.bus);
+ unregister_netdev(dev);
+ free_irq(dev->irq, dev);
+ free_netdev(dev);
+ platform_set_drvdata(pdev, NULL);
+ }
+ return 0;
+}
+
+static struct platform_driver s6gmac_driver = {
+ .probe = s6gmac_probe,
+ .remove = __devexit_p(s6gmac_remove),
+ .driver = {
+ .name = "s6gmac",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init s6gmac_init(void)
+{
+ printk(KERN_INFO DRV_PRMT "S6 GMAC ethernet driver\n");
+ return platform_driver_register(&s6gmac_driver);
+}
+
+
+static void __exit s6gmac_exit(void)
+{
+ platform_driver_unregister(&s6gmac_driver);
+}
+
+module_init(s6gmac_init);
+module_exit(s6gmac_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("S6105 on chip Ethernet driver");
+MODULE_AUTHOR("Oskar Schirmer <os@emlix.com>");
diff --git a/drivers/net/usb/Kconfig b/drivers/net/usb/Kconfig
index 3717569..a906d39 100644
--- a/drivers/net/usb/Kconfig
+++ b/drivers/net/usb/Kconfig
@@ -169,10 +169,12 @@
The Linux-USB CDC Ethernet Gadget driver is an open implementation.
This driver should work with at least the following devices:
+ * Dell Wireless 5530 HSPA
* Ericsson PipeRider (all variants)
+ * Ericsson Mobile Broadband Module (all variants)
* Motorola (DM100 and SB4100)
* Broadcom Cable Modem (reference design)
- * Toshiba PCX1100U
+ * Toshiba (PCX1100U and F3507g)
* ...
This driver creates an interface named "ethX", where X depends on
diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
index 01fd528..4a6aff5 100644
--- a/drivers/net/usb/cdc_ether.c
+++ b/drivers/net/usb/cdc_ether.c
@@ -533,6 +533,31 @@
USB_DEVICE_AND_INTERFACE_INFO(0x0bdb, 0x1900, USB_CLASS_COMM,
USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
.driver_info = (unsigned long) &cdc_info,
+}, {
+ /* Ericsson F3507g ver. 2 */
+ USB_DEVICE_AND_INTERFACE_INFO(0x0bdb, 0x1902, USB_CLASS_COMM,
+ USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
+ .driver_info = (unsigned long) &cdc_info,
+}, {
+ /* Ericsson F3607gw */
+ USB_DEVICE_AND_INTERFACE_INFO(0x0bdb, 0x1904, USB_CLASS_COMM,
+ USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
+ .driver_info = (unsigned long) &cdc_info,
+}, {
+ /* Ericsson F3307 */
+ USB_DEVICE_AND_INTERFACE_INFO(0x0bdb, 0x1906, USB_CLASS_COMM,
+ USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
+ .driver_info = (unsigned long) &cdc_info,
+}, {
+ /* Toshiba F3507g */
+ USB_DEVICE_AND_INTERFACE_INFO(0x0930, 0x130b, USB_CLASS_COMM,
+ USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
+ .driver_info = (unsigned long) &cdc_info,
+}, {
+ /* Dell F3507g */
+ USB_DEVICE_AND_INTERFACE_INFO(0x413c, 0x8147, USB_CLASS_COMM,
+ USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
+ .driver_info = (unsigned long) &cdc_info,
},
{ }, // END
};
diff --git a/drivers/net/usb/cdc_subset.c b/drivers/net/usb/cdc_subset.c
index c66b9c3..ca39ace 100644
--- a/drivers/net/usb/cdc_subset.c
+++ b/drivers/net/usb/cdc_subset.c
@@ -307,9 +307,10 @@
USB_DEVICE (0x1286, 0x8001), // "blob" bootloader
.driver_info = (unsigned long) &blob_info,
}, {
- // Linux Ethernet/RNDIS gadget on pxa210/25x/26x, second config
- // e.g. Gumstix, current OpenZaurus, ...
- USB_DEVICE_VER (0x0525, 0xa4a2, 0x0203, 0x0203),
+ // Linux Ethernet/RNDIS gadget, mostly on PXA, second config
+ // e.g. Gumstix, current OpenZaurus, ... or anything else
+ // that just enables this gadget option.
+ USB_DEVICE (0x0525, 0xa4a2),
.driver_info = (unsigned long) &linuxdev_info,
},
#endif
diff --git a/drivers/net/usb/pegasus.c b/drivers/net/usb/pegasus.c
index 2138535..73acbd2 100644
--- a/drivers/net/usb/pegasus.c
+++ b/drivers/net/usb/pegasus.c
@@ -297,7 +297,7 @@
pegasus->dr.bRequestType = PEGASUS_REQT_WRITE;
pegasus->dr.bRequest = PEGASUS_REQ_SET_REGS;
- pegasus->dr.wValue = 0;
+ pegasus->dr.wValue = cpu_to_le16(0);
pegasus->dr.wIndex = cpu_to_le16(EthCtrl0);
pegasus->dr.wLength = cpu_to_le16(3);
pegasus->ctrl_urb->transfer_buffer_length = 3;
@@ -446,11 +446,12 @@
int i;
__u8 tmp, d[4] = { 0x3f, 0, 0, EPROM_WRITE };
int ret;
+ __le16 le_data = cpu_to_le16(data);
set_registers(pegasus, EpromOffset, 4, d);
enable_eprom_write(pegasus);
set_register(pegasus, EpromOffset, index);
- set_registers(pegasus, EpromData, 2, &data);
+ set_registers(pegasus, EpromData, 2, &le_data);
set_register(pegasus, EpromCtrl, EPROM_WRITE);
for (i = 0; i < REG_TIMEOUT; i++) {
@@ -923,29 +924,32 @@
static inline void disable_net_traffic(pegasus_t * pegasus)
{
- int tmp = 0;
+ __le16 tmp = cpu_to_le16(0);
- set_registers(pegasus, EthCtrl0, 2, &tmp);
+ set_registers(pegasus, EthCtrl0, sizeof(tmp), &tmp);
}
static inline void get_interrupt_interval(pegasus_t * pegasus)
{
- __u8 data[2];
+ u16 data;
+ u8 interval;
- read_eprom_word(pegasus, 4, (__u16 *) data);
+ read_eprom_word(pegasus, 4, &data);
+ interval = data >> 8;
if (pegasus->usb->speed != USB_SPEED_HIGH) {
- if (data[1] < 0x80) {
+ if (interval < 0x80) {
if (netif_msg_timer(pegasus))
dev_info(&pegasus->intf->dev, "intr interval "
"changed from %ums to %ums\n",
- data[1], 0x80);
- data[1] = 0x80;
+ interval, 0x80);
+ interval = 0x80;
+ data = (data & 0x00FF) | ((u16)interval << 8);
#ifdef PEGASUS_WRITE_EEPROM
- write_eprom_word(pegasus, 4, *(__u16 *) data);
+ write_eprom_word(pegasus, 4, data);
#endif
}
}
- pegasus->intr_interval = data[1];
+ pegasus->intr_interval = interval;
}
static void set_carrier(struct net_device *net)
@@ -1299,7 +1303,8 @@
/* Special quirk to keep the driver from handling the Belkin Bluetooth
* dongle which happens to have the same ID.
*/
- if ((udd->idVendor == VENDOR_BELKIN && udd->idProduct == 0x0121) &&
+ if ((udd->idVendor == cpu_to_le16(VENDOR_BELKIN)) &&
+ (udd->idProduct == cpu_to_le16(0x0121)) &&
(udd->bDeviceClass == USB_CLASS_WIRELESS_CONTROLLER) &&
(udd->bDeviceProtocol == 1))
return 1;
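Editor's note: the pegasus changes above make the USB control-request fields and the EEPROM write buffer explicitly little-endian; on a big-endian host, handing set_registers() a host-order u16 would put the bytes on the wire swapped. A standalone illustration of what the cpu_to_le16() conversions guarantee (simplified — the real helper is a no-op on little-endian builds):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint16_t data = 0x1234;         /* value destined for the EEPROM */
        uint8_t wire[2];

        /* little-endian layout, independent of host byte order */
        wire[0] = data & 0xff;
        wire[1] = data >> 8;
        printf("wire bytes: %02x %02x\n", wire[0], wire[1]);   /* 34 12 */

        /* copying the raw u16 instead would emit 12 34 on a big-endian
         * host, which is what the new le_data temporary avoids */
        return 0;
    }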
diff --git a/drivers/net/via-velocity.c b/drivers/net/via-velocity.c
index b02f7ad..3ba3595 100644
--- a/drivers/net/via-velocity.c
+++ b/drivers/net/via-velocity.c
@@ -1847,7 +1847,7 @@
*/
if (tdinfo->skb_dma) {
- pktlen = (skb->len > ETH_ZLEN ? : ETH_ZLEN);
+ pktlen = max_t(unsigned int, skb->len, ETH_ZLEN);
for (i = 0; i < tdinfo->nskb_dma; i++) {
#ifdef VELOCITY_ZERO_COPY_SUPPORT
pci_unmap_single(vptr->pdev, tdinfo->skb_dma[i], le16_to_cpu(td->tdesc1.len), PCI_DMA_TODEVICE);
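Editor's note: the via-velocity change above fixes a misuse of the GNU "a ?: b" extension — because the first operand was the comparison skb->len > ETH_ZLEN rather than skb->len, the expression evaluated to 1 for any oversized packet instead of the packet length; max_t() yields the intended maximum. A standalone demonstration (the max macro below is a simplified stand-in for the kernel's max_t):

    #include <stdio.h>

    #define ETH_ZLEN 60
    #define my_max_t(type, a, b) ((type)(a) > (type)(b) ? (type)(a) : (type)(b))

    int main(void)
    {
        unsigned int len = 1500;
        unsigned int broken = (len > ETH_ZLEN ? : ETH_ZLEN);    /* GNU ?: */
        unsigned int fixed = my_max_t(unsigned int, len, ETH_ZLEN);

        printf("broken=%u fixed=%u\n", broken, fixed);  /* broken=1 fixed=1500 */
        return 0;
    }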
diff --git a/drivers/net/wireless/ath/ath5k/base.c b/drivers/net/wireless/ath/ath5k/base.c
index 55f7de0..ea04515 100644
--- a/drivers/net/wireless/ath/ath5k/base.c
+++ b/drivers/net/wireless/ath/ath5k/base.c
@@ -538,6 +538,7 @@
sc->iobase = mem; /* So we can unmap it on detach */
sc->cachelsz = csz * sizeof(u32); /* convert to bytes */
sc->opmode = NL80211_IFTYPE_STATION;
+ sc->bintval = 1000;
mutex_init(&sc->lock);
spin_lock_init(&sc->rxbuflock);
spin_lock_init(&sc->txbuflock);
@@ -686,6 +687,13 @@
if (err)
return err;
+ /*
+ * Suspend/Resume resets the PCI configuration space, so we have to
+ * re-disable the RETRY_TIMEOUT register (0x41) to keep
+ * PCI Tx retries from interfering with C3 CPU state
+ */
+ pci_write_config_byte(pdev, 0x41, 0);
+
err = request_irq(pdev->irq, ath5k_intr, IRQF_SHARED, "ath", sc);
if (err) {
ATH5K_ERR(sc, "request_irq failed\n");
@@ -2748,9 +2756,6 @@
goto end;
}
- /* Set to a reasonable value. Note that this will
- * be set to mac80211's value at ath5k_config(). */
- sc->bintval = 1000;
ath5k_hw_set_lladdr(sc->ah, conf->mac_addr);
ret = 0;
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index 9f49a32..66a6c1f 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -1196,8 +1196,8 @@
ath9k_hw_phy_disable(ah);
ath9k_hw_configpcipowersave(ah, 1);
- ath9k_hw_setpower(ah, ATH9K_PM_FULL_SLEEP);
ath9k_ps_restore(sc);
+ ath9k_hw_setpower(ah, ATH9K_PM_FULL_SLEEP);
}
/*******************/
diff --git a/drivers/net/wireless/ath/ath9k/pci.c b/drivers/net/wireless/ath/ath9k/pci.c
index ccdf20a..170c5b3 100644
--- a/drivers/net/wireless/ath/ath9k/pci.c
+++ b/drivers/net/wireless/ath/ath9k/pci.c
@@ -87,6 +87,7 @@
struct ath_softc *sc;
struct ieee80211_hw *hw;
u8 csz;
+ u32 val;
int ret = 0;
struct ath_hw *ah;
@@ -133,6 +134,14 @@
pci_set_master(pdev);
+ /*
+ * Disable the RETRY_TIMEOUT register (0x41) to keep
+ * PCI Tx retries from interfering with C3 CPU state.
+ */
+ pci_read_config_dword(pdev, 0x40, &val);
+ if ((val & 0x0000ff00) != 0)
+ pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
+
ret = pci_request_region(pdev, 0, "ath9k");
if (ret) {
dev_err(&pdev->dev, "PCI memory region reserve error\n");
@@ -239,12 +248,21 @@
struct ieee80211_hw *hw = pci_get_drvdata(pdev);
struct ath_wiphy *aphy = hw->priv;
struct ath_softc *sc = aphy->sc;
+ u32 val;
int err;
err = pci_enable_device(pdev);
if (err)
return err;
pci_restore_state(pdev);
+ /*
+ * Suspend/Resume resets the PCI configuration space, so we have to
+ * re-disable the RETRY_TIMEOUT register (0x41) to keep
+ * PCI Tx retries from interfering with C3 CPU state
+ */
+ pci_read_config_dword(pdev, 0x40, &val);
+ if ((val & 0x0000ff00) != 0)
+ pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
/* Enable LED */
ath9k_hw_cfg_output(sc->sc_ah, ATH_LED_PIN,
diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c
index f99f3a7..cece1c4 100644
--- a/drivers/net/wireless/ath/ath9k/recv.c
+++ b/drivers/net/wireless/ath/ath9k/recv.c
@@ -539,11 +539,14 @@
if (ath_beacon_dtim_pending_cab(skb)) {
/*
* Remain awake waiting for buffered broadcast/multicast
- * frames.
+ * frames. If the last broadcast/multicast frame is not
+ * received properly, the next beacon frame will work as
+ * a backup trigger for returning to NETWORK SLEEP state,
+ * so wait for that beacon as well.
*/
DPRINTF(sc, ATH_DBG_PS, "Received DTIM beacon indicating "
"buffered broadcast/multicast frame(s)\n");
- sc->sc_flags |= SC_OP_WAIT_FOR_CAB;
+ sc->sc_flags |= SC_OP_WAIT_FOR_CAB | SC_OP_WAIT_FOR_BEACON;
return;
}
diff --git a/drivers/net/wireless/iwmc3200wifi/iwm.h b/drivers/net/wireless/iwmc3200wifi/iwm.h
index 635c16e..77c339f 100644
--- a/drivers/net/wireless/iwmc3200wifi/iwm.h
+++ b/drivers/net/wireless/iwmc3200wifi/iwm.h
@@ -288,6 +288,7 @@
u8 *eeprom;
struct timer_list watchdog;
struct work_struct reset_worker;
+ struct mutex mutex;
struct rfkill *rfkill;
char private[0] __attribute__((__aligned__(NETDEV_ALIGN)));
@@ -315,8 +316,11 @@
void *iwm_if_alloc(int sizeof_bus, struct device *dev,
struct iwm_if_ops *if_ops);
void iwm_if_free(struct iwm_priv *iwm);
+int iwm_if_add(struct iwm_priv *iwm);
+void iwm_if_remove(struct iwm_priv *iwm);
int iwm_mode_to_nl80211_iftype(int mode);
int iwm_priv_init(struct iwm_priv *iwm);
+void iwm_priv_deinit(struct iwm_priv *iwm);
void iwm_reset(struct iwm_priv *iwm);
void iwm_tx_credit_init_pools(struct iwm_priv *iwm,
struct iwm_umac_notif_alive *alive);
diff --git a/drivers/net/wireless/iwmc3200wifi/main.c b/drivers/net/wireless/iwmc3200wifi/main.c
index 6a2640f..8be206d 100644
--- a/drivers/net/wireless/iwmc3200wifi/main.c
+++ b/drivers/net/wireless/iwmc3200wifi/main.c
@@ -112,6 +112,9 @@
iwm_send_umac_stats_req(iwm, 0);
}
+int __iwm_up(struct iwm_priv *iwm);
+int __iwm_down(struct iwm_priv *iwm);
+
static void iwm_reset_worker(struct work_struct *work)
{
struct iwm_priv *iwm;
@@ -120,6 +123,19 @@
iwm = container_of(work, struct iwm_priv, reset_worker);
+ /*
+ * XXX: The iwm->mutex is introduced purely for this reset work,
+ * because the only other users of iwm_up and iwm_down are the netdev
+ * ndo_open and ndo_stop callbacks, which are already protected by rtnl.
+ * Please remove iwm->mutex as well if iwm_reset_worker() is no longer
+ * required in the future.
+ */
+ if (!mutex_trylock(&iwm->mutex)) {
+ IWM_WARN(iwm, "We are in the middle of interface bringing "
+ "UP/DOWN. Skip driver resetting.\n");
+ return;
+ }
+
if (iwm->umac_profile_active) {
profile = kmalloc(sizeof(struct iwm_umac_profile), GFP_KERNEL);
if (profile)
@@ -128,10 +144,10 @@
IWM_ERR(iwm, "Couldn't alloc memory for profile\n");
}
- iwm_down(iwm);
+ __iwm_down(iwm);
while (retry++ < 3) {
- ret = iwm_up(iwm);
+ ret = __iwm_up(iwm);
if (!ret)
break;
@@ -142,7 +158,7 @@
IWM_WARN(iwm, "iwm_up() failed: %d\n", ret);
kfree(profile);
- return;
+ goto out;
}
if (profile) {
@@ -151,6 +167,9 @@
iwm_send_mlme_profile(iwm);
kfree(profile);
}
+
+ out:
+ mutex_unlock(&iwm->mutex);
}
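Editor's note: iwm_reset_worker() above takes iwm->mutex with mutex_trylock() so that a reset racing against ndo_open/ndo_stop is simply skipped rather than blocking or re-entering the up/down paths. A minimal kernel-style sketch of the pattern, with placeholder names:

    if (!mutex_trylock(&priv->mutex))
        return;                 /* up/down already in progress: skip */
    do_reset(priv);             /* placeholder for the real reset work */
    mutex_unlock(&priv->mutex);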
static void iwm_watchdog(unsigned long data)
@@ -215,10 +234,21 @@
init_timer(&iwm->watchdog);
iwm->watchdog.function = iwm_watchdog;
iwm->watchdog.data = (unsigned long)iwm;
+ mutex_init(&iwm->mutex);
return 0;
}
+void iwm_priv_deinit(struct iwm_priv *iwm)
+{
+ int i;
+
+ for (i = 0; i < IWM_TX_QUEUES; i++)
+ destroy_workqueue(iwm->txq[i].wq);
+
+ destroy_workqueue(iwm->rx_wq);
+}
+
/*
* We reset all the structures, and we reset the UMAC.
* After calling this routine, you're expected to reload
@@ -466,7 +496,7 @@
iwm_rx_free(iwm);
- cancel_delayed_work(&iwm->stats_request);
+ cancel_delayed_work_sync(&iwm->stats_request);
memset(wstats, 0, sizeof(struct iw_statistics));
wstats->qual.updated = IW_QUAL_ALL_INVALID;
@@ -511,7 +541,7 @@
return 0;
}
-int iwm_up(struct iwm_priv *iwm)
+int __iwm_up(struct iwm_priv *iwm)
{
int ret;
struct iwm_notif *notif_reboot, *notif_ack = NULL;
@@ -647,7 +677,18 @@
return -EIO;
}
-int iwm_down(struct iwm_priv *iwm)
+int iwm_up(struct iwm_priv *iwm)
+{
+ int ret;
+
+ mutex_lock(&iwm->mutex);
+ ret = __iwm_up(iwm);
+ mutex_unlock(&iwm->mutex);
+
+ return ret;
+}
+
+int __iwm_down(struct iwm_priv *iwm)
{
int ret;
@@ -678,3 +719,14 @@
return 0;
}
+
+int iwm_down(struct iwm_priv *iwm)
+{
+ int ret;
+
+ mutex_lock(&iwm->mutex);
+ ret = __iwm_down(iwm);
+ mutex_unlock(&iwm->mutex);
+
+ return ret;
+}
diff --git a/drivers/net/wireless/iwmc3200wifi/netdev.c b/drivers/net/wireless/iwmc3200wifi/netdev.c
index 68e2c3b..aaa20c6 100644
--- a/drivers/net/wireless/iwmc3200wifi/netdev.c
+++ b/drivers/net/wireless/iwmc3200wifi/netdev.c
@@ -114,32 +114,31 @@
iwm = wdev_to_iwm(wdev);
iwm->bus_ops = if_ops;
iwm->wdev = wdev;
- iwm_priv_init(iwm);
+
+ ret = iwm_priv_init(iwm);
+ if (ret) {
+ dev_err(dev, "failed to init iwm_priv\n");
+ goto out_wdev;
+ }
+
wdev->iftype = iwm_mode_to_nl80211_iftype(iwm->conf.mode);
- ndev = alloc_netdev_mq(0, "wlan%d", ether_setup,
- IWM_TX_QUEUES);
+ ndev = alloc_netdev_mq(0, "wlan%d", ether_setup, IWM_TX_QUEUES);
if (!ndev) {
dev_err(dev, "no memory for network device instance\n");
- goto out_wdev;
+ goto out_priv;
}
ndev->netdev_ops = &iwm_netdev_ops;
ndev->wireless_handlers = &iwm_iw_handler_def;
ndev->ieee80211_ptr = wdev;
SET_NETDEV_DEV(ndev, wiphy_dev(wdev->wiphy));
- ret = register_netdev(ndev);
- if (ret < 0) {
- dev_err(dev, "Failed to register netdev: %d\n", ret);
- goto out_ndev;
- }
-
wdev->netdev = ndev;
return iwm;
- out_ndev:
- free_netdev(ndev);
+ out_priv:
+ iwm_priv_deinit(iwm);
out_wdev:
iwm_wdev_free(iwm);
@@ -148,15 +147,29 @@
void iwm_if_free(struct iwm_priv *iwm)
{
- int i;
-
if (!iwm_to_ndev(iwm))
return;
- unregister_netdev(iwm_to_ndev(iwm));
free_netdev(iwm_to_ndev(iwm));
iwm_wdev_free(iwm);
- destroy_workqueue(iwm->rx_wq);
- for (i = 0; i < IWM_TX_QUEUES; i++)
- destroy_workqueue(iwm->txq[i].wq);
+ iwm_priv_deinit(iwm);
+}
+
+int iwm_if_add(struct iwm_priv *iwm)
+{
+ struct net_device *ndev = iwm_to_ndev(iwm);
+ int ret;
+
+ ret = register_netdev(ndev);
+ if (ret < 0) {
+ dev_err(&ndev->dev, "Failed to register netdev: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+void iwm_if_remove(struct iwm_priv *iwm)
+{
+ unregister_netdev(iwm_to_ndev(iwm));
}
diff --git a/drivers/net/wireless/iwmc3200wifi/sdio.c b/drivers/net/wireless/iwmc3200wifi/sdio.c
index b54da67..9166818 100644
--- a/drivers/net/wireless/iwmc3200wifi/sdio.c
+++ b/drivers/net/wireless/iwmc3200wifi/sdio.c
@@ -454,10 +454,18 @@
INIT_WORK(&hw->isr_worker, iwm_sdio_isr_worker);
+ ret = iwm_if_add(iwm);
+ if (ret) {
+ dev_err(dev, "add SDIO interface failed\n");
+ goto destroy_wq;
+ }
+
dev_info(dev, "IWM SDIO probe\n");
return 0;
+ destroy_wq:
+ destroy_workqueue(hw->isr_wq);
debugfs_exit:
iwm_debugfs_exit(iwm);
if_free:
@@ -471,9 +479,10 @@
struct iwm_priv *iwm = hw_to_iwm(hw);
struct device *dev = &func->dev;
+ iwm_if_remove(iwm);
+ destroy_workqueue(hw->isr_wq);
iwm_debugfs_exit(iwm);
iwm_if_free(iwm);
- destroy_workqueue(hw->isr_wq);
sdio_set_drvdata(func, NULL);
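Editor's note: the sdio.c probe above keeps its error labels in strict reverse order of initialization, so a failure in iwm_if_add() unwinds only what has already been set up (the ISR workqueue, then debugfs, then the interface allocation). A minimal sketch of the idiom with placeholder steps:

    ret = step_a();
    if (ret)
        goto out;
    ret = step_b();
    if (ret)
        goto undo_a;
    ret = step_c();
    if (ret)
        goto undo_b;
    return 0;
 undo_b:
    undo_step_b();
 undo_a:
    undo_step_a();
 out:
    return ret;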
diff --git a/drivers/net/wireless/zd1211rw/zd_usb.c b/drivers/net/wireless/zd1211rw/zd_usb.c
index f0e5e94..14a19ba 100644
--- a/drivers/net/wireless/zd1211rw/zd_usb.c
+++ b/drivers/net/wireless/zd1211rw/zd_usb.c
@@ -67,6 +67,7 @@
{ USB_DEVICE(0x079b, 0x0062), .driver_info = DEVICE_ZD1211B },
{ USB_DEVICE(0x1582, 0x6003), .driver_info = DEVICE_ZD1211B },
{ USB_DEVICE(0x050d, 0x705c), .driver_info = DEVICE_ZD1211B },
+ { USB_DEVICE(0x083a, 0xe503), .driver_info = DEVICE_ZD1211B },
{ USB_DEVICE(0x083a, 0xe506), .driver_info = DEVICE_ZD1211B },
{ USB_DEVICE(0x083a, 0x4505), .driver_info = DEVICE_ZD1211B },
{ USB_DEVICE(0x0471, 0x1236), .driver_info = DEVICE_ZD1211B },
diff --git a/drivers/parport/parport_pc.c b/drivers/parport/parport_pc.c
index 151bf5b..1032d5f 100644
--- a/drivers/parport/parport_pc.c
+++ b/drivers/parport/parport_pc.c
@@ -1471,11 +1471,13 @@
static void __devinit winbond_check(int io, int key)
{
- int devid, devrev, oldid, x_devid, x_devrev, x_oldid;
+ int origval, devid, devrev, oldid, x_devid, x_devrev, x_oldid;
if (!request_region(io, 3, __func__))
return;
+ origval = inb(io); /* Save original value */
+
/* First probe without key */
outb(0x20, io);
x_devid = inb(io + 1);
@@ -1495,6 +1497,8 @@
oldid = inb(io + 1);
outb(0xaa, io); /* Magic Seal */
+ outb(origval, io); /* in case we poked some entirely different hardware */
+
if ((x_devid == devid) && (x_devrev == devrev) && (x_oldid == oldid))
goto out; /* protection against false positives */
@@ -1505,11 +1509,15 @@
static void __devinit winbond_check2(int io, int key)
{
- int devid, devrev, oldid, x_devid, x_devrev, x_oldid;
+ int origval[3], devid, devrev, oldid, x_devid, x_devrev, x_oldid;
if (!request_region(io, 3, __func__))
return;
+ origval[0] = inb(io); /* Save original values */
+ origval[1] = inb(io + 1);
+ origval[2] = inb(io + 2);
+
/* First probe without the key */
outb(0x20, io + 2);
x_devid = inb(io + 2);
@@ -1528,6 +1536,10 @@
oldid = inb(io + 2);
outb(0xaa, io); /* Magic Seal */
+ outb(origval[0], io); /* in case we poked some entirely different hardware */
+ outb(origval[1], io + 1);
+ outb(origval[2], io + 2);
+
if (x_devid == devid && x_devrev == devrev && x_oldid == oldid)
goto out; /* protection against false positives */
@@ -1538,11 +1550,13 @@
static void __devinit smsc_check(int io, int key)
{
- int id, rev, oldid, oldrev, x_id, x_rev, x_oldid, x_oldrev;
+ int origval, id, rev, oldid, oldrev, x_id, x_rev, x_oldid, x_oldrev;
if (!request_region(io, 3, __func__))
return;
+ origval = inb(io); /* Save original value */
+
/* First probe without the key */
outb(0x0d, io);
x_oldid = inb(io + 1);
@@ -1566,6 +1580,8 @@
rev = inb(io + 1);
outb(0xaa, io); /* Magic Seal */
+ outb(origval, io); /* in case we poked some entirely different hardware */
+
if (x_id == id && x_oldrev == oldrev &&
x_oldid == oldid && x_rev == rev)
goto out; /* protection against false positives */
@@ -1602,11 +1618,12 @@
static void __devinit detect_and_report_it87(void)
{
u16 dev;
- u8 r;
+ u8 origval, r;
if (verbose_probing)
printk(KERN_DEBUG "IT8705 Super-IO detection, now testing port 2E ...\n");
- if (!request_region(0x2e, 1, __func__))
+ if (!request_region(0x2e, 2, __func__))
return;
+ origval = inb(0x2e); /* Save original value */
outb(0x87, 0x2e);
outb(0x01, 0x2e);
outb(0x55, 0x2e);
@@ -1626,8 +1643,10 @@
outb(r | 8, 0x2F);
outb(0x02, 0x2E); /* Lock */
outb(0x02, 0x2F);
+ } else {
+ outb(origval, 0x2e); /* Oops, sorry to disturb */
}
- release_region(0x2e, 1);
+ release_region(0x2e, 2);
}
#endif /* CONFIG_PARPORT_PC_SUPERIO */
@@ -2271,6 +2290,9 @@
if (IS_ERR(pdev))
return NULL;
dev = &pdev->dev;
+
+ dev->coherent_dma_mask = DMA_BIT_MASK(24);
+ dev->dma_mask = &dev->coherent_dma_mask;
}
ops = kmalloc(sizeof(struct parport_operations), GFP_KERNEL);
diff --git a/drivers/pci/Makefile b/drivers/pci/Makefile
index b77ae67..1ebd6b4 100644
--- a/drivers/pci/Makefile
+++ b/drivers/pci/Makefile
@@ -2,10 +2,11 @@
# Makefile for the PCI bus specific drivers.
#
-obj-y += access.o bus.o probe.o remove.o pci.o quirks.o slot.o \
+obj-y += access.o bus.o probe.o remove.o pci.o quirks.o \
pci-driver.o search.o pci-sysfs.o rom.o setup-res.o \
irq.o
obj-$(CONFIG_PROC_FS) += proc.o
+obj-$(CONFIG_SYSFS) += slot.o
# Build PCI Express stuff if needed
obj-$(CONFIG_PCIEPORTBUS) += pcie/
diff --git a/drivers/pci/access.c b/drivers/pci/access.c
index 0f37065..db23200 100644
--- a/drivers/pci/access.c
+++ b/drivers/pci/access.c
@@ -66,6 +66,25 @@
EXPORT_SYMBOL(pci_bus_write_config_word);
EXPORT_SYMBOL(pci_bus_write_config_dword);
+/**
+ * pci_bus_set_ops - Set raw operations of pci bus
+ * @bus: pci bus struct
+ * @ops: new raw operations
+ *
+ * Return previous raw operations
+ */
+struct pci_ops *pci_bus_set_ops(struct pci_bus *bus, struct pci_ops *ops)
+{
+ struct pci_ops *old_ops;
+ unsigned long flags;
+
+ spin_lock_irqsave(&pci_lock, flags);
+ old_ops = bus->ops;
+ bus->ops = ops;
+ spin_unlock_irqrestore(&pci_lock, flags);
+ return old_ops;
+}
+EXPORT_SYMBOL(pci_bus_set_ops);
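Editor's note: pci_bus_set_ops() above swaps a bus's config-space accessors under pci_lock and returns the previous ones, so a caller can interpose temporary ops and restore the originals later. A minimal sketch of such a caller (the wrapper functions are hypothetical):

    static struct pci_ops my_ops = {
        .read = my_read_config,         /* hypothetical wrappers */
        .write = my_write_config,
    };
    struct pci_ops *saved;

    saved = pci_bus_set_ops(bus, &my_ops);  /* interpose */
    /* ... config cycles now go through my_ops ... */
    pci_bus_set_ops(bus, saved);            /* restore the original ops */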
/**
* pci_read_vpd - Read one entry from Vital Product Data
diff --git a/drivers/pci/bus.c b/drivers/pci/bus.c
index 97a8194..cef28a7 100644
--- a/drivers/pci/bus.c
+++ b/drivers/pci/bus.c
@@ -41,9 +41,14 @@
void *alignf_data)
{
int i, ret = -ENOMEM;
+ resource_size_t max = -1;
type_mask |= IORESOURCE_IO | IORESOURCE_MEM;
+ /* don't allocate too high if the pref mem doesn't support 64-bit */
+ if (!(res->flags & IORESOURCE_MEM_64))
+ max = PCIBIOS_MAX_MEM_32;
+
for (i = 0; i < PCI_BUS_NUM_RESOURCES; i++) {
struct resource *r = bus->resource[i];
if (!r)
@@ -62,7 +67,7 @@
/* Ok, try it out.. */
ret = allocate_resource(r, res, size,
r->start ? : min,
- -1, align,
+ max, align,
alignf, alignf_data);
if (ret == 0)
break;
@@ -201,13 +206,18 @@
* Walk the given bus, including any bridged devices
* on buses under this bus. Call the provided callback
* on each device found.
+ *
+ * We check the return value of @cb each time. If it returns anything
+ * other than 0, we break out and stop walking the bus.
+ *
*/
-void pci_walk_bus(struct pci_bus *top, void (*cb)(struct pci_dev *, void *),
+void pci_walk_bus(struct pci_bus *top, int (*cb)(struct pci_dev *, void *),
void *userdata)
{
struct pci_dev *dev;
struct pci_bus *bus;
struct list_head *next;
+ int retval;
bus = top;
down_read(&pci_bus_sem);
@@ -231,8 +241,10 @@
/* Run device routines with the device locked */
down(&dev->dev.sem);
- cb(dev, userdata);
+ retval = cb(dev, userdata);
up(&dev->dev.sem);
+ if (retval)
+ break;
}
up_read(&pci_bus_sem);
}
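Editor's note: with the change above, pci_walk_bus() callbacks return int and any non-zero return stops the walk early. A minimal sketch of a callback using the new signature (the search criterion is only an example):

    static int find_first_pcie(struct pci_dev *dev, void *data)
    {
        struct pci_dev **found = data;

        if (!dev->is_pcie)
            return 0;           /* keep walking */
        *found = dev;
        return 1;               /* non-zero stops the walk */
    }

    /* usage: pci_walk_bus(bus, find_first_pcie, &found); */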
diff --git a/drivers/pci/dmar.c b/drivers/pci/dmar.c
index fa3a113..7b287cb 100644
--- a/drivers/pci/dmar.c
+++ b/drivers/pci/dmar.c
@@ -267,6 +267,84 @@
}
return ret;
}
+
+static LIST_HEAD(dmar_atsr_units);
+
+static int __init dmar_parse_one_atsr(struct acpi_dmar_header *hdr)
+{
+ struct acpi_dmar_atsr *atsr;
+ struct dmar_atsr_unit *atsru;
+
+ atsr = container_of(hdr, struct acpi_dmar_atsr, header);
+ atsru = kzalloc(sizeof(*atsru), GFP_KERNEL);
+ if (!atsru)
+ return -ENOMEM;
+
+ atsru->hdr = hdr;
+ atsru->include_all = atsr->flags & 0x1;
+
+ list_add(&atsru->list, &dmar_atsr_units);
+
+ return 0;
+}
+
+static int __init atsr_parse_dev(struct dmar_atsr_unit *atsru)
+{
+ int rc;
+ struct acpi_dmar_atsr *atsr;
+
+ if (atsru->include_all)
+ return 0;
+
+ atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
+ rc = dmar_parse_dev_scope((void *)(atsr + 1),
+ (void *)atsr + atsr->header.length,
+ &atsru->devices_cnt, &atsru->devices,
+ atsr->segment);
+ if (rc || !atsru->devices_cnt) {
+ list_del(&atsru->list);
+ kfree(atsru);
+ }
+
+ return rc;
+}
+
+int dmar_find_matched_atsr_unit(struct pci_dev *dev)
+{
+ int i;
+ struct pci_bus *bus;
+ struct acpi_dmar_atsr *atsr;
+ struct dmar_atsr_unit *atsru;
+
+ list_for_each_entry(atsru, &dmar_atsr_units, list) {
+ atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
+ if (atsr->segment == pci_domain_nr(dev->bus))
+ goto found;
+ }
+
+ return 0;
+
+found:
+ for (bus = dev->bus; bus; bus = bus->parent) {
+ struct pci_dev *bridge = bus->self;
+
+ if (!bridge || !bridge->is_pcie ||
+ bridge->pcie_type == PCI_EXP_TYPE_PCI_BRIDGE)
+ return 0;
+
+ if (bridge->pcie_type == PCI_EXP_TYPE_ROOT_PORT) {
+ for (i = 0; i < atsru->devices_cnt; i++)
+ if (atsru->devices[i] == bridge)
+ return 1;
+ break;
+ }
+ }
+
+ if (atsru->include_all)
+ return 1;
+
+ return 0;
+}
#endif
static void __init
@@ -274,22 +352,28 @@
{
struct acpi_dmar_hardware_unit *drhd;
struct acpi_dmar_reserved_memory *rmrr;
+ struct acpi_dmar_atsr *atsr;
switch (header->type) {
case ACPI_DMAR_TYPE_HARDWARE_UNIT:
- drhd = (struct acpi_dmar_hardware_unit *)header;
+ drhd = container_of(header, struct acpi_dmar_hardware_unit,
+ header);
printk (KERN_INFO PREFIX
- "DRHD (flags: 0x%08x)base: 0x%016Lx\n",
- drhd->flags, (unsigned long long)drhd->address);
+ "DRHD base: %#016Lx flags: %#x\n",
+ (unsigned long long)drhd->address, drhd->flags);
break;
case ACPI_DMAR_TYPE_RESERVED_MEMORY:
- rmrr = (struct acpi_dmar_reserved_memory *)header;
-
+ rmrr = container_of(header, struct acpi_dmar_reserved_memory,
+ header);
printk (KERN_INFO PREFIX
- "RMRR base: 0x%016Lx end: 0x%016Lx\n",
+ "RMRR base: %#016Lx end: %#016Lx\n",
(unsigned long long)rmrr->base_address,
(unsigned long long)rmrr->end_address);
break;
+ case ACPI_DMAR_TYPE_ATSR:
+ atsr = container_of(header, struct acpi_dmar_atsr, header);
+ printk(KERN_INFO PREFIX "ATSR flags: %#x\n", atsr->flags);
+ break;
}
}
@@ -363,6 +447,11 @@
ret = dmar_parse_one_rmrr(entry_header);
#endif
break;
+ case ACPI_DMAR_TYPE_ATSR:
+#ifdef CONFIG_DMAR
+ ret = dmar_parse_one_atsr(entry_header);
+#endif
+ break;
default:
printk(KERN_WARNING PREFIX
"Unknown DMAR structure type\n");
@@ -431,11 +520,19 @@
#ifdef CONFIG_DMAR
{
struct dmar_rmrr_unit *rmrr, *rmrr_n;
+ struct dmar_atsr_unit *atsr, *atsr_n;
+
list_for_each_entry_safe(rmrr, rmrr_n, &dmar_rmrr_units, list) {
ret = rmrr_parse_dev(rmrr);
if (ret)
return ret;
}
+
+ list_for_each_entry_safe(atsr, atsr_n, &dmar_atsr_units, list) {
+ ret = atsr_parse_dev(atsr);
+ if (ret)
+ return ret;
+ }
}
#endif
@@ -468,6 +565,9 @@
#ifdef CONFIG_DMAR
if (list_empty(&dmar_rmrr_units))
printk(KERN_INFO PREFIX "No RMRR found\n");
+
+ if (list_empty(&dmar_atsr_units))
+ printk(KERN_INFO PREFIX "No ATSR found\n");
#endif
#ifdef CONFIG_INTR_REMAP
@@ -515,6 +615,7 @@
u32 ver;
static int iommu_allocated = 0;
int agaw = 0;
+ int msagaw = 0;
iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
if (!iommu)
@@ -535,12 +636,20 @@
agaw = iommu_calculate_agaw(iommu);
if (agaw < 0) {
printk(KERN_ERR
- "Cannot get a valid agaw for iommu (seq_id = %d)\n",
+ "Cannot get a valid agaw for iommu (seq_id = %d)\n",
+ iommu->seq_id);
+ goto error;
+ }
+ msagaw = iommu_calculate_max_sagaw(iommu);
+ if (msagaw < 0) {
+ printk(KERN_ERR
+ "Cannot get a valid max agaw for iommu (seq_id = %d)\n",
iommu->seq_id);
goto error;
}
#endif
iommu->agaw = agaw;
+ iommu->msagaw = msagaw;
/* the registers might be more than one page */
map_size = max_t(int, ecap_max_iotlb_offset(iommu->ecap),
@@ -590,7 +699,8 @@
*/
static inline void reclaim_free_desc(struct q_inval *qi)
{
- while (qi->desc_status[qi->free_tail] == QI_DONE) {
+ while (qi->desc_status[qi->free_tail] == QI_DONE ||
+ qi->desc_status[qi->free_tail] == QI_ABORT) {
qi->desc_status[qi->free_tail] = QI_FREE;
qi->free_tail = (qi->free_tail + 1) % QI_LENGTH;
qi->free_cnt++;
@@ -600,10 +710,13 @@
static int qi_check_fault(struct intel_iommu *iommu, int index)
{
u32 fault;
- int head;
+ int head, tail;
struct q_inval *qi = iommu->qi;
int wait_index = (index + 1) % QI_LENGTH;
+ if (qi->desc_status[wait_index] == QI_ABORT)
+ return -EAGAIN;
+
fault = readl(iommu->reg + DMAR_FSTS_REG);
/*
@@ -613,7 +726,11 @@
*/
if (fault & DMA_FSTS_IQE) {
head = readl(iommu->reg + DMAR_IQH_REG);
- if ((head >> 4) == index) {
+ if ((head >> DMAR_IQ_SHIFT) == index) {
+ printk(KERN_ERR "VT-d detected invalid descriptor: "
+ "low=%llx, high=%llx\n",
+ (unsigned long long)qi->desc[index].low,
+ (unsigned long long)qi->desc[index].high);
memcpy(&qi->desc[index], &qi->desc[wait_index],
sizeof(struct qi_desc));
__iommu_flush_cache(iommu, &qi->desc[index],
@@ -623,6 +740,32 @@
}
}
+ /*
+ * If ITE happens, all pending wait_desc commands are aborted.
+ * No new descriptors are fetched until the ITE is cleared.
+ */
+ if (fault & DMA_FSTS_ITE) {
+ head = readl(iommu->reg + DMAR_IQH_REG);
+ head = ((head >> DMAR_IQ_SHIFT) - 1 + QI_LENGTH) % QI_LENGTH;
+ head |= 1;
+ tail = readl(iommu->reg + DMAR_IQT_REG);
+ tail = ((tail >> DMAR_IQ_SHIFT) - 1 + QI_LENGTH) % QI_LENGTH;
+
+ writel(DMA_FSTS_ITE, iommu->reg + DMAR_FSTS_REG);
+
+ do {
+ if (qi->desc_status[head] == QI_IN_USE)
+ qi->desc_status[head] = QI_ABORT;
+ head = (head - 2 + QI_LENGTH) % QI_LENGTH;
+ } while (head != tail);
+
+ if (qi->desc_status[wait_index] == QI_ABORT)
+ return -EAGAIN;
+ }
+
+ if (fault & DMA_FSTS_ICE)
+ writel(DMA_FSTS_ICE, iommu->reg + DMAR_FSTS_REG);
+
return 0;
}
@@ -632,7 +775,7 @@
*/
int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
{
- int rc = 0;
+ int rc;
struct q_inval *qi = iommu->qi;
struct qi_desc *hw, wait_desc;
int wait_index, index;
@@ -643,6 +786,9 @@
hw = qi->desc;
+restart:
+ rc = 0;
+
spin_lock_irqsave(&qi->q_lock, flags);
while (qi->free_cnt < 3) {
spin_unlock_irqrestore(&qi->q_lock, flags);
@@ -673,7 +819,7 @@
* update the HW tail register indicating the presence of
* new descriptors.
*/
- writel(qi->free_head << 4, iommu->reg + DMAR_IQT_REG);
+ writel(qi->free_head << DMAR_IQ_SHIFT, iommu->reg + DMAR_IQT_REG);
while (qi->desc_status[wait_index] != QI_DONE) {
/*
@@ -685,18 +831,21 @@
*/
rc = qi_check_fault(iommu, index);
if (rc)
- goto out;
+ break;
spin_unlock(&qi->q_lock);
cpu_relax();
spin_lock(&qi->q_lock);
}
-out:
- qi->desc_status[index] = qi->desc_status[wait_index] = QI_DONE;
+
+ qi->desc_status[index] = QI_DONE;
reclaim_free_desc(qi);
spin_unlock_irqrestore(&qi->q_lock, flags);
+ if (rc == -EAGAIN)
+ goto restart;
+
return rc;
}
@@ -714,41 +863,26 @@
qi_submit_sync(&desc, iommu);
}
-int qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid, u8 fm,
- u64 type, int non_present_entry_flush)
+void qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid, u8 fm,
+ u64 type)
{
struct qi_desc desc;
- if (non_present_entry_flush) {
- if (!cap_caching_mode(iommu->cap))
- return 1;
- else
- did = 0;
- }
-
desc.low = QI_CC_FM(fm) | QI_CC_SID(sid) | QI_CC_DID(did)
| QI_CC_GRAN(type) | QI_CC_TYPE;
desc.high = 0;
- return qi_submit_sync(&desc, iommu);
+ qi_submit_sync(&desc, iommu);
}
-int qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
- unsigned int size_order, u64 type,
- int non_present_entry_flush)
+void qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
+ unsigned int size_order, u64 type)
{
u8 dw = 0, dr = 0;
struct qi_desc desc;
int ih = 0;
- if (non_present_entry_flush) {
- if (!cap_caching_mode(iommu->cap))
- return 1;
- else
- did = 0;
- }
-
if (cap_write_drain(iommu->cap))
dw = 1;
@@ -760,7 +894,28 @@
desc.high = QI_IOTLB_ADDR(addr) | QI_IOTLB_IH(ih)
| QI_IOTLB_AM(size_order);
- return qi_submit_sync(&desc, iommu);
+ qi_submit_sync(&desc, iommu);
+}
+
+void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 qdep,
+ u64 addr, unsigned mask)
+{
+ struct qi_desc desc;
+
+ if (mask) {
+ BUG_ON(addr & ((1 << (VTD_PAGE_SHIFT + mask)) - 1));
+ addr |= (1 << (VTD_PAGE_SHIFT + mask - 1)) - 1;
+ desc.high = QI_DEV_IOTLB_ADDR(addr) | QI_DEV_IOTLB_SIZE;
+ } else
+ desc.high = QI_DEV_IOTLB_ADDR(addr);
+
+ if (qdep >= QI_DEV_IOTLB_MAX_INVS)
+ qdep = 0;
+
+ desc.low = QI_DEV_IOTLB_SID(sid) | QI_DEV_IOTLB_QDEP(qdep) |
+ QI_DIOTLB_TYPE;
+
+ qi_submit_sync(&desc, iommu);
}
/*
@@ -790,7 +945,6 @@
cpu_relax();
iommu->gcmd &= ~DMA_GCMD_QIE;
-
writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl,
@@ -804,7 +958,7 @@
*/
static void __dmar_enable_qi(struct intel_iommu *iommu)
{
- u32 cmd, sts;
+ u32 sts;
unsigned long flags;
struct q_inval *qi = iommu->qi;
@@ -818,9 +972,8 @@
dmar_writeq(iommu->reg + DMAR_IQA_REG, virt_to_phys(qi->desc));
- cmd = iommu->gcmd | DMA_GCMD_QIE;
iommu->gcmd |= DMA_GCMD_QIE;
- writel(cmd, iommu->reg + DMAR_GCMD_REG);
+ writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
/* Make sure hardware complete it */
IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl, (sts & DMA_GSTS_QIES), sts);
@@ -1096,7 +1249,7 @@
set_irq_data(irq, NULL);
iommu->irq = 0;
destroy_irq(irq);
- return 0;
+ return ret;
}
ret = request_irq(irq, dmar_fault, 0, iommu->name, iommu);
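The dmar.c changes above drop the non_present_entry_flush argument from qi_flush_context() and qi_flush_iotlb(), turning both into void helpers; deciding whether caching mode makes a flush necessary is now the caller's job. A minimal sketch of the new calling convention, assuming the DMA_CCMD_*/DMA_TLB_* granularity constants already used by the Intel IOMMU driver and treating the surrounding names as placeholders:

#include <linux/intel-iommu.h>

/* Sketch only: flush_after_mapping_change() and its arguments are
 * illustrative; they are not part of the patch above. */
static void flush_after_mapping_change(struct intel_iommu *iommu,
				       u16 did, u16 sid,
				       u64 addr, unsigned int pages_order)
{
	/* device-selective context-cache flush (no return value any more) */
	qi_flush_context(iommu, did, sid, 0, DMA_CCMD_DEVICE_INVL);

	/* page-selective IOTLB flush for the affected range */
	qi_flush_iotlb(iommu, did, addr, pages_order, DMA_TLB_PSI_FLUSH);
}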
diff --git a/drivers/pci/hotplug/Kconfig b/drivers/pci/hotplug/Kconfig
index 9aa4fe1..66f29bc 100644
--- a/drivers/pci/hotplug/Kconfig
+++ b/drivers/pci/hotplug/Kconfig
@@ -4,7 +4,7 @@
menuconfig HOTPLUG_PCI
tristate "Support for PCI Hotplug"
- depends on PCI && HOTPLUG
+ depends on PCI && HOTPLUG && SYSFS
---help---
Say Y here if you have a motherboard with a PCI Hotplug controller.
This allows you to add and remove PCI cards while the machine is
@@ -41,7 +41,7 @@
config HOTPLUG_PCI_COMPAQ
tristate "Compaq PCI Hotplug driver"
- depends on X86 && PCI_BIOS && PCI_LEGACY
+ depends on X86 && PCI_BIOS
help
Say Y here if you have a motherboard with a Compaq PCI Hotplug
controller.
diff --git a/drivers/pci/hotplug/acpi_pcihp.c b/drivers/pci/hotplug/acpi_pcihp.c
index fbc63d5..eb15958 100644
--- a/drivers/pci/hotplug/acpi_pcihp.c
+++ b/drivers/pci/hotplug/acpi_pcihp.c
@@ -354,7 +354,7 @@
status = acpi_run_hpp(handle, hpp);
if (ACPI_SUCCESS(status))
break;
- if (acpi_root_bridge(handle))
+ if (acpi_is_root_bridge(handle))
break;
status = acpi_get_parent(handle, &phandle);
if (ACPI_FAILURE(status))
@@ -428,7 +428,7 @@
status = acpi_run_oshp(handle);
if (ACPI_SUCCESS(status))
goto got_one;
- if (acpi_root_bridge(handle))
+ if (acpi_is_root_bridge(handle))
break;
chandle = handle;
status = acpi_get_parent(chandle, &handle);
@@ -449,42 +449,6 @@
}
EXPORT_SYMBOL(acpi_get_hp_hw_control_from_firmware);
-/* acpi_root_bridge - check to see if this acpi object is a root bridge
- *
- * @handle - the acpi object in question.
- */
-int acpi_root_bridge(acpi_handle handle)
-{
- acpi_status status;
- struct acpi_device_info *info;
- struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
- int i;
-
- status = acpi_get_object_info(handle, &buffer);
- if (ACPI_SUCCESS(status)) {
- info = buffer.pointer;
- if ((info->valid & ACPI_VALID_HID) &&
- !strcmp(PCI_ROOT_HID_STRING,
- info->hardware_id.value)) {
- kfree(buffer.pointer);
- return 1;
- }
- if (info->valid & ACPI_VALID_CID) {
- for (i=0; i < info->compatibility_id.count; i++) {
- if (!strcmp(PCI_ROOT_HID_STRING,
- info->compatibility_id.id[i].value)) {
- kfree(buffer.pointer);
- return 1;
- }
- }
- }
- kfree(buffer.pointer);
- }
- return 0;
-}
-EXPORT_SYMBOL_GPL(acpi_root_bridge);
-
-
static int is_ejectable(acpi_handle handle)
{
acpi_status status;
diff --git a/drivers/pci/hotplug/acpiphp_core.c b/drivers/pci/hotplug/acpiphp_core.c
index 43c10bd..4dd7114 100644
--- a/drivers/pci/hotplug/acpiphp_core.c
+++ b/drivers/pci/hotplug/acpiphp_core.c
@@ -77,7 +77,6 @@
static int get_adapter_status (struct hotplug_slot *slot, u8 *value);
static struct hotplug_slot_ops acpi_hotplug_slot_ops = {
- .owner = THIS_MODULE,
.enable_slot = enable_slot,
.disable_slot = disable_slot,
.set_attention_status = set_attention_status,
diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c
index 3a6064b..0cb0f83 100644
--- a/drivers/pci/hotplug/acpiphp_glue.c
+++ b/drivers/pci/hotplug/acpiphp_glue.c
@@ -678,18 +678,9 @@
static struct pci_dev * get_apic_pci_info(acpi_handle handle)
{
- struct acpi_pci_id id;
- struct pci_bus *bus;
struct pci_dev *dev;
- if (ACPI_FAILURE(acpi_get_pci_id(handle, &id)))
- return NULL;
-
- bus = pci_find_bus(id.segment, id.bus);
- if (!bus)
- return NULL;
-
- dev = pci_get_slot(bus, PCI_DEVFN(id.device, id.function));
+ dev = acpi_get_pci_dev(handle);
if (!dev)
return NULL;
@@ -1396,19 +1387,16 @@
/* Program resources in newly inserted bridge */
static int acpiphp_configure_bridge (acpi_handle handle)
{
- struct acpi_pci_id pci_id;
+ struct pci_dev *dev;
struct pci_bus *bus;
- if (ACPI_FAILURE(acpi_get_pci_id(handle, &pci_id))) {
+ dev = acpi_get_pci_dev(handle);
+ if (!dev) {
err("cannot get PCI domain and bus number for bridge\n");
return -EINVAL;
}
- bus = pci_find_bus(pci_id.segment, pci_id.bus);
- if (!bus) {
- err("cannot find bus %d:%d\n",
- pci_id.segment, pci_id.bus);
- return -EINVAL;
- }
+
+ bus = dev->bus;
pci_bus_size_bridges(bus);
pci_bus_assign_resources(bus);
@@ -1416,6 +1404,7 @@
acpiphp_set_hpp_values(handle, bus);
pci_enable_bridges(bus);
acpiphp_configure_ioapics(handle);
+ pci_dev_put(dev);
return 0;
}
@@ -1631,7 +1620,7 @@
{
int *count = (int *)context;
- if (acpi_root_bridge(handle)) {
+ if (acpi_is_root_bridge(handle)) {
acpi_install_notify_handler(handle, ACPI_SYSTEM_NOTIFY,
handle_hotplug_event_bridge, NULL);
(*count)++;
diff --git a/drivers/pci/hotplug/cpci_hotplug_core.c b/drivers/pci/hotplug/cpci_hotplug_core.c
index de94f4f..a5b9f6a 100644
--- a/drivers/pci/hotplug/cpci_hotplug_core.c
+++ b/drivers/pci/hotplug/cpci_hotplug_core.c
@@ -72,7 +72,6 @@
static int get_latch_status(struct hotplug_slot *slot, u8 * value);
static struct hotplug_slot_ops cpci_hotplug_slot_ops = {
- .owner = THIS_MODULE,
.enable_slot = enable_slot,
.disable_slot = disable_slot,
.set_attention_status = set_attention_status,
diff --git a/drivers/pci/hotplug/cpqphp.h b/drivers/pci/hotplug/cpqphp.h
index afaf8f6..5383600 100644
--- a/drivers/pci/hotplug/cpqphp.h
+++ b/drivers/pci/hotplug/cpqphp.h
@@ -150,25 +150,25 @@
/* offsets to the controller registers based on the above structure layout */
enum ctrl_offsets {
- SLOT_RST = offsetof(struct ctrl_reg, slot_RST),
+ SLOT_RST = offsetof(struct ctrl_reg, slot_RST),
SLOT_ENABLE = offsetof(struct ctrl_reg, slot_enable),
MISC = offsetof(struct ctrl_reg, misc),
LED_CONTROL = offsetof(struct ctrl_reg, led_control),
INT_INPUT_CLEAR = offsetof(struct ctrl_reg, int_input_clear),
- INT_MASK = offsetof(struct ctrl_reg, int_mask),
- CTRL_RESERVED0 = offsetof(struct ctrl_reg, reserved0),
+ INT_MASK = offsetof(struct ctrl_reg, int_mask),
+ CTRL_RESERVED0 = offsetof(struct ctrl_reg, reserved0),
CTRL_RESERVED1 = offsetof(struct ctrl_reg, reserved1),
CTRL_RESERVED2 = offsetof(struct ctrl_reg, reserved1),
- GEN_OUTPUT_AB = offsetof(struct ctrl_reg, gen_output_AB),
- NON_INT_INPUT = offsetof(struct ctrl_reg, non_int_input),
+ GEN_OUTPUT_AB = offsetof(struct ctrl_reg, gen_output_AB),
+ NON_INT_INPUT = offsetof(struct ctrl_reg, non_int_input),
CTRL_RESERVED3 = offsetof(struct ctrl_reg, reserved3),
CTRL_RESERVED4 = offsetof(struct ctrl_reg, reserved4),
CTRL_RESERVED5 = offsetof(struct ctrl_reg, reserved5),
CTRL_RESERVED6 = offsetof(struct ctrl_reg, reserved6),
CTRL_RESERVED7 = offsetof(struct ctrl_reg, reserved7),
CTRL_RESERVED8 = offsetof(struct ctrl_reg, reserved8),
- SLOT_MASK = offsetof(struct ctrl_reg, slot_mask),
- CTRL_RESERVED9 = offsetof(struct ctrl_reg, reserved9),
+ SLOT_MASK = offsetof(struct ctrl_reg, slot_mask),
+ CTRL_RESERVED9 = offsetof(struct ctrl_reg, reserved9),
CTRL_RESERVED10 = offsetof(struct ctrl_reg, reserved10),
CTRL_RESERVED11 = offsetof(struct ctrl_reg, reserved11),
SLOT_SERR = offsetof(struct ctrl_reg, slot_SERR),
@@ -190,7 +190,9 @@
u32 reserved2;
} __attribute__ ((packed));
-/* offsets to the hotplug resource table registers based on the above structure layout */
+/* offsets to the hotplug resource table registers based on the above
+ * structure layout
+ */
enum hrt_offsets {
SIG0 = offsetof(struct hrt, sig0),
SIG1 = offsetof(struct hrt, sig1),
@@ -217,18 +219,20 @@
u16 pre_mem_length;
} __attribute__ ((packed));
-/* offsets to the hotplug slot resource table registers based on the above structure layout */
+/* offsets to the hotplug slot resource table registers based on the above
+ * structure layout
+ */
enum slot_rt_offsets {
DEV_FUNC = offsetof(struct slot_rt, dev_func),
- PRIMARY_BUS = offsetof(struct slot_rt, primary_bus),
- SECONDARY_BUS = offsetof(struct slot_rt, secondary_bus),
- MAX_BUS = offsetof(struct slot_rt, max_bus),
- IO_BASE = offsetof(struct slot_rt, io_base),
- IO_LENGTH = offsetof(struct slot_rt, io_length),
- MEM_BASE = offsetof(struct slot_rt, mem_base),
- MEM_LENGTH = offsetof(struct slot_rt, mem_length),
- PRE_MEM_BASE = offsetof(struct slot_rt, pre_mem_base),
- PRE_MEM_LENGTH = offsetof(struct slot_rt, pre_mem_length),
+ PRIMARY_BUS = offsetof(struct slot_rt, primary_bus),
+ SECONDARY_BUS = offsetof(struct slot_rt, secondary_bus),
+ MAX_BUS = offsetof(struct slot_rt, max_bus),
+ IO_BASE = offsetof(struct slot_rt, io_base),
+ IO_LENGTH = offsetof(struct slot_rt, io_length),
+ MEM_BASE = offsetof(struct slot_rt, mem_base),
+ MEM_LENGTH = offsetof(struct slot_rt, mem_length),
+ PRE_MEM_BASE = offsetof(struct slot_rt, pre_mem_base),
+ PRE_MEM_LENGTH = offsetof(struct slot_rt, pre_mem_length),
};
struct pci_func {
@@ -286,8 +290,8 @@
struct controller {
struct controller *next;
u32 ctrl_int_comp;
- struct mutex crit_sect; /* critical section mutex */
- void __iomem *hpc_reg; /* cookie for our pci controller location */
+ struct mutex crit_sect; /* critical section mutex */
+ void __iomem *hpc_reg; /* cookie for our pci controller location */
struct pci_resource *mem_head;
struct pci_resource *p_mem_head;
struct pci_resource *io_head;
@@ -299,7 +303,7 @@
u8 next_event;
u8 interrupt;
u8 cfgspc_irq;
- u8 bus; /* bus number for the pci hotplug controller */
+ u8 bus; /* bus number for the pci hotplug controller */
u8 rev;
u8 slot_device_offset;
u8 first_slot;
@@ -401,46 +405,57 @@
/* debugfs functions for the hotplug controller info */
-extern void cpqhp_initialize_debugfs (void);
-extern void cpqhp_shutdown_debugfs (void);
-extern void cpqhp_create_debugfs_files (struct controller *ctrl);
-extern void cpqhp_remove_debugfs_files (struct controller *ctrl);
+extern void cpqhp_initialize_debugfs(void);
+extern void cpqhp_shutdown_debugfs(void);
+extern void cpqhp_create_debugfs_files(struct controller *ctrl);
+extern void cpqhp_remove_debugfs_files(struct controller *ctrl);
/* controller functions */
-extern void cpqhp_pushbutton_thread (unsigned long event_pointer);
-extern irqreturn_t cpqhp_ctrl_intr (int IRQ, void *data);
-extern int cpqhp_find_available_resources (struct controller *ctrl, void __iomem *rom_start);
-extern int cpqhp_event_start_thread (void);
-extern void cpqhp_event_stop_thread (void);
-extern struct pci_func *cpqhp_slot_create (unsigned char busnumber);
-extern struct pci_func *cpqhp_slot_find (unsigned char bus, unsigned char device, unsigned char index);
-extern int cpqhp_process_SI (struct controller *ctrl, struct pci_func *func);
-extern int cpqhp_process_SS (struct controller *ctrl, struct pci_func *func);
-extern int cpqhp_hardware_test (struct controller *ctrl, int test_num);
+extern void cpqhp_pushbutton_thread(unsigned long event_pointer);
+extern irqreturn_t cpqhp_ctrl_intr(int IRQ, void *data);
+extern int cpqhp_find_available_resources(struct controller *ctrl,
+ void __iomem *rom_start);
+extern int cpqhp_event_start_thread(void);
+extern void cpqhp_event_stop_thread(void);
+extern struct pci_func *cpqhp_slot_create(unsigned char busnumber);
+extern struct pci_func *cpqhp_slot_find(unsigned char bus, unsigned char device,
+ unsigned char index);
+extern int cpqhp_process_SI(struct controller *ctrl, struct pci_func *func);
+extern int cpqhp_process_SS(struct controller *ctrl, struct pci_func *func);
+extern int cpqhp_hardware_test(struct controller *ctrl, int test_num);
/* resource functions */
extern int cpqhp_resource_sort_and_combine (struct pci_resource **head);
/* pci functions */
-extern int cpqhp_set_irq (u8 bus_num, u8 dev_num, u8 int_pin, u8 irq_num);
-extern int cpqhp_get_bus_dev (struct controller *ctrl, u8 *bus_num, u8 *dev_num, u8 slot);
-extern int cpqhp_save_config (struct controller *ctrl, int busnumber, int is_hot_plug);
-extern int cpqhp_save_base_addr_length (struct controller *ctrl, struct pci_func * func);
-extern int cpqhp_save_used_resources (struct controller *ctrl, struct pci_func * func);
-extern int cpqhp_configure_board (struct controller *ctrl, struct pci_func * func);
-extern int cpqhp_save_slot_config (struct controller *ctrl, struct pci_func * new_slot);
-extern int cpqhp_valid_replace (struct controller *ctrl, struct pci_func * func);
-extern void cpqhp_destroy_board_resources (struct pci_func * func);
-extern int cpqhp_return_board_resources (struct pci_func * func, struct resource_lists * resources);
-extern void cpqhp_destroy_resource_list (struct resource_lists * resources);
-extern int cpqhp_configure_device (struct controller* ctrl, struct pci_func* func);
-extern int cpqhp_unconfigure_device (struct pci_func* func);
+extern int cpqhp_set_irq(u8 bus_num, u8 dev_num, u8 int_pin, u8 irq_num);
+extern int cpqhp_get_bus_dev(struct controller *ctrl, u8 *bus_num, u8 *dev_num,
+ u8 slot);
+extern int cpqhp_save_config(struct controller *ctrl, int busnumber,
+ int is_hot_plug);
+extern int cpqhp_save_base_addr_length(struct controller *ctrl,
+ struct pci_func *func);
+extern int cpqhp_save_used_resources(struct controller *ctrl,
+ struct pci_func *func);
+extern int cpqhp_configure_board(struct controller *ctrl,
+ struct pci_func *func);
+extern int cpqhp_save_slot_config(struct controller *ctrl,
+ struct pci_func *new_slot);
+extern int cpqhp_valid_replace(struct controller *ctrl, struct pci_func *func);
+extern void cpqhp_destroy_board_resources(struct pci_func *func);
+extern int cpqhp_return_board_resources (struct pci_func *func,
+ struct resource_lists *resources);
+extern void cpqhp_destroy_resource_list(struct resource_lists *resources);
+extern int cpqhp_configure_device(struct controller *ctrl,
+ struct pci_func *func);
+extern int cpqhp_unconfigure_device(struct pci_func *func);
/* Global variables */
extern int cpqhp_debug;
extern int cpqhp_legacy_mode;
extern struct controller *cpqhp_ctrl_list;
extern struct pci_func *cpqhp_slot_list[256];
+extern struct irq_routing_table *cpqhp_routing_table;
/* these can be gotten rid of, but for debugging they are purty */
extern u8 cpqhp_nic_irq;
@@ -449,7 +464,7 @@
/* inline functions */
-static inline char *slot_name(struct slot *slot)
+static inline const char *slot_name(struct slot *slot)
{
return hotplug_slot_name(slot->hotplug_slot);
}
@@ -458,9 +473,9 @@
* return_resource
*
* Puts node back in the resource list pointed to by head
- *
*/
-static inline void return_resource(struct pci_resource **head, struct pci_resource *node)
+static inline void return_resource(struct pci_resource **head,
+ struct pci_resource *node)
{
if (!node || !head)
return;
@@ -471,7 +486,7 @@
static inline void set_SOGO(struct controller *ctrl)
{
u16 misc;
-
+
misc = readw(ctrl->hpc_reg + MISC);
misc = (misc | 0x0001) & 0xFFFB;
writew(misc, ctrl->hpc_reg + MISC);
@@ -481,7 +496,7 @@
static inline void amber_LED_on(struct controller *ctrl, u8 slot)
{
u32 led_control;
-
+
led_control = readl(ctrl->hpc_reg + LED_CONTROL);
led_control |= (0x01010000L << slot);
writel(led_control, ctrl->hpc_reg + LED_CONTROL);
@@ -491,7 +506,7 @@
static inline void amber_LED_off(struct controller *ctrl, u8 slot)
{
u32 led_control;
-
+
led_control = readl(ctrl->hpc_reg + LED_CONTROL);
led_control &= ~(0x01010000L << slot);
writel(led_control, ctrl->hpc_reg + LED_CONTROL);
@@ -504,7 +519,7 @@
led_control = readl(ctrl->hpc_reg + LED_CONTROL);
led_control &= (0x01010000L << slot);
-
+
return led_control ? 1 : 0;
}
@@ -512,7 +527,7 @@
static inline void green_LED_on(struct controller *ctrl, u8 slot)
{
u32 led_control;
-
+
led_control = readl(ctrl->hpc_reg + LED_CONTROL);
led_control |= 0x0101L << slot;
writel(led_control, ctrl->hpc_reg + LED_CONTROL);
@@ -521,7 +536,7 @@
static inline void green_LED_off(struct controller *ctrl, u8 slot)
{
u32 led_control;
-
+
led_control = readl(ctrl->hpc_reg + LED_CONTROL);
led_control &= ~(0x0101L << slot);
writel(led_control, ctrl->hpc_reg + LED_CONTROL);
@@ -531,7 +546,7 @@
static inline void green_LED_blink(struct controller *ctrl, u8 slot)
{
u32 led_control;
-
+
led_control = readl(ctrl->hpc_reg + LED_CONTROL);
led_control &= ~(0x0101L << slot);
led_control |= (0x0001L << slot);
@@ -575,22 +590,21 @@
}
-/*
+/**
* get_controller_speed - find the current frequency/mode of controller.
*
* @ctrl: controller to get frequency/mode for.
*
* Returns controller speed.
- *
*/
static inline u8 get_controller_speed(struct controller *ctrl)
{
u8 curr_freq;
- u16 misc;
-
+ u16 misc;
+
if (ctrl->pcix_support) {
curr_freq = readb(ctrl->hpc_reg + NEXT_CURR_FREQ);
- if ((curr_freq & 0xB0) == 0xB0)
+ if ((curr_freq & 0xB0) == 0xB0)
return PCI_SPEED_133MHz_PCIX;
if ((curr_freq & 0xA0) == 0xA0)
return PCI_SPEED_100MHz_PCIX;
@@ -602,19 +616,18 @@
return PCI_SPEED_33MHz;
}
- misc = readw(ctrl->hpc_reg + MISC);
- return (misc & 0x0800) ? PCI_SPEED_66MHz : PCI_SPEED_33MHz;
+ misc = readw(ctrl->hpc_reg + MISC);
+ return (misc & 0x0800) ? PCI_SPEED_66MHz : PCI_SPEED_33MHz;
}
-
-/*
+
+/**
* get_adapter_speed - find the max supported frequency/mode of adapter.
*
* @ctrl: hotplug controller.
* @hp_slot: hotplug slot where adapter is installed.
*
* Returns adapter speed.
- *
*/
static inline u8 get_adapter_speed(struct controller *ctrl, u8 hp_slot)
{
@@ -672,7 +685,8 @@
}
-static inline int cpq_get_latch_status(struct controller *ctrl, struct slot *slot)
+static inline int cpq_get_latch_status(struct controller *ctrl,
+ struct slot *slot)
{
u32 status;
u8 hp_slot;
@@ -687,7 +701,8 @@
}
-static inline int get_presence_status(struct controller *ctrl, struct slot *slot)
+static inline int get_presence_status(struct controller *ctrl,
+ struct slot *slot)
{
int presence_save = 0;
u8 hp_slot;
@@ -696,7 +711,8 @@
hp_slot = slot->device - ctrl->slot_device_offset;
tempdword = readl(ctrl->hpc_reg + INT_INPUT_CLEAR);
- presence_save = (int) ((((~tempdword) >> 23) | ((~tempdword) >> 15)) >> hp_slot) & 0x02;
+ presence_save = (int) ((((~tempdword) >> 23) | ((~tempdword) >> 15))
+ >> hp_slot) & 0x02;
return presence_save;
}
@@ -718,5 +734,12 @@
return retval;
}
-#endif
+#include <asm/pci_x86.h>
+static inline int cpqhp_routing_table_length(void)
+{
+ BUG_ON(cpqhp_routing_table == NULL);
+ return ((cpqhp_routing_table->size - sizeof(struct irq_routing_table)) /
+ sizeof(struct irq_info));
+}
+#endif
diff --git a/drivers/pci/hotplug/cpqphp_core.c b/drivers/pci/hotplug/cpqphp_core.c
index c2e1bcb..075b4f4 100644
--- a/drivers/pci/hotplug/cpqphp_core.c
+++ b/drivers/pci/hotplug/cpqphp_core.c
@@ -25,8 +25,7 @@
* Send feedback to <greg@kroah.com>
*
* Jan 12, 2003 - Added 66/100/133MHz PCI-X support,
- * Torben Mathiasen <torben.mathiasen@hp.com>
- *
+ * Torben Mathiasen <torben.mathiasen@hp.com>
*/
#include <linux/module.h>
@@ -45,7 +44,6 @@
#include "cpqphp.h"
#include "cpqphp_nvram.h"
-#include <asm/pci_x86.h>
/* Global variables */
@@ -53,6 +51,7 @@
int cpqhp_legacy_mode;
struct controller *cpqhp_ctrl_list; /* = NULL */
struct pci_func *cpqhp_slot_list[256];
+struct irq_routing_table *cpqhp_routing_table;
/* local variables */
static void __iomem *smbios_table;
@@ -78,33 +77,6 @@
#define CPQHPC_MODULE_MINOR 208
-static int one_time_init (void);
-static int set_attention_status (struct hotplug_slot *slot, u8 value);
-static int process_SI (struct hotplug_slot *slot);
-static int process_SS (struct hotplug_slot *slot);
-static int hardware_test (struct hotplug_slot *slot, u32 value);
-static int get_power_status (struct hotplug_slot *slot, u8 *value);
-static int get_attention_status (struct hotplug_slot *slot, u8 *value);
-static int get_latch_status (struct hotplug_slot *slot, u8 *value);
-static int get_adapter_status (struct hotplug_slot *slot, u8 *value);
-static int get_max_bus_speed (struct hotplug_slot *slot, enum pci_bus_speed *value);
-static int get_cur_bus_speed (struct hotplug_slot *slot, enum pci_bus_speed *value);
-
-static struct hotplug_slot_ops cpqphp_hotplug_slot_ops = {
- .owner = THIS_MODULE,
- .set_attention_status = set_attention_status,
- .enable_slot = process_SI,
- .disable_slot = process_SS,
- .hardware_test = hardware_test,
- .get_power_status = get_power_status,
- .get_attention_status = get_attention_status,
- .get_latch_status = get_latch_status,
- .get_adapter_status = get_adapter_status,
- .get_max_bus_speed = get_max_bus_speed,
- .get_cur_bus_speed = get_cur_bus_speed,
-};
-
-
static inline int is_slot64bit(struct slot *slot)
{
return (readb(slot->p_sm_slot + SMBIOS_SLOT_WIDTH) == 0x06) ? 1 : 0;
@@ -144,7 +116,7 @@
break;
}
}
-
+
if (!status)
fp = NULL;
@@ -171,7 +143,7 @@
tempdword = ctrl->first_slot;
number_of_slots = readb(ctrl->hpc_reg + SLOT_MASK) & 0x0F;
- // Loop through slots
+ /* Loop through slots */
while (number_of_slots) {
physical_slot = tempdword;
writeb(0, ctrl->hpc_reg + SLOT_SERR);
@@ -182,41 +154,42 @@
return 0;
}
-
-/* nice debugging output */
-static int pci_print_IRQ_route (void)
+static int init_cpqhp_routing_table(void)
{
- struct irq_routing_table *routing_table;
int len;
- int loop;
- u8 tbus, tdevice, tslot;
-
- routing_table = pcibios_get_irq_routing_table();
- if (routing_table == NULL) {
- err("No BIOS Routing Table??? Not good\n");
+ cpqhp_routing_table = pcibios_get_irq_routing_table();
+ if (cpqhp_routing_table == NULL)
return -ENOMEM;
- }
- len = (routing_table->size - sizeof(struct irq_routing_table)) /
- sizeof(struct irq_info);
- // Make sure I got at least one entry
+ len = cpqhp_routing_table_length();
if (len == 0) {
- kfree(routing_table);
+ kfree(cpqhp_routing_table);
+ cpqhp_routing_table = NULL;
return -1;
}
- dbg("bus dev func slot\n");
+ return 0;
+}
+/* nice debugging output */
+static void pci_print_IRQ_route(void)
+{
+ int len;
+ int loop;
+ u8 tbus, tdevice, tslot;
+
+ len = cpqhp_routing_table_length();
+
+ dbg("bus dev func slot\n");
for (loop = 0; loop < len; ++loop) {
- tbus = routing_table->slots[loop].bus;
- tdevice = routing_table->slots[loop].devfn;
- tslot = routing_table->slots[loop].slot;
+ tbus = cpqhp_routing_table->slots[loop].bus;
+ tdevice = cpqhp_routing_table->slots[loop].devfn;
+ tslot = cpqhp_routing_table->slots[loop].slot;
dbg("%d %d %d %d\n", tbus, tdevice >> 3, tdevice & 0x7, tslot);
}
- kfree(routing_table);
- return 0;
+ return;
}
@@ -242,9 +215,9 @@
void __iomem *p_max;
if (!smbios_table || !curr)
- return(NULL);
+ return NULL;
- // set p_max to the end of the table
+ /* set p_max to the end of the table */
p_max = smbios_start + readw(smbios_table + ST_LENGTH);
p_temp = curr;
@@ -253,20 +226,19 @@
while ((p_temp < p_max) && !bail) {
/* Look for the double NULL terminator
* The first condition is the previous byte
- * and the second is the curr */
- if (!previous_byte && !(readb(p_temp))) {
+ * and the second is the curr
+ */
+ if (!previous_byte && !(readb(p_temp)))
bail = 1;
- }
previous_byte = readb(p_temp);
p_temp++;
}
- if (p_temp < p_max) {
+ if (p_temp < p_max)
return p_temp;
- } else {
+ else
return NULL;
- }
}
@@ -292,21 +264,18 @@
if (!smbios_table)
return NULL;
- if (!previous) {
+ if (!previous)
previous = smbios_start;
- } else {
+ else
previous = get_subsequent_smbios_entry(smbios_start,
smbios_table, previous);
- }
- while (previous) {
- if (readb(previous + SMBIOS_GENERIC_TYPE) != type) {
+ while (previous)
+ if (readb(previous + SMBIOS_GENERIC_TYPE) != type)
previous = get_subsequent_smbios_entry(smbios_start,
smbios_table, previous);
- } else {
+ else
break;
- }
- }
return previous;
}
@@ -322,144 +291,6 @@
kfree(slot);
}
-#define SLOT_NAME_SIZE 10
-
-static int ctrl_slot_setup(struct controller *ctrl,
- void __iomem *smbios_start,
- void __iomem *smbios_table)
-{
- struct slot *slot;
- struct hotplug_slot *hotplug_slot;
- struct hotplug_slot_info *hotplug_slot_info;
- u8 number_of_slots;
- u8 slot_device;
- u8 slot_number;
- u8 ctrl_slot;
- u32 tempdword;
- char name[SLOT_NAME_SIZE];
- void __iomem *slot_entry= NULL;
- int result = -ENOMEM;
-
- dbg("%s\n", __func__);
-
- tempdword = readl(ctrl->hpc_reg + INT_INPUT_CLEAR);
-
- number_of_slots = readb(ctrl->hpc_reg + SLOT_MASK) & 0x0F;
- slot_device = readb(ctrl->hpc_reg + SLOT_MASK) >> 4;
- slot_number = ctrl->first_slot;
-
- while (number_of_slots) {
- slot = kzalloc(sizeof(*slot), GFP_KERNEL);
- if (!slot)
- goto error;
-
- slot->hotplug_slot = kzalloc(sizeof(*(slot->hotplug_slot)),
- GFP_KERNEL);
- if (!slot->hotplug_slot)
- goto error_slot;
- hotplug_slot = slot->hotplug_slot;
-
- hotplug_slot->info =
- kzalloc(sizeof(*(hotplug_slot->info)),
- GFP_KERNEL);
- if (!hotplug_slot->info)
- goto error_hpslot;
- hotplug_slot_info = hotplug_slot->info;
-
- slot->ctrl = ctrl;
- slot->bus = ctrl->bus;
- slot->device = slot_device;
- slot->number = slot_number;
- dbg("slot->number = %u\n", slot->number);
-
- slot_entry = get_SMBIOS_entry(smbios_start, smbios_table, 9,
- slot_entry);
-
- while (slot_entry && (readw(slot_entry + SMBIOS_SLOT_NUMBER) !=
- slot->number)) {
- slot_entry = get_SMBIOS_entry(smbios_start,
- smbios_table, 9, slot_entry);
- }
-
- slot->p_sm_slot = slot_entry;
-
- init_timer(&slot->task_event);
- slot->task_event.expires = jiffies + 5 * HZ;
- slot->task_event.function = cpqhp_pushbutton_thread;
-
- //FIXME: these capabilities aren't used but if they are
- // they need to be correctly implemented
- slot->capabilities |= PCISLOT_REPLACE_SUPPORTED;
- slot->capabilities |= PCISLOT_INTERLOCK_SUPPORTED;
-
- if (is_slot64bit(slot))
- slot->capabilities |= PCISLOT_64_BIT_SUPPORTED;
- if (is_slot66mhz(slot))
- slot->capabilities |= PCISLOT_66_MHZ_SUPPORTED;
- if (ctrl->speed == PCI_SPEED_66MHz)
- slot->capabilities |= PCISLOT_66_MHZ_OPERATION;
-
- ctrl_slot =
- slot_device - (readb(ctrl->hpc_reg + SLOT_MASK) >> 4);
-
- // Check presence
- slot->capabilities |=
- ((((~tempdword) >> 23) |
- ((~tempdword) >> 15)) >> ctrl_slot) & 0x02;
- // Check the switch state
- slot->capabilities |=
- ((~tempdword & 0xFF) >> ctrl_slot) & 0x01;
- // Check the slot enable
- slot->capabilities |=
- ((read_slot_enable(ctrl) << 2) >> ctrl_slot) & 0x04;
-
- /* register this slot with the hotplug pci core */
- hotplug_slot->release = &release_slot;
- hotplug_slot->private = slot;
- snprintf(name, SLOT_NAME_SIZE, "%u", slot->number);
- hotplug_slot->ops = &cpqphp_hotplug_slot_ops;
-
- hotplug_slot_info->power_status = get_slot_enabled(ctrl, slot);
- hotplug_slot_info->attention_status =
- cpq_get_attention_status(ctrl, slot);
- hotplug_slot_info->latch_status =
- cpq_get_latch_status(ctrl, slot);
- hotplug_slot_info->adapter_status =
- get_presence_status(ctrl, slot);
-
- dbg("registering bus %d, dev %d, number %d, "
- "ctrl->slot_device_offset %d, slot %d\n",
- slot->bus, slot->device,
- slot->number, ctrl->slot_device_offset,
- slot_number);
- result = pci_hp_register(hotplug_slot,
- ctrl->pci_dev->bus,
- slot->device,
- name);
- if (result) {
- err("pci_hp_register failed with error %d\n", result);
- goto error_info;
- }
-
- slot->next = ctrl->slot;
- ctrl->slot = slot;
-
- number_of_slots--;
- slot_device++;
- slot_number++;
- }
-
- return 0;
-error_info:
- kfree(hotplug_slot_info);
-error_hpslot:
- kfree(hotplug_slot);
-error_slot:
- kfree(slot);
-error:
- return result;
-}
-
static int ctrl_slot_cleanup (struct controller * ctrl)
{
struct slot *old_slot, *next_slot;
@@ -476,36 +307,32 @@
cpqhp_remove_debugfs_files(ctrl);
- //Free IRQ associated with hot plug device
+ /* Free IRQ associated with hot plug device */
free_irq(ctrl->interrupt, ctrl);
- //Unmap the memory
+ /* Unmap the memory */
iounmap(ctrl->hpc_reg);
- //Finally reclaim PCI mem
+ /* Finally reclaim PCI mem */
release_mem_region(pci_resource_start(ctrl->pci_dev, 0),
pci_resource_len(ctrl->pci_dev, 0));
- return(0);
+ return 0;
}
-//============================================================================
-// function: get_slot_mapping
-//
-// Description: Attempts to determine a logical slot mapping for a PCI
-// device. Won't work for more than one PCI-PCI bridge
-// in a slot.
-//
-// Input: u8 bus_num - bus number of PCI device
-// u8 dev_num - device number of PCI device
-// u8 *slot - Pointer to u8 where slot number will
-// be returned
-//
-// Output: SUCCESS or FAILURE
-//=============================================================================
+/**
+ * get_slot_mapping - determine logical slot mapping for PCI device
+ *
+ * Won't work for more than one PCI-PCI bridge in a slot.
+ *
+ * @bus_num: bus number of PCI device
+ * @dev_num: device number of PCI device
+ * @slot: Pointer to u8 where slot number will be returned
+ *
+ * Output: SUCCESS or FAILURE
+ */
static int
get_slot_mapping(struct pci_bus *bus, u8 bus_num, u8 dev_num, u8 *slot)
{
- struct irq_routing_table *PCIIRQRoutingInfoLength;
u32 work;
long len;
long loop;
@@ -516,36 +343,25 @@
bridgeSlot = 0xFF;
- PCIIRQRoutingInfoLength = pcibios_get_irq_routing_table();
- if (!PCIIRQRoutingInfoLength)
- return -1;
-
- len = (PCIIRQRoutingInfoLength->size -
- sizeof(struct irq_routing_table)) / sizeof(struct irq_info);
- // Make sure I got at least one entry
- if (len == 0) {
- kfree(PCIIRQRoutingInfoLength);
- return -1;
- }
-
+ len = cpqhp_routing_table_length();
for (loop = 0; loop < len; ++loop) {
- tbus = PCIIRQRoutingInfoLength->slots[loop].bus;
- tdevice = PCIIRQRoutingInfoLength->slots[loop].devfn >> 3;
- tslot = PCIIRQRoutingInfoLength->slots[loop].slot;
+ tbus = cpqhp_routing_table->slots[loop].bus;
+ tdevice = cpqhp_routing_table->slots[loop].devfn >> 3;
+ tslot = cpqhp_routing_table->slots[loop].slot;
if ((tbus == bus_num) && (tdevice == dev_num)) {
*slot = tslot;
- kfree(PCIIRQRoutingInfoLength);
return 0;
} else {
/* Did not get a match on the target PCI device. Check
- * if the current IRQ table entry is a PCI-to-PCI bridge
- * device. If so, and it's secondary bus matches the
- * bus number for the target device, I need to save the
- * bridge's slot number. If I can not find an entry for
- * the target device, I will have to assume it's on the
- * other side of the bridge, and assign it the bridge's
- * slot. */
+ * if the current IRQ table entry is a PCI-to-PCI
+ * bridge device. If so, and it's secondary bus
+ * matches the bus number for the target device, I need
+ * to save the bridge's slot number. If I can not find
+ * an entry for the target device, I will have to
+ * assume it's on the other side of the bridge, and
+ * assign it the bridge's slot.
+ */
bus->number = tbus;
pci_bus_read_config_dword(bus, PCI_DEVFN(tdevice, 0),
PCI_CLASS_REVISION, &work);
@@ -555,25 +371,23 @@
PCI_DEVFN(tdevice, 0),
PCI_PRIMARY_BUS, &work);
// See if bridge's secondary bus matches target bus.
- if (((work >> 8) & 0x000000FF) == (long) bus_num) {
+ if (((work >> 8) & 0x000000FF) == (long) bus_num)
bridgeSlot = tslot;
- }
}
}
}
- // If we got here, we didn't find an entry in the IRQ mapping table
- // for the target PCI device. If we did determine that the target
- // device is on the other side of a PCI-to-PCI bridge, return the
- // slot number for the bridge.
+ /* If we got here, we didn't find an entry in the IRQ mapping table for
+ * the target PCI device. If we did determine that the target device
+ * is on the other side of a PCI-to-PCI bridge, return the slot number
+ * for the bridge.
+ */
if (bridgeSlot != 0xFF) {
*slot = bridgeSlot;
- kfree(PCIIRQRoutingInfoLength);
return 0;
}
- kfree(PCIIRQRoutingInfoLength);
- // Couldn't find an entry in the routing table for this PCI device
+ /* Couldn't find an entry in the routing table for this PCI device */
return -1;
}
@@ -591,32 +405,32 @@
u8 hp_slot;
if (func == NULL)
- return(1);
+ return 1;
hp_slot = func->device - ctrl->slot_device_offset;
- // Wait for exclusive access to hardware
+ /* Wait for exclusive access to hardware */
mutex_lock(&ctrl->crit_sect);
- if (status == 1) {
+ if (status == 1)
amber_LED_on (ctrl, hp_slot);
- } else if (status == 0) {
+ else if (status == 0)
amber_LED_off (ctrl, hp_slot);
- } else {
- // Done with exclusive hardware access
+ else {
+ /* Done with exclusive hardware access */
mutex_unlock(&ctrl->crit_sect);
- return(1);
+ return 1;
}
set_SOGO(ctrl);
- // Wait for SOBS to be unset
+ /* Wait for SOBS to be unset */
wait_for_ctrl_irq (ctrl);
- // Done with exclusive hardware access
+ /* Done with exclusive hardware access */
mutex_unlock(&ctrl->crit_sect);
- return(0);
+ return 0;
}
@@ -719,7 +533,7 @@
dbg("%s - physical_slot = %s\n", __func__, slot_name(slot));
- return cpqhp_hardware_test(ctrl, value);
+ return cpqhp_hardware_test(ctrl, value);
}
@@ -738,7 +552,7 @@
{
struct slot *slot = hotplug_slot->private;
struct controller *ctrl = slot->ctrl;
-
+
dbg("%s - physical_slot = %s\n", __func__, slot_name(slot));
*value = cpq_get_attention_status(ctrl, slot);
@@ -793,6 +607,230 @@
return 0;
}
+static struct hotplug_slot_ops cpqphp_hotplug_slot_ops = {
+ .set_attention_status = set_attention_status,
+ .enable_slot = process_SI,
+ .disable_slot = process_SS,
+ .hardware_test = hardware_test,
+ .get_power_status = get_power_status,
+ .get_attention_status = get_attention_status,
+ .get_latch_status = get_latch_status,
+ .get_adapter_status = get_adapter_status,
+ .get_max_bus_speed = get_max_bus_speed,
+ .get_cur_bus_speed = get_cur_bus_speed,
+};
+
+#define SLOT_NAME_SIZE 10
+
+static int ctrl_slot_setup(struct controller *ctrl,
+ void __iomem *smbios_start,
+ void __iomem *smbios_table)
+{
+ struct slot *slot;
+ struct hotplug_slot *hotplug_slot;
+ struct hotplug_slot_info *hotplug_slot_info;
+ u8 number_of_slots;
+ u8 slot_device;
+ u8 slot_number;
+ u8 ctrl_slot;
+ u32 tempdword;
+ char name[SLOT_NAME_SIZE];
+ void __iomem *slot_entry= NULL;
+ int result = -ENOMEM;
+
+ dbg("%s\n", __func__);
+
+ tempdword = readl(ctrl->hpc_reg + INT_INPUT_CLEAR);
+
+ number_of_slots = readb(ctrl->hpc_reg + SLOT_MASK) & 0x0F;
+ slot_device = readb(ctrl->hpc_reg + SLOT_MASK) >> 4;
+ slot_number = ctrl->first_slot;
+
+ while (number_of_slots) {
+ slot = kzalloc(sizeof(*slot), GFP_KERNEL);
+ if (!slot)
+ goto error;
+
+ slot->hotplug_slot = kzalloc(sizeof(*(slot->hotplug_slot)),
+ GFP_KERNEL);
+ if (!slot->hotplug_slot)
+ goto error_slot;
+ hotplug_slot = slot->hotplug_slot;
+
+ hotplug_slot->info = kzalloc(sizeof(*(hotplug_slot->info)),
+ GFP_KERNEL);
+ if (!hotplug_slot->info)
+ goto error_hpslot;
+ hotplug_slot_info = hotplug_slot->info;
+
+ slot->ctrl = ctrl;
+ slot->bus = ctrl->bus;
+ slot->device = slot_device;
+ slot->number = slot_number;
+ dbg("slot->number = %u\n", slot->number);
+
+ slot_entry = get_SMBIOS_entry(smbios_start, smbios_table, 9,
+ slot_entry);
+
+ while (slot_entry && (readw(slot_entry + SMBIOS_SLOT_NUMBER) !=
+ slot->number)) {
+ slot_entry = get_SMBIOS_entry(smbios_start,
+ smbios_table, 9, slot_entry);
+ }
+
+ slot->p_sm_slot = slot_entry;
+
+ init_timer(&slot->task_event);
+ slot->task_event.expires = jiffies + 5 * HZ;
+ slot->task_event.function = cpqhp_pushbutton_thread;
+
+ /* FIXME: these capabilities aren't used but if they are
+ * they need to be correctly implemented
+ */
+ slot->capabilities |= PCISLOT_REPLACE_SUPPORTED;
+ slot->capabilities |= PCISLOT_INTERLOCK_SUPPORTED;
+
+ if (is_slot64bit(slot))
+ slot->capabilities |= PCISLOT_64_BIT_SUPPORTED;
+ if (is_slot66mhz(slot))
+ slot->capabilities |= PCISLOT_66_MHZ_SUPPORTED;
+ if (ctrl->speed == PCI_SPEED_66MHz)
+ slot->capabilities |= PCISLOT_66_MHZ_OPERATION;
+
+ ctrl_slot =
+ slot_device - (readb(ctrl->hpc_reg + SLOT_MASK) >> 4);
+
+ /* Check presence */
+ slot->capabilities |=
+ ((((~tempdword) >> 23) |
+ ((~tempdword) >> 15)) >> ctrl_slot) & 0x02;
+ /* Check the switch state */
+ slot->capabilities |=
+ ((~tempdword & 0xFF) >> ctrl_slot) & 0x01;
+ /* Check the slot enable */
+ slot->capabilities |=
+ ((read_slot_enable(ctrl) << 2) >> ctrl_slot) & 0x04;
+
+ /* register this slot with the hotplug pci core */
+ hotplug_slot->release = &release_slot;
+ hotplug_slot->private = slot;
+ snprintf(name, SLOT_NAME_SIZE, "%u", slot->number);
+ hotplug_slot->ops = &cpqphp_hotplug_slot_ops;
+
+ hotplug_slot_info->power_status = get_slot_enabled(ctrl, slot);
+ hotplug_slot_info->attention_status =
+ cpq_get_attention_status(ctrl, slot);
+ hotplug_slot_info->latch_status =
+ cpq_get_latch_status(ctrl, slot);
+ hotplug_slot_info->adapter_status =
+ get_presence_status(ctrl, slot);
+
+ dbg("registering bus %d, dev %d, number %d, "
+ "ctrl->slot_device_offset %d, slot %d\n",
+ slot->bus, slot->device,
+ slot->number, ctrl->slot_device_offset,
+ slot_number);
+ result = pci_hp_register(hotplug_slot,
+ ctrl->pci_dev->bus,
+ slot->device,
+ name);
+ if (result) {
+ err("pci_hp_register failed with error %d\n", result);
+ goto error_info;
+ }
+
+ slot->next = ctrl->slot;
+ ctrl->slot = slot;
+
+ number_of_slots--;
+ slot_device++;
+ slot_number++;
+ }
+
+ return 0;
+error_info:
+ kfree(hotplug_slot_info);
+error_hpslot:
+ kfree(hotplug_slot);
+error_slot:
+ kfree(slot);
+error:
+ return result;
+}
+
+static int one_time_init(void)
+{
+ int loop;
+ int retval = 0;
+
+ if (initialized)
+ return 0;
+
+ power_mode = 0;
+
+ retval = init_cpqhp_routing_table();
+ if (retval)
+ goto error;
+
+ if (cpqhp_debug)
+ pci_print_IRQ_route();
+
+ dbg("Initialize + Start the notification mechanism \n");
+
+ retval = cpqhp_event_start_thread();
+ if (retval)
+ goto error;
+
+ dbg("Initialize slot lists\n");
+ for (loop = 0; loop < 256; loop++)
+ cpqhp_slot_list[loop] = NULL;
+
+ /* FIXME: We also need to hook the NMI handler eventually.
+ * this also needs to be worked with Christoph
+ * register_NMI_handler();
+ */
+ /* Map rom address */
+ cpqhp_rom_start = ioremap(ROM_PHY_ADDR, ROM_PHY_LEN);
+ if (!cpqhp_rom_start) {
+ err ("Could not ioremap memory region for ROM\n");
+ retval = -EIO;
+ goto error;
+ }
+
+ /* Now, map the int15 entry point if we are on compaq specific
+ * hardware
+ */
+ compaq_nvram_init(cpqhp_rom_start);
+
+ /* Map smbios table entry point structure */
+ smbios_table = detect_SMBIOS_pointer(cpqhp_rom_start,
+ cpqhp_rom_start + ROM_PHY_LEN);
+ if (!smbios_table) {
+ err ("Could not find the SMBIOS pointer in memory\n");
+ retval = -EIO;
+ goto error_rom_start;
+ }
+
+ smbios_start = ioremap(readl(smbios_table + ST_ADDRESS),
+ readw(smbios_table + ST_LENGTH));
+ if (!smbios_start) {
+ err ("Could not ioremap memory region taken from SMBIOS values\n");
+ retval = -EIO;
+ goto error_smbios_start;
+ }
+
+ initialized = 1;
+
+ return retval;
+
+error_smbios_start:
+ iounmap(smbios_start);
+error_rom_start:
+ iounmap(cpqhp_rom_start);
+error:
+ return retval;
+}
+
static int cpqhpc_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
u8 num_of_slots = 0;
@@ -815,7 +853,9 @@
return err;
}
- // Need to read VID early b/c it's used to differentiate CPQ and INTC discovery
+ /* Need to read VID early b/c it's used to differentiate CPQ and INTC
+ * discovery
+ */
rc = pci_read_config_word(pdev, PCI_VENDOR_ID, &vendor_id);
if (rc || ((vendor_id != PCI_VENDOR_ID_COMPAQ) && (vendor_id != PCI_VENDOR_ID_INTEL))) {
err(msg_HPC_non_compaq_or_intel);
@@ -832,217 +872,209 @@
}
/* Check for the proper subsytem ID's
- * Intel uses a different SSID programming model than Compaq.
+ * Intel uses a different SSID programming model than Compaq.
* For Intel, each SSID bit identifies a PHP capability.
* Also Intel HPC's may have RID=0.
*/
- if ((pdev->revision > 2) || (vendor_id == PCI_VENDOR_ID_INTEL)) {
- // TODO: This code can be made to support non-Compaq or Intel subsystem IDs
- rc = pci_read_config_word(pdev, PCI_SUBSYSTEM_VENDOR_ID, &subsystem_vid);
- if (rc) {
- err("%s : pci_read_config_word failed\n", __func__);
- goto err_disable_device;
- }
- dbg("Subsystem Vendor ID: %x\n", subsystem_vid);
- if ((subsystem_vid != PCI_VENDOR_ID_COMPAQ) && (subsystem_vid != PCI_VENDOR_ID_INTEL)) {
- err(msg_HPC_non_compaq_or_intel);
- rc = -ENODEV;
- goto err_disable_device;
- }
-
- ctrl = kzalloc(sizeof(struct controller), GFP_KERNEL);
- if (!ctrl) {
- err("%s : out of memory\n", __func__);
- rc = -ENOMEM;
- goto err_disable_device;
- }
-
- rc = pci_read_config_word(pdev, PCI_SUBSYSTEM_ID, &subsystem_deviceid);
- if (rc) {
- err("%s : pci_read_config_word failed\n", __func__);
- goto err_free_ctrl;
- }
-
- info("Hot Plug Subsystem Device ID: %x\n", subsystem_deviceid);
-
- /* Set Vendor ID, so it can be accessed later from other functions */
- ctrl->vendor_id = vendor_id;
-
- switch (subsystem_vid) {
- case PCI_VENDOR_ID_COMPAQ:
- if (pdev->revision >= 0x13) { /* CIOBX */
- ctrl->push_flag = 1;
- ctrl->slot_switch_type = 1;
- ctrl->push_button = 1;
- ctrl->pci_config_space = 1;
- ctrl->defeature_PHP = 1;
- ctrl->pcix_support = 1;
- ctrl->pcix_speed_capability = 1;
- pci_read_config_byte(pdev, 0x41, &bus_cap);
- if (bus_cap & 0x80) {
- dbg("bus max supports 133MHz PCI-X\n");
- ctrl->speed_capability = PCI_SPEED_133MHz_PCIX;
- break;
- }
- if (bus_cap & 0x40) {
- dbg("bus max supports 100MHz PCI-X\n");
- ctrl->speed_capability = PCI_SPEED_100MHz_PCIX;
- break;
- }
- if (bus_cap & 20) {
- dbg("bus max supports 66MHz PCI-X\n");
- ctrl->speed_capability = PCI_SPEED_66MHz_PCIX;
- break;
- }
- if (bus_cap & 10) {
- dbg("bus max supports 66MHz PCI\n");
- ctrl->speed_capability = PCI_SPEED_66MHz;
- break;
- }
-
- break;
- }
-
- switch (subsystem_deviceid) {
- case PCI_SUB_HPC_ID:
- /* Original 6500/7000 implementation */
- ctrl->slot_switch_type = 1;
- ctrl->speed_capability = PCI_SPEED_33MHz;
- ctrl->push_button = 0;
- ctrl->pci_config_space = 1;
- ctrl->defeature_PHP = 1;
- ctrl->pcix_support = 0;
- ctrl->pcix_speed_capability = 0;
- break;
- case PCI_SUB_HPC_ID2:
- /* First Pushbutton implementation */
- ctrl->push_flag = 1;
- ctrl->slot_switch_type = 1;
- ctrl->speed_capability = PCI_SPEED_33MHz;
- ctrl->push_button = 1;
- ctrl->pci_config_space = 1;
- ctrl->defeature_PHP = 1;
- ctrl->pcix_support = 0;
- ctrl->pcix_speed_capability = 0;
- break;
- case PCI_SUB_HPC_ID_INTC:
- /* Third party (6500/7000) */
- ctrl->slot_switch_type = 1;
- ctrl->speed_capability = PCI_SPEED_33MHz;
- ctrl->push_button = 0;
- ctrl->pci_config_space = 1;
- ctrl->defeature_PHP = 1;
- ctrl->pcix_support = 0;
- ctrl->pcix_speed_capability = 0;
- break;
- case PCI_SUB_HPC_ID3:
- /* First 66 Mhz implementation */
- ctrl->push_flag = 1;
- ctrl->slot_switch_type = 1;
- ctrl->speed_capability = PCI_SPEED_66MHz;
- ctrl->push_button = 1;
- ctrl->pci_config_space = 1;
- ctrl->defeature_PHP = 1;
- ctrl->pcix_support = 0;
- ctrl->pcix_speed_capability = 0;
- break;
- case PCI_SUB_HPC_ID4:
- /* First PCI-X implementation, 100MHz */
- ctrl->push_flag = 1;
- ctrl->slot_switch_type = 1;
- ctrl->speed_capability = PCI_SPEED_100MHz_PCIX;
- ctrl->push_button = 1;
- ctrl->pci_config_space = 1;
- ctrl->defeature_PHP = 1;
- ctrl->pcix_support = 1;
- ctrl->pcix_speed_capability = 0;
- break;
- default:
- err(msg_HPC_not_supported);
- rc = -ENODEV;
- goto err_free_ctrl;
- }
- break;
-
- case PCI_VENDOR_ID_INTEL:
- /* Check for speed capability (0=33, 1=66) */
- if (subsystem_deviceid & 0x0001) {
- ctrl->speed_capability = PCI_SPEED_66MHz;
- } else {
- ctrl->speed_capability = PCI_SPEED_33MHz;
- }
-
- /* Check for push button */
- if (subsystem_deviceid & 0x0002) {
- /* no push button */
- ctrl->push_button = 0;
- } else {
- /* push button supported */
- ctrl->push_button = 1;
- }
-
- /* Check for slot switch type (0=mechanical, 1=not mechanical) */
- if (subsystem_deviceid & 0x0004) {
- /* no switch */
- ctrl->slot_switch_type = 0;
- } else {
- /* switch */
- ctrl->slot_switch_type = 1;
- }
-
- /* PHP Status (0=De-feature PHP, 1=Normal operation) */
- if (subsystem_deviceid & 0x0008) {
- ctrl->defeature_PHP = 1; // PHP supported
- } else {
- ctrl->defeature_PHP = 0; // PHP not supported
- }
-
- /* Alternate Base Address Register Interface (0=not supported, 1=supported) */
- if (subsystem_deviceid & 0x0010) {
- ctrl->alternate_base_address = 1; // supported
- } else {
- ctrl->alternate_base_address = 0; // not supported
- }
-
- /* PCI Config Space Index (0=not supported, 1=supported) */
- if (subsystem_deviceid & 0x0020) {
- ctrl->pci_config_space = 1; // supported
- } else {
- ctrl->pci_config_space = 0; // not supported
- }
-
- /* PCI-X support */
- if (subsystem_deviceid & 0x0080) {
- /* PCI-X capable */
- ctrl->pcix_support = 1;
- /* Frequency of operation in PCI-X mode */
- if (subsystem_deviceid & 0x0040) {
- /* 133MHz PCI-X if bit 7 is 1 */
- ctrl->pcix_speed_capability = 1;
- } else {
- /* 100MHz PCI-X if bit 7 is 1 and bit 0 is 0, */
- /* 66MHz PCI-X if bit 7 is 1 and bit 0 is 1 */
- ctrl->pcix_speed_capability = 0;
- }
- } else {
- /* Conventional PCI */
- ctrl->pcix_support = 0;
- ctrl->pcix_speed_capability = 0;
- }
- break;
-
- default:
- err(msg_HPC_not_supported);
- rc = -ENODEV;
- goto err_free_ctrl;
- }
-
- } else {
+ if ((pdev->revision <= 2) && (vendor_id != PCI_VENDOR_ID_INTEL)) {
err(msg_HPC_not_supported);
return -ENODEV;
}
- // Tell the user that we found one.
+ /* TODO: This code can be made to support non-Compaq or Intel
+ * subsystem IDs
+ */
+ rc = pci_read_config_word(pdev, PCI_SUBSYSTEM_VENDOR_ID, &subsystem_vid);
+ if (rc) {
+ err("%s : pci_read_config_word failed\n", __func__);
+ goto err_disable_device;
+ }
+ dbg("Subsystem Vendor ID: %x\n", subsystem_vid);
+ if ((subsystem_vid != PCI_VENDOR_ID_COMPAQ) && (subsystem_vid != PCI_VENDOR_ID_INTEL)) {
+ err(msg_HPC_non_compaq_or_intel);
+ rc = -ENODEV;
+ goto err_disable_device;
+ }
+
+ ctrl = kzalloc(sizeof(struct controller), GFP_KERNEL);
+ if (!ctrl) {
+ err("%s : out of memory\n", __func__);
+ rc = -ENOMEM;
+ goto err_disable_device;
+ }
+
+ rc = pci_read_config_word(pdev, PCI_SUBSYSTEM_ID, &subsystem_deviceid);
+ if (rc) {
+ err("%s : pci_read_config_word failed\n", __func__);
+ goto err_free_ctrl;
+ }
+
+ info("Hot Plug Subsystem Device ID: %x\n", subsystem_deviceid);
+
+ /* Set Vendor ID, so it can be accessed later from other
+ * functions
+ */
+ ctrl->vendor_id = vendor_id;
+
+ switch (subsystem_vid) {
+ case PCI_VENDOR_ID_COMPAQ:
+ if (pdev->revision >= 0x13) { /* CIOBX */
+ ctrl->push_flag = 1;
+ ctrl->slot_switch_type = 1;
+ ctrl->push_button = 1;
+ ctrl->pci_config_space = 1;
+ ctrl->defeature_PHP = 1;
+ ctrl->pcix_support = 1;
+ ctrl->pcix_speed_capability = 1;
+ pci_read_config_byte(pdev, 0x41, &bus_cap);
+ if (bus_cap & 0x80) {
+ dbg("bus max supports 133MHz PCI-X\n");
+ ctrl->speed_capability = PCI_SPEED_133MHz_PCIX;
+ break;
+ }
+ if (bus_cap & 0x40) {
+ dbg("bus max supports 100MHz PCI-X\n");
+ ctrl->speed_capability = PCI_SPEED_100MHz_PCIX;
+ break;
+ }
+ if (bus_cap & 20) {
+ dbg("bus max supports 66MHz PCI-X\n");
+ ctrl->speed_capability = PCI_SPEED_66MHz_PCIX;
+ break;
+ }
+ if (bus_cap & 10) {
+ dbg("bus max supports 66MHz PCI\n");
+ ctrl->speed_capability = PCI_SPEED_66MHz;
+ break;
+ }
+
+ break;
+ }
+
+ switch (subsystem_deviceid) {
+ case PCI_SUB_HPC_ID:
+ /* Original 6500/7000 implementation */
+ ctrl->slot_switch_type = 1;
+ ctrl->speed_capability = PCI_SPEED_33MHz;
+ ctrl->push_button = 0;
+ ctrl->pci_config_space = 1;
+ ctrl->defeature_PHP = 1;
+ ctrl->pcix_support = 0;
+ ctrl->pcix_speed_capability = 0;
+ break;
+ case PCI_SUB_HPC_ID2:
+ /* First Pushbutton implementation */
+ ctrl->push_flag = 1;
+ ctrl->slot_switch_type = 1;
+ ctrl->speed_capability = PCI_SPEED_33MHz;
+ ctrl->push_button = 1;
+ ctrl->pci_config_space = 1;
+ ctrl->defeature_PHP = 1;
+ ctrl->pcix_support = 0;
+ ctrl->pcix_speed_capability = 0;
+ break;
+ case PCI_SUB_HPC_ID_INTC:
+ /* Third party (6500/7000) */
+ ctrl->slot_switch_type = 1;
+ ctrl->speed_capability = PCI_SPEED_33MHz;
+ ctrl->push_button = 0;
+ ctrl->pci_config_space = 1;
+ ctrl->defeature_PHP = 1;
+ ctrl->pcix_support = 0;
+ ctrl->pcix_speed_capability = 0;
+ break;
+ case PCI_SUB_HPC_ID3:
+ /* First 66 Mhz implementation */
+ ctrl->push_flag = 1;
+ ctrl->slot_switch_type = 1;
+ ctrl->speed_capability = PCI_SPEED_66MHz;
+ ctrl->push_button = 1;
+ ctrl->pci_config_space = 1;
+ ctrl->defeature_PHP = 1;
+ ctrl->pcix_support = 0;
+ ctrl->pcix_speed_capability = 0;
+ break;
+ case PCI_SUB_HPC_ID4:
+ /* First PCI-X implementation, 100MHz */
+ ctrl->push_flag = 1;
+ ctrl->slot_switch_type = 1;
+ ctrl->speed_capability = PCI_SPEED_100MHz_PCIX;
+ ctrl->push_button = 1;
+ ctrl->pci_config_space = 1;
+ ctrl->defeature_PHP = 1;
+ ctrl->pcix_support = 1;
+ ctrl->pcix_speed_capability = 0;
+ break;
+ default:
+ err(msg_HPC_not_supported);
+ rc = -ENODEV;
+ goto err_free_ctrl;
+ }
+ break;
+
+ case PCI_VENDOR_ID_INTEL:
+ /* Check for speed capability (0=33, 1=66) */
+ if (subsystem_deviceid & 0x0001)
+ ctrl->speed_capability = PCI_SPEED_66MHz;
+ else
+ ctrl->speed_capability = PCI_SPEED_33MHz;
+
+ /* Check for push button */
+ if (subsystem_deviceid & 0x0002)
+ ctrl->push_button = 0;
+ else
+ ctrl->push_button = 1;
+
+ /* Check for slot switch type (0=mechanical, 1=not mechanical) */
+ if (subsystem_deviceid & 0x0004)
+ ctrl->slot_switch_type = 0;
+ else
+ ctrl->slot_switch_type = 1;
+
+ /* PHP Status (0=De-feature PHP, 1=Normal operation) */
+ if (subsystem_deviceid & 0x0008)
+ ctrl->defeature_PHP = 1; /* PHP supported */
+ else
+ ctrl->defeature_PHP = 0; /* PHP not supported */
+
+ /* Alternate Base Address Register Interface
+ * (0=not supported, 1=supported)
+ */
+ if (subsystem_deviceid & 0x0010)
+ ctrl->alternate_base_address = 1;
+ else
+ ctrl->alternate_base_address = 0;
+
+ /* PCI Config Space Index (0=not supported, 1=supported) */
+ if (subsystem_deviceid & 0x0020)
+ ctrl->pci_config_space = 1;
+ else
+ ctrl->pci_config_space = 0;
+
+ /* PCI-X support */
+ if (subsystem_deviceid & 0x0080) {
+ ctrl->pcix_support = 1;
+ if (subsystem_deviceid & 0x0040)
+ /* 133MHz PCI-X if bit 7 is 1 */
+ ctrl->pcix_speed_capability = 1;
+ else
+ /* 100MHz PCI-X if bit 7 is 1 and bit 0 is 0, */
+ /* 66MHz PCI-X if bit 7 is 1 and bit 0 is 1 */
+ ctrl->pcix_speed_capability = 0;
+ } else {
+ /* Conventional PCI */
+ ctrl->pcix_support = 0;
+ ctrl->pcix_speed_capability = 0;
+ }
+ break;
+
+ default:
+ err(msg_HPC_not_supported);
+ rc = -ENODEV;
+ goto err_free_ctrl;
+ }
+
+ /* Tell the user that we found one. */
info("Initializing the PCI hot plug controller residing on PCI bus %d\n",
pdev->bus->number);
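
For reference, the Intel branch above derives every controller feature from individual bits of the subsystem device ID. A minimal standalone sketch of that decode follows; the helper name and example ID value are made up, while the masks are the ones the probe path uses.

#include <stdio.h>

/* Decode the Intel hotplug subsystem device ID feature bits the same way
 * the probe path above does.  Sketch only, not driver code. */
static void decode_intel_subsys(unsigned short id)
{
	printf("speed capability: %s\n", (id & 0x0001) ? "66MHz" : "33MHz");
	printf("push button:      %s\n", (id & 0x0002) ? "absent" : "present");
	printf("slot switch:      %s\n", (id & 0x0004) ? "not mechanical" : "mechanical");
	printf("PHP:              %s\n", (id & 0x0008) ? "normal operation" : "de-featured");
	printf("PCI-X:            %s\n", (id & 0x0080) ? "supported" : "conventional PCI only");
}

int main(void)
{
	decode_intel_subsys(0x0089);	/* made-up example value */
	return 0;
}
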
@@ -1087,7 +1119,7 @@
if (rc) {
goto err_free_bus;
}
-
+
dbg("pdev = %p\n", pdev);
dbg("pci resource start %llx\n", (unsigned long long)pci_resource_start(pdev, 0));
dbg("pci resource len %llx\n", (unsigned long long)pci_resource_len(pdev, 0));
@@ -1109,7 +1141,7 @@
goto err_free_mem_region;
}
- // Check for 66Mhz operation
+ /* Check for 66MHz operation */
ctrl->speed = get_controller_speed(ctrl);
@@ -1120,7 +1152,7 @@
*
********************************************************/
- // find the physical slot number of the first hot plug slot
+ /* find the physical slot number of the first hot plug slot */
/* Get slot won't work for devices behind bridges, but
* in this case it will always be called for the "base"
@@ -1137,7 +1169,7 @@
goto err_iounmap;
}
- // Store PCI Config Space for all devices on this bus
+ /* Store PCI Config Space for all devices on this bus */
rc = cpqhp_save_config(ctrl, ctrl->bus, readb(ctrl->hpc_reg + SLOT_MASK));
if (rc) {
err("%s: unable to save PCI configuration data, error %d\n",
@@ -1148,7 +1180,7 @@
/*
* Get IO, memory, and IRQ resources for new devices
*/
- // The next line is required for cpqhp_find_available_resources
+ /* The next line is required for cpqhp_find_available_resources */
ctrl->interrupt = pdev->irq;
if (ctrl->interrupt < 0x10) {
cpqhp_legacy_mode = 1;
@@ -1182,7 +1214,7 @@
__func__, rc);
goto err_iounmap;
}
-
+
/* Mask all general input interrupts */
writel(0xFFFFFFFFL, ctrl->hpc_reg + INT_MASK);
@@ -1196,12 +1228,14 @@
goto err_iounmap;
}
- /* Enable Shift Out interrupt and clear it, also enable SERR on power fault */
+ /* Enable Shift Out interrupt and clear it, also enable SERR on power
+ * fault
+ */
temp_word = readw(ctrl->hpc_reg + MISC);
temp_word |= 0x4006;
writew(temp_word, ctrl->hpc_reg + MISC);
- // Changed 05/05/97 to clear all interrupts at start
+ /* Changed 05/05/97 to clear all interrupts at start */
writel(0xFFFFFFFFL, ctrl->hpc_reg + INT_INPUT_CLEAR);
ctrl->ctrl_int_comp = readl(ctrl->hpc_reg + INT_INPUT_CLEAR);
@@ -1216,13 +1250,14 @@
cpqhp_ctrl_list = ctrl;
}
- // turn off empty slots here unless command line option "ON" set
- // Wait for exclusive access to hardware
+ /* turn off empty slots here unless command line option "ON" set
+ * Wait for exclusive access to hardware
+ */
mutex_lock(&ctrl->crit_sect);
num_of_slots = readb(ctrl->hpc_reg + SLOT_MASK) & 0x0F;
- // find first device number for the ctrl
+ /* find first device number for the ctrl */
device = readb(ctrl->hpc_reg + SLOT_MASK) >> 4;
while (num_of_slots) {
@@ -1234,23 +1269,21 @@
hp_slot = func->device - ctrl->slot_device_offset;
dbg("hp_slot: %d\n", hp_slot);
- // We have to save the presence info for these slots
+ /* We have to save the presence info for these slots */
temp_word = ctrl->ctrl_int_comp >> 16;
func->presence_save = (temp_word >> hp_slot) & 0x01;
func->presence_save |= (temp_word >> (hp_slot + 7)) & 0x02;
- if (ctrl->ctrl_int_comp & (0x1L << hp_slot)) {
+ if (ctrl->ctrl_int_comp & (0x1L << hp_slot))
func->switch_save = 0;
- } else {
+ else
func->switch_save = 0x10;
- }
- if (!power_mode) {
+ if (!power_mode)
if (!func->is_a_board) {
green_LED_off(ctrl, hp_slot);
slot_disable(ctrl, hp_slot);
}
- }
device++;
num_of_slots--;
@@ -1258,7 +1291,7 @@
if (!power_mode) {
set_SOGO(ctrl);
- // Wait for SOBS to be unset
+ /* Wait for SOBS to be unset */
wait_for_ctrl_irq(ctrl);
}
@@ -1269,7 +1302,7 @@
goto err_free_irq;
}
- // Done with exclusive hardware access
+ /* Done with exclusive hardware access */
mutex_unlock(&ctrl->crit_sect);
cpqhp_create_debugfs_files(ctrl);
@@ -1291,77 +1324,6 @@
return rc;
}
-
-static int one_time_init(void)
-{
- int loop;
- int retval = 0;
-
- if (initialized)
- return 0;
-
- power_mode = 0;
-
- retval = pci_print_IRQ_route();
- if (retval)
- goto error;
-
- dbg("Initialize + Start the notification mechanism \n");
-
- retval = cpqhp_event_start_thread();
- if (retval)
- goto error;
-
- dbg("Initialize slot lists\n");
- for (loop = 0; loop < 256; loop++) {
- cpqhp_slot_list[loop] = NULL;
- }
-
- // FIXME: We also need to hook the NMI handler eventually.
- // this also needs to be worked with Christoph
- // register_NMI_handler();
-
- // Map rom address
- cpqhp_rom_start = ioremap(ROM_PHY_ADDR, ROM_PHY_LEN);
- if (!cpqhp_rom_start) {
- err ("Could not ioremap memory region for ROM\n");
- retval = -EIO;
- goto error;
- }
-
- /* Now, map the int15 entry point if we are on compaq specific hardware */
- compaq_nvram_init(cpqhp_rom_start);
-
- /* Map smbios table entry point structure */
- smbios_table = detect_SMBIOS_pointer(cpqhp_rom_start,
- cpqhp_rom_start + ROM_PHY_LEN);
- if (!smbios_table) {
- err ("Could not find the SMBIOS pointer in memory\n");
- retval = -EIO;
- goto error_rom_start;
- }
-
- smbios_start = ioremap(readl(smbios_table + ST_ADDRESS),
- readw(smbios_table + ST_LENGTH));
- if (!smbios_start) {
- err ("Could not ioremap memory region taken from SMBIOS values\n");
- retval = -EIO;
- goto error_smbios_start;
- }
-
- initialized = 1;
-
- return retval;
-
-error_smbios_start:
- iounmap(smbios_start);
-error_rom_start:
- iounmap(cpqhp_rom_start);
-error:
- return retval;
-}
-
-
static void __exit unload_cpqphpd(void)
{
struct pci_func *next;
@@ -1381,10 +1343,10 @@
if (ctrl->hpc_reg) {
u16 misc;
rc = read_slot_enable (ctrl);
-
+
writeb(0, ctrl->hpc_reg + SLOT_SERR);
writel(0xFFFFFFC0L | ~rc, ctrl->hpc_reg + INT_MASK);
-
+
misc = readw(ctrl->hpc_reg + MISC);
misc &= 0xFFFD;
writew(misc, ctrl->hpc_reg + MISC);
@@ -1464,38 +1426,34 @@
}
}
- // Stop the notification mechanism
+ /* Stop the notification mechanism */
if (initialized)
cpqhp_event_stop_thread();
- //unmap the rom address
+ /* unmap the rom address */
if (cpqhp_rom_start)
iounmap(cpqhp_rom_start);
if (smbios_start)
iounmap(smbios_start);
}
-
-
static struct pci_device_id hpcd_pci_tbl[] = {
{
/* handle any PCI Hotplug controller */
.class = ((PCI_CLASS_SYSTEM_PCI_HOTPLUG << 8) | 0x00),
.class_mask = ~0,
-
+
/* no matter who makes it */
.vendor = PCI_ANY_ID,
.device = PCI_ANY_ID,
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
-
+
}, { /* end: all zeroes */ }
};
MODULE_DEVICE_TABLE(pci, hpcd_pci_tbl);
-
-
static struct pci_driver cpqhpc_driver = {
.name = "compaq_pci_hotplug",
.id_table = hpcd_pci_tbl,
@@ -1503,8 +1461,6 @@
/* remove: cpqhpc_remove_one, */
};
-
-
static int __init cpqhpc_init(void)
{
int result;
@@ -1518,7 +1474,6 @@
return result;
}
-
static void __exit cpqhpc_cleanup(void)
{
dbg("unload_cpqphpd()\n");
@@ -1529,8 +1484,5 @@
cpqhp_shutdown_debugfs();
}
-
module_init(cpqhpc_init);
module_exit(cpqhpc_cleanup);
-
-
diff --git a/drivers/pci/hotplug/cpqphp_ctrl.c b/drivers/pci/hotplug/cpqphp_ctrl.c
index cc227a8..2fa47af 100644
--- a/drivers/pci/hotplug/cpqphp_ctrl.c
+++ b/drivers/pci/hotplug/cpqphp_ctrl.c
@@ -81,14 +81,15 @@
for (hp_slot = 0; hp_slot < 6; hp_slot++) {
if (change & (0x1L << hp_slot)) {
- /**********************************
+ /*
* this one changed.
- **********************************/
+ */
func = cpqhp_slot_find(ctrl->bus,
(hp_slot + ctrl->slot_device_offset), 0);
/* this is the structure that tells the worker thread
- *what to do */
+ * what to do
+ */
taskInfo = &(ctrl->event_queue[ctrl->next_event]);
ctrl->next_event = (ctrl->next_event + 1) % 10;
taskInfo->hp_slot = hp_slot;
@@ -100,17 +101,17 @@
func->presence_save |= (temp_word >> (hp_slot + 7)) & 0x02;
if (ctrl->ctrl_int_comp & (0x1L << hp_slot)) {
- /**********************************
+ /*
* Switch opened
- **********************************/
+ */
func->switch_save = 0;
taskInfo->event_type = INT_SWITCH_OPEN;
} else {
- /**********************************
+ /*
* Switch closed
- **********************************/
+ */
func->switch_save = 0x10;
@@ -131,9 +132,8 @@
{
struct slot *slot = ctrl->slot;
- while (slot && (slot->device != device)) {
+ while (slot && (slot->device != device))
slot = slot->next;
- }
return slot;
}
@@ -152,17 +152,17 @@
if (!change)
return 0;
- /**********************************
+ /*
* Presence Change
- **********************************/
+ */
dbg("cpqsbd: Presence/Notify input change.\n");
dbg(" Changed bits are 0x%4.4x\n", change );
for (hp_slot = 0; hp_slot < 6; hp_slot++) {
if (change & (0x0101 << hp_slot)) {
- /**********************************
+ /*
* this one changed.
- **********************************/
+ */
func = cpqhp_slot_find(ctrl->bus,
(hp_slot + ctrl->slot_device_offset), 0);
@@ -177,22 +177,23 @@
return 0;
/* If the switch closed, must be a button
- * If not in button mode, nevermind */
+ * If not in button mode, nevermind
+ */
if (func->switch_save && (ctrl->push_button == 1)) {
temp_word = ctrl->ctrl_int_comp >> 16;
temp_byte = (temp_word >> hp_slot) & 0x01;
temp_byte |= (temp_word >> (hp_slot + 7)) & 0x02;
if (temp_byte != func->presence_save) {
- /**************************************
+ /*
* button Pressed (doesn't do anything)
- **************************************/
+ */
dbg("hp_slot %d button pressed\n", hp_slot);
taskInfo->event_type = INT_BUTTON_PRESS;
} else {
- /**********************************
+ /*
* button Released - TAKE ACTION!!!!
- **********************************/
+ */
dbg("hp_slot %d button released\n", hp_slot);
taskInfo->event_type = INT_BUTTON_RELEASE;
@@ -210,7 +211,8 @@
}
} else {
/* Switch is open, assume a presence change
- * Save the presence state */
+ * Save the presence state
+ */
temp_word = ctrl->ctrl_int_comp >> 16;
func->presence_save = (temp_word >> hp_slot) & 0x01;
func->presence_save |= (temp_word >> (hp_slot + 7)) & 0x02;
@@ -241,17 +243,17 @@
if (!change)
return 0;
- /**********************************
+ /*
* power fault
- **********************************/
+ */
info("power fault interrupt\n");
for (hp_slot = 0; hp_slot < 6; hp_slot++) {
if (change & (0x01 << hp_slot)) {
- /**********************************
+ /*
* this one changed.
- **********************************/
+ */
func = cpqhp_slot_find(ctrl->bus,
(hp_slot + ctrl->slot_device_offset), 0);
@@ -262,16 +264,16 @@
rc++;
if (ctrl->ctrl_int_comp & (0x00000100 << hp_slot)) {
- /**********************************
+ /*
* power fault Cleared
- **********************************/
+ */
func->status = 0x00;
taskInfo->event_type = INT_POWER_FAULT_CLEAR;
} else {
- /**********************************
+ /*
* power fault
- **********************************/
+ */
taskInfo->event_type = INT_POWER_FAULT;
if (ctrl->rev < 4) {
@@ -432,13 +434,15 @@
/* If we got here, there the bridge requires some of the resource, but
- * we may be able to split some off of the front */
+ * we may be able to split some off of the front
+ */
node = *head;
if (node->length & (alignment -1)) {
/* this one isn't an aligned length, so we'll make a new entry
- * and split it up. */
+ * and split it up.
+ */
split_node = kmalloc(sizeof(*split_node), GFP_KERNEL);
if (!split_node)
@@ -544,10 +548,10 @@
if (!(*head))
return NULL;
- if ( cpqhp_resource_sort_and_combine(head) )
+ if (cpqhp_resource_sort_and_combine(head))
return NULL;
- if ( sort_by_size(head) )
+ if (sort_by_size(head))
return NULL;
for (node = *head; node; node = node->next) {
@@ -556,7 +560,8 @@
if (node->base & (size - 1)) {
/* this one isn't base aligned properly
- * so we'll make a new entry and split it up */
+ * so we'll make a new entry and split it up
+ */
temp_dword = (node->base | (size-1)) + 1;
/* Short circuit if adjusted size is too small */
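
The split logic in these resource routines rounds an unaligned base up to the next size boundary with the expression (base | (size - 1)) + 1, which the driver only applies when the base is not already aligned. A tiny standalone check of that arithmetic (all values made up):

#include <stdio.h>

int main(void)
{
	unsigned int base = 0x1234;	/* made-up, not size-aligned */
	unsigned int size = 0x100;	/* must be a power of two */
	unsigned int aligned = (base | (size - 1)) + 1;

	printf("0x%x rounded up to a 0x%x boundary -> 0x%x\n",
	       base, size, aligned);	/* prints 0x1300 */
	return 0;
}
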
@@ -581,7 +586,8 @@
/* Don't need to check if too small since we already did */
if (node->length > size) {
/* this one is longer than we need
- * so we'll make a new entry and split it up */
+ * so we'll make a new entry and split it up
+ */
split_node = kmalloc(sizeof(*split_node), GFP_KERNEL);
if (!split_node)
@@ -601,7 +607,8 @@
continue;
/* If we got here, then it is the right size
- * Now take it out of the list and break */
+ * Now take it out of the list and break
+ */
if (*head == node) {
*head = node->next;
} else {
@@ -642,14 +649,16 @@
return NULL;
for (max = *head; max; max = max->next) {
- /* If not big enough we could probably just bail,
- * instead we'll continue to the next. */
+ /* If not big enough we could probably just bail,
+ * instead we'll continue to the next.
+ */
if (max->length < size)
continue;
if (max->base & (size - 1)) {
/* this one isn't base aligned properly
- * so we'll make a new entry and split it up */
+ * so we'll make a new entry and split it up
+ */
temp_dword = (max->base | (size-1)) + 1;
/* Short circuit if adjusted size is too small */
@@ -672,7 +681,8 @@
if ((max->base + max->length) & (size - 1)) {
/* this one isn't end aligned properly at the top
- * so we'll make a new entry and split it up */
+ * so we'll make a new entry and split it up
+ */
split_node = kmalloc(sizeof(*split_node), GFP_KERNEL);
if (!split_node)
@@ -744,7 +754,8 @@
if (node->base & (size - 1)) {
dbg("%s: not aligned\n", __func__);
/* this one isn't base aligned properly
- * so we'll make a new entry and split it up */
+ * so we'll make a new entry and split it up
+ */
temp_dword = (node->base | (size-1)) + 1;
/* Short circuit if adjusted size is too small */
@@ -769,7 +780,8 @@
if (node->length > size) {
dbg("%s: too big\n", __func__);
/* this one is longer than we need
- * so we'll make a new entry and split it up */
+ * so we'll make a new entry and split it up
+ */
split_node = kmalloc(sizeof(*split_node), GFP_KERNEL);
if (!split_node)
@@ -886,19 +898,19 @@
u32 Diff;
u32 temp_dword;
-
+
misc = readw(ctrl->hpc_reg + MISC);
- /***************************************
+ /*
* Check to see if it was our interrupt
- ***************************************/
+ */
if (!(misc & 0x000C)) {
return IRQ_NONE;
}
if (misc & 0x0004) {
- /**********************************
+ /*
* Serial Output interrupt Pending
- **********************************/
+ */
/* Clear the interrupt */
misc |= 0x0004;
@@ -961,11 +973,8 @@
struct pci_func *next;
new_slot = kzalloc(sizeof(*new_slot), GFP_KERNEL);
- if (new_slot == NULL) {
- /* I'm not dead yet!
- * You will be. */
+ if (new_slot == NULL)
return new_slot;
- }
new_slot->next = NULL;
new_slot->configured = 1;
@@ -996,10 +1005,8 @@
return 1;
next = cpqhp_slot_list[old_slot->bus];
-
- if (next == NULL) {
+ if (next == NULL)
return 1;
- }
if (next == old_slot) {
cpqhp_slot_list[old_slot->bus] = old_slot->next;
@@ -1008,9 +1015,8 @@
return 0;
}
- while ((next->next != old_slot) && (next->next != NULL)) {
+ while ((next->next != old_slot) && (next->next != NULL))
next = next->next;
- }
if (next->next == old_slot) {
next->next = old_slot->next;
@@ -1040,9 +1046,8 @@
for (tempBus = secondaryBus; tempBus <= subordinateBus; tempBus++) {
next = cpqhp_slot_list[tempBus];
- while (!slot_remove(next)) {
+ while (!slot_remove(next))
next = cpqhp_slot_list[tempBus];
- }
}
next = cpqhp_slot_list[bridge->bus];
@@ -1130,39 +1135,43 @@
u8 slot_power = readb(ctrl->hpc_reg + SLOT_POWER);
u16 reg16;
u32 leds = readl(ctrl->hpc_reg + LED_CONTROL);
-
+
if (ctrl->speed == adapter_speed)
return 0;
-
+
/* We don't allow freq/mode changes if we find another adapter running
- * in another slot on this controller */
+ * in another slot on this controller
+ */
for(slot = ctrl->slot; slot; slot = slot->next) {
- if (slot->device == (hp_slot + ctrl->slot_device_offset))
+ if (slot->device == (hp_slot + ctrl->slot_device_offset))
continue;
if (!slot->hotplug_slot || !slot->hotplug_slot->info)
continue;
- if (slot->hotplug_slot->info->adapter_status == 0)
+ if (slot->hotplug_slot->info->adapter_status == 0)
continue;
/* If another adapter is running on the same segment but at a
* lower speed/mode, we allow the new adapter to function at
- * this rate if supported */
- if (ctrl->speed < adapter_speed)
+ * this rate if supported
+ */
+ if (ctrl->speed < adapter_speed)
return 0;
return 1;
}
-
+
/* If the controller doesn't support freq/mode changes and the
- * controller is running at a higher mode, we bail */
+ * controller is running at a higher mode, we bail
+ */
if ((ctrl->speed > adapter_speed) && (!ctrl->pcix_speed_capability))
return 1;
-
+
/* But we allow the adapter to run at a lower rate if possible */
if ((ctrl->speed < adapter_speed) && (!ctrl->pcix_speed_capability))
return 0;
/* We try to set the max speed supported by both the adapter and
- * controller */
+ * controller
+ */
if (ctrl->speed_capability < adapter_speed) {
if (ctrl->speed == ctrl->speed_capability)
return 0;
@@ -1171,22 +1180,22 @@
writel(0x0L, ctrl->hpc_reg + LED_CONTROL);
writeb(0x00, ctrl->hpc_reg + SLOT_ENABLE);
-
- set_SOGO(ctrl);
+
+ set_SOGO(ctrl);
wait_for_ctrl_irq(ctrl);
-
+
if (adapter_speed != PCI_SPEED_133MHz_PCIX)
reg = 0xF5;
else
- reg = 0xF4;
+ reg = 0xF4;
pci_write_config_byte(ctrl->pci_dev, 0x41, reg);
-
+
reg16 = readw(ctrl->hpc_reg + NEXT_CURR_FREQ);
reg16 &= ~0x000F;
switch(adapter_speed) {
- case(PCI_SPEED_133MHz_PCIX):
+ case(PCI_SPEED_133MHz_PCIX):
reg = 0x75;
- reg16 |= 0xB;
+ reg16 |= 0xB;
break;
case(PCI_SPEED_100MHz_PCIX):
reg = 0x74;
@@ -1203,48 +1212,48 @@
default: /* 33MHz PCI 2.2 */
reg = 0x71;
break;
-
+
}
reg16 |= 0xB << 12;
writew(reg16, ctrl->hpc_reg + NEXT_CURR_FREQ);
-
- mdelay(5);
-
+
+ mdelay(5);
+
/* Reenable interrupts */
writel(0, ctrl->hpc_reg + INT_MASK);
- pci_write_config_byte(ctrl->pci_dev, 0x41, reg);
-
+ pci_write_config_byte(ctrl->pci_dev, 0x41, reg);
+
/* Restart state machine */
reg = ~0xF;
pci_read_config_byte(ctrl->pci_dev, 0x43, &reg);
pci_write_config_byte(ctrl->pci_dev, 0x43, reg);
-
+
/* Only if mode change...*/
if (((ctrl->speed == PCI_SPEED_66MHz) && (adapter_speed == PCI_SPEED_66MHz_PCIX)) ||
((ctrl->speed == PCI_SPEED_66MHz_PCIX) && (adapter_speed == PCI_SPEED_66MHz)))
set_SOGO(ctrl);
-
+
wait_for_ctrl_irq(ctrl);
mdelay(1100);
-
+
/* Restore LED/Slot state */
writel(leds, ctrl->hpc_reg + LED_CONTROL);
writeb(slot_power, ctrl->hpc_reg + SLOT_ENABLE);
-
+
set_SOGO(ctrl);
wait_for_ctrl_irq(ctrl);
ctrl->speed = adapter_speed;
slot = cpqhp_find_slot(ctrl, hp_slot + ctrl->slot_device_offset);
- info("Successfully changed frequency/mode for adapter in slot %d\n",
+ info("Successfully changed frequency/mode for adapter in slot %d\n",
slot->number);
return 0;
}
-/* the following routines constitute the bulk of the
- hotplug controller logic
+/* the following routines constitute the bulk of the
+ * hotplug controller logic
*/
@@ -1268,17 +1277,17 @@
hp_slot = func->device - ctrl->slot_device_offset;
- if (readl(ctrl->hpc_reg + INT_INPUT_CLEAR) & (0x01L << hp_slot)) {
- /**********************************
- * The switch is open.
- **********************************/
+ /*
+ * The switch is open.
+ */
+ if (readl(ctrl->hpc_reg + INT_INPUT_CLEAR) & (0x01L << hp_slot))
rc = INTERLOCK_OPEN;
- } else if (is_slot_enabled (ctrl, hp_slot)) {
- /**********************************
- * The board is already on
- **********************************/
+ /*
+ * The board is already on
+ */
+ else if (is_slot_enabled (ctrl, hp_slot))
rc = CARD_FUNCTIONING;
- } else {
+ else {
mutex_lock(&ctrl->crit_sect);
/* turn on board without attaching to the bus */
@@ -1299,7 +1308,7 @@
/* Wait for SOBS to be unset */
wait_for_ctrl_irq (ctrl);
-
+
adapter_speed = get_adapter_speed(ctrl, hp_slot);
if (ctrl->speed != adapter_speed)
if (set_controller_speed(ctrl, adapter_speed, hp_slot))
@@ -1352,7 +1361,8 @@
* Get slot won't work for devices behind
* bridges, but in this case it will always be
* called for the "base" bus/dev/func of an
- * adapter. */
+ * adapter.
+ */
mutex_lock(&ctrl->crit_sect);
@@ -1377,7 +1387,8 @@
* Get slot won't work for devices behind bridges, but
* in this case it will always be called for the "base"
- * bus/dev/func of an adapter. */
+ * bus/dev/func of an adapter.
+ */
mutex_lock(&ctrl->crit_sect);
@@ -1434,7 +1445,8 @@
wait_for_ctrl_irq (ctrl);
/* Change bits in slot power register to force another shift out
- * NOTE: this is to work around the timer bug */
+ * NOTE: this is to work around the timer bug
+ */
temp_byte = readb(ctrl->hpc_reg + SLOT_POWER);
writeb(0x00, ctrl->hpc_reg + SLOT_POWER);
writeb(temp_byte, ctrl->hpc_reg + SLOT_POWER);
@@ -1443,12 +1455,12 @@
/* Wait for SOBS to be unset */
wait_for_ctrl_irq (ctrl);
-
+
adapter_speed = get_adapter_speed(ctrl, hp_slot);
if (ctrl->speed != adapter_speed)
if (set_controller_speed(ctrl, adapter_speed, hp_slot))
rc = WRONG_BUS_FREQUENCY;
-
+
/* turn off board without attaching to the bus */
disable_slot_power (ctrl, hp_slot);
@@ -1461,7 +1473,7 @@
if (rc)
return rc;
-
+
p_slot = cpqhp_find_slot(ctrl, hp_slot + ctrl->slot_device_offset);
/* turn on board and blink green LED */
@@ -1521,7 +1533,7 @@
}
/* All F's is an empty slot or an invalid board */
- if (temp_register != 0xFFFFFFFF) { /* Check for a board in the slot */
+ if (temp_register != 0xFFFFFFFF) {
res_lists.io_head = ctrl->io_head;
res_lists.mem_head = ctrl->mem_head;
res_lists.p_mem_head = ctrl->p_mem_head;
@@ -1570,9 +1582,8 @@
index = 0;
do {
new_slot = cpqhp_slot_find(ctrl->bus, func->device, index++);
- if (new_slot && !new_slot->pci_dev) {
+ if (new_slot && !new_slot->pci_dev)
cpqhp_configure_device(ctrl, new_slot);
- }
} while (new_slot);
mutex_lock(&ctrl->crit_sect);
@@ -1859,12 +1870,12 @@
info(msg_button_on, p_slot->number);
}
mutex_lock(&ctrl->crit_sect);
-
+
dbg("blink green LED and turn off amber\n");
-
+
amber_LED_off (ctrl, hp_slot);
green_LED_blink (ctrl, hp_slot);
-
+
set_SOGO(ctrl);
/* Wait for SOBS to be unset */
@@ -1958,7 +1969,7 @@
if (cpqhp_process_SI(ctrl, func) != 0) {
amber_LED_on(ctrl, hp_slot);
green_LED_off(ctrl, hp_slot);
-
+
set_SOGO(ctrl);
/* Wait for SOBS to be unset */
@@ -2079,7 +2090,7 @@
struct pci_bus *pci_bus = ctrl->pci_bus;
int physical_slot=0;
- device = func->device;
+ device = func->device;
func = cpqhp_slot_find(ctrl->bus, device, index++);
p_slot = cpqhp_find_slot(ctrl, device);
if (p_slot) {
@@ -2113,9 +2124,8 @@
/* If the VGA Enable bit is set, remove isn't
* supported */
- if (BCR & PCI_BRIDGE_CTL_VGA) {
+ if (BCR & PCI_BRIDGE_CTL_VGA)
rc = REMOVE_NOT_SUPPORTED;
- }
}
}
@@ -2183,67 +2193,67 @@
num_of_slots = readb(ctrl->hpc_reg + SLOT_MASK) & 0x0f;
switch (test_num) {
- case 1:
- /* Do stuff here! */
+ case 1:
+ /* Do stuff here! */
- /* Do that funky LED thing */
- /* so we can restore them later */
- save_LED = readl(ctrl->hpc_reg + LED_CONTROL);
- work_LED = 0x01010101;
- switch_leds(ctrl, num_of_slots, &work_LED, 0);
- switch_leds(ctrl, num_of_slots, &work_LED, 1);
- switch_leds(ctrl, num_of_slots, &work_LED, 0);
- switch_leds(ctrl, num_of_slots, &work_LED, 1);
+ /* Do that funky LED thing */
+ /* so we can restore them later */
+ save_LED = readl(ctrl->hpc_reg + LED_CONTROL);
+ work_LED = 0x01010101;
+ switch_leds(ctrl, num_of_slots, &work_LED, 0);
+ switch_leds(ctrl, num_of_slots, &work_LED, 1);
+ switch_leds(ctrl, num_of_slots, &work_LED, 0);
+ switch_leds(ctrl, num_of_slots, &work_LED, 1);
- work_LED = 0x01010000;
+ work_LED = 0x01010000;
+ writel(work_LED, ctrl->hpc_reg + LED_CONTROL);
+ switch_leds(ctrl, num_of_slots, &work_LED, 0);
+ switch_leds(ctrl, num_of_slots, &work_LED, 1);
+ work_LED = 0x00000101;
+ writel(work_LED, ctrl->hpc_reg + LED_CONTROL);
+ switch_leds(ctrl, num_of_slots, &work_LED, 0);
+ switch_leds(ctrl, num_of_slots, &work_LED, 1);
+
+ work_LED = 0x01010000;
+ writel(work_LED, ctrl->hpc_reg + LED_CONTROL);
+ for (loop = 0; loop < num_of_slots; loop++) {
+ set_SOGO(ctrl);
+
+ /* Wait for SOGO interrupt */
+ wait_for_ctrl_irq (ctrl);
+
+ /* Get ready for next iteration */
+ long_delay((3*HZ)/10);
+ work_LED = work_LED >> 16;
writel(work_LED, ctrl->hpc_reg + LED_CONTROL);
- switch_leds(ctrl, num_of_slots, &work_LED, 0);
- switch_leds(ctrl, num_of_slots, &work_LED, 1);
- work_LED = 0x00000101;
- writel(work_LED, ctrl->hpc_reg + LED_CONTROL);
- switch_leds(ctrl, num_of_slots, &work_LED, 0);
- switch_leds(ctrl, num_of_slots, &work_LED, 1);
-
- work_LED = 0x01010000;
- writel(work_LED, ctrl->hpc_reg + LED_CONTROL);
- for (loop = 0; loop < num_of_slots; loop++) {
- set_SOGO(ctrl);
-
- /* Wait for SOGO interrupt */
- wait_for_ctrl_irq (ctrl);
-
- /* Get ready for next iteration */
- long_delay((3*HZ)/10);
- work_LED = work_LED >> 16;
- writel(work_LED, ctrl->hpc_reg + LED_CONTROL);
-
- set_SOGO(ctrl);
-
- /* Wait for SOGO interrupt */
- wait_for_ctrl_irq (ctrl);
-
- /* Get ready for next iteration */
- long_delay((3*HZ)/10);
- work_LED = work_LED << 16;
- writel(work_LED, ctrl->hpc_reg + LED_CONTROL);
- work_LED = work_LED << 1;
- writel(work_LED, ctrl->hpc_reg + LED_CONTROL);
- }
-
- /* put it back the way it was */
- writel(save_LED, ctrl->hpc_reg + LED_CONTROL);
set_SOGO(ctrl);
- /* Wait for SOBS to be unset */
+ /* Wait for SOGO interrupt */
wait_for_ctrl_irq (ctrl);
- break;
- case 2:
- /* Do other stuff here! */
- break;
- case 3:
- /* and more... */
- break;
+
+ /* Get ready for next iteration */
+ long_delay((3*HZ)/10);
+ work_LED = work_LED << 16;
+ writel(work_LED, ctrl->hpc_reg + LED_CONTROL);
+ work_LED = work_LED << 1;
+ writel(work_LED, ctrl->hpc_reg + LED_CONTROL);
+ }
+
+ /* put it back the way it was */
+ writel(save_LED, ctrl->hpc_reg + LED_CONTROL);
+
+ set_SOGO(ctrl);
+
+ /* Wait for SOBS to be unset */
+ wait_for_ctrl_irq (ctrl);
+ break;
+ case 2:
+ /* Do other stuff here! */
+ break;
+ case 3:
+ /* and more... */
+ break;
}
return 0;
}
@@ -2312,9 +2322,9 @@
while ((function < max_functions) && (!stop_it)) {
pci_bus_read_config_dword (ctrl->pci_bus, PCI_DEVFN(func->device, function), 0x00, &ID);
- if (ID == 0xFFFFFFFF) { /* There's nothing there. */
+ if (ID == 0xFFFFFFFF) {
function++;
- } else { /* There's something there */
+ } else {
/* Setup slot structure. */
new_slot = cpqhp_slot_create(func->bus);
@@ -2339,8 +2349,8 @@
/*
- Configuration logic that involves the hotplug data structures and
- their bookkeeping
+ * Configuration logic that involves the hotplug data structures and
+ * their bookkeeping
*/
@@ -2393,7 +2403,7 @@
if (rc)
return rc;
- if ((temp_byte & 0x7F) == PCI_HEADER_TYPE_BRIDGE) { /* PCI-PCI Bridge */
+ if ((temp_byte & 0x7F) == PCI_HEADER_TYPE_BRIDGE) {
/* set Primary bus */
dbg("set Primary bus = %d\n", func->bus);
rc = pci_bus_write_config_byte(pci_bus, devfn, PCI_PRIMARY_BUS, func->bus);
@@ -2484,7 +2494,8 @@
temp_resources.irqs = &irqs;
/* Make copies of the nodes we are going to pass down so that
- * if there is a problem,we can just use these to free resources */
+ * if there is a problem,we can just use these to free resources
+ */
hold_bus_node = kmalloc(sizeof(*hold_bus_node), GFP_KERNEL);
hold_IO_node = kmalloc(sizeof(*hold_IO_node), GFP_KERNEL);
hold_mem_node = kmalloc(sizeof(*hold_mem_node), GFP_KERNEL);
@@ -2556,7 +2567,8 @@
temp_word = (p_mem_node->base + p_mem_node->length - 1) >> 16;
rc = pci_bus_write_config_word (pci_bus, devfn, PCI_PREF_MEMORY_LIMIT, temp_word);
- /* Adjust this to compensate for extra adjustment in first loop */
+ /* Adjust this to compensate for extra adjustment in first loop
+ */
irqs.barber_pole--;
rc = 0;
@@ -2917,27 +2929,26 @@
} /* End of base register loop */
if (cpqhp_legacy_mode) {
/* Figure out which interrupt pin this function uses */
- rc = pci_bus_read_config_byte (pci_bus, devfn,
+ rc = pci_bus_read_config_byte (pci_bus, devfn,
PCI_INTERRUPT_PIN, &temp_byte);
/* If this function needs an interrupt and we are behind
* a bridge and the pin is tied to something that's
* already mapped, set this one the same */
- if (temp_byte && resources->irqs &&
- (resources->irqs->valid_INT &
+ if (temp_byte && resources->irqs &&
+ (resources->irqs->valid_INT &
(0x01 << ((temp_byte + resources->irqs->barber_pole - 1) & 0x03)))) {
/* We have to share with something already set up */
- IRQ = resources->irqs->interrupt[(temp_byte +
+ IRQ = resources->irqs->interrupt[(temp_byte +
resources->irqs->barber_pole - 1) & 0x03];
} else {
/* Program IRQ based on card type */
rc = pci_bus_read_config_byte (pci_bus, devfn, 0x0B, &class_code);
- if (class_code == PCI_BASE_CLASS_STORAGE) {
+ if (class_code == PCI_BASE_CLASS_STORAGE)
IRQ = cpqhp_disk_irq;
- } else {
+ else
IRQ = cpqhp_nic_irq;
- }
}
/* IRQ Line */
diff --git a/drivers/pci/hotplug/cpqphp_nvram.c b/drivers/pci/hotplug/cpqphp_nvram.c
index cb17488..76ba8a1 100644
--- a/drivers/pci/hotplug/cpqphp_nvram.c
+++ b/drivers/pci/hotplug/cpqphp_nvram.c
@@ -94,12 +94,13 @@
static void __iomem *compaq_int15_entry_point;
-static spinlock_t int15_lock; /* lock for ordering int15_bios_call() */
+/* lock for ordering int15_bios_call() */
+static spinlock_t int15_lock;
/* This is a series of function that deals with
- setting & getting the hotplug resource table in some environment variable.
-*/
+ * setting & getting the hotplug resource table in some environment variable.
+ */
/*
* We really shouldn't be doing this unless there is a _very_ good reason to!!!
@@ -113,7 +114,7 @@
if ((*used + 1) > *avail)
return(1);
-
+
*((u8*)*p_buffer) = value;
tByte = (u8**)p_buffer;
(*tByte)++;
@@ -170,10 +171,10 @@
unsigned long flags;
int op = operation;
int ret_val;
-
+
if (!compaq_int15_entry_point)
return -ENODEV;
-
+
spin_lock_irqsave(&int15_lock, flags);
__asm__ (
"xorl %%ebx,%%ebx\n" \
@@ -187,7 +188,7 @@
"D" (buffer), "m" (compaq_int15_entry_point)
: "%ebx", "%edx");
spin_unlock_irqrestore(&int15_lock, flags);
-
+
return((ret_val & 0xFF00) >> 8);
}
@@ -210,14 +211,16 @@
available = 1024;
- // Now load the EV
+ /* Now load the EV */
temp_dword = available;
rc = access_EV(READ_EV, "CQTHPS", evbuffer, &temp_dword);
evbuffer_length = temp_dword;
- // We're maintaining the resource lists so write FF to invalidate old info
+ /* We're maintaining the resource lists so write FF to invalidate old
+ * info
+ */
temp_dword = 1;
rc = access_EV(WRITE_EV, "CQTHPS", &temp_byte, &temp_dword);
@@ -263,13 +266,13 @@
p_EV_header = (struct ev_hrt_header *) pFill;
ctrl = cpqhp_ctrl_list;
-
- // The revision of this structure
+
+ /* The revision of this structure */
rc = add_byte( &pFill, 1 + ctrl->push_flag, &usedbytes, &available);
if (rc)
return(rc);
- // The number of controllers
+ /* The number of controllers */
rc = add_byte( &pFill, 1, &usedbytes, &available);
if (rc)
return(rc);
@@ -279,27 +282,27 @@
numCtrl++;
- // The bus number
+ /* The bus number */
rc = add_byte( &pFill, ctrl->bus, &usedbytes, &available);
if (rc)
return(rc);
- // The device Number
+ /* The device Number */
rc = add_byte( &pFill, PCI_SLOT(ctrl->pci_dev->devfn), &usedbytes, &available);
if (rc)
return(rc);
- // The function Number
+ /* The function Number */
rc = add_byte( &pFill, PCI_FUNC(ctrl->pci_dev->devfn), &usedbytes, &available);
if (rc)
return(rc);
- // Skip the number of available entries
+ /* Skip the number of available entries */
rc = add_dword( &pFill, 0, &usedbytes, &available);
if (rc)
return(rc);
- // Figure out memory Available
+ /* Figure out memory Available */
resNode = ctrl->mem_head;
@@ -308,12 +311,12 @@
while (resNode) {
loop ++;
- // base
+ /* base */
rc = add_dword( &pFill, resNode->base, &usedbytes, &available);
if (rc)
return(rc);
- // length
+ /* length */
rc = add_dword( &pFill, resNode->length, &usedbytes, &available);
if (rc)
return(rc);
@@ -321,10 +324,10 @@
resNode = resNode->next;
}
- // Fill in the number of entries
+ /* Fill in the number of entries */
p_ev_ctrl->mem_avail = loop;
- // Figure out prefetchable memory Available
+ /* Figure out prefetchable memory Available */
resNode = ctrl->p_mem_head;
@@ -333,12 +336,12 @@
while (resNode) {
loop ++;
- // base
+ /* base */
rc = add_dword( &pFill, resNode->base, &usedbytes, &available);
if (rc)
return(rc);
- // length
+ /* length */
rc = add_dword( &pFill, resNode->length, &usedbytes, &available);
if (rc)
return(rc);
@@ -346,10 +349,10 @@
resNode = resNode->next;
}
- // Fill in the number of entries
+ /* Fill in the number of entries */
p_ev_ctrl->p_mem_avail = loop;
- // Figure out IO Available
+ /* Figure out IO Available */
resNode = ctrl->io_head;
@@ -358,12 +361,12 @@
while (resNode) {
loop ++;
- // base
+ /* base */
rc = add_dword( &pFill, resNode->base, &usedbytes, &available);
if (rc)
return(rc);
- // length
+ /* length */
rc = add_dword( &pFill, resNode->length, &usedbytes, &available);
if (rc)
return(rc);
@@ -371,10 +374,10 @@
resNode = resNode->next;
}
- // Fill in the number of entries
+ /* Fill in the number of entries */
p_ev_ctrl->io_avail = loop;
- // Figure out bus Available
+ /* Figure out bus Available */
resNode = ctrl->bus_head;
@@ -383,12 +386,12 @@
while (resNode) {
loop ++;
- // base
+ /* base */
rc = add_dword( &pFill, resNode->base, &usedbytes, &available);
if (rc)
return(rc);
- // length
+ /* length */
rc = add_dword( &pFill, resNode->length, &usedbytes, &available);
if (rc)
return(rc);
@@ -396,15 +399,15 @@
resNode = resNode->next;
}
- // Fill in the number of entries
+ /* Fill in the number of entries */
p_ev_ctrl->bus_avail = loop;
ctrl = ctrl->next;
}
-
+
p_EV_header->num_of_ctrl = numCtrl;
- // Now store the EV
+ /* Now store the EV */
temp_dword = usedbytes;
@@ -449,20 +452,21 @@
struct ev_hrt_header *p_EV_header;
if (!evbuffer_init) {
- // Read the resource list information in from NVRAM
+ /* Read the resource list information in from NVRAM */
if (load_HRT(rom_start))
memset (evbuffer, 0, 1024);
evbuffer_init = 1;
}
- // If we saved information in NVRAM, use it now
+ /* If we saved information in NVRAM, use it now */
p_EV_header = (struct ev_hrt_header *) evbuffer;
- // The following code is for systems where version 1.0 of this
- // driver has been loaded, but doesn't support the hardware.
- // In that case, the driver would incorrectly store something
- // in NVRAM.
+ /* The following code is for systems where version 1.0 of this
+ * driver has been loaded, but doesn't support the hardware.
+ * In that case, the driver would incorrectly store something
+ * in NVRAM.
+ */
if ((p_EV_header->Version == 2) ||
((p_EV_header->Version == 1) && !ctrl->push_flag)) {
p_byte = &(p_EV_header->next);
@@ -479,7 +483,7 @@
function = p_ev_ctrl->function;
while ((bus != ctrl->bus) ||
- (device != PCI_SLOT(ctrl->pci_dev->devfn)) ||
+ (device != PCI_SLOT(ctrl->pci_dev->devfn)) ||
(function != PCI_FUNC(ctrl->pci_dev->devfn))) {
nummem = p_ev_ctrl->mem_avail;
numpmem = p_ev_ctrl->p_mem_avail;
@@ -491,7 +495,7 @@
if (p_byte > ((u8*)p_EV_header + evbuffer_length))
return 2;
- // Skip forward to the next entry
+ /* Skip forward to the next entry */
p_byte += (nummem + numpmem + numio + numbus) * 8;
if (p_byte > ((u8*)p_EV_header + evbuffer_length))
@@ -629,8 +633,9 @@
ctrl->bus_head = bus_node;
}
- // If all of the following fail, we don't have any resources for
- // hot plug add
+ /* If all of the following fail, we don't have any resources for
+ * hot plug add
+ */
rc = 1;
rc &= cpqhp_resource_sort_and_combine(&(ctrl->mem_head));
rc &= cpqhp_resource_sort_and_combine(&(ctrl->p_mem_head));
@@ -640,14 +645,14 @@
if (rc)
return(rc);
} else {
- if ((evbuffer[0] != 0) && (!ctrl->push_flag))
+ if ((evbuffer[0] != 0) && (!ctrl->push_flag))
return 1;
}
return 0;
}
-
+
int compaq_nvram_store (void __iomem *rom_start)
{
int rc = 1;
diff --git a/drivers/pci/hotplug/cpqphp_pci.c b/drivers/pci/hotplug/cpqphp_pci.c
index 6c0ed0f..6173b9a 100644
--- a/drivers/pci/hotplug/cpqphp_pci.c
+++ b/drivers/pci/hotplug/cpqphp_pci.c
@@ -37,7 +37,6 @@
#include "../pci.h"
#include "cpqphp.h"
#include "cpqphp_nvram.h"
-#include <asm/pci_x86.h>
u8 cpqhp_nic_irq;
@@ -82,14 +81,14 @@
}
-int cpqhp_configure_device (struct controller* ctrl, struct pci_func* func)
+int cpqhp_configure_device (struct controller* ctrl, struct pci_func* func)
{
unsigned char bus;
struct pci_bus *child;
int num;
if (func->pci_dev == NULL)
- func->pci_dev = pci_find_slot(func->bus, PCI_DEVFN(func->device, func->function));
+ func->pci_dev = pci_get_bus_and_slot(func->bus, PCI_DEVFN(func->device, func->function));
/* No pci device, we need to create it then */
if (func->pci_dev == NULL) {
@@ -99,7 +98,7 @@
if (num)
pci_bus_add_devices(ctrl->pci_dev->bus);
- func->pci_dev = pci_find_slot(func->bus, PCI_DEVFN(func->device, func->function));
+ func->pci_dev = pci_get_bus_and_slot(func->bus, PCI_DEVFN(func->device, func->function));
if (func->pci_dev == NULL) {
dbg("ERROR: pci_dev still null\n");
return 0;
@@ -112,20 +111,24 @@
pci_do_scan_bus(child);
}
+ pci_dev_put(func->pci_dev);
+
return 0;
}
-int cpqhp_unconfigure_device(struct pci_func* func)
+int cpqhp_unconfigure_device(struct pci_func* func)
{
int j;
-
+
dbg("%s: bus/dev/func = %x/%x/%x\n", __func__, func->bus, func->device, func->function);
for (j=0; j<8 ; j++) {
- struct pci_dev* temp = pci_find_slot(func->bus, PCI_DEVFN(func->device, j));
- if (temp)
+ struct pci_dev* temp = pci_get_bus_and_slot(func->bus, PCI_DEVFN(func->device, j));
+ if (temp) {
+ pci_dev_put(temp);
pci_remove_bus_device(temp);
+ }
}
return 0;
}
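
The switch from pci_find_slot() to pci_get_bus_and_slot() in these two functions means each successful lookup now holds a device reference that has to be balanced with pci_dev_put(). A minimal in-kernel-style sketch of the pattern (not compilable on its own; the helper name is made up):

/* Sketch: every pci_dev obtained via pci_get_bus_and_slot() must be
 * released with pci_dev_put() once the caller is finished with it. */
static void example_lookup(unsigned int bus, unsigned int devfn)
{
	struct pci_dev *dev = pci_get_bus_and_slot(bus, devfn);

	if (!dev)
		return;			/* nothing at this bus/devfn */

	/* ... use dev here ... */

	pci_dev_put(dev);		/* drop the reference taken above */
}
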
@@ -178,32 +181,22 @@
if (!rc)
return !rc;
- // set the Edge Level Control Register (ELCR)
+ /* set the Edge Level Control Register (ELCR) */
temp_word = inb(0x4d0);
temp_word |= inb(0x4d1) << 8;
temp_word |= 0x01 << irq_num;
- // This should only be for x86 as it sets the Edge Level Control Register
- outb((u8) (temp_word & 0xFF), 0x4d0);
- outb((u8) ((temp_word & 0xFF00) >> 8), 0x4d1);
- rc = 0;
- }
+ /* This should only be for x86 as it sets the Edge Level
+ * Control Register
+ */
+ outb((u8) (temp_word & 0xFF), 0x4d0);
+ outb((u8) ((temp_word & 0xFF00) >> 8), 0x4d1);
+ rc = 0;
+ }
return rc;
}
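
The block above programs the Edge/Level Control Register (ELCR) at I/O ports 0x4d0/0x4d1, setting bit irq_num to mark that interrupt level-triggered. A standalone illustration of the byte split (the register contents and IRQ number are made up):

#include <stdio.h>

int main(void)
{
	unsigned short elcr = 0x0420;	/* made-up current ELCR contents */
	int irq_num = 11;		/* made-up PCI interrupt line */

	elcr |= 1 << irq_num;		/* mark the IRQ level-triggered */

	/* low byte goes to port 0x4d0, high byte to port 0x4d1 */
	printf("outb 0x%02x -> 0x4d0\n", (unsigned)(elcr & 0xFF));
	printf("outb 0x%02x -> 0x4d1\n", (unsigned)((elcr & 0xFF00) >> 8));
	return 0;
}
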
-/*
- * WTF??? This function isn't in the code, yet a function calls it, but the
- * compiler optimizes it away? strange. Here as a placeholder to keep the
- * compiler happy.
- */
-static int PCI_ScanBusNonBridge (u8 bus, u8 device)
-{
- return 0;
-}
-
static int PCI_ScanBusForNonBridge(struct controller *ctrl, u8 bus_num, u8 * dev_num)
{
u16 tdevice;
@@ -213,11 +206,11 @@
ctrl->pci_bus->number = bus_num;
for (tdevice = 0; tdevice < 0xFF; tdevice++) {
- //Scan for access first
+ /* Scan for access first */
if (PCI_RefinedAccessConfig(ctrl->pci_bus, tdevice, 0x08, &work) == -1)
continue;
dbg("Looking for nonbridge bus_num %d dev_num %d\n", bus_num, tdevice);
- //Yep we got one. Not a bridge ?
+ /* Yep we got one. Not a bridge ? */
if ((work >> 8) != PCI_TO_PCI_BRIDGE_CLASS) {
*dev_num = tdevice;
dbg("found it !\n");
@@ -225,16 +218,16 @@
}
}
for (tdevice = 0; tdevice < 0xFF; tdevice++) {
- //Scan for access first
+ /* Scan for access first */
if (PCI_RefinedAccessConfig(ctrl->pci_bus, tdevice, 0x08, &work) == -1)
continue;
dbg("Looking for bridge bus_num %d dev_num %d\n", bus_num, tdevice);
- //Yep we got one. bridge ?
+ /* Yep we got one. bridge ? */
if ((work >> 8) == PCI_TO_PCI_BRIDGE_CLASS) {
pci_bus_read_config_byte (ctrl->pci_bus, PCI_DEVFN(tdevice, 0), PCI_SECONDARY_BUS, &tbus);
+ /* XXX: no recursion, wtf? */
dbg("Recurse on bus_num %d tdevice %d\n", tbus, tdevice);
- if (PCI_ScanBusNonBridge(tbus, tdevice) == 0)
- return 0;
+ return 0;
}
}
@@ -244,39 +237,23 @@
static int PCI_GetBusDevHelper(struct controller *ctrl, u8 *bus_num, u8 *dev_num, u8 slot, u8 nobridge)
{
- struct irq_routing_table *PCIIRQRoutingInfoLength;
- long len;
- long loop;
+ int loop, len;
u32 work;
-
u8 tbus, tdevice, tslot;
- PCIIRQRoutingInfoLength = pcibios_get_irq_routing_table();
- if (!PCIIRQRoutingInfoLength)
- return -1;
-
- len = (PCIIRQRoutingInfoLength->size -
- sizeof(struct irq_routing_table)) / sizeof(struct irq_info);
- // Make sure I got at least one entry
- if (len == 0) {
- kfree(PCIIRQRoutingInfoLength );
- return -1;
- }
-
+ len = cpqhp_routing_table_length();
for (loop = 0; loop < len; ++loop) {
- tbus = PCIIRQRoutingInfoLength->slots[loop].bus;
- tdevice = PCIIRQRoutingInfoLength->slots[loop].devfn;
- tslot = PCIIRQRoutingInfoLength->slots[loop].slot;
+ tbus = cpqhp_routing_table->slots[loop].bus;
+ tdevice = cpqhp_routing_table->slots[loop].devfn;
+ tslot = cpqhp_routing_table->slots[loop].slot;
if (tslot == slot) {
*bus_num = tbus;
*dev_num = tdevice;
ctrl->pci_bus->number = tbus;
pci_bus_read_config_dword (ctrl->pci_bus, *dev_num, PCI_VENDOR_ID, &work);
- if (!nobridge || (work == 0xffffffff)) {
- kfree(PCIIRQRoutingInfoLength );
+ if (!nobridge || (work == 0xffffffff))
return 0;
- }
dbg("bus_num %d devfn %d\n", *bus_num, *dev_num);
pci_bus_read_config_dword (ctrl->pci_bus, *dev_num, PCI_CLASS_REVISION, &work);
@@ -287,28 +264,26 @@
dbg("Scan bus for Non Bridge: bus %d\n", tbus);
if (PCI_ScanBusForNonBridge(ctrl, tbus, dev_num) == 0) {
*bus_num = tbus;
- kfree(PCIIRQRoutingInfoLength );
return 0;
}
- } else {
- kfree(PCIIRQRoutingInfoLength );
+ } else
return 0;
- }
-
}
}
- kfree(PCIIRQRoutingInfoLength );
return -1;
}
int cpqhp_get_bus_dev (struct controller *ctrl, u8 * bus_num, u8 * dev_num, u8 slot)
{
- return PCI_GetBusDevHelper(ctrl, bus_num, dev_num, slot, 0); //plain (bridges allowed)
+ /* plain (bridges allowed) */
+ return PCI_GetBusDevHelper(ctrl, bus_num, dev_num, slot, 0);
}
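
PCI_GetBusDevHelper() now walks a routing table that the driver caches once instead of calling pcibios_get_irq_routing_table() on every lookup. The cpqhp_routing_table pointer and cpqhp_routing_table_length() helper come from elsewhere in this series; one plausible sketch of the length helper, assuming the irq_routing_table/irq_info layout referenced by the removed code, is:

/* Sketch only: the real definitions live in the driver headers added by
 * this series; this just shows the intended arithmetic. */
static struct irq_routing_table *cpqhp_routing_table;

static int cpqhp_routing_table_length(void)
{
	if (!cpqhp_routing_table)
		return 0;
	return (cpqhp_routing_table->size - sizeof(struct irq_routing_table)) /
		sizeof(struct irq_info);
}
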
-/* More PCI configuration routines; this time centered around hotplug controller */
+/* More PCI configuration routines; this time centered around hotplug
+ * controller
+ */
/*
@@ -339,12 +314,12 @@
int stop_it;
int index;
- // Decide which slots are supported
+ /* Decide which slots are supported */
if (is_hot_plug) {
- //*********************************
- // is_hot_plug is the slot mask
- //*********************************
+ /*
+ * is_hot_plug is the slot mask
+ */
FirstSupported = is_hot_plug >> 4;
LastSupported = FirstSupported + (is_hot_plug & 0x0F) - 1;
} else {
@@ -352,123 +327,127 @@
LastSupported = 0x1F;
}
- // Save PCI configuration space for all devices in supported slots
+ /* Save PCI configuration space for all devices in supported slots */
ctrl->pci_bus->number = busnumber;
for (device = FirstSupported; device <= LastSupported; device++) {
ID = 0xFFFFFFFF;
- rc = pci_bus_read_config_dword (ctrl->pci_bus, PCI_DEVFN(device, 0), PCI_VENDOR_ID, &ID);
+ rc = pci_bus_read_config_dword(ctrl->pci_bus, PCI_DEVFN(device, 0), PCI_VENDOR_ID, &ID);
- if (ID != 0xFFFFFFFF) { // device in slot
- rc = pci_bus_read_config_byte (ctrl->pci_bus, PCI_DEVFN(device, 0), 0x0B, &class_code);
- if (rc)
- return rc;
-
- rc = pci_bus_read_config_byte (ctrl->pci_bus, PCI_DEVFN(device, 0), PCI_HEADER_TYPE, &header_type);
- if (rc)
- return rc;
-
- // If multi-function device, set max_functions to 8
- if (header_type & 0x80)
- max_functions = 8;
- else
- max_functions = 1;
-
- function = 0;
-
- do {
- DevError = 0;
-
- if ((header_type & 0x7F) == PCI_HEADER_TYPE_BRIDGE) { // P-P Bridge
- // Recurse the subordinate bus
- // get the subordinate bus number
- rc = pci_bus_read_config_byte (ctrl->pci_bus, PCI_DEVFN(device, function), PCI_SECONDARY_BUS, &secondary_bus);
- if (rc) {
- return rc;
- } else {
- sub_bus = (int) secondary_bus;
-
- // Save secondary bus cfg spc
- // with this recursive call.
- rc = cpqhp_save_config(ctrl, sub_bus, 0);
- if (rc)
- return rc;
- ctrl->pci_bus->number = busnumber;
- }
- }
-
- index = 0;
- new_slot = cpqhp_slot_find(busnumber, device, index++);
- while (new_slot &&
- (new_slot->function != (u8) function))
- new_slot = cpqhp_slot_find(busnumber, device, index++);
-
- if (!new_slot) {
- // Setup slot structure.
- new_slot = cpqhp_slot_create(busnumber);
-
- if (new_slot == NULL)
- return(1);
- }
+ if (ID == 0xFFFFFFFF) {
+ if (is_hot_plug) {
+ /* Setup slot structure with entry for empty
+ * slot
+ */
+ new_slot = cpqhp_slot_create(busnumber);
+ if (new_slot == NULL)
+ return 1;
new_slot->bus = (u8) busnumber;
new_slot->device = (u8) device;
- new_slot->function = (u8) function;
- new_slot->is_a_board = 1;
- new_slot->switch_save = 0x10;
- // In case of unsupported board
- new_slot->status = DevError;
- new_slot->pci_dev = pci_find_slot(new_slot->bus, (new_slot->device << 3) | new_slot->function);
+ new_slot->function = 0;
+ new_slot->is_a_board = 0;
+ new_slot->presence_save = 0;
+ new_slot->switch_save = 0;
+ }
+ continue;
+ }
- for (cloop = 0; cloop < 0x20; cloop++) {
- rc = pci_bus_read_config_dword (ctrl->pci_bus, PCI_DEVFN(device, function), cloop << 2, (u32 *) & (new_slot-> config_space [cloop]));
+ rc = pci_bus_read_config_byte(ctrl->pci_bus, PCI_DEVFN(device, 0), 0x0B, &class_code);
+ if (rc)
+ return rc;
+
+ rc = pci_bus_read_config_byte(ctrl->pci_bus, PCI_DEVFN(device, 0), PCI_HEADER_TYPE, &header_type);
+ if (rc)
+ return rc;
+
+ /* If multi-function device, set max_functions to 8 */
+ if (header_type & 0x80)
+ max_functions = 8;
+ else
+ max_functions = 1;
+
+ function = 0;
+
+ do {
+ DevError = 0;
+ if ((header_type & 0x7F) == PCI_HEADER_TYPE_BRIDGE) {
+ /* Recurse the subordinate bus
+ * get the subordinate bus number
+ */
+ rc = pci_bus_read_config_byte(ctrl->pci_bus, PCI_DEVFN(device, function), PCI_SECONDARY_BUS, &secondary_bus);
+ if (rc) {
+ return rc;
+ } else {
+ sub_bus = (int) secondary_bus;
+
+ /* Save secondary bus cfg spc
+ * with this recursive call.
+ */
+ rc = cpqhp_save_config(ctrl, sub_bus, 0);
if (rc)
return rc;
+ ctrl->pci_bus->number = busnumber;
}
+ }
- function++;
+ index = 0;
+ new_slot = cpqhp_slot_find(busnumber, device, index++);
+ while (new_slot &&
+ (new_slot->function != (u8) function))
+ new_slot = cpqhp_slot_find(busnumber, device, index++);
- stop_it = 0;
-
- // this loop skips to the next present function
- // reading in Class Code and Header type.
-
- while ((function < max_functions)&&(!stop_it)) {
- rc = pci_bus_read_config_dword (ctrl->pci_bus, PCI_DEVFN(device, function), PCI_VENDOR_ID, &ID);
- if (ID == 0xFFFFFFFF) { // nothing there.
- function++;
- } else { // Something there
- rc = pci_bus_read_config_byte (ctrl->pci_bus, PCI_DEVFN(device, function), 0x0B, &class_code);
- if (rc)
- return rc;
-
- rc = pci_bus_read_config_byte (ctrl->pci_bus, PCI_DEVFN(device, function), PCI_HEADER_TYPE, &header_type);
- if (rc)
- return rc;
-
- stop_it++;
- }
- }
-
- } while (function < max_functions);
- } // End of IF (device in slot?)
- else if (is_hot_plug) {
- // Setup slot structure with entry for empty slot
- new_slot = cpqhp_slot_create(busnumber);
-
- if (new_slot == NULL) {
- return(1);
+ if (!new_slot) {
+ /* Setup slot structure. */
+ new_slot = cpqhp_slot_create(busnumber);
+ if (new_slot == NULL)
+ return 1;
}
new_slot->bus = (u8) busnumber;
new_slot->device = (u8) device;
- new_slot->function = 0;
- new_slot->is_a_board = 0;
- new_slot->presence_save = 0;
- new_slot->switch_save = 0;
- }
- } // End of FOR loop
+ new_slot->function = (u8) function;
+ new_slot->is_a_board = 1;
+ new_slot->switch_save = 0x10;
+ /* In case of unsupported board */
+ new_slot->status = DevError;
+ new_slot->pci_dev = pci_get_bus_and_slot(new_slot->bus, (new_slot->device << 3) | new_slot->function);
- return(0);
+ for (cloop = 0; cloop < 0x20; cloop++) {
+ rc = pci_bus_read_config_dword(ctrl->pci_bus, PCI_DEVFN(device, function), cloop << 2, (u32 *) &(new_slot->config_space[cloop]));
+ if (rc)
+ return rc;
+ }
+
+ pci_dev_put(new_slot->pci_dev);
+
+ function++;
+
+ stop_it = 0;
+
+ /* this loop skips to the next present function
+ * reading in Class Code and Header type.
+ */
+ while ((function < max_functions) && (!stop_it)) {
+ rc = pci_bus_read_config_dword(ctrl->pci_bus, PCI_DEVFN(device, function), PCI_VENDOR_ID, &ID);
+ if (ID == 0xFFFFFFFF) {
+ function++;
+ continue;
+ }
+ rc = pci_bus_read_config_byte(ctrl->pci_bus, PCI_DEVFN(device, function), 0x0B, &class_code);
+ if (rc)
+ return rc;
+
+ rc = pci_bus_read_config_byte(ctrl->pci_bus, PCI_DEVFN(device, function), PCI_HEADER_TYPE, &header_type);
+ if (rc)
+ return rc;
+
+ stop_it++;
+ }
+
+ } while (function < max_functions);
+ } /* End of FOR loop */
+
+ return 0;
}
@@ -489,7 +468,7 @@
u8 secondary_bus;
int sub_bus;
int max_functions;
- int function;
+ int function = 0;
int cloop = 0;
int stop_it;
@@ -498,63 +477,58 @@
ctrl->pci_bus->number = new_slot->bus;
pci_bus_read_config_dword (ctrl->pci_bus, PCI_DEVFN(new_slot->device, 0), PCI_VENDOR_ID, &ID);
- if (ID != 0xFFFFFFFF) { // device in slot
- pci_bus_read_config_byte (ctrl->pci_bus, PCI_DEVFN(new_slot->device, 0), 0x0B, &class_code);
- pci_bus_read_config_byte (ctrl->pci_bus, PCI_DEVFN(new_slot->device, 0), PCI_HEADER_TYPE, &header_type);
-
- if (header_type & 0x80) // Multi-function device
- max_functions = 8;
- else
- max_functions = 1;
-
- function = 0;
-
- do {
- if ((header_type & 0x7F) == PCI_HEADER_TYPE_BRIDGE) { // PCI-PCI Bridge
- // Recurse the subordinate bus
- pci_bus_read_config_byte (ctrl->pci_bus, PCI_DEVFN(new_slot->device, function), PCI_SECONDARY_BUS, &secondary_bus);
-
- sub_bus = (int) secondary_bus;
-
- // Save the config headers for the secondary bus.
- rc = cpqhp_save_config(ctrl, sub_bus, 0);
- if (rc)
- return(rc);
- ctrl->pci_bus->number = new_slot->bus;
-
- } // End of IF
-
- new_slot->status = 0;
-
- for (cloop = 0; cloop < 0x20; cloop++) {
- pci_bus_read_config_dword (ctrl->pci_bus, PCI_DEVFN(new_slot->device, function), cloop << 2, (u32 *) & (new_slot-> config_space [cloop]));
- }
-
- function++;
-
- stop_it = 0;
-
- // this loop skips to the next present function
- // reading in the Class Code and the Header type.
-
- while ((function < max_functions) && (!stop_it)) {
- pci_bus_read_config_dword (ctrl->pci_bus, PCI_DEVFN(new_slot->device, function), PCI_VENDOR_ID, &ID);
-
- if (ID == 0xFFFFFFFF) { // nothing there.
- function++;
- } else { // Something there
- pci_bus_read_config_byte (ctrl->pci_bus, PCI_DEVFN(new_slot->device, function), 0x0B, &class_code);
-
- pci_bus_read_config_byte (ctrl->pci_bus, PCI_DEVFN(new_slot->device, function), PCI_HEADER_TYPE, &header_type);
-
- stop_it++;
- }
- }
-
- } while (function < max_functions);
- } // End of IF (device in slot?)
- else {
+ if (ID == 0xFFFFFFFF)
return 2;
+
+ pci_bus_read_config_byte(ctrl->pci_bus, PCI_DEVFN(new_slot->device, 0), 0x0B, &class_code);
+ pci_bus_read_config_byte(ctrl->pci_bus, PCI_DEVFN(new_slot->device, 0), PCI_HEADER_TYPE, &header_type);
+
+ if (header_type & 0x80) /* Multi-function device */
+ max_functions = 8;
+ else
+ max_functions = 1;
+
+ while (function < max_functions) {
+ if ((header_type & 0x7F) == PCI_HEADER_TYPE_BRIDGE) {
+ /* Recurse the subordinate bus */
+ pci_bus_read_config_byte (ctrl->pci_bus, PCI_DEVFN(new_slot->device, function), PCI_SECONDARY_BUS, &secondary_bus);
+
+ sub_bus = (int) secondary_bus;
+
+ /* Save the config headers for the secondary
+ * bus.
+ */
+ rc = cpqhp_save_config(ctrl, sub_bus, 0);
+ if (rc)
+ return(rc);
+ ctrl->pci_bus->number = new_slot->bus;
+
+ }
+
+ new_slot->status = 0;
+
+ for (cloop = 0; cloop < 0x20; cloop++)
+ pci_bus_read_config_dword(ctrl->pci_bus, PCI_DEVFN(new_slot->device, function), cloop << 2, (u32 *) &(new_slot->config_space[cloop]));
+
+ function++;
+
+ stop_it = 0;
+
+ /* this loop skips to the next present function
+ * reading in the Class Code and the Header type.
+ */
+ while ((function < max_functions) && (!stop_it)) {
+ pci_bus_read_config_dword(ctrl->pci_bus, PCI_DEVFN(new_slot->device, function), PCI_VENDOR_ID, &ID);
+
+ if (ID == 0xFFFFFFFF)
+ function++;
+ else {
+ pci_bus_read_config_byte(ctrl->pci_bus, PCI_DEVFN(new_slot->device, function), 0x0B, &class_code);
+ pci_bus_read_config_byte(ctrl->pci_bus, PCI_DEVFN(new_slot->device, function), PCI_HEADER_TYPE, &header_type);
+ stop_it++;
+ }
+ }
+
}
return 0;
@@ -590,11 +564,10 @@
pci_bus->number = func->bus;
devfn = PCI_DEVFN(func->device, func->function);
- // Check for Bridge
+ /* Check for Bridge */
pci_bus_read_config_byte (pci_bus, devfn, PCI_HEADER_TYPE, &header_type);
if ((header_type & 0x7F) == PCI_HEADER_TYPE_BRIDGE) {
- // PCI-PCI Bridge
pci_bus_read_config_byte (pci_bus, devfn, PCI_SECONDARY_BUS, &secondary_bus);
sub_bus = (int) secondary_bus;
@@ -610,23 +583,27 @@
}
pci_bus->number = func->bus;
- //FIXME: this loop is duplicated in the non-bridge case. The two could be rolled together
- // Figure out IO and memory base lengths
+ /* FIXME: this loop is duplicated in the non-bridge
+ * case. The two could be rolled together Figure out
+ * IO and memory base lengths
+ */
for (cloop = 0x10; cloop <= 0x14; cloop += 4) {
temp_register = 0xFFFFFFFF;
pci_bus_write_config_dword (pci_bus, devfn, cloop, temp_register);
pci_bus_read_config_dword (pci_bus, devfn, cloop, &base);
-
- if (base) { // If this register is implemented
+ /* If this register is implemented */
+ if (base) {
if (base & 0x01L) {
- // IO base
- // set base = amount of IO space requested
+ /* IO base
+ * set base = amount of IO space
+ * requested
+ */
base = base & 0xFFFFFFFE;
base = (~base) + 1;
type = 1;
} else {
- // memory base
+ /* memory base */
base = base & 0xFFFFFFF0;
base = (~base) + 1;
@@ -637,32 +614,36 @@
type = 0;
}
- // Save information in slot structure
+ /* Save information in slot structure */
func->base_length[(cloop - 0x10) >> 2] =
base;
func->base_type[(cloop - 0x10) >> 2] = type;
- } // End of base register loop
+ } /* End of base register loop */
-
- } else if ((header_type & 0x7F) == 0x00) { // PCI-PCI Bridge
- // Figure out IO and memory base lengths
+ } else if ((header_type & 0x7F) == 0x00) {
+ /* Figure out IO and memory base lengths */
for (cloop = 0x10; cloop <= 0x24; cloop += 4) {
temp_register = 0xFFFFFFFF;
pci_bus_write_config_dword (pci_bus, devfn, cloop, temp_register);
pci_bus_read_config_dword (pci_bus, devfn, cloop, &base);
- if (base) { // If this register is implemented
+ /* If this register is implemented */
+ if (base) {
if (base & 0x01L) {
- // IO base
- // base = amount of IO space requested
+ /* IO base
+ * base = amount of IO space
+ * requested
+ */
base = base & 0xFFFFFFFE;
base = (~base) + 1;
type = 1;
} else {
- // memory base
- // base = amount of memory space requested
+ /* memory base
+ * base = amount of memory
+ * space requested
+ */
base = base & 0xFFFFFFF0;
base = (~base) + 1;
@@ -673,16 +654,16 @@
type = 0;
}
- // Save information in slot structure
+ /* Save information in slot structure */
func->base_length[(cloop - 0x10) >> 2] = base;
func->base_type[(cloop - 0x10) >> 2] = type;
- } // End of base register loop
+ } /* End of base register loop */
- } else { // Some other unknown header type
+ } else { /* Some other unknown header type */
}
- // find the next device in this slot
+ /* find the next device in this slot */
func = cpqhp_slot_find(func->bus, func->device, index++);
}
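
The BAR probing done throughout this file writes 0xFFFFFFFF to a base address register and reads it back: the bits the device hard-wires to zero give the decode mask, and masking off the type bits then taking (~x) + 1 recovers the region size. A small standalone illustration of that arithmetic (the read-back value is made up):

#include <stdio.h>

int main(void)
{
	/* Made-up value read back after writing 0xFFFFFFFF to a memory BAR */
	unsigned int readback = 0xFFFF0008;
	unsigned int size;

	size = readback & 0xFFFFFFF0;	/* drop the memory BAR type bits */
	size = (~size) + 1;		/* two's complement -> region size */

	printf("BAR requests 0x%x bytes\n", size);	/* prints 0x10000 (64K) */
	return 0;
}
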
@@ -728,18 +709,18 @@
pci_bus->number = func->bus;
devfn = PCI_DEVFN(func->device, func->function);
- // Save the command register
+ /* Save the command register */
pci_bus_read_config_word(pci_bus, devfn, PCI_COMMAND, &save_command);
- // disable card
+ /* disable card */
command = 0x00;
pci_bus_write_config_word(pci_bus, devfn, PCI_COMMAND, command);
- // Check for Bridge
+ /* Check for Bridge */
pci_bus_read_config_byte(pci_bus, devfn, PCI_HEADER_TYPE, &header_type);
- if ((header_type & 0x7F) == PCI_HEADER_TYPE_BRIDGE) { // PCI-PCI Bridge
- // Clear Bridge Control Register
+ if ((header_type & 0x7F) == PCI_HEADER_TYPE_BRIDGE) {
+ /* Clear Bridge Control Register */
command = 0x00;
pci_bus_write_config_word(pci_bus, devfn, PCI_BRIDGE_CONTROL, command);
pci_bus_read_config_byte(pci_bus, devfn, PCI_SECONDARY_BUS, &secondary_bus);
@@ -755,7 +736,7 @@
bus_node->next = func->bus_head;
func->bus_head = bus_node;
- // Save IO base and Limit registers
+ /* Save IO base and Limit registers */
pci_bus_read_config_byte(pci_bus, devfn, PCI_IO_BASE, &b_base);
pci_bus_read_config_byte(pci_bus, devfn, PCI_IO_LIMIT, &b_length);
@@ -771,7 +752,7 @@
func->io_head = io_node;
}
- // Save memory base and Limit registers
+ /* Save memory base and Limit registers */
pci_bus_read_config_word(pci_bus, devfn, PCI_MEMORY_BASE, &w_base);
pci_bus_read_config_word(pci_bus, devfn, PCI_MEMORY_LIMIT, &w_length);
@@ -787,7 +768,7 @@
func->mem_head = mem_node;
}
- // Save prefetchable memory base and Limit registers
+ /* Save prefetchable memory base and Limit registers */
pci_bus_read_config_word(pci_bus, devfn, PCI_PREF_MEMORY_BASE, &w_base);
pci_bus_read_config_word(pci_bus, devfn, PCI_PREF_MEMORY_LIMIT, &w_length);
@@ -802,7 +783,7 @@
p_mem_node->next = func->p_mem_head;
func->p_mem_head = p_mem_node;
}
- // Figure out IO and memory base lengths
+ /* Figure out IO and memory base lengths */
for (cloop = 0x10; cloop <= 0x14; cloop += 4) {
pci_bus_read_config_dword (pci_bus, devfn, cloop, &save_base);
@@ -812,11 +793,14 @@
temp_register = base;
- if (base) { // If this register is implemented
+ /* If this register is implemented */
+ if (base) {
if (((base & 0x03L) == 0x01)
&& (save_command & 0x01)) {
- // IO base
- // set temp_register = amount of IO space requested
+ /* IO base
+ * set temp_register = amount
+ * of IO space requested
+ */
temp_register = base & 0xFFFFFFFE;
temp_register = (~temp_register) + 1;
@@ -834,7 +818,7 @@
} else
if (((base & 0x0BL) == 0x08)
&& (save_command & 0x02)) {
- // prefetchable memory base
+ /* prefetchable memory base */
temp_register = base & 0xFFFFFFF0;
temp_register = (~temp_register) + 1;
@@ -851,7 +835,7 @@
} else
if (((base & 0x0BL) == 0x00)
&& (save_command & 0x02)) {
- // prefetchable memory base
+ /* prefetchable memory base */
temp_register = base & 0xFFFFFFF0;
temp_register = (~temp_register) + 1;
@@ -868,9 +852,10 @@
} else
return(1);
}
- } // End of base register loop
- } else if ((header_type & 0x7F) == 0x00) { // Standard header
- // Figure out IO and memory base lengths
+ } /* End of base register loop */
+ /* Standard header */
+ } else if ((header_type & 0x7F) == 0x00) {
+ /* Figure out IO and memory base lengths */
for (cloop = 0x10; cloop <= 0x24; cloop += 4) {
pci_bus_read_config_dword(pci_bus, devfn, cloop, &save_base);
@@ -880,11 +865,14 @@
temp_register = base;
- if (base) { // If this register is implemented
+ /* If this register is implemented */
+ if (base) {
if (((base & 0x03L) == 0x01)
&& (save_command & 0x01)) {
- // IO base
- // set temp_register = amount of IO space requested
+ /* IO base
+ * set temp_register = amount
+ * of IO space requested
+ */
temp_register = base & 0xFFFFFFFE;
temp_register = (~temp_register) + 1;
@@ -901,7 +889,7 @@
} else
if (((base & 0x0BL) == 0x08)
&& (save_command & 0x02)) {
- // prefetchable memory base
+ /* prefetchable memory base */
temp_register = base & 0xFFFFFFF0;
temp_register = (~temp_register) + 1;
@@ -918,7 +906,7 @@
} else
if (((base & 0x0BL) == 0x00)
&& (save_command & 0x02)) {
- // prefetchable memory base
+ /* prefetchable memory base */
temp_register = base & 0xFFFFFFF0;
temp_register = (~temp_register) + 1;
@@ -935,15 +923,14 @@
} else
return(1);
}
- } // End of base register loop
- } else { // Some other unknown header type
+ } /* End of base register loop */
}
- // find the next device in this slot
+ /* find the next device in this slot */
func = cpqhp_slot_find(func->bus, func->device, index++);
}
- return(0);
+ return 0;
}
@@ -975,16 +962,16 @@
pci_bus->number = func->bus;
devfn = PCI_DEVFN(func->device, func->function);
- // Start at the top of config space so that the control
- // registers are programmed last
- for (cloop = 0x3C; cloop > 0; cloop -= 4) {
+ /* Start at the top of config space so that the control
+ * registers are programmed last
+ */
+ for (cloop = 0x3C; cloop > 0; cloop -= 4)
pci_bus_write_config_dword (pci_bus, devfn, cloop, func->config_space[cloop >> 2]);
- }
pci_bus_read_config_byte (pci_bus, devfn, PCI_HEADER_TYPE, &header_type);
- // If this is a bridge device, restore subordinate devices
- if ((header_type & 0x7F) == PCI_HEADER_TYPE_BRIDGE) { // PCI-PCI Bridge
+ /* If this is a bridge device, restore subordinate devices */
+ if ((header_type & 0x7F) == PCI_HEADER_TYPE_BRIDGE) {
pci_bus_read_config_byte (pci_bus, devfn, PCI_SECONDARY_BUS, &secondary_bus);
sub_bus = (int) secondary_bus;
@@ -1000,8 +987,9 @@
}
} else {
- // Check all the base Address Registers to make sure
- // they are the same. If not, the board is different.
+ /* Check all the base Address Registers to make sure
+ * they are the same. If not, the board is different.
+ */
for (cloop = 16; cloop < 40; cloop += 4) {
pci_bus_read_config_dword (pci_bus, devfn, cloop, &temp);
@@ -1058,27 +1046,28 @@
pci_bus_read_config_dword (pci_bus, devfn, PCI_VENDOR_ID, &temp_register);
- // No adapter present
+ /* No adapter present */
if (temp_register == 0xFFFFFFFF)
return(NO_ADAPTER_PRESENT);
if (temp_register != func->config_space[0])
return(ADAPTER_NOT_SAME);
- // Check for same revision number and class code
+ /* Check for same revision number and class code */
pci_bus_read_config_dword (pci_bus, devfn, PCI_CLASS_REVISION, &temp_register);
- // Adapter not the same
+ /* Adapter not the same */
if (temp_register != func->config_space[0x08 >> 2])
return(ADAPTER_NOT_SAME);
- // Check for Bridge
+ /* Check for Bridge */
pci_bus_read_config_byte (pci_bus, devfn, PCI_HEADER_TYPE, &header_type);
- if ((header_type & 0x7F) == PCI_HEADER_TYPE_BRIDGE) { // PCI-PCI Bridge
- // In order to continue checking, we must program the
- // bus registers in the bridge to respond to accesses
- // for it's subordinate bus(es)
+ if ((header_type & 0x7F) == PCI_HEADER_TYPE_BRIDGE) {
+ /* In order to continue checking, we must program the
+ * bus registers in the bridge to respond to accesses
+ * for its subordinate bus(es)
+ */
temp_register = func->config_space[0x18 >> 2];
pci_bus_write_config_dword (pci_bus, devfn, PCI_PRIMARY_BUS, temp_register);
@@ -1096,35 +1085,39 @@
}
}
- // Check to see if it is a standard config header
+ /* Check to see if it is a standard config header */
else if ((header_type & 0x7F) == PCI_HEADER_TYPE_NORMAL) {
- // Check subsystem vendor and ID
+ /* Check subsystem vendor and ID */
pci_bus_read_config_dword (pci_bus, devfn, PCI_SUBSYSTEM_VENDOR_ID, &temp_register);
if (temp_register != func->config_space[0x2C >> 2]) {
- // If it's a SMART-2 and the register isn't filled
- // in, ignore the difference because
- // they just have an old rev of the firmware
-
+ /* If it's a SMART-2 and the register isn't
+ * filled in, ignore the difference because
+ * they just have an old rev of the firmware
+ */
if (!((func->config_space[0] == 0xAE100E11)
&& (temp_register == 0x00L)))
return(ADAPTER_NOT_SAME);
}
- // Figure out IO and memory base lengths
+ /* Figure out IO and memory base lengths */
for (cloop = 0x10; cloop <= 0x24; cloop += 4) {
temp_register = 0xFFFFFFFF;
pci_bus_write_config_dword (pci_bus, devfn, cloop, temp_register);
pci_bus_read_config_dword (pci_bus, devfn, cloop, &base);
- if (base) { // If this register is implemented
+
+ /* If this register is implemented */
+ if (base) {
if (base & 0x01L) {
- // IO base
- // set base = amount of IO space requested
+ /* IO base
+ * set base = amount of IO
+ * space requested
+ */
base = base & 0xFFFFFFFE;
base = (~base) + 1;
type = 1;
} else {
- // memory base
+ /* memory base */
base = base & 0xFFFFFFF0;
base = (~base) + 1;
@@ -1135,23 +1128,24 @@
type = 0;
}
- // Check information in slot structure
+ /* Check information in slot structure */
if (func->base_length[(cloop - 0x10) >> 2] != base)
return(ADAPTER_NOT_SAME);
if (func->base_type[(cloop - 0x10) >> 2] != type)
return(ADAPTER_NOT_SAME);
- } // End of base register loop
+ } /* End of base register loop */
- } // End of (type 0 config space) else
+ } /* End of (type 0 config space) else */
else {
- // this is not a type 0 or 1 config space header so
- // we don't know how to do it
+ /* this is not a type 0 or 1 config space header so
+ * we don't know how to do it
+ */
return(DEVICE_TYPE_NOT_SUPPORTED);
}
- // Get the next function
+ /* Get the next function */
func = cpqhp_slot_find(func->bus, func->device, index++);
}
@@ -1168,7 +1162,7 @@
* this function is for hot plug ADD!
*
* returns 0 if success
- */
+ */
int cpqhp_find_available_resources(struct controller *ctrl, void __iomem *rom_start)
{
u8 temp;
@@ -1187,10 +1181,10 @@
rom_resource_table = detect_HRT_floating_pointer(rom_start, rom_start+0xffff);
dbg("rom_resource_table = %p\n", rom_resource_table);
- if (rom_resource_table == NULL) {
+ if (rom_resource_table == NULL)
return -ENODEV;
- }
- // Sum all resources and setup resource maps
+
+ /* Sum all resources and setup resource maps */
unused_IRQ = readl(rom_resource_table + UNUSED_IRQ);
dbg("unused_IRQ = %x\n", unused_IRQ);
@@ -1222,13 +1216,11 @@
temp = 0;
- if (!cpqhp_nic_irq) {
+ if (!cpqhp_nic_irq)
cpqhp_nic_irq = ctrl->cfgspc_irq;
- }
- if (!cpqhp_disk_irq) {
+ if (!cpqhp_disk_irq)
cpqhp_disk_irq = ctrl->cfgspc_irq;
- }
dbg("cpqhp_disk_irq, cpqhp_nic_irq= %d, %d\n", cpqhp_disk_irq, cpqhp_nic_irq);
@@ -1262,13 +1254,13 @@
dev_func, io_base, io_length, mem_base, mem_length, pre_mem_base, pre_mem_length,
primary_bus, secondary_bus, max_bus);
- // If this entry isn't for our controller's bus, ignore it
+ /* If this entry isn't for our controller's bus, ignore it */
if (primary_bus != ctrl->bus) {
i--;
one_slot += sizeof (struct slot_rt);
continue;
}
- // find out if this entry is for an occupied slot
+ /* find out if this entry is for an occupied slot */
ctrl->pci_bus->number = primary_bus;
pci_bus_read_config_dword (ctrl->pci_bus, dev_func, PCI_VENDOR_ID, &temp_dword);
dbg("temp_D_word = %x\n", temp_dword);
@@ -1282,13 +1274,13 @@
func = cpqhp_slot_find(primary_bus, dev_func >> 3, index++);
}
- // If we can't find a match, skip this table entry
+ /* If we can't find a match, skip this table entry */
if (!func) {
i--;
one_slot += sizeof (struct slot_rt);
continue;
}
- // this may not work and shouldn't be used
+ /* this may not work and shouldn't be used */
if (secondary_bus != primary_bus)
bridged_slot = 1;
else
@@ -1301,7 +1293,7 @@
}
- // If we've got a valid IO base, use it
+ /* If we've got a valid IO base, use it */
temp_dword = io_base + io_length;
@@ -1325,7 +1317,7 @@
}
}
- // If we've got a valid memory base, use it
+ /* If we've got a valid memory base, use it */
temp_dword = mem_base + mem_length;
if ((mem_base) && (temp_dword < 0x10000)) {
mem_node = kmalloc(sizeof(*mem_node), GFP_KERNEL);
@@ -1348,8 +1340,9 @@
}
}
- // If we've got a valid prefetchable memory base, and
- // the base + length isn't greater than 0xFFFF
+ /* If we've got a valid prefetchable memory base, and
+ * the base + length isn't greater than 0xFFFF
+ */
temp_dword = pre_mem_base + pre_mem_length;
if ((pre_mem_base) && (temp_dword < 0x10000)) {
p_mem_node = kmalloc(sizeof(*p_mem_node), GFP_KERNEL);
@@ -1372,9 +1365,10 @@
}
}
- // If we've got a valid bus number, use it
- // The second condition is to ignore bus numbers on
- // populated slots that don't have PCI-PCI bridges
+ /* If we've got a valid bus number, use it.
+ * The second condition is to ignore bus numbers on
+ * populated slots that don't have PCI-PCI bridges
+ */
if (secondary_bus && (secondary_bus != primary_bus)) {
bus_node = kmalloc(sizeof(*bus_node), GFP_KERNEL);
if (!bus_node)
@@ -1398,8 +1392,9 @@
one_slot += sizeof (struct slot_rt);
}
- // If all of the following fail, we don't have any resources for
- // hot plug add
+ /* If all of the following fail, we don't have any resources for
+ * hot plug add
+ */
rc = 1;
rc &= cpqhp_resource_sort_and_combine(&(ctrl->mem_head));
rc &= cpqhp_resource_sort_and_combine(&(ctrl->p_mem_head));
diff --git a/drivers/pci/hotplug/ibmphp_core.c b/drivers/pci/hotplug/ibmphp_core.c
index 42e4260..7485ffd 100644
--- a/drivers/pci/hotplug/ibmphp_core.c
+++ b/drivers/pci/hotplug/ibmphp_core.c
@@ -1318,7 +1318,6 @@
}
struct hotplug_slot_ops ibmphp_hotplug_slot_ops = {
- .owner = THIS_MODULE,
.set_attention_status = set_attention_status,
.enable_slot = enable_slot,
.disable_slot = ibmphp_disable_slot,
@@ -1421,3 +1420,4 @@
}
module_init(ibmphp_init);
+module_exit(ibmphp_exit);
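
The ibmphp hunk above pairs the existing module_init() with a module_exit(), so the
driver can actually be unloaded; ibmphp_exit itself is presumably defined elsewhere in
ibmphp_core.c and is not shown in this hunk. For reference, a minimal sketch of the
init/exit pairing being completed here, with illustrative names rather than the
driver's real bodies:

	static int __init example_init(void)
	{
		return 0;		/* registration work would go here */
	}

	static void __exit example_exit(void)
	{
		/* undo whatever example_init() registered */
	}

	module_init(example_init);
	module_exit(example_exit);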
diff --git a/drivers/pci/hotplug/pci_hotplug_core.c b/drivers/pci/hotplug/pci_hotplug_core.c
index 535fce0..8445804 100644
--- a/drivers/pci/hotplug/pci_hotplug_core.c
+++ b/drivers/pci/hotplug/pci_hotplug_core.c
@@ -347,125 +347,129 @@
.store = test_write_file
};
-static int has_power_file(struct pci_slot *pci_slot)
+static bool has_power_file(struct pci_slot *pci_slot)
{
struct hotplug_slot *slot = pci_slot->hotplug;
if ((!slot) || (!slot->ops))
- return -ENODEV;
+ return false;
if ((slot->ops->enable_slot) ||
(slot->ops->disable_slot) ||
(slot->ops->get_power_status))
- return 0;
- return -ENOENT;
+ return true;
+ return false;
}
-static int has_attention_file(struct pci_slot *pci_slot)
+static bool has_attention_file(struct pci_slot *pci_slot)
{
struct hotplug_slot *slot = pci_slot->hotplug;
if ((!slot) || (!slot->ops))
- return -ENODEV;
+ return false;
if ((slot->ops->set_attention_status) ||
(slot->ops->get_attention_status))
- return 0;
- return -ENOENT;
+ return true;
+ return false;
}
-static int has_latch_file(struct pci_slot *pci_slot)
+static bool has_latch_file(struct pci_slot *pci_slot)
{
struct hotplug_slot *slot = pci_slot->hotplug;
if ((!slot) || (!slot->ops))
- return -ENODEV;
+ return false;
if (slot->ops->get_latch_status)
- return 0;
- return -ENOENT;
+ return true;
+ return false;
}
-static int has_adapter_file(struct pci_slot *pci_slot)
+static bool has_adapter_file(struct pci_slot *pci_slot)
{
struct hotplug_slot *slot = pci_slot->hotplug;
if ((!slot) || (!slot->ops))
- return -ENODEV;
+ return false;
if (slot->ops->get_adapter_status)
- return 0;
- return -ENOENT;
+ return true;
+ return false;
}
-static int has_max_bus_speed_file(struct pci_slot *pci_slot)
+static bool has_max_bus_speed_file(struct pci_slot *pci_slot)
{
struct hotplug_slot *slot = pci_slot->hotplug;
if ((!slot) || (!slot->ops))
- return -ENODEV;
+ return false;
if (slot->ops->get_max_bus_speed)
- return 0;
- return -ENOENT;
+ return true;
+ return false;
}
-static int has_cur_bus_speed_file(struct pci_slot *pci_slot)
+static bool has_cur_bus_speed_file(struct pci_slot *pci_slot)
{
struct hotplug_slot *slot = pci_slot->hotplug;
if ((!slot) || (!slot->ops))
- return -ENODEV;
+ return false;
if (slot->ops->get_cur_bus_speed)
- return 0;
- return -ENOENT;
+ return true;
+ return false;
}
-static int has_test_file(struct pci_slot *pci_slot)
+static bool has_test_file(struct pci_slot *pci_slot)
{
struct hotplug_slot *slot = pci_slot->hotplug;
if ((!slot) || (!slot->ops))
- return -ENODEV;
+ return false;
if (slot->ops->hardware_test)
- return 0;
- return -ENOENT;
+ return true;
+ return false;
}
static int fs_add_slot(struct pci_slot *slot)
{
int retval = 0;
- if (has_power_file(slot) == 0) {
- retval = sysfs_create_file(&slot->kobj, &hotplug_slot_attr_power.attr);
+ /* Create symbolic link to the hotplug driver module */
+ pci_hp_create_module_link(slot);
+
+ if (has_power_file(slot)) {
+ retval = sysfs_create_file(&slot->kobj,
+ &hotplug_slot_attr_power.attr);
if (retval)
goto exit_power;
}
- if (has_attention_file(slot) == 0) {
+ if (has_attention_file(slot)) {
retval = sysfs_create_file(&slot->kobj,
&hotplug_slot_attr_attention.attr);
if (retval)
goto exit_attention;
}
- if (has_latch_file(slot) == 0) {
+ if (has_latch_file(slot)) {
retval = sysfs_create_file(&slot->kobj,
&hotplug_slot_attr_latch.attr);
if (retval)
goto exit_latch;
}
- if (has_adapter_file(slot) == 0) {
+ if (has_adapter_file(slot)) {
retval = sysfs_create_file(&slot->kobj,
&hotplug_slot_attr_presence.attr);
if (retval)
goto exit_adapter;
}
- if (has_max_bus_speed_file(slot) == 0) {
+ if (has_max_bus_speed_file(slot)) {
retval = sysfs_create_file(&slot->kobj,
- &hotplug_slot_attr_max_bus_speed.attr);
+ &hotplug_slot_attr_max_bus_speed.attr);
if (retval)
goto exit_max_speed;
}
- if (has_cur_bus_speed_file(slot) == 0) {
+ if (has_cur_bus_speed_file(slot)) {
retval = sysfs_create_file(&slot->kobj,
- &hotplug_slot_attr_cur_bus_speed.attr);
+ &hotplug_slot_attr_cur_bus_speed.attr);
if (retval)
goto exit_cur_speed;
}
- if (has_test_file(slot) == 0) {
+ if (has_test_file(slot)) {
retval = sysfs_create_file(&slot->kobj,
&hotplug_slot_attr_test.attr);
if (retval)
@@ -475,55 +479,61 @@
goto exit;
exit_test:
- if (has_cur_bus_speed_file(slot) == 0)
- sysfs_remove_file(&slot->kobj, &hotplug_slot_attr_cur_bus_speed.attr);
-
+ if (has_cur_bus_speed_file(slot))
+ sysfs_remove_file(&slot->kobj,
+ &hotplug_slot_attr_cur_bus_speed.attr);
exit_cur_speed:
- if (has_max_bus_speed_file(slot) == 0)
- sysfs_remove_file(&slot->kobj, &hotplug_slot_attr_max_bus_speed.attr);
-
+ if (has_max_bus_speed_file(slot))
+ sysfs_remove_file(&slot->kobj,
+ &hotplug_slot_attr_max_bus_speed.attr);
exit_max_speed:
- if (has_adapter_file(slot) == 0)
- sysfs_remove_file(&slot->kobj, &hotplug_slot_attr_presence.attr);
-
+ if (has_adapter_file(slot))
+ sysfs_remove_file(&slot->kobj,
+ &hotplug_slot_attr_presence.attr);
exit_adapter:
- if (has_latch_file(slot) == 0)
+ if (has_latch_file(slot))
sysfs_remove_file(&slot->kobj, &hotplug_slot_attr_latch.attr);
-
exit_latch:
- if (has_attention_file(slot) == 0)
- sysfs_remove_file(&slot->kobj, &hotplug_slot_attr_attention.attr);
-
+ if (has_attention_file(slot))
+ sysfs_remove_file(&slot->kobj,
+ &hotplug_slot_attr_attention.attr);
exit_attention:
- if (has_power_file(slot) == 0)
+ if (has_power_file(slot))
sysfs_remove_file(&slot->kobj, &hotplug_slot_attr_power.attr);
exit_power:
+ pci_hp_remove_module_link(slot);
exit:
return retval;
}
static void fs_remove_slot(struct pci_slot *slot)
{
- if (has_power_file(slot) == 0)
+ if (has_power_file(slot))
sysfs_remove_file(&slot->kobj, &hotplug_slot_attr_power.attr);
- if (has_attention_file(slot) == 0)
- sysfs_remove_file(&slot->kobj, &hotplug_slot_attr_attention.attr);
+ if (has_attention_file(slot))
+ sysfs_remove_file(&slot->kobj,
+ &hotplug_slot_attr_attention.attr);
- if (has_latch_file(slot) == 0)
+ if (has_latch_file(slot))
sysfs_remove_file(&slot->kobj, &hotplug_slot_attr_latch.attr);
- if (has_adapter_file(slot) == 0)
- sysfs_remove_file(&slot->kobj, &hotplug_slot_attr_presence.attr);
+ if (has_adapter_file(slot))
+ sysfs_remove_file(&slot->kobj,
+ &hotplug_slot_attr_presence.attr);
- if (has_max_bus_speed_file(slot) == 0)
- sysfs_remove_file(&slot->kobj, &hotplug_slot_attr_max_bus_speed.attr);
+ if (has_max_bus_speed_file(slot))
+ sysfs_remove_file(&slot->kobj,
+ &hotplug_slot_attr_max_bus_speed.attr);
- if (has_cur_bus_speed_file(slot) == 0)
- sysfs_remove_file(&slot->kobj, &hotplug_slot_attr_cur_bus_speed.attr);
+ if (has_cur_bus_speed_file(slot))
+ sysfs_remove_file(&slot->kobj,
+ &hotplug_slot_attr_cur_bus_speed.attr);
- if (has_test_file(slot) == 0)
+ if (has_test_file(slot))
sysfs_remove_file(&slot->kobj, &hotplug_slot_attr_test.attr);
+
+ pci_hp_remove_module_link(slot);
}
static struct hotplug_slot *get_slot_from_name (const char *name)
@@ -540,10 +550,10 @@
}
/**
- * pci_hp_register - register a hotplug_slot with the PCI hotplug subsystem
+ * __pci_hp_register - register a hotplug_slot with the PCI hotplug subsystem
* @bus: bus this slot is on
* @slot: pointer to the &struct hotplug_slot to register
- * @slot_nr: slot number
+ * @devnr: device number
* @name: name registered with kobject core
*
* Registers a hotplug slot with the pci hotplug subsystem, which will allow
@@ -551,8 +561,9 @@
*
* Returns 0 if successful, anything else for an error.
*/
-int pci_hp_register(struct hotplug_slot *slot, struct pci_bus *bus, int slot_nr,
- const char *name)
+int __pci_hp_register(struct hotplug_slot *slot, struct pci_bus *bus,
+ int devnr, const char *name,
+ struct module *owner, const char *mod_name)
{
int result;
struct pci_slot *pci_slot;
@@ -567,14 +578,16 @@
return -EINVAL;
}
- mutex_lock(&pci_hp_mutex);
+ slot->ops->owner = owner;
+ slot->ops->mod_name = mod_name;
+ mutex_lock(&pci_hp_mutex);
/*
* No problems if we call this interface from both ACPI_PCI_SLOT
* driver and call it here again. If we've already created the
* pci_slot, the interface will simply bump the refcount.
*/
- pci_slot = pci_create_slot(bus, slot_nr, name, slot);
+ pci_slot = pci_create_slot(bus, devnr, name, slot);
if (IS_ERR(pci_slot)) {
result = PTR_ERR(pci_slot);
goto out;
@@ -684,6 +697,6 @@
module_param(debug, bool, 0644);
MODULE_PARM_DESC(debug, "Debugging mode enabled or not");
-EXPORT_SYMBOL_GPL(pci_hp_register);
+EXPORT_SYMBOL_GPL(__pci_hp_register);
EXPORT_SYMBOL_GPL(pci_hp_deregister);
EXPORT_SYMBOL_GPL(pci_hp_change_slot_info);
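
The switch from exporting pci_hp_register() to __pci_hp_register() with explicit
owner/mod_name arguments is what lets the drivers below drop their ".owner =
THIS_MODULE" initializers: the core now records the owning module at registration
time and uses it for the pci_hp_create_module_link() call added in fs_add_slot().
The caller-facing name is presumably kept source compatible with a wrapper macro
along these lines (the actual definition would live in include/linux/pci_hotplug.h
and is not part of this hunk):

	/* hypothetical sketch: capture the calling module automatically */
	#define pci_hp_register(slot, pbus, devnr, name) \
		__pci_hp_register(slot, pbus, devnr, name, \
				  THIS_MODULE, KBUILD_MODNAME)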
diff --git a/drivers/pci/hotplug/pciehp.h b/drivers/pci/hotplug/pciehp.h
index 0a36854..e6cf096 100644
--- a/drivers/pci/hotplug/pciehp.h
+++ b/drivers/pci/hotplug/pciehp.h
@@ -81,7 +81,6 @@
struct hpc_ops *hpc_ops;
struct hotplug_slot *hotplug_slot;
struct list_head slot_list;
- unsigned long last_emi_toggle;
struct delayed_work work; /* work for button event */
struct mutex lock;
};
@@ -203,8 +202,6 @@
int (*set_attention_status)(struct slot *slot, u8 status);
int (*get_latch_status)(struct slot *slot, u8 *status);
int (*get_adapter_status)(struct slot *slot, u8 *status);
- int (*get_emi_status)(struct slot *slot, u8 *status);
- int (*toggle_emi)(struct slot *slot);
int (*get_max_bus_speed)(struct slot *slot, enum pci_bus_speed *speed);
int (*get_cur_bus_speed)(struct slot *slot, enum pci_bus_speed *speed);
int (*get_max_lnk_width)(struct slot *slot, enum pcie_link_width *val);
diff --git a/drivers/pci/hotplug/pciehp_core.c b/drivers/pci/hotplug/pciehp_core.c
index fb254b2..2317557 100644
--- a/drivers/pci/hotplug/pciehp_core.c
+++ b/drivers/pci/hotplug/pciehp_core.c
@@ -73,7 +73,6 @@
static int get_cur_bus_speed (struct hotplug_slot *slot, enum pci_bus_speed *value);
static struct hotplug_slot_ops pciehp_hotplug_slot_ops = {
- .owner = THIS_MODULE,
.set_attention_status = set_attention_status,
.enable_slot = enable_slot,
.disable_slot = disable_slot,
@@ -85,99 +84,6 @@
.get_cur_bus_speed = get_cur_bus_speed,
};
-/*
- * Check the status of the Electro Mechanical Interlock (EMI)
- */
-static int get_lock_status(struct hotplug_slot *hotplug_slot, u8 *value)
-{
- struct slot *slot = hotplug_slot->private;
- return (slot->hpc_ops->get_emi_status(slot, value));
-}
-
-/*
- * sysfs interface for the Electro Mechanical Interlock (EMI)
- * 1 == locked, 0 == unlocked
- */
-static ssize_t lock_read_file(struct hotplug_slot *slot, char *buf)
-{
- int retval;
- u8 value;
-
- retval = get_lock_status(slot, &value);
- if (retval)
- goto lock_read_exit;
- retval = sprintf (buf, "%d\n", value);
-
-lock_read_exit:
- return retval;
-}
-
-/*
- * Change the status of the Electro Mechanical Interlock (EMI)
- * This is a toggle - in addition there must be at least 1 second
- * in between toggles.
- */
-static int set_lock_status(struct hotplug_slot *hotplug_slot, u8 status)
-{
- struct slot *slot = hotplug_slot->private;
- int retval;
- u8 value;
-
- mutex_lock(&slot->ctrl->crit_sect);
-
- /* has it been >1 sec since our last toggle? */
- if ((get_seconds() - slot->last_emi_toggle) < 1) {
- mutex_unlock(&slot->ctrl->crit_sect);
- return -EINVAL;
- }
-
- /* see what our current state is */
- retval = get_lock_status(hotplug_slot, &value);
- if (retval || (value == status))
- goto set_lock_exit;
-
- slot->hpc_ops->toggle_emi(slot);
-set_lock_exit:
- mutex_unlock(&slot->ctrl->crit_sect);
- return 0;
-}
-
-/*
- * sysfs interface which allows the user to toggle the Electro Mechanical
- * Interlock. Valid values are either 0 or 1. 0 == unlock, 1 == lock
- */
-static ssize_t lock_write_file(struct hotplug_slot *hotplug_slot,
- const char *buf, size_t count)
-{
- struct slot *slot = hotplug_slot->private;
- unsigned long llock;
- u8 lock;
- int retval = 0;
-
- llock = simple_strtoul(buf, NULL, 10);
- lock = (u8)(llock & 0xff);
-
- switch (lock) {
- case 0:
- case 1:
- retval = set_lock_status(hotplug_slot, lock);
- break;
- default:
- ctrl_err(slot->ctrl, "%d is an invalid lock value\n",
- lock);
- retval = -EINVAL;
- }
- if (retval)
- return retval;
- return count;
-}
-
-static struct hotplug_slot_attribute hotplug_slot_attr_lock = {
- .attr = {.name = "lock", .mode = S_IFREG | S_IRUGO | S_IWUSR},
- .show = lock_read_file,
- .store = lock_write_file
-};
-
/**
* release_slot - free up the memory used by a slot
* @hotplug_slot: slot to free
@@ -236,17 +142,6 @@
get_attention_status(hotplug_slot, &info->attention_status);
get_latch_status(hotplug_slot, &info->latch_status);
get_adapter_status(hotplug_slot, &info->adapter_status);
- /* create additional sysfs entries */
- if (EMI(ctrl)) {
- retval = sysfs_create_file(&hotplug_slot->pci_slot->kobj,
- &hotplug_slot_attr_lock.attr);
- if (retval) {
- pci_hp_deregister(hotplug_slot);
- ctrl_err(ctrl, "Cannot create additional sysfs "
- "entries\n");
- goto error_info;
- }
- }
}
return 0;
@@ -261,13 +156,8 @@
static void cleanup_slots(struct controller *ctrl)
{
struct slot *slot;
-
- list_for_each_entry(slot, &ctrl->slot_list, slot_list) {
- if (EMI(ctrl))
- sysfs_remove_file(&slot->hotplug_slot->pci_slot->kobj,
- &hotplug_slot_attr_lock.attr);
+ list_for_each_entry(slot, &ctrl->slot_list, slot_list)
pci_hp_deregister(slot->hotplug_slot);
- }
}
/*
diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c
index 07bd321..5281325 100644
--- a/drivers/pci/hotplug/pciehp_hpc.c
+++ b/drivers/pci/hotplug/pciehp_hpc.c
@@ -422,35 +422,6 @@
return !!(slot_status & PCI_EXP_SLTSTA_PFD);
}
-static int hpc_get_emi_status(struct slot *slot, u8 *status)
-{
- struct controller *ctrl = slot->ctrl;
- u16 slot_status;
- int retval;
-
- retval = pciehp_readw(ctrl, PCI_EXP_SLTSTA, &slot_status);
- if (retval) {
- ctrl_err(ctrl, "Cannot check EMI status\n");
- return retval;
- }
- *status = !!(slot_status & PCI_EXP_SLTSTA_EIS);
- return retval;
-}
-
-static int hpc_toggle_emi(struct slot *slot)
-{
- u16 slot_cmd;
- u16 cmd_mask;
- int rc;
-
- slot_cmd = PCI_EXP_SLTCTL_EIC;
- cmd_mask = PCI_EXP_SLTCTL_EIC;
- rc = pcie_write_cmd(slot->ctrl, slot_cmd, cmd_mask);
- slot->last_emi_toggle = get_seconds();
-
- return rc;
-}
-
static int hpc_set_attention_status(struct slot *slot, u8 value)
{
struct controller *ctrl = slot->ctrl;
@@ -874,8 +845,6 @@
.get_attention_status = hpc_get_attention_status,
.get_latch_status = hpc_get_latch_status,
.get_adapter_status = hpc_get_adapter_status,
- .get_emi_status = hpc_get_emi_status,
- .toggle_emi = hpc_toggle_emi,
.get_max_bus_speed = hpc_get_max_lnk_speed,
.get_cur_bus_speed = hpc_get_cur_lnk_speed,
diff --git a/drivers/pci/hotplug/pcihp_skeleton.c b/drivers/pci/hotplug/pcihp_skeleton.c
index e3dd6cf..5175d9b 100644
--- a/drivers/pci/hotplug/pcihp_skeleton.c
+++ b/drivers/pci/hotplug/pcihp_skeleton.c
@@ -82,7 +82,6 @@
static int get_adapter_status (struct hotplug_slot *slot, u8 *value);
static struct hotplug_slot_ops skel_hotplug_slot_ops = {
- .owner = THIS_MODULE,
.enable_slot = enable_slot,
.disable_slot = disable_slot,
.set_attention_status = set_attention_status,
diff --git a/drivers/pci/hotplug/rpaphp_core.c b/drivers/pci/hotplug/rpaphp_core.c
index 95d02a0..c159223 100644
--- a/drivers/pci/hotplug/rpaphp_core.c
+++ b/drivers/pci/hotplug/rpaphp_core.c
@@ -423,7 +423,6 @@
}
struct hotplug_slot_ops rpaphp_hotplug_slot_ops = {
- .owner = THIS_MODULE,
.enable_slot = enable_slot,
.disable_slot = disable_slot,
.set_attention_status = set_attention_status,
diff --git a/drivers/pci/hotplug/sgi_hotplug.c b/drivers/pci/hotplug/sgi_hotplug.c
index 2d6da78..a4494d7 100644
--- a/drivers/pci/hotplug/sgi_hotplug.c
+++ b/drivers/pci/hotplug/sgi_hotplug.c
@@ -83,7 +83,6 @@
static inline int get_power_status(struct hotplug_slot *slot, u8 *value);
static struct hotplug_slot_ops sn_hotplug_slot_ops = {
- .owner = THIS_MODULE,
.enable_slot = enable_slot,
.disable_slot = disable_slot,
.get_power_status = get_power_status,
diff --git a/drivers/pci/hotplug/shpchp_core.c b/drivers/pci/hotplug/shpchp_core.c
index fe8d149..8a520a3 100644
--- a/drivers/pci/hotplug/shpchp_core.c
+++ b/drivers/pci/hotplug/shpchp_core.c
@@ -69,7 +69,6 @@
static int get_cur_bus_speed (struct hotplug_slot *slot, enum pci_bus_speed *value);
static struct hotplug_slot_ops shpchp_hotplug_slot_ops = {
- .owner = THIS_MODULE,
.set_attention_status = set_attention_status,
.enable_slot = enable_slot,
.disable_slot = disable_slot,
diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
index cd38916..e53eacd 100644
--- a/drivers/pci/intel-iommu.c
+++ b/drivers/pci/intel-iommu.c
@@ -39,6 +39,7 @@
#include <linux/sysdev.h>
#include <asm/cacheflush.h>
#include <asm/iommu.h>
+#include <asm/e820.h>
#include "pci.h"
#define ROOT_SIZE VTD_PAGE_SIZE
@@ -53,6 +54,8 @@
#define DEFAULT_DOMAIN_ADDRESS_WIDTH 48
+#define MAX_AGAW_WIDTH 64
+
#define DOMAIN_MAX_ADDR(gaw) ((((u64)1) << gaw) - 1)
#define IOVA_PFN(addr) ((addr) >> PAGE_SHIFT)
@@ -131,8 +134,6 @@
context->lo &= (((u64)-1) << 2) | 1;
}
-#define CONTEXT_TT_MULTI_LEVEL 0
-
static inline void context_set_translation_type(struct context_entry *context,
unsigned long value)
{
@@ -217,6 +218,14 @@
return (pte->val & 3) != 0;
}
+/*
+ * This domain is a static identity mapping domain.
+ *	1. This domain creates a static 1:1 mapping to all usable memory.
+ *	2. It maps to each iommu if successful.
+ *	3. Each iommu maps to this domain if successful.
+ */
+struct dmar_domain *si_domain;
+
/* devices under the same p2p bridge are owned in one domain */
#define DOMAIN_FLAG_P2P_MULTIPLE_DEVICES (1 << 0)
@@ -225,6 +234,9 @@
*/
#define DOMAIN_FLAG_VIRTUAL_MACHINE (1 << 1)
+/* si_domain contains multiple devices */
+#define DOMAIN_FLAG_STATIC_IDENTITY (1 << 2)
+
struct dmar_domain {
int id; /* domain id */
unsigned long iommu_bmp; /* bitmap of iommus this domain uses*/
@@ -256,6 +268,7 @@
u8 bus; /* PCI bus number */
u8 devfn; /* PCI devfn number */
struct pci_dev *dev; /* it's NULL for PCIE-to-PCI bridge */
+ struct intel_iommu *iommu; /* IOMMU used by this device */
struct dmar_domain *domain; /* pointer to domain */
};
@@ -401,17 +414,13 @@
static inline int width_to_agaw(int width);
-/* calculate agaw for each iommu.
- * "SAGAW" may be different across iommus, use a default agaw, and
- * get a supported less agaw for iommus that don't support the default agaw.
- */
-int iommu_calculate_agaw(struct intel_iommu *iommu)
+static int __iommu_calculate_agaw(struct intel_iommu *iommu, int max_gaw)
{
unsigned long sagaw;
int agaw = -1;
sagaw = cap_sagaw(iommu->cap);
- for (agaw = width_to_agaw(DEFAULT_DOMAIN_ADDRESS_WIDTH);
+ for (agaw = width_to_agaw(max_gaw);
agaw >= 0; agaw--) {
if (test_bit(agaw, &sagaw))
break;
@@ -420,12 +429,32 @@
return agaw;
}
-/* in native case, each domain is related to only one iommu */
+/*
+ * Calculate max SAGAW for each iommu.
+ */
+int iommu_calculate_max_sagaw(struct intel_iommu *iommu)
+{
+ return __iommu_calculate_agaw(iommu, MAX_AGAW_WIDTH);
+}
+
+/*
+ * calculate agaw for each iommu.
+ * "SAGAW" may be different across iommus, use a default agaw, and
+ * fall back to a smaller supported agaw for iommus that don't support the default agaw.
+ */
+int iommu_calculate_agaw(struct intel_iommu *iommu)
+{
+ return __iommu_calculate_agaw(iommu, DEFAULT_DOMAIN_ADDRESS_WIDTH);
+}
+
+/* This function only returns a single iommu in a domain */
static struct intel_iommu *domain_get_iommu(struct dmar_domain *domain)
{
int iommu_id;
+ /* si_domain and vm domain should not get here. */
BUG_ON(domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE);
+ BUG_ON(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY);
iommu_id = find_first_bit(&domain->iommu_bmp, g_num_of_iommus);
if (iommu_id < 0 || iommu_id >= g_num_of_iommus)
@@ -809,7 +838,7 @@
static void iommu_set_root_entry(struct intel_iommu *iommu)
{
void *addr;
- u32 cmd, sts;
+ u32 sts;
unsigned long flag;
addr = iommu->root_entry;
@@ -817,12 +846,11 @@
spin_lock_irqsave(&iommu->register_lock, flag);
dmar_writeq(iommu->reg + DMAR_RTADDR_REG, virt_to_phys(addr));
- cmd = iommu->gcmd | DMA_GCMD_SRTP;
- writel(cmd, iommu->reg + DMAR_GCMD_REG);
+ writel(iommu->gcmd | DMA_GCMD_SRTP, iommu->reg + DMAR_GCMD_REG);
/* Make sure hardware complete it */
IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
- readl, (sts & DMA_GSTS_RTPS), sts);
+ readl, (sts & DMA_GSTS_RTPS), sts);
spin_unlock_irqrestore(&iommu->register_lock, flag);
}
@@ -834,39 +862,25 @@
if (!rwbf_quirk && !cap_rwbf(iommu->cap))
return;
- val = iommu->gcmd | DMA_GCMD_WBF;
spin_lock_irqsave(&iommu->register_lock, flag);
- writel(val, iommu->reg + DMAR_GCMD_REG);
+ writel(iommu->gcmd | DMA_GCMD_WBF, iommu->reg + DMAR_GCMD_REG);
/* Make sure hardware complete it */
IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
- readl, (!(val & DMA_GSTS_WBFS)), val);
+ readl, (!(val & DMA_GSTS_WBFS)), val);
spin_unlock_irqrestore(&iommu->register_lock, flag);
}
/* return value determine if we need a write buffer flush */
-static int __iommu_flush_context(struct intel_iommu *iommu,
- u16 did, u16 source_id, u8 function_mask, u64 type,
- int non_present_entry_flush)
+static void __iommu_flush_context(struct intel_iommu *iommu,
+ u16 did, u16 source_id, u8 function_mask,
+ u64 type)
{
u64 val = 0;
unsigned long flag;
- /*
- * In the non-present entry flush case, if hardware doesn't cache
- * non-present entry we do nothing and if hardware cache non-present
- * entry, we flush entries of domain 0 (the domain id is used to cache
- * any non-present entries)
- */
- if (non_present_entry_flush) {
- if (!cap_caching_mode(iommu->cap))
- return 1;
- else
- did = 0;
- }
-
switch (type) {
case DMA_CCMD_GLOBAL_INVL:
val = DMA_CCMD_GLOBAL_INVL;
@@ -891,33 +905,16 @@
dmar_readq, (!(val & DMA_CCMD_ICC)), val);
spin_unlock_irqrestore(&iommu->register_lock, flag);
-
- /* flush context entry will implicitly flush write buffer */
- return 0;
}
/* return value determine if we need a write buffer flush */
-static int __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
- u64 addr, unsigned int size_order, u64 type,
- int non_present_entry_flush)
+static void __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
+ u64 addr, unsigned int size_order, u64 type)
{
int tlb_offset = ecap_iotlb_offset(iommu->ecap);
u64 val = 0, val_iva = 0;
unsigned long flag;
- /*
- * In the non-present entry flush case, if hardware doesn't cache
- * non-present entry we do nothing and if hardware cache non-present
- * entry, we flush entries of domain 0 (the domain id is used to cache
- * any non-present entries)
- */
- if (non_present_entry_flush) {
- if (!cap_caching_mode(iommu->cap))
- return 1;
- else
- did = 0;
- }
-
switch (type) {
case DMA_TLB_GLOBAL_FLUSH:
/* global flush doesn't need set IVA_REG */
@@ -965,37 +962,101 @@
pr_debug("IOMMU: tlb flush request %Lx, actual %Lx\n",
(unsigned long long)DMA_TLB_IIRG(type),
(unsigned long long)DMA_TLB_IAIG(val));
- /* flush iotlb entry will implicitly flush write buffer */
- return 0;
}
-static int iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did,
- u64 addr, unsigned int pages, int non_present_entry_flush)
+static struct device_domain_info *iommu_support_dev_iotlb(
+ struct dmar_domain *domain, int segment, u8 bus, u8 devfn)
{
- unsigned int mask;
+ int found = 0;
+ unsigned long flags;
+ struct device_domain_info *info;
+ struct intel_iommu *iommu = device_to_iommu(segment, bus, devfn);
+
+ if (!ecap_dev_iotlb_support(iommu->ecap))
+ return NULL;
+
+ if (!iommu->qi)
+ return NULL;
+
+ spin_lock_irqsave(&device_domain_lock, flags);
+ list_for_each_entry(info, &domain->devices, link)
+ if (info->bus == bus && info->devfn == devfn) {
+ found = 1;
+ break;
+ }
+ spin_unlock_irqrestore(&device_domain_lock, flags);
+
+ if (!found || !info->dev)
+ return NULL;
+
+ if (!pci_find_ext_capability(info->dev, PCI_EXT_CAP_ID_ATS))
+ return NULL;
+
+ if (!dmar_find_matched_atsr_unit(info->dev))
+ return NULL;
+
+ info->iommu = iommu;
+
+ return info;
+}
+
+static void iommu_enable_dev_iotlb(struct device_domain_info *info)
+{
+ if (!info)
+ return;
+
+ pci_enable_ats(info->dev, VTD_PAGE_SHIFT);
+}
+
+static void iommu_disable_dev_iotlb(struct device_domain_info *info)
+{
+ if (!info->dev || !pci_ats_enabled(info->dev))
+ return;
+
+ pci_disable_ats(info->dev);
+}
+
+static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
+ u64 addr, unsigned mask)
+{
+ u16 sid, qdep;
+ unsigned long flags;
+ struct device_domain_info *info;
+
+ spin_lock_irqsave(&device_domain_lock, flags);
+ list_for_each_entry(info, &domain->devices, link) {
+ if (!info->dev || !pci_ats_enabled(info->dev))
+ continue;
+
+ sid = info->bus << 8 | info->devfn;
+ qdep = pci_ats_queue_depth(info->dev);
+ qi_flush_dev_iotlb(info->iommu, sid, qdep, addr, mask);
+ }
+ spin_unlock_irqrestore(&device_domain_lock, flags);
+}
+
+static void iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did,
+ u64 addr, unsigned int pages)
+{
+ unsigned int mask = ilog2(__roundup_pow_of_two(pages));
BUG_ON(addr & (~VTD_PAGE_MASK));
BUG_ON(pages == 0);
- /* Fallback to domain selective flush if no PSI support */
- if (!cap_pgsel_inv(iommu->cap))
- return iommu->flush.flush_iotlb(iommu, did, 0, 0,
- DMA_TLB_DSI_FLUSH,
- non_present_entry_flush);
-
/*
+ * Fall back to domain selective flush if no PSI support or the size is
+ * too big.
* PSI requires page size to be 2 ^ x, and the base address is naturally
* aligned to the size
*/
- mask = ilog2(__roundup_pow_of_two(pages));
- /* Fallback to domain selective flush if size is too big */
- if (mask > cap_max_amask_val(iommu->cap))
- return iommu->flush.flush_iotlb(iommu, did, 0, 0,
- DMA_TLB_DSI_FLUSH, non_present_entry_flush);
-
- return iommu->flush.flush_iotlb(iommu, did, addr, mask,
- DMA_TLB_PSI_FLUSH,
- non_present_entry_flush);
+ if (!cap_pgsel_inv(iommu->cap) || mask > cap_max_amask_val(iommu->cap))
+ iommu->flush.flush_iotlb(iommu, did, 0, 0,
+ DMA_TLB_DSI_FLUSH);
+ else
+ iommu->flush.flush_iotlb(iommu, did, addr, mask,
+ DMA_TLB_PSI_FLUSH);
+ if (did)
+ iommu_flush_dev_iotlb(iommu->domains[did], addr, mask);
}
static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu)
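
The rewritten iommu_flush_iotlb_psi() above folds the two old fallback paths (no PSI
support, or a request too large for page-selective invalidation) into a single
condition on the computed mask. A small self-contained illustration of that mask
arithmetic, using the same <linux/log2.h> helpers the hunk uses; this is illustrative
only and not part of the patch:

	#include <linux/log2.h>

	/* e.g. pages = 5 -> rounded up to 8 -> mask = 3, i.e. the PSI flush
	 * covers 2^3 = 8 pages at an 8-page aligned base address. If mask
	 * exceeds cap_max_amask_val(iommu->cap), the code above degrades to a
	 * domain selective flush (DMA_TLB_DSI_FLUSH) instead.
	 */
	static inline unsigned int psi_mask(unsigned int pages)
	{
		return ilog2(__roundup_pow_of_two(pages));
	}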
@@ -1021,13 +1082,13 @@
unsigned long flags;
spin_lock_irqsave(&iommu->register_lock, flags);
- writel(iommu->gcmd|DMA_GCMD_TE, iommu->reg + DMAR_GCMD_REG);
+ iommu->gcmd |= DMA_GCMD_TE;
+ writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
/* Make sure hardware complete it */
IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
- readl, (sts & DMA_GSTS_TES), sts);
+ readl, (sts & DMA_GSTS_TES), sts);
- iommu->gcmd |= DMA_GCMD_TE;
spin_unlock_irqrestore(&iommu->register_lock, flags);
return 0;
}
@@ -1043,7 +1104,7 @@
/* Make sure hardware complete it */
IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
- readl, (!(sts & DMA_GSTS_TES)), sts);
+ readl, (!(sts & DMA_GSTS_TES)), sts);
spin_unlock_irqrestore(&iommu->register_lock, flag);
return 0;
@@ -1142,48 +1203,71 @@
free_context_table(iommu);
}
-static struct dmar_domain * iommu_alloc_domain(struct intel_iommu *iommu)
+static struct dmar_domain *alloc_domain(void)
{
- unsigned long num;
- unsigned long ndomains;
struct dmar_domain *domain;
- unsigned long flags;
domain = alloc_domain_mem();
if (!domain)
return NULL;
- ndomains = cap_ndoms(iommu->cap);
-
- spin_lock_irqsave(&iommu->lock, flags);
- num = find_first_zero_bit(iommu->domain_ids, ndomains);
- if (num >= ndomains) {
- spin_unlock_irqrestore(&iommu->lock, flags);
- free_domain_mem(domain);
- printk(KERN_ERR "IOMMU: no free domain ids\n");
- return NULL;
- }
-
- set_bit(num, iommu->domain_ids);
- domain->id = num;
memset(&domain->iommu_bmp, 0, sizeof(unsigned long));
- set_bit(iommu->seq_id, &domain->iommu_bmp);
domain->flags = 0;
- iommu->domains[num] = domain;
- spin_unlock_irqrestore(&iommu->lock, flags);
return domain;
}
-static void iommu_free_domain(struct dmar_domain *domain)
+static int iommu_attach_domain(struct dmar_domain *domain,
+ struct intel_iommu *iommu)
{
+ int num;
+ unsigned long ndomains;
unsigned long flags;
- struct intel_iommu *iommu;
- iommu = domain_get_iommu(domain);
+ ndomains = cap_ndoms(iommu->cap);
spin_lock_irqsave(&iommu->lock, flags);
- clear_bit(domain->id, iommu->domain_ids);
+
+ num = find_first_zero_bit(iommu->domain_ids, ndomains);
+ if (num >= ndomains) {
+ spin_unlock_irqrestore(&iommu->lock, flags);
+ printk(KERN_ERR "IOMMU: no free domain ids\n");
+ return -ENOMEM;
+ }
+
+ domain->id = num;
+ set_bit(num, iommu->domain_ids);
+ set_bit(iommu->seq_id, &domain->iommu_bmp);
+ iommu->domains[num] = domain;
+ spin_unlock_irqrestore(&iommu->lock, flags);
+
+ return 0;
+}
+
+static void iommu_detach_domain(struct dmar_domain *domain,
+ struct intel_iommu *iommu)
+{
+ unsigned long flags;
+ int num, ndomains;
+ int found = 0;
+
+ spin_lock_irqsave(&iommu->lock, flags);
+ ndomains = cap_ndoms(iommu->cap);
+ num = find_first_bit(iommu->domain_ids, ndomains);
+ for (; num < ndomains; ) {
+ if (iommu->domains[num] == domain) {
+ found = 1;
+ break;
+ }
+ num = find_next_bit(iommu->domain_ids,
+ cap_ndoms(iommu->cap), num+1);
+ }
+
+ if (found) {
+ clear_bit(num, iommu->domain_ids);
+ clear_bit(iommu->seq_id, &domain->iommu_bmp);
+ iommu->domains[num] = NULL;
+ }
spin_unlock_irqrestore(&iommu->lock, flags);
}
@@ -1303,6 +1387,8 @@
static void domain_exit(struct dmar_domain *domain)
{
+ struct dmar_drhd_unit *drhd;
+ struct intel_iommu *iommu;
u64 end;
/* Domain 0 is reserved, so dont process it */
@@ -1321,12 +1407,15 @@
/* free page tables */
dma_pte_free_pagetable(domain, 0, end);
- iommu_free_domain(domain);
+ for_each_active_iommu(iommu, drhd)
+ if (test_bit(iommu->seq_id, &domain->iommu_bmp))
+ iommu_detach_domain(domain, iommu);
+
free_domain_mem(domain);
}
-static int domain_context_mapping_one(struct dmar_domain *domain,
- int segment, u8 bus, u8 devfn)
+static int domain_context_mapping_one(struct dmar_domain *domain, int segment,
+ u8 bus, u8 devfn, int translation)
{
struct context_entry *context;
unsigned long flags;
@@ -1336,10 +1425,14 @@
unsigned long ndomains;
int id;
int agaw;
+ struct device_domain_info *info = NULL;
pr_debug("Set context mapping for %02x:%02x.%d\n",
bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
+
BUG_ON(!domain->pgd);
+ BUG_ON(translation != CONTEXT_TT_PASS_THROUGH &&
+ translation != CONTEXT_TT_MULTI_LEVEL);
iommu = device_to_iommu(segment, bus, devfn);
if (!iommu)
@@ -1357,7 +1450,8 @@
id = domain->id;
pgd = domain->pgd;
- if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE) {
+ if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE ||
+ domain->flags & DOMAIN_FLAG_STATIC_IDENTITY) {
int found = 0;
/* find an available domain id for this device in iommu */
@@ -1382,6 +1476,7 @@
}
set_bit(num, iommu->domain_ids);
+ set_bit(iommu->seq_id, &domain->iommu_bmp);
iommu->domains[num] = domain;
id = num;
}
@@ -1399,21 +1494,44 @@
}
context_set_domain_id(context, id);
- context_set_address_width(context, iommu->agaw);
- context_set_address_root(context, virt_to_phys(pgd));
- context_set_translation_type(context, CONTEXT_TT_MULTI_LEVEL);
+
+ if (translation != CONTEXT_TT_PASS_THROUGH) {
+ info = iommu_support_dev_iotlb(domain, segment, bus, devfn);
+ translation = info ? CONTEXT_TT_DEV_IOTLB :
+ CONTEXT_TT_MULTI_LEVEL;
+ }
+ /*
+ * In pass through mode, AW must be programmed to indicate the largest
+ * AGAW value supported by hardware. And ASR is ignored by hardware.
+ */
+ if (unlikely(translation == CONTEXT_TT_PASS_THROUGH))
+ context_set_address_width(context, iommu->msagaw);
+ else {
+ context_set_address_root(context, virt_to_phys(pgd));
+ context_set_address_width(context, iommu->agaw);
+ }
+
+ context_set_translation_type(context, translation);
context_set_fault_enable(context);
context_set_present(context);
domain_flush_cache(domain, context, sizeof(*context));
- /* it's a non-present to present mapping */
- if (iommu->flush.flush_context(iommu, domain->id,
- (((u16)bus) << 8) | devfn, DMA_CCMD_MASK_NOBIT,
- DMA_CCMD_DEVICE_INVL, 1))
+ /*
+ * It's a non-present to present mapping. If hardware doesn't cache
+ * non-present entries we only need to flush the write-buffer. If it
+ * _does_ cache non-present entries, then it does so in the special
+ * domain #0, which we have to flush:
+ */
+ if (cap_caching_mode(iommu->cap)) {
+ iommu->flush.flush_context(iommu, 0,
+ (((u16)bus) << 8) | devfn,
+ DMA_CCMD_MASK_NOBIT,
+ DMA_CCMD_DEVICE_INVL);
+ iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_DSI_FLUSH);
+ } else {
iommu_flush_write_buffer(iommu);
- else
- iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_DSI_FLUSH, 0);
-
+ }
+ iommu_enable_dev_iotlb(info);
spin_unlock_irqrestore(&iommu->lock, flags);
spin_lock_irqsave(&domain->iommu_lock, flags);
@@ -1426,13 +1544,15 @@
}
static int
-domain_context_mapping(struct dmar_domain *domain, struct pci_dev *pdev)
+domain_context_mapping(struct dmar_domain *domain, struct pci_dev *pdev,
+ int translation)
{
int ret;
struct pci_dev *tmp, *parent;
ret = domain_context_mapping_one(domain, pci_domain_nr(pdev->bus),
- pdev->bus->number, pdev->devfn);
+ pdev->bus->number, pdev->devfn,
+ translation);
if (ret)
return ret;
@@ -1446,7 +1566,7 @@
ret = domain_context_mapping_one(domain,
pci_domain_nr(parent->bus),
parent->bus->number,
- parent->devfn);
+ parent->devfn, translation);
if (ret)
return ret;
parent = parent->bus->self;
@@ -1454,12 +1574,14 @@
if (tmp->is_pcie) /* this is a PCIE-to-PCI bridge */
return domain_context_mapping_one(domain,
pci_domain_nr(tmp->subordinate),
- tmp->subordinate->number, 0);
+ tmp->subordinate->number, 0,
+ translation);
else /* this is a legacy PCI bridge */
return domain_context_mapping_one(domain,
pci_domain_nr(tmp->bus),
tmp->bus->number,
- tmp->devfn);
+ tmp->devfn,
+ translation);
}
static int domain_context_mapped(struct pci_dev *pdev)
@@ -1540,9 +1662,8 @@
clear_context_table(iommu, bus, devfn);
iommu->flush.flush_context(iommu, 0, 0, 0,
- DMA_CCMD_GLOBAL_INVL, 0);
- iommu->flush.flush_iotlb(iommu, 0, 0, 0,
- DMA_TLB_GLOBAL_FLUSH, 0);
+ DMA_CCMD_GLOBAL_INVL);
+ iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
}
static void domain_remove_dev_info(struct dmar_domain *domain)
@@ -1561,6 +1682,7 @@
info->dev->dev.archdata.iommu = NULL;
spin_unlock_irqrestore(&device_domain_lock, flags);
+ iommu_disable_dev_iotlb(info);
iommu = device_to_iommu(info->segment, info->bus, info->devfn);
iommu_detach_dev(iommu, info->bus, info->devfn);
free_devinfo_mem(info);
@@ -1597,6 +1719,7 @@
unsigned long flags;
int bus = 0, devfn = 0;
int segment;
+ int ret;
domain = find_domain(pdev);
if (domain)
@@ -1629,6 +1752,10 @@
}
}
+ domain = alloc_domain();
+ if (!domain)
+ goto error;
+
/* Allocate new domain for the device */
drhd = dmar_find_matched_drhd_unit(pdev);
if (!drhd) {
@@ -1638,9 +1765,11 @@
}
iommu = drhd->iommu;
- domain = iommu_alloc_domain(iommu);
- if (!domain)
+ ret = iommu_attach_domain(domain, iommu);
+ if (ret) {
+ domain_exit(domain);
goto error;
+ }
if (domain_init(domain, gaw)) {
domain_exit(domain);
@@ -1714,6 +1843,8 @@
return find_domain(pdev);
}
+static int iommu_identity_mapping;
+
static int iommu_prepare_identity_map(struct pci_dev *pdev,
unsigned long long start,
unsigned long long end)
@@ -1726,8 +1857,11 @@
printk(KERN_INFO
"IOMMU: Setting identity map for device %s [0x%Lx - 0x%Lx]\n",
pci_name(pdev), start, end);
- /* page table init */
- domain = get_domain_for_dev(pdev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
+ if (iommu_identity_mapping)
+ domain = si_domain;
+ else
+ /* page table init */
+ domain = get_domain_for_dev(pdev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
if (!domain)
return -ENOMEM;
@@ -1756,7 +1890,7 @@
goto error;
/* context entry init */
- ret = domain_context_mapping(domain, pdev);
+ ret = domain_context_mapping(domain, pdev, CONTEXT_TT_MULTI_LEVEL);
if (!ret)
return 0;
error:
@@ -1857,13 +1991,141 @@
}
#endif /* !CONFIG_DMAR_FLPY_WA */
-static int __init init_dmars(void)
+/* Initialize each context entry as pass through. */
+static int __init init_context_pass_through(void)
+{
+ struct pci_dev *pdev = NULL;
+ struct dmar_domain *domain;
+ int ret;
+
+ for_each_pci_dev(pdev) {
+ domain = get_domain_for_dev(pdev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
+ ret = domain_context_mapping(domain, pdev,
+ CONTEXT_TT_PASS_THROUGH);
+ if (ret)
+ return ret;
+ }
+ return 0;
+}
+
+static int md_domain_init(struct dmar_domain *domain, int guest_width);
+static int si_domain_init(void)
+{
+ struct dmar_drhd_unit *drhd;
+ struct intel_iommu *iommu;
+ int ret = 0;
+
+ si_domain = alloc_domain();
+ if (!si_domain)
+ return -EFAULT;
+
+
+ for_each_active_iommu(iommu, drhd) {
+ ret = iommu_attach_domain(si_domain, iommu);
+ if (ret) {
+ domain_exit(si_domain);
+ return -EFAULT;
+ }
+ }
+
+ if (md_domain_init(si_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
+ domain_exit(si_domain);
+ return -EFAULT;
+ }
+
+ si_domain->flags = DOMAIN_FLAG_STATIC_IDENTITY;
+
+ return 0;
+}
+
+static void domain_remove_one_dev_info(struct dmar_domain *domain,
+ struct pci_dev *pdev);
+static int identity_mapping(struct pci_dev *pdev)
+{
+ struct device_domain_info *info;
+
+ if (likely(!iommu_identity_mapping))
+ return 0;
+
+
+ list_for_each_entry(info, &si_domain->devices, link)
+ if (info->dev == pdev)
+ return 1;
+ return 0;
+}
+
+static int domain_add_dev_info(struct dmar_domain *domain,
+ struct pci_dev *pdev)
+{
+ struct device_domain_info *info;
+ unsigned long flags;
+
+ info = alloc_devinfo_mem();
+ if (!info)
+ return -ENOMEM;
+
+ info->segment = pci_domain_nr(pdev->bus);
+ info->bus = pdev->bus->number;
+ info->devfn = pdev->devfn;
+ info->dev = pdev;
+ info->domain = domain;
+
+ spin_lock_irqsave(&device_domain_lock, flags);
+ list_add(&info->link, &domain->devices);
+ list_add(&info->global, &device_domain_list);
+ pdev->dev.archdata.iommu = info;
+ spin_unlock_irqrestore(&device_domain_lock, flags);
+
+ return 0;
+}
+
+static int iommu_prepare_static_identity_mapping(void)
+{
+ int i;
+ struct pci_dev *pdev = NULL;
+ int ret;
+
+ ret = si_domain_init();
+ if (ret)
+ return -EFAULT;
+
+ printk(KERN_INFO "IOMMU: Setting identity map:\n");
+ for_each_pci_dev(pdev) {
+ for (i = 0; i < e820.nr_map; i++) {
+ struct e820entry *ei = &e820.map[i];
+
+ if (ei->type == E820_RAM) {
+ ret = iommu_prepare_identity_map(pdev,
+ ei->addr, ei->addr + ei->size);
+ if (ret) {
+ printk(KERN_INFO "1:1 mapping to one domain failed.\n");
+ return -EFAULT;
+ }
+ }
+ }
+ ret = domain_add_dev_info(si_domain, pdev);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+int __init init_dmars(void)
{
struct dmar_drhd_unit *drhd;
struct dmar_rmrr_unit *rmrr;
struct pci_dev *pdev;
struct intel_iommu *iommu;
int i, ret;
+ int pass_through = 1;
+
+ /*
+ * In case pass through cannot be enabled, the iommu tries to use identity
+ * mapping.
+ */
+ if (iommu_pass_through)
+ iommu_identity_mapping = 1;
/*
* for each drhd
@@ -1917,7 +2179,15 @@
printk(KERN_ERR "IOMMU: allocate root entry failed\n");
goto error;
}
+ if (!ecap_pass_through(iommu->ecap))
+ pass_through = 0;
}
+ if (iommu_pass_through)
+ if (!pass_through) {
+ printk(KERN_INFO
+ "Pass Through is not supported by hardware.\n");
+ iommu_pass_through = 0;
+ }
/*
* Start from the sane iommu hardware state.
@@ -1973,35 +2243,60 @@
}
/*
- * For each rmrr
- * for each dev attached to rmrr
- * do
- * locate drhd for dev, alloc domain for dev
- * allocate free domain
- * allocate page table entries for rmrr
- * if context not allocated for bus
- * allocate and init context
- * set present in root table for this bus
- * init context with domain, translation etc
- * endfor
- * endfor
+ * If pass through is set and enabled, context entries of all pci
+ * devices are initialized by pass through translation type.
*/
- for_each_rmrr_units(rmrr) {
- for (i = 0; i < rmrr->devices_cnt; i++) {
- pdev = rmrr->devices[i];
- /* some BIOS lists non-exist devices in DMAR table */
- if (!pdev)
- continue;
- ret = iommu_prepare_rmrr_dev(rmrr, pdev);
- if (ret)
- printk(KERN_ERR
- "IOMMU: mapping reserved region failed\n");
+ if (iommu_pass_through) {
+ ret = init_context_pass_through();
+ if (ret) {
+ printk(KERN_ERR "IOMMU: Pass through init failed.\n");
+ iommu_pass_through = 0;
}
}
- iommu_prepare_gfx_mapping();
+ /*
+ * If pass through is not set or not enabled, set up context entries
+ * for identity mappings for rmrr, gfx, and isa, and possibly fall back
+ * to static identity mapping if iommu_identity_mapping is set.
+ */
+ if (!iommu_pass_through) {
+ if (iommu_identity_mapping)
+ iommu_prepare_static_identity_mapping();
+ /*
+ * For each rmrr
+ * for each dev attached to rmrr
+ * do
+ * locate drhd for dev, alloc domain for dev
+ * allocate free domain
+ * allocate page table entries for rmrr
+ * if context not allocated for bus
+ * allocate and init context
+ * set present in root table for this bus
+ * init context with domain, translation etc
+ * endfor
+ * endfor
+ */
+ printk(KERN_INFO "IOMMU: Setting RMRR:\n");
+ for_each_rmrr_units(rmrr) {
+ for (i = 0; i < rmrr->devices_cnt; i++) {
+ pdev = rmrr->devices[i];
+ /*
+ * some BIOSes list nonexistent devices in the
+ * DMAR table.
+ */
+ if (!pdev)
+ continue;
+ ret = iommu_prepare_rmrr_dev(rmrr, pdev);
+ if (ret)
+ printk(KERN_ERR
+ "IOMMU: mapping reserved region failed\n");
+ }
+ }
- iommu_prepare_isa();
+ iommu_prepare_gfx_mapping();
+
+ iommu_prepare_isa();
+ }
/*
* for each drhd
@@ -2023,10 +2318,8 @@
iommu_set_root_entry(iommu);
- iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL,
- 0);
- iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH,
- 0);
+ iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
+ iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
iommu_disable_protect_mem_regions(iommu);
ret = iommu_enable_translation(iommu);
@@ -2112,7 +2405,8 @@
/* make sure context mapping is ok */
if (unlikely(!domain_context_mapped(pdev))) {
- ret = domain_context_mapping(domain, pdev);
+ ret = domain_context_mapping(domain, pdev,
+ CONTEXT_TT_MULTI_LEVEL);
if (ret) {
printk(KERN_ERR
"Domain context map for %s failed",
@@ -2124,6 +2418,52 @@
return domain;
}
+static int iommu_dummy(struct pci_dev *pdev)
+{
+ return pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO;
+}
+
+/* Check if the pdev needs to go through the non-identity map and unmap process. */
+static int iommu_no_mapping(struct pci_dev *pdev)
+{
+ int found;
+
+ if (!iommu_identity_mapping)
+ return iommu_dummy(pdev);
+
+ found = identity_mapping(pdev);
+ if (found) {
+ if (pdev->dma_mask > DMA_BIT_MASK(32))
+ return 1;
+ else {
+ /*
+ * 32 bit DMA is removed from si_domain and fall back
+ * to non-identity mapping.
+ */
+ domain_remove_one_dev_info(si_domain, pdev);
+ printk(KERN_INFO "32bit %s uses non-identity mapping\n",
+ pci_name(pdev));
+ return 0;
+ }
+ } else {
+ /*
+ * If a 64 bit DMA device is detached from a vm, the device
+ * is put into si_domain for identity mapping.
+ */
+ if (pdev->dma_mask > DMA_BIT_MASK(32)) {
+ int ret;
+ ret = domain_add_dev_info(si_domain, pdev);
+ if (!ret) {
+ printk(KERN_INFO "64bit %s uses identity mapping\n",
+ pci_name(pdev));
+ return 1;
+ }
+ }
+ }
+
+ return iommu_dummy(pdev);
+}
+
static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr,
size_t size, int dir, u64 dma_mask)
{
@@ -2136,7 +2476,8 @@
struct intel_iommu *iommu;
BUG_ON(dir == DMA_NONE);
- if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
+
+ if (iommu_no_mapping(pdev))
return paddr;
domain = get_valid_domain_for_dev(pdev);
@@ -2173,10 +2514,11 @@
if (ret)
goto error;
- /* it's a non-present to present mapping */
- ret = iommu_flush_iotlb_psi(iommu, domain->id,
- start_paddr, size >> VTD_PAGE_SHIFT, 1);
- if (ret)
+ /* it's a non-present to present mapping. Only flush if caching mode */
+ if (cap_caching_mode(iommu->cap))
+ iommu_flush_iotlb_psi(iommu, 0, start_paddr,
+ size >> VTD_PAGE_SHIFT);
+ else
iommu_flush_write_buffer(iommu);
return start_paddr + ((u64)paddr & (~PAGE_MASK));
@@ -2210,15 +2552,22 @@
if (!iommu)
continue;
- if (deferred_flush[i].next) {
- iommu->flush.flush_iotlb(iommu, 0, 0, 0,
- DMA_TLB_GLOBAL_FLUSH, 0);
- for (j = 0; j < deferred_flush[i].next; j++) {
- __free_iova(&deferred_flush[i].domain[j]->iovad,
- deferred_flush[i].iova[j]);
- }
- deferred_flush[i].next = 0;
+ if (!deferred_flush[i].next)
+ continue;
+
+ iommu->flush.flush_iotlb(iommu, 0, 0, 0,
+ DMA_TLB_GLOBAL_FLUSH);
+ for (j = 0; j < deferred_flush[i].next; j++) {
+ unsigned long mask;
+ struct iova *iova = deferred_flush[i].iova[j];
+
+ mask = (iova->pfn_hi - iova->pfn_lo + 1) << PAGE_SHIFT;
+ mask = ilog2(mask >> VTD_PAGE_SHIFT);
+ iommu_flush_dev_iotlb(deferred_flush[i].domain[j],
+ iova->pfn_lo << PAGE_SHIFT, mask);
+ __free_iova(&deferred_flush[i].domain[j]->iovad, iova);
}
+ deferred_flush[i].next = 0;
}
list_size = 0;
@@ -2269,8 +2618,9 @@
struct iova *iova;
struct intel_iommu *iommu;
- if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
+ if (iommu_no_mapping(pdev))
return;
+
domain = find_domain(pdev);
BUG_ON(!domain);
@@ -2291,9 +2641,8 @@
/* free page tables */
dma_pte_free_pagetable(domain, start_addr, start_addr + size);
if (intel_iommu_strict) {
- if (iommu_flush_iotlb_psi(iommu,
- domain->id, start_addr, size >> VTD_PAGE_SHIFT, 0))
- iommu_flush_write_buffer(iommu);
+ iommu_flush_iotlb_psi(iommu, domain->id, start_addr,
+ size >> VTD_PAGE_SHIFT);
/* free iova */
__free_iova(&domain->iovad, iova);
} else {
@@ -2361,7 +2710,7 @@
struct scatterlist *sg;
struct intel_iommu *iommu;
- if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
+ if (iommu_no_mapping(pdev))
return;
domain = find_domain(pdev);
@@ -2384,9 +2733,8 @@
/* free page tables */
dma_pte_free_pagetable(domain, start_addr, start_addr + size);
- if (iommu_flush_iotlb_psi(iommu, domain->id, start_addr,
- size >> VTD_PAGE_SHIFT, 0))
- iommu_flush_write_buffer(iommu);
+ iommu_flush_iotlb_psi(iommu, domain->id, start_addr,
+ size >> VTD_PAGE_SHIFT);
/* free iova */
__free_iova(&domain->iovad, iova);
@@ -2423,7 +2771,7 @@
struct intel_iommu *iommu;
BUG_ON(dir == DMA_NONE);
- if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
+ if (iommu_no_mapping(pdev))
return intel_nontranslate_map_sg(hwdev, sglist, nelems, dir);
domain = get_valid_domain_for_dev(pdev);
@@ -2478,10 +2826,13 @@
offset += size;
}
- /* it's a non-present to present mapping */
- if (iommu_flush_iotlb_psi(iommu, domain->id,
- start_addr, offset >> VTD_PAGE_SHIFT, 1))
+ /* it's a non-present to present mapping. Only flush if caching mode */
+ if (cap_caching_mode(iommu->cap))
+ iommu_flush_iotlb_psi(iommu, 0, start_addr,
+ offset >> VTD_PAGE_SHIFT);
+ else
iommu_flush_write_buffer(iommu);
+
return nelems;
}
@@ -2640,9 +2991,9 @@
iommu_set_root_entry(iommu);
iommu->flush.flush_context(iommu, 0, 0, 0,
- DMA_CCMD_GLOBAL_INVL, 0);
+ DMA_CCMD_GLOBAL_INVL);
iommu->flush.flush_iotlb(iommu, 0, 0, 0,
- DMA_TLB_GLOBAL_FLUSH, 0);
+ DMA_TLB_GLOBAL_FLUSH);
iommu_disable_protect_mem_regions(iommu);
iommu_enable_translation(iommu);
}
@@ -2657,9 +3008,9 @@
for_each_active_iommu(iommu, drhd) {
iommu->flush.flush_context(iommu, 0, 0, 0,
- DMA_CCMD_GLOBAL_INVL, 0);
+ DMA_CCMD_GLOBAL_INVL);
iommu->flush.flush_iotlb(iommu, 0, 0, 0,
- DMA_TLB_GLOBAL_FLUSH, 0);
+ DMA_TLB_GLOBAL_FLUSH);
}
}
@@ -2782,7 +3133,7 @@
* Check the need for DMA-remapping initialization now.
* Above initialization will also be used by Interrupt-remapping.
*/
- if (no_iommu || swiotlb || dmar_disabled)
+ if (no_iommu || (swiotlb && !iommu_pass_through) || dmar_disabled)
return -ENODEV;
iommu_init_mempool();
@@ -2802,7 +3153,15 @@
init_timer(&unmap_timer);
force_iommu = 1;
- dma_ops = &intel_dma_ops;
+
+ if (!iommu_pass_through) {
+ printk(KERN_INFO
+ "Multi-level page-table translation for DMAR.\n");
+ dma_ops = &intel_dma_ops;
+ } else
+ printk(KERN_INFO
+ "DMAR: Pass through translation for DMAR.\n");
+
init_iommu_sysfs();
register_iommu(&intel_iommu_ops);
@@ -2810,31 +3169,6 @@
return 0;
}
-static int vm_domain_add_dev_info(struct dmar_domain *domain,
- struct pci_dev *pdev)
-{
- struct device_domain_info *info;
- unsigned long flags;
-
- info = alloc_devinfo_mem();
- if (!info)
- return -ENOMEM;
-
- info->segment = pci_domain_nr(pdev->bus);
- info->bus = pdev->bus->number;
- info->devfn = pdev->devfn;
- info->dev = pdev;
- info->domain = domain;
-
- spin_lock_irqsave(&device_domain_lock, flags);
- list_add(&info->link, &domain->devices);
- list_add(&info->global, &device_domain_list);
- pdev->dev.archdata.iommu = info;
- spin_unlock_irqrestore(&device_domain_lock, flags);
-
- return 0;
-}
-
static void iommu_detach_dependent_devices(struct intel_iommu *iommu,
struct pci_dev *pdev)
{
@@ -2862,7 +3196,7 @@
}
}
-static void vm_domain_remove_one_dev_info(struct dmar_domain *domain,
+static void domain_remove_one_dev_info(struct dmar_domain *domain,
struct pci_dev *pdev)
{
struct device_domain_info *info;
@@ -2888,6 +3222,7 @@
info->dev->dev.archdata.iommu = NULL;
spin_unlock_irqrestore(&device_domain_lock, flags);
+ iommu_disable_dev_iotlb(info);
iommu_detach_dev(iommu, info->bus, info->devfn);
iommu_detach_dependent_devices(iommu, pdev);
free_devinfo_mem(info);
@@ -2938,6 +3273,7 @@
spin_unlock_irqrestore(&device_domain_lock, flags1);
+ iommu_disable_dev_iotlb(info);
iommu = device_to_iommu(info->segment, info->bus, info->devfn);
iommu_detach_dev(iommu, info->bus, info->devfn);
iommu_detach_dependent_devices(iommu, info->dev);
@@ -2993,7 +3329,7 @@
return domain;
}
-static int vm_domain_init(struct dmar_domain *domain, int guest_width)
+static int md_domain_init(struct dmar_domain *domain, int guest_width)
{
int adjust_width;
@@ -3084,7 +3420,7 @@
"intel_iommu_domain_init: dmar_domain == NULL\n");
return -ENOMEM;
}
- if (vm_domain_init(dmar_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
+ if (md_domain_init(dmar_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
printk(KERN_ERR
"intel_iommu_domain_init() failed\n");
vm_domain_exit(dmar_domain);
@@ -3119,8 +3455,9 @@
old_domain = find_domain(pdev);
if (old_domain) {
- if (dmar_domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE)
- vm_domain_remove_one_dev_info(old_domain, pdev);
+ if (dmar_domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE ||
+ dmar_domain->flags & DOMAIN_FLAG_STATIC_IDENTITY)
+ domain_remove_one_dev_info(old_domain, pdev);
else
domain_remove_dev_info(old_domain);
}
@@ -3142,11 +3479,11 @@
return -EFAULT;
}
- ret = domain_context_mapping(dmar_domain, pdev);
+ ret = domain_add_dev_info(dmar_domain, pdev);
if (ret)
return ret;
- ret = vm_domain_add_dev_info(dmar_domain, pdev);
+ ret = domain_context_mapping(dmar_domain, pdev, CONTEXT_TT_MULTI_LEVEL);
return ret;
}
@@ -3156,7 +3493,7 @@
struct dmar_domain *dmar_domain = domain->priv;
struct pci_dev *pdev = to_pci_dev(dev);
- vm_domain_remove_one_dev_info(dmar_domain, pdev);
+ domain_remove_one_dev_info(dmar_domain, pdev);
}
static int intel_iommu_map_range(struct iommu_domain *domain,
diff --git a/drivers/pci/intr_remapping.c b/drivers/pci/intr_remapping.c
index 3a0cb0b..4f5b871 100644
--- a/drivers/pci/intr_remapping.c
+++ b/drivers/pci/intr_remapping.c
@@ -10,6 +10,8 @@
#include <linux/intel-iommu.h>
#include "intr_remapping.h"
#include <acpi/acpi.h>
+#include <asm/pci-direct.h>
+#include "pci.h"
static struct ioapic_scope ir_ioapic[MAX_IO_APICS];
static int ir_ioapic_num;
@@ -314,7 +316,8 @@
index = irq_iommu->irte_index + irq_iommu->sub_handle;
irte = &iommu->ir_table->base[index];
- set_64bit((unsigned long *)irte, irte_modified->low);
+ set_64bit((unsigned long *)&irte->low, irte_modified->low);
+ set_64bit((unsigned long *)&irte->high, irte_modified->high);
__iommu_flush_cache(iommu, irte, sizeof(*irte));
rc = qi_flush_iec(iommu, index, 0);
@@ -369,12 +372,32 @@
return drhd->iommu;
}
+static int clear_entries(struct irq_2_iommu *irq_iommu)
+{
+ struct irte *start, *entry, *end;
+ struct intel_iommu *iommu;
+ int index;
+
+ if (irq_iommu->sub_handle)
+ return 0;
+
+ iommu = irq_iommu->iommu;
+ index = irq_iommu->irte_index + irq_iommu->sub_handle;
+
+ start = iommu->ir_table->base + index;
+ end = start + (1 << irq_iommu->irte_mask);
+
+ for (entry = start; entry < end; entry++) {
+ set_64bit((unsigned long *)&entry->low, 0);
+ set_64bit((unsigned long *)&entry->high, 0);
+ }
+
+ return qi_flush_iec(iommu, index, irq_iommu->irte_mask);
+}
+
int free_irte(int irq)
{
int rc = 0;
- int index, i;
- struct irte *irte;
- struct intel_iommu *iommu;
struct irq_2_iommu *irq_iommu;
unsigned long flags;
@@ -385,16 +408,7 @@
return -1;
}
- iommu = irq_iommu->iommu;
-
- index = irq_iommu->irte_index + irq_iommu->sub_handle;
- irte = &iommu->ir_table->base[index];
-
- if (!irq_iommu->sub_handle) {
- for (i = 0; i < (1 << irq_iommu->irte_mask); i++)
- set_64bit((unsigned long *)(irte + i), 0);
- rc = qi_flush_iec(iommu, index, irq_iommu->irte_mask);
- }
+ rc = clear_entries(irq_iommu);
irq_iommu->iommu = NULL;
irq_iommu->irte_index = 0;
@@ -406,10 +420,95 @@
return rc;
}
+/*
+ * source validation type
+ */
+#define SVT_NO_VERIFY 0x0 /* no verification is required */
+#define SVT_VERIFY_SID_SQ 0x1 /* verify using SID and SQ fields */
+#define SVT_VERIFY_BUS 0x2 /* verify bus of request-id */
+
+/*
+ * source-id qualifier
+ */
+#define SQ_ALL_16 0x0 /* verify all 16 bits of request-id */
+#define SQ_13_IGNORE_1 0x1 /* verify most significant 13 bits, ignore
+ * the third least significant bit
+ */
+#define SQ_13_IGNORE_2 0x2 /* verify most significant 13 bits, ignore
+ * the second and third least significant bits
+ */
+#define SQ_13_IGNORE_3 0x3 /* verify most significant 13 bits, ignore
+ * the three least significant bits
+ */
+
+/*
+ * set SVT, SQ and SID fields of irte to verify
+ * source ids of interrupt requests
+ */
+static void set_irte_sid(struct irte *irte, unsigned int svt,
+ unsigned int sq, unsigned int sid)
+{
+ irte->svt = svt;
+ irte->sq = sq;
+ irte->sid = sid;
+}
+
+int set_ioapic_sid(struct irte *irte, int apic)
+{
+ int i;
+ u16 sid = 0;
+
+ if (!irte)
+ return -1;
+
+ for (i = 0; i < MAX_IO_APICS; i++) {
+ if (ir_ioapic[i].id == apic) {
+ sid = (ir_ioapic[i].bus << 8) | ir_ioapic[i].devfn;
+ break;
+ }
+ }
+
+ if (sid == 0) {
+ pr_warning("Failed to set source-id of IOAPIC (%d)\n", apic);
+ return -1;
+ }
+
+ set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_ALL_16, sid);
+
+ return 0;
+}
+
+int set_msi_sid(struct irte *irte, struct pci_dev *dev)
+{
+ struct pci_dev *bridge;
+
+ if (!irte || !dev)
+ return -1;
+
+ /* PCIe device or Root Complex integrated PCI device */
+ if (dev->is_pcie || !dev->bus->parent) {
+ set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_ALL_16,
+ (dev->bus->number << 8) | dev->devfn);
+ return 0;
+ }
+
+ bridge = pci_find_upstream_pcie_bridge(dev);
+ if (bridge) {
+ if (bridge->is_pcie) /* this is a PCIE-to-PCI/PCIX bridge */
+ set_irte_sid(irte, SVT_VERIFY_BUS, SQ_ALL_16,
+ (bridge->bus->number << 8) | dev->bus->number);
+ else /* this is a legacy PCI bridge */
+ set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_ALL_16,
+ (bridge->bus->number << 8) | bridge->devfn);
+ }
+
+ return 0;
+}
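
The source-id programmed by these helpers is the standard PCI requester-id: bus number in the upper byte, devfn in the lower byte. A short illustration with made-up numbers (not part of the patch):

	/* hypothetical: an MSI from a native PCIe function at 05:02.1 */
	u16 sid = (0x05 << 8) | PCI_DEVFN(2, 1);	/* 0x0511 */

	/*
	 * SQ_ALL_16 demands an exact requester-id match; SQ_13_IGNORE_3
	 * would ignore the three function-number bits, so 05:02.0 through
	 * 05:02.7 would all be accepted.
	 */
	set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_ALL_16, sid);
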
+
static void iommu_set_intr_remapping(struct intel_iommu *iommu, int mode)
{
u64 addr;
- u32 cmd, sts;
+ u32 sts;
unsigned long flags;
addr = virt_to_phys((void *)iommu->ir_table->base);
@@ -420,9 +519,8 @@
(addr) | IR_X2APIC_MODE(mode) | INTR_REMAP_TABLE_REG_SIZE);
/* Set interrupt-remapping table pointer */
- cmd = iommu->gcmd | DMA_GCMD_SIRTP;
iommu->gcmd |= DMA_GCMD_SIRTP;
- writel(cmd, iommu->reg + DMAR_GCMD_REG);
+ writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
readl, (sts & DMA_GSTS_IRTPS), sts);
@@ -437,9 +535,8 @@
spin_lock_irqsave(&iommu->register_lock, flags);
/* Enable interrupt-remapping */
- cmd = iommu->gcmd | DMA_GCMD_IRE;
iommu->gcmd |= DMA_GCMD_IRE;
- writel(cmd, iommu->reg + DMAR_GCMD_REG);
+ writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
readl, (sts & DMA_GSTS_IRES), sts);
@@ -614,6 +711,35 @@
return -1;
}
+static void ir_parse_one_ioapic_scope(struct acpi_dmar_device_scope *scope,
+ struct intel_iommu *iommu)
+{
+ struct acpi_dmar_pci_path *path;
+ u8 bus;
+ int count;
+
+ bus = scope->bus;
+ path = (struct acpi_dmar_pci_path *)(scope + 1);
+ count = (scope->length - sizeof(struct acpi_dmar_device_scope))
+ / sizeof(struct acpi_dmar_pci_path);
+
+ while (--count > 0) {
+ /*
+ * Access PCI config space directly because the PCI
+ * subsystem isn't initialized yet.
+ */
+ bus = read_pci_config_byte(bus, path->dev, path->fn,
+ PCI_SECONDARY_BUS);
+ path++;
+ }
+
+ ir_ioapic[ir_ioapic_num].bus = bus;
+ ir_ioapic[ir_ioapic_num].devfn = PCI_DEVFN(path->dev, path->fn);
+ ir_ioapic[ir_ioapic_num].iommu = iommu;
+ ir_ioapic[ir_ioapic_num].id = scope->enumeration_id;
+ ir_ioapic_num++;
+}
+
static int ir_parse_ioapic_scope(struct acpi_dmar_header *header,
struct intel_iommu *iommu)
{
@@ -638,9 +764,7 @@
" 0x%Lx\n", scope->enumeration_id,
drhd->address);
- ir_ioapic[ir_ioapic_num].iommu = iommu;
- ir_ioapic[ir_ioapic_num].id = scope->enumeration_id;
- ir_ioapic_num++;
+ ir_parse_one_ioapic_scope(scope, iommu);
}
start += scope->length;
}
diff --git a/drivers/pci/intr_remapping.h b/drivers/pci/intr_remapping.h
index ca48f0d..63a263c 100644
--- a/drivers/pci/intr_remapping.h
+++ b/drivers/pci/intr_remapping.h
@@ -3,6 +3,8 @@
struct ioapic_scope {
struct intel_iommu *iommu;
unsigned int id;
+ unsigned int bus; /* PCI bus number */
+ unsigned int devfn; /* PCI devfn number */
};
#define IR_X2APIC_MODE(mode) (mode ? (1 << 11) : 0)
diff --git a/drivers/pci/iov.c b/drivers/pci/iov.c
index b497daa..e3a8721 100644
--- a/drivers/pci/iov.c
+++ b/drivers/pci/iov.c
@@ -5,6 +5,7 @@
*
* PCI Express I/O Virtualization (IOV) support.
* Single Root IOV 1.0
+ * Address Translation Service 1.0
*/
#include <linux/pci.h>
@@ -110,7 +111,7 @@
}
if (reset)
- pci_execute_reset_function(virtfn);
+ __pci_reset_function(virtfn);
pci_device_add(virtfn, virtfn->bus);
mutex_unlock(&iov->dev->sriov->lock);
@@ -164,7 +165,7 @@
if (reset) {
device_release_driver(&virtfn->dev);
- pci_execute_reset_function(virtfn);
+ __pci_reset_function(virtfn);
}
sprintf(buf, "virtfn%u", id);
@@ -487,13 +488,15 @@
iov->self = dev;
pci_read_config_dword(dev, pos + PCI_SRIOV_CAP, &iov->cap);
pci_read_config_byte(dev, pos + PCI_SRIOV_FUNC_LINK, &iov->link);
+ if (dev->pcie_type == PCI_EXP_TYPE_RC_END)
+ iov->link = PCI_DEVFN(PCI_SLOT(dev->devfn), iov->link);
if (pdev)
iov->dev = pci_dev_get(pdev);
- else {
+ else
iov->dev = dev;
- mutex_init(&iov->lock);
- }
+
+ mutex_init(&iov->lock);
dev->sriov = iov;
dev->is_physfn = 1;
@@ -513,11 +516,11 @@
{
BUG_ON(dev->sriov->nr_virtfn);
- if (dev == dev->sriov->dev)
- mutex_destroy(&dev->sriov->lock);
- else
+ if (dev != dev->sriov->dev)
pci_dev_put(dev->sriov->dev);
+ mutex_destroy(&dev->sriov->lock);
+
kfree(dev->sriov);
dev->sriov = NULL;
}
@@ -679,3 +682,145 @@
return sriov_migration(dev) ? IRQ_HANDLED : IRQ_NONE;
}
EXPORT_SYMBOL_GPL(pci_sriov_migration);
+
+static int ats_alloc_one(struct pci_dev *dev, int ps)
+{
+ int pos;
+ u16 cap;
+ struct pci_ats *ats;
+
+ pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ATS);
+ if (!pos)
+ return -ENODEV;
+
+ ats = kzalloc(sizeof(*ats), GFP_KERNEL);
+ if (!ats)
+ return -ENOMEM;
+
+ ats->pos = pos;
+ ats->stu = ps;
+ pci_read_config_word(dev, pos + PCI_ATS_CAP, &cap);
+ ats->qdep = PCI_ATS_CAP_QDEP(cap) ? PCI_ATS_CAP_QDEP(cap) :
+ PCI_ATS_MAX_QDEP;
+ dev->ats = ats;
+
+ return 0;
+}
+
+static void ats_free_one(struct pci_dev *dev)
+{
+ kfree(dev->ats);
+ dev->ats = NULL;
+}
+
+/**
+ * pci_enable_ats - enable the ATS capability
+ * @dev: the PCI device
+ * @ps: the IOMMU page shift
+ *
+ * Returns 0 on success, or negative on failure.
+ */
+int pci_enable_ats(struct pci_dev *dev, int ps)
+{
+ int rc;
+ u16 ctrl;
+
+ BUG_ON(dev->ats && dev->ats->is_enabled);
+
+ if (ps < PCI_ATS_MIN_STU)
+ return -EINVAL;
+
+ if (dev->is_physfn || dev->is_virtfn) {
+ struct pci_dev *pdev = dev->is_physfn ? dev : dev->physfn;
+
+ mutex_lock(&pdev->sriov->lock);
+ if (pdev->ats)
+ rc = pdev->ats->stu == ps ? 0 : -EINVAL;
+ else
+ rc = ats_alloc_one(pdev, ps);
+
+ if (!rc)
+ pdev->ats->ref_cnt++;
+ mutex_unlock(&pdev->sriov->lock);
+ if (rc)
+ return rc;
+ }
+
+ if (!dev->is_physfn) {
+ rc = ats_alloc_one(dev, ps);
+ if (rc)
+ return rc;
+ }
+
+ ctrl = PCI_ATS_CTRL_ENABLE;
+ if (!dev->is_virtfn)
+ ctrl |= PCI_ATS_CTRL_STU(ps - PCI_ATS_MIN_STU);
+ pci_write_config_word(dev, dev->ats->pos + PCI_ATS_CTRL, ctrl);
+
+ dev->ats->is_enabled = 1;
+
+ return 0;
+}
+
+/**
+ * pci_disable_ats - disable the ATS capability
+ * @dev: the PCI device
+ */
+void pci_disable_ats(struct pci_dev *dev)
+{
+ u16 ctrl;
+
+ BUG_ON(!dev->ats || !dev->ats->is_enabled);
+
+ pci_read_config_word(dev, dev->ats->pos + PCI_ATS_CTRL, &ctrl);
+ ctrl &= ~PCI_ATS_CTRL_ENABLE;
+ pci_write_config_word(dev, dev->ats->pos + PCI_ATS_CTRL, ctrl);
+
+ dev->ats->is_enabled = 0;
+
+ if (dev->is_physfn || dev->is_virtfn) {
+ struct pci_dev *pdev = dev->is_physfn ? dev : dev->physfn;
+
+ mutex_lock(&pdev->sriov->lock);
+ pdev->ats->ref_cnt--;
+ if (!pdev->ats->ref_cnt)
+ ats_free_one(pdev);
+ mutex_unlock(&pdev->sriov->lock);
+ }
+
+ if (!dev->is_physfn)
+ ats_free_one(dev);
+}
+
+/**
+ * pci_ats_queue_depth - query the ATS Invalidate Queue Depth
+ * @dev: the PCI device
+ *
+ * Returns the queue depth on success, or negative on failure.
+ *
+ * The ATS spec uses 0 in the Invalidate Queue Depth field to
+ * indicate that the function can accept 32 Invalidate Requests.
+ * But here we use the `real' values (i.e. 1~32) for the Queue
+ * Depth; and 0 indicates the function shares the Queue with
+ * other functions (doesn't exclusively own a Queue).
+ */
+int pci_ats_queue_depth(struct pci_dev *dev)
+{
+ int pos;
+ u16 cap;
+
+ if (dev->is_virtfn)
+ return 0;
+
+ if (dev->ats)
+ return dev->ats->qdep;
+
+ pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ATS);
+ if (!pos)
+ return -ENODEV;
+
+ pci_read_config_word(dev, pos + PCI_ATS_CAP, &cap);
+
+ return PCI_ATS_CAP_QDEP(cap) ? PCI_ATS_CAP_QDEP(cap) :
+ PCI_ATS_MAX_QDEP;
+}
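
A hedged usage sketch of the new ATS helpers, roughly as an IOMMU driver would call them; example_enable_dev_iotlb() and its page_shift parameter are illustrative, not part of this patch:

	static int example_enable_dev_iotlb(struct pci_dev *pdev, int page_shift)
	{
		int rc, qdep;

		/* page_shift must be at least PCI_ATS_MIN_STU */
		rc = pci_enable_ats(pdev, page_shift);
		if (rc)
			return rc;	/* no ATS capability, or STU mismatch */

		/* 1..32 entries, or 0 if the queue is shared with other functions */
		qdep = pci_ats_queue_depth(pdev);

		/* ... use qdep when queueing device-IOTLB invalidations ... */
		return 0;
	}

	/* and on teardown: */
	if (pci_ats_enabled(pdev))
		pci_disable_ats(pdev);
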
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
index 3627732..d9f06fb 100644
--- a/drivers/pci/msi.c
+++ b/drivers/pci/msi.c
@@ -75,22 +75,17 @@
}
#endif
-static void __msi_set_enable(struct pci_dev *dev, int pos, int enable)
+static void msi_set_enable(struct pci_dev *dev, int pos, int enable)
{
u16 control;
- if (pos) {
- pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &control);
- control &= ~PCI_MSI_FLAGS_ENABLE;
- if (enable)
- control |= PCI_MSI_FLAGS_ENABLE;
- pci_write_config_word(dev, pos + PCI_MSI_FLAGS, control);
- }
-}
+ BUG_ON(!pos);
-static void msi_set_enable(struct pci_dev *dev, int enable)
-{
- __msi_set_enable(dev, pci_find_capability(dev, PCI_CAP_ID_MSI), enable);
+ pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &control);
+ control &= ~PCI_MSI_FLAGS_ENABLE;
+ if (enable)
+ control |= PCI_MSI_FLAGS_ENABLE;
+ pci_write_config_word(dev, pos + PCI_MSI_FLAGS, control);
}
static void msix_set_enable(struct pci_dev *dev, int enable)
@@ -131,9 +126,6 @@
* mask all MSI interrupts by clearing the MSI enable bit does not work
* reliably as devices without an INTx disable bit will then generate a
* level IRQ which will never be cleared.
- *
- * Returns 1 if it succeeded in masking the interrupt and 0 if the device
- * doesn't support MSI masking.
*/
static void msi_mask_irq(struct msi_desc *desc, u32 mask, u32 flag)
{
@@ -303,7 +295,7 @@
pos = entry->msi_attrib.pos;
pci_intx_for_msi(dev, 0);
- msi_set_enable(dev, 0);
+ msi_set_enable(dev, pos, 0);
write_msi_msg(dev->irq, &entry->msg);
pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &control);
@@ -321,22 +313,22 @@
if (!dev->msix_enabled)
return;
+ BUG_ON(list_empty(&dev->msi_list));
+ entry = list_entry(dev->msi_list.next, struct msi_desc, list);
+ pos = entry->msi_attrib.pos;
+ pci_read_config_word(dev, pos + PCI_MSIX_FLAGS, &control);
/* route the table */
pci_intx_for_msi(dev, 0);
- msix_set_enable(dev, 0);
+ control |= PCI_MSIX_FLAGS_ENABLE | PCI_MSIX_FLAGS_MASKALL;
+ pci_write_config_word(dev, pos + PCI_MSIX_FLAGS, control);
list_for_each_entry(entry, &dev->msi_list, list) {
write_msi_msg(entry->irq, &entry->msg);
msix_mask_irq(entry, entry->masked);
}
- BUG_ON(list_empty(&dev->msi_list));
- entry = list_entry(dev->msi_list.next, struct msi_desc, list);
- pos = entry->msi_attrib.pos;
- pci_read_config_word(dev, pos + PCI_MSIX_FLAGS, &control);
control &= ~PCI_MSIX_FLAGS_MASKALL;
- control |= PCI_MSIX_FLAGS_ENABLE;
pci_write_config_word(dev, pos + PCI_MSIX_FLAGS, control);
}
@@ -365,9 +357,9 @@
u16 control;
unsigned mask;
- msi_set_enable(dev, 0); /* Ensure msi is disabled as I set it up */
-
pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
+ msi_set_enable(dev, pos, 0); /* Disable MSI during set up */
+
pci_read_config_word(dev, msi_control_reg(pos), &control);
/* MSI Entry Initialization */
entry = alloc_msi_entry(dev);
@@ -381,7 +373,7 @@
entry->msi_attrib.default_irq = dev->irq; /* Save IOAPIC IRQ */
entry->msi_attrib.pos = pos;
- entry->mask_pos = msi_mask_bits_reg(pos, entry->msi_attrib.is_64);
+ entry->mask_pos = msi_mask_reg(pos, entry->msi_attrib.is_64);
/* All MSIs are unmasked by default, Mask them all */
if (entry->msi_attrib.maskbit)
pci_read_config_dword(dev, entry->mask_pos, &entry->masked);
@@ -399,7 +391,7 @@
/* Set MSI enabled bits */
pci_intx_for_msi(dev, 0);
- msi_set_enable(dev, 1);
+ msi_set_enable(dev, pos, 1);
dev->msi_enabled = 1;
dev->irq = entry->irq;
@@ -427,11 +419,14 @@
u8 bir;
void __iomem *base;
- msix_set_enable(dev, 0);/* Ensure msix is disabled as I set it up */
-
pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
+ pci_read_config_word(dev, pos + PCI_MSIX_FLAGS, &control);
+
+ /* Ensure MSI-X is disabled while it is set up */
+ control &= ~PCI_MSIX_FLAGS_ENABLE;
+ pci_write_config_word(dev, pos + PCI_MSIX_FLAGS, control);
+
/* Request & Map MSI-X table region */
- pci_read_config_word(dev, msi_control_reg(pos), &control);
nr_entries = multi_msix_capable(control);
pci_read_config_dword(dev, msix_table_offset_reg(pos), &table_offset);
@@ -442,7 +437,6 @@
if (base == NULL)
return -ENOMEM;
- /* MSI-X Table Initialization */
for (i = 0; i < nvec; i++) {
entry = alloc_msi_entry(dev);
if (!entry)
@@ -455,7 +449,6 @@
entry->msi_attrib.default_irq = dev->irq;
entry->msi_attrib.pos = pos;
entry->mask_base = base;
- msix_mask_irq(entry, 1);
list_add_tail(&entry->list, &dev->msi_list);
}
@@ -480,22 +473,31 @@
return ret;
}
+ /*
+ * Some devices require MSI-X to be enabled before we can touch the
+ * MSI-X registers. We need to mask all the vectors to prevent
+ * interrupts coming in before they're fully set up.
+ */
+ control |= PCI_MSIX_FLAGS_MASKALL | PCI_MSIX_FLAGS_ENABLE;
+ pci_write_config_word(dev, pos + PCI_MSIX_FLAGS, control);
+
i = 0;
list_for_each_entry(entry, &dev->msi_list, list) {
entries[i].vector = entry->irq;
set_irq_msi(entry->irq, entry);
+ j = entries[i].entry;
+ entry->masked = readl(base + j * PCI_MSIX_ENTRY_SIZE +
+ PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET);
+ msix_mask_irq(entry, 1);
i++;
}
- /* Set MSI-X enabled bits */
+
+ /* Set MSI-X enabled bits and unmask the function */
pci_intx_for_msi(dev, 0);
- msix_set_enable(dev, 1);
dev->msix_enabled = 1;
- list_for_each_entry(entry, &dev->msi_list, list) {
- int vector = entry->msi_attrib.entry_nr;
- entry->masked = readl(base + vector * PCI_MSIX_ENTRY_SIZE +
- PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET);
- }
+ control &= ~PCI_MSIX_FLAGS_MASKALL;
+ pci_write_config_word(dev, pos + PCI_MSIX_FLAGS, control);
return 0;
}
@@ -596,17 +598,20 @@
struct msi_desc *desc;
u32 mask;
u16 ctrl;
+ unsigned pos;
if (!pci_msi_enable || !dev || !dev->msi_enabled)
return;
- msi_set_enable(dev, 0);
+ BUG_ON(list_empty(&dev->msi_list));
+ desc = list_first_entry(&dev->msi_list, struct msi_desc, list);
+ pos = desc->msi_attrib.pos;
+
+ msi_set_enable(dev, pos, 0);
pci_intx_for_msi(dev, 1);
dev->msi_enabled = 0;
- BUG_ON(list_empty(&dev->msi_list));
- desc = list_first_entry(&dev->msi_list, struct msi_desc, list);
- pci_read_config_word(dev, desc->msi_attrib.pos + PCI_MSI_FLAGS, &ctrl);
+ pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &ctrl);
mask = msi_capable_mask(ctrl);
msi_mask_irq(desc, mask, ~mask);
@@ -648,10 +653,7 @@
list_for_each_entry_safe(entry, tmp, &dev->msi_list, list) {
if (entry->msi_attrib.is_msix) {
- writel(1, entry->mask_base + entry->msi_attrib.entry_nr
- * PCI_MSIX_ENTRY_SIZE
- + PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET);
-
+ msix_mask_irq(entry, 1);
if (list_is_last(&entry->list, &dev->msi_list))
iounmap(entry->mask_base);
}
@@ -691,8 +693,8 @@
* indicates the successful configuration of MSI-X capability structure
* with new allocated MSI-X irqs. A return of < 0 indicates a failure.
* Or a return of > 0 indicates that driver request is exceeding the number
- * of irqs available. Driver should use the returned value to re-send
- * its request.
+ * of irqs or MSI-X vectors available. Driver should use the returned value to
+ * re-send its request.
**/
int pci_enable_msix(struct pci_dev* dev, struct msix_entry *entries, int nvec)
{
@@ -708,7 +710,7 @@
nr_entries = pci_msix_table_size(dev);
if (nvec > nr_entries)
- return -EINVAL;
+ return nr_entries;
/* Check for any invalid entries */
for (i = 0; i < nvec; i++) {
diff --git a/drivers/pci/msi.h b/drivers/pci/msi.h
index 71f4df2..a066284 100644
--- a/drivers/pci/msi.h
+++ b/drivers/pci/msi.h
@@ -16,21 +16,15 @@
#define msi_lower_address_reg(base) (base + PCI_MSI_ADDRESS_LO)
#define msi_upper_address_reg(base) (base + PCI_MSI_ADDRESS_HI)
#define msi_data_reg(base, is64bit) \
- ( (is64bit == 1) ? base+PCI_MSI_DATA_64 : base+PCI_MSI_DATA_32 )
-#define msi_mask_bits_reg(base, is64bit) \
- ( (is64bit == 1) ? base+PCI_MSI_MASK_BIT : base+PCI_MSI_MASK_BIT-4)
-#define msi_disable(control) control &= ~PCI_MSI_FLAGS_ENABLE
+ (base + ((is64bit == 1) ? PCI_MSI_DATA_64 : PCI_MSI_DATA_32))
+#define msi_mask_reg(base, is64bit) \
+ (base + ((is64bit == 1) ? PCI_MSI_MASK_64 : PCI_MSI_MASK_32))
#define is_64bit_address(control) (!!(control & PCI_MSI_FLAGS_64BIT))
#define is_mask_bit_support(control) (!!(control & PCI_MSI_FLAGS_MASKBIT))
#define msix_table_offset_reg(base) (base + 0x04)
#define msix_pba_offset_reg(base) (base + 0x08)
-#define msix_enable(control) control |= PCI_MSIX_FLAGS_ENABLE
-#define msix_disable(control) control &= ~PCI_MSIX_FLAGS_ENABLE
#define msix_table_size(control) ((control & PCI_MSIX_FLAGS_QSIZE)+1)
-#define multi_msix_capable msix_table_size
-#define msix_unmask(address) (address & ~PCI_MSIX_FLAGS_BITMASK)
-#define msix_mask(address) (address | PCI_MSIX_FLAGS_BITMASK)
-#define msix_is_pending(address) (address & PCI_MSIX_FLAGS_PENDMASK)
+#define multi_msix_capable(control) msix_table_size((control))
#endif /* MSI_H */
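
For reference, msi_mask_reg() simply tracks the MSI capability layout from the PCI spec: with 64-bit addressing the message-data register moves one dword further into the capability, and the optional mask-bits register follows it. A small illustration (the capability offset 0x50 is hypothetical; the offsets follow the spec layout the PCI_MSI_MASK_32/64 definitions map to):

	int pos = 0x50;				/* hypothetical capability offset */

	/* 64-bit, maskable MSI: mask bits live at pos + 0x10 = 0x60 */
	entry->mask_pos = msi_mask_reg(pos, 1);

	/* a 32-bit capability would put them at pos + 0x0c = 0x5c */
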
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 07bbb9b..6c93af5 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -485,6 +485,8 @@
pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
pmcsr |= state;
break;
+ case PCI_D3hot:
+ case PCI_D3cold:
case PCI_UNKNOWN: /* Boot-up */
if ((pmcsr & PCI_PM_CTRL_STATE_MASK) == PCI_D3hot
&& !(pmcsr & PCI_PM_CTRL_NO_SOFT_RESET))
@@ -1208,7 +1210,7 @@
* Error code depending on the platform is returned if both the platform and
* the native mechanism fail to enable the generation of wake-up events
*/
-int pci_enable_wake(struct pci_dev *dev, pci_power_t state, int enable)
+int pci_enable_wake(struct pci_dev *dev, pci_power_t state, bool enable)
{
int error = 0;
bool pme_done = false;
@@ -1287,15 +1289,14 @@
default:
target_state = state;
}
+ } else if (!dev->pm_cap) {
+ target_state = PCI_D0;
} else if (device_may_wakeup(&dev->dev)) {
/*
* Find the deepest state from which the device can generate
* wake-up events, make it the target state and enable device
* to generate PME#.
*/
- if (!dev->pm_cap)
- return PCI_POWER_ERROR;
-
if (dev->pme_support) {
while (target_state
&& !(dev->pme_support & (1 << target_state)))
@@ -1532,7 +1533,7 @@
if (!pin)
return -1;
- while (dev->bus->parent) {
+ while (!pci_is_root_bus(dev->bus)) {
pin = pci_swizzle_interrupt_pin(dev, pin);
dev = dev->bus->self;
}
@@ -1552,7 +1553,7 @@
{
u8 pin = *pinp;
- while (dev->bus->parent) {
+ while (!pci_is_root_bus(dev->bus)) {
pin = pci_swizzle_interrupt_pin(dev, pin);
dev = dev->bus->self;
}
@@ -2058,111 +2059,177 @@
EXPORT_SYMBOL(pci_set_dma_seg_boundary);
#endif
-static int __pcie_flr(struct pci_dev *dev, int probe)
+static int pcie_flr(struct pci_dev *dev, int probe)
{
- u16 status;
+ int i;
+ int pos;
u32 cap;
- int exppos = pci_find_capability(dev, PCI_CAP_ID_EXP);
+ u16 status;
- if (!exppos)
+ pos = pci_find_capability(dev, PCI_CAP_ID_EXP);
+ if (!pos)
return -ENOTTY;
- pci_read_config_dword(dev, exppos + PCI_EXP_DEVCAP, &cap);
+
+ pci_read_config_dword(dev, pos + PCI_EXP_DEVCAP, &cap);
if (!(cap & PCI_EXP_DEVCAP_FLR))
return -ENOTTY;
if (probe)
return 0;
- pci_block_user_cfg_access(dev);
-
/* Wait for Transaction Pending bit clean */
- pci_read_config_word(dev, exppos + PCI_EXP_DEVSTA, &status);
- if (!(status & PCI_EXP_DEVSTA_TRPND))
- goto transaction_done;
+ for (i = 0; i < 4; i++) {
+ if (i)
+ msleep((1 << (i - 1)) * 100);
- msleep(100);
- pci_read_config_word(dev, exppos + PCI_EXP_DEVSTA, &status);
- if (!(status & PCI_EXP_DEVSTA_TRPND))
- goto transaction_done;
+ pci_read_config_word(dev, pos + PCI_EXP_DEVSTA, &status);
+ if (!(status & PCI_EXP_DEVSTA_TRPND))
+ goto clear;
+ }
- dev_info(&dev->dev, "Busy after 100ms while trying to reset; "
- "sleeping for 1 second\n");
- ssleep(1);
- pci_read_config_word(dev, exppos + PCI_EXP_DEVSTA, &status);
- if (status & PCI_EXP_DEVSTA_TRPND)
- dev_info(&dev->dev, "Still busy after 1s; "
- "proceeding with reset anyway\n");
+ dev_err(&dev->dev, "transaction is not cleared; "
+ "proceeding with reset anyway\n");
-transaction_done:
- pci_write_config_word(dev, exppos + PCI_EXP_DEVCTL,
+clear:
+ pci_write_config_word(dev, pos + PCI_EXP_DEVCTL,
PCI_EXP_DEVCTL_BCR_FLR);
- mdelay(100);
+ msleep(100);
- pci_unblock_user_cfg_access(dev);
return 0;
}
-static int __pci_af_flr(struct pci_dev *dev, int probe)
+static int pci_af_flr(struct pci_dev *dev, int probe)
{
- int cappos = pci_find_capability(dev, PCI_CAP_ID_AF);
- u8 status;
+ int i;
+ int pos;
u8 cap;
+ u8 status;
- if (!cappos)
+ pos = pci_find_capability(dev, PCI_CAP_ID_AF);
+ if (!pos)
return -ENOTTY;
- pci_read_config_byte(dev, cappos + PCI_AF_CAP, &cap);
+
+ pci_read_config_byte(dev, pos + PCI_AF_CAP, &cap);
if (!(cap & PCI_AF_CAP_TP) || !(cap & PCI_AF_CAP_FLR))
return -ENOTTY;
if (probe)
return 0;
- pci_block_user_cfg_access(dev);
-
/* Wait for Transaction Pending bit clean */
- pci_read_config_byte(dev, cappos + PCI_AF_STATUS, &status);
- if (!(status & PCI_AF_STATUS_TP))
- goto transaction_done;
+ for (i = 0; i < 4; i++) {
+ if (i)
+ msleep((1 << (i - 1)) * 100);
+ pci_read_config_byte(dev, pos + PCI_AF_STATUS, &status);
+ if (!(status & PCI_AF_STATUS_TP))
+ goto clear;
+ }
+
+ dev_err(&dev->dev, "transaction is not cleared; "
+ "proceeding with reset anyway\n");
+
+clear:
+ pci_write_config_byte(dev, pos + PCI_AF_CTRL, PCI_AF_CTRL_FLR);
msleep(100);
- pci_read_config_byte(dev, cappos + PCI_AF_STATUS, &status);
- if (!(status & PCI_AF_STATUS_TP))
- goto transaction_done;
- dev_info(&dev->dev, "Busy after 100ms while trying to"
- " reset; sleeping for 1 second\n");
- ssleep(1);
- pci_read_config_byte(dev, cappos + PCI_AF_STATUS, &status);
- if (status & PCI_AF_STATUS_TP)
- dev_info(&dev->dev, "Still busy after 1s; "
- "proceeding with reset anyway\n");
-
-transaction_done:
- pci_write_config_byte(dev, cappos + PCI_AF_CTRL, PCI_AF_CTRL_FLR);
- mdelay(100);
-
- pci_unblock_user_cfg_access(dev);
return 0;
}
-static int __pci_reset_function(struct pci_dev *pdev, int probe)
+static int pci_pm_reset(struct pci_dev *dev, int probe)
{
- int res;
+ u16 csr;
- res = __pcie_flr(pdev, probe);
- if (res != -ENOTTY)
- return res;
+ if (!dev->pm_cap)
+ return -ENOTTY;
- res = __pci_af_flr(pdev, probe);
- if (res != -ENOTTY)
- return res;
+ pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &csr);
+ if (csr & PCI_PM_CTRL_NO_SOFT_RESET)
+ return -ENOTTY;
- return res;
+ if (probe)
+ return 0;
+
+ if (dev->current_state != PCI_D0)
+ return -EINVAL;
+
+ csr &= ~PCI_PM_CTRL_STATE_MASK;
+ csr |= PCI_D3hot;
+ pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, csr);
+ msleep(pci_pm_d3_delay);
+
+ csr &= ~PCI_PM_CTRL_STATE_MASK;
+ csr |= PCI_D0;
+ pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, csr);
+ msleep(pci_pm_d3_delay);
+
+ return 0;
+}
+
+static int pci_parent_bus_reset(struct pci_dev *dev, int probe)
+{
+ u16 ctrl;
+ struct pci_dev *pdev;
+
+ if (dev->subordinate)
+ return -ENOTTY;
+
+ list_for_each_entry(pdev, &dev->bus->devices, bus_list)
+ if (pdev != dev)
+ return -ENOTTY;
+
+ if (probe)
+ return 0;
+
+ pci_read_config_word(dev->bus->self, PCI_BRIDGE_CONTROL, &ctrl);
+ ctrl |= PCI_BRIDGE_CTL_BUS_RESET;
+ pci_write_config_word(dev->bus->self, PCI_BRIDGE_CONTROL, ctrl);
+ msleep(100);
+
+ ctrl &= ~PCI_BRIDGE_CTL_BUS_RESET;
+ pci_write_config_word(dev->bus->self, PCI_BRIDGE_CONTROL, ctrl);
+ msleep(100);
+
+ return 0;
+}
+
+static int pci_dev_reset(struct pci_dev *dev, int probe)
+{
+ int rc;
+
+ might_sleep();
+
+ if (!probe) {
+ pci_block_user_cfg_access(dev);
+ /* block PM suspend, driver probe, etc. */
+ down(&dev->dev.sem);
+ }
+
+ rc = pcie_flr(dev, probe);
+ if (rc != -ENOTTY)
+ goto done;
+
+ rc = pci_af_flr(dev, probe);
+ if (rc != -ENOTTY)
+ goto done;
+
+ rc = pci_pm_reset(dev, probe);
+ if (rc != -ENOTTY)
+ goto done;
+
+ rc = pci_parent_bus_reset(dev, probe);
+done:
+ if (!probe) {
+ up(&dev->dev.sem);
+ pci_unblock_user_cfg_access(dev);
+ }
+
+ return rc;
}
/**
- * pci_execute_reset_function() - Reset a PCI device function
- * @dev: Device function to reset
+ * __pci_reset_function - reset a PCI device function
+ * @dev: PCI device to reset
*
* Some devices allow an individual function to be reset without affecting
* other functions in the same device. The PCI device must be responsive
@@ -2174,18 +2241,18 @@
* device including MSI, bus mastering, BARs, decoding IO and memory spaces,
* etc.
*
- * Returns 0 if the device function was successfully reset or -ENOTTY if the
+ * Returns 0 if the device function was successfully reset or negative if the
* device doesn't support resetting a single function.
*/
-int pci_execute_reset_function(struct pci_dev *dev)
+int __pci_reset_function(struct pci_dev *dev)
{
- return __pci_reset_function(dev, 0);
+ return pci_dev_reset(dev, 0);
}
-EXPORT_SYMBOL_GPL(pci_execute_reset_function);
+EXPORT_SYMBOL_GPL(__pci_reset_function);
/**
- * pci_reset_function() - quiesce and reset a PCI device function
- * @dev: Device function to reset
+ * pci_reset_function - quiesce and reset a PCI device function
+ * @dev: PCI device to reset
*
* Some devices allow an individual function to be reset without affecting
* other functions in the same device. The PCI device must be responsive
@@ -2193,32 +2260,33 @@
*
* This function does not just reset the PCI portion of a device, but
* clears all the state associated with the device. This function differs
- * from pci_execute_reset_function in that it saves and restores device state
+ * from __pci_reset_function in that it saves and restores device state
* over the reset.
*
- * Returns 0 if the device function was successfully reset or -ENOTTY if the
+ * Returns 0 if the device function was successfully reset or negative if the
* device doesn't support resetting a single function.
*/
int pci_reset_function(struct pci_dev *dev)
{
- int r = __pci_reset_function(dev, 1);
+ int rc;
- if (r < 0)
- return r;
+ rc = pci_dev_reset(dev, 1);
+ if (rc)
+ return rc;
- if (!dev->msi_enabled && !dev->msix_enabled && dev->irq != 0)
- disable_irq(dev->irq);
pci_save_state(dev);
+ /*
+ * both INTx and MSI are disabled after the Interrupt Disable bit
+ * is set and the Bus Master bit is cleared.
+ */
pci_write_config_word(dev, PCI_COMMAND, PCI_COMMAND_INTX_DISABLE);
- r = pci_execute_reset_function(dev);
+ rc = pci_dev_reset(dev, 0);
pci_restore_state(dev);
- if (!dev->msi_enabled && !dev->msix_enabled && dev->irq != 0)
- enable_irq(dev->irq);
- return r;
+ return rc;
}
EXPORT_SYMBOL_GPL(pci_reset_function);
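
A hedged sketch of how a caller might pick between the two exported resets; example_reset() is illustrative, not part of this patch:

	static int example_reset(struct pci_dev *pdev, bool already_quiesced)
	{
		/*
		 * __pci_reset_function() performs only the reset; the caller is
		 * expected to have quiesced the device and saved whatever state
		 * it needs.  pci_reset_function() additionally saves and restores
		 * config state around the reset.
		 */
		if (already_quiesced)
			return __pci_reset_function(pdev);

		return pci_reset_function(pdev);
	}
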
@@ -2591,6 +2659,8 @@
} else if (!strncmp(str, "resource_alignment=", 19)) {
pci_set_resource_alignment_param(str + 19,
strlen(str + 19));
+ } else if (!strncmp(str, "ecrc=", 5)) {
+ pcie_ecrc_get_policy(str + 5);
} else {
printk(KERN_ERR "PCI: Unknown option `%s'\n",
str);
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index d03f6b9..f73bcbe 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -229,6 +229,15 @@
u8 __iomem *mstate; /* VF Migration State Array */
};
+/* Address Translation Service */
+struct pci_ats {
+ int pos; /* capability position */
+ int stu; /* Smallest Translation Unit */
+ int qdep; /* Invalidate Queue Depth */
+ int ref_cnt; /* Physical Function reference count */
+ unsigned int is_enabled:1; /* Enable bit is set */
+};
+
#ifdef CONFIG_PCI_IOV
extern int pci_iov_init(struct pci_dev *dev);
extern void pci_iov_release(struct pci_dev *dev);
@@ -236,6 +245,20 @@
enum pci_bar_type *type);
extern void pci_restore_iov_state(struct pci_dev *dev);
extern int pci_iov_bus_range(struct pci_bus *bus);
+
+extern int pci_enable_ats(struct pci_dev *dev, int ps);
+extern void pci_disable_ats(struct pci_dev *dev);
+extern int pci_ats_queue_depth(struct pci_dev *dev);
+/**
+ * pci_ats_enabled - query the ATS status
+ * @dev: the PCI device
+ *
+ * Returns 1 if ATS capability is enabled, or 0 if not.
+ */
+static inline int pci_ats_enabled(struct pci_dev *dev)
+{
+ return dev->ats && dev->ats->is_enabled;
+}
#else
static inline int pci_iov_init(struct pci_dev *dev)
{
@@ -257,6 +280,22 @@
{
return 0;
}
+
+static inline int pci_enable_ats(struct pci_dev *dev, int ps)
+{
+ return -ENODEV;
+}
+static inline void pci_disable_ats(struct pci_dev *dev)
+{
+}
+static inline int pci_ats_queue_depth(struct pci_dev *dev)
+{
+ return -ENODEV;
+}
+static inline int pci_ats_enabled(struct pci_dev *dev)
+{
+ return 0;
+}
#endif /* CONFIG_PCI_IOV */
#endif /* DRIVERS_PCI_H */
diff --git a/drivers/pci/pcie/aer/Kconfig b/drivers/pci/pcie/aer/Kconfig
index c3bde58..50e94e0 100644
--- a/drivers/pci/pcie/aer/Kconfig
+++ b/drivers/pci/pcie/aer/Kconfig
@@ -10,3 +10,18 @@
This enables PCI Express Root Port Advanced Error Reporting
(AER) driver support. Error reporting messages sent to Root
Port will be handled by PCI Express AER driver.
+
+
+#
+# PCI Express ECRC
+#
+config PCIE_ECRC
+ bool "PCI Express ECRC settings control"
+ depends on PCIEAER
+ help
+ Used to override firmware/bios settings for PCI Express ECRC
+ (transaction layer end-to-end CRC checking).
+
+ When in doubt, say N.
+
+source "drivers/pci/pcie/aer/Kconfig.debug"
diff --git a/drivers/pci/pcie/aer/Kconfig.debug b/drivers/pci/pcie/aer/Kconfig.debug
new file mode 100644
index 0000000..b8c925c
--- /dev/null
+++ b/drivers/pci/pcie/aer/Kconfig.debug
@@ -0,0 +1,18 @@
+#
+# PCI Express Root Port Device AER Debug Configuration
+#
+
+config PCIEAER_INJECT
+ tristate "PCIE AER error injector support"
+ depends on PCIEAER
+ default n
+ help
+ This enables PCI Express Root Port Advanced Error Reporting
+ (AER) software error injector.
+
+ Debugging PCIE AER code is quite difficult because it is hard
+ to trigger various real hardware errors. Software based
+ error injection can fake almost all kinds of errors with the
+ help of a user space helper tool aer-inject, which can be
+ gotten from:
+ http://www.kernel.org/pub/linux/utils/pci/aer-inject/
diff --git a/drivers/pci/pcie/aer/Makefile b/drivers/pci/pcie/aer/Makefile
index 8da3bd8..2cba675 100644
--- a/drivers/pci/pcie/aer/Makefile
+++ b/drivers/pci/pcie/aer/Makefile
@@ -4,6 +4,9 @@
obj-$(CONFIG_PCIEAER) += aerdriver.o
+obj-$(CONFIG_PCIE_ECRC) += ecrc.o
+
aerdriver-objs := aerdrv_errprint.o aerdrv_core.o aerdrv.o
aerdriver-$(CONFIG_ACPI) += aerdrv_acpi.o
+obj-$(CONFIG_PCIEAER_INJECT) += aer_inject.o
diff --git a/drivers/pci/pcie/aer/aer_inject.c b/drivers/pci/pcie/aer/aer_inject.c
new file mode 100644
index 0000000..d92ae21
--- /dev/null
+++ b/drivers/pci/pcie/aer/aer_inject.c
@@ -0,0 +1,473 @@
+/*
+ * PCIE AER software error injection support.
+ *
+ * Debugging PCIE AER code is quite difficult because it is hard to
+ * trigger various real hardware errors. Software based error
+ * injection can fake almost all kinds of errors with the help of a
+ * user space helper tool aer-inject, which can be gotten from:
+ * http://www.kernel.org/pub/linux/utils/pci/aer-inject/
+ *
+ * Copyright 2009 Intel Corporation.
+ * Huang Ying <ying.huang@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/miscdevice.h>
+#include <linux/pci.h>
+#include <linux/fs.h>
+#include <asm/uaccess.h>
+#include "aerdrv.h"
+
+struct aer_error_inj
+{
+ u8 bus;
+ u8 dev;
+ u8 fn;
+ u32 uncor_status;
+ u32 cor_status;
+ u32 header_log0;
+ u32 header_log1;
+ u32 header_log2;
+ u32 header_log3;
+};
+
+struct aer_error
+{
+ struct list_head list;
+ unsigned int bus;
+ unsigned int devfn;
+ int pos_cap_err;
+
+ u32 uncor_status;
+ u32 cor_status;
+ u32 header_log0;
+ u32 header_log1;
+ u32 header_log2;
+ u32 header_log3;
+ u32 root_status;
+ u32 source_id;
+};
+
+struct pci_bus_ops
+{
+ struct list_head list;
+ struct pci_bus *bus;
+ struct pci_ops *ops;
+};
+
+static LIST_HEAD(einjected);
+
+static LIST_HEAD(pci_bus_ops_list);
+
+/* Protect einjected and pci_bus_ops_list */
+static DEFINE_SPINLOCK(inject_lock);
+
+static void aer_error_init(struct aer_error *err, unsigned int bus,
+ unsigned int devfn, int pos_cap_err)
+{
+ INIT_LIST_HEAD(&err->list);
+ err->bus = bus;
+ err->devfn = devfn;
+ err->pos_cap_err = pos_cap_err;
+}
+
+/* inject_lock must be held before calling */
+static struct aer_error *__find_aer_error(unsigned int bus, unsigned int devfn)
+{
+ struct aer_error *err;
+
+ list_for_each_entry(err, &einjected, list) {
+ if (bus == err->bus && devfn == err->devfn)
+ return err;
+ }
+ return NULL;
+}
+
+/* inject_lock must be held before calling */
+static struct aer_error *__find_aer_error_by_dev(struct pci_dev *dev)
+{
+ return __find_aer_error(dev->bus->number, dev->devfn);
+}
+
+/* inject_lock must be held before calling */
+static struct pci_ops *__find_pci_bus_ops(struct pci_bus *bus)
+{
+ struct pci_bus_ops *bus_ops;
+
+ list_for_each_entry(bus_ops, &pci_bus_ops_list, list) {
+ if (bus_ops->bus == bus)
+ return bus_ops->ops;
+ }
+ return NULL;
+}
+
+static struct pci_bus_ops *pci_bus_ops_pop(void)
+{
+ unsigned long flags;
+ struct pci_bus_ops *bus_ops = NULL;
+
+ spin_lock_irqsave(&inject_lock, flags);
+ if (list_empty(&pci_bus_ops_list))
+ bus_ops = NULL;
+ else {
+ struct list_head *lh = pci_bus_ops_list.next;
+ list_del(lh);
+ bus_ops = list_entry(lh, struct pci_bus_ops, list);
+ }
+ spin_unlock_irqrestore(&inject_lock, flags);
+ return bus_ops;
+}
+
+static u32 *find_pci_config_dword(struct aer_error *err, int where,
+ int *prw1cs)
+{
+ int rw1cs = 0;
+ u32 *target = NULL;
+
+ if (err->pos_cap_err == -1)
+ return NULL;
+
+ switch (where - err->pos_cap_err) {
+ case PCI_ERR_UNCOR_STATUS:
+ target = &err->uncor_status;
+ rw1cs = 1;
+ break;
+ case PCI_ERR_COR_STATUS:
+ target = &err->cor_status;
+ rw1cs = 1;
+ break;
+ case PCI_ERR_HEADER_LOG:
+ target = &err->header_log0;
+ break;
+ case PCI_ERR_HEADER_LOG+4:
+ target = &err->header_log1;
+ break;
+ case PCI_ERR_HEADER_LOG+8:
+ target = &err->header_log2;
+ break;
+ case PCI_ERR_HEADER_LOG+12:
+ target = &err->header_log3;
+ break;
+ case PCI_ERR_ROOT_STATUS:
+ target = &err->root_status;
+ rw1cs = 1;
+ break;
+ case PCI_ERR_ROOT_COR_SRC:
+ target = &err->source_id;
+ break;
+ }
+ if (prw1cs)
+ *prw1cs = rw1cs;
+ return target;
+}
+
+static int pci_read_aer(struct pci_bus *bus, unsigned int devfn, int where,
+ int size, u32 *val)
+{
+ u32 *sim;
+ struct aer_error *err;
+ unsigned long flags;
+ struct pci_ops *ops;
+
+ spin_lock_irqsave(&inject_lock, flags);
+ if (size != sizeof(u32))
+ goto out;
+ err = __find_aer_error(bus->number, devfn);
+ if (!err)
+ goto out;
+
+ sim = find_pci_config_dword(err, where, NULL);
+ if (sim) {
+ *val = *sim;
+ spin_unlock_irqrestore(&inject_lock, flags);
+ return 0;
+ }
+out:
+ ops = __find_pci_bus_ops(bus);
+ spin_unlock_irqrestore(&inject_lock, flags);
+ return ops->read(bus, devfn, where, size, val);
+}
+
+int pci_write_aer(struct pci_bus *bus, unsigned int devfn, int where, int size,
+ u32 val)
+{
+ u32 *sim;
+ struct aer_error *err;
+ unsigned long flags;
+ int rw1cs;
+ struct pci_ops *ops;
+
+ spin_lock_irqsave(&inject_lock, flags);
+ if (size != sizeof(u32))
+ goto out;
+ err = __find_aer_error(bus->number, devfn);
+ if (!err)
+ goto out;
+
+ sim = find_pci_config_dword(err, where, &rw1cs);
+ if (sim) {
+ if (rw1cs)
+ *sim ^= val;
+ else
+ *sim = val;
+ spin_unlock_irqrestore(&inject_lock, flags);
+ return 0;
+ }
+out:
+ ops = __find_pci_bus_ops(bus);
+ spin_unlock_irqrestore(&inject_lock, flags);
+ return ops->write(bus, devfn, where, size, val);
+}
+
+static struct pci_ops pci_ops_aer = {
+ .read = pci_read_aer,
+ .write = pci_write_aer,
+};
+
+static void pci_bus_ops_init(struct pci_bus_ops *bus_ops,
+ struct pci_bus *bus,
+ struct pci_ops *ops)
+{
+ INIT_LIST_HEAD(&bus_ops->list);
+ bus_ops->bus = bus;
+ bus_ops->ops = ops;
+}
+
+static int pci_bus_set_aer_ops(struct pci_bus *bus)
+{
+ struct pci_ops *ops;
+ struct pci_bus_ops *bus_ops;
+ unsigned long flags;
+
+ bus_ops = kmalloc(sizeof(*bus_ops), GFP_KERNEL);
+ if (!bus_ops)
+ return -ENOMEM;
+ ops = pci_bus_set_ops(bus, &pci_ops_aer);
+ spin_lock_irqsave(&inject_lock, flags);
+ if (ops == &pci_ops_aer)
+ goto out;
+ pci_bus_ops_init(bus_ops, bus, ops);
+ list_add(&bus_ops->list, &pci_bus_ops_list);
+ bus_ops = NULL;
+out:
+ spin_unlock_irqrestore(&inject_lock, flags);
+ if (bus_ops)
+ kfree(bus_ops);
+ return 0;
+}
+
+static struct pci_dev *pcie_find_root_port(struct pci_dev *dev)
+{
+ while (1) {
+ if (!dev->is_pcie)
+ break;
+ if (dev->pcie_type == PCI_EXP_TYPE_ROOT_PORT)
+ return dev;
+ if (!dev->bus->self)
+ break;
+ dev = dev->bus->self;
+ }
+ return NULL;
+}
+
+static int find_aer_device_iter(struct device *device, void *data)
+{
+ struct pcie_device **result = data;
+ struct pcie_device *pcie_dev;
+
+ if (device->bus == &pcie_port_bus_type) {
+ pcie_dev = to_pcie_device(device);
+ if (pcie_dev->service & PCIE_PORT_SERVICE_AER) {
+ *result = pcie_dev;
+ return 1;
+ }
+ }
+ return 0;
+}
+
+static int find_aer_device(struct pci_dev *dev, struct pcie_device **result)
+{
+ return device_for_each_child(&dev->dev, result, find_aer_device_iter);
+}
+
+static int aer_inject(struct aer_error_inj *einj)
+{
+ struct aer_error *err, *rperr;
+ struct aer_error *err_alloc = NULL, *rperr_alloc = NULL;
+ struct pci_dev *dev, *rpdev;
+ struct pcie_device *edev;
+ unsigned long flags;
+ unsigned int devfn = PCI_DEVFN(einj->dev, einj->fn);
+ int pos_cap_err, rp_pos_cap_err;
+ u32 sever;
+ int ret = 0;
+
+ dev = pci_get_bus_and_slot(einj->bus, devfn);
+ if (!dev)
+ return -EINVAL;
+ rpdev = pcie_find_root_port(dev);
+ if (!rpdev) {
+ ret = -EINVAL;
+ goto out_put;
+ }
+
+ pos_cap_err = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
+ if (!pos_cap_err) {
+ ret = -EIO;
+ goto out_put;
+ }
+ pci_read_config_dword(dev, pos_cap_err + PCI_ERR_UNCOR_SEVER, &sever);
+
+ rp_pos_cap_err = pci_find_ext_capability(rpdev, PCI_EXT_CAP_ID_ERR);
+ if (!rp_pos_cap_err) {
+ ret = -EIO;
+ goto out_put;
+ }
+
+ err_alloc = kzalloc(sizeof(struct aer_error), GFP_KERNEL);
+ if (!err_alloc) {
+ ret = -ENOMEM;
+ goto out_put;
+ }
+ rperr_alloc = kzalloc(sizeof(struct aer_error), GFP_KERNEL);
+ if (!rperr_alloc) {
+ ret = -ENOMEM;
+ goto out_put;
+ }
+
+ spin_lock_irqsave(&inject_lock, flags);
+
+ err = __find_aer_error_by_dev(dev);
+ if (!err) {
+ err = err_alloc;
+ err_alloc = NULL;
+ aer_error_init(err, einj->bus, devfn, pos_cap_err);
+ list_add(&err->list, &einjected);
+ }
+ err->uncor_status |= einj->uncor_status;
+ err->cor_status |= einj->cor_status;
+ err->header_log0 = einj->header_log0;
+ err->header_log1 = einj->header_log1;
+ err->header_log2 = einj->header_log2;
+ err->header_log3 = einj->header_log3;
+
+ rperr = __find_aer_error_by_dev(rpdev);
+ if (!rperr) {
+ rperr = rperr_alloc;
+ rperr_alloc = NULL;
+ aer_error_init(rperr, rpdev->bus->number, rpdev->devfn,
+ rp_pos_cap_err);
+ list_add(&rperr->list, &einjected);
+ }
+ if (einj->cor_status) {
+ if (rperr->root_status & PCI_ERR_ROOT_COR_RCV)
+ rperr->root_status |= PCI_ERR_ROOT_MULTI_COR_RCV;
+ else
+ rperr->root_status |= PCI_ERR_ROOT_COR_RCV;
+ rperr->source_id &= 0xffff0000;
+ rperr->source_id |= (einj->bus << 8) | devfn;
+ }
+ if (einj->uncor_status) {
+ if (rperr->root_status & PCI_ERR_ROOT_UNCOR_RCV)
+ rperr->root_status |= PCI_ERR_ROOT_MULTI_UNCOR_RCV;
+ if (sever & einj->uncor_status) {
+ rperr->root_status |= PCI_ERR_ROOT_FATAL_RCV;
+ if (!(rperr->root_status & PCI_ERR_ROOT_UNCOR_RCV))
+ rperr->root_status |= PCI_ERR_ROOT_FIRST_FATAL;
+ } else
+ rperr->root_status |= PCI_ERR_ROOT_NONFATAL_RCV;
+ rperr->root_status |= PCI_ERR_ROOT_UNCOR_RCV;
+ rperr->source_id &= 0x0000ffff;
+ rperr->source_id |= ((einj->bus << 8) | devfn) << 16;
+ }
+ spin_unlock_irqrestore(&inject_lock, flags);
+
+ ret = pci_bus_set_aer_ops(dev->bus);
+ if (ret)
+ goto out_put;
+ ret = pci_bus_set_aer_ops(rpdev->bus);
+ if (ret)
+ goto out_put;
+
+ if (find_aer_device(rpdev, &edev))
+ aer_irq(-1, edev);
+ else
+ ret = -EINVAL;
+out_put:
+ if (err_alloc)
+ kfree(err_alloc);
+ if (rperr_alloc)
+ kfree(rperr_alloc);
+ pci_dev_put(dev);
+ return ret;
+}
+
+static ssize_t aer_inject_write(struct file *filp, const char __user *ubuf,
+ size_t usize, loff_t *off)
+{
+ struct aer_error_inj einj;
+ int ret;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ if (usize != sizeof(struct aer_error_inj))
+ return -EINVAL;
+
+ if (copy_from_user(&einj, ubuf, usize))
+ return -EFAULT;
+
+ ret = aer_inject(&einj);
+ return ret ? ret : usize;
+}
+
+static const struct file_operations aer_inject_fops = {
+ .write = aer_inject_write,
+ .owner = THIS_MODULE,
+};
+
+static struct miscdevice aer_inject_device = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "aer_inject",
+ .fops = &aer_inject_fops,
+};
+
+static int __init aer_inject_init(void)
+{
+ return misc_register(&aer_inject_device);
+}
+
+static void __exit aer_inject_exit(void)
+{
+ struct aer_error *err, *err_next;
+ unsigned long flags;
+ struct pci_bus_ops *bus_ops;
+
+ misc_deregister(&aer_inject_device);
+
+ while ((bus_ops = pci_bus_ops_pop())) {
+ pci_bus_set_ops(bus_ops->bus, bus_ops->ops);
+ kfree(bus_ops);
+ }
+
+ spin_lock_irqsave(&inject_lock, flags);
+ list_for_each_entry_safe(err, err_next,
+ &einjected, list) {
+ list_del(&err->list);
+ kfree(err);
+ }
+ spin_unlock_irqrestore(&inject_lock, flags);
+}
+
+module_init(aer_inject_init);
+module_exit(aer_inject_exit);
+
+MODULE_DESCRIPTION("PCIE AER software error injector");
+MODULE_LICENSE("GPL");
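
To exercise the injector from user space, a binary struct aer_error_inj is written to the misc device registered above. The sketch below is illustrative only: the /dev/aer_inject path, the chosen bus/device/function and the correctable-error bit are assumptions, and the struct layout must match the kernel-side definition (the aer-inject helper tool linked above takes care of this).

	#include <fcntl.h>
	#include <stdint.h>
	#include <unistd.h>

	struct aer_error_inj {
		uint8_t  bus, dev, fn;
		uint32_t uncor_status, cor_status;
		uint32_t header_log0, header_log1, header_log2, header_log3;
	};

	int main(void)
	{
		struct aer_error_inj einj = {
			.bus = 0x05, .dev = 0x00, .fn = 0x0,
			.cor_status = 0x00000001,	/* e.g. Receiver Error (correctable) */
		};
		int fd = open("/dev/aer_inject", O_WRONLY);

		if (fd < 0)
			return 1;
		if (write(fd, &einj, sizeof(einj)) != sizeof(einj)) {
			close(fd);
			return 1;
		}
		return close(fd) ? 1 : 0;
	}
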
diff --git a/drivers/pci/pcie/aer/aerdrv.c b/drivers/pci/pcie/aer/aerdrv.c
index 32ade5a..4770f13 100644
--- a/drivers/pci/pcie/aer/aerdrv.c
+++ b/drivers/pci/pcie/aer/aerdrv.c
@@ -77,7 +77,7 @@
*
* Invoked when Root Port detects AER messages.
**/
-static irqreturn_t aer_irq(int irq, void *context)
+irqreturn_t aer_irq(int irq, void *context)
{
unsigned int status, id;
struct pcie_device *pdev = (struct pcie_device *)context;
@@ -126,6 +126,7 @@
return IRQ_HANDLED;
}
+EXPORT_SYMBOL_GPL(aer_irq);
/**
* aer_alloc_rpc - allocate Root Port data structure
diff --git a/drivers/pci/pcie/aer/aerdrv.h b/drivers/pci/pcie/aer/aerdrv.h
index aa14482..bbd7428 100644
--- a/drivers/pci/pcie/aer/aerdrv.h
+++ b/drivers/pci/pcie/aer/aerdrv.h
@@ -11,6 +11,7 @@
#include <linux/workqueue.h>
#include <linux/pcieport_if.h>
#include <linux/aer.h>
+#include <linux/interrupt.h>
#define AER_NONFATAL 0
#define AER_FATAL 1
@@ -56,7 +57,11 @@
unsigned int dw3;
};
+#define AER_MAX_MULTI_ERR_DEVICES 5 /* Not likely to have more */
struct aer_err_info {
+ struct pci_dev *dev[AER_MAX_MULTI_ERR_DEVICES];
+ int error_dev_num;
+ u16 id;
int severity; /* 0:NONFATAL | 1:FATAL | 2:COR */
int flags;
unsigned int status; /* COR/UNCOR Error Status */
@@ -120,6 +125,7 @@
extern int aer_init(struct pcie_device *dev);
extern void aer_isr(struct work_struct *work);
extern void aer_print_error(struct pci_dev *dev, struct aer_err_info *info);
+extern irqreturn_t aer_irq(int irq, void *context);
#ifdef CONFIG_ACPI
extern int aer_osc_setup(struct pcie_device *pciedev);
diff --git a/drivers/pci/pcie/aer/aerdrv_core.c b/drivers/pci/pcie/aer/aerdrv_core.c
index 307452f..3d88727 100644
--- a/drivers/pci/pcie/aer/aerdrv_core.c
+++ b/drivers/pci/pcie/aer/aerdrv_core.c
@@ -26,7 +26,9 @@
#include "aerdrv.h"
static int forceload;
+static int nosourceid;
module_param(forceload, bool, 0);
+module_param(nosourceid, bool, 0);
int pci_enable_pcie_error_reporting(struct pci_dev *dev)
{
@@ -109,19 +111,23 @@
#endif /* 0 */
-static void set_device_error_reporting(struct pci_dev *dev, void *data)
+static int set_device_error_reporting(struct pci_dev *dev, void *data)
{
bool enable = *((bool *)data);
- if (dev->pcie_type != PCIE_RC_PORT &&
- dev->pcie_type != PCIE_SW_UPSTREAM_PORT &&
- dev->pcie_type != PCIE_SW_DOWNSTREAM_PORT)
- return;
+ if (dev->pcie_type == PCIE_RC_PORT ||
+ dev->pcie_type == PCIE_SW_UPSTREAM_PORT ||
+ dev->pcie_type == PCIE_SW_DOWNSTREAM_PORT) {
+ if (enable)
+ pci_enable_pcie_error_reporting(dev);
+ else
+ pci_disable_pcie_error_reporting(dev);
+ }
if (enable)
- pci_enable_pcie_error_reporting(dev);
- else
- pci_disable_pcie_error_reporting(dev);
+ pcie_set_ecrc_checking(dev);
+
+ return 0;
}
/**
@@ -139,73 +145,148 @@
pci_walk_bus(dev->subordinate, set_device_error_reporting, &enable);
}
-static int find_device_iter(struct device *device, void *data)
+static inline int compare_device_id(struct pci_dev *dev,
+ struct aer_err_info *e_info)
{
- struct pci_dev *dev;
- u16 id = *(unsigned long *)data;
- u8 secondary, subordinate, d_bus = id >> 8;
-
- if (device->bus == &pci_bus_type) {
- dev = to_pci_dev(device);
- if (id == ((dev->bus->number << 8) | dev->devfn)) {
- /*
- * Device ID match
- */
- *(unsigned long*)data = (unsigned long)device;
- return 1;
- }
-
+ if (e_info->id == ((dev->bus->number << 8) | dev->devfn)) {
/*
- * If device is P2P, check if it is an upstream?
+ * Device ID match
*/
- if (dev->hdr_type & PCI_HEADER_TYPE_BRIDGE) {
- pci_read_config_byte(dev, PCI_SECONDARY_BUS,
- &secondary);
- pci_read_config_byte(dev, PCI_SUBORDINATE_BUS,
- &subordinate);
- if (d_bus >= secondary && d_bus <= subordinate) {
- *(unsigned long*)data = (unsigned long)device;
- return 1;
- }
- }
+ return 1;
}
return 0;
}
+static int add_error_device(struct aer_err_info *e_info, struct pci_dev *dev)
+{
+ if (e_info->error_dev_num < AER_MAX_MULTI_ERR_DEVICES) {
+ e_info->dev[e_info->error_dev_num] = dev;
+ e_info->error_dev_num++;
+ return 1;
+ } else
+ return 0;
+}
+
+
+#define PCI_BUS(x) (((x) >> 8) & 0xff)
+
+static int find_device_iter(struct pci_dev *dev, void *data)
+{
+ int pos;
+ u32 status;
+ u32 mask;
+ u16 reg16;
+ int result;
+ struct aer_err_info *e_info = (struct aer_err_info *)data;
+
+ /*
+ * When bus id is equal to 0, it might be a bad id
+ * reported by root port.
+ */
+ if (!nosourceid && (PCI_BUS(e_info->id) != 0)) {
+ result = compare_device_id(dev, e_info);
+ if (result)
+ add_error_device(e_info, dev);
+
+ /*
+ * If the multiple-error flag is not set, we stop
+ * or continue based on the id comparison.
+ */
+ if (!(e_info->flags & AER_MULTI_ERROR_VALID_FLAG))
+ return result;
+
+ /*
+ * If there are multiple errors and the id does match,
+ * we need to continue searching other devices under
+ * the root port. Returning 0 means exactly that.
+ */
+ if (result)
+ return 0;
+ }
+
+ /*
+ * When any of the following is true:
+ * 1) nosourceid==y;
+ * 2) the bus id is 0 (some ports lose the bus part of the
+ *    error source id);
+ * 3) there are multiple errors and the prior id comparison fails;
+ * we check the AER status registers to find the initial reporter.
+ */
+ if (atomic_read(&dev->enable_cnt) == 0)
+ return 0;
+ pos = pci_find_capability(dev, PCI_CAP_ID_EXP);
+ if (!pos)
+ return 0;
+ /* Check if AER is enabled */
+ pci_read_config_word(dev, pos+PCI_EXP_DEVCTL, &reg16);
+ if (!(reg16 & (
+ PCI_EXP_DEVCTL_CERE |
+ PCI_EXP_DEVCTL_NFERE |
+ PCI_EXP_DEVCTL_FERE |
+ PCI_EXP_DEVCTL_URRE)))
+ return 0;
+ pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
+ if (!pos)
+ return 0;
+
+ status = 0;
+ mask = 0;
+ if (e_info->severity == AER_CORRECTABLE) {
+ pci_read_config_dword(dev,
+ pos + PCI_ERR_COR_STATUS,
+ &status);
+ pci_read_config_dword(dev,
+ pos + PCI_ERR_COR_MASK,
+ &mask);
+ if (status & ERR_CORRECTABLE_ERROR_MASK & ~mask) {
+ add_error_device(e_info, dev);
+ goto added;
+ }
+ } else {
+ pci_read_config_dword(dev,
+ pos + PCI_ERR_UNCOR_STATUS,
+ &status);
+ pci_read_config_dword(dev,
+ pos + PCI_ERR_UNCOR_MASK,
+ &mask);
+ if (status & ERR_UNCORRECTABLE_ERROR_MASK & ~mask) {
+ add_error_device(e_info, dev);
+ goto added;
+ }
+ }
+
+ return 0;
+
+added:
+ if (e_info->flags & AER_MULTI_ERROR_VALID_FLAG)
+ return 0;
+ else
+ return 1;
+}
+
/**
* find_source_device - search through device hierarchy for source device
* @parent: pointer to Root Port pci_dev data structure
- * @id: device ID of agent who sends an error message to this Root Port
+ * @e_info: detailed error information, such as the id
*
* Invoked when error is detected at the Root Port.
*/
-static struct device* find_source_device(struct pci_dev *parent, u16 id)
+static void find_source_device(struct pci_dev *parent,
+ struct aer_err_info *e_info)
{
struct pci_dev *dev = parent;
- struct device *device;
- unsigned long device_addr;
- int status;
+ int result;
/* Is Root Port an agent that sends error message? */
- if (id == ((dev->bus->number << 8) | dev->devfn))
- return &dev->dev;
+ result = find_device_iter(dev, e_info);
+ if (result)
+ return;
- do {
- device_addr = id;
- if ((status = device_for_each_child(&dev->dev,
- &device_addr, find_device_iter))) {
- device = (struct device*)device_addr;
- dev = to_pci_dev(device);
- if (id == ((dev->bus->number << 8) | dev->devfn))
- return device;
- }
- }while (status);
-
- return NULL;
+ pci_walk_bus(parent->subordinate, find_device_iter, e_info);
}
-static void report_error_detected(struct pci_dev *dev, void *data)
+static int report_error_detected(struct pci_dev *dev, void *data)
{
pci_ers_result_t vote;
struct pci_error_handlers *err_handler;
@@ -230,16 +311,16 @@
dev->driver ?
"no AER-aware driver" : "no driver");
}
- return;
+ return 0;
}
err_handler = dev->driver->err_handler;
vote = err_handler->error_detected(dev, result_data->state);
result_data->result = merge_result(result_data->result, vote);
- return;
+ return 0;
}
-static void report_mmio_enabled(struct pci_dev *dev, void *data)
+static int report_mmio_enabled(struct pci_dev *dev, void *data)
{
pci_ers_result_t vote;
struct pci_error_handlers *err_handler;
@@ -249,15 +330,15 @@
if (!dev->driver ||
!dev->driver->err_handler ||
!dev->driver->err_handler->mmio_enabled)
- return;
+ return 0;
err_handler = dev->driver->err_handler;
vote = err_handler->mmio_enabled(dev);
result_data->result = merge_result(result_data->result, vote);
- return;
+ return 0;
}
-static void report_slot_reset(struct pci_dev *dev, void *data)
+static int report_slot_reset(struct pci_dev *dev, void *data)
{
pci_ers_result_t vote;
struct pci_error_handlers *err_handler;
@@ -267,15 +348,15 @@
if (!dev->driver ||
!dev->driver->err_handler ||
!dev->driver->err_handler->slot_reset)
- return;
+ return 0;
err_handler = dev->driver->err_handler;
vote = err_handler->slot_reset(dev);
result_data->result = merge_result(result_data->result, vote);
- return;
+ return 0;
}
-static void report_resume(struct pci_dev *dev, void *data)
+static int report_resume(struct pci_dev *dev, void *data)
{
struct pci_error_handlers *err_handler;
@@ -284,11 +365,11 @@
if (!dev->driver ||
!dev->driver->err_handler ||
!dev->driver->err_handler->resume)
- return;
+ return 0;
err_handler = dev->driver->err_handler;
err_handler->resume(dev);
- return;
+ return 0;
}
/**
@@ -305,7 +386,7 @@
static pci_ers_result_t broadcast_error_message(struct pci_dev *dev,
enum pci_channel_state state,
char *error_mesg,
- void (*cb)(struct pci_dev *, void *))
+ int (*cb)(struct pci_dev *, void *))
{
struct aer_broadcast_data result_data;
@@ -497,12 +578,12 @@
*/
static void handle_error_source(struct pcie_device * aerdev,
struct pci_dev *dev,
- struct aer_err_info info)
+ struct aer_err_info *info)
{
pci_ers_result_t status = 0;
int pos;
- if (info.severity == AER_CORRECTABLE) {
+ if (info->severity == AER_CORRECTABLE) {
/*
 		 * Correctable error does not need software intervention.
* No need to go through error recovery process.
@@ -510,9 +591,9 @@
pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
if (pos)
pci_write_config_dword(dev, pos + PCI_ERR_COR_STATUS,
- info.status);
+ info->status);
} else {
- status = do_recovery(aerdev, dev, info.severity);
+ status = do_recovery(aerdev, dev, info->severity);
if (status == PCI_ERS_RESULT_RECOVERED) {
dev_printk(KERN_DEBUG, &dev->dev, "AER driver "
"successfully recovered\n");
@@ -661,6 +742,28 @@
return AER_SUCCESS;
}
+static inline void aer_process_err_devices(struct pcie_device *p_device,
+ struct aer_err_info *e_info)
+{
+ int i;
+
+ if (!e_info->dev[0]) {
+ dev_printk(KERN_DEBUG, &p_device->port->dev,
+ "can't find device of ID%04x\n",
+ e_info->id);
+ }
+
+ for (i = 0; i < e_info->error_dev_num && e_info->dev[i]; i++) {
+ if (get_device_error_info(e_info->dev[i], e_info) ==
+ AER_SUCCESS) {
+ aer_print_error(e_info->dev[i], e_info);
+ handle_error_source(p_device,
+ e_info->dev[i],
+ e_info);
+ }
+ }
+}
+
/**
* aer_isr_one_error - consume an error detected by root port
* @p_device: pointer to error root port service device
@@ -669,10 +772,16 @@
static void aer_isr_one_error(struct pcie_device *p_device,
struct aer_err_source *e_src)
{
- struct device *s_device;
- struct aer_err_info e_info = {0, 0, 0,};
+ struct aer_err_info *e_info;
int i;
- u16 id;
+
+ /* struct aer_err_info might be big, so we allocate it with slab */
+ e_info = kmalloc(sizeof(struct aer_err_info), GFP_KERNEL);
+ if (e_info == NULL) {
+ dev_printk(KERN_DEBUG, &p_device->port->dev,
+ "Can't allocate mem when processing AER errors\n");
+ return;
+ }
/*
* There is a possibility that both correctable error and
@@ -684,31 +793,26 @@
if (!(e_src->status & i))
continue;
+ memset(e_info, 0, sizeof(struct aer_err_info));
+
/* Init comprehensive error information */
if (i & PCI_ERR_ROOT_COR_RCV) {
- id = ERR_COR_ID(e_src->id);
- e_info.severity = AER_CORRECTABLE;
+ e_info->id = ERR_COR_ID(e_src->id);
+ e_info->severity = AER_CORRECTABLE;
} else {
- id = ERR_UNCOR_ID(e_src->id);
- e_info.severity = ((e_src->status >> 6) & 1);
+ e_info->id = ERR_UNCOR_ID(e_src->id);
+ e_info->severity = ((e_src->status >> 6) & 1);
}
if (e_src->status &
(PCI_ERR_ROOT_MULTI_COR_RCV |
PCI_ERR_ROOT_MULTI_UNCOR_RCV))
- e_info.flags |= AER_MULTI_ERROR_VALID_FLAG;
- if (!(s_device = find_source_device(p_device->port, id))) {
- printk(KERN_DEBUG "%s->can't find device of ID%04x\n",
- __func__, id);
- continue;
- }
- if (get_device_error_info(to_pci_dev(s_device), &e_info) ==
- AER_SUCCESS) {
- aer_print_error(to_pci_dev(s_device), &e_info);
- handle_error_source(p_device,
- to_pci_dev(s_device),
- e_info);
- }
+ e_info->flags |= AER_MULTI_ERROR_VALID_FLAG;
+
+ find_source_device(p_device->port, e_info);
+ aer_process_err_devices(p_device, e_info);
}
+
+ kfree(e_info);
}
/**
diff --git a/drivers/pci/pcie/aer/ecrc.c b/drivers/pci/pcie/aer/ecrc.c
new file mode 100644
index 0000000..ece97df
--- /dev/null
+++ b/drivers/pci/pcie/aer/ecrc.c
@@ -0,0 +1,131 @@
+/*
+ * Enables/disables PCIe ECRC checking.
+ *
+ * (C) Copyright 2009 Hewlett-Packard Development Company, L.P.
+ * Andrew Patterson <andrew.patterson@hp.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ * 02111-1307, USA.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/pci.h>
+#include <linux/pci_regs.h>
+#include <linux/errno.h>
+#include "../../pci.h"
+
+#define ECRC_POLICY_DEFAULT 0 /* ECRC set by BIOS */
+#define ECRC_POLICY_OFF 1 /* ECRC off for performance */
+#define ECRC_POLICY_ON 2 /* ECRC on for data integrity */
+
+static int ecrc_policy = ECRC_POLICY_DEFAULT;
+
+static const char *ecrc_policy_str[] = {
+ [ECRC_POLICY_DEFAULT] = "bios",
+ [ECRC_POLICY_OFF] = "off",
+ [ECRC_POLICY_ON] = "on"
+};
+
+/**
+ * enable_ecrc_checking - enable PCIe ECRC checking for a device
+ * @dev: the PCI device
+ *
+ * Returns 0 on success, or negative on failure.
+ */
+static int enable_ecrc_checking(struct pci_dev *dev)
+{
+ int pos;
+ u32 reg32;
+
+ if (!dev->is_pcie)
+ return -ENODEV;
+
+ pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
+ if (!pos)
+ return -ENODEV;
+
+	pci_read_config_dword(dev, pos + PCI_ERR_CAP, &reg32);
+ if (reg32 & PCI_ERR_CAP_ECRC_GENC)
+ reg32 |= PCI_ERR_CAP_ECRC_GENE;
+ if (reg32 & PCI_ERR_CAP_ECRC_CHKC)
+ reg32 |= PCI_ERR_CAP_ECRC_CHKE;
+ pci_write_config_dword(dev, pos + PCI_ERR_CAP, reg32);
+
+ return 0;
+}
+
+/**
+ * disable_ecrc_checking - disable PCIe ECRC checking for a device
+ * @dev: the PCI device
+ *
+ * Returns 0 on success, or negative on failure.
+ */
+static int disable_ecrc_checking(struct pci_dev *dev)
+{
+ int pos;
+ u32 reg32;
+
+ if (!dev->is_pcie)
+ return -ENODEV;
+
+ pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
+ if (!pos)
+ return -ENODEV;
+
+	pci_read_config_dword(dev, pos + PCI_ERR_CAP, &reg32);
+ reg32 &= ~(PCI_ERR_CAP_ECRC_GENE | PCI_ERR_CAP_ECRC_CHKE);
+ pci_write_config_dword(dev, pos + PCI_ERR_CAP, reg32);
+
+ return 0;
+}
+
+/**
+ * pcie_set_ecrc_checking - set/unset PCIe ECRC checking for a device based on global policy
+ * @dev: the PCI device
+ */
+void pcie_set_ecrc_checking(struct pci_dev *dev)
+{
+ switch (ecrc_policy) {
+ case ECRC_POLICY_DEFAULT:
+ return;
+ case ECRC_POLICY_OFF:
+ disable_ecrc_checking(dev);
+ break;
+ case ECRC_POLICY_ON:
+		enable_ecrc_checking(dev);
+ break;
+ default:
+ return;
+ }
+}
+
+/**
+ * pcie_ecrc_get_policy - parse kernel command-line ecrc option
+ * @str: ECRC policy string ("bios", "off" or "on")
+ */
+void pcie_ecrc_get_policy(char *str)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(ecrc_policy_str); i++)
+ if (!strncmp(str, ecrc_policy_str[i],
+ strlen(ecrc_policy_str[i])))
+ break;
+ if (i >= ARRAY_SIZE(ecrc_policy_str))
+ return;
+
+ ecrc_policy = i;
+}
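The parser above only records the selected policy; the caller that feeds it the option string is outside this hunk. A minimal sketch of such a hook, assuming a boot option of the form "ecrc=bios|off|on" (handler name and wiring are illustrative):

/* Illustrative only: forward an "ecrc=..." boot option to the parser */
static int __init example_ecrc_setup(char *str)
{
	if (str && !strncmp(str, "ecrc=", 5))
		pcie_ecrc_get_policy(str + 5);	/* "bios", "off" or "on" */
	return 1;
}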
diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
index b0367f1..3d27c97 100644
--- a/drivers/pci/pcie/aspm.c
+++ b/drivers/pci/pcie/aspm.c
@@ -26,40 +26,36 @@
#endif
#define MODULE_PARAM_PREFIX "pcie_aspm."
-struct endpoint_state {
- unsigned int l0s_acceptable_latency;
- unsigned int l1_acceptable_latency;
+struct aspm_latency {
+ u32 l0s; /* L0s latency (nsec) */
+ u32 l1; /* L1 latency (nsec) */
};
struct pcie_link_state {
- struct list_head sibiling;
- struct pci_dev *pdev;
- bool downstream_has_switch;
-
- struct pcie_link_state *parent;
- struct list_head children;
- struct list_head link;
+ struct pci_dev *pdev; /* Upstream component of the Link */
+ struct pcie_link_state *root; /* pointer to the root port link */
+ struct pcie_link_state *parent; /* pointer to the parent Link state */
+ struct list_head sibling; /* node in link_list */
+ struct list_head children; /* list of child link states */
+ struct list_head link; /* node in parent's children list */
/* ASPM state */
- unsigned int support_state;
- unsigned int enabled_state;
- unsigned int bios_aspm_state;
- /* upstream component */
- unsigned int l0s_upper_latency;
- unsigned int l1_upper_latency;
- /* downstream component */
- unsigned int l0s_down_latency;
- unsigned int l1_down_latency;
- /* Clock PM state*/
- unsigned int clk_pm_capable;
- unsigned int clk_pm_enabled;
- unsigned int bios_clk_state;
+ u32 aspm_support:2; /* Supported ASPM state */
+ u32 aspm_enabled:2; /* Enabled ASPM state */
+ u32 aspm_default:2; /* Default ASPM state by BIOS */
+ /* Clock PM state */
+ u32 clkpm_capable:1; /* Clock PM capable? */
+ u32 clkpm_enabled:1; /* Current Clock PM state */
+ u32 clkpm_default:1; /* Default Clock PM state by BIOS */
+
+ /* Latencies */
+ struct aspm_latency latency; /* Exit latency */
/*
- * A pcie downstream port only has one slot under it, so at most there
- * are 8 functions
+ * Endpoint acceptable latencies. A pcie downstream port only
+ * has one slot under it, so at most there are 8 functions.
*/
- struct endpoint_state endpoints[8];
+ struct aspm_latency acceptable[8];
};
static int aspm_disabled, aspm_force;
@@ -78,27 +74,23 @@
#define LINK_RETRAIN_TIMEOUT HZ
-static int policy_to_aspm_state(struct pci_dev *pdev)
+static int policy_to_aspm_state(struct pcie_link_state *link)
{
- struct pcie_link_state *link_state = pdev->link_state;
-
switch (aspm_policy) {
case POLICY_PERFORMANCE:
/* Disable ASPM and Clock PM */
return 0;
case POLICY_POWERSAVE:
/* Enable ASPM L0s/L1 */
- return PCIE_LINK_STATE_L0S|PCIE_LINK_STATE_L1;
+ return PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1;
case POLICY_DEFAULT:
- return link_state->bios_aspm_state;
+ return link->aspm_default;
}
return 0;
}
-static int policy_to_clkpm_state(struct pci_dev *pdev)
+static int policy_to_clkpm_state(struct pcie_link_state *link)
{
- struct pcie_link_state *link_state = pdev->link_state;
-
switch (aspm_policy) {
case POLICY_PERFORMANCE:
/* Disable ASPM and Clock PM */
@@ -107,73 +99,78 @@
/* Disable Clock PM */
return 1;
case POLICY_DEFAULT:
- return link_state->bios_clk_state;
+ return link->clkpm_default;
}
return 0;
}
-static void pcie_set_clock_pm(struct pci_dev *pdev, int enable)
+static void pcie_set_clkpm_nocheck(struct pcie_link_state *link, int enable)
{
- struct pci_dev *child_dev;
int pos;
u16 reg16;
- struct pcie_link_state *link_state = pdev->link_state;
+ struct pci_dev *child;
+ struct pci_bus *linkbus = link->pdev->subordinate;
- list_for_each_entry(child_dev, &pdev->subordinate->devices, bus_list) {
- pos = pci_find_capability(child_dev, PCI_CAP_ID_EXP);
+ list_for_each_entry(child, &linkbus->devices, bus_list) {
+ pos = pci_find_capability(child, PCI_CAP_ID_EXP);
if (!pos)
return;
-		pci_read_config_word(child_dev, pos + PCI_EXP_LNKCTL, &reg16);
+		pci_read_config_word(child, pos + PCI_EXP_LNKCTL, &reg16);
if (enable)
reg16 |= PCI_EXP_LNKCTL_CLKREQ_EN;
else
reg16 &= ~PCI_EXP_LNKCTL_CLKREQ_EN;
- pci_write_config_word(child_dev, pos + PCI_EXP_LNKCTL, reg16);
+ pci_write_config_word(child, pos + PCI_EXP_LNKCTL, reg16);
}
- link_state->clk_pm_enabled = !!enable;
+ link->clkpm_enabled = !!enable;
}
-static void pcie_check_clock_pm(struct pci_dev *pdev, int blacklist)
+static void pcie_set_clkpm(struct pcie_link_state *link, int enable)
{
- int pos;
+ /* Don't enable Clock PM if the link is not Clock PM capable */
+ if (!link->clkpm_capable && enable)
+ return;
+	/* Nothing to do if the specified state equals the current state */
+ if (link->clkpm_enabled == enable)
+ return;
+ pcie_set_clkpm_nocheck(link, enable);
+}
+
+static void pcie_clkpm_cap_init(struct pcie_link_state *link, int blacklist)
+{
+ int pos, capable = 1, enabled = 1;
u32 reg32;
u16 reg16;
- int capable = 1, enabled = 1;
- struct pci_dev *child_dev;
- struct pcie_link_state *link_state = pdev->link_state;
+ struct pci_dev *child;
+ struct pci_bus *linkbus = link->pdev->subordinate;
/* All functions should have the same cap and state, take the worst */
- list_for_each_entry(child_dev, &pdev->subordinate->devices, bus_list) {
- pos = pci_find_capability(child_dev, PCI_CAP_ID_EXP);
+ list_for_each_entry(child, &linkbus->devices, bus_list) {
+ pos = pci_find_capability(child, PCI_CAP_ID_EXP);
if (!pos)
return;
-		pci_read_config_dword(child_dev, pos + PCI_EXP_LNKCAP, &reg32);
+		pci_read_config_dword(child, pos + PCI_EXP_LNKCAP, &reg32);
if (!(reg32 & PCI_EXP_LNKCAP_CLKPM)) {
capable = 0;
enabled = 0;
break;
}
-		pci_read_config_word(child_dev, pos + PCI_EXP_LNKCTL, &reg16);
+		pci_read_config_word(child, pos + PCI_EXP_LNKCTL, &reg16);
if (!(reg16 & PCI_EXP_LNKCTL_CLKREQ_EN))
enabled = 0;
}
- link_state->clk_pm_enabled = enabled;
- link_state->bios_clk_state = enabled;
- if (!blacklist) {
- link_state->clk_pm_capable = capable;
- pcie_set_clock_pm(pdev, policy_to_clkpm_state(pdev));
- } else {
- link_state->clk_pm_capable = 0;
- pcie_set_clock_pm(pdev, 0);
- }
+ link->clkpm_enabled = enabled;
+ link->clkpm_default = enabled;
+ link->clkpm_capable = (blacklist) ? 0 : capable;
}
-static bool pcie_aspm_downstream_has_switch(struct pci_dev *pdev)
+static bool pcie_aspm_downstream_has_switch(struct pcie_link_state *link)
{
- struct pci_dev *child_dev;
+ struct pci_dev *child;
+ struct pci_bus *linkbus = link->pdev->subordinate;
- list_for_each_entry(child_dev, &pdev->subordinate->devices, bus_list) {
- if (child_dev->pcie_type == PCI_EXP_TYPE_UPSTREAM)
+ list_for_each_entry(child, &linkbus->devices, bus_list) {
+ if (child->pcie_type == PCI_EXP_TYPE_UPSTREAM)
return true;
}
return false;
@@ -184,289 +181,263 @@
* could use common clock. If they are, configure them to use the
* common clock. That will reduce the ASPM state exit latency.
*/
-static void pcie_aspm_configure_common_clock(struct pci_dev *pdev)
+static void pcie_aspm_configure_common_clock(struct pcie_link_state *link)
{
- int pos, child_pos, i = 0;
- u16 reg16 = 0;
- struct pci_dev *child_dev;
- int same_clock = 1;
+ int ppos, cpos, same_clock = 1;
+ u16 reg16, parent_reg, child_reg[8];
unsigned long start_jiffies;
- u16 child_regs[8], parent_reg;
+ struct pci_dev *child, *parent = link->pdev;
+ struct pci_bus *linkbus = parent->subordinate;
/*
- * all functions of a slot should have the same Slot Clock
+ * All functions of a slot should have the same Slot Clock
* Configuration, so just check one function
- * */
- child_dev = list_entry(pdev->subordinate->devices.next, struct pci_dev,
- bus_list);
- BUG_ON(!child_dev->is_pcie);
+ */
+ child = list_entry(linkbus->devices.next, struct pci_dev, bus_list);
+ BUG_ON(!child->is_pcie);
/* Check downstream component if bit Slot Clock Configuration is 1 */
- child_pos = pci_find_capability(child_dev, PCI_CAP_ID_EXP);
-	pci_read_config_word(child_dev, child_pos + PCI_EXP_LNKSTA, &reg16);
+ cpos = pci_find_capability(child, PCI_CAP_ID_EXP);
+	pci_read_config_word(child, cpos + PCI_EXP_LNKSTA, &reg16);
if (!(reg16 & PCI_EXP_LNKSTA_SLC))
same_clock = 0;
/* Check upstream component if bit Slot Clock Configuration is 1 */
- pos = pci_find_capability(pdev, PCI_CAP_ID_EXP);
-	pci_read_config_word(pdev, pos + PCI_EXP_LNKSTA, &reg16);
+ ppos = pci_find_capability(parent, PCI_CAP_ID_EXP);
+	pci_read_config_word(parent, ppos + PCI_EXP_LNKSTA, &reg16);
if (!(reg16 & PCI_EXP_LNKSTA_SLC))
same_clock = 0;
/* Configure downstream component, all functions */
- list_for_each_entry(child_dev, &pdev->subordinate->devices, bus_list) {
- child_pos = pci_find_capability(child_dev, PCI_CAP_ID_EXP);
- pci_read_config_word(child_dev, child_pos + PCI_EXP_LNKCTL,
-				      &reg16);
- child_regs[i] = reg16;
+ list_for_each_entry(child, &linkbus->devices, bus_list) {
+ cpos = pci_find_capability(child, PCI_CAP_ID_EXP);
+		pci_read_config_word(child, cpos + PCI_EXP_LNKCTL, &reg16);
+ child_reg[PCI_FUNC(child->devfn)] = reg16;
if (same_clock)
reg16 |= PCI_EXP_LNKCTL_CCC;
else
reg16 &= ~PCI_EXP_LNKCTL_CCC;
- pci_write_config_word(child_dev, child_pos + PCI_EXP_LNKCTL,
- reg16);
- i++;
+ pci_write_config_word(child, cpos + PCI_EXP_LNKCTL, reg16);
}
/* Configure upstream component */
-	pci_read_config_word(pdev, pos + PCI_EXP_LNKCTL, &reg16);
+	pci_read_config_word(parent, ppos + PCI_EXP_LNKCTL, &reg16);
parent_reg = reg16;
if (same_clock)
reg16 |= PCI_EXP_LNKCTL_CCC;
else
reg16 &= ~PCI_EXP_LNKCTL_CCC;
- pci_write_config_word(pdev, pos + PCI_EXP_LNKCTL, reg16);
+ pci_write_config_word(parent, ppos + PCI_EXP_LNKCTL, reg16);
- /* retrain link */
+ /* Retrain link */
reg16 |= PCI_EXP_LNKCTL_RL;
- pci_write_config_word(pdev, pos + PCI_EXP_LNKCTL, reg16);
+ pci_write_config_word(parent, ppos + PCI_EXP_LNKCTL, reg16);
- /* Wait for link training end */
- /* break out after waiting for timeout */
+ /* Wait for link training end. Break out after waiting for timeout */
start_jiffies = jiffies;
for (;;) {
-		pci_read_config_word(pdev, pos + PCI_EXP_LNKSTA, &reg16);
+		pci_read_config_word(parent, ppos + PCI_EXP_LNKSTA, &reg16);
if (!(reg16 & PCI_EXP_LNKSTA_LT))
break;
if (time_after(jiffies, start_jiffies + LINK_RETRAIN_TIMEOUT))
break;
msleep(1);
}
- /* training failed -> recover */
- if (reg16 & PCI_EXP_LNKSTA_LT) {
- dev_printk (KERN_ERR, &pdev->dev, "ASPM: Could not configure"
- " common clock\n");
- i = 0;
- list_for_each_entry(child_dev, &pdev->subordinate->devices,
- bus_list) {
- child_pos = pci_find_capability(child_dev,
- PCI_CAP_ID_EXP);
- pci_write_config_word(child_dev,
- child_pos + PCI_EXP_LNKCTL,
- child_regs[i]);
- i++;
- }
- pci_write_config_word(pdev, pos + PCI_EXP_LNKCTL, parent_reg);
+ if (!(reg16 & PCI_EXP_LNKSTA_LT))
+ return;
+
+ /* Training failed. Restore common clock configurations */
+ dev_printk(KERN_ERR, &parent->dev,
+ "ASPM: Could not configure common clock\n");
+ list_for_each_entry(child, &linkbus->devices, bus_list) {
+ cpos = pci_find_capability(child, PCI_CAP_ID_EXP);
+ pci_write_config_word(child, cpos + PCI_EXP_LNKCTL,
+ child_reg[PCI_FUNC(child->devfn)]);
}
+ pci_write_config_word(parent, ppos + PCI_EXP_LNKCTL, parent_reg);
}
-/*
- * calc_L0S_latency: Convert L0s latency encoding to ns
- */
-static unsigned int calc_L0S_latency(unsigned int latency_encoding, int ac)
+/* Convert L0s latency encoding to ns */
+static u32 calc_l0s_latency(u32 encoding)
{
- unsigned int ns = 64;
-
- if (latency_encoding == 0x7) {
- if (ac)
- ns = -1U;
- else
- ns = 5*1000; /* > 4us */
- } else
- ns *= (1 << latency_encoding);
- return ns;
+ if (encoding == 0x7)
+ return (5 * 1000); /* > 4us */
+ return (64 << encoding);
}
-/*
- * calc_L1_latency: Convert L1 latency encoding to ns
- */
-static unsigned int calc_L1_latency(unsigned int latency_encoding, int ac)
+/* Convert L0s acceptable latency encoding to ns */
+static u32 calc_l0s_acceptable(u32 encoding)
{
- unsigned int ns = 1000;
+ if (encoding == 0x7)
+ return -1U;
+ return (64 << encoding);
+}
- if (latency_encoding == 0x7) {
- if (ac)
- ns = -1U;
- else
- ns = 65*1000; /* > 64us */
- } else
- ns *= (1 << latency_encoding);
- return ns;
+/* Convert L1 latency encoding to ns */
+static u32 calc_l1_latency(u32 encoding)
+{
+ if (encoding == 0x7)
+ return (65 * 1000); /* > 64us */
+ return (1000 << encoding);
+}
+
+/* Convert L1 acceptable latency encoding to ns */
+static u32 calc_l1_acceptable(u32 encoding)
+{
+ if (encoding == 0x7)
+ return -1U;
+ return (1000 << encoding);
}
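A quick worked check of the conversion helpers above, with illustrative encodings:

/*
 * Illustrative encodings (not taken from real hardware):
 *   calc_l0s_latency(0x3)    -> 64 << 3   = 512 ns
 *   calc_l1_latency(0x6)     -> 1000 << 6 = 64000 ns (64 us)
 *   calc_l0s_acceptable(0x7) -> -1U (no acceptable-latency limit)
 *   calc_l1_acceptable(0x2)  -> 1000 << 2 = 4000 ns (4 us)
 */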
static void pcie_aspm_get_cap_device(struct pci_dev *pdev, u32 *state,
- unsigned int *l0s, unsigned int *l1, unsigned int *enabled)
+ u32 *l0s, u32 *l1, u32 *enabled)
{
int pos;
u16 reg16;
- u32 reg32;
- unsigned int latency;
+ u32 reg32, encoding;
+ *l0s = *l1 = *enabled = 0;
pos = pci_find_capability(pdev, PCI_CAP_ID_EXP);
 	pci_read_config_dword(pdev, pos + PCI_EXP_LNKCAP, &reg32);
*state = (reg32 & PCI_EXP_LNKCAP_ASPMS) >> 10;
if (*state != PCIE_LINK_STATE_L0S &&
- *state != (PCIE_LINK_STATE_L1|PCIE_LINK_STATE_L0S))
+ *state != (PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_L0S))
*state = 0;
if (*state == 0)
return;
- latency = (reg32 & PCI_EXP_LNKCAP_L0SEL) >> 12;
- *l0s = calc_L0S_latency(latency, 0);
+ encoding = (reg32 & PCI_EXP_LNKCAP_L0SEL) >> 12;
+ *l0s = calc_l0s_latency(encoding);
if (*state & PCIE_LINK_STATE_L1) {
- latency = (reg32 & PCI_EXP_LNKCAP_L1EL) >> 15;
- *l1 = calc_L1_latency(latency, 0);
+ encoding = (reg32 & PCI_EXP_LNKCAP_L1EL) >> 15;
+ *l1 = calc_l1_latency(encoding);
}
 	pci_read_config_word(pdev, pos + PCI_EXP_LNKCTL, &reg16);
- *enabled = reg16 & (PCIE_LINK_STATE_L0S|PCIE_LINK_STATE_L1);
+ *enabled = reg16 & (PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1);
}
-static void pcie_aspm_cap_init(struct pci_dev *pdev)
+static void pcie_aspm_cap_init(struct pcie_link_state *link, int blacklist)
{
- struct pci_dev *child_dev;
- u32 state, tmp;
- struct pcie_link_state *link_state = pdev->link_state;
+ u32 support, l0s, l1, enabled;
+ struct pci_dev *child, *parent = link->pdev;
+ struct pci_bus *linkbus = parent->subordinate;
+
+ if (blacklist) {
+ /* Set support state to 0, so we will disable ASPM later */
+ link->aspm_support = 0;
+ link->aspm_default = 0;
+ link->aspm_enabled = PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1;
+ return;
+ }
+
+ /* Configure common clock before checking latencies */
+ pcie_aspm_configure_common_clock(link);
/* upstream component states */
- pcie_aspm_get_cap_device(pdev, &link_state->support_state,
- &link_state->l0s_upper_latency,
- &link_state->l1_upper_latency,
- &link_state->enabled_state);
+ pcie_aspm_get_cap_device(parent, &support, &l0s, &l1, &enabled);
+ link->aspm_support = support;
+ link->latency.l0s = l0s;
+ link->latency.l1 = l1;
+ link->aspm_enabled = enabled;
+
/* downstream component states, all functions have the same setting */
- child_dev = list_entry(pdev->subordinate->devices.next, struct pci_dev,
- bus_list);
- pcie_aspm_get_cap_device(child_dev, &state,
- &link_state->l0s_down_latency,
- &link_state->l1_down_latency,
- &tmp);
- link_state->support_state &= state;
- if (!link_state->support_state)
+ child = list_entry(linkbus->devices.next, struct pci_dev, bus_list);
+ pcie_aspm_get_cap_device(child, &support, &l0s, &l1, &enabled);
+ link->aspm_support &= support;
+ link->latency.l0s = max_t(u32, link->latency.l0s, l0s);
+ link->latency.l1 = max_t(u32, link->latency.l1, l1);
+
+ if (!link->aspm_support)
return;
- link_state->enabled_state &= link_state->support_state;
- link_state->bios_aspm_state = link_state->enabled_state;
+
+ link->aspm_enabled &= link->aspm_support;
+ link->aspm_default = link->aspm_enabled;
/* ENDPOINT states*/
- list_for_each_entry(child_dev, &pdev->subordinate->devices, bus_list) {
+ list_for_each_entry(child, &linkbus->devices, bus_list) {
int pos;
- u32 reg32;
- unsigned int latency;
- struct endpoint_state *ep_state =
- &link_state->endpoints[PCI_FUNC(child_dev->devfn)];
+ u32 reg32, encoding;
+ struct aspm_latency *acceptable =
+ &link->acceptable[PCI_FUNC(child->devfn)];
- if (child_dev->pcie_type != PCI_EXP_TYPE_ENDPOINT &&
- child_dev->pcie_type != PCI_EXP_TYPE_LEG_END)
+ if (child->pcie_type != PCI_EXP_TYPE_ENDPOINT &&
+ child->pcie_type != PCI_EXP_TYPE_LEG_END)
continue;
- pos = pci_find_capability(child_dev, PCI_CAP_ID_EXP);
-		pci_read_config_dword(child_dev, pos + PCI_EXP_DEVCAP, &reg32);
- latency = (reg32 & PCI_EXP_DEVCAP_L0S) >> 6;
- latency = calc_L0S_latency(latency, 1);
- ep_state->l0s_acceptable_latency = latency;
- if (link_state->support_state & PCIE_LINK_STATE_L1) {
- latency = (reg32 & PCI_EXP_DEVCAP_L1) >> 9;
- latency = calc_L1_latency(latency, 1);
- ep_state->l1_acceptable_latency = latency;
+ pos = pci_find_capability(child, PCI_CAP_ID_EXP);
+		pci_read_config_dword(child, pos + PCI_EXP_DEVCAP, &reg32);
+ encoding = (reg32 & PCI_EXP_DEVCAP_L0S) >> 6;
+ acceptable->l0s = calc_l0s_acceptable(encoding);
+ if (link->aspm_support & PCIE_LINK_STATE_L1) {
+ encoding = (reg32 & PCI_EXP_DEVCAP_L1) >> 9;
+ acceptable->l1 = calc_l1_acceptable(encoding);
}
}
}
-static unsigned int __pcie_aspm_check_state_one(struct pci_dev *pdev,
- unsigned int state)
+/**
+ * __pcie_aspm_check_state_one - check latency for endpoint device.
+ * @endpoint: pointer to the struct pci_dev of the endpoint device
+ * @state: ASPM states (L0s/L1) to be checked
+ *
+ * TBD: The latency from the endpoint to the root complex varies with the
+ * state of each switch's upstream link above the device. Here we just do
+ * a simple check which assumes all links above the device can be in the
+ * L1 state, i.e. we consider the worst case. If a switch's upstream link
+ * cannot be put into L0s/L1, then our check is too strict.
+ */
+static u32 __pcie_aspm_check_state_one(struct pci_dev *endpoint, u32 state)
{
- struct pci_dev *parent_dev, *tmp_dev;
- unsigned int latency, l1_latency = 0;
- struct pcie_link_state *link_state;
- struct endpoint_state *ep_state;
+ u32 l1_switch_latency = 0;
+ struct aspm_latency *acceptable;
+ struct pcie_link_state *link;
- parent_dev = pdev->bus->self;
- link_state = parent_dev->link_state;
- state &= link_state->support_state;
- if (state == 0)
- return 0;
- ep_state = &link_state->endpoints[PCI_FUNC(pdev->devfn)];
+ link = endpoint->bus->self->link_state;
+ state &= link->aspm_support;
+ acceptable = &link->acceptable[PCI_FUNC(endpoint->devfn)];
- /*
- * Check latency for endpoint device.
- * TBD: The latency from the endpoint to root complex vary per
- * switch's upstream link state above the device. Here we just do a
- * simple check which assumes all links above the device can be in L1
- * state, that is we just consider the worst case. If switch's upstream
- * link can't be put into L0S/L1, then our check is too strictly.
- */
- tmp_dev = pdev;
- while (state & (PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1)) {
- parent_dev = tmp_dev->bus->self;
- link_state = parent_dev->link_state;
- if (state & PCIE_LINK_STATE_L0S) {
- latency = max_t(unsigned int,
- link_state->l0s_upper_latency,
- link_state->l0s_down_latency);
- if (latency > ep_state->l0s_acceptable_latency)
- state &= ~PCIE_LINK_STATE_L0S;
- }
- if (state & PCIE_LINK_STATE_L1) {
- latency = max_t(unsigned int,
- link_state->l1_upper_latency,
- link_state->l1_down_latency);
- if (latency + l1_latency >
- ep_state->l1_acceptable_latency)
- state &= ~PCIE_LINK_STATE_L1;
- }
- if (!parent_dev->bus->self) /* parent_dev is a root port */
- break;
- else {
- /*
- * parent_dev is the downstream port of a switch, make
- * tmp_dev the upstream port of the switch
- */
- tmp_dev = parent_dev->bus->self;
- /*
- * every switch on the path to root complex need 1 more
- * microsecond for L1. Spec doesn't mention L0S.
- */
- if (state & PCIE_LINK_STATE_L1)
- l1_latency += 1000;
- }
+ while (link && state) {
+ if ((state & PCIE_LINK_STATE_L0S) &&
+ (link->latency.l0s > acceptable->l0s))
+ state &= ~PCIE_LINK_STATE_L0S;
+ if ((state & PCIE_LINK_STATE_L1) &&
+ (link->latency.l1 + l1_switch_latency > acceptable->l1))
+ state &= ~PCIE_LINK_STATE_L1;
+ link = link->parent;
+ /*
+		 * Every switch on the path to the root complex needs 1
+		 * more microsecond for L1. Spec doesn't mention L0s.
+ */
+ l1_switch_latency += 1000;
}
return state;
}
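A worked illustration of the loop above, using hypothetical numbers:

/*
 * Hypothetical walk: an endpoint advertising an acceptable L1 exit
 * latency encoding of 0x6 tolerates 1000 << 6 = 64000 ns (64 us) and
 * sits behind one switch.
 *   pass 1: link above the endpoint, l1_switch_latency = 0 ns
 *   pass 2: root port link,          l1_switch_latency = 1000 ns
 * L1 stays enabled only if each link's L1 exit latency plus the
 * accumulated 1 us per traversed switch fits in the 64 us budget.
 */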
-static unsigned int pcie_aspm_check_state(struct pci_dev *pdev,
- unsigned int state)
+static u32 pcie_aspm_check_state(struct pcie_link_state *link, u32 state)
{
- struct pci_dev *child_dev;
+ pci_power_t power_state;
+ struct pci_dev *child;
+ struct pci_bus *linkbus = link->pdev->subordinate;
/* If no child, ignore the link */
- if (list_empty(&pdev->subordinate->devices))
+ if (list_empty(&linkbus->devices))
return state;
- list_for_each_entry(child_dev, &pdev->subordinate->devices, bus_list) {
- if (child_dev->pcie_type == PCI_EXP_TYPE_PCI_BRIDGE) {
- /*
- * If downstream component of a link is pci bridge, we
- * disable ASPM for now for the link
- * */
- state = 0;
- break;
- }
- if ((child_dev->pcie_type != PCI_EXP_TYPE_ENDPOINT &&
- child_dev->pcie_type != PCI_EXP_TYPE_LEG_END))
+
+ list_for_each_entry(child, &linkbus->devices, bus_list) {
+ /*
+		 * If the downstream component of a link is a PCI bridge,
+		 * disable ASPM for the link for now.
+ */
+ if (child->pcie_type == PCI_EXP_TYPE_PCI_BRIDGE)
+ return 0;
+
+ if ((child->pcie_type != PCI_EXP_TYPE_ENDPOINT &&
+ child->pcie_type != PCI_EXP_TYPE_LEG_END))
continue;
 		/* A device not in D0 doesn't need the latency check */
- if (child_dev->current_state == PCI_D1 ||
- child_dev->current_state == PCI_D2 ||
- child_dev->current_state == PCI_D3hot ||
- child_dev->current_state == PCI_D3cold)
+ power_state = child->current_state;
+ if (power_state == PCI_D1 || power_state == PCI_D2 ||
+ power_state == PCI_D3hot || power_state == PCI_D3cold)
continue;
- state = __pcie_aspm_check_state_one(child_dev, state);
+ state = __pcie_aspm_check_state_one(child, state);
}
return state;
}
@@ -482,90 +453,71 @@
pci_write_config_word(pdev, pos + PCI_EXP_LNKCTL, reg16);
}
-static void __pcie_aspm_config_link(struct pci_dev *pdev, unsigned int state)
+static void __pcie_aspm_config_link(struct pcie_link_state *link, u32 state)
{
- struct pci_dev *child_dev;
- int valid = 1;
- struct pcie_link_state *link_state = pdev->link_state;
+ struct pci_dev *child, *parent = link->pdev;
+ struct pci_bus *linkbus = parent->subordinate;
/* If no child, disable the link */
- if (list_empty(&pdev->subordinate->devices))
+ if (list_empty(&linkbus->devices))
state = 0;
/*
- * if the downstream component has pci bridge function, don't do ASPM
- * now
+ * If the downstream component has pci bridge function, don't
+ * do ASPM now.
*/
- list_for_each_entry(child_dev, &pdev->subordinate->devices, bus_list) {
- if (child_dev->pcie_type == PCI_EXP_TYPE_PCI_BRIDGE) {
- valid = 0;
- break;
- }
+ list_for_each_entry(child, &linkbus->devices, bus_list) {
+ if (child->pcie_type == PCI_EXP_TYPE_PCI_BRIDGE)
+ return;
}
- if (!valid)
- return;
-
/*
- * spec 2.0 suggests all functions should be configured the same
- * setting for ASPM. Enabling ASPM L1 should be done in upstream
- * component first and then downstream, and vice versa for disabling
- * ASPM L1. Spec doesn't mention L0S.
+	 * Spec 2.0 suggests all functions should be configured with the
+	 * same ASPM setting. Enabling ASPM L1 should be done in the
+	 * upstream component first and then downstream, and vice
+	 * versa for disabling ASPM L1. Spec doesn't mention L0S.
*/
if (state & PCIE_LINK_STATE_L1)
- __pcie_aspm_config_one_dev(pdev, state);
+ __pcie_aspm_config_one_dev(parent, state);
- list_for_each_entry(child_dev, &pdev->subordinate->devices, bus_list)
- __pcie_aspm_config_one_dev(child_dev, state);
+ list_for_each_entry(child, &linkbus->devices, bus_list)
+ __pcie_aspm_config_one_dev(child, state);
if (!(state & PCIE_LINK_STATE_L1))
- __pcie_aspm_config_one_dev(pdev, state);
+ __pcie_aspm_config_one_dev(parent, state);
- link_state->enabled_state = state;
+ link->aspm_enabled = state;
}
-static struct pcie_link_state *get_root_port_link(struct pcie_link_state *link)
+/* Check the whole hierarchy, and configure each link in the hierarchy */
+static void __pcie_aspm_configure_link_state(struct pcie_link_state *link,
+ u32 state)
{
- struct pcie_link_state *root_port_link = link;
- while (root_port_link->parent)
- root_port_link = root_port_link->parent;
- return root_port_link;
-}
+ struct pcie_link_state *leaf, *root = link->root;
-/* check the whole hierarchy, and configure each link in the hierarchy */
-static void __pcie_aspm_configure_link_state(struct pci_dev *pdev,
- unsigned int state)
-{
- struct pcie_link_state *link_state = pdev->link_state;
- struct pcie_link_state *root_port_link = get_root_port_link(link_state);
- struct pcie_link_state *leaf;
+ state &= (PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1);
- state &= PCIE_LINK_STATE_L0S|PCIE_LINK_STATE_L1;
-
- /* check all links who have specific root port link */
- list_for_each_entry(leaf, &link_list, sibiling) {
- if (!list_empty(&leaf->children) ||
- get_root_port_link(leaf) != root_port_link)
+	/* Check all links that share the given root port link */
+ list_for_each_entry(leaf, &link_list, sibling) {
+ if (!list_empty(&leaf->children) || (leaf->root != root))
continue;
- state = pcie_aspm_check_state(leaf->pdev, state);
+ state = pcie_aspm_check_state(leaf, state);
}
- /* check root port link too in case it hasn't children */
- state = pcie_aspm_check_state(root_port_link->pdev, state);
-
- if (link_state->enabled_state == state)
+	/* Check the root port link too, in case it has no children */
+ state = pcie_aspm_check_state(root, state);
+ if (link->aspm_enabled == state)
return;
-
/*
- * we must change the hierarchy. See comments in
+ * We must change the hierarchy. See comments in
* __pcie_aspm_config_link for the order
**/
if (state & PCIE_LINK_STATE_L1) {
- list_for_each_entry(leaf, &link_list, sibiling) {
- if (get_root_port_link(leaf) == root_port_link)
- __pcie_aspm_config_link(leaf->pdev, state);
+ list_for_each_entry(leaf, &link_list, sibling) {
+ if (leaf->root == root)
+ __pcie_aspm_config_link(leaf, state);
}
} else {
- list_for_each_entry_reverse(leaf, &link_list, sibiling) {
- if (get_root_port_link(leaf) == root_port_link)
- __pcie_aspm_config_link(leaf->pdev, state);
+ list_for_each_entry_reverse(leaf, &link_list, sibling) {
+ if (leaf->root == root)
+ __pcie_aspm_config_link(leaf, state);
}
}
}
@@ -574,45 +526,42 @@
* pcie_aspm_configure_link_state: enable/disable PCI express link state
* @pdev: the root port or switch downstream port
*/
-static void pcie_aspm_configure_link_state(struct pci_dev *pdev,
- unsigned int state)
+static void pcie_aspm_configure_link_state(struct pcie_link_state *link,
+ u32 state)
{
down_read(&pci_bus_sem);
mutex_lock(&aspm_lock);
- __pcie_aspm_configure_link_state(pdev, state);
+ __pcie_aspm_configure_link_state(link, state);
mutex_unlock(&aspm_lock);
up_read(&pci_bus_sem);
}
-static void free_link_state(struct pci_dev *pdev)
+static void free_link_state(struct pcie_link_state *link)
{
- kfree(pdev->link_state);
- pdev->link_state = NULL;
+ link->pdev->link_state = NULL;
+ kfree(link);
}
static int pcie_aspm_sanity_check(struct pci_dev *pdev)
{
- struct pci_dev *child_dev;
- int child_pos;
+ struct pci_dev *child;
+ int pos;
u32 reg32;
-
/*
- * Some functions in a slot might not all be PCIE functions, very
- * strange. Disable ASPM for the whole slot
+ * Some functions in a slot might not all be PCIE functions,
+ * very strange. Disable ASPM for the whole slot
*/
- list_for_each_entry(child_dev, &pdev->subordinate->devices, bus_list) {
- child_pos = pci_find_capability(child_dev, PCI_CAP_ID_EXP);
- if (!child_pos)
+ list_for_each_entry(child, &pdev->subordinate->devices, bus_list) {
+ pos = pci_find_capability(child, PCI_CAP_ID_EXP);
+ if (!pos)
return -EINVAL;
-
/*
* Disable ASPM for pre-1.1 PCIe device, we follow MS to use
* RBER bit to determine if a function is 1.1 version device
*/
- pci_read_config_dword(child_dev, child_pos + PCI_EXP_DEVCAP,
-				&reg32);
+		pci_read_config_dword(child, pos + PCI_EXP_DEVCAP, &reg32);
if (!(reg32 & PCI_EXP_DEVCAP_RBER) && !aspm_force) {
- dev_printk(KERN_INFO, &child_dev->dev, "disabling ASPM"
+ dev_printk(KERN_INFO, &child->dev, "disabling ASPM"
" on pre-1.1 PCIe device. You can enable it"
" with 'pcie_aspm=force'\n");
return -EINVAL;
@@ -621,6 +570,47 @@
return 0;
}
+static struct pcie_link_state *pcie_aspm_setup_link_state(struct pci_dev *pdev)
+{
+ struct pcie_link_state *link;
+ int blacklist = !!pcie_aspm_sanity_check(pdev);
+
+ link = kzalloc(sizeof(*link), GFP_KERNEL);
+ if (!link)
+ return NULL;
+ INIT_LIST_HEAD(&link->sibling);
+ INIT_LIST_HEAD(&link->children);
+ INIT_LIST_HEAD(&link->link);
+ link->pdev = pdev;
+ if (pdev->pcie_type == PCI_EXP_TYPE_DOWNSTREAM) {
+ struct pcie_link_state *parent;
+ parent = pdev->bus->parent->self->link_state;
+ if (!parent) {
+ kfree(link);
+ return NULL;
+ }
+ link->parent = parent;
+ list_add(&link->link, &parent->children);
+ }
+ /* Setup a pointer to the root port link */
+ if (!link->parent)
+ link->root = link;
+ else
+ link->root = link->parent->root;
+
+ list_add(&link->sibling, &link_list);
+
+ pdev->link_state = link;
+
+ /* Check ASPM capability */
+ pcie_aspm_cap_init(link, blacklist);
+
+ /* Check Clock PM capability */
+ pcie_clkpm_cap_init(link, blacklist);
+
+ return link;
+}
+
/*
* pcie_aspm_init_link_state: Initiate PCI express link state.
  * It is called after the PCIe device and its child devices are scanned.
@@ -628,75 +618,47 @@
*/
void pcie_aspm_init_link_state(struct pci_dev *pdev)
{
- unsigned int state;
- struct pcie_link_state *link_state;
- int error = 0;
- int blacklist;
+ u32 state;
+ struct pcie_link_state *link;
if (aspm_disabled || !pdev->is_pcie || pdev->link_state)
return;
if (pdev->pcie_type != PCI_EXP_TYPE_ROOT_PORT &&
- pdev->pcie_type != PCI_EXP_TYPE_DOWNSTREAM)
+ pdev->pcie_type != PCI_EXP_TYPE_DOWNSTREAM)
return;
+
+ /* VIA has a strange chipset, root port is under a bridge */
+ if (pdev->pcie_type == PCI_EXP_TYPE_ROOT_PORT &&
+ pdev->bus->self)
+ return;
+
down_read(&pci_bus_sem);
if (list_empty(&pdev->subordinate->devices))
goto out;
- blacklist = !!pcie_aspm_sanity_check(pdev);
-
mutex_lock(&aspm_lock);
-
- link_state = kzalloc(sizeof(*link_state), GFP_KERNEL);
- if (!link_state)
- goto unlock_out;
-
- link_state->downstream_has_switch = pcie_aspm_downstream_has_switch(pdev);
- INIT_LIST_HEAD(&link_state->children);
- INIT_LIST_HEAD(&link_state->link);
- if (pdev->bus->self) {/* this is a switch */
- struct pcie_link_state *parent_link_state;
-
- parent_link_state = pdev->bus->parent->self->link_state;
- if (!parent_link_state) {
- kfree(link_state);
- goto unlock_out;
- }
- list_add(&link_state->link, &parent_link_state->children);
- link_state->parent = parent_link_state;
- }
-
- pdev->link_state = link_state;
-
- if (!blacklist) {
- pcie_aspm_configure_common_clock(pdev);
- pcie_aspm_cap_init(pdev);
+ link = pcie_aspm_setup_link_state(pdev);
+ if (!link)
+ goto unlock;
+ /*
+ * Setup initial ASPM state
+ *
+	 * If the link has a switch below it, delay the link config.
+	 * The leaf link initialization will configure the whole
+	 * hierarchy, but we must make sure the BIOS doesn't set an
+	 * unsupported link state.
+ */
+ if (pcie_aspm_downstream_has_switch(link)) {
+ state = pcie_aspm_check_state(link, link->aspm_default);
+ __pcie_aspm_config_link(link, state);
} else {
- link_state->enabled_state = PCIE_LINK_STATE_L0S|PCIE_LINK_STATE_L1;
- link_state->bios_aspm_state = 0;
- /* Set support state to 0, so we will disable ASPM later */
- link_state->support_state = 0;
+ state = policy_to_aspm_state(link);
+ __pcie_aspm_configure_link_state(link, state);
}
- link_state->pdev = pdev;
- list_add(&link_state->sibiling, &link_list);
-
- if (link_state->downstream_has_switch) {
- /*
- * If link has switch, delay the link config. The leaf link
- * initialization will config the whole hierarchy. but we must
- * make sure BIOS doesn't set unsupported link state
- **/
- state = pcie_aspm_check_state(pdev, link_state->bios_aspm_state);
- __pcie_aspm_config_link(pdev, state);
- } else
- __pcie_aspm_configure_link_state(pdev,
- policy_to_aspm_state(pdev));
-
- pcie_check_clock_pm(pdev, blacklist);
-
-unlock_out:
- if (error)
- free_link_state(pdev);
+ /* Setup initial Clock PM state */
+ state = (link->clkpm_capable) ? policy_to_clkpm_state(link) : 0;
+ pcie_set_clkpm(link, state);
+unlock:
mutex_unlock(&aspm_lock);
out:
up_read(&pci_bus_sem);
@@ -725,11 +687,11 @@
/* All functions are removed, so just disable ASPM for the link */
__pcie_aspm_config_one_dev(parent, 0);
- list_del(&link_state->sibiling);
+ list_del(&link_state->sibling);
list_del(&link_state->link);
/* Clock PM is for endpoint device */
- free_link_state(parent);
+ free_link_state(link_state);
out:
mutex_unlock(&aspm_lock);
up_read(&pci_bus_sem);
@@ -749,7 +711,7 @@
* devices changed PM state, we should recheck if latency meets all
* functions' requirement
*/
- pcie_aspm_configure_link_state(pdev, link_state->enabled_state);
+ pcie_aspm_configure_link_state(link_state, link_state->aspm_enabled);
}
/*
@@ -772,14 +734,12 @@
down_read(&pci_bus_sem);
mutex_lock(&aspm_lock);
link_state = parent->link_state;
- link_state->support_state &=
- ~(state & (PCIE_LINK_STATE_L0S|PCIE_LINK_STATE_L1));
- if (state & PCIE_LINK_STATE_CLKPM)
- link_state->clk_pm_capable = 0;
-
- __pcie_aspm_configure_link_state(parent, link_state->enabled_state);
- if (!link_state->clk_pm_capable && link_state->clk_pm_enabled)
- pcie_set_clock_pm(parent, 0);
+ link_state->aspm_support &= ~state;
+ __pcie_aspm_configure_link_state(link_state, link_state->aspm_enabled);
+ if (state & PCIE_LINK_STATE_CLKPM) {
+ link_state->clkpm_capable = 0;
+ pcie_set_clkpm(link_state, 0);
+ }
mutex_unlock(&aspm_lock);
up_read(&pci_bus_sem);
}
@@ -788,7 +748,6 @@
static int pcie_aspm_set_policy(const char *val, struct kernel_param *kp)
{
int i;
- struct pci_dev *pdev;
struct pcie_link_state *link_state;
for (i = 0; i < ARRAY_SIZE(policy_str); i++)
@@ -802,14 +761,10 @@
down_read(&pci_bus_sem);
mutex_lock(&aspm_lock);
aspm_policy = i;
- list_for_each_entry(link_state, &link_list, sibiling) {
- pdev = link_state->pdev;
- __pcie_aspm_configure_link_state(pdev,
- policy_to_aspm_state(pdev));
- if (link_state->clk_pm_capable &&
- link_state->clk_pm_enabled != policy_to_clkpm_state(pdev))
- pcie_set_clock_pm(pdev, policy_to_clkpm_state(pdev));
-
+ list_for_each_entry(link_state, &link_list, sibling) {
+ __pcie_aspm_configure_link_state(link_state,
+ policy_to_aspm_state(link_state));
+ pcie_set_clkpm(link_state, policy_to_clkpm_state(link_state));
}
mutex_unlock(&aspm_lock);
up_read(&pci_bus_sem);
@@ -838,7 +793,7 @@
struct pci_dev *pci_device = to_pci_dev(dev);
struct pcie_link_state *link_state = pci_device->link_state;
- return sprintf(buf, "%d\n", link_state->enabled_state);
+ return sprintf(buf, "%d\n", link_state->aspm_enabled);
}
static ssize_t link_state_store(struct device *dev,
@@ -846,7 +801,7 @@
const char *buf,
size_t n)
{
- struct pci_dev *pci_device = to_pci_dev(dev);
+ struct pci_dev *pdev = to_pci_dev(dev);
int state;
if (n < 1)
@@ -854,7 +809,7 @@
state = buf[0]-'0';
if (state >= 0 && state <= 3) {
/* setup link aspm state */
- pcie_aspm_configure_link_state(pci_device, state);
+ pcie_aspm_configure_link_state(pdev->link_state, state);
return n;
}
@@ -868,7 +823,7 @@
struct pci_dev *pci_device = to_pci_dev(dev);
struct pcie_link_state *link_state = pci_device->link_state;
- return sprintf(buf, "%d\n", link_state->clk_pm_enabled);
+ return sprintf(buf, "%d\n", link_state->clkpm_enabled);
}
static ssize_t clk_ctl_store(struct device *dev,
@@ -876,7 +831,7 @@
const char *buf,
size_t n)
{
- struct pci_dev *pci_device = to_pci_dev(dev);
+ struct pci_dev *pdev = to_pci_dev(dev);
int state;
if (n < 1)
@@ -885,7 +840,7 @@
down_read(&pci_bus_sem);
mutex_lock(&aspm_lock);
- pcie_set_clock_pm(pci_device, !!state);
+ pcie_set_clkpm_nocheck(pdev->link_state, !!state);
mutex_unlock(&aspm_lock);
up_read(&pci_bus_sem);
@@ -904,10 +859,10 @@
pdev->pcie_type != PCI_EXP_TYPE_DOWNSTREAM) || !link_state)
return;
- if (link_state->support_state)
+ if (link_state->aspm_support)
sysfs_add_file_to_group(&pdev->dev.kobj,
&dev_attr_link_state.attr, power_group);
- if (link_state->clk_pm_capable)
+ if (link_state->clkpm_capable)
sysfs_add_file_to_group(&pdev->dev.kobj,
&dev_attr_clk_ctl.attr, power_group);
}
@@ -920,10 +875,10 @@
pdev->pcie_type != PCI_EXP_TYPE_DOWNSTREAM) || !link_state)
return;
- if (link_state->support_state)
+ if (link_state->aspm_support)
sysfs_remove_file_from_group(&pdev->dev.kobj,
&dev_attr_link_state.attr, power_group);
- if (link_state->clk_pm_capable)
+ if (link_state->clkpm_capable)
sysfs_remove_file_from_group(&pdev->dev.kobj,
&dev_attr_clk_ctl.attr, power_group);
}
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index f1ae247..40e75f6 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -193,7 +193,7 @@
res->flags |= pci_calc_resource_flags(l) | IORESOURCE_SIZEALIGN;
if (type == pci_bar_io) {
l &= PCI_BASE_ADDRESS_IO_MASK;
- mask = PCI_BASE_ADDRESS_IO_MASK & 0xffff;
+ mask = PCI_BASE_ADDRESS_IO_MASK & IO_SPACE_LIMIT;
} else {
l &= PCI_BASE_ADDRESS_MEM_MASK;
mask = (u32)PCI_BASE_ADDRESS_MEM_MASK;
@@ -237,6 +237,8 @@
dev_printk(KERN_DEBUG, &dev->dev,
"reg %x 64bit mmio: %pR\n", pos, res);
}
+
+ res->flags |= IORESOURCE_MEM_64;
} else {
sz = pci_size(l, sz, mask);
@@ -287,7 +289,7 @@
struct resource *res;
int i;
- if (!child->parent) /* It's a host bus, nothing to read */
+ if (pci_is_root_bus(child)) /* It's a host bus, nothing to read */
return;
if (dev->transparent) {
@@ -362,7 +364,10 @@
}
}
if (base <= limit) {
- res->flags = (mem_base_lo & PCI_MEMORY_RANGE_TYPE_MASK) | IORESOURCE_MEM | IORESOURCE_PREFETCH;
+ res->flags = (mem_base_lo & PCI_PREF_RANGE_TYPE_MASK) |
+ IORESOURCE_MEM | IORESOURCE_PREFETCH;
+ if (res->flags & PCI_PREF_RANGE_TYPE_64)
+ res->flags |= IORESOURCE_MEM_64;
res->start = base;
res->end = limit + 0xfffff;
dev_printk(KERN_DEBUG, &dev->dev, "bridge %sbit mmio pref: %pR\n",
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index bd4253f..56552d7 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -1133,6 +1133,7 @@
switch (dev->subsystem_device) {
case 0x1751: /* M2N notebook */
case 0x1821: /* M5N notebook */
+ case 0x1897: /* A6L notebook */
asus_hides_smbus = 1;
}
else if (dev->device == PCI_DEVICE_ID_INTEL_82855PM_HB)
@@ -1163,6 +1164,7 @@
switch (dev->subsystem_device) {
case 0x12bc: /* HP D330L */
case 0x12bd: /* HP D530 */
+ case 0x006a: /* HP Compaq nx9500 */
asus_hides_smbus = 1;
}
else if (dev->device == PCI_DEVICE_ID_INTEL_82875_HB)
@@ -2016,6 +2018,28 @@
PCI_DEVICE_ID_NX2_5709S,
quirk_brcm_570x_limit_vpd);
+/* Originally in EDAC sources for i82875P:
+ * Intel tells BIOS developers to hide device 6 which
+ * configures the overflow device access containing
+ * the DRBs - this is where we expose device 6.
+ * http://www.x86-secret.com/articles/tweak/pat/patsecrets-2.htm
+ */
+static void __devinit quirk_unhide_mch_dev6(struct pci_dev *dev)
+{
+ u8 reg;
+
+	if (pci_read_config_byte(dev, 0xF4, &reg) == 0 && !(reg & 0x02)) {
+ dev_info(&dev->dev, "Enabling MCH 'Overflow' Device\n");
+ pci_write_config_byte(dev, 0xF4, reg | 0x02);
+ }
+}
+
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82865_HB,
+ quirk_unhide_mch_dev6);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82875_HB,
+ quirk_unhide_mch_dev6);
+
+
#ifdef CONFIG_PCI_MSI
/* Some chipsets do not support MSI. We cannot easily rely on setting
* PCI_BUS_FLAGS_NO_MSI in its bus flags because there are actually
diff --git a/drivers/pci/remove.c b/drivers/pci/remove.c
index 86503c1..176615e 100644
--- a/drivers/pci/remove.c
+++ b/drivers/pci/remove.c
@@ -32,8 +32,6 @@
static void pci_destroy_dev(struct pci_dev *dev)
{
- pci_stop_dev(dev);
-
/* Remove the device from the device lists, and prevent any further
* list accesses from this device */
down_write(&pci_bus_sem);
diff --git a/drivers/pci/search.c b/drivers/pci/search.c
index 710d4ea..e8cb505 100644
--- a/drivers/pci/search.c
+++ b/drivers/pci/search.c
@@ -29,7 +29,7 @@
if (pdev->is_pcie)
return NULL;
while (1) {
- if (!pdev->bus->parent)
+ if (pci_is_root_bus(pdev->bus))
break;
pdev = pdev->bus->self;
/* a p2p bridge */
@@ -115,36 +115,6 @@
#ifdef CONFIG_PCI_LEGACY
/**
- * pci_find_slot - locate PCI device from a given PCI slot
- * @bus: number of PCI bus on which desired PCI device resides
- * @devfn: encodes number of PCI slot in which the desired PCI
- * device resides and the logical device number within that slot
- * in case of multi-function devices.
- *
- * Given a PCI bus and slot/function number, the desired PCI device
- * is located in system global list of PCI devices. If the device
- * is found, a pointer to its data structure is returned. If no
- * device is found, %NULL is returned.
- *
- * NOTE: Do not use this function any more; use pci_get_slot() instead, as
- * the PCI device returned by this function can disappear at any moment in
- * time.
- */
-struct pci_dev *pci_find_slot(unsigned int bus, unsigned int devfn)
-{
- struct pci_dev *dev = NULL;
-
- while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) {
- if (dev->bus->number == bus && dev->devfn == devfn) {
- pci_dev_put(dev);
- return dev;
- }
- }
- return NULL;
-}
-EXPORT_SYMBOL(pci_find_slot);
-
-/**
* pci_find_device - begin or continue searching for a PCI device by vendor/device id
* @vendor: PCI vendor id to match, or %PCI_ANY_ID to match all vendor ids
* @device: PCI device id to match, or %PCI_ANY_ID to match all device ids
diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c
index a00f854..b636e24 100644
--- a/drivers/pci/setup-bus.c
+++ b/drivers/pci/setup-bus.c
@@ -58,7 +58,6 @@
res = list->res;
idx = res - &list->dev->resource[0];
if (pci_assign_resource(list->dev, idx)) {
- /* FIXME: get rid of this */
res->start = 0;
res->end = 0;
res->flags = 0;
@@ -143,6 +142,7 @@
struct pci_dev *bridge = bus->self;
struct pci_bus_region region;
u32 l, bu, lu, io_upper16;
+ int pref_mem64;
if (pci_is_enabled(bridge))
return;
@@ -198,16 +198,22 @@
pci_write_config_dword(bridge, PCI_PREF_LIMIT_UPPER32, 0);
/* Set up PREF base/limit. */
+ pref_mem64 = 0;
bu = lu = 0;
 	pcibios_resource_to_bus(bridge, &region, bus->resource[2]);
if (bus->resource[2]->flags & IORESOURCE_PREFETCH) {
+ int width = 8;
l = (region.start >> 16) & 0xfff0;
l |= region.end & 0xfff00000;
- bu = upper_32_bits(region.start);
- lu = upper_32_bits(region.end);
- dev_info(&bridge->dev, " PREFETCH window: %#016llx-%#016llx\n",
- (unsigned long long)region.start,
- (unsigned long long)region.end);
+ if (bus->resource[2]->flags & IORESOURCE_MEM_64) {
+ pref_mem64 = 1;
+ bu = upper_32_bits(region.start);
+ lu = upper_32_bits(region.end);
+ width = 16;
+ }
+ dev_info(&bridge->dev, " PREFETCH window: %#0*llx-%#0*llx\n",
+ width, (unsigned long long)region.start,
+ width, (unsigned long long)region.end);
}
else {
l = 0x0000fff0;
@@ -215,9 +221,11 @@
}
pci_write_config_dword(bridge, PCI_PREF_MEMORY_BASE, l);
- /* Set the upper 32 bits of PREF base & limit. */
- pci_write_config_dword(bridge, PCI_PREF_BASE_UPPER32, bu);
- pci_write_config_dword(bridge, PCI_PREF_LIMIT_UPPER32, lu);
+ if (pref_mem64) {
+ /* Set the upper 32 bits of PREF base & limit. */
+ pci_write_config_dword(bridge, PCI_PREF_BASE_UPPER32, bu);
+ pci_write_config_dword(bridge, PCI_PREF_LIMIT_UPPER32, lu);
+ }
pci_write_config_word(bridge, PCI_BRIDGE_CONTROL, bus->bridge_ctl);
}
@@ -255,8 +263,25 @@
pci_read_config_dword(bridge, PCI_PREF_MEMORY_BASE, &pmem);
pci_write_config_dword(bridge, PCI_PREF_MEMORY_BASE, 0x0);
}
- if (pmem)
+ if (pmem) {
b_res[2].flags |= IORESOURCE_MEM | IORESOURCE_PREFETCH;
+ if ((pmem & PCI_PREF_RANGE_TYPE_MASK) == PCI_PREF_RANGE_TYPE_64)
+ b_res[2].flags |= IORESOURCE_MEM_64;
+ }
+
+ /* double check if bridge does support 64 bit pref */
+ if (b_res[2].flags & IORESOURCE_MEM_64) {
+ u32 mem_base_hi, tmp;
+ pci_read_config_dword(bridge, PCI_PREF_BASE_UPPER32,
+ &mem_base_hi);
+ pci_write_config_dword(bridge, PCI_PREF_BASE_UPPER32,
+ 0xffffffff);
+ pci_read_config_dword(bridge, PCI_PREF_BASE_UPPER32, &tmp);
+ if (!tmp)
+ b_res[2].flags &= ~IORESOURCE_MEM_64;
+ pci_write_config_dword(bridge, PCI_PREF_BASE_UPPER32,
+ mem_base_hi);
+ }
}
/* Helper function for sizing routines: find first available
@@ -336,6 +361,7 @@
resource_size_t aligns[12]; /* Alignments from 1Mb to 2Gb */
int order, max_order;
struct resource *b_res = find_free_bus_resource(bus, type);
+ unsigned int mem64_mask = 0;
if (!b_res)
return 0;
@@ -344,9 +370,12 @@
max_order = 0;
size = 0;
+ mem64_mask = b_res->flags & IORESOURCE_MEM_64;
+ b_res->flags &= ~IORESOURCE_MEM_64;
+
list_for_each_entry(dev, &bus->devices, bus_list) {
int i;
-
+
for (i = 0; i < PCI_NUM_RESOURCES; i++) {
struct resource *r = &dev->resource[i];
resource_size_t r_size;
@@ -372,6 +401,7 @@
aligns[order] += align;
if (order > max_order)
max_order = order;
+ mem64_mask &= r->flags & IORESOURCE_MEM_64;
}
}
@@ -396,6 +426,7 @@
b_res->start = min_align;
b_res->end = size + min_align - 1;
b_res->flags |= IORESOURCE_STARTALIGN;
+ b_res->flags |= mem64_mask;
return 1;
}
diff --git a/drivers/pci/setup-res.c b/drivers/pci/setup-res.c
index 1240351..b711fb7 100644
--- a/drivers/pci/setup-res.c
+++ b/drivers/pci/setup-res.c
@@ -135,23 +135,16 @@
}
#endif /* CONFIG_PCI_QUIRKS */
-int pci_assign_resource(struct pci_dev *dev, int resno)
+static int __pci_assign_resource(struct pci_bus *bus, struct pci_dev *dev,
+ int resno)
{
- struct pci_bus *bus = dev->bus;
struct resource *res = dev->resource + resno;
resource_size_t size, min, align;
int ret;
size = resource_size(res);
min = (res->flags & IORESOURCE_IO) ? PCIBIOS_MIN_IO : PCIBIOS_MIN_MEM;
-
align = resource_alignment(res);
- if (!align) {
- dev_info(&dev->dev, "BAR %d: can't allocate resource (bogus "
- "alignment) %pR flags %#lx\n",
- resno, res, res->flags);
- return -EINVAL;
- }
/* First, try exact prefetching match.. */
ret = pci_bus_alloc_resource(bus, res, size, align, min,
@@ -169,10 +162,7 @@
pcibios_align_resource, dev);
}
- if (ret) {
- dev_info(&dev->dev, "BAR %d: can't allocate %s resource %pR\n",
- resno, res->flags & IORESOURCE_IO ? "I/O" : "mem", res);
- } else {
+ if (!ret) {
res->flags &= ~IORESOURCE_STARTALIGN;
if (resno < PCI_BRIDGE_RESOURCES)
pci_update_resource(dev, resno);
@@ -181,6 +171,39 @@
return ret;
}
+int pci_assign_resource(struct pci_dev *dev, int resno)
+{
+ struct resource *res = dev->resource + resno;
+ resource_size_t align;
+ struct pci_bus *bus;
+ int ret;
+
+ align = resource_alignment(res);
+ if (!align) {
+ dev_info(&dev->dev, "BAR %d: can't allocate resource (bogus "
+ "alignment) %pR flags %#lx\n",
+ resno, res, res->flags);
+ return -EINVAL;
+ }
+
+ bus = dev->bus;
+ while ((ret = __pci_assign_resource(bus, dev, resno))) {
+ if (bus->parent && bus->self->transparent)
+ bus = bus->parent;
+ else
+ bus = NULL;
+ if (bus)
+ continue;
+ break;
+ }
+
+ if (ret)
+ dev_info(&dev->dev, "BAR %d: can't allocate %s resource %pR\n",
+ resno, res->flags & IORESOURCE_IO ? "I/O" : "mem", res);
+
+ return ret;
+}
+
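The retry loop above climbs toward the root when the immediate bus has no room; a hedged summary of the effect:

/*
 * Illustrative only: a BAR that cannot be placed inside the window of
 * its bus is retried against the parent bus's resources, but only
 * across transparent (subtractive-decode) bridges; once a
 * non-transparent bridge or the root bus is reached, the failure is
 * finally reported with the dev_info() above.
 */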
#if 0
int pci_assign_resource_fixed(struct pci_dev *dev, int resno)
{
diff --git a/drivers/pci/slot.c b/drivers/pci/slot.c
index fe95ce2..eddb074 100644
--- a/drivers/pci/slot.c
+++ b/drivers/pci/slot.c
@@ -307,6 +307,45 @@
}
EXPORT_SYMBOL_GPL(pci_destroy_slot);
+#if defined(CONFIG_HOTPLUG_PCI) || defined(CONFIG_HOTPLUG_PCI_MODULE)
+#include <linux/pci_hotplug.h>
+/**
+ * pci_hp_create_module_link - create symbolic link to the hotplug driver module.
+ * @pci_slot: struct pci_slot to create the symbolic link for
+ *
+ * Helper function for pci_hotplug_core.c to create symbolic link to
+ * the hotplug driver module.
+ */
+void pci_hp_create_module_link(struct pci_slot *pci_slot)
+{
+ struct hotplug_slot *slot = pci_slot->hotplug;
+ struct kobject *kobj = NULL;
+ int no_warn;
+
+ if (!slot || !slot->ops)
+ return;
+ kobj = kset_find_obj(module_kset, slot->ops->mod_name);
+ if (!kobj)
+ return;
+ no_warn = sysfs_create_link(&pci_slot->kobj, kobj, "module");
+ kobject_put(kobj);
+}
+EXPORT_SYMBOL_GPL(pci_hp_create_module_link);
+
+/**
+ * pci_hp_remove_module_link - remove symbolic link to the hotplug driver module.
+ * @pci_slot: struct pci_slot to remove the symbolic link from
+ *
+ * Helper function for pci_hotplug_core.c to remove symbolic link to
+ * the hotplug driver module.
+ */
+void pci_hp_remove_module_link(struct pci_slot *pci_slot)
+{
+ sysfs_remove_link(&pci_slot->kobj, "module");
+}
+EXPORT_SYMBOL_GPL(pci_hp_remove_module_link);
+#endif
+
static int pci_slot_init(void)
{
struct kset *pci_bus_kset;
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
index c682ac5..fee6a40 100644
--- a/drivers/platform/x86/Kconfig
+++ b/drivers/platform/x86/Kconfig
@@ -34,10 +34,27 @@
If you have an ACPI-WMI compatible Acer/ Wistron laptop, say Y or M
here.
+config ACERHDF
+ tristate "Acer Aspire One temperature and fan driver"
+ depends on THERMAL && THERMAL_HWMON && ACPI
+ ---help---
+ This is a driver for Acer Aspire One netbooks. It allows access to
+ the temperature sensor and control of the fan.
+
+ After loading this driver the BIOS is still in control of the fan.
+ To let the kernel handle the fan, do:
+ echo -n enabled > /sys/class/thermal/thermal_zone0/mode
+
+ For more information about this driver see
+ <http://piie.net/files/acerhdf_README.txt>
+
+ If you have an Acer Aspire One netbook, say Y or M
+ here.
+
config ASUS_LAPTOP
- tristate "Asus Laptop Extras (EXPERIMENTAL)"
+ tristate "Asus Laptop Extras"
depends on ACPI
- depends on EXPERIMENTAL && !ACPI_ASUS
+ depends on !ACPI_ASUS
select LEDS_CLASS
select NEW_LEDS
select BACKLIGHT_CLASS_DEVICE
@@ -45,12 +62,12 @@
---help---
This is the new Linux driver for Asus laptops. It may also support some
MEDION, JVC or VICTOR laptops. It makes all the extra buttons generate
- standard ACPI events that go through /proc/acpi/events. It also adds
+ standard ACPI events and input events. It also adds
support for video output switching, LCD backlight control, Bluetooth and
Wlan control, and most importantly, allows you to blink those fancy LEDs.
For more information and a userspace daemon for handling the extra
- buttons see <http://acpi4asus.sf.net/>.
+ buttons see <http://acpi4asus.sf.net>.
If you have an ACPI-compatible ASUS laptop, say Y or M here.
@@ -340,9 +357,14 @@
depends on RFKILL || RFKILL = n
select BACKLIGHT_CLASS_DEVICE
select HWMON
+ select HOTPLUG
+ select HOTPLUG_PCI if PCI
---help---
This driver supports the Fn-Fx keys on Eee PC laptops.
- It also adds the ability to switch camera/wlan on/off.
+
+ It also gives access to some extra laptop functionality, such as
+ Bluetooth and backlight control, and allows powering some other
+ devices on and off.
If you have an Eee PC laptop, say Y or M here.
@@ -369,7 +391,7 @@
any ACPI-WMI devices.
config ACPI_ASUS
- tristate "ASUS/Medion Laptop Extras"
+ tristate "ASUS/Medion Laptop Extras (DEPRECATED)"
depends on ACPI
select BACKLIGHT_CLASS_DEVICE
---help---
@@ -390,7 +412,7 @@
parameters.
More information and a userspace daemon for handling the extra buttons
- at <http://sourceforge.net/projects/acpi4asus/>.
+ at <http://acpi4asus.sf.net>.
If you have an ACPI-compatible ASUS laptop, say Y or M here. This
driver is still under development, so if your laptop is unsupported or
diff --git a/drivers/platform/x86/Makefile b/drivers/platform/x86/Makefile
index e40c7bd..641b8bf 100644
--- a/drivers/platform/x86/Makefile
+++ b/drivers/platform/x86/Makefile
@@ -9,6 +9,7 @@
obj-$(CONFIG_DELL_LAPTOP) += dell-laptop.o
obj-$(CONFIG_DELL_WMI) += dell-wmi.o
obj-$(CONFIG_ACER_WMI) += acer-wmi.o
+obj-$(CONFIG_ACERHDF) += acerhdf.o
obj-$(CONFIG_HP_WMI) += hp-wmi.o
obj-$(CONFIG_TC1100_WMI) += tc1100-wmi.o
obj-$(CONFIG_SONY_LAPTOP) += sony-laptop.o
diff --git a/drivers/platform/x86/acer-wmi.c b/drivers/platform/x86/acer-wmi.c
index 09a503e..be2fd6f 100644
--- a/drivers/platform/x86/acer-wmi.c
+++ b/drivers/platform/x86/acer-wmi.c
@@ -958,12 +958,12 @@
status = get_u32(&state, ACER_CAP_WIRELESS);
if (ACPI_SUCCESS(status))
- rfkill_set_sw_state(wireless_rfkill, !!state);
+ rfkill_set_sw_state(wireless_rfkill, !state);
if (has_cap(ACER_CAP_BLUETOOTH)) {
status = get_u32(&state, ACER_CAP_BLUETOOTH);
if (ACPI_SUCCESS(status))
- rfkill_set_sw_state(bluetooth_rfkill, !!state);
+ rfkill_set_sw_state(bluetooth_rfkill, !state);
}
schedule_delayed_work(&acer_rfkill_work, round_jiffies_relative(HZ));
diff --git a/drivers/platform/x86/acerhdf.c b/drivers/platform/x86/acerhdf.c
new file mode 100644
index 0000000..bdfee17
--- /dev/null
+++ b/drivers/platform/x86/acerhdf.c
@@ -0,0 +1,602 @@
+/*
+ * acerhdf - A driver which monitors the temperature
+ * of the Aspire One netbook and turns the fan on/off
+ * as soon as the upper/lower threshold is reached.
+ *
+ * (C) 2009 - Peter Feuerer peter (a) piie.net
+ * http://piie.net
+ * 2009 Borislav Petkov <petkovbb@gmail.com>
+ *
+ * Inspired by and many thanks to:
+ * o acerfand - Rachel Greenham
+ * o acer_ec.pl - Michael Kurz michi.kurz (at) googlemail.com
+ * - Petr Tomasek tomasek (#) etf,cuni,cz
+ * - Carlos Corbacho cathectic (at) gmail.com
+ * o lkml - Matthew Garrett
+ * - Borislav Petkov
+ * - Andreas Mohr
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#define pr_fmt(fmt) "acerhdf: " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/dmi.h>
+#include <acpi/acpi_drivers.h>
+#include <linux/sched.h>
+#include <linux/thermal.h>
+#include <linux/platform_device.h>
+
+/*
+ * The driver is started with "kernel mode off" by default. That means the BIOS
+ * is still in control of the fan. In this mode the driver allows reading the
+ * CPU temperature, and a userspace tool may take over control of the fan.
+ * If the driver is switched to "kernel mode" (e.g. via module parameter) the
+ * driver is in full control of the fan. If you want the module to be started in
+ * kernel mode by default, define the following:
+ */
+#undef START_IN_KERNEL_MODE
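
As the comment above notes, switching between BIOS and kernel fan control happens through the generic thermal sysfs interface rather than a driver-private file. A minimal user-space C equivalent of the echo command quoted in the Kconfig help (the thermal_zone0 index is an assumption; it depends on registration order):

    #include <stdio.h>

    int main(void)
    {
        /* zone index is an assumption; check which zone belongs to acerhdf */
        const char *path = "/sys/class/thermal/thermal_zone0/mode";
        FILE *f = fopen(path, "w");

        if (!f) {
            perror(path);
            return 1;
        }
        fputs("enabled", f);    /* write "disabled" to hand control back to the BIOS */
        fclose(f);
        return 0;
    }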
+
+#define DRV_VER "0.5.13"
+
+/*
+ * According to the Atom N270 datasheet,
+ * (http://download.intel.com/design/processor/datashts/320032.pdf) the
+ * CPU's optimal operating limits, expressed as the junction temperature
+ * measured by the on-die thermal monitor, are within 0 <= Tj <= 90. So
+ * assume 89°C is the critical temperature.
+ */
+#define ACERHDF_TEMP_CRIT 89
+#define ACERHDF_FAN_OFF 0
+#define ACERHDF_FAN_AUTO 1
+
+/*
+ * No matter what value the user puts into the fanon variable, turn on the fan
+ * at 80 degrees Celsius to prevent hardware damage
+ */
+#define ACERHDF_MAX_FANON 80
+
+/*
+ * Maximum interval between two temperature checks is 15 seconds, as the die
+ * can get hot really fast under heavy load (plus we shouldn't forget about
+ * possible impact of _external_ aggressive sources such as heaters, sun etc.)
+ */
+#define ACERHDF_MAX_INTERVAL 15
+
+#ifdef START_IN_KERNEL_MODE
+static int kernelmode = 1;
+#else
+static int kernelmode;
+#endif
+
+static unsigned int interval = 10;
+static unsigned int fanon = 63;
+static unsigned int fanoff = 58;
+static unsigned int verbose;
+static unsigned int fanstate = ACERHDF_FAN_AUTO;
+static char force_bios[16];
+static unsigned int prev_interval;
+struct thermal_zone_device *thz_dev;
+struct thermal_cooling_device *cl_dev;
+struct platform_device *acerhdf_dev;
+
+module_param(kernelmode, uint, 0);
+MODULE_PARM_DESC(kernelmode, "Kernel mode fan control on / off");
+module_param(interval, uint, 0600);
+MODULE_PARM_DESC(interval, "Polling interval of temperature check");
+module_param(fanon, uint, 0600);
+MODULE_PARM_DESC(fanon, "Turn the fan on above this temperature");
+module_param(fanoff, uint, 0600);
+MODULE_PARM_DESC(fanoff, "Turn the fan off below this temperature");
+module_param(verbose, uint, 0600);
+MODULE_PARM_DESC(verbose, "Enable verbose dmesg output");
+module_param_string(force_bios, force_bios, 16, 0);
+MODULE_PARM_DESC(force_bios, "Force BIOS version and omit BIOS check");
+
+/* BIOS settings */
+struct bios_settings_t {
+ const char *vendor;
+ const char *version;
+ unsigned char fanreg;
+ unsigned char tempreg;
+ unsigned char fancmd[2]; /* fan off and auto commands */
+};
+
+/* Register addresses and values for different BIOS versions */
+static const struct bios_settings_t bios_tbl[] = {
+ {"Acer", "v0.3109", 0x55, 0x58, {0x1f, 0x00} },
+ {"Acer", "v0.3114", 0x55, 0x58, {0x1f, 0x00} },
+ {"Acer", "v0.3301", 0x55, 0x58, {0xaf, 0x00} },
+ {"Acer", "v0.3304", 0x55, 0x58, {0xaf, 0x00} },
+ {"Acer", "v0.3305", 0x55, 0x58, {0xaf, 0x00} },
+ {"Acer", "v0.3308", 0x55, 0x58, {0x21, 0x00} },
+ {"Acer", "v0.3309", 0x55, 0x58, {0x21, 0x00} },
+ {"Acer", "v0.3310", 0x55, 0x58, {0x21, 0x00} },
+ {"Gateway", "v0.3103", 0x55, 0x58, {0x21, 0x00} },
+ {"Packard Bell", "v0.3105", 0x55, 0x58, {0x21, 0x00} },
+ {"", "", 0, 0, {0, 0} }
+};
+
+static const struct bios_settings_t *bios_cfg __read_mostly;
+
+
+static int acerhdf_get_temp(int *temp)
+{
+ u8 read_temp;
+
+ if (ec_read(bios_cfg->tempreg, &read_temp))
+ return -EINVAL;
+
+ *temp = read_temp;
+
+ return 0;
+}
+
+static int acerhdf_get_fanstate(int *state)
+{
+ u8 fan;
+ bool tmp;
+
+ if (ec_read(bios_cfg->fanreg, &fan))
+ return -EINVAL;
+
+ tmp = (fan == bios_cfg->fancmd[ACERHDF_FAN_OFF]);
+ *state = tmp ? ACERHDF_FAN_OFF : ACERHDF_FAN_AUTO;
+
+ return 0;
+}
+
+static void acerhdf_change_fanstate(int state)
+{
+ unsigned char cmd;
+
+ if (verbose)
+ pr_notice("fan %s\n", (state == ACERHDF_FAN_OFF) ?
+ "OFF" : "ON");
+
+ if ((state != ACERHDF_FAN_OFF) && (state != ACERHDF_FAN_AUTO)) {
+ pr_err("invalid fan state %d requested, setting to auto!\n",
+ state);
+ state = ACERHDF_FAN_AUTO;
+ }
+
+ cmd = bios_cfg->fancmd[state];
+ fanstate = state;
+
+ ec_write(bios_cfg->fanreg, cmd);
+}
+
+static void acerhdf_check_param(struct thermal_zone_device *thermal)
+{
+ if (fanon > ACERHDF_MAX_FANON) {
+ pr_err("fanon temperature too high, set to %d\n",
+ ACERHDF_MAX_FANON);
+ fanon = ACERHDF_MAX_FANON;
+ }
+
+ if (kernelmode && prev_interval != interval) {
+ if (interval > ACERHDF_MAX_INTERVAL) {
+ pr_err("interval too high, set to %d\n",
+ ACERHDF_MAX_INTERVAL);
+ interval = ACERHDF_MAX_INTERVAL;
+ }
+ if (verbose)
+ pr_notice("interval changed to: %d\n",
+ interval);
+ thermal->polling_delay = interval*1000;
+ prev_interval = interval;
+ }
+}
+
+/*
+ * This is the thermal zone callback which does the delayed polling of the fan
+ * state. Settings that originate from sysfs are checked here in
+ * acerhdf_check_param(), with a latency of up to one polling interval,
+ * since we can't do that in the respective accessors of the module parameters.
+ */
+static int acerhdf_get_ec_temp(struct thermal_zone_device *thermal,
+ unsigned long *t)
+{
+ int temp, err = 0;
+
+ acerhdf_check_param(thermal);
+
+ err = acerhdf_get_temp(&temp);
+ if (err)
+ return err;
+
+ if (verbose)
+ pr_notice("temp %d\n", temp);
+
+ *t = temp;
+ return 0;
+}
+
+static int acerhdf_bind(struct thermal_zone_device *thermal,
+ struct thermal_cooling_device *cdev)
+{
+ /* if the cooling device is the one from acerhdf, bind it */
+ if (cdev != cl_dev)
+ return 0;
+
+ if (thermal_zone_bind_cooling_device(thermal, 0, cdev)) {
+ pr_err("error binding cooling dev\n");
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int acerhdf_unbind(struct thermal_zone_device *thermal,
+ struct thermal_cooling_device *cdev)
+{
+ if (cdev != cl_dev)
+ return 0;
+
+ if (thermal_zone_unbind_cooling_device(thermal, 0, cdev)) {
+ pr_err("error unbinding cooling dev\n");
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static inline void acerhdf_revert_to_bios_mode(void)
+{
+ acerhdf_change_fanstate(ACERHDF_FAN_AUTO);
+ kernelmode = 0;
+ if (thz_dev)
+ thz_dev->polling_delay = 0;
+ pr_notice("kernel mode fan control OFF\n");
+}
+static inline void acerhdf_enable_kernelmode(void)
+{
+ kernelmode = 1;
+
+ thz_dev->polling_delay = interval*1000;
+ thermal_zone_device_update(thz_dev);
+ pr_notice("kernel mode fan control ON\n");
+}
+
+static int acerhdf_get_mode(struct thermal_zone_device *thermal,
+ enum thermal_device_mode *mode)
+{
+ if (verbose)
+ pr_notice("kernel mode fan control %d\n", kernelmode);
+
+ *mode = (kernelmode) ? THERMAL_DEVICE_ENABLED
+ : THERMAL_DEVICE_DISABLED;
+
+ return 0;
+}
+
+/*
+ * set operation mode;
+ * enabled: the thermal layer of the kernel takes care of
+ * the temperature and the fan.
+ * disabled: the BIOS takes control of the fan.
+ */
+static int acerhdf_set_mode(struct thermal_zone_device *thermal,
+ enum thermal_device_mode mode)
+{
+ if (mode == THERMAL_DEVICE_DISABLED && kernelmode)
+ acerhdf_revert_to_bios_mode();
+ else if (mode == THERMAL_DEVICE_ENABLED && !kernelmode)
+ acerhdf_enable_kernelmode();
+
+ return 0;
+}
+
+static int acerhdf_get_trip_type(struct thermal_zone_device *thermal, int trip,
+ enum thermal_trip_type *type)
+{
+ if (trip == 0)
+ *type = THERMAL_TRIP_ACTIVE;
+
+ return 0;
+}
+
+static int acerhdf_get_trip_temp(struct thermal_zone_device *thermal, int trip,
+ unsigned long *temp)
+{
+ if (trip == 0)
+ *temp = fanon;
+
+ return 0;
+}
+
+static int acerhdf_get_crit_temp(struct thermal_zone_device *thermal,
+ unsigned long *temperature)
+{
+ *temperature = ACERHDF_TEMP_CRIT;
+ return 0;
+}
+
+/* bind callback functions to thermalzone */
+struct thermal_zone_device_ops acerhdf_dev_ops = {
+ .bind = acerhdf_bind,
+ .unbind = acerhdf_unbind,
+ .get_temp = acerhdf_get_ec_temp,
+ .get_mode = acerhdf_get_mode,
+ .set_mode = acerhdf_set_mode,
+ .get_trip_type = acerhdf_get_trip_type,
+ .get_trip_temp = acerhdf_get_trip_temp,
+ .get_crit_temp = acerhdf_get_crit_temp,
+};
+
+
+/*
+ * cooling device callback functions
+ * get maximal fan cooling state
+ */
+static int acerhdf_get_max_state(struct thermal_cooling_device *cdev,
+ unsigned long *state)
+{
+ *state = 1;
+
+ return 0;
+}
+
+static int acerhdf_get_cur_state(struct thermal_cooling_device *cdev,
+ unsigned long *state)
+{
+ int err = 0, tmp;
+
+ err = acerhdf_get_fanstate(&tmp);
+ if (err)
+ return err;
+
+ *state = (tmp == ACERHDF_FAN_AUTO) ? 1 : 0;
+ return 0;
+}
+
+/* change current fan state - is overwritten when running in kernel mode */
+static int acerhdf_set_cur_state(struct thermal_cooling_device *cdev,
+ unsigned long state)
+{
+ int cur_temp, cur_state, err = 0;
+
+ if (!kernelmode)
+ return 0;
+
+ err = acerhdf_get_temp(&cur_temp);
+ if (err) {
+ pr_err("error reading temperature, hand off control to BIOS\n");
+ goto err_out;
+ }
+
+ err = acerhdf_get_fanstate(&cur_state);
+ if (err) {
+ pr_err("error reading fan state, hand off control to BIOS\n");
+ goto err_out;
+ }
+
+ if (state == 0) {
+ /* turn fan off only if below fanoff temperature */
+ if ((cur_state == ACERHDF_FAN_AUTO) &&
+ (cur_temp < fanoff))
+ acerhdf_change_fanstate(ACERHDF_FAN_OFF);
+ } else {
+ if (cur_state == ACERHDF_FAN_OFF)
+ acerhdf_change_fanstate(ACERHDF_FAN_AUTO);
+ }
+ return 0;
+
+err_out:
+ acerhdf_revert_to_bios_mode();
+ return -EINVAL;
+}
+
+/* bind fan callbacks to fan device */
+struct thermal_cooling_device_ops acerhdf_cooling_ops = {
+ .get_max_state = acerhdf_get_max_state,
+ .get_cur_state = acerhdf_get_cur_state,
+ .set_cur_state = acerhdf_set_cur_state,
+};
+
+/* suspend / resume functionality */
+static int acerhdf_suspend(struct platform_device *dev, pm_message_t state)
+{
+ if (kernelmode)
+ acerhdf_change_fanstate(ACERHDF_FAN_AUTO);
+
+ if (verbose)
+ pr_notice("going suspend\n");
+
+ return 0;
+}
+
+static int acerhdf_resume(struct platform_device *device)
+{
+ if (verbose)
+ pr_notice("resuming\n");
+
+ return 0;
+}
+
+static int __devinit acerhdf_probe(struct platform_device *device)
+{
+ return 0;
+}
+
+static int acerhdf_remove(struct platform_device *device)
+{
+ return 0;
+}
+
+struct platform_driver acerhdf_drv = {
+ .driver = {
+ .name = "acerhdf",
+ .owner = THIS_MODULE,
+ },
+ .probe = acerhdf_probe,
+ .remove = acerhdf_remove,
+ .suspend = acerhdf_suspend,
+ .resume = acerhdf_resume,
+};
+
+
+/* check hardware */
+static int acerhdf_check_hardware(void)
+{
+ char const *vendor, *version, *product;
+ int i;
+
+ /* get BIOS data */
+ vendor = dmi_get_system_info(DMI_SYS_VENDOR);
+ version = dmi_get_system_info(DMI_BIOS_VERSION);
+ product = dmi_get_system_info(DMI_PRODUCT_NAME);
+
+ pr_info("Acer Aspire One Fan driver, v.%s\n", DRV_VER);
+
+ if (!force_bios[0]) {
+ if (strncmp(product, "AO", 2)) {
+ pr_err("no Aspire One hardware found\n");
+ return -EINVAL;
+ }
+ } else {
+ pr_info("forcing BIOS version: %s\n", version);
+ version = force_bios;
+ kernelmode = 0;
+ }
+
+ if (verbose)
+ pr_info("BIOS info: %s %s, product: %s\n",
+ vendor, version, product);
+
+ /* search BIOS version and vendor in BIOS settings table */
+ for (i = 0; bios_tbl[i].version[0]; i++) {
+ if (!strcmp(bios_tbl[i].vendor, vendor) &&
+ !strcmp(bios_tbl[i].version, version)) {
+ bios_cfg = &bios_tbl[i];
+ break;
+ }
+ }
+
+ if (!bios_cfg) {
+ pr_err("unknown (unsupported) BIOS version %s/%s, "
+ "please report, aborting!\n", vendor, version);
+ return -EINVAL;
+ }
+
+ /*
+ * if started with kernel mode off, prevent the kernel from switching
+ * off the fan
+ */
+ if (!kernelmode) {
+ pr_notice("Fan control off, to enable do:\n");
+ pr_notice("echo -n \"enabled\" > "
+ "/sys/class/thermal/thermal_zone0/mode\n");
+ }
+
+ return 0;
+}
+
+static int acerhdf_register_platform(void)
+{
+ int err = 0;
+
+ err = platform_driver_register(&acerhdf_drv);
+ if (err)
+ return err;
+
+ acerhdf_dev = platform_device_alloc("acerhdf", -1);
+ platform_device_add(acerhdf_dev);
+
+ return 0;
+}
+
+static void acerhdf_unregister_platform(void)
+{
+ if (!acerhdf_dev)
+ return;
+
+ platform_device_del(acerhdf_dev);
+ platform_driver_unregister(&acerhdf_drv);
+}
+
+static int acerhdf_register_thermal(void)
+{
+ cl_dev = thermal_cooling_device_register("acerhdf-fan", NULL,
+ &acerhdf_cooling_ops);
+
+ if (IS_ERR(cl_dev))
+ return -EINVAL;
+
+ thz_dev = thermal_zone_device_register("acerhdf", 1, NULL,
+ &acerhdf_dev_ops, 0, 0, 0,
+ (kernelmode) ? interval*1000 : 0);
+ if (IS_ERR(thz_dev))
+ return -EINVAL;
+
+ return 0;
+}
+
+static void acerhdf_unregister_thermal(void)
+{
+ if (cl_dev) {
+ thermal_cooling_device_unregister(cl_dev);
+ cl_dev = NULL;
+ }
+
+ if (thz_dev) {
+ thermal_zone_device_unregister(thz_dev);
+ thz_dev = NULL;
+ }
+}
+
+static int __init acerhdf_init(void)
+{
+ int err = 0;
+
+ err = acerhdf_check_hardware();
+ if (err)
+ goto out_err;
+
+ err = acerhdf_register_platform();
+ if (err)
+ goto err_unreg;
+
+ err = acerhdf_register_thermal();
+ if (err)
+ goto err_unreg;
+
+ return 0;
+
+err_unreg:
+ acerhdf_unregister_thermal();
+ acerhdf_unregister_platform();
+
+out_err:
+ return -ENODEV;
+}
+
+static void __exit acerhdf_exit(void)
+{
+ acerhdf_change_fanstate(ACERHDF_FAN_AUTO);
+ acerhdf_unregister_thermal();
+ acerhdf_unregister_platform();
+}
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Peter Feuerer");
+MODULE_DESCRIPTION("Aspire One temperature and fan driver");
+MODULE_ALIAS("dmi:*:*Acer*:*:");
+MODULE_ALIAS("dmi:*:*Gateway*:*:");
+MODULE_ALIAS("dmi:*:*Packard Bell*:*:");
+
+module_init(acerhdf_init);
+module_exit(acerhdf_exit);
diff --git a/drivers/platform/x86/asus-laptop.c b/drivers/platform/x86/asus-laptop.c
index bfc1a88..db657bb 100644
--- a/drivers/platform/x86/asus-laptop.c
+++ b/drivers/platform/x86/asus-laptop.c
@@ -33,6 +33,8 @@
* Sam Lin - GPS support
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
@@ -53,9 +55,10 @@
#define ASUS_HOTK_NAME "Asus Laptop Support"
#define ASUS_HOTK_CLASS "hotkey"
#define ASUS_HOTK_DEVICE_NAME "Hotkey"
-#define ASUS_HOTK_FILE "asus-laptop"
+#define ASUS_HOTK_FILE KBUILD_MODNAME
#define ASUS_HOTK_PREFIX "\\_SB.ATKD."
+
/*
* Some events we use, same for all Asus
*/
@@ -207,13 +210,17 @@
static int asus_hotk_add(struct acpi_device *device);
static int asus_hotk_remove(struct acpi_device *device, int type);
+static void asus_hotk_notify(struct acpi_device *device, u32 event);
+
static struct acpi_driver asus_hotk_driver = {
.name = ASUS_HOTK_NAME,
.class = ASUS_HOTK_CLASS,
.ids = asus_device_ids,
+ .flags = ACPI_DRIVER_ALL_NOTIFY_EVENTS,
.ops = {
.add = asus_hotk_add,
.remove = asus_hotk_remove,
+ .notify = asus_hotk_notify,
},
};
@@ -323,7 +330,7 @@
rv = acpi_evaluate_integer(wireless_status_handle, NULL, NULL, &status);
if (ACPI_FAILURE(rv))
- printk(ASUS_WARNING "Error reading Wireless status\n");
+ pr_warning("Error reading Wireless status\n");
else
return (status & mask) ? 1 : 0;
@@ -337,7 +344,7 @@
rv = acpi_evaluate_integer(gps_status_handle, NULL, NULL, &status);
if (ACPI_FAILURE(rv))
- printk(ASUS_WARNING "Error reading GPS status\n");
+ pr_warning("Error reading GPS status\n");
else
return status ? 1 : 0;
@@ -377,7 +384,7 @@
}
if (write_acpi_int(handle, NULL, out, NULL))
- printk(ASUS_WARNING " write failed %x\n", mask);
+ pr_warning(" write failed %x\n", mask);
}
/* /sys/class/led handlers */
@@ -420,7 +427,7 @@
NULL, NULL, NULL);
if (ACPI_FAILURE(status))
- printk(ASUS_WARNING "Error switching LCD\n");
+ pr_warning("Error switching LCD\n");
}
write_status(NULL, lcd, LCD_ON);
@@ -444,7 +451,7 @@
rv = acpi_evaluate_integer(brightness_get_handle, NULL, NULL, &value);
if (ACPI_FAILURE(rv))
- printk(ASUS_WARNING "Error reading brightness\n");
+ pr_warning("Error reading brightness\n");
return value;
}
@@ -457,7 +464,7 @@
/* 0 <= value <= 15 */
if (write_acpi_int(brightness_set_handle, NULL, value, NULL)) {
- printk(ASUS_WARNING "Error changing brightness\n");
+ pr_warning("Error changing brightness\n");
ret = -EIO;
}
@@ -587,7 +594,7 @@
rv = parse_arg(buf, count, &value);
if (rv > 0) {
if (write_acpi_int(ledd_set_handle, NULL, value, NULL))
- printk(ASUS_WARNING "LED display write failed\n");
+ pr_warning("LED display write failed\n");
else
hotk->ledd_status = (u32) value;
}
@@ -632,7 +639,7 @@
{
/* no sanity check needed for now */
if (write_acpi_int(display_set_handle, NULL, value, NULL))
- printk(ASUS_WARNING "Error setting display\n");
+ pr_warning("Error setting display\n");
return;
}
@@ -647,7 +654,7 @@
rv = acpi_evaluate_integer(display_get_handle, NULL,
NULL, &value);
if (ACPI_FAILURE(rv))
- printk(ASUS_WARNING "Error reading display status\n");
+ pr_warning("Error reading display status\n");
}
value &= 0x0F; /* needed for some models, shouldn't hurt others */
@@ -689,7 +696,7 @@
static void set_light_sens_switch(int value)
{
if (write_acpi_int(ls_switch_handle, NULL, value, NULL))
- printk(ASUS_WARNING "Error setting light sensor switch\n");
+ pr_warning("Error setting light sensor switch\n");
hotk->light_switch = value;
}
@@ -714,7 +721,7 @@
static void set_light_sens_level(int value)
{
if (write_acpi_int(ls_level_handle, NULL, value, NULL))
- printk(ASUS_WARNING "Error setting light sensor level\n");
+ pr_warning("Error setting light sensor level\n");
hotk->light_level = value;
}
@@ -812,7 +819,7 @@
return -EINVAL;
}
-static void asus_hotk_notify(acpi_handle handle, u32 event, void *data)
+static void asus_hotk_notify(struct acpi_device *device, u32 event)
{
static struct key_entry *key;
u16 count;
@@ -975,11 +982,11 @@
*/
status = acpi_get_table(ACPI_SIG_DSDT, 1, &asus_info);
if (ACPI_FAILURE(status))
- printk(ASUS_WARNING "Couldn't get the DSDT table header\n");
+ pr_warning("Couldn't get the DSDT table header\n");
/* We have to write 0 on init this far for all ASUS models */
if (write_acpi_int(hotk->handle, "INIT", 0, &buffer)) {
- printk(ASUS_ERR "Hotkey initialization failed\n");
+ pr_err("Hotkey initialization failed\n");
return -ENODEV;
}
@@ -987,9 +994,9 @@
status =
acpi_evaluate_integer(hotk->handle, "BSTS", NULL, &bsts_result);
if (ACPI_FAILURE(status))
- printk(ASUS_WARNING "Error calling BSTS\n");
+ pr_warning("Error calling BSTS\n");
else if (bsts_result)
- printk(ASUS_NOTICE "BSTS called, 0x%02x returned\n",
+ pr_notice("BSTS called, 0x%02x returned\n",
(uint) bsts_result);
/* This too ... */
@@ -1020,7 +1027,7 @@
return -ENOMEM;
if (*string)
- printk(ASUS_NOTICE " %s model detected\n", string);
+ pr_notice(" %s model detected\n", string);
ASUS_HANDLE_INIT(mled_set);
ASUS_HANDLE_INIT(tled_set);
@@ -1077,7 +1084,7 @@
hotk->inputdev = input_allocate_device();
if (!hotk->inputdev) {
- printk(ASUS_INFO "Unable to allocate input device\n");
+ pr_info("Unable to allocate input device\n");
return 0;
}
hotk->inputdev->name = "Asus Laptop extra buttons";
@@ -1096,7 +1103,7 @@
}
result = input_register_device(hotk->inputdev);
if (result) {
- printk(ASUS_INFO "Unable to register input device\n");
+ pr_info("Unable to register input device\n");
input_free_device(hotk->inputdev);
}
return result;
@@ -1113,7 +1120,7 @@
if (hotk->device->status.present) {
result = asus_hotk_get_info();
} else {
- printk(ASUS_ERR "Hotkey device not present, aborting\n");
+ pr_err("Hotkey device not present, aborting\n");
return -EINVAL;
}
@@ -1124,13 +1131,12 @@
static int asus_hotk_add(struct acpi_device *device)
{
- acpi_status status = AE_OK;
int result;
if (!device)
return -EINVAL;
- printk(ASUS_NOTICE "Asus Laptop Support version %s\n",
+ pr_notice("Asus Laptop Support version %s\n",
ASUS_LAPTOP_VERSION);
hotk = kzalloc(sizeof(struct asus_hotk), GFP_KERNEL);
@@ -1149,15 +1155,6 @@
asus_hotk_add_fs();
- /*
- * We install the handler, it will receive the hotk in parameter, so, we
- * could add other data to the hotk struct
- */
- status = acpi_install_notify_handler(hotk->handle, ACPI_ALL_NOTIFY,
- asus_hotk_notify, hotk);
- if (ACPI_FAILURE(status))
- printk(ASUS_ERR "Error installing notify handler\n");
-
asus_hotk_found = 1;
/* WLED and BLED are on by default */
@@ -1198,16 +1195,9 @@
static int asus_hotk_remove(struct acpi_device *device, int type)
{
- acpi_status status = 0;
-
if (!device || !acpi_driver_data(device))
return -EINVAL;
- status = acpi_remove_notify_handler(hotk->handle, ACPI_ALL_NOTIFY,
- asus_hotk_notify);
- if (ACPI_FAILURE(status))
- printk(ASUS_ERR "Error removing notify handler\n");
-
kfree(hotk->name);
kfree(hotk);
@@ -1260,8 +1250,7 @@
bd = backlight_device_register(ASUS_HOTK_FILE, dev,
NULL, &asusbl_ops);
if (IS_ERR(bd)) {
- printk(ASUS_ERR
- "Could not register asus backlight device\n");
+ pr_err("Could not register asus backlight device\n");
asus_backlight_device = NULL;
return PTR_ERR(bd);
}
@@ -1334,7 +1323,6 @@
static int __init asus_laptop_init(void)
{
- struct device *dev;
int result;
if (acpi_disabled)
@@ -1356,24 +1344,10 @@
return -ENODEV;
}
- dev = acpi_get_physical_device(hotk->device->handle);
-
- if (!acpi_video_backlight_support()) {
- result = asus_backlight_init(dev);
- if (result)
- goto fail_backlight;
- } else
- printk(ASUS_INFO "Brightness ignored, must be controlled by "
- "ACPI video driver\n");
-
result = asus_input_init();
if (result)
goto fail_input;
- result = asus_led_init(dev);
- if (result)
- goto fail_led;
-
/* Register platform stuff */
result = platform_driver_register(&asuspf_driver);
if (result)
@@ -1394,8 +1368,27 @@
if (result)
goto fail_sysfs;
+ result = asus_led_init(&asuspf_device->dev);
+ if (result)
+ goto fail_led;
+
+ if (!acpi_video_backlight_support()) {
+ result = asus_backlight_init(&asuspf_device->dev);
+ if (result)
+ goto fail_backlight;
+ } else
+ pr_info("Brightness ignored, must be controlled by "
+ "ACPI video driver\n");
+
return 0;
+fail_backlight:
+ asus_led_exit();
+
+fail_led:
+ sysfs_remove_group(&asuspf_device->dev.kobj,
+ &asuspf_attribute_group);
+
fail_sysfs:
platform_device_del(asuspf_device);
@@ -1406,15 +1399,9 @@
platform_driver_unregister(&asuspf_driver);
fail_platform_driver:
- asus_led_exit();
-
-fail_led:
asus_input_exit();
fail_input:
- asus_backlight_exit();
-
-fail_backlight:
return result;
}
diff --git a/drivers/platform/x86/asus_acpi.c b/drivers/platform/x86/asus_acpi.c
index ba1f749..ddf5240 100644
--- a/drivers/platform/x86/asus_acpi.c
+++ b/drivers/platform/x86/asus_acpi.c
@@ -455,6 +455,8 @@
*/
static int asus_hotk_add(struct acpi_device *device);
static int asus_hotk_remove(struct acpi_device *device, int type);
+static void asus_hotk_notify(struct acpi_device *device, u32 event);
+
static const struct acpi_device_id asus_device_ids[] = {
{"ATK0100", 0},
{"", 0},
@@ -465,9 +467,11 @@
.name = "asus_acpi",
.class = ACPI_HOTK_CLASS,
.ids = asus_device_ids,
+ .flags = ACPI_DRIVER_ALL_NOTIFY_EVENTS,
.ops = {
.add = asus_hotk_add,
.remove = asus_hotk_remove,
+ .notify = asus_hotk_notify,
},
};
@@ -1101,12 +1105,20 @@
return 0;
}
-static void asus_hotk_notify(acpi_handle handle, u32 event, void *data)
+static void asus_hotk_notify(struct acpi_device *device, u32 event)
{
/* TODO Find a better way to handle events count. */
if (!hotk)
return;
+ /*
+ * The BIOS *should* be sending us device events, but apparently
+ * Asus uses system events instead, so just ignore any device
+ * events we get.
+ */
+ if (event > ACPI_MAX_SYS_NOTIFY)
+ return;
+
if ((event & ~((u32) BR_UP)) < 16)
hotk->brightness = (event & ~((u32) BR_UP));
else if ((event & ~((u32) BR_DOWN)) < 16)
@@ -1346,15 +1358,6 @@
if (result)
goto end;
- /*
- * We install the handler, it will receive the hotk in parameter, so, we
- * could add other data to the hotk struct
- */
- status = acpi_install_notify_handler(hotk->handle, ACPI_SYSTEM_NOTIFY,
- asus_hotk_notify, hotk);
- if (ACPI_FAILURE(status))
- printk(KERN_ERR " Error installing notify handler\n");
-
/* For laptops without GPLV: init the hotk->brightness value */
if ((!hotk->methods->brightness_get)
&& (!hotk->methods->brightness_status)
@@ -1389,16 +1392,9 @@
static int asus_hotk_remove(struct acpi_device *device, int type)
{
- acpi_status status = 0;
-
if (!device || !acpi_driver_data(device))
return -EINVAL;
- status = acpi_remove_notify_handler(hotk->handle, ACPI_SYSTEM_NOTIFY,
- asus_hotk_notify);
- if (ACPI_FAILURE(status))
- printk(KERN_ERR "Asus ACPI: Error removing notify handler\n");
-
asus_hotk_remove_fs(device);
kfree(hotk);
diff --git a/drivers/platform/x86/dell-wmi.c b/drivers/platform/x86/dell-wmi.c
index 2fab941..0f900cc 100644
--- a/drivers/platform/x86/dell-wmi.c
+++ b/drivers/platform/x86/dell-wmi.c
@@ -46,10 +46,53 @@
u16 keycode;
};
-enum { KE_KEY, KE_SW, KE_END };
+enum { KE_KEY, KE_SW, KE_IGNORE, KE_END };
+
+/*
+ * Certain keys are flagged as KE_IGNORE. All of these are either
+ * notifications (rather than requests for change) or are also sent
+ * via the keyboard controller, so they should not be sent again.
+ */
static struct key_entry dell_wmi_keymap[] = {
{KE_KEY, 0xe045, KEY_PROG1},
+ {KE_KEY, 0xe009, KEY_EJECTCD},
+
+ /* These also contain the brightness level at offset 6 */
+ {KE_KEY, 0xe006, KEY_BRIGHTNESSUP},
+ {KE_KEY, 0xe005, KEY_BRIGHTNESSDOWN},
+
+ /* Battery health status button */
+ {KE_KEY, 0xe007, KEY_BATTERY},
+
+ /* This is actually for all radios. Although physically a
+ * switch, the notification does not provide an indication of
+ * state and so it should be reported as a key */
+ {KE_KEY, 0xe008, KEY_WLAN},
+
+ /* The next device is at offset 6, the active devices are at
+ offset 8 and the attached devices at offset 10 */
+ {KE_KEY, 0xe00b, KEY_DISPLAYTOGGLE},
+
+ {KE_IGNORE, 0xe00c, KEY_KBDILLUMTOGGLE},
+
+ /* BIOS error detected */
+ {KE_IGNORE, 0xe00d, KEY_RESERVED},
+
+ /* Wifi Catcher */
+ {KE_KEY, 0xe011, KEY_PROG2},
+
+ /* Ambient light sensor toggle */
+ {KE_IGNORE, 0xe013, KEY_RESERVED},
+
+ {KE_IGNORE, 0xe020, KEY_MUTE},
+ {KE_IGNORE, 0xe02e, KEY_VOLUMEDOWN},
+ {KE_IGNORE, 0xe030, KEY_VOLUMEUP},
+ {KE_IGNORE, 0xe033, KEY_KBDILLUMUP},
+ {KE_IGNORE, 0xe034, KEY_KBDILLUMDOWN},
+ {KE_IGNORE, 0xe03a, KEY_CAPSLOCK},
+ {KE_IGNORE, 0xe045, KEY_NUMLOCK},
+ {KE_IGNORE, 0xe046, KEY_SCROLLLOCK},
{KE_END, 0}
};
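
The KE_IGNORE remark above, together with the masking change in the notify handler below, describes the lookup convention: only the low 16 bits of the WMI event carry the scancode, and entries flagged KE_IGNORE match a known code but must not be forwarded as key presses. A self-contained model of that lookup (shortened table and hard-coded keycodes for illustration; not the driver's actual dispatch path, which is only partially visible in these hunks):

    #include <stdio.h>

    enum { KE_KEY, KE_SW, KE_IGNORE, KE_END };

    struct key_entry {
        int type;
        unsigned short code;
        unsigned short keycode;
    };

    static const struct key_entry keymap[] = {
        { KE_KEY,    0xe009, 161 },     /* KEY_EJECTCD */
        { KE_IGNORE, 0xe020, 113 },     /* KEY_MUTE, also sent by the kbd controller */
        { KE_END,    0,      0 },
    };

    /* mask off the upper bytes of the event, then search the table */
    static const struct key_entry *lookup(unsigned int event)
    {
        unsigned short scancode = event & 0xFFFF;
        const struct key_entry *k;

        for (k = keymap; k->type != KE_END; k++)
            if (k->code == scancode)
                return k;
        return NULL;
    }

    int main(void)
    {
        unsigned int events[] = { 0x0001e009, 0x0000e020, 0x0000e0ff };
        size_t i;

        for (i = 0; i < sizeof(events) / sizeof(events[0]); i++) {
            const struct key_entry *k = lookup(events[i]);

            if (!k)
                printf("0x%08x: unknown scancode\n", events[i]);
            else if (k->type == KE_IGNORE)
                printf("0x%08x: known, ignored\n", events[i]);
            else
                printf("0x%08x: report keycode %u\n", events[i],
                       (unsigned int)k->keycode);
        }
        return 0;
    }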
@@ -122,15 +165,20 @@
if (obj && obj->type == ACPI_TYPE_BUFFER) {
int *buffer = (int *)obj->buffer.pointer;
- key = dell_wmi_get_entry_by_scancode(buffer[1]);
+ /*
+ * The upper bytes of the event may contain
+ * additional information, so mask them off for the
+ * scancode lookup
+ */
+ key = dell_wmi_get_entry_by_scancode(buffer[1] & 0xFFFF);
if (key) {
input_report_key(dell_wmi_input_dev, key->keycode, 1);
input_sync(dell_wmi_input_dev);
input_report_key(dell_wmi_input_dev, key->keycode, 0);
input_sync(dell_wmi_input_dev);
- } else
+ } else if (buffer[1] & 0xFFFF)
printk(KERN_INFO "dell-wmi: Unknown key %x pressed\n",
- buffer[1]);
+ buffer[1] & 0xFFFF);
}
}
diff --git a/drivers/platform/x86/eeepc-laptop.c b/drivers/platform/x86/eeepc-laptop.c
index 03bf522..ec560f1 100644
--- a/drivers/platform/x86/eeepc-laptop.c
+++ b/drivers/platform/x86/eeepc-laptop.c
@@ -16,6 +16,8 @@
* GNU General Public License for more details.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
@@ -31,6 +33,7 @@
#include <linux/input.h>
#include <linux/rfkill.h>
#include <linux/pci.h>
+#include <linux/pci_hotplug.h>
#define EEEPC_LAPTOP_VERSION "0.1"
@@ -40,11 +43,6 @@
#define EEEPC_HOTK_DEVICE_NAME "Hotkey"
#define EEEPC_HOTK_HID "ASUS010"
-#define EEEPC_LOG EEEPC_HOTK_FILE ": "
-#define EEEPC_ERR KERN_ERR EEEPC_LOG
-#define EEEPC_WARNING KERN_WARNING EEEPC_LOG
-#define EEEPC_NOTICE KERN_NOTICE EEEPC_LOG
-#define EEEPC_INFO KERN_INFO EEEPC_LOG
/*
* Definitions for Asus EeePC
@@ -62,7 +60,10 @@
DISABLE_ASL_GPS = 0x0020,
DISABLE_ASL_DISPLAYSWITCH = 0x0040,
DISABLE_ASL_MODEM = 0x0080,
- DISABLE_ASL_CARDREADER = 0x0100
+ DISABLE_ASL_CARDREADER = 0x0100,
+ DISABLE_ASL_3G = 0x0200,
+ DISABLE_ASL_WIMAX = 0x0400,
+ DISABLE_ASL_HWCF = 0x0800
};
enum {
@@ -87,7 +88,13 @@
CM_ASL_USBPORT3,
CM_ASL_MODEM,
CM_ASL_CARDREADER,
- CM_ASL_LID
+ CM_ASL_3G,
+ CM_ASL_WIMAX,
+ CM_ASL_HWCF,
+ CM_ASL_LID,
+ CM_ASL_TYPE,
+ CM_ASL_PANELPOWER, /*P901*/
+ CM_ASL_TPD
};
static const char *cm_getv[] = {
@@ -96,7 +103,8 @@
NULL, "PBLG", NULL, NULL,
"CFVG", NULL, NULL, NULL,
"USBG", NULL, NULL, "MODG",
- "CRDG", "LIDG"
+ "CRDG", "M3GG", "WIMG", "HWCF",
+ "LIDG", "TYPE", "PBPG", "TPDG"
};
static const char *cm_setv[] = {
@@ -105,7 +113,8 @@
"SDSP", "PBLS", "HDPS", NULL,
"CFVS", NULL, NULL, NULL,
"USBG", NULL, NULL, "MODS",
- "CRDS", NULL
+ "CRDS", "M3GS", "WIMS", NULL,
+ NULL, NULL, "PBPS", "TPDS"
};
#define EEEPC_EC "\\_SB.PCI0.SBRG.EC0."
@@ -130,8 +139,10 @@
u16 event_count[128]; /* count for each event */
struct input_dev *inputdev;
u16 *keycode_map;
- struct rfkill *eeepc_wlan_rfkill;
- struct rfkill *eeepc_bluetooth_rfkill;
+ struct rfkill *wlan_rfkill;
+ struct rfkill *bluetooth_rfkill;
+ struct rfkill *wwan3g_rfkill;
+ struct hotplug_slot *hotplug_slot;
};
/* The actual device the driver binds to */
@@ -180,6 +191,8 @@
*/
static int eeepc_hotk_add(struct acpi_device *device);
static int eeepc_hotk_remove(struct acpi_device *device, int type);
+static int eeepc_hotk_resume(struct acpi_device *device);
+static void eeepc_hotk_notify(struct acpi_device *device, u32 event);
static const struct acpi_device_id eeepc_device_ids[] = {
{EEEPC_HOTK_HID, 0},
@@ -191,12 +204,24 @@
.name = EEEPC_HOTK_NAME,
.class = EEEPC_HOTK_CLASS,
.ids = eeepc_device_ids,
+ .flags = ACPI_DRIVER_ALL_NOTIFY_EVENTS,
.ops = {
.add = eeepc_hotk_add,
.remove = eeepc_hotk_remove,
+ .resume = eeepc_hotk_resume,
+ .notify = eeepc_hotk_notify,
},
};
+/* PCI hotplug ops */
+static int eeepc_get_adapter_status(struct hotplug_slot *slot, u8 *value);
+
+static struct hotplug_slot_ops eeepc_hotplug_slot_ops = {
+ .owner = THIS_MODULE,
+ .get_adapter_status = eeepc_get_adapter_status,
+ .get_power_status = eeepc_get_adapter_status,
+};
+
/* The backlight device /sys/class/backlight */
static struct backlight_device *eeepc_backlight_device;
@@ -258,20 +283,20 @@
if (method == NULL)
return -ENODEV;
if (write_acpi_int(ehotk->handle, method, value, NULL))
- printk(EEEPC_WARNING "Error writing %s\n", method);
+ pr_warning("Error writing %s\n", method);
}
return 0;
}
static int get_acpi(int cm)
{
- int value = -1;
+ int value = -ENODEV;
if ((ehotk->cm_supported & (0x1 << cm))) {
const char *method = cm_getv[cm];
if (method == NULL)
return -ENODEV;
if (read_acpi_int(ehotk->handle, method, &value))
- printk(EEEPC_WARNING "Error reading %s\n", method);
+ pr_warning("Error reading %s\n", method);
}
return value;
}
@@ -316,6 +341,15 @@
.set_block = eeepc_rfkill_set,
};
+static void __init eeepc_enable_camera(void)
+{
+ /*
+ * If the following call to set_acpi() fails, it's because there's no
+ * camera so we can ignore the error.
+ */
+ set_acpi(CM_ASL_CAMERA, 1);
+}
+
/*
* Sys helpers
*/
@@ -334,13 +368,19 @@
rv = parse_arg(buf, count, &value);
if (rv > 0)
- set_acpi(cm, value);
+ value = set_acpi(cm, value);
+ if (value < 0)
+ return value;
return rv;
}
static ssize_t show_sys_acpi(int cm, char *buf)
{
- return sprintf(buf, "%d\n", get_acpi(cm));
+ int value = get_acpi(cm);
+
+ if (value < 0)
+ return value;
+ return sprintf(buf, "%d\n", value);
}
#define EEEPC_CREATE_DEVICE_ATTR(_name, _cm) \
@@ -367,13 +407,88 @@
EEEPC_CREATE_DEVICE_ATTR(camera, CM_ASL_CAMERA);
EEEPC_CREATE_DEVICE_ATTR(cardr, CM_ASL_CARDREADER);
EEEPC_CREATE_DEVICE_ATTR(disp, CM_ASL_DISPLAYSWITCH);
-EEEPC_CREATE_DEVICE_ATTR(cpufv, CM_ASL_CPUFV);
+
+struct eeepc_cpufv {
+ int num;
+ int cur;
+};
+
+static int get_cpufv(struct eeepc_cpufv *c)
+{
+ c->cur = get_acpi(CM_ASL_CPUFV);
+ c->num = (c->cur >> 8) & 0xff;
+ c->cur &= 0xff;
+ if (c->cur < 0 || c->num <= 0 || c->num > 12)
+ return -ENODEV;
+ return 0;
+}
+
+static ssize_t show_available_cpufv(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct eeepc_cpufv c;
+ int i;
+ ssize_t len = 0;
+
+ if (get_cpufv(&c))
+ return -ENODEV;
+ for (i = 0; i < c.num; i++)
+ len += sprintf(buf + len, "%d ", i);
+ len += sprintf(buf + len, "\n");
+ return len;
+}
+
+static ssize_t show_cpufv(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct eeepc_cpufv c;
+
+ if (get_cpufv(&c))
+ return -ENODEV;
+ return sprintf(buf, "%#x\n", (c.num << 8) | c.cur);
+}
+
+static ssize_t store_cpufv(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct eeepc_cpufv c;
+ int rv, value;
+
+ if (get_cpufv(&c))
+ return -ENODEV;
+ rv = parse_arg(buf, count, &value);
+ if (rv < 0)
+ return rv;
+ if (!rv || value < 0 || value >= c.num)
+ return -EINVAL;
+ set_acpi(CM_ASL_CPUFV, value);
+ return rv;
+}
+
+static struct device_attribute dev_attr_cpufv = {
+ .attr = {
+ .name = "cpufv",
+ .mode = 0644 },
+ .show = show_cpufv,
+ .store = store_cpufv
+};
+
+static struct device_attribute dev_attr_available_cpufv = {
+ .attr = {
+ .name = "available_cpufv",
+ .mode = 0444 },
+ .show = show_available_cpufv
+};
static struct attribute *platform_attributes[] = {
&dev_attr_camera.attr,
&dev_attr_cardr.attr,
&dev_attr_disp.attr,
&dev_attr_cpufv.attr,
+ &dev_attr_available_cpufv.attr,
NULL
};
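
For reference, get_cpufv() above decodes the CM_ASL_CPUFV value: bits 8-15 carry the number of available frequency/voltage levels and bits 0-7 the currently selected level, so a raw value of 0x301 would mean three levels with level 1 active. A tiny stand-alone check of that decoding (the 0x301 sample is made up for illustration):

    #include <stdio.h>

    struct eeepc_cpufv {
        int num;    /* number of available levels */
        int cur;    /* currently selected level */
    };

    static int decode_cpufv(int raw, struct eeepc_cpufv *c)
    {
        c->cur = raw & 0xff;
        c->num = (raw >> 8) & 0xff;
        if (c->cur < 0 || c->num <= 0 || c->num > 12)
            return -1;
        return 0;
    }

    int main(void)
    {
        struct eeepc_cpufv c;

        if (decode_cpufv(0x301, &c) == 0)   /* hypothetical sample value */
            printf("%d levels, level %d selected\n", c.num, c.cur);
        return 0;
    }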
@@ -439,6 +554,28 @@
return -EINVAL;
}
+static void cmsg_quirk(int cm, const char *name)
+{
+ int dummy;
+
+ /* Some BIOSes do not report cm although it is available.
+ Check if cm_getv[cm] works and, if yes, assume cm should be set. */
+ if (!(ehotk->cm_supported & (1 << cm))
+ && !read_acpi_int(ehotk->handle, cm_getv[cm], &dummy)) {
+ pr_info("%s (%x) not reported by BIOS,"
+ " enabling anyway\n", name, 1 << cm);
+ ehotk->cm_supported |= 1 << cm;
+ }
+}
+
+static void cmsg_quirks(void)
+{
+ cmsg_quirk(CM_ASL_LID, "LID");
+ cmsg_quirk(CM_ASL_TYPE, "TYPE");
+ cmsg_quirk(CM_ASL_PANELPOWER, "PANELPOWER");
+ cmsg_quirk(CM_ASL_TPD, "TPD");
+}
+
static int eeepc_hotk_check(void)
{
const struct key_entry *key;
@@ -451,26 +588,24 @@
if (ehotk->device->status.present) {
if (write_acpi_int(ehotk->handle, "INIT", ehotk->init_flag,
&buffer)) {
- printk(EEEPC_ERR "Hotkey initialization failed\n");
+ pr_err("Hotkey initialization failed\n");
return -ENODEV;
} else {
- printk(EEEPC_NOTICE "Hotkey init flags 0x%x\n",
- ehotk->init_flag);
+ pr_notice("Hotkey init flags 0x%x\n", ehotk->init_flag);
}
/* get control methods supported */
if (read_acpi_int(ehotk->handle, "CMSG"
, &ehotk->cm_supported)) {
- printk(EEEPC_ERR
- "Get control methods supported failed\n");
+ pr_err("Get control methods supported failed\n");
return -ENODEV;
} else {
- printk(EEEPC_INFO
- "Get control methods supported: 0x%x\n",
- ehotk->cm_supported);
+ cmsg_quirks();
+ pr_info("Get control methods supported: 0x%x\n",
+ ehotk->cm_supported);
}
ehotk->inputdev = input_allocate_device();
if (!ehotk->inputdev) {
- printk(EEEPC_INFO "Unable to allocate input device\n");
+ pr_info("Unable to allocate input device\n");
return 0;
}
ehotk->inputdev->name = "Asus EeePC extra buttons";
@@ -489,12 +624,12 @@
}
result = input_register_device(ehotk->inputdev);
if (result) {
- printk(EEEPC_INFO "Unable to register input device\n");
+ pr_info("Unable to register input device\n");
input_free_device(ehotk->inputdev);
return 0;
}
} else {
- printk(EEEPC_ERR "Hotkey device not present, aborting\n");
+ pr_err("Hotkey device not present, aborting\n");
return -EINVAL;
}
return 0;
@@ -512,17 +647,27 @@
return -1;
}
-static void eeepc_rfkill_notify(acpi_handle handle, u32 event, void *data)
+static int eeepc_get_adapter_status(struct hotplug_slot *hotplug_slot,
+ u8 *value)
+{
+ int val = get_acpi(CM_ASL_WLAN);
+
+ if (val == 1 || val == 0)
+ *value = val;
+ else
+ return -EINVAL;
+
+ return 0;
+}
+
+static void eeepc_rfkill_hotplug(void)
{
struct pci_dev *dev;
struct pci_bus *bus = pci_find_bus(0, 1);
bool blocked;
- if (event != ACPI_NOTIFY_BUS_CHECK)
- return;
-
if (!bus) {
- printk(EEEPC_WARNING "Unable to find PCI bus 1?\n");
+ pr_warning("Unable to find PCI bus 1?\n");
return;
}
@@ -538,7 +683,7 @@
if (dev) {
pci_bus_assign_resources(bus);
if (pci_bus_add_device(dev))
- printk(EEEPC_ERR "Unable to hotplug wifi\n");
+ pr_err("Unable to hotplug wifi\n");
}
} else {
dev = pci_get_slot(bus, 0);
@@ -548,10 +693,18 @@
}
}
- rfkill_set_sw_state(ehotk->eeepc_wlan_rfkill, blocked);
+ rfkill_set_sw_state(ehotk->wlan_rfkill, blocked);
}
-static void eeepc_hotk_notify(acpi_handle handle, u32 event, void *data)
+static void eeepc_rfkill_notify(acpi_handle handle, u32 event, void *data)
+{
+ if (event != ACPI_NOTIFY_BUS_CHECK)
+ return;
+
+ eeepc_rfkill_hotplug();
+}
+
+static void eeepc_hotk_notify(struct acpi_device *device, u32 event)
{
static struct key_entry *key;
u16 count;
@@ -559,6 +712,8 @@
if (!ehotk)
return;
+ if (event > ACPI_MAX_SYS_NOTIFY)
+ return;
if (event >= NOTIFY_BRN_MIN && event <= NOTIFY_BRN_MAX)
brn = notify_brn();
count = ehotk->event_count[event % 128]++;
@@ -611,8 +766,7 @@
eeepc_rfkill_notify,
NULL);
if (ACPI_FAILURE(status))
- printk(EEEPC_WARNING
- "Failed to register notify on %s\n", node);
+ pr_warning("Failed to register notify on %s\n", node);
} else
return -ENODEV;
@@ -631,20 +785,66 @@
ACPI_SYSTEM_NOTIFY,
eeepc_rfkill_notify);
if (ACPI_FAILURE(status))
- printk(EEEPC_ERR
- "Error removing rfkill notify handler %s\n",
+ pr_err("Error removing rfkill notify handler %s\n",
node);
}
}
+static void eeepc_cleanup_pci_hotplug(struct hotplug_slot *hotplug_slot)
+{
+ kfree(hotplug_slot->info);
+ kfree(hotplug_slot);
+}
+
+static int eeepc_setup_pci_hotplug(void)
+{
+ int ret = -ENOMEM;
+ struct pci_bus *bus = pci_find_bus(0, 1);
+
+ if (!bus) {
+ pr_err("Unable to find wifi PCI bus\n");
+ return -ENODEV;
+ }
+
+ ehotk->hotplug_slot = kzalloc(sizeof(struct hotplug_slot), GFP_KERNEL);
+ if (!ehotk->hotplug_slot)
+ goto error_slot;
+
+ ehotk->hotplug_slot->info = kzalloc(sizeof(struct hotplug_slot_info),
+ GFP_KERNEL);
+ if (!ehotk->hotplug_slot->info)
+ goto error_info;
+
+ ehotk->hotplug_slot->private = ehotk;
+ ehotk->hotplug_slot->release = &eeepc_cleanup_pci_hotplug;
+ ehotk->hotplug_slot->ops = &eeepc_hotplug_slot_ops;
+ eeepc_get_adapter_status(ehotk->hotplug_slot,
+ &ehotk->hotplug_slot->info->adapter_status);
+
+ ret = pci_hp_register(ehotk->hotplug_slot, bus, 0, "eeepc-wifi");
+ if (ret) {
+ pr_err("Unable to register hotplug slot - %d\n", ret);
+ goto error_register;
+ }
+
+ return 0;
+
+error_register:
+ kfree(ehotk->hotplug_slot->info);
+error_info:
+ kfree(ehotk->hotplug_slot);
+ ehotk->hotplug_slot = NULL;
+error_slot:
+ return ret;
+}
+
static int eeepc_hotk_add(struct acpi_device *device)
{
- acpi_status status = AE_OK;
int result;
if (!device)
return -EINVAL;
- printk(EEEPC_NOTICE EEEPC_HOTK_NAME "\n");
+ pr_notice(EEEPC_HOTK_NAME "\n");
ehotk = kzalloc(sizeof(struct eeepc_hotk), GFP_KERNEL);
if (!ehotk)
return -ENOMEM;
@@ -657,58 +857,9 @@
result = eeepc_hotk_check();
if (result)
goto ehotk_fail;
- status = acpi_install_notify_handler(ehotk->handle, ACPI_SYSTEM_NOTIFY,
- eeepc_hotk_notify, ehotk);
- if (ACPI_FAILURE(status))
- printk(EEEPC_ERR "Error installing notify handler\n");
-
- eeepc_register_rfkill_notifier("\\_SB.PCI0.P0P6");
- eeepc_register_rfkill_notifier("\\_SB.PCI0.P0P7");
-
- if (get_acpi(CM_ASL_WLAN) != -1) {
- ehotk->eeepc_wlan_rfkill = rfkill_alloc("eeepc-wlan",
- &device->dev,
- RFKILL_TYPE_WLAN,
- &eeepc_rfkill_ops,
- (void *)CM_ASL_WLAN);
-
- if (!ehotk->eeepc_wlan_rfkill)
- goto wlan_fail;
-
- rfkill_set_sw_state(ehotk->eeepc_wlan_rfkill,
- get_acpi(CM_ASL_WLAN) != 1);
- result = rfkill_register(ehotk->eeepc_wlan_rfkill);
- if (result)
- goto wlan_fail;
- }
-
- if (get_acpi(CM_ASL_BLUETOOTH) != -1) {
- ehotk->eeepc_bluetooth_rfkill =
- rfkill_alloc("eeepc-bluetooth",
- &device->dev,
- RFKILL_TYPE_BLUETOOTH,
- &eeepc_rfkill_ops,
- (void *)CM_ASL_BLUETOOTH);
-
- if (!ehotk->eeepc_bluetooth_rfkill)
- goto bluetooth_fail;
-
- rfkill_set_sw_state(ehotk->eeepc_bluetooth_rfkill,
- get_acpi(CM_ASL_BLUETOOTH) != 1);
- result = rfkill_register(ehotk->eeepc_bluetooth_rfkill);
- if (result)
- goto bluetooth_fail;
- }
return 0;
- bluetooth_fail:
- rfkill_destroy(ehotk->eeepc_bluetooth_rfkill);
- rfkill_unregister(ehotk->eeepc_wlan_rfkill);
- wlan_fail:
- rfkill_destroy(ehotk->eeepc_wlan_rfkill);
- eeepc_unregister_rfkill_notifier("\\_SB.PCI0.P0P6");
- eeepc_unregister_rfkill_notifier("\\_SB.PCI0.P0P7");
ehotk_fail:
kfree(ehotk);
ehotk = NULL;
@@ -718,22 +869,39 @@
static int eeepc_hotk_remove(struct acpi_device *device, int type)
{
- acpi_status status = 0;
-
if (!device || !acpi_driver_data(device))
return -EINVAL;
- status = acpi_remove_notify_handler(ehotk->handle, ACPI_SYSTEM_NOTIFY,
- eeepc_hotk_notify);
- if (ACPI_FAILURE(status))
- printk(EEEPC_ERR "Error removing notify handler\n");
-
- eeepc_unregister_rfkill_notifier("\\_SB.PCI0.P0P6");
- eeepc_unregister_rfkill_notifier("\\_SB.PCI0.P0P7");
kfree(ehotk);
return 0;
}
+static int eeepc_hotk_resume(struct acpi_device *device)
+{
+ if (ehotk->wlan_rfkill) {
+ bool wlan;
+
+ /* Workaround - it seems that _PTS disables the wireless
+ without notification or changing the value read by WLAN.
+ Normally this is fine because the correct value is restored
+ from the non-volatile storage on resume, but we need to do
+ it ourselves in case suspend is aborted, or we lose wireless.
+ */
+ wlan = get_acpi(CM_ASL_WLAN);
+ set_acpi(CM_ASL_WLAN, wlan);
+
+ rfkill_set_sw_state(ehotk->wlan_rfkill, wlan != 1);
+
+ eeepc_rfkill_hotplug();
+ }
+
+ if (ehotk->bluetooth_rfkill)
+ rfkill_set_sw_state(ehotk->bluetooth_rfkill,
+ get_acpi(CM_ASL_BLUETOOTH) != 1);
+
+ return 0;
+}
+
/*
* Hwmon
*/
@@ -850,10 +1018,16 @@
static void eeepc_rfkill_exit(void)
{
- if (ehotk->eeepc_wlan_rfkill)
- rfkill_unregister(ehotk->eeepc_wlan_rfkill);
- if (ehotk->eeepc_bluetooth_rfkill)
- rfkill_unregister(ehotk->eeepc_bluetooth_rfkill);
+ eeepc_unregister_rfkill_notifier("\\_SB.PCI0.P0P6");
+ eeepc_unregister_rfkill_notifier("\\_SB.PCI0.P0P7");
+ if (ehotk->wlan_rfkill)
+ rfkill_unregister(ehotk->wlan_rfkill);
+ if (ehotk->bluetooth_rfkill)
+ rfkill_unregister(ehotk->bluetooth_rfkill);
+ if (ehotk->wwan3g_rfkill)
+ rfkill_unregister(ehotk->wwan3g_rfkill);
+ if (ehotk->hotplug_slot)
+ pci_hp_deregister(ehotk->hotplug_slot);
}
static void eeepc_input_exit(void)
@@ -888,6 +1062,75 @@
platform_driver_unregister(&platform_driver);
}
+static int eeepc_new_rfkill(struct rfkill **rfkill,
+ const char *name, struct device *dev,
+ enum rfkill_type type, int cm)
+{
+ int result;
+
+ result = get_acpi(cm);
+ if (result < 0)
+ return result;
+
+ *rfkill = rfkill_alloc(name, dev, type,
+ &eeepc_rfkill_ops, (void *)(unsigned long)cm);
+
+ if (!*rfkill)
+ return -EINVAL;
+
+ rfkill_init_sw_state(*rfkill, get_acpi(cm) != 1);
+ result = rfkill_register(*rfkill);
+ if (result) {
+ rfkill_destroy(*rfkill);
+ *rfkill = NULL;
+ return result;
+ }
+ return 0;
+}
+
+
+static int eeepc_rfkill_init(struct device *dev)
+{
+ int result = 0;
+
+ eeepc_register_rfkill_notifier("\\_SB.PCI0.P0P6");
+ eeepc_register_rfkill_notifier("\\_SB.PCI0.P0P7");
+
+ result = eeepc_new_rfkill(&ehotk->wlan_rfkill,
+ "eeepc-wlan", dev,
+ RFKILL_TYPE_WLAN, CM_ASL_WLAN);
+
+ if (result && result != -ENODEV)
+ goto exit;
+
+ result = eeepc_new_rfkill(&ehotk->bluetooth_rfkill,
+ "eeepc-bluetooth", dev,
+ RFKILL_TYPE_BLUETOOTH, CM_ASL_BLUETOOTH);
+
+ if (result && result != -ENODEV)
+ goto exit;
+
+ result = eeepc_new_rfkill(&ehotk->wwan3g_rfkill,
+ "eeepc-wwan3g", dev,
+ RFKILL_TYPE_WWAN, CM_ASL_3G);
+
+ if (result && result != -ENODEV)
+ goto exit;
+
+ result = eeepc_setup_pci_hotplug();
+ /*
+ * If we get -EBUSY then something else is handling the PCI hotplug -
+ * don't fail in this case
+ */
+ if (result == -EBUSY)
+ result = 0;
+
+exit:
+ if (result && result != -ENODEV)
+ eeepc_rfkill_exit();
+ return result;
+}
+
static int eeepc_backlight_init(struct device *dev)
{
struct backlight_device *bd;
@@ -895,8 +1138,7 @@
bd = backlight_device_register(EEEPC_HOTK_FILE, dev,
NULL, &eeepcbl_ops);
if (IS_ERR(bd)) {
- printk(EEEPC_ERR
- "Could not register eeepc backlight device\n");
+ pr_err("Could not register eeepc backlight device\n");
eeepc_backlight_device = NULL;
return PTR_ERR(bd);
}
@@ -915,8 +1157,7 @@
hwmon = hwmon_device_register(dev);
if (IS_ERR(hwmon)) {
- printk(EEEPC_ERR
- "Could not register eeepc hwmon device\n");
+ pr_err("Could not register eeepc hwmon device\n");
eeepc_hwmon_device = NULL;
return PTR_ERR(hwmon);
}
@@ -942,19 +1183,9 @@
acpi_bus_unregister_driver(&eeepc_hotk_driver);
return -ENODEV;
}
- dev = acpi_get_physical_device(ehotk->device->handle);
- if (!acpi_video_backlight_support()) {
- result = eeepc_backlight_init(dev);
- if (result)
- goto fail_backlight;
- } else
- printk(EEEPC_INFO "Backlight controlled by ACPI video "
- "driver\n");
+ eeepc_enable_camera();
- result = eeepc_hwmon_init(dev);
- if (result)
- goto fail_hwmon;
/* Register platform stuff */
result = platform_driver_register(&platform_driver);
if (result)
@@ -971,7 +1202,33 @@
&platform_attribute_group);
if (result)
goto fail_sysfs;
+
+ dev = &platform_device->dev;
+
+ if (!acpi_video_backlight_support()) {
+ result = eeepc_backlight_init(dev);
+ if (result)
+ goto fail_backlight;
+ } else
+ pr_info("Backlight controlled by ACPI video "
+ "driver\n");
+
+ result = eeepc_hwmon_init(dev);
+ if (result)
+ goto fail_hwmon;
+
+ result = eeepc_rfkill_init(dev);
+ if (result)
+ goto fail_rfkill;
+
return 0;
+fail_rfkill:
+ eeepc_hwmon_exit();
+fail_hwmon:
+ eeepc_backlight_exit();
+fail_backlight:
+ sysfs_remove_group(&platform_device->dev.kobj,
+ &platform_attribute_group);
fail_sysfs:
platform_device_del(platform_device);
fail_platform_device2:
@@ -979,12 +1236,7 @@
fail_platform_device1:
platform_driver_unregister(&platform_driver);
fail_platform_driver:
- eeepc_hwmon_exit();
-fail_hwmon:
- eeepc_backlight_exit();
-fail_backlight:
eeepc_input_exit();
- eeepc_rfkill_exit();
return result;
}
diff --git a/drivers/platform/x86/hp-wmi.c b/drivers/platform/x86/hp-wmi.c
index 16fffe44..4ac2311 100644
--- a/drivers/platform/x86/hp-wmi.c
+++ b/drivers/platform/x86/hp-wmi.c
@@ -47,7 +47,7 @@
#define HPWMI_DISPLAY_QUERY 0x1
#define HPWMI_HDDTEMP_QUERY 0x2
#define HPWMI_ALS_QUERY 0x3
-#define HPWMI_DOCK_QUERY 0x4
+#define HPWMI_HARDWARE_QUERY 0x4
#define HPWMI_WIRELESS_QUERY 0x5
#define HPWMI_HOTKEY_QUERY 0xc
@@ -75,10 +75,9 @@
u16 keycode;
};
-enum { KE_KEY, KE_SW, KE_END };
+enum { KE_KEY, KE_END };
static struct key_entry hp_wmi_keymap[] = {
- {KE_SW, 0x01, SW_DOCK},
{KE_KEY, 0x02, KEY_BRIGHTNESSUP},
{KE_KEY, 0x03, KEY_BRIGHTNESSDOWN},
{KE_KEY, 0x20e6, KEY_PROG1},
@@ -151,7 +150,22 @@
static int hp_wmi_dock_state(void)
{
- return hp_wmi_perform_query(HPWMI_DOCK_QUERY, 0, 0);
+ int ret = hp_wmi_perform_query(HPWMI_HARDWARE_QUERY, 0, 0);
+
+ if (ret < 0)
+ return ret;
+
+ return ret & 0x1;
+}
+
+static int hp_wmi_tablet_state(void)
+{
+ int ret = hp_wmi_perform_query(HPWMI_HARDWARE_QUERY, 0, 0);
+
+ if (ret < 0)
+ return ret;
+
+ return (ret & 0x4) ? 1 : 0;
}
static int hp_wmi_set_block(void *data, bool blocked)
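
The renamed HPWMI_HARDWARE_QUERY returns a single bitfield from which the two accessors above extract their results: bit 0 is the dock state and bit 2 the tablet-mode switch, so a raw return of 0x5 decodes to docked and in tablet mode. A trivial stand-alone decode (the 0x5 sample value is made up):

    #include <stdio.h>

    int main(void)
    {
        int ret = 0x5;  /* hypothetical HPWMI_HARDWARE_QUERY result */

        printf("dock: %d\n", ret & 0x1);                /* bit 0 */
        printf("tablet: %d\n", (ret & 0x4) ? 1 : 0);    /* bit 2 */
        return 0;
    }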
@@ -232,6 +246,15 @@
return sprintf(buf, "%d\n", value);
}
+static ssize_t show_tablet(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ int value = hp_wmi_tablet_state();
+ if (value < 0)
+ return -EINVAL;
+ return sprintf(buf, "%d\n", value);
+}
+
static ssize_t set_als(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
@@ -244,6 +267,7 @@
static DEVICE_ATTR(hddtemp, S_IRUGO, show_hddtemp, NULL);
static DEVICE_ATTR(als, S_IRUGO | S_IWUSR, show_als, set_als);
static DEVICE_ATTR(dock, S_IRUGO, show_dock, NULL);
+static DEVICE_ATTR(tablet, S_IRUGO, show_tablet, NULL);
static struct key_entry *hp_wmi_get_entry_by_scancode(int code)
{
@@ -326,13 +350,13 @@
key->keycode, 0);
input_sync(hp_wmi_input_dev);
break;
- case KE_SW:
- input_report_switch(hp_wmi_input_dev,
- key->keycode,
- hp_wmi_dock_state());
- input_sync(hp_wmi_input_dev);
- break;
}
+ } else if (eventcode == 0x1) {
+ input_report_switch(hp_wmi_input_dev, SW_DOCK,
+ hp_wmi_dock_state());
+ input_report_switch(hp_wmi_input_dev, SW_TABLET_MODE,
+ hp_wmi_tablet_state());
+ input_sync(hp_wmi_input_dev);
} else if (eventcode == 0x5) {
if (wifi_rfkill)
rfkill_set_sw_state(wifi_rfkill,
@@ -369,18 +393,19 @@
set_bit(EV_KEY, hp_wmi_input_dev->evbit);
set_bit(key->keycode, hp_wmi_input_dev->keybit);
break;
- case KE_SW:
- set_bit(EV_SW, hp_wmi_input_dev->evbit);
- set_bit(key->keycode, hp_wmi_input_dev->swbit);
-
- /* Set initial dock state */
- input_report_switch(hp_wmi_input_dev, key->keycode,
- hp_wmi_dock_state());
- input_sync(hp_wmi_input_dev);
- break;
}
}
+ set_bit(EV_SW, hp_wmi_input_dev->evbit);
+ set_bit(SW_DOCK, hp_wmi_input_dev->swbit);
+ set_bit(SW_TABLET_MODE, hp_wmi_input_dev->swbit);
+
+ /* Set initial hardware state */
+ input_report_switch(hp_wmi_input_dev, SW_DOCK, hp_wmi_dock_state());
+ input_report_switch(hp_wmi_input_dev, SW_TABLET_MODE,
+ hp_wmi_tablet_state());
+ input_sync(hp_wmi_input_dev);
+
err = input_register_device(hp_wmi_input_dev);
if (err) {
@@ -397,6 +422,7 @@
device_remove_file(&device->dev, &dev_attr_hddtemp);
device_remove_file(&device->dev, &dev_attr_als);
device_remove_file(&device->dev, &dev_attr_dock);
+ device_remove_file(&device->dev, &dev_attr_tablet);
}
static int __init hp_wmi_bios_setup(struct platform_device *device)
@@ -416,6 +442,9 @@
err = device_create_file(&device->dev, &dev_attr_dock);
if (err)
goto add_sysfs_error;
+ err = device_create_file(&device->dev, &dev_attr_tablet);
+ if (err)
+ goto add_sysfs_error;
if (wireless & 0x1) {
wifi_rfkill = rfkill_alloc("hp-wifi", &device->dev,
@@ -485,23 +514,17 @@
static int hp_wmi_resume_handler(struct platform_device *device)
{
- struct key_entry *key;
-
/*
- * Docking state may have changed while suspended, so trigger
- * an input event for the current state. As this is a switch,
+ * Hardware state may have changed while suspended, so trigger
+ * input events for the current state. As this is a switch,
* the input layer will only actually pass it on if the state
* changed.
*/
- for (key = hp_wmi_keymap; key->type != KE_END; key++) {
- switch (key->type) {
- case KE_SW:
- input_report_switch(hp_wmi_input_dev, key->keycode,
- hp_wmi_dock_state());
- input_sync(hp_wmi_input_dev);
- break;
- }
- }
+
+ input_report_switch(hp_wmi_input_dev, SW_DOCK, hp_wmi_dock_state());
+ input_report_switch(hp_wmi_input_dev, SW_TABLET_MODE,
+ hp_wmi_tablet_state());
+ input_sync(hp_wmi_input_dev);
return 0;
}
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
index 86e9585..a463fd7 100644
--- a/drivers/platform/x86/thinkpad_acpi.c
+++ b/drivers/platform/x86/thinkpad_acpi.c
@@ -22,7 +22,7 @@
*/
#define TPACPI_VERSION "0.23"
-#define TPACPI_SYSFS_VERSION 0x020300
+#define TPACPI_SYSFS_VERSION 0x020400
/*
* Changelog:
@@ -257,6 +257,8 @@
u32 wan:1;
u32 uwb:1;
u32 fan_ctrl_status_undef:1;
+ u32 second_fan:1;
+ u32 beep_needs_two_args:1;
u32 input_device_registered:1;
u32 platform_drv_registered:1;
u32 platform_drv_attrs_registered:1;
@@ -277,8 +279,10 @@
char *bios_version_str; /* Something like 1ZET51WW (1.03z) */
char *ec_version_str; /* Something like 1ZHT51WW-1.04a */
- u16 bios_model; /* Big Endian, TP-1Y = 0x5931, 0 = unknown */
+ u16 bios_model; /* 1Y = 0x5931, 0 = unknown */
u16 ec_model;
+ u16 bios_release; /* 1ZETK1WW = 0x314b, 0 = unknown */
+ u16 ec_release;
char *model_str; /* ThinkPad T43 */
char *nummodel_str; /* 9384A9C for a 9384-A9C model */
@@ -355,6 +359,73 @@
} \
} while (0)
+/*
+ * Quirk handling helpers
+ *
+ * ThinkPad IDs and versions seen in the field so far
+ * are two characters from the set [0-9A-Z], i.e. base 36.
+ *
+ * We use values well outside that range as specials.
+ */
+
+#define TPACPI_MATCH_ANY 0xffffU
+#define TPACPI_MATCH_UNKNOWN 0U
+
+/* TPID('1', 'Y') == 0x5931 */
+#define TPID(__c1, __c2) (((__c2) << 8) | (__c1))
+
+#define TPACPI_Q_IBM(__id1, __id2, __quirk) \
+ { .vendor = PCI_VENDOR_ID_IBM, \
+ .bios = TPID(__id1, __id2), \
+ .ec = TPACPI_MATCH_ANY, \
+ .quirks = (__quirk) }
+
+#define TPACPI_Q_LNV(__id1, __id2, __quirk) \
+ { .vendor = PCI_VENDOR_ID_LENOVO, \
+ .bios = TPID(__id1, __id2), \
+ .ec = TPACPI_MATCH_ANY, \
+ .quirks = (__quirk) }
+
+struct tpacpi_quirk {
+ unsigned int vendor;
+ u16 bios;
+ u16 ec;
+ unsigned long quirks;
+};
+
+/**
+ * tpacpi_check_quirks() - search BIOS/EC version on a list
+ * @qlist: array of &struct tpacpi_quirk
+ * @qlist_size: number of elements in @qlist
+ *
+ * Iterates over a quirks list until one is found that matches the
+ * ThinkPad's vendor, BIOS and EC model.
+ *
+ * Returns 0 if nothing matches, otherwise returns the quirks field of
+ * the matching &struct tpacpi_quirk entry.
+ *
+ * The match criteria are: vendor, EC and BIOS must all match.
+ */
+static unsigned long __init tpacpi_check_quirks(
+ const struct tpacpi_quirk *qlist,
+ unsigned int qlist_size)
+{
+ while (qlist_size) {
+ if ((qlist->vendor == thinkpad_id.vendor ||
+ qlist->vendor == TPACPI_MATCH_ANY) &&
+ (qlist->bios == thinkpad_id.bios_model ||
+ qlist->bios == TPACPI_MATCH_ANY) &&
+ (qlist->ec == thinkpad_id.ec_model ||
+ qlist->ec == TPACPI_MATCH_ANY))
+ return qlist->quirks;
+
+ qlist_size--;
+ qlist++;
+ }
+ return 0;
+}
+
+
/****************************************************************************
****************************************************************************
*
@@ -1163,8 +1234,8 @@
{
struct tpacpi_rfk *atp_rfk;
int res;
- bool initial_sw_state = false;
- int initial_sw_status;
+ bool sw_state = false;
+ int sw_status;
BUG_ON(id >= TPACPI_RFK_SW_MAX || tpacpi_rfkill_switches[id]);
@@ -1185,17 +1256,17 @@
atp_rfk->id = id;
atp_rfk->ops = tp_rfkops;
- initial_sw_status = (tp_rfkops->get_status)();
- if (initial_sw_status < 0) {
+ sw_status = (tp_rfkops->get_status)();
+ if (sw_status < 0) {
printk(TPACPI_ERR
"failed to read initial state for %s, error %d\n",
- name, initial_sw_status);
+ name, sw_status);
} else {
- initial_sw_state = (initial_sw_status == TPACPI_RFK_RADIO_OFF);
+ sw_state = (sw_status == TPACPI_RFK_RADIO_OFF);
if (set_default) {
/* try to keep the initial state, since we ask the
* firmware to preserve it across S5 in NVRAM */
- rfkill_set_sw_state(atp_rfk->rfkill, initial_sw_state);
+ rfkill_init_sw_state(atp_rfk->rfkill, sw_state);
}
}
rfkill_set_hw_state(atp_rfk->rfkill, tpacpi_rfk_check_hwblock_state());
@@ -2880,7 +2951,7 @@
/* update bright_acpimode... */
tpacpi_check_std_acpi_brightness_support();
- if (tp_features.bright_acpimode) {
+ if (tp_features.bright_acpimode && acpi_video_backlight_support()) {
printk(TPACPI_INFO
"This ThinkPad has standard ACPI backlight "
"brightness control, supported by the ACPI "
@@ -4773,7 +4844,7 @@
"LED", /* all others */
); /* R30, R31 */
-#define TPACPI_LED_NUMLEDS 8
+#define TPACPI_LED_NUMLEDS 16
static struct tpacpi_led_classdev *tpacpi_leds;
static enum led_status_t tpacpi_led_state_cache[TPACPI_LED_NUMLEDS];
static const char * const tpacpi_led_names[TPACPI_LED_NUMLEDS] = {
@@ -4786,15 +4857,20 @@
"tpacpi::dock_batt",
"tpacpi::unknown_led",
"tpacpi::standby",
+ "tpacpi::dock_status1",
+ "tpacpi::dock_status2",
+ "tpacpi::unknown_led2",
+ "tpacpi::unknown_led3",
+ "tpacpi::thinkvantage",
};
-#define TPACPI_SAFE_LEDS 0x0081U
+#define TPACPI_SAFE_LEDS 0x1081U
static inline bool tpacpi_is_led_restricted(const unsigned int led)
{
#ifdef CONFIG_THINKPAD_ACPI_UNSAFE_LEDS
return false;
#else
- return (TPACPI_SAFE_LEDS & (1 << led)) == 0;
+ return (1U & (TPACPI_SAFE_LEDS >> led)) == 0;
#endif
}
@@ -4956,6 +5032,10 @@
tpacpi_leds[led].led = led;
+ /* LEDs with no name don't get registered */
+ if (!tpacpi_led_names[led])
+ return 0;
+
tpacpi_leds[led].led_classdev.brightness_set = &led_sysfs_set;
tpacpi_leds[led].led_classdev.blink_set = &led_sysfs_blink_set;
if (led_supported == TPACPI_LED_570)
@@ -4974,10 +5054,59 @@
return rc;
}
+static const struct tpacpi_quirk led_useful_qtable[] __initconst = {
+ TPACPI_Q_IBM('1', 'E', 0x009f), /* A30 */
+ TPACPI_Q_IBM('1', 'N', 0x009f), /* A31 */
+ TPACPI_Q_IBM('1', 'G', 0x009f), /* A31 */
+
+ TPACPI_Q_IBM('1', 'I', 0x0097), /* T30 */
+ TPACPI_Q_IBM('1', 'R', 0x0097), /* T40, T41, T42, R50, R51 */
+ TPACPI_Q_IBM('7', '0', 0x0097), /* T43, R52 */
+ TPACPI_Q_IBM('1', 'Y', 0x0097), /* T43 */
+ TPACPI_Q_IBM('1', 'W', 0x0097), /* R50e */
+ TPACPI_Q_IBM('1', 'V', 0x0097), /* R51 */
+ TPACPI_Q_IBM('7', '8', 0x0097), /* R51e */
+ TPACPI_Q_IBM('7', '6', 0x0097), /* R52 */
+
+ TPACPI_Q_IBM('1', 'K', 0x00bf), /* X30 */
+ TPACPI_Q_IBM('1', 'Q', 0x00bf), /* X31, X32 */
+ TPACPI_Q_IBM('1', 'U', 0x00bf), /* X40 */
+ TPACPI_Q_IBM('7', '4', 0x00bf), /* X41 */
+ TPACPI_Q_IBM('7', '5', 0x00bf), /* X41t */
+
+ TPACPI_Q_IBM('7', '9', 0x1f97), /* T60 (1) */
+ TPACPI_Q_IBM('7', '7', 0x1f97), /* Z60* (1) */
+ TPACPI_Q_IBM('7', 'F', 0x1f97), /* Z61* (1) */
+ TPACPI_Q_IBM('7', 'B', 0x1fb7), /* X60 (1) */
+
+ /* (1) - may have excess leds enabled on MSB */
+
+ /* Defaults (order matters, keep last, don't reorder!) */
+ { /* Lenovo */
+ .vendor = PCI_VENDOR_ID_LENOVO,
+ .bios = TPACPI_MATCH_ANY, .ec = TPACPI_MATCH_ANY,
+ .quirks = 0x1fffU,
+ },
+ { /* IBM ThinkPads with no EC version string */
+ .vendor = PCI_VENDOR_ID_IBM,
+ .bios = TPACPI_MATCH_ANY, .ec = TPACPI_MATCH_UNKNOWN,
+ .quirks = 0x00ffU,
+ },
+ { /* IBM ThinkPads with EC version string */
+ .vendor = PCI_VENDOR_ID_IBM,
+ .bios = TPACPI_MATCH_ANY, .ec = TPACPI_MATCH_ANY,
+ .quirks = 0x00bfU,
+ },
+};
+
+#undef TPACPI_LEDQ_IBM
+#undef TPACPI_LEDQ_LNV
+
static int __init led_init(struct ibm_init_struct *iibm)
{
unsigned int i;
int rc;
+ unsigned long useful_leds;
vdbg_printk(TPACPI_DBG_INIT, "initializing LED subdriver\n");
@@ -4999,6 +5128,9 @@
vdbg_printk(TPACPI_DBG_INIT, "LED commands are %s, mode %d\n",
str_supported(led_supported), led_supported);
+ if (led_supported == TPACPI_LED_NONE)
+ return 1;
+
tpacpi_leds = kzalloc(sizeof(*tpacpi_leds) * TPACPI_LED_NUMLEDS,
GFP_KERNEL);
if (!tpacpi_leds) {
@@ -5006,8 +5138,12 @@
return -ENOMEM;
}
+ useful_leds = tpacpi_check_quirks(led_useful_qtable,
+ ARRAY_SIZE(led_useful_qtable));
+
for (i = 0; i < TPACPI_LED_NUMLEDS; i++) {
- if (!tpacpi_is_led_restricted(i)) {
+ if (!tpacpi_is_led_restricted(i) &&
+ test_bit(i, &useful_leds)) {
rc = tpacpi_init_led(i);
if (rc < 0) {
led_exit();
@@ -5017,12 +5153,11 @@
}
#ifdef CONFIG_THINKPAD_ACPI_UNSAFE_LEDS
- if (led_supported != TPACPI_LED_NONE)
- printk(TPACPI_NOTICE
- "warning: userspace override of important "
- "firmware LEDs is enabled\n");
+ printk(TPACPI_NOTICE
+ "warning: userspace override of important "
+ "firmware LEDs is enabled\n");
#endif
- return (led_supported != TPACPI_LED_NONE)? 0 : 1;
+ return 0;
}
#define str_led_status(s) \
@@ -5052,7 +5187,7 @@
}
len += sprintf(p + len, "commands:\t"
- "<led> on, <led> off, <led> blink (<led> is 0-7)\n");
+ "<led> on, <led> off, <led> blink (<led> is 0-15)\n");
return len;
}
@@ -5067,7 +5202,7 @@
return -ENODEV;
while ((cmd = next_cmd(&buf))) {
- if (sscanf(cmd, "%d", &led) != 1 || led < 0 || led > 7)
+ if (sscanf(cmd, "%d", &led) != 1 || led < 0 || led > 15)
return -EINVAL;
if (strstr(cmd, "off")) {
@@ -5101,8 +5236,17 @@
TPACPI_HANDLE(beep, ec, "BEEP"); /* all except R30, R31 */
+#define TPACPI_BEEP_Q1 0x0001
+
+static const struct tpacpi_quirk beep_quirk_table[] __initconst = {
+ TPACPI_Q_IBM('I', 'M', TPACPI_BEEP_Q1), /* 570 */
+ TPACPI_Q_IBM('I', 'U', TPACPI_BEEP_Q1), /* 570E - unverified */
+};
+
static int __init beep_init(struct ibm_init_struct *iibm)
{
+ unsigned long quirks;
+
vdbg_printk(TPACPI_DBG_INIT, "initializing beep subdriver\n");
TPACPI_ACPIHANDLE_INIT(beep);
@@ -5110,6 +5254,11 @@
vdbg_printk(TPACPI_DBG_INIT, "beep is %s\n",
str_supported(beep_handle != NULL));
+ quirks = tpacpi_check_quirks(beep_quirk_table,
+ ARRAY_SIZE(beep_quirk_table));
+
+ tp_features.beep_needs_two_args = !!(quirks & TPACPI_BEEP_Q1);
+
return (beep_handle)? 0 : 1;
}
@@ -5141,8 +5290,15 @@
/* beep_cmd set */
} else
return -EINVAL;
- if (!acpi_evalf(beep_handle, NULL, NULL, "vdd", beep_cmd, 0))
- return -EIO;
+ if (tp_features.beep_needs_two_args) {
+ if (!acpi_evalf(beep_handle, NULL, NULL, "vdd",
+ beep_cmd, 0))
+ return -EIO;
+ } else {
+ if (!acpi_evalf(beep_handle, NULL, NULL, "vd",
+ beep_cmd))
+ return -EIO;
+ }
}
return 0;
@@ -5569,6 +5725,10 @@
* Bit 3-0: backlight brightness level
*
* brightness_get_raw returns status data in the HBRV layout
+ *
+ * WARNING: The X61 has been verified to use HBRV for something else, so
+ * this should be used _only_ on IBM ThinkPads, and maybe with some careful
+ * testing on the very early *60 Lenovo models...
*/
enum {
@@ -5869,6 +6029,12 @@
brightness_mode);
}
+ /* Safety */
+ if (thinkpad_id.vendor != PCI_VENDOR_ID_IBM &&
+ (brightness_mode == TPACPI_BRGHT_MODE_ECNVRAM ||
+ brightness_mode == TPACPI_BRGHT_MODE_EC))
+ return -EINVAL;
+
if (tpacpi_brightness_get_raw(&b) < 0)
return 1;
@@ -6161,6 +6327,21 @@
* For firmware bugs, refer to:
* http://thinkwiki.org/wiki/Embedded_Controller_Firmware#Firmware_Issues
*
+ * ----
+ *
+ * ThinkPad EC register 0x31 bit 0 (only on select models)
+ *
+ * When bit 0 of EC register 0x31 is zero, the tachometer registers
+ * show the speed of the main fan. When bit 0 of EC register 0x31
+ * is one, the tachometer registers show the speed of the auxiliary
+ * fan.
+ *
+ * Fan control seems to affect both fans, regardless of the state
+ * of this bit.
+ *
+ * So far, only the firmware for the X60/X61 non-tablet versions
+ * seems to support this (firmware TP-7M).
+ *
* TPACPI_FAN_WR_ACPI_FANS:
* ThinkPad X31, X40, X41. Not available in the X60.
*
@@ -6187,6 +6368,8 @@
fan_status_offset = 0x2f, /* EC register 0x2f */
fan_rpm_offset = 0x84, /* EC register 0x84: LSB, 0x85 MSB (RPM)
* 0x84 must be read before 0x85 */
+ fan_select_offset = 0x31, /* EC register 0x31 (Firmware 7M)
+ bit 0 selects which fan is active */
TP_EC_FAN_FULLSPEED = 0x40, /* EC fan mode: full speed */
TP_EC_FAN_AUTO = 0x80, /* EC fan mode: auto fan control */
@@ -6249,30 +6432,18 @@
* We assume 0x07 really means auto mode while this quirk is active,
* as this is far more likely than the ThinkPad being in level 7,
* which is only used by the firmware during thermal emergencies.
+ *
+ * Enable for TP-1Y (T43), TP-78 (R51e), TP-76 (R52),
+ * TP-70 (T43, R52), which are known to be buggy.
*/
-static void fan_quirk1_detect(void)
+static void fan_quirk1_setup(void)
{
- /* In some ThinkPads, neither the EC nor the ACPI
- * DSDT initialize the HFSP register, and it ends up
- * being initially set to 0x07 when it *could* be
- * either 0x07 or 0x80.
- *
- * Enable for TP-1Y (T43), TP-78 (R51e),
- * TP-76 (R52), TP-70 (T43, R52), which are known
- * to be buggy. */
if (fan_control_initial_status == 0x07) {
- switch (thinkpad_id.ec_model) {
- case 0x5931: /* TP-1Y */
- case 0x3837: /* TP-78 */
- case 0x3637: /* TP-76 */
- case 0x3037: /* TP-70 */
- printk(TPACPI_NOTICE
- "fan_init: initial fan status is unknown, "
- "assuming it is in auto mode\n");
- tp_features.fan_ctrl_status_undef = 1;
- ;;
- }
+ printk(TPACPI_NOTICE
+ "fan_init: initial fan status is unknown, "
+ "assuming it is in auto mode\n");
+ tp_features.fan_ctrl_status_undef = 1;
}
}
@@ -6292,6 +6463,38 @@
}
}
+/* Select main fan on X60/X61, NOOP on others */
+static bool fan_select_fan1(void)
+{
+ if (tp_features.second_fan) {
+ u8 val;
+
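+		/* clear EC 0x31 bit 0 so the tachometer reads the main fan */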
+ if (ec_read(fan_select_offset, &val) < 0)
+ return false;
+ val &= 0xFEU;
+ if (ec_write(fan_select_offset, val) < 0)
+ return false;
+ }
+ return true;
+}
+
+/* Select secondary fan on X60/X61 */
+static bool fan_select_fan2(void)
+{
+ u8 val;
+
+ if (!tp_features.second_fan)
+ return false;
+
+ if (ec_read(fan_select_offset, &val) < 0)
+ return false;
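+	/* set EC 0x31 bit 0 so the tachometer reads the auxiliary fan */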
+ val |= 0x01U;
+ if (ec_write(fan_select_offset, val) < 0)
+ return false;
+
+ return true;
+}
+
/*
* Call with fan_mutex held
*/
@@ -6369,6 +6572,8 @@
switch (fan_status_access_mode) {
case TPACPI_FAN_RD_TPEC:
/* all except 570, 600e/x, 770e, 770x */
+ if (unlikely(!fan_select_fan1()))
+ return -EIO;
if (unlikely(!acpi_ec_read(fan_rpm_offset, &lo) ||
!acpi_ec_read(fan_rpm_offset + 1, &hi)))
return -EIO;
@@ -6385,6 +6590,34 @@
return 0;
}
+static int fan2_get_speed(unsigned int *speed)
+{
+ u8 hi, lo;
+ bool rc;
+
+ switch (fan_status_access_mode) {
+ case TPACPI_FAN_RD_TPEC:
+ /* all except 570, 600e/x, 770e, 770x */
+ if (unlikely(!fan_select_fan2()))
+ return -EIO;
+ rc = !acpi_ec_read(fan_rpm_offset, &lo) ||
+ !acpi_ec_read(fan_rpm_offset + 1, &hi);
+ fan_select_fan1(); /* play it safe */
+ if (rc)
+ return -EIO;
+
+ if (likely(speed))
+ *speed = (hi << 8) | lo;
+
+ break;
+
+ default:
+ return -ENXIO;
+ }
+
+ return 0;
+}
+
static int fan_set_level(int level)
{
if (!fan_control_allowed)
@@ -6790,6 +7023,25 @@
__ATTR(fan1_input, S_IRUGO,
fan_fan1_input_show, NULL);
+/* sysfs fan fan2_input ------------------------------------------------ */
+static ssize_t fan_fan2_input_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ int res;
+ unsigned int speed;
+
+ res = fan2_get_speed(&speed);
+ if (res < 0)
+ return res;
+
+ return snprintf(buf, PAGE_SIZE, "%u\n", speed);
+}
+
+static struct device_attribute dev_attr_fan_fan2_input =
+ __ATTR(fan2_input, S_IRUGO,
+ fan_fan2_input_show, NULL);
+
/* sysfs fan fan_watchdog (hwmon driver) ------------------------------- */
static ssize_t fan_fan_watchdog_show(struct device_driver *drv,
char *buf)
@@ -6823,6 +7075,7 @@
static struct attribute *fan_attributes[] = {
&dev_attr_fan_pwm1_enable.attr, &dev_attr_fan_pwm1.attr,
&dev_attr_fan_fan1_input.attr,
+ NULL, /* for fan2_input */
NULL
};
@@ -6830,9 +7083,36 @@
.attrs = fan_attributes,
};
+#define TPACPI_FAN_Q1 0x0001 /* Uninitialized HFSP */
+#define TPACPI_FAN_2FAN 0x0002 /* EC 0x31 bit 0 selects fan2 */
+
+#define TPACPI_FAN_QI(__id1, __id2, __quirks) \
+ { .vendor = PCI_VENDOR_ID_IBM, \
+ .bios = TPACPI_MATCH_ANY, \
+ .ec = TPID(__id1, __id2), \
+ .quirks = __quirks }
+
+#define TPACPI_FAN_QL(__id1, __id2, __quirks) \
+ { .vendor = PCI_VENDOR_ID_LENOVO, \
+ .bios = TPACPI_MATCH_ANY, \
+ .ec = TPID(__id1, __id2), \
+ .quirks = __quirks }
+
+static const struct tpacpi_quirk fan_quirk_table[] __initconst = {
+ TPACPI_FAN_QI('1', 'Y', TPACPI_FAN_Q1),
+ TPACPI_FAN_QI('7', '8', TPACPI_FAN_Q1),
+ TPACPI_FAN_QI('7', '6', TPACPI_FAN_Q1),
+ TPACPI_FAN_QI('7', '0', TPACPI_FAN_Q1),
+ TPACPI_FAN_QL('7', 'M', TPACPI_FAN_2FAN),
+};
+
+#undef TPACPI_FAN_QL
+#undef TPACPI_FAN_QI
+
static int __init fan_init(struct ibm_init_struct *iibm)
{
int rc;
+ unsigned long quirks;
vdbg_printk(TPACPI_DBG_INIT | TPACPI_DBG_FAN,
"initializing fan subdriver\n");
@@ -6843,12 +7123,16 @@
fan_control_commands = 0;
fan_watchdog_maxinterval = 0;
tp_features.fan_ctrl_status_undef = 0;
+ tp_features.second_fan = 0;
fan_control_desired_level = 7;
TPACPI_ACPIHANDLE_INIT(fans);
TPACPI_ACPIHANDLE_INIT(gfan);
TPACPI_ACPIHANDLE_INIT(sfan);
+ quirks = tpacpi_check_quirks(fan_quirk_table,
+ ARRAY_SIZE(fan_quirk_table));
+
if (gfan_handle) {
/* 570, 600e/x, 770e, 770x */
fan_status_access_mode = TPACPI_FAN_RD_ACPI_GFAN;
@@ -6858,7 +7142,13 @@
if (likely(acpi_ec_read(fan_status_offset,
&fan_control_initial_status))) {
fan_status_access_mode = TPACPI_FAN_RD_TPEC;
- fan_quirk1_detect();
+ if (quirks & TPACPI_FAN_Q1)
+ fan_quirk1_setup();
+ if (quirks & TPACPI_FAN_2FAN) {
+ tp_features.second_fan = 1;
+ dbg_printk(TPACPI_DBG_INIT | TPACPI_DBG_FAN,
+ "secondary fan support enabled\n");
+ }
} else {
printk(TPACPI_ERR
"ThinkPad ACPI EC access misbehaving, "
@@ -6914,6 +7204,11 @@
if (fan_status_access_mode != TPACPI_FAN_NONE ||
fan_control_access_mode != TPACPI_FAN_WR_NONE) {
+ if (tp_features.second_fan) {
+ /* attach second fan tachometer */
+ fan_attributes[ARRAY_SIZE(fan_attributes)-2] =
+ &dev_attr_fan_fan2_input.attr;
+ }
rc = sysfs_create_group(&tpacpi_sensors_pdev->dev.kobj,
&fan_attr_group);
if (rc < 0)
@@ -7385,6 +7680,24 @@
/* Probing */
+static bool __pure __init tpacpi_is_fw_digit(const char c)
+{
+ return (c >= '0' && c <= '9') || (c >= 'A' && c <= 'Z');
+}
+
+/* Most models: xxyTkkWW (#.##c); Ancient 570/600 and -SL lack (#.##c) */
+static bool __pure __init tpacpi_is_valid_fw_id(const char* const s,
+ const char t)
+{
+ return s && strlen(s) >= 8 &&
+ tpacpi_is_fw_digit(s[0]) &&
+ tpacpi_is_fw_digit(s[1]) &&
+ s[2] == t && s[3] == 'T' &&
+ tpacpi_is_fw_digit(s[4]) &&
+ tpacpi_is_fw_digit(s[5]) &&
+ s[6] == 'W' && s[7] == 'W';
+}
+
/* returns 0 - probe ok, or < 0 - probe error.
* Probe ok doesn't mean thinkpad found.
* On error, kfree() cleanup on tp->* is not performed, caller must do it */
@@ -7411,10 +7724,15 @@
tp->bios_version_str = kstrdup(s, GFP_KERNEL);
if (s && !tp->bios_version_str)
return -ENOMEM;
- if (!tp->bios_version_str)
+
+ /* Really ancient ThinkPad 240X will fail this, which is fine */
+ if (!tpacpi_is_valid_fw_id(tp->bios_version_str, 'E'))
return 0;
+
tp->bios_model = tp->bios_version_str[0]
| (tp->bios_version_str[1] << 8);
+ tp->bios_release = (tp->bios_version_str[4] << 8)
+ | tp->bios_version_str[5];
/*
* ThinkPad T23 or newer, A31 or newer, R50e or newer,
@@ -7433,8 +7751,21 @@
tp->ec_version_str = kstrdup(ec_fw_string, GFP_KERNEL);
if (!tp->ec_version_str)
return -ENOMEM;
- tp->ec_model = ec_fw_string[0]
- | (ec_fw_string[1] << 8);
+
+ if (tpacpi_is_valid_fw_id(ec_fw_string, 'H')) {
+ tp->ec_model = ec_fw_string[0]
+ | (ec_fw_string[1] << 8);
+ tp->ec_release = (ec_fw_string[4] << 8)
+ | ec_fw_string[5];
+ } else {
+ printk(TPACPI_NOTICE
+ "ThinkPad firmware release %s "
+ "doesn't match the known patterns\n",
+ ec_fw_string);
+ printk(TPACPI_NOTICE
+ "please report this to %s\n",
+ TPACPI_MAIL);
+ }
break;
}
}
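
For illustration, a minimal sketch of how a thinkpad-acpi subdriver consumes the quirk infrastructure added above; the table contents and the 0x0001 flag are hypothetical, and only struct tpacpi_quirk, TPACPI_Q_IBM() and tpacpi_check_quirks() come from the patch (the real users are led_init(), beep_init() and fan_init()):

static const struct tpacpi_quirk example_quirk_table[] __initconst = {
	TPACPI_Q_IBM('1', 'Y', 0x0001),	/* hypothetical flag for the T43 */
};

static int __init example_init(struct ibm_init_struct *iibm)
{
	unsigned long quirks;

	quirks = tpacpi_check_quirks(example_quirk_table,
				     ARRAY_SIZE(example_quirk_table));
	if (quirks & 0x0001) {
		/* apply the model-specific workaround selected by the flag */
	}
	return 0;
}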
diff --git a/drivers/pnp/pnpacpi/rsparser.c b/drivers/pnp/pnpacpi/rsparser.c
index 7f207f3..ef3a2cd3a 100644
--- a/drivers/pnp/pnpacpi/rsparser.c
+++ b/drivers/pnp/pnpacpi/rsparser.c
@@ -287,6 +287,25 @@
ACPI_DECODE_16);
}
+static void pnpacpi_parse_allocated_ext_address_space(struct pnp_dev *dev,
+ struct acpi_resource *res)
+{
+ struct acpi_resource_extended_address64 *p = &res->data.ext_address64;
+
+ if (p->producer_consumer == ACPI_PRODUCER)
+ return;
+
+ if (p->resource_type == ACPI_MEMORY_RANGE)
+ pnpacpi_parse_allocated_memresource(dev,
+ p->minimum, p->address_length,
+ p->info.mem.write_protect);
+ else if (p->resource_type == ACPI_IO_RANGE)
+ pnpacpi_parse_allocated_ioresource(dev,
+ p->minimum, p->address_length,
+ p->granularity == 0xfff ? ACPI_DECODE_10 :
+ ACPI_DECODE_16);
+}
+
static acpi_status pnpacpi_allocated_resource(struct acpi_resource *res,
void *data)
{
@@ -400,8 +419,7 @@
break;
case ACPI_RESOURCE_TYPE_EXTENDED_ADDRESS64:
- if (res->data.ext_address64.producer_consumer == ACPI_PRODUCER)
- return AE_OK;
+ pnpacpi_parse_allocated_ext_address_space(dev, res);
break;
case ACPI_RESOURCE_TYPE_EXTENDED_IRQ:
@@ -630,6 +648,28 @@
IORESOURCE_IO_FIXED);
}
+static __init void pnpacpi_parse_ext_address_option(struct pnp_dev *dev,
+ unsigned int option_flags,
+ struct acpi_resource *r)
+{
+ struct acpi_resource_extended_address64 *p = &r->data.ext_address64;
+ unsigned char flags = 0;
+
+ if (p->address_length == 0)
+ return;
+
+ if (p->resource_type == ACPI_MEMORY_RANGE) {
+ if (p->info.mem.write_protect == ACPI_READ_WRITE_MEMORY)
+ flags = IORESOURCE_MEM_WRITEABLE;
+ pnp_register_mem_resource(dev, option_flags, p->minimum,
+ p->minimum, 0, p->address_length,
+ flags);
+ } else if (p->resource_type == ACPI_IO_RANGE)
+ pnp_register_port_resource(dev, option_flags, p->minimum,
+ p->minimum, 0, p->address_length,
+ IORESOURCE_IO_FIXED);
+}
+
struct acpipnp_parse_option_s {
struct pnp_dev *dev;
unsigned int option_flags;
@@ -711,6 +751,7 @@
break;
case ACPI_RESOURCE_TYPE_EXTENDED_ADDRESS64:
+ pnpacpi_parse_ext_address_option(dev, option_flags, res);
break;
case ACPI_RESOURCE_TYPE_EXTENDED_IRQ:
@@ -765,6 +806,7 @@
case ACPI_RESOURCE_TYPE_ADDRESS16:
case ACPI_RESOURCE_TYPE_ADDRESS32:
case ACPI_RESOURCE_TYPE_ADDRESS64:
+ case ACPI_RESOURCE_TYPE_EXTENDED_ADDRESS64:
case ACPI_RESOURCE_TYPE_EXTENDED_IRQ:
return 1;
}
diff --git a/drivers/power/Kconfig b/drivers/power/Kconfig
index 33da112..7eda348 100644
--- a/drivers/power/Kconfig
+++ b/drivers/power/Kconfig
@@ -82,6 +82,14 @@
Say Y here to enable support for the battery charger integrated into
the DA9030 PMIC.
+config BATTERY_MAX17040
+ tristate "Maxim MAX17040 Fuel Gauge"
+ depends on I2C
+ help
+	  The MAX17040 is a fuel-gauge system for lithium-ion (Li+) batteries
+	  in handheld and portable equipment. It is configured to operate
+	  with a single lithium cell.
+
config CHARGER_PCF50633
tristate "NXP PCF50633 MBC"
depends on MFD_PCF50633
diff --git a/drivers/power/Makefile b/drivers/power/Makefile
index 2fcf41d..daf3179 100644
--- a/drivers/power/Makefile
+++ b/drivers/power/Makefile
@@ -25,4 +25,5 @@
obj-$(CONFIG_BATTERY_WM97XX) += wm97xx_battery.o
obj-$(CONFIG_BATTERY_BQ27x00) += bq27x00_battery.o
obj-$(CONFIG_BATTERY_DA9030) += da9030_battery.o
-obj-$(CONFIG_CHARGER_PCF50633) += pcf50633-charger.o
\ No newline at end of file
+obj-$(CONFIG_BATTERY_MAX17040) += max17040_battery.o
+obj-$(CONFIG_CHARGER_PCF50633) += pcf50633-charger.o
diff --git a/drivers/power/da9030_battery.c b/drivers/power/da9030_battery.c
index 1662bb0..3364198 100644
--- a/drivers/power/da9030_battery.c
+++ b/drivers/power/da9030_battery.c
@@ -22,8 +22,6 @@
#include <linux/debugfs.h>
#include <linux/seq_file.h>
-#define DA9030_STATUS_CHDET (1 << 3)
-
#define DA9030_FAULT_LOG 0x0a
#define DA9030_FAULT_LOG_OVER_TEMP (1 << 7)
#define DA9030_FAULT_LOG_VBAT_OVER (1 << 4)
@@ -244,6 +242,8 @@
}
da903x_write(charger->master, DA9030_CHARGE_CONTROL, val);
+
+ power_supply_changed(&charger->psy);
}
static void da9030_charger_check_state(struct da9030_charger *charger)
@@ -258,6 +258,12 @@
da9030_set_charge(charger, 1);
}
} else {
+ /* Charger has been pulled out */
+ if (!charger->chdet) {
+ da9030_set_charge(charger, 0);
+ return;
+ }
+
if (charger->adc.vbat_res >=
charger->thresholds.vbat_charge_stop) {
da9030_set_charge(charger, 0);
@@ -395,13 +401,11 @@
{
struct da9030_charger *charger =
container_of(nb, struct da9030_charger, nb);
- int status;
switch (event) {
case DA9030_EVENT_CHDET:
- status = da903x_query_status(charger->master,
- DA9030_STATUS_CHDET);
- da9030_set_charge(charger, status);
+ cancel_delayed_work_sync(&charger->work);
+ schedule_work(&charger->work.work);
break;
case DA9030_EVENT_VBATMON:
da9030_battery_vbat_event(charger);
@@ -565,7 +569,8 @@
da903x_unregister_notifier(charger->master, &charger->nb,
DA9030_EVENT_CHDET | DA9030_EVENT_VBATMON |
DA9030_EVENT_CHIOVER | DA9030_EVENT_TBAT);
- cancel_delayed_work(&charger->work);
+ cancel_delayed_work_sync(&charger->work);
+ da9030_set_charge(charger, 0);
power_supply_unregister(&charger->psy);
kfree(charger);
diff --git a/drivers/power/ds2760_battery.c b/drivers/power/ds2760_battery.c
index a52d4a1..520b5c4 100644
--- a/drivers/power/ds2760_battery.c
+++ b/drivers/power/ds2760_battery.c
@@ -62,6 +62,10 @@
module_param(cache_time, uint, 0644);
MODULE_PARM_DESC(cache_time, "cache time in milliseconds");
+static unsigned int pmod_enabled;
+module_param(pmod_enabled, bool, 0644);
+MODULE_PARM_DESC(pmod_enabled, "PMOD enable bit");
+
/* Some batteries have their rated capacity stored as N * 10 mAh, while
* others use an index into this table. */
static int rated_capacities[] = {
@@ -259,6 +263,17 @@
power_supply_changed(&di->bat);
}
+static void ds2760_battery_write_status(struct ds2760_device_info *di,
+ char status)
+{
+ if (status == di->raw[DS2760_STATUS_REG])
+ return;
+
+ w1_ds2760_write(di->w1_dev, &status, DS2760_STATUS_WRITE_REG, 1);
+ w1_ds2760_store_eeprom(di->w1_dev, DS2760_EEPROM_BLOCK1);
+ w1_ds2760_recall_eeprom(di->w1_dev, DS2760_EEPROM_BLOCK1);
+}
+
static void ds2760_battery_work(struct work_struct *work)
{
struct ds2760_device_info *di = container_of(work,
@@ -342,9 +357,9 @@
static int ds2760_battery_probe(struct platform_device *pdev)
{
+ char status;
int retval = 0;
struct ds2760_device_info *di;
- struct ds2760_platform_data *pdata;
di = kzalloc(sizeof(*di), GFP_KERNEL);
if (!di) {
@@ -354,14 +369,13 @@
platform_set_drvdata(pdev, di);
- pdata = pdev->dev.platform_data;
- di->dev = &pdev->dev;
- di->w1_dev = pdev->dev.parent;
- di->bat.name = dev_name(&pdev->dev);
- di->bat.type = POWER_SUPPLY_TYPE_BATTERY;
- di->bat.properties = ds2760_battery_props;
- di->bat.num_properties = ARRAY_SIZE(ds2760_battery_props);
- di->bat.get_property = ds2760_battery_get_property;
+ di->dev = &pdev->dev;
+ di->w1_dev = pdev->dev.parent;
+ di->bat.name = dev_name(&pdev->dev);
+ di->bat.type = POWER_SUPPLY_TYPE_BATTERY;
+ di->bat.properties = ds2760_battery_props;
+ di->bat.num_properties = ARRAY_SIZE(ds2760_battery_props);
+ di->bat.get_property = ds2760_battery_get_property;
di->bat.external_power_changed =
ds2760_battery_external_power_changed;
@@ -373,6 +387,16 @@
goto batt_failed;
}
+ /* enable sleep mode feature */
+ ds2760_battery_read_status(di);
+ status = di->raw[DS2760_STATUS_REG];
+ if (pmod_enabled)
+ status |= DS2760_STATUS_PMOD;
+ else
+ status &= ~DS2760_STATUS_PMOD;
+
+ ds2760_battery_write_status(di, status);
+
INIT_DELAYED_WORK(&di->monitor_work, ds2760_battery_work);
di->monitor_wqueue = create_singlethread_workqueue(dev_name(&pdev->dev));
if (!di->monitor_wqueue) {
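
As a usage note for the new parameter above: loading the driver with, e.g., "modprobe ds2760_battery pmod_enabled=1" (module name assumed from the source file) makes the probe path set the PMOD bit in the DS2760 status register, enabling the chip's sleep-mode feature.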
diff --git a/drivers/power/max17040_battery.c b/drivers/power/max17040_battery.c
new file mode 100644
index 0000000..87b98bf
--- /dev/null
+++ b/drivers/power/max17040_battery.c
@@ -0,0 +1,309 @@
+/*
+ * max17040_battery.c
+ * fuel gauge driver for lithium-ion (Li+) batteries
+ *
+ * Copyright (C) 2009 Samsung Electronics
+ * Minkyu Kang <mk7.kang@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/mutex.h>
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/delay.h>
+#include <linux/power_supply.h>
+#include <linux/max17040_battery.h>
+
+#define MAX17040_VCELL_MSB 0x02
+#define MAX17040_VCELL_LSB 0x03
+#define MAX17040_SOC_MSB 0x04
+#define MAX17040_SOC_LSB 0x05
+#define MAX17040_MODE_MSB 0x06
+#define MAX17040_MODE_LSB 0x07
+#define MAX17040_VER_MSB 0x08
+#define MAX17040_VER_LSB 0x09
+#define MAX17040_RCOMP_MSB 0x0C
+#define MAX17040_RCOMP_LSB 0x0D
+#define MAX17040_CMD_MSB 0xFE
+#define MAX17040_CMD_LSB 0xFF
+
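+/* polling interval, passed directly to schedule_delayed_work(), so in jiffies */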
+#define MAX17040_DELAY 1000
+#define MAX17040_BATTERY_FULL 95
+
+struct max17040_chip {
+ struct i2c_client *client;
+ struct delayed_work work;
+ struct power_supply battery;
+ struct max17040_platform_data *pdata;
+
+	/* power supply online state */
+	int online;
+	/* battery voltage */
+	int vcell;
+	/* battery capacity (state of charge, %) */
+	int soc;
+	/* charging status */
+	int status;
+};
+
+static int max17040_get_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ struct max17040_chip *chip = container_of(psy,
+ struct max17040_chip, battery);
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_STATUS:
+ val->intval = chip->status;
+ break;
+ case POWER_SUPPLY_PROP_ONLINE:
+ val->intval = chip->online;
+ break;
+ case POWER_SUPPLY_PROP_VOLTAGE_NOW:
+ val->intval = chip->vcell;
+ break;
+ case POWER_SUPPLY_PROP_CAPACITY:
+ val->intval = chip->soc;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int max17040_write_reg(struct i2c_client *client, int reg, u8 value)
+{
+ int ret;
+
+ ret = i2c_smbus_write_byte_data(client, reg, value);
+
+ if (ret < 0)
+ dev_err(&client->dev, "%s: err %d\n", __func__, ret);
+
+ return ret;
+}
+
+static int max17040_read_reg(struct i2c_client *client, int reg)
+{
+ int ret;
+
+ ret = i2c_smbus_read_byte_data(client, reg);
+
+ if (ret < 0)
+ dev_err(&client->dev, "%s: err %d\n", __func__, ret);
+
+ return ret;
+}
+
+static void max17040_reset(struct i2c_client *client)
+{
+ max17040_write_reg(client, MAX17040_CMD_MSB, 0x54);
+ max17040_write_reg(client, MAX17040_CMD_LSB, 0x00);
+}
+
+static void max17040_get_vcell(struct i2c_client *client)
+{
+ struct max17040_chip *chip = i2c_get_clientdata(client);
+ u8 msb;
+ u8 lsb;
+
+ msb = max17040_read_reg(client, MAX17040_VCELL_MSB);
+ lsb = max17040_read_reg(client, MAX17040_VCELL_LSB);
+
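+	/* VCELL is a 12-bit reading: 8 bits from the MSB register plus the
+	 * top 4 bits of the LSB register; the raw value is stored as-is
+	 * (the datasheet LSB weight of 1.25 mV is assumed, not applied here) */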
+ chip->vcell = (msb << 4) + (lsb >> 4);
+}
+
+static void max17040_get_soc(struct i2c_client *client)
+{
+ struct max17040_chip *chip = i2c_get_clientdata(client);
+ u8 msb;
+ u8 lsb;
+
+ msb = max17040_read_reg(client, MAX17040_SOC_MSB);
+ lsb = max17040_read_reg(client, MAX17040_SOC_LSB);
+
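+	/* the SOC MSB is the charge percentage; the fractional LSB
+	 * (1/256 %) is read but not used */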
+ chip->soc = msb;
+}
+
+static void max17040_get_version(struct i2c_client *client)
+{
+ u8 msb;
+ u8 lsb;
+
+ msb = max17040_read_reg(client, MAX17040_VER_MSB);
+ lsb = max17040_read_reg(client, MAX17040_VER_LSB);
+
+ dev_info(&client->dev, "MAX17040 Fuel-Gauge Ver %d%d\n", msb, lsb);
+}
+
+static void max17040_get_online(struct i2c_client *client)
+{
+ struct max17040_chip *chip = i2c_get_clientdata(client);
+
+ if (chip->pdata->battery_online)
+ chip->online = chip->pdata->battery_online();
+ else
+ chip->online = 1;
+}
+
+static void max17040_get_status(struct i2c_client *client)
+{
+ struct max17040_chip *chip = i2c_get_clientdata(client);
+
+ if (!chip->pdata->charger_online || !chip->pdata->charger_enable) {
+ chip->status = POWER_SUPPLY_STATUS_UNKNOWN;
+ return;
+ }
+
+ if (chip->pdata->charger_online()) {
+ if (chip->pdata->charger_enable())
+ chip->status = POWER_SUPPLY_STATUS_CHARGING;
+ else
+ chip->status = POWER_SUPPLY_STATUS_NOT_CHARGING;
+ } else {
+ chip->status = POWER_SUPPLY_STATUS_DISCHARGING;
+ }
+
+ if (chip->soc > MAX17040_BATTERY_FULL)
+ chip->status = POWER_SUPPLY_STATUS_FULL;
+}
+
+static void max17040_work(struct work_struct *work)
+{
+ struct max17040_chip *chip;
+
+ chip = container_of(work, struct max17040_chip, work.work);
+
+ max17040_get_vcell(chip->client);
+ max17040_get_soc(chip->client);
+ max17040_get_online(chip->client);
+ max17040_get_status(chip->client);
+
+ schedule_delayed_work(&chip->work, MAX17040_DELAY);
+}
+
+static enum power_supply_property max17040_battery_props[] = {
+ POWER_SUPPLY_PROP_STATUS,
+ POWER_SUPPLY_PROP_ONLINE,
+ POWER_SUPPLY_PROP_VOLTAGE_NOW,
+ POWER_SUPPLY_PROP_CAPACITY,
+};
+
+static int __devinit max17040_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent);
+ struct max17040_chip *chip;
+ int ret;
+
+ if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE))
+ return -EIO;
+
+ chip = kzalloc(sizeof(*chip), GFP_KERNEL);
+ if (!chip)
+ return -ENOMEM;
+
+ chip->client = client;
+ chip->pdata = client->dev.platform_data;
+
+ i2c_set_clientdata(client, chip);
+
+ chip->battery.name = "battery";
+ chip->battery.type = POWER_SUPPLY_TYPE_BATTERY;
+ chip->battery.get_property = max17040_get_property;
+ chip->battery.properties = max17040_battery_props;
+ chip->battery.num_properties = ARRAY_SIZE(max17040_battery_props);
+
+ ret = power_supply_register(&client->dev, &chip->battery);
+ if (ret) {
+ dev_err(&client->dev, "failed: power supply register\n");
+ i2c_set_clientdata(client, NULL);
+ kfree(chip);
+ return ret;
+ }
+
+ max17040_reset(client);
+ max17040_get_version(client);
+
+ INIT_DELAYED_WORK_DEFERRABLE(&chip->work, max17040_work);
+ schedule_delayed_work(&chip->work, MAX17040_DELAY);
+
+ return 0;
+}
+
+static int __devexit max17040_remove(struct i2c_client *client)
+{
+ struct max17040_chip *chip = i2c_get_clientdata(client);
+
+ power_supply_unregister(&chip->battery);
+ cancel_delayed_work(&chip->work);
+ i2c_set_clientdata(client, NULL);
+ kfree(chip);
+ return 0;
+}
+
+#ifdef CONFIG_PM
+
+static int max17040_suspend(struct i2c_client *client,
+ pm_message_t state)
+{
+ struct max17040_chip *chip = i2c_get_clientdata(client);
+
+ cancel_delayed_work(&chip->work);
+ return 0;
+}
+
+static int max17040_resume(struct i2c_client *client)
+{
+ struct max17040_chip *chip = i2c_get_clientdata(client);
+
+ schedule_delayed_work(&chip->work, MAX17040_DELAY);
+ return 0;
+}
+
+#else
+
+#define max17040_suspend NULL
+#define max17040_resume NULL
+
+#endif /* CONFIG_PM */
+
+static const struct i2c_device_id max17040_id[] = {
+ { "max17040", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, max17040_id);
+
+static struct i2c_driver max17040_i2c_driver = {
+ .driver = {
+ .name = "max17040",
+ },
+ .probe = max17040_probe,
+ .remove = __devexit_p(max17040_remove),
+ .suspend = max17040_suspend,
+ .resume = max17040_resume,
+ .id_table = max17040_id,
+};
+
+static int __init max17040_init(void)
+{
+ return i2c_add_driver(&max17040_i2c_driver);
+}
+module_init(max17040_init);
+
+static void __exit max17040_exit(void)
+{
+ i2c_del_driver(&max17040_i2c_driver);
+}
+module_exit(max17040_exit);
+
+MODULE_AUTHOR("Minkyu Kang <mk7.kang@samsung.com>");
+MODULE_DESCRIPTION("MAX17040 Fuel Gauge");
+MODULE_LICENSE("GPL");
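
For illustration, a minimal sketch of how board code might wire this driver up; the GPIO numbers and helper names are hypothetical, while the "max17040" I2C name and the three callbacks are taken from the driver above (struct max17040_platform_data in <linux/max17040_battery.h> is assumed to carry exactly those function pointers, and 0x36 is assumed to be the chip's 7-bit I2C address):

#include <linux/i2c.h>
#include <linux/gpio.h>
#include <linux/max17040_battery.h>

static int example_battery_online(void)
{
	return 1;			/* battery soldered in on this board */
}

static int example_charger_online(void)
{
	return gpio_get_value(42);	/* hypothetical charger-detect GPIO */
}

static int example_charger_enable(void)
{
	return gpio_get_value(43);	/* hypothetical charging-active GPIO */
}

static struct max17040_platform_data example_max17040_pdata = {
	.battery_online	= example_battery_online,
	.charger_online	= example_charger_online,
	.charger_enable	= example_charger_enable,
};

static struct i2c_board_info example_i2c_devs[] __initdata = {
	{
		I2C_BOARD_INFO("max17040", 0x36),
		.platform_data	= &example_max17040_pdata,
	},
};

/* registered from the board's init code, e.g.:
 * i2c_register_board_info(0, example_i2c_devs, ARRAY_SIZE(example_i2c_devs));
 */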
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index e5b84db..7498366 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -470,7 +470,7 @@
*/
static void dasd_change_state(struct dasd_device *device)
{
- int rc;
+ int rc;
if (device->state == device->target)
/* Already where we want to go today... */
@@ -479,8 +479,10 @@
rc = dasd_increase_state(device);
else
rc = dasd_decrease_state(device);
- if (rc && rc != -EAGAIN)
- device->target = device->state;
+ if (rc == -EAGAIN)
+ return;
+ if (rc)
+ device->target = device->state;
if (device->state == device->target) {
wake_up(&dasd_init_waitq);
@@ -2503,15 +2505,25 @@
if (IS_ERR(device))
return PTR_ERR(device);
+ /* allow new IO again */
+ device->stopped &= ~DASD_STOPPED_PM;
+ device->stopped &= ~DASD_UNRESUMED_PM;
+
dasd_schedule_device_bh(device);
if (device->block)
dasd_schedule_block_bh(device->block);
if (device->discipline->restore)
rc = device->discipline->restore(device);
+ if (rc)
+ /*
+ * if the resume failed for the DASD we put it in
+ * an UNRESUMED stop state
+ */
+ device->stopped |= DASD_UNRESUMED_PM;
dasd_put_device(device);
- return rc;
+ return 0;
}
EXPORT_SYMBOL_GPL(dasd_generic_restore_device);
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index 1c28ec3..f8b1f04 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -3243,9 +3243,6 @@
int is_known, rc;
struct dasd_uid temp_uid;
- /* allow new IO again */
- device->stopped &= ~DASD_STOPPED_PM;
-
private = (struct dasd_eckd_private *) device->private;
/* Read Configuration Data */
@@ -3295,12 +3292,7 @@
return 0;
out_err:
- /*
- * if the resume failed for the DASD we put it in
- * an UNRESUMED stop state
- */
- device->stopped |= DASD_UNRESUMED_PM;
- return 0;
+ return -1;
}
static struct ccw_driver dasd_eckd_driver = {
diff --git a/drivers/s390/char/con3215.c b/drivers/s390/char/con3215.c
index 04dc734..21639d6 100644
--- a/drivers/s390/char/con3215.c
+++ b/drivers/s390/char/con3215.c
@@ -20,10 +20,7 @@
#include <linux/interrupt.h>
#include <linux/err.h>
#include <linux/reboot.h>
-
#include <linux/slab.h>
-#include <linux/bootmem.h>
-
#include <asm/ccwdev.h>
#include <asm/cio.h>
#include <asm/io.h>
@@ -735,7 +732,7 @@
unsigned long flags;
/* Empty the output buffer, then prevent new I/O. */
- raw = cdev->dev.driver_data;
+ raw = dev_get_drvdata(&cdev->dev);
spin_lock_irqsave(get_ccwdev_lock(raw->cdev), flags);
raw3215_make_room(raw, RAW3215_BUFFER_SIZE);
raw->flags |= RAW3215_FROZEN;
@@ -749,7 +746,7 @@
unsigned long flags;
/* Allow I/O again and flush output buffer. */
- raw = cdev->dev.driver_data;
+ raw = dev_get_drvdata(&cdev->dev);
spin_lock_irqsave(get_ccwdev_lock(raw->cdev), flags);
raw->flags &= ~RAW3215_FROZEN;
raw->flags |= RAW3215_FLUSHING;
@@ -883,7 +880,7 @@
raw3215_freelist = NULL;
spin_lock_init(&raw3215_freelist_lock);
for (i = 0; i < NR_3215_REQ; i++) {
- req = (struct raw3215_req *) alloc_bootmem_low(sizeof(struct raw3215_req));
+ req = kzalloc(sizeof(struct raw3215_req), GFP_KERNEL | GFP_DMA);
req->next = raw3215_freelist;
raw3215_freelist = req;
}
@@ -893,10 +890,9 @@
return -ENODEV;
raw3215[0] = raw = (struct raw3215_info *)
- alloc_bootmem_low(sizeof(struct raw3215_info));
- memset(raw, 0, sizeof(struct raw3215_info));
- raw->buffer = (char *) alloc_bootmem_low(RAW3215_BUFFER_SIZE);
- raw->inbuf = (char *) alloc_bootmem_low(RAW3215_INBUF_SIZE);
+ kzalloc(sizeof(struct raw3215_info), GFP_KERNEL | GFP_DMA);
+ raw->buffer = kzalloc(RAW3215_BUFFER_SIZE, GFP_KERNEL | GFP_DMA);
+ raw->inbuf = kzalloc(RAW3215_INBUF_SIZE, GFP_KERNEL | GFP_DMA);
raw->cdev = cdev;
dev_set_drvdata(&cdev->dev, raw);
cdev->handler = raw3215_irq;
@@ -906,9 +902,9 @@
/* Request the console irq */
if (raw3215_startup(raw) != 0) {
- free_bootmem((unsigned long) raw->inbuf, RAW3215_INBUF_SIZE);
- free_bootmem((unsigned long) raw->buffer, RAW3215_BUFFER_SIZE);
- free_bootmem((unsigned long) raw, sizeof(struct raw3215_info));
+ kfree(raw->inbuf);
+ kfree(raw->buffer);
+ kfree(raw);
raw3215[0] = NULL;
return -ENODEV;
}
diff --git a/drivers/s390/char/con3270.c b/drivers/s390/char/con3270.c
index 44d02e3..bb838bd 100644
--- a/drivers/s390/char/con3270.c
+++ b/drivers/s390/char/con3270.c
@@ -7,7 +7,6 @@
* Copyright IBM Corp. 2003, 2009
*/
-#include <linux/bootmem.h>
#include <linux/console.h>
#include <linux/init.h>
#include <linux/interrupt.h>
@@ -600,16 +599,14 @@
if (IS_ERR(rp))
return PTR_ERR(rp);
- condev = (struct con3270 *) alloc_bootmem_low(sizeof(struct con3270));
- memset(condev, 0, sizeof(struct con3270));
+ condev = kzalloc(sizeof(struct con3270), GFP_KERNEL | GFP_DMA);
condev->view.dev = rp;
- condev->read = raw3270_request_alloc_bootmem(0);
+ condev->read = raw3270_request_alloc(0);
condev->read->callback = con3270_read_callback;
condev->read->callback_data = condev;
- condev->write =
- raw3270_request_alloc_bootmem(CON3270_OUTPUT_BUFFER_SIZE);
- condev->kreset = raw3270_request_alloc_bootmem(1);
+ condev->write = raw3270_request_alloc(CON3270_OUTPUT_BUFFER_SIZE);
+ condev->kreset = raw3270_request_alloc(1);
INIT_LIST_HEAD(&condev->lines);
INIT_LIST_HEAD(&condev->update);
@@ -623,7 +620,7 @@
INIT_LIST_HEAD(&condev->freemem);
for (i = 0; i < CON3270_STRING_PAGES; i++) {
- cbuf = (void *) alloc_bootmem_low_pages(PAGE_SIZE);
+ cbuf = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
add_string_memory(&condev->freemem, cbuf, PAGE_SIZE);
}
condev->cline = alloc_string(&condev->freemem, condev->view.cols);
diff --git a/drivers/s390/char/monreader.c b/drivers/s390/char/monreader.c
index 75a8831..7892550 100644
--- a/drivers/s390/char/monreader.c
+++ b/drivers/s390/char/monreader.c
@@ -320,7 +320,7 @@
goto out_path;
}
filp->private_data = monpriv;
- monreader_device->driver_data = monpriv;
+	dev_set_drvdata(monreader_device, monpriv);
unlock_kernel();
return nonseekable_open(inode, filp);
@@ -463,7 +463,7 @@
*****************************************************************************/
static int monreader_freeze(struct device *dev)
{
- struct mon_private *monpriv = dev->driver_data;
+	struct mon_private *monpriv = dev_get_drvdata(dev);
int rc;
if (!monpriv)
@@ -487,7 +487,7 @@
static int monreader_thaw(struct device *dev)
{
- struct mon_private *monpriv = dev->driver_data;
+ struct mon_private *monpriv = dev_get_drvdata(dev);
int rc;
if (!monpriv)
diff --git a/drivers/s390/char/raw3270.c b/drivers/s390/char/raw3270.c
index acab7b2..d6a022f 100644
--- a/drivers/s390/char/raw3270.c
+++ b/drivers/s390/char/raw3270.c
@@ -7,7 +7,6 @@
* Copyright IBM Corp. 2003, 2009
*/
-#include <linux/bootmem.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/init.h>
@@ -143,33 +142,6 @@
return rq;
}
-#ifdef CONFIG_TN3270_CONSOLE
-/*
- * Allocate a new 3270 ccw request from bootmem. Only works very
- * early in the boot process. Only con3270.c should be using this.
- */
-struct raw3270_request __init *raw3270_request_alloc_bootmem(size_t size)
-{
- struct raw3270_request *rq;
-
- rq = alloc_bootmem_low(sizeof(struct raw3270));
-
- /* alloc output buffer. */
- if (size > 0)
- rq->buffer = alloc_bootmem_low(size);
- rq->size = size;
- INIT_LIST_HEAD(&rq->list);
-
- /*
- * Setup ccw.
- */
- rq->ccw.cda = __pa(rq->buffer);
- rq->ccw.flags = CCW_FLAG_SLI;
-
- return rq;
-}
-#endif
-
/*
* Free 3270 ccw request
*/
@@ -846,8 +818,8 @@
char *ascebc;
int rc;
- rp = (struct raw3270 *) alloc_bootmem_low(sizeof(struct raw3270));
- ascebc = (char *) alloc_bootmem(256);
+ rp = kzalloc(sizeof(struct raw3270), GFP_KERNEL | GFP_DMA);
+ ascebc = kzalloc(256, GFP_KERNEL);
rc = raw3270_setup_device(cdev, rp, ascebc);
if (rc)
return ERR_PTR(rc);
@@ -1350,7 +1322,7 @@
struct raw3270_view *view;
unsigned long flags;
- rp = cdev->dev.driver_data;
+ rp = dev_get_drvdata(&cdev->dev);
if (!rp)
return 0;
spin_lock_irqsave(get_ccwdev_lock(rp->cdev), flags);
@@ -1376,7 +1348,7 @@
struct raw3270 *rp;
unsigned long flags;
- rp = cdev->dev.driver_data;
+ rp = dev_get_drvdata(&cdev->dev);
if (!rp)
return 0;
spin_lock_irqsave(get_ccwdev_lock(rp->cdev), flags);
diff --git a/drivers/s390/char/sclp_con.c b/drivers/s390/char/sclp_con.c
index 336811a..ad698d3 100644
--- a/drivers/s390/char/sclp_con.c
+++ b/drivers/s390/char/sclp_con.c
@@ -11,7 +11,6 @@
#include <linux/init.h>
#include <linux/timer.h>
#include <linux/jiffies.h>
-#include <linux/bootmem.h>
#include <linux/termios.h>
#include <linux/err.h>
#include <linux/reboot.h>
@@ -110,7 +109,7 @@
spin_lock_irqsave(&sclp_con_lock, flags);
if (timer_pending(&sclp_con_timer))
- del_timer_sync(&sclp_con_timer);
+ del_timer(&sclp_con_timer);
while (sclp_con_queue_running) {
spin_unlock_irqrestore(&sclp_con_lock, flags);
sclp_sync_wait();
@@ -298,8 +297,8 @@
/* Allocate pages for output buffering */
INIT_LIST_HEAD(&sclp_con_pages);
for (i = 0; i < MAX_CONSOLE_PAGES; i++) {
- page = alloc_bootmem_low_pages(PAGE_SIZE);
- list_add_tail((struct list_head *) page, &sclp_con_pages);
+ page = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
+ list_add_tail(page, &sclp_con_pages);
}
INIT_LIST_HEAD(&sclp_con_outqueue);
spin_lock_init(&sclp_con_lock);
diff --git a/drivers/s390/char/sclp_vt220.c b/drivers/s390/char/sclp_vt220.c
index 5518e24..178724f 100644
--- a/drivers/s390/char/sclp_vt220.c
+++ b/drivers/s390/char/sclp_vt220.c
@@ -20,7 +20,6 @@
#include <linux/major.h>
#include <linux/console.h>
#include <linux/kdev_t.h>
-#include <linux/bootmem.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/reboot.h>
@@ -601,10 +600,7 @@
list_for_each_safe(page, p, &sclp_vt220_empty) {
list_del(page);
- if (slab_is_available())
- free_page((unsigned long) page);
- else
- free_bootmem((unsigned long) page, PAGE_SIZE);
+ free_page((unsigned long) page);
}
}
@@ -640,16 +636,12 @@
sclp_vt220_flush_later = 0;
/* Allocate pages for output buffering */
+ rc = -ENOMEM;
for (i = 0; i < num_pages; i++) {
- if (slab_is_available())
- page = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
- else
- page = alloc_bootmem_low_pages(PAGE_SIZE);
- if (!page) {
- rc = -ENOMEM;
+ page = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
+ if (!page)
goto out;
- }
- list_add_tail((struct list_head *) page, &sclp_vt220_empty);
+ list_add_tail(page, &sclp_vt220_empty);
}
rc = sclp_register(&sclp_vt220_register);
out:
diff --git a/drivers/s390/char/tape_core.c b/drivers/s390/char/tape_core.c
index 595aa04..1d420d9 100644
--- a/drivers/s390/char/tape_core.c
+++ b/drivers/s390/char/tape_core.c
@@ -396,7 +396,7 @@
{
struct tape_device *device;
- device = cdev->dev.driver_data;
+ device = dev_get_drvdata(&cdev->dev);
if (!device) {
return -ENODEV;
}
diff --git a/drivers/s390/char/vmlogrdr.c b/drivers/s390/char/vmlogrdr.c
index 411cfa3..c20a4fe 100644
--- a/drivers/s390/char/vmlogrdr.c
+++ b/drivers/s390/char/vmlogrdr.c
@@ -663,7 +663,7 @@
static int vmlogrdr_pm_prepare(struct device *dev)
{
int rc;
- struct vmlogrdr_priv_t *priv = dev->driver_data;
+ struct vmlogrdr_priv_t *priv = dev_get_drvdata(dev);
rc = 0;
if (priv) {
@@ -753,7 +753,7 @@
dev->bus = &iucv_bus;
dev->parent = iucv_root;
dev->driver = &vmlogrdr_driver;
- dev->driver_data = priv;
+ dev_set_drvdata(dev, priv);
/*
* The release function could be called after the
* module has been unloaded. It's _only_ task is to
diff --git a/drivers/s390/char/vmur.c b/drivers/s390/char/vmur.c
index 7d9e67c..31b902e 100644
--- a/drivers/s390/char/vmur.c
+++ b/drivers/s390/char/vmur.c
@@ -170,7 +170,7 @@
*/
static int ur_pm_suspend(struct ccw_device *cdev)
{
- struct urdev *urd = cdev->dev.driver_data;
+ struct urdev *urd = dev_get_drvdata(&cdev->dev);
TRACE("ur_pm_suspend: cdev=%p\n", cdev);
if (urd->open_flag) {
diff --git a/drivers/s390/cio/qdio.h b/drivers/s390/cio/qdio.h
index 13bcb81..b1241f8 100644
--- a/drivers/s390/cio/qdio.h
+++ b/drivers/s390/cio/qdio.h
@@ -351,15 +351,6 @@
((bufnr - dec) & QDIO_MAX_BUFFERS_MASK)
/* prototypes for thin interrupt */
-void qdio_sync_after_thinint(struct qdio_q *q);
-int get_buf_state(struct qdio_q *q, unsigned int bufnr, unsigned char *state,
- int auto_ack);
-void qdio_check_outbound_after_thinint(struct qdio_q *q);
-int qdio_inbound_q_moved(struct qdio_q *q);
-void qdio_kick_handler(struct qdio_q *q);
-void qdio_stop_polling(struct qdio_q *q);
-int qdio_siga_sync_q(struct qdio_q *q);
-
void qdio_setup_thinint(struct qdio_irq *irq_ptr);
int qdio_establish_thinint(struct qdio_irq *irq_ptr);
void qdio_shutdown_thinint(struct qdio_irq *irq_ptr);
@@ -392,4 +383,6 @@
int qdio_setup_init(void);
void qdio_setup_exit(void);
+int debug_get_buf_state(struct qdio_q *q, unsigned int bufnr,
+ unsigned char *state);
#endif /* _CIO_QDIO_H */
diff --git a/drivers/s390/cio/qdio_debug.c b/drivers/s390/cio/qdio_debug.c
index e3434b3..b8626d4 100644
--- a/drivers/s390/cio/qdio_debug.c
+++ b/drivers/s390/cio/qdio_debug.c
@@ -70,9 +70,8 @@
seq_printf(m, "slsb buffer states:\n");
seq_printf(m, "|0 |8 |16 |24 |32 |40 |48 |56 63|\n");
- qdio_siga_sync_q(q);
for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; i++) {
- get_buf_state(q, i, &state, 0);
+ debug_get_buf_state(q, i, &state);
switch (state) {
case SLSB_P_INPUT_NOT_INIT:
case SLSB_P_OUTPUT_NOT_INIT:
diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c
index d79cf5b..0038750 100644
--- a/drivers/s390/cio/qdio_main.c
+++ b/drivers/s390/cio/qdio_main.c
@@ -231,8 +231,8 @@
return i;
}
-inline int get_buf_state(struct qdio_q *q, unsigned int bufnr,
- unsigned char *state, int auto_ack)
+static inline int get_buf_state(struct qdio_q *q, unsigned int bufnr,
+ unsigned char *state, int auto_ack)
{
return get_buf_states(q, bufnr, state, 1, auto_ack);
}
@@ -276,7 +276,7 @@
QDIO_MAX_BUFFERS_PER_Q);
}
-static int qdio_siga_sync(struct qdio_q *q, unsigned int output,
+static inline int qdio_siga_sync(struct qdio_q *q, unsigned int output,
unsigned int input)
{
int cc;
@@ -293,7 +293,7 @@
return cc;
}
-inline int qdio_siga_sync_q(struct qdio_q *q)
+static inline int qdio_siga_sync_q(struct qdio_q *q)
{
if (q->is_input_q)
return qdio_siga_sync(q, 0, q->mask);
@@ -358,8 +358,7 @@
return cc;
}
-/* called from thinint inbound handler */
-void qdio_sync_after_thinint(struct qdio_q *q)
+static inline void qdio_sync_after_thinint(struct qdio_q *q)
{
if (pci_out_supported(q)) {
if (need_siga_sync_thinint(q))
@@ -370,7 +369,14 @@
qdio_siga_sync_q(q);
}
-inline void qdio_stop_polling(struct qdio_q *q)
+int debug_get_buf_state(struct qdio_q *q, unsigned int bufnr,
+ unsigned char *state)
+{
+ qdio_siga_sync_q(q);
+ return get_buf_states(q, bufnr, state, 1, 0);
+}
+
+static inline void qdio_stop_polling(struct qdio_q *q)
{
if (!q->u.in.polling)
return;
@@ -449,13 +455,6 @@
count--;
if (!count)
return;
-
- /*
- * Need to change all PRIMED buffers to NOT_INIT, otherwise
- * we're loosing initiative in the thinint code.
- */
- set_buf_states(q, q->first_to_check, SLSB_P_INPUT_NOT_INIT,
- count);
}
static int get_inbound_buffer_frontier(struct qdio_q *q)
@@ -470,19 +469,13 @@
count = min(atomic_read(&q->nr_buf_used), QDIO_MAX_BUFFERS_MASK);
stop = add_buf(q->first_to_check, count);
- /*
- * No siga sync here, as a PCI or we after a thin interrupt
- * will sync the queues.
- */
-
- /* need to set count to 1 for non-qebsm */
- if (!is_qebsm(q))
- count = 1;
-
-check_next:
if (q->first_to_check == stop)
goto out;
+ /*
+ * No siga sync here, as a PCI or we after a thin interrupt
+ * already sync'ed the queues.
+ */
count = get_buf_states(q, q->first_to_check, &state, count, 1);
if (!count)
goto out;
@@ -490,14 +483,9 @@
switch (state) {
case SLSB_P_INPUT_PRIMED:
inbound_primed(q, count);
- /*
- * No siga-sync needed for non-qebsm here, as the inbound queue
- * will be synced on the next siga-r, resp.
- * tiqdio_is_inbound_q_done will do the siga-sync.
- */
q->first_to_check = add_buf(q->first_to_check, count);
atomic_sub(count, &q->nr_buf_used);
- goto check_next;
+ break;
case SLSB_P_INPUT_ERROR:
announce_buffer_error(q, count);
/* process the buffer, the upper layer will take care of it */
@@ -516,7 +504,7 @@
return q->first_to_check;
}
-int qdio_inbound_q_moved(struct qdio_q *q)
+static int qdio_inbound_q_moved(struct qdio_q *q)
{
int bufnr;
@@ -524,35 +512,32 @@
if ((bufnr != q->last_move) || q->qdio_error) {
q->last_move = bufnr;
- if (!need_siga_sync(q) && !pci_out_supported(q))
+ if (!is_thinint_irq(q->irq_ptr) && !MACHINE_IS_VM)
q->u.in.timestamp = get_usecs();
-
- DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in moved");
return 1;
} else
return 0;
}
-static int qdio_inbound_q_done(struct qdio_q *q)
+static inline int qdio_inbound_q_done(struct qdio_q *q)
{
unsigned char state = 0;
if (!atomic_read(&q->nr_buf_used))
return 1;
- /*
- * We need that one for synchronization with the adapter, as it
- * does a kind of PCI avoidance.
- */
qdio_siga_sync_q(q);
-
get_buf_state(q, q->first_to_check, &state, 0);
+
if (state == SLSB_P_INPUT_PRIMED)
- /* we got something to do */
+ /* more work coming */
return 0;
- /* on VM, we don't poll, so the q is always done here */
- if (need_siga_sync(q) || pci_out_supported(q))
+ if (is_thinint_irq(q->irq_ptr))
+ return 1;
+
+ /* don't poll under z/VM */
+ if (MACHINE_IS_VM)
return 1;
/*
@@ -563,14 +548,11 @@
DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in done:%3d",
q->first_to_check);
return 1;
- } else {
- DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in notd:%3d",
- q->first_to_check);
+ } else
return 0;
- }
}
-void qdio_kick_handler(struct qdio_q *q)
+static void qdio_kick_handler(struct qdio_q *q)
{
int start = q->first_to_kick;
int end = q->first_to_check;
@@ -619,7 +601,6 @@
goto again;
}
-/* inbound tasklet */
void qdio_inbound_processing(unsigned long data)
{
struct qdio_q *q = (struct qdio_q *)data;
@@ -642,11 +623,6 @@
count = min(atomic_read(&q->nr_buf_used), QDIO_MAX_BUFFERS_MASK);
stop = add_buf(q->first_to_check, count);
- /* need to set count to 1 for non-qebsm */
- if (!is_qebsm(q))
- count = 1;
-
-check_next:
if (q->first_to_check == stop)
return q->first_to_check;
@@ -661,13 +637,7 @@
atomic_sub(count, &q->nr_buf_used);
q->first_to_check = add_buf(q->first_to_check, count);
- /*
- * We fetch all buffer states at once. get_buf_states may
- * return count < stop. For QEBSM we do not loop.
- */
- if (is_qebsm(q))
- break;
- goto check_next;
+ break;
case SLSB_P_OUTPUT_ERROR:
announce_buffer_error(q, count);
/* process the buffer, the upper layer will take care of it */
@@ -797,8 +767,7 @@
tasklet_schedule(&q->tasklet);
}
-/* called from thinint inbound tasklet */
-void qdio_check_outbound_after_thinint(struct qdio_q *q)
+static inline void qdio_check_outbound_after_thinint(struct qdio_q *q)
{
struct qdio_q *out;
int i;
@@ -811,6 +780,46 @@
tasklet_schedule(&out->tasklet);
}
+static void __tiqdio_inbound_processing(struct qdio_q *q)
+{
+ qdio_perf_stat_inc(&perf_stats.thinint_inbound);
+ qdio_sync_after_thinint(q);
+
+ /*
+ * The interrupt could be caused by a PCI request. Check the
+ * PCI capable outbound queues.
+ */
+ qdio_check_outbound_after_thinint(q);
+
+ if (!qdio_inbound_q_moved(q))
+ return;
+
+ qdio_kick_handler(q);
+
+ if (!qdio_inbound_q_done(q)) {
+ qdio_perf_stat_inc(&perf_stats.thinint_inbound_loop);
+ if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED))
+ tasklet_schedule(&q->tasklet);
+ }
+
+ qdio_stop_polling(q);
+ /*
+ * We need to check again to not lose initiative after
+ * resetting the ACK state.
+ */
+ if (!qdio_inbound_q_done(q)) {
+ qdio_perf_stat_inc(&perf_stats.thinint_inbound_loop2);
+ if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED))
+ tasklet_schedule(&q->tasklet);
+ }
+}
+
+void tiqdio_inbound_processing(unsigned long data)
+{
+ struct qdio_q *q = (struct qdio_q *)data;
+ __tiqdio_inbound_processing(q);
+}
+
static inline void qdio_set_state(struct qdio_irq *irq_ptr,
enum qdio_irq_states state)
{
@@ -1488,18 +1497,13 @@
* @count: how many buffers to process
*/
int do_QDIO(struct ccw_device *cdev, unsigned int callflags,
- int q_nr, int bufnr, int count)
+ int q_nr, unsigned int bufnr, unsigned int count)
{
struct qdio_irq *irq_ptr;
- if ((bufnr > QDIO_MAX_BUFFERS_PER_Q) ||
- (count > QDIO_MAX_BUFFERS_PER_Q) ||
- (q_nr >= QDIO_MAX_QUEUES_PER_IRQ))
+ if (bufnr >= QDIO_MAX_BUFFERS_PER_Q || count > QDIO_MAX_BUFFERS_PER_Q)
return -EINVAL;
- if (!count)
- return 0;
-
irq_ptr = cdev->private->qdio_data;
if (!irq_ptr)
return -ENODEV;
diff --git a/drivers/s390/cio/qdio_thinint.c b/drivers/s390/cio/qdio_thinint.c
index c655d01..981a77e 100644
--- a/drivers/s390/cio/qdio_thinint.c
+++ b/drivers/s390/cio/qdio_thinint.c
@@ -43,9 +43,6 @@
};
static struct indicator_t *q_indicators;
-static void tiqdio_tasklet_fn(unsigned long data);
-static DECLARE_TASKLET(tiqdio_tasklet, tiqdio_tasklet_fn, 0);
-
static int css_qdio_omit_svs;
static inline unsigned long do_clear_global_summary(void)
@@ -103,11 +100,6 @@
xchg(irq_ptr->dsci, 1);
}
-/*
- * we cannot stop the tiqdio tasklet here since it is for all
- * thinint qdio devices and it must run as long as there is a
- * thinint device left
- */
void tiqdio_remove_input_queues(struct qdio_irq *irq_ptr)
{
struct qdio_q *q;
@@ -126,79 +118,39 @@
}
}
-static inline int tiqdio_inbound_q_done(struct qdio_q *q)
-{
- unsigned char state = 0;
-
- if (!atomic_read(&q->nr_buf_used))
- return 1;
-
- qdio_siga_sync_q(q);
- get_buf_state(q, q->first_to_check, &state, 0);
-
- if (state == SLSB_P_INPUT_PRIMED)
- /* more work coming */
- return 0;
- return 1;
-}
-
static inline int shared_ind(struct qdio_irq *irq_ptr)
{
return irq_ptr->dsci == &q_indicators[TIQDIO_SHARED_IND].ind;
}
-static void __tiqdio_inbound_processing(struct qdio_q *q)
-{
- qdio_perf_stat_inc(&perf_stats.thinint_inbound);
- qdio_sync_after_thinint(q);
-
- /*
- * Maybe we have work on our outbound queues... at least
- * we have to check the PCI capable queues.
- */
- qdio_check_outbound_after_thinint(q);
-
- if (!qdio_inbound_q_moved(q))
- return;
-
- qdio_kick_handler(q);
-
- if (!tiqdio_inbound_q_done(q)) {
- qdio_perf_stat_inc(&perf_stats.thinint_inbound_loop);
- if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED))
- tasklet_schedule(&q->tasklet);
- }
-
- qdio_stop_polling(q);
- /*
- * We need to check again to not lose initiative after
- * resetting the ACK state.
- */
- if (!tiqdio_inbound_q_done(q)) {
- qdio_perf_stat_inc(&perf_stats.thinint_inbound_loop2);
- if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED))
- tasklet_schedule(&q->tasklet);
- }
-}
-
-void tiqdio_inbound_processing(unsigned long data)
-{
- struct qdio_q *q = (struct qdio_q *)data;
-
- __tiqdio_inbound_processing(q);
-}
-
-/* check for work on all inbound thinint queues */
-static void tiqdio_tasklet_fn(unsigned long data)
+/**
+ * tiqdio_thinint_handler - thin interrupt handler for qdio
+ * @ind: pointer to adapter local summary indicator
+ * @drv_data: NULL
+ */
+static void tiqdio_thinint_handler(void *ind, void *drv_data)
{
struct qdio_q *q;
- qdio_perf_stat_inc(&perf_stats.tasklet_thinint);
-again:
+ qdio_perf_stat_inc(&perf_stats.thin_int);
+
+ /*
+ * SVS only when needed: issue SVS to benefit from iqdio interrupt
+ * avoidance (SVS clears adapter interrupt suppression overwrite)
+ */
+ if (!css_qdio_omit_svs)
+ do_clear_global_summary();
+
+ /*
+ * reset local summary indicator (tiqdio_alsi) to stop adapter
+ * interrupts for now
+ */
+ xchg((u8 *)ind, 0);
/* protect tiq_list entries, only changed in activate or shutdown */
rcu_read_lock();
+ /* check for work on all inbound thinint queues */
list_for_each_entry_rcu(q, &tiq_list, entry)
/* only process queues from changed sets */
if (*q->irq_ptr->dsci) {
@@ -226,37 +178,6 @@
if (*tiqdio_alsi)
xchg(&q_indicators[TIQDIO_SHARED_IND].ind, 1);
}
-
- /* check for more work */
- if (*tiqdio_alsi) {
- xchg(tiqdio_alsi, 0);
- qdio_perf_stat_inc(&perf_stats.tasklet_thinint_loop);
- goto again;
- }
-}
-
-/**
- * tiqdio_thinint_handler - thin interrupt handler for qdio
- * @ind: pointer to adapter local summary indicator
- * @drv_data: NULL
- */
-static void tiqdio_thinint_handler(void *ind, void *drv_data)
-{
- qdio_perf_stat_inc(&perf_stats.thin_int);
-
- /*
- * SVS only when needed: issue SVS to benefit from iqdio interrupt
- * avoidance (SVS clears adapter interrupt suppression overwrite)
- */
- if (!css_qdio_omit_svs)
- do_clear_global_summary();
-
- /*
- * reset local summary indicator (tiqdio_alsi) to stop adapter
- * interrupts for now, the tasklet will clean all dsci's
- */
- xchg((u8 *)ind, 0);
- tasklet_hi_schedule(&tiqdio_tasklet);
}
static int set_subchannel_ind(struct qdio_irq *irq_ptr, int reset)
@@ -376,5 +297,4 @@
s390_unregister_adapter_interrupt(tiqdio_alsi, QDIO_AIRQ_ISC);
isc_unregister(QDIO_AIRQ_ISC);
}
- tasklet_kill(&tiqdio_tasklet);
}
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
index 9c14840..727a809 100644
--- a/drivers/s390/crypto/ap_bus.c
+++ b/drivers/s390/crypto/ap_bus.c
@@ -54,6 +54,12 @@
static void ap_poll_thread_stop(void);
static void ap_request_timeout(unsigned long);
static inline void ap_schedule_poll_timer(void);
+static int __ap_poll_device(struct ap_device *ap_dev, unsigned long *flags);
+static int ap_device_remove(struct device *dev);
+static int ap_device_probe(struct device *dev);
+static void ap_interrupt_handler(void *unused1, void *unused2);
+static void ap_reset(struct ap_device *ap_dev);
+static void ap_config_timeout(unsigned long ptr);
/*
* Module description.
@@ -101,6 +107,10 @@
* If z/VM change to 1500000 nanoseconds to adjust to z/VM polling.*/
static unsigned long long poll_timeout = 250000;
+/* Suspend flag */
+static int ap_suspend_flag;
+static struct bus_type ap_bus_type;
+
/**
* ap_using_interrupts() - Returns non-zero if interrupt support is
* available.
@@ -617,10 +627,79 @@
return retval;
}
+static int ap_bus_suspend(struct device *dev, pm_message_t state)
+{
+ struct ap_device *ap_dev = to_ap_dev(dev);
+ unsigned long flags;
+
+ if (!ap_suspend_flag) {
+ ap_suspend_flag = 1;
+
+ /* Disable scanning for devices so that they are not
+ * scanned for again after being removed.
+ */
+ del_timer_sync(&ap_config_timer);
+ if (ap_work_queue != NULL) {
+ destroy_workqueue(ap_work_queue);
+ ap_work_queue = NULL;
+ }
+ tasklet_disable(&ap_tasklet);
+ }
+ /* Poll on the device until all requests are finished. */
+ do {
+ flags = 0;
+ __ap_poll_device(ap_dev, &flags);
+ } while ((flags & 1) || (flags & 2));
+
+ ap_device_remove(dev);
+ return 0;
+}
+
+static int ap_bus_resume(struct device *dev)
+{
+ int rc = 0;
+ struct ap_device *ap_dev = to_ap_dev(dev);
+
+ if (ap_suspend_flag) {
+ ap_suspend_flag = 0;
+ if (!ap_interrupts_available())
+ ap_interrupt_indicator = NULL;
+ ap_device_probe(dev);
+ ap_reset(ap_dev);
+ setup_timer(&ap_dev->timeout, ap_request_timeout,
+ (unsigned long) ap_dev);
+ ap_scan_bus(NULL);
+ init_timer(&ap_config_timer);
+ ap_config_timer.function = ap_config_timeout;
+ ap_config_timer.data = 0;
+ ap_config_timer.expires = jiffies + ap_config_time * HZ;
+ add_timer(&ap_config_timer);
+ ap_work_queue = create_singlethread_workqueue("kapwork");
+ if (!ap_work_queue)
+ return -ENOMEM;
+ tasklet_enable(&ap_tasklet);
+ if (!ap_using_interrupts())
+ ap_schedule_poll_timer();
+ else
+ tasklet_schedule(&ap_tasklet);
+ if (ap_thread_flag)
+ rc = ap_poll_thread_start();
+ } else {
+ ap_device_probe(dev);
+ ap_reset(ap_dev);
+ setup_timer(&ap_dev->timeout, ap_request_timeout,
+ (unsigned long) ap_dev);
+ }
+
+ return rc;
+}
+
static struct bus_type ap_bus_type = {
.name = "ap",
.match = &ap_bus_match,
.uevent = &ap_uevent,
+ .suspend = ap_bus_suspend,
+ .resume = ap_bus_resume
};
static int ap_device_probe(struct device *dev)
@@ -1066,7 +1145,7 @@
*/
static inline void ap_schedule_poll_timer(void)
{
- if (ap_using_interrupts())
+ if (ap_using_interrupts() || ap_suspend_flag)
return;
if (hrtimer_is_queued(&ap_poll_timer))
return;
@@ -1384,6 +1463,8 @@
set_user_nice(current, 19);
while (1) {
+ if (ap_suspend_flag)
+ return 0;
if (need_resched()) {
schedule();
continue;
@@ -1414,7 +1495,7 @@
{
int rc;
- if (ap_using_interrupts())
+ if (ap_using_interrupts() || ap_suspend_flag)
return 0;
mutex_lock(&ap_poll_thread_mutex);
if (!ap_poll_kthread) {
diff --git a/drivers/s390/net/netiucv.c b/drivers/s390/net/netiucv.c
index 52574ce..8c36eaf 100644
--- a/drivers/s390/net/netiucv.c
+++ b/drivers/s390/net/netiucv.c
@@ -1307,7 +1307,7 @@
*/
static int netiucv_pm_freeze(struct device *dev)
{
- struct netiucv_priv *priv = dev->driver_data;
+ struct netiucv_priv *priv = dev_get_drvdata(dev);
struct net_device *ndev = NULL;
int rc = 0;
@@ -1331,7 +1331,7 @@
*/
static int netiucv_pm_restore_thaw(struct device *dev)
{
- struct netiucv_priv *priv = dev->driver_data;
+ struct netiucv_priv *priv = dev_get_drvdata(dev);
struct net_device *ndev = NULL;
int rc = 0;
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 6a19ed9..9c23122 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -258,10 +258,21 @@
or async on the kernel's command line.
config SCSI_WAIT_SCAN
- tristate
+ tristate # No prompt here, this is an invisible symbol.
default m
depends on SCSI
depends on MODULES
+# scsi_wait_scan is a loadable module which waits until all the async scans are
+# complete. The idea is to use it in initrd/initramfs scripts. You modprobe
+# it after all the modprobes of the root SCSI drivers and it will wait until
+# they have all finished scanning their buses before allowing the boot to
+# proceed. (This method is not applicable if targets boot independently in
+# parallel with the initiator, or with transports with non-deterministic target
+# discovery schemes, or if a transport driver does not support scsi_wait_scan.)
+#
+# This symbol is not exposed as a prompt because little is to be gained by
+# disabling it, whereas people who accidentally switch it off may wonder why
+# their mkinitrd gets into trouble.
menu "SCSI Transports"
depends on SCSI
diff --git a/drivers/scsi/bnx2i/Kconfig b/drivers/scsi/bnx2i/Kconfig
index b62b482..1e9f714 100644
--- a/drivers/scsi/bnx2i/Kconfig
+++ b/drivers/scsi/bnx2i/Kconfig
@@ -1,6 +1,8 @@
config SCSI_BNX2_ISCSI
tristate "Broadcom NetXtreme II iSCSI support"
select SCSI_ISCSI_ATTRS
+ select NETDEVICES
+ select NETDEV_1000
select CNIC
depends on PCI
---help---
diff --git a/drivers/scsi/cxgb3i/cxgb3i_ddp.c b/drivers/scsi/cxgb3i/cxgb3i_ddp.c
index 99c9125..344fd53 100644
--- a/drivers/scsi/cxgb3i/cxgb3i_ddp.c
+++ b/drivers/scsi/cxgb3i/cxgb3i_ddp.c
@@ -206,6 +206,31 @@
return DDP_PGIDX_MAX;
}
+/**
+ * cxgb3i_ddp_adjust_page_table - adjust the ddp page table to the system PAGE_SIZE
+ * Returns 0 if the page table was adjusted, -EINVAL if PAGE_SIZE is too small.
+ */
+int cxgb3i_ddp_adjust_page_table(void)
+{
+ int i;
+ unsigned int base_order, order;
+
+ if (PAGE_SIZE < (1UL << ddp_page_shift[0])) {
+ ddp_log_info("PAGE_SIZE 0x%lx too small, min. 0x%lx.\n",
+ PAGE_SIZE, 1UL << ddp_page_shift[0]);
+ return -EINVAL;
+ }
+
+ base_order = get_order(1UL << ddp_page_shift[0]);
+ order = get_order(1 << PAGE_SHIFT);
+ for (i = 0; i < DDP_PGIDX_MAX; i++) {
+ /* the first entry is the kernel page size; each following entry doubles it */
+ ddp_page_order[i] = order - base_order + i;
+ ddp_page_shift[i] = PAGE_SHIFT + i;
+ }
+ return 0;
+}
+
static inline void ddp_gl_unmap(struct pci_dev *pdev,
struct cxgb3i_gather_list *gl)
{
@@ -598,30 +623,40 @@
* release all the resource held by the ddp pagepod manager for a given
* adapter if needed
*/
+
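+/* kref release callback: frees the shared ddp state once the last reference is dropped */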
+static void ddp_cleanup(struct kref *kref)
+{
+ struct cxgb3i_ddp_info *ddp = container_of(kref,
+ struct cxgb3i_ddp_info,
+ refcnt);
+ int i = 0;
+
+ ddp_log_info("kref release ddp 0x%p, t3dev 0x%p.\n", ddp, ddp->tdev);
+
+ ddp->tdev->ulp_iscsi = NULL;
+ while (i < ddp->nppods) {
+ struct cxgb3i_gather_list *gl = ddp->gl_map[i];
+ if (gl) {
+ int npods = (gl->nelem + PPOD_PAGES_MAX - 1)
+ >> PPOD_PAGES_SHIFT;
+ ddp_log_info("t3dev 0x%p, ddp %d + %d.\n",
+ ddp->tdev, i, npods);
+ kfree(gl);
+ ddp_free_gl_skb(ddp, i, npods);
+ i += npods;
+ } else
+ i++;
+ }
+ cxgb3i_free_big_mem(ddp);
+}
+
void cxgb3i_ddp_cleanup(struct t3cdev *tdev)
{
- int i = 0;
struct cxgb3i_ddp_info *ddp = (struct cxgb3i_ddp_info *)tdev->ulp_iscsi;
ddp_log_info("t3dev 0x%p, release ddp 0x%p.\n", tdev, ddp);
-
- if (ddp) {
- tdev->ulp_iscsi = NULL;
- while (i < ddp->nppods) {
- struct cxgb3i_gather_list *gl = ddp->gl_map[i];
- if (gl) {
- int npods = (gl->nelem + PPOD_PAGES_MAX - 1)
- >> PPOD_PAGES_SHIFT;
- ddp_log_info("t3dev 0x%p, ddp %d + %d.\n",
- tdev, i, npods);
- kfree(gl);
- ddp_free_gl_skb(ddp, i, npods);
- i += npods;
- } else
- i++;
- }
- cxgb3i_free_big_mem(ddp);
- }
+ if (ddp)
+ kref_put(&ddp->refcnt, ddp_cleanup);
}
/**
@@ -631,12 +666,13 @@
*/
static void ddp_init(struct t3cdev *tdev)
{
- struct cxgb3i_ddp_info *ddp;
+ struct cxgb3i_ddp_info *ddp = tdev->ulp_iscsi;
struct ulp_iscsi_info uinfo;
unsigned int ppmax, bits;
int i, err;
- if (tdev->ulp_iscsi) {
+ if (ddp) {
+ kref_get(&ddp->refcnt);
ddp_log_warn("t3dev 0x%p, ddp 0x%p already set up.\n",
tdev, tdev->ulp_iscsi);
return;
@@ -670,6 +706,7 @@
ppmax *
sizeof(struct cxgb3i_gather_list *));
spin_lock_init(&ddp->map_lock);
+ kref_init(&ddp->refcnt);
ddp->tdev = tdev;
ddp->pdev = uinfo.pdev;
@@ -715,6 +752,17 @@
{
if (page_idx == DDP_PGIDX_MAX) {
page_idx = cxgb3i_ddp_find_page_index(PAGE_SIZE);
+
+ if (page_idx == DDP_PGIDX_MAX) {
+ ddp_log_info("system PAGE_SIZE %lu, update hw.\n",
+ PAGE_SIZE);
+ if (cxgb3i_ddp_adjust_page_table() < 0) {
+ ddp_log_info("PAGE_SIZE %lu, ddp disabled.\n",
+ PAGE_SIZE);
+ return;
+ }
+ page_idx = cxgb3i_ddp_find_page_index(PAGE_SIZE);
+ }
ddp_log_info("system PAGE_SIZE %lu, ddp idx %u.\n",
PAGE_SIZE, page_idx);
}
diff --git a/drivers/scsi/cxgb3i/cxgb3i_ddp.h b/drivers/scsi/cxgb3i/cxgb3i_ddp.h
index 0d296de..87dd56b 100644
--- a/drivers/scsi/cxgb3i/cxgb3i_ddp.h
+++ b/drivers/scsi/cxgb3i/cxgb3i_ddp.h
@@ -54,6 +54,7 @@
* struct cxgb3i_ddp_info - cxgb3i direct data placement for pdu payload
*
* @list: list head to link elements
+ * @refcnt: ref. count
* @tdev: pointer to t3cdev used by cxgb3 driver
* @max_txsz: max tx packet size for ddp
* @max_rxsz: max rx packet size for ddp
@@ -70,6 +71,7 @@
*/
struct cxgb3i_ddp_info {
struct list_head list;
+ struct kref refcnt;
struct t3cdev *tdev;
struct pci_dev *pdev;
unsigned int max_txsz;
diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c
index c15878e..0a5609b 100644
--- a/drivers/scsi/fcoe/fcoe.c
+++ b/drivers/scsi/fcoe/fcoe.c
@@ -45,8 +45,6 @@
#include "fcoe.h"
-static int debug_fcoe;
-
MODULE_AUTHOR("Open-FCoE.org");
MODULE_DESCRIPTION("FCoE");
MODULE_LICENSE("GPL v2");
@@ -305,23 +303,22 @@
#ifdef NETIF_F_FCOE_CRC
if (netdev->features & NETIF_F_FCOE_CRC) {
lp->crc_offload = 1;
- printk(KERN_DEBUG "fcoe:%s supports FCCRC offload\n",
- netdev->name);
+ FCOE_NETDEV_DBG(netdev, "Supports FCCRC offload\n");
}
#endif
#ifdef NETIF_F_FSO
if (netdev->features & NETIF_F_FSO) {
lp->seq_offload = 1;
lp->lso_max = netdev->gso_max_size;
- printk(KERN_DEBUG "fcoe:%s supports LSO for max len 0x%x\n",
- netdev->name, lp->lso_max);
+ FCOE_NETDEV_DBG(netdev, "Supports LSO for max len 0x%x\n",
+ lp->lso_max);
}
#endif
if (netdev->fcoe_ddp_xid) {
lp->lro_enabled = 1;
lp->lro_xid = netdev->fcoe_ddp_xid;
- printk(KERN_DEBUG "fcoe:%s supports LRO for max xid 0x%x\n",
- netdev->name, lp->lro_xid);
+ FCOE_NETDEV_DBG(netdev, "Supports LRO for max xid 0x%x\n",
+ lp->lro_xid);
}
skb_queue_head_init(&fc->fcoe_pending_queue);
fc->fcoe_pending_queue_active = 0;
@@ -407,7 +404,8 @@
/* add the new host to the SCSI-ml */
rc = scsi_add_host(lp->host, dev);
if (rc) {
- FC_DBG("fcoe_shost_config:error on scsi_add_host\n");
+ FCOE_NETDEV_DBG(fcoe_netdev(lp), "fcoe_shost_config: "
+ "error on scsi_add_host\n");
return rc;
}
sprintf(fc_host_symbolic_name(lp->host), "%s v%s over %s",
@@ -448,8 +446,7 @@
BUG_ON(!netdev);
- printk(KERN_DEBUG "fcoe_if_destroy:interface on %s\n",
- netdev->name);
+ FCOE_NETDEV_DBG(netdev, "Destroying interface\n");
lp = fcoe_hostlist_lookup(netdev);
if (!lp)
@@ -560,8 +557,7 @@
BUG_ON(!netdev);
- printk(KERN_DEBUG "fcoe_if_create:interface on %s\n",
- netdev->name);
+ FCOE_NETDEV_DBG(netdev, "Create Interface\n");
lp = fcoe_hostlist_lookup(netdev);
if (lp)
@@ -570,7 +566,7 @@
shost = libfc_host_alloc(&fcoe_shost_template,
sizeof(struct fcoe_softc));
if (!shost) {
- FC_DBG("Could not allocate host structure\n");
+ FCOE_NETDEV_DBG(netdev, "Could not allocate host structure\n");
return -ENOMEM;
}
lp = shost_priv(shost);
@@ -579,7 +575,8 @@
/* configure fc_lport, e.g., em */
rc = fcoe_lport_config(lp);
if (rc) {
- FC_DBG("Could not configure lport\n");
+ FCOE_NETDEV_DBG(netdev, "Could not configure lport for the "
+ "interface\n");
goto out_host_put;
}
@@ -593,28 +590,32 @@
/* configure lport network properties */
rc = fcoe_netdev_config(lp, netdev);
if (rc) {
- FC_DBG("Could not configure netdev for the interface\n");
+ FCOE_NETDEV_DBG(netdev, "Could not configure netdev for the "
+ "interface\n");
goto out_netdev_cleanup;
}
/* configure lport scsi host properties */
rc = fcoe_shost_config(lp, shost, &netdev->dev);
if (rc) {
- FC_DBG("Could not configure shost for lport\n");
+ FCOE_NETDEV_DBG(netdev, "Could not configure shost for the "
+ "interface\n");
goto out_netdev_cleanup;
}
/* lport exch manager allocation */
rc = fcoe_em_config(lp);
if (rc) {
- FC_DBG("Could not configure em for lport\n");
+ FCOE_NETDEV_DBG(netdev, "Could not configure the EM for the "
+ "interface\n");
goto out_netdev_cleanup;
}
/* Initialize the library */
rc = fcoe_libfc_config(lp, &fcoe_libfc_fcn_templ);
if (rc) {
- FC_DBG("Could not configure libfc for lport!\n");
+ FCOE_NETDEV_DBG(netdev, "Could not configure libfc for the "
+ "interface\n");
goto out_lp_destroy;
}
@@ -653,7 +654,7 @@
fc_attach_transport(&fcoe_transport_function);
if (!scsi_transport_fcoe_sw) {
- printk(KERN_ERR "fcoe_init:fc_attach_transport() failed\n");
+ printk(KERN_ERR "fcoe: Failed to attach to the FC transport\n");
return -ENODEV;
}
@@ -714,7 +715,7 @@
unsigned targ_cpu = smp_processor_id();
#endif /* CONFIG_SMP */
- printk(KERN_DEBUG "fcoe: Destroying receive thread for CPU %d\n", cpu);
+ FCOE_DBG("Destroying receive thread for CPU %d\n", cpu);
/* Prevent any new skbs from being queued for this CPU. */
p = &per_cpu(fcoe_percpu, cpu);
@@ -736,8 +737,8 @@
p0 = &per_cpu(fcoe_percpu, targ_cpu);
spin_lock_bh(&p0->fcoe_rx_list.lock);
if (p0->thread) {
- FC_DBG("Moving frames from CPU %d to CPU %d\n",
- cpu, targ_cpu);
+ FCOE_DBG("Moving frames from CPU %d to CPU %d\n",
+ cpu, targ_cpu);
while ((skb = __skb_dequeue(&p->fcoe_rx_list)) != NULL)
__skb_queue_tail(&p0->fcoe_rx_list, skb);
@@ -803,12 +804,12 @@
switch (action) {
case CPU_ONLINE:
case CPU_ONLINE_FROZEN:
- FC_DBG("CPU %x online: Create Rx thread\n", cpu);
+ FCOE_DBG("CPU %x online: Create Rx thread\n", cpu);
fcoe_percpu_thread_create(cpu);
break;
case CPU_DEAD:
case CPU_DEAD_FROZEN:
- FC_DBG("CPU %x offline: Remove Rx thread\n", cpu);
+ FCOE_DBG("CPU %x offline: Remove Rx thread\n", cpu);
fcoe_percpu_thread_destroy(cpu);
break;
default:
@@ -846,24 +847,21 @@
fc = container_of(ptype, struct fcoe_softc, fcoe_packet_type);
lp = fc->ctlr.lp;
if (unlikely(lp == NULL)) {
- FC_DBG("cannot find hba structure");
+ FCOE_NETDEV_DBG(dev, "Cannot find hba structure");
goto err2;
}
if (!lp->link_up)
goto err2;
- if (unlikely(debug_fcoe)) {
- FC_DBG("skb_info: len:%d data_len:%d head:%p data:%p tail:%p "
- "end:%p sum:%d dev:%s", skb->len, skb->data_len,
- skb->head, skb->data, skb_tail_pointer(skb),
- skb_end_pointer(skb), skb->csum,
- skb->dev ? skb->dev->name : "<NULL>");
-
- }
+ FCOE_NETDEV_DBG(dev, "skb_info: len:%d data_len:%d head:%p "
+ "data:%p tail:%p end:%p sum:%d dev:%s",
+ skb->len, skb->data_len, skb->head, skb->data,
+ skb_tail_pointer(skb), skb_end_pointer(skb),
+ skb->csum, skb->dev ? skb->dev->name : "<NULL>");
/* check for FCOE packet type */
if (unlikely(eth_hdr(skb)->h_proto != htons(ETH_P_FCOE))) {
- FC_DBG("wrong FC type frame");
+ FCOE_NETDEV_DBG(dev, "Wrong FC type frame");
goto err;
}
@@ -901,8 +899,9 @@
* the first CPU now. For non-SMP systems this
* will check the same CPU twice.
*/
- FC_DBG("CPU is online, but no receive thread ready "
- "for incoming skb- using first online CPU.\n");
+ FCOE_NETDEV_DBG(dev, "CPU is online, but no receive thread "
+ "ready for incoming skb- using first online "
+ "CPU.\n");
spin_unlock_bh(&fps->fcoe_rx_list.lock);
cpu = first_cpu(cpu_online_map);
@@ -1201,19 +1200,17 @@
fr = fcoe_dev_from_skb(skb);
lp = fr->fr_dev;
if (unlikely(lp == NULL)) {
- FC_DBG("invalid HBA Structure");
+ FCOE_NETDEV_DBG(skb->dev, "Invalid HBA Structure");
kfree_skb(skb);
continue;
}
- if (unlikely(debug_fcoe)) {
- FC_DBG("skb_info: len:%d data_len:%d head:%p data:%p "
- "tail:%p end:%p sum:%d dev:%s",
- skb->len, skb->data_len,
- skb->head, skb->data, skb_tail_pointer(skb),
- skb_end_pointer(skb), skb->csum,
- skb->dev ? skb->dev->name : "<NULL>");
- }
+ FCOE_NETDEV_DBG(skb->dev, "skb_info: len:%d data_len:%d "
+ "head:%p data:%p tail:%p end:%p sum:%d dev:%s",
+ skb->len, skb->data_len,
+ skb->head, skb->data, skb_tail_pointer(skb),
+ skb_end_pointer(skb), skb->csum,
+ skb->dev ? skb->dev->name : "<NULL>");
/*
* Save source MAC address before discarding header.
@@ -1233,7 +1230,7 @@
stats = fc_lport_get_stats(lp);
if (unlikely(FC_FCOE_DECAPS_VER(hp) != FC_FCOE_VER)) {
if (stats->ErrorFrames < 5)
- printk(KERN_WARNING "FCoE version "
+ printk(KERN_WARNING "fcoe: FCoE version "
"mismatch: The frame has "
"version %x, but the "
"initiator supports version "
@@ -1286,7 +1283,7 @@
if (fr_flags(fp) & FCPHF_CRC_UNCHECKED) {
if (le32_to_cpu(fr_crc(fp)) !=
~crc32(~0, skb->data, fr_len)) {
- if (debug_fcoe || stats->InvalidCRCCount < 5)
+ if (stats->InvalidCRCCount < 5)
printk(KERN_WARNING "fcoe: dropping "
"frame with CRC error\n");
stats->InvalidCRCCount++;
@@ -1432,7 +1429,8 @@
case NETDEV_REGISTER:
break;
default:
- FC_DBG("Unknown event %ld from netdev netlink\n", event);
+ FCOE_NETDEV_DBG(real_dev, "Unknown event %ld "
+ "from netdev netlink\n", event);
}
if (link_possible && !fcoe_link_ok(lp))
fcoe_ctlr_link_up(&fc->ctlr);
@@ -1505,8 +1503,8 @@
owner = fcoe_netdev_to_module_owner(netdev);
if (owner) {
- printk(KERN_DEBUG "fcoe:hold driver module %s for %s\n",
- module_name(owner), netdev->name);
+ FCOE_NETDEV_DBG(netdev, "Hold driver module %s\n",
+ module_name(owner));
return try_module_get(owner);
}
return -ENODEV;
@@ -1527,8 +1525,8 @@
owner = fcoe_netdev_to_module_owner(netdev);
if (owner) {
- printk(KERN_DEBUG "fcoe:release driver module %s for %s\n",
- module_name(owner), netdev->name);
+ FCOE_NETDEV_DBG(netdev, "Release driver module %s\n",
+ module_name(owner));
module_put(owner);
return 0;
}
@@ -1559,7 +1557,7 @@
}
rc = fcoe_if_destroy(netdev);
if (rc) {
- printk(KERN_ERR "fcoe: fcoe_if_destroy(%s) failed\n",
+ printk(KERN_ERR "fcoe: Failed to destroy interface (%s)\n",
netdev->name);
rc = -EIO;
goto out_putdev;
@@ -1598,7 +1596,7 @@
rc = fcoe_if_create(netdev);
if (rc) {
- printk(KERN_ERR "fcoe: fcoe_if_create(%s) failed\n",
+ printk(KERN_ERR "fcoe: Failed to create interface (%s)\n",
netdev->name);
fcoe_ethdrv_put(netdev);
rc = -EIO;
diff --git a/drivers/scsi/fcoe/fcoe.h b/drivers/scsi/fcoe/fcoe.h
index a1eb8c1..0d724fa 100644
--- a/drivers/scsi/fcoe/fcoe.h
+++ b/drivers/scsi/fcoe/fcoe.h
@@ -40,6 +40,30 @@
#define FCOE_MIN_XID 0x0001 /* the min xid supported by fcoe_sw */
#define FCOE_MAX_XID 0x07ef /* the max xid supported by fcoe_sw */
+unsigned int fcoe_debug_logging;
+module_param_named(debug_logging, fcoe_debug_logging, int, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(debug_logging, "a bit mask of logging levels");
+
+#define FCOE_LOGGING 0x01 /* General logging, not categorized */
+#define FCOE_NETDEV_LOGGING 0x02 /* Netdevice logging */
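+/* the log levels form a bit mask; e.g. debug_logging=3 enables both classes */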
+
+#define FCOE_CHECK_LOGGING(LEVEL, CMD) \
+do { \
+ if (unlikely(fcoe_debug_logging & LEVEL)) \
+ do { \
+ CMD; \
+ } while (0); \
+} while (0);
+
+#define FCOE_DBG(fmt, args...) \
+ FCOE_CHECK_LOGGING(FCOE_LOGGING, \
+ printk(KERN_INFO "fcoe: " fmt, ##args);)
+
+#define FCOE_NETDEV_DBG(netdev, fmt, args...) \
+ FCOE_CHECK_LOGGING(FCOE_NETDEV_LOGGING, \
+ printk(KERN_INFO "fcoe: %s" fmt, \
+ netdev->name, ##args);)
+
/*
* this percpu struct for fcoe
*/
diff --git a/drivers/scsi/fcoe/libfcoe.c b/drivers/scsi/fcoe/libfcoe.c
index 2f5bc7f..f544340 100644
--- a/drivers/scsi/fcoe/libfcoe.c
+++ b/drivers/scsi/fcoe/libfcoe.c
@@ -56,15 +56,28 @@
static u8 fcoe_all_fcfs[ETH_ALEN] = FIP_ALL_FCF_MACS;
-static u32 fcoe_ctlr_debug; /* 1 for basic, 2 for noisy debug */
+unsigned int libfcoe_debug_logging;
+module_param_named(debug_logging, libfcoe_debug_logging, int, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(debug_logging, "a bit mask of logging levels");
-#define FIP_DBG_LVL(level, fmt, args...) \
+#define LIBFCOE_LOGGING 0x01 /* General logging, not categorized */
+#define LIBFCOE_FIP_LOGGING 0x02 /* FIP logging */
+
+#define LIBFCOE_CHECK_LOGGING(LEVEL, CMD) \
+do { \
+ if (unlikely(libfcoe_debug_logging & LEVEL)) \
do { \
- if (fcoe_ctlr_debug >= (level)) \
- FC_DBG(fmt, ##args); \
- } while (0)
+ CMD; \
+ } while (0); \
+} while (0);
-#define FIP_DBG(fmt, args...) FIP_DBG_LVL(1, fmt, ##args)
+#define LIBFCOE_DBG(fmt, args...) \
+ LIBFCOE_CHECK_LOGGING(LIBFCOE_LOGGING, \
+ printk(KERN_INFO "libfcoe: " fmt, ##args);)
+
+#define LIBFCOE_FIP_DBG(fmt, args...) \
+ LIBFCOE_CHECK_LOGGING(LIBFCOE_FIP_LOGGING, \
+ printk(KERN_INFO "fip: " fmt, ##args);)
/*
* Return non-zero if FCF fcoe_size has been validated.
@@ -243,7 +256,7 @@
fip->last_link = 1;
fip->link = 1;
spin_unlock_bh(&fip->lock);
- FIP_DBG("%s", "setting AUTO mode.\n");
+ LIBFCOE_FIP_DBG("%s", "setting AUTO mode.\n");
fc_linkup(fip->lp);
fcoe_ctlr_solicit(fip, NULL);
} else
@@ -614,7 +627,8 @@
((struct fip_mac_desc *)desc)->fd_mac,
ETH_ALEN);
if (!is_valid_ether_addr(fcf->fcf_mac)) {
- FIP_DBG("invalid MAC addr in FIP adv\n");
+ LIBFCOE_FIP_DBG("Invalid MAC address "
+ "in FIP adv\n");
return -EINVAL;
}
break;
@@ -647,8 +661,8 @@
case FIP_DT_LOGO:
case FIP_DT_ELP:
default:
- FIP_DBG("unexpected descriptor type %x in FIP adv\n",
- desc->fip_dtype);
+ LIBFCOE_FIP_DBG("unexpected descriptor type %x "
+ "in FIP adv\n", desc->fip_dtype);
/* standard says ignore unknown descriptors >= 128 */
if (desc->fip_dtype < FIP_DT_VENDOR_BASE)
return -EINVAL;
@@ -664,8 +678,8 @@
return 0;
len_err:
- FIP_DBG("FIP length error in descriptor type %x len %zu\n",
- desc->fip_dtype, dlen);
+ LIBFCOE_FIP_DBG("FIP length error in descriptor type %x len %zu\n",
+ desc->fip_dtype, dlen);
return -EINVAL;
}
@@ -728,9 +742,10 @@
}
mtu_valid = fcoe_ctlr_mtu_valid(fcf);
fcf->time = jiffies;
- FIP_DBG_LVL(found ? 2 : 1, "%s FCF for fab %llx map %x val %d\n",
- found ? "old" : "new",
- fcf->fabric_name, fcf->fc_map, mtu_valid);
+ if (!found) {
+ LIBFCOE_FIP_DBG("New FCF for fab %llx map %x val %d\n",
+ fcf->fabric_name, fcf->fc_map, mtu_valid);
+ }
/*
* If this advertisement is not solicited and our max receive size
@@ -807,7 +822,8 @@
((struct fip_mac_desc *)desc)->fd_mac,
ETH_ALEN);
if (!is_valid_ether_addr(granted_mac)) {
- FIP_DBG("invalid MAC addrs in FIP ELS\n");
+ LIBFCOE_FIP_DBG("Invalid MAC address "
+ "in FIP ELS\n");
goto drop;
}
break;
@@ -825,8 +841,8 @@
els_dtype = desc->fip_dtype;
break;
default:
- FIP_DBG("unexpected descriptor type %x "
- "in FIP adv\n", desc->fip_dtype);
+ LIBFCOE_FIP_DBG("unexpected descriptor type %x "
+ "in FIP adv\n", desc->fip_dtype);
/* standard says ignore unknown descriptors >= 128 */
if (desc->fip_dtype < FIP_DT_VENDOR_BASE)
goto drop;
@@ -867,8 +883,8 @@
return;
len_err:
- FIP_DBG("FIP length error in descriptor type %x len %zu\n",
- desc->fip_dtype, dlen);
+ LIBFCOE_FIP_DBG("FIP length error in descriptor type %x len %zu\n",
+ desc->fip_dtype, dlen);
drop:
kfree_skb(skb);
}
@@ -894,7 +910,7 @@
struct fc_lport *lp = fip->lp;
u32 desc_mask;
- FIP_DBG("Clear Virtual Link received\n");
+ LIBFCOE_FIP_DBG("Clear Virtual Link received\n");
if (!fcf)
return;
if (!fcf || !fc_host_port_id(lp->host))
@@ -952,9 +968,9 @@
* reset only if all required descriptors were present and valid.
*/
if (desc_mask) {
- FIP_DBG("missing descriptors mask %x\n", desc_mask);
+ LIBFCOE_FIP_DBG("missing descriptors mask %x\n", desc_mask);
} else {
- FIP_DBG("performing Clear Virtual Link\n");
+ LIBFCOE_FIP_DBG("performing Clear Virtual Link\n");
fcoe_ctlr_reset(fip, FIP_ST_ENABLED);
}
}
@@ -1002,10 +1018,6 @@
op = ntohs(fiph->fip_op);
sub = fiph->fip_subcode;
- FIP_DBG_LVL(2, "ver %x op %x/%x dl %x fl %x\n",
- FIP_VER_DECAPS(fiph->fip_ver), op, sub,
- ntohs(fiph->fip_dl_len), ntohs(fiph->fip_flags));
-
if (FIP_VER_DECAPS(fiph->fip_ver) != FIP_VER)
goto drop;
if (ntohs(fiph->fip_dl_len) * FIP_BPW + sizeof(*fiph) > skb->len)
@@ -1017,7 +1029,7 @@
fip->map_dest = 0;
fip->state = FIP_ST_ENABLED;
state = FIP_ST_ENABLED;
- FIP_DBG("using FIP mode\n");
+ LIBFCOE_FIP_DBG("Using FIP mode\n");
}
spin_unlock_bh(&fip->lock);
if (state != FIP_ST_ENABLED)
@@ -1052,14 +1064,15 @@
struct fcoe_fcf *best = NULL;
list_for_each_entry(fcf, &fip->fcfs, list) {
- FIP_DBG("consider FCF for fab %llx VFID %d map %x val %d\n",
- fcf->fabric_name, fcf->vfid,
- fcf->fc_map, fcoe_ctlr_mtu_valid(fcf));
+ LIBFCOE_FIP_DBG("consider FCF for fab %llx VFID %d map %x "
+ "val %d\n", fcf->fabric_name, fcf->vfid,
+ fcf->fc_map, fcoe_ctlr_mtu_valid(fcf));
if (!fcoe_ctlr_fcf_usable(fcf)) {
- FIP_DBG("FCF for fab %llx map %x %svalid %savailable\n",
- fcf->fabric_name, fcf->fc_map,
- (fcf->flags & FIP_FL_SOL) ? "" : "in",
- (fcf->flags & FIP_FL_AVAIL) ? "" : "un");
+ LIBFCOE_FIP_DBG("FCF for fab %llx map %x %svalid "
+ "%savailable\n", fcf->fabric_name,
+ fcf->fc_map, (fcf->flags & FIP_FL_SOL)
+ ? "" : "in", (fcf->flags & FIP_FL_AVAIL)
+ ? "" : "un");
continue;
}
if (!best) {
@@ -1069,7 +1082,8 @@
if (fcf->fabric_name != best->fabric_name ||
fcf->vfid != best->vfid ||
fcf->fc_map != best->fc_map) {
- FIP_DBG("conflicting fabric, VFID, or FC-MAP\n");
+ LIBFCOE_FIP_DBG("Conflicting fabric, VFID, "
+ "or FC-MAP\n");
return;
}
if (fcf->pri < best->pri)
@@ -1113,7 +1127,7 @@
if (sel != fcf) {
fcf = sel; /* the old FCF may have been freed */
if (sel) {
- printk(KERN_INFO "host%d: FIP selected "
+ printk(KERN_INFO "libfcoe: host%d: FIP selected "
"Fibre-Channel Forwarder MAC %s\n",
fip->lp->host->host_no,
print_mac(buf, sel->fcf_mac));
@@ -1123,7 +1137,7 @@
fip->ctlr_ka_time = jiffies + sel->fka_period;
fip->link = 1;
} else {
- printk(KERN_NOTICE "host%d: "
+ printk(KERN_NOTICE "libfcoe: host%d: "
"FIP Fibre-Channel Forwarder timed out. "
"Starting FCF discovery.\n",
fip->lp->host->host_no);
@@ -1247,7 +1261,7 @@
return -EINVAL;
}
fip->state = FIP_ST_NON_FIP;
- FIP_DBG("received FLOGI LS_ACC using non-FIP mode\n");
+ LIBFCOE_FIP_DBG("received FLOGI LS_ACC using non-FIP mode\n");
/*
* FLOGI accepted.
@@ -1276,7 +1290,7 @@
memcpy(fip->dest_addr, sa, ETH_ALEN);
fip->map_dest = 0;
if (fip->state == FIP_ST_NON_FIP)
- FIP_DBG("received FLOGI REQ, "
+ LIBFCOE_FIP_DBG("received FLOGI REQ, "
"using non-FIP mode\n");
fip->state = FIP_ST_NON_FIP;
}
diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
index 89d41a4..5fd2da4 100644
--- a/drivers/scsi/hosts.c
+++ b/drivers/scsi/hosts.c
@@ -40,7 +40,7 @@
#include "scsi_logging.h"
-static int scsi_host_next_hn; /* host_no for next new host */
+static atomic_t scsi_host_next_hn; /* host_no for next new host */
static void scsi_host_cls_release(struct device *dev)
@@ -333,7 +333,11 @@
mutex_init(&shost->scan_mutex);
- shost->host_no = scsi_host_next_hn++; /* XXX(hch): still racy */
+ /*
+ * subtract one because atomic_inc_return() increments first and then
+ * returns; we need the host number as it was before the increment
+ */
+ shost->host_no = atomic_inc_return(&scsi_host_next_hn) - 1;
shost->dma_channel = 0xff;
/* These three are default values which can be overridden */
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
index b4b805e..166d964 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.c
+++ b/drivers/scsi/ibmvscsi/ibmvfc.c
@@ -2254,10 +2254,13 @@
continue;
if (crq->node_name && tgt->ids.node_name != crq->node_name)
continue;
- ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
+ if (tgt->need_login && crq->event == IBMVFC_AE_ELS_LOGO)
+ tgt->logo_rcvd = 1;
+ if (!tgt->need_login || crq->event == IBMVFC_AE_ELS_PLOGI) {
+ ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
+ ibmvfc_reinit_host(vhost);
+ }
}
-
- ibmvfc_reinit_host(vhost);
break;
case IBMVFC_AE_LINK_DOWN:
case IBMVFC_AE_ADAPTER_FAILED:
@@ -2783,27 +2786,27 @@
spin_lock_irqsave(vhost->host->host_lock, flags);
while (!done) {
- /* Pull all the valid messages off the CRQ */
- while ((crq = ibmvfc_next_crq(vhost)) != NULL) {
- ibmvfc_handle_crq(crq, vhost);
- crq->valid = 0;
- }
-
/* Pull all the valid messages off the async CRQ */
while ((async = ibmvfc_next_async_crq(vhost)) != NULL) {
ibmvfc_handle_async(async, vhost);
async->valid = 0;
}
- vio_enable_interrupts(vdev);
- if ((crq = ibmvfc_next_crq(vhost)) != NULL) {
- vio_disable_interrupts(vdev);
+ /* Pull all the valid messages off the CRQ */
+ while ((crq = ibmvfc_next_crq(vhost)) != NULL) {
ibmvfc_handle_crq(crq, vhost);
crq->valid = 0;
- } else if ((async = ibmvfc_next_async_crq(vhost)) != NULL) {
+ }
+
+ vio_enable_interrupts(vdev);
+ if ((async = ibmvfc_next_async_crq(vhost)) != NULL) {
vio_disable_interrupts(vdev);
ibmvfc_handle_async(async, vhost);
async->valid = 0;
+ } else if ((crq = ibmvfc_next_crq(vhost)) != NULL) {
+ vio_disable_interrupts(vdev);
+ ibmvfc_handle_crq(crq, vhost);
+ crq->valid = 0;
} else
done = 1;
}
@@ -2927,7 +2930,11 @@
break;
case IBMVFC_MAD_FAILED:
default:
- if (ibmvfc_retry_cmd(rsp->status, rsp->error))
+ if ((rsp->status & IBMVFC_VIOS_FAILURE) && rsp->error == IBMVFC_PLOGI_REQUIRED)
+ level += ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_plogi);
+ else if (tgt->logo_rcvd)
+ level += ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_plogi);
+ else if (ibmvfc_retry_cmd(rsp->status, rsp->error))
level += ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_prli);
else
ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
@@ -3054,6 +3061,7 @@
return;
kref_get(&tgt->kref);
+ tgt->logo_rcvd = 0;
evt = ibmvfc_get_event(vhost);
vhost->discovery_threads++;
ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT_WAIT);
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.h b/drivers/scsi/ibmvscsi/ibmvfc.h
index c2668d7..007fa1c9 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.h
+++ b/drivers/scsi/ibmvscsi/ibmvfc.h
@@ -605,6 +605,7 @@
int need_login;
int add_rport;
int init_retries;
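+ /* set when an ELS LOGO async event is received for this target */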
+ int logo_rcvd;
u32 cancel_key;
struct ibmvfc_service_parms service_parms;
struct ibmvfc_service_parms service_parms_change;
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index 0f8bc77..5f04550 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -131,13 +131,13 @@
};
static const struct ipr_chip_t ipr_chip[] = {
- { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE, &ipr_chip_cfg[0] },
- { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, &ipr_chip_cfg[0] },
- { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN, &ipr_chip_cfg[0] },
- { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN, &ipr_chip_cfg[0] },
- { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E, &ipr_chip_cfg[0] },
- { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE, &ipr_chip_cfg[1] },
- { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, &ipr_chip_cfg[1] }
+ { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE, IPR_USE_LSI, &ipr_chip_cfg[0] },
+ { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, IPR_USE_LSI, &ipr_chip_cfg[0] },
+ { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN, IPR_USE_LSI, &ipr_chip_cfg[0] },
+ { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN, IPR_USE_LSI, &ipr_chip_cfg[0] },
+ { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E, IPR_USE_MSI, &ipr_chip_cfg[0] },
+ { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE, IPR_USE_LSI, &ipr_chip_cfg[1] },
+ { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, IPR_USE_LSI, &ipr_chip_cfg[1] }
};
static int ipr_max_bus_speeds [] = {
@@ -7367,6 +7367,7 @@
INIT_LIST_HEAD(&ioa_cfg->used_res_q);
INIT_WORK(&ioa_cfg->work_q, ipr_worker_thread);
init_waitqueue_head(&ioa_cfg->reset_wait_q);
+ init_waitqueue_head(&ioa_cfg->msi_wait_q);
ioa_cfg->sdt_state = INACTIVE;
if (ipr_enable_cache)
ioa_cfg->cache_state = CACHE_ENABLED;
@@ -7398,25 +7399,108 @@
}
/**
- * ipr_get_chip_cfg - Find adapter chip configuration
+ * ipr_get_chip_info - Find adapter chip information
* @dev_id: PCI device id struct
*
* Return value:
- * ptr to chip config on success / NULL on failure
+ * ptr to chip information on success / NULL on failure
**/
-static const struct ipr_chip_cfg_t * __devinit
-ipr_get_chip_cfg(const struct pci_device_id *dev_id)
+static const struct ipr_chip_t * __devinit
+ipr_get_chip_info(const struct pci_device_id *dev_id)
{
int i;
for (i = 0; i < ARRAY_SIZE(ipr_chip); i++)
if (ipr_chip[i].vendor == dev_id->vendor &&
ipr_chip[i].device == dev_id->device)
- return ipr_chip[i].cfg;
+ return &ipr_chip[i];
return NULL;
}
/**
+ * ipr_test_intr - Handle the interrupt generated in ipr_test_msi().
+ * @irq: interrupt number
+ * @devp: pointer to the interrupted adapter's ioa_cfg
+ *
+ * Description: Simply set the msi_received flag to 1 indicating that
+ * Message Signaled Interrupts are supported.
+ *
+ * Return value:
+ * IRQ_HANDLED
+ **/
+static irqreturn_t __devinit ipr_test_intr(int irq, void *devp)
+{
+ struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)devp;
+ unsigned long lock_flags = 0;
+ irqreturn_t rc = IRQ_HANDLED;
+
+ spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
+
+ ioa_cfg->msi_received = 1;
+ wake_up(&ioa_cfg->msi_wait_q);
+
+ spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+ return rc;
+}
+
+/**
+ * ipr_test_msi - Test for Message Signaled Interrupt (MSI) support.
+ * @pdev: PCI device struct
+ *
+ * Description: The return value from pci_enable_msi() can not always be
+ * trusted. This routine sets up and initiates a test interrupt to determine
+ * if the interrupt is received via the ipr_test_intr() service routine.
+ * If the test fails, the driver will fall back to LSI.
+ *
+ * Return value:
+ * 0 on success / non-zero on failure
+ **/
+static int __devinit ipr_test_msi(struct ipr_ioa_cfg *ioa_cfg,
+ struct pci_dev *pdev)
+{
+ int rc;
+ volatile u32 int_reg;
+ unsigned long lock_flags = 0;
+
+ ENTER;
+
+ spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
+ init_waitqueue_head(&ioa_cfg->msi_wait_q);
+ ioa_cfg->msi_received = 0;
+ ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
+ writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.clr_interrupt_mask_reg);
+ int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
+ spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+
+ rc = request_irq(pdev->irq, ipr_test_intr, 0, IPR_NAME, ioa_cfg);
+ if (rc) {
+ dev_err(&pdev->dev, "Can not assign irq %d\n", pdev->irq);
+ return rc;
+ } else if (ipr_debug)
+ dev_info(&pdev->dev, "IRQ assigned: %d\n", pdev->irq);
+
+ writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.sense_interrupt_reg);
+ int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
+ wait_event_timeout(ioa_cfg->msi_wait_q, ioa_cfg->msi_received, HZ);
+ ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
+
+ spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
+ if (!ioa_cfg->msi_received) {
+ /* MSI test failed */
+ dev_info(&pdev->dev, "MSI test failed. Falling back to LSI.\n");
+ rc = -EOPNOTSUPP;
+ } else if (ipr_debug)
+ dev_info(&pdev->dev, "MSI test succeeded.\n");
+
+ spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+
+ free_irq(pdev->irq, ioa_cfg);
+
+ LEAVE;
+
+ return rc;
+}
+
+/**
* ipr_probe_ioa - Allocates memory and does first stage of initialization
* @pdev: PCI device struct
* @dev_id: PCI device id struct
@@ -7441,11 +7525,6 @@
goto out;
}
- if (!(rc = pci_enable_msi(pdev)))
- dev_info(&pdev->dev, "MSI enabled\n");
- else if (ipr_debug)
- dev_info(&pdev->dev, "Cannot enable MSI\n");
-
dev_info(&pdev->dev, "Found IOA with IRQ: %d\n", pdev->irq);
host = scsi_host_alloc(&driver_template, sizeof(*ioa_cfg));
@@ -7461,14 +7540,16 @@
ata_host_init(&ioa_cfg->ata_host, &pdev->dev,
sata_port_info.flags, &ipr_sata_ops);
- ioa_cfg->chip_cfg = ipr_get_chip_cfg(dev_id);
+ ioa_cfg->ipr_chip = ipr_get_chip_info(dev_id);
- if (!ioa_cfg->chip_cfg) {
+ if (!ioa_cfg->ipr_chip) {
dev_err(&pdev->dev, "Unknown adapter chipset 0x%04X 0x%04X\n",
dev_id->vendor, dev_id->device);
goto out_scsi_host_put;
}
+ ioa_cfg->chip_cfg = ioa_cfg->ipr_chip->cfg;
+
if (ipr_transop_timeout)
ioa_cfg->transop_timeout = ipr_transop_timeout;
else if (dev_id->driver_data & IPR_USE_LONG_TRANSOP_TIMEOUT)
@@ -7519,6 +7600,18 @@
goto cleanup_nomem;
}
+ /* Enable MSI style interrupts if they are supported. */
+ if (ioa_cfg->ipr_chip->intr_type == IPR_USE_MSI && !pci_enable_msi(pdev)) {
+ rc = ipr_test_msi(ioa_cfg, pdev);
+ if (rc == -EOPNOTSUPP)
+ pci_disable_msi(pdev);
+ else if (rc)
+ goto out_msi_disable;
+ else
+ dev_info(&pdev->dev, "MSI enabled with IRQ: %d\n", pdev->irq);
+ } else if (ipr_debug)
+ dev_info(&pdev->dev, "Cannot enable MSI.\n");
+
/* Save away PCI config space for use following IOA reset */
rc = pci_save_state(pdev);
@@ -7556,7 +7649,9 @@
ioa_cfg->ioa_unit_checked = 1;
ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
- rc = request_irq(pdev->irq, ipr_isr, IRQF_SHARED, IPR_NAME, ioa_cfg);
+ rc = request_irq(pdev->irq, ipr_isr,
+ ioa_cfg->msi_received ? 0 : IRQF_SHARED,
+ IPR_NAME, ioa_cfg);
if (rc) {
dev_err(&pdev->dev, "Couldn't register IRQ %d! rc=%d\n",
@@ -7583,12 +7678,13 @@
ipr_free_mem(ioa_cfg);
cleanup_nomem:
iounmap(ipr_regs);
+out_msi_disable:
+ pci_disable_msi(pdev);
out_release_regions:
pci_release_regions(pdev);
out_scsi_host_put:
scsi_host_put(host);
out_disable:
- pci_disable_msi(pdev);
pci_disable_device(pdev);
goto out;
}
diff --git a/drivers/scsi/ipr.h b/drivers/scsi/ipr.h
index 79a3ae4..4b63dd6 100644
--- a/drivers/scsi/ipr.h
+++ b/drivers/scsi/ipr.h
@@ -37,8 +37,8 @@
/*
* Literals
*/
-#define IPR_DRIVER_VERSION "2.4.2"
-#define IPR_DRIVER_DATE "(January 21, 2009)"
+#define IPR_DRIVER_VERSION "2.4.3"
+#define IPR_DRIVER_DATE "(June 10, 2009)"
/*
* IPR_MAX_CMD_PER_LUN: This defines the maximum number of outstanding
@@ -1025,6 +1025,9 @@
struct ipr_chip_t {
u16 vendor;
u16 device;
+ u16 intr_type;
+#define IPR_USE_LSI 0x00
+#define IPR_USE_MSI 0x01
const struct ipr_chip_cfg_t *cfg;
};
@@ -1094,6 +1097,7 @@
u8 needs_hard_reset:1;
u8 dual_raid:1;
u8 needs_warm_reset:1;
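+ /* set by ipr_test_intr() when the MSI test interrupt arrives */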
+ u8 msi_received:1;
u8 revid;
@@ -1159,6 +1163,7 @@
unsigned int transop_timeout;
const struct ipr_chip_cfg_t *chip_cfg;
+ const struct ipr_chip_t *ipr_chip;
void __iomem *hdw_dma_regs; /* iomapped PCI memory space */
unsigned long hdw_dma_regs_pci; /* raw PCI memory space */
@@ -1179,6 +1184,7 @@
struct work_struct work_q;
wait_queue_head_t reset_wait_q;
+ wait_queue_head_t msi_wait_q;
struct ipr_dump *dump;
enum ipr_sdt_state sdt_state;
diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c
index b7c092d..518dbd9 100644
--- a/drivers/scsi/iscsi_tcp.c
+++ b/drivers/scsi/iscsi_tcp.c
@@ -253,8 +253,6 @@
if (r < 0) {
iscsi_tcp_segment_unmap(segment);
- if (copied || r == -EAGAIN)
- break;
return r;
}
copied += r;
@@ -275,11 +273,17 @@
while (1) {
rc = iscsi_sw_tcp_xmit_segment(tcp_conn, segment);
- if (rc < 0) {
+ /*
+ * We may not have been able to send data because the conn
+ * is getting stopped. libiscsi will know, so propagate the error
+ * for it to do the right thing.
+ */
+ if (rc == -EAGAIN)
+ return rc;
+ else if (rc < 0) {
rc = ISCSI_ERR_XMIT_FAILED;
goto error;
- }
- if (rc == 0)
+ } else if (rc == 0)
break;
consumed += rc;
diff --git a/drivers/scsi/libfc/fc_disc.c b/drivers/scsi/libfc/fc_disc.c
index 4c88065..6fabf66 100644
--- a/drivers/scsi/libfc/fc_disc.c
+++ b/drivers/scsi/libfc/fc_disc.c
@@ -45,14 +45,6 @@
#define FC_DISC_DELAY 3
-static int fc_disc_debug;
-
-#define FC_DEBUG_DISC(fmt...) \
- do { \
- if (fc_disc_debug) \
- FC_DBG(fmt); \
- } while (0)
-
static void fc_disc_gpn_ft_req(struct fc_disc *);
static void fc_disc_gpn_ft_resp(struct fc_seq *, struct fc_frame *, void *);
static int fc_disc_new_target(struct fc_disc *, struct fc_rport *,
@@ -137,8 +129,8 @@
struct fc_rport_libfc_priv *rdata = rport->dd_data;
struct fc_disc *disc = &lport->disc;
- FC_DEBUG_DISC("Received a %d event for port (%6x)\n", event,
- rport->port_id);
+ FC_DISC_DBG(disc, "Received a %d event for port (%6x)\n", event,
+ rport->port_id);
switch (event) {
case RPORT_EV_CREATED:
@@ -191,8 +183,7 @@
lport = disc->lport;
- FC_DEBUG_DISC("Received an RSCN event on port (%6x)\n",
- fc_host_port_id(lport->host));
+ FC_DISC_DBG(disc, "Received an RSCN event\n");
/* make sure the frame contains an RSCN message */
rp = fc_frame_payload_get(fp, sizeof(*rp));
@@ -225,8 +216,8 @@
*/
switch (fmt) {
case ELS_ADDR_FMT_PORT:
- FC_DEBUG_DISC("Port address format for port (%6x)\n",
- ntoh24(pp->rscn_fid));
+ FC_DISC_DBG(disc, "Port address format for port "
+ "(%6x)\n", ntoh24(pp->rscn_fid));
dp = kzalloc(sizeof(*dp), GFP_KERNEL);
if (!dp) {
redisc = 1;
@@ -243,19 +234,19 @@
case ELS_ADDR_FMT_DOM:
case ELS_ADDR_FMT_FAB:
default:
- FC_DEBUG_DISC("Address format is (%d)\n", fmt);
+ FC_DISC_DBG(disc, "Address format is (%d)\n", fmt);
redisc = 1;
break;
}
}
lport->tt.seq_els_rsp_send(sp, ELS_LS_ACC, NULL);
if (redisc) {
- FC_DEBUG_DISC("RSCN received: rediscovering\n");
+ FC_DISC_DBG(disc, "RSCN received: rediscovering\n");
fc_disc_restart(disc);
} else {
- FC_DEBUG_DISC("RSCN received: not rediscovering. "
- "redisc %d state %d in_prog %d\n",
- redisc, lport->state, disc->pending);
+ FC_DISC_DBG(disc, "RSCN received: not rediscovering. "
+ "redisc %d state %d in_prog %d\n",
+ redisc, lport->state, disc->pending);
list_for_each_entry_safe(dp, next, &disc_ports, peers) {
list_del(&dp->peers);
rport = lport->tt.rport_lookup(lport, dp->ids.port_id);
@@ -270,7 +261,7 @@
fc_frame_free(fp);
return;
reject:
- FC_DEBUG_DISC("Received a bad RSCN frame\n");
+ FC_DISC_DBG(disc, "Received a bad RSCN frame\n");
rjt_data.fp = NULL;
rjt_data.reason = ELS_RJT_LOGIC;
rjt_data.explan = ELS_EXPL_NONE;
@@ -302,7 +293,8 @@
mutex_unlock(&disc->disc_mutex);
break;
default:
- FC_DBG("Received an unsupported request. opcode (%x)\n", op);
+ FC_DISC_DBG(disc, "Received an unsupported request, "
+ "the opcode is (%x)\n", op);
break;
}
}
@@ -320,12 +312,10 @@
struct fc_rport_libfc_priv *rdata, *next;
struct fc_lport *lport = disc->lport;
- FC_DEBUG_DISC("Restarting discovery for port (%6x)\n",
- fc_host_port_id(lport->host));
+ FC_DISC_DBG(disc, "Restarting discovery\n");
list_for_each_entry_safe(rdata, next, &disc->rports, peers) {
rport = PRIV_TO_RPORT(rdata);
- FC_DEBUG_DISC("list_del(%6x)\n", rport->port_id);
list_del(&rdata->peers);
lport->tt.rport_logoff(rport);
}
@@ -485,8 +475,7 @@
struct fc_lport *lport = disc->lport;
enum fc_disc_event event;
- FC_DEBUG_DISC("Discovery complete for port (%6x)\n",
- fc_host_port_id(lport->host));
+ FC_DISC_DBG(disc, "Discovery complete\n");
event = disc->event;
disc->event = DISC_EV_NONE;
@@ -510,10 +499,10 @@
{
struct fc_lport *lport = disc->lport;
unsigned long delay = 0;
- if (fc_disc_debug)
- FC_DBG("Error %ld, retries %d/%d\n",
- PTR_ERR(fp), disc->retry_count,
- FC_DISC_RETRY_LIMIT);
+
+ FC_DISC_DBG(disc, "Error %ld, retries %d/%d\n",
+ PTR_ERR(fp), disc->retry_count,
+ FC_DISC_RETRY_LIMIT);
if (!fp || PTR_ERR(fp) == -FC_EX_TIMEOUT) {
/*
@@ -649,9 +638,9 @@
&disc->rogue_rports);
lport->tt.rport_login(rport);
} else
- FC_DBG("Failed to allocate memory for "
- "the newly discovered port (%6x)\n",
- dp.ids.port_id);
+ printk(KERN_WARNING "libfc: Failed to allocate "
+ "memory for the newly discovered port "
+ "(%6x)\n", dp.ids.port_id);
}
if (np->fp_flags & FC_NS_FID_LAST) {
@@ -671,9 +660,8 @@
*/
if (error == 0 && len > 0 && len < sizeof(*np)) {
if (np != &disc->partial_buf) {
- FC_DEBUG_DISC("Partial buffer remains "
- "for discovery by (%6x)\n",
- fc_host_port_id(lport->host));
+ FC_DISC_DBG(disc, "Partial buffer remains "
+ "for discovery\n");
memcpy(&disc->partial_buf, np, len);
}
disc->buf_len = (unsigned char) len;
@@ -721,8 +709,7 @@
int error;
mutex_lock(&disc->disc_mutex);
- FC_DEBUG_DISC("Received a GPN_FT response on port (%6x)\n",
- fc_host_port_id(disc->lport->host));
+ FC_DISC_DBG(disc, "Received a GPN_FT response\n");
if (IS_ERR(fp)) {
fc_disc_error(disc, fp);
@@ -738,30 +725,30 @@
disc->seq_count == 0) {
cp = fc_frame_payload_get(fp, sizeof(*cp));
if (!cp) {
- FC_DBG("GPN_FT response too short, len %d\n",
- fr_len(fp));
+ FC_DISC_DBG(disc, "GPN_FT response too short, len %d\n",
+ fr_len(fp));
} else if (ntohs(cp->ct_cmd) == FC_FS_ACC) {
/* Accepted, parse the response. */
buf = cp + 1;
len -= sizeof(*cp);
} else if (ntohs(cp->ct_cmd) == FC_FS_RJT) {
- FC_DBG("GPN_FT rejected reason %x exp %x "
- "(check zoning)\n", cp->ct_reason,
- cp->ct_explan);
+ FC_DISC_DBG(disc, "GPN_FT rejected reason %x exp %x "
+ "(check zoning)\n", cp->ct_reason,
+ cp->ct_explan);
disc->event = DISC_EV_FAILED;
fc_disc_done(disc);
} else {
- FC_DBG("GPN_FT unexpected response code %x\n",
- ntohs(cp->ct_cmd));
+ FC_DISC_DBG(disc, "GPN_FT unexpected response code "
+ "%x\n", ntohs(cp->ct_cmd));
}
} else if (fr_sof(fp) == FC_SOF_N3 &&
seq_cnt == disc->seq_count) {
buf = fh + 1;
} else {
- FC_DBG("GPN_FT unexpected frame - out of sequence? "
- "seq_cnt %x expected %x sof %x eof %x\n",
- seq_cnt, disc->seq_count, fr_sof(fp), fr_eof(fp));
+ FC_DISC_DBG(disc, "GPN_FT unexpected frame - out of sequence? "
+ "seq_cnt %x expected %x sof %x eof %x\n",
+ seq_cnt, disc->seq_count, fr_sof(fp), fr_eof(fp));
}
if (buf) {
error = fc_disc_gpn_ft_parse(disc, buf, len);
diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
index 7af9bce..2bc22be 100644
--- a/drivers/scsi/libfc/fc_exch.c
+++ b/drivers/scsi/libfc/fc_exch.c
@@ -32,18 +32,7 @@
#include <scsi/libfc.h>
#include <scsi/fc_encode.h>
-/*
- * fc_exch_debug can be set in debugger or at compile time to get more logs.
- */
-static int fc_exch_debug;
-
-#define FC_DEBUG_EXCH(fmt...) \
- do { \
- if (fc_exch_debug) \
- FC_DBG(fmt); \
- } while (0)
-
-static struct kmem_cache *fc_em_cachep; /* cache for exchanges */
+static struct kmem_cache *fc_em_cachep; /* cache for exchanges */
/*
* Structure and function definitions for managing Fibre Channel Exchanges
@@ -333,8 +322,8 @@
if (ep->state & (FC_EX_RST_CLEANUP | FC_EX_DONE))
return;
- FC_DEBUG_EXCH("Exchange (%4x) timed out, notifying the upper layer\n",
- ep->xid);
+ FC_EXCH_DBG(ep, "Exchange timed out, notifying the upper layer\n");
+
if (schedule_delayed_work(&ep->timeout_work,
msecs_to_jiffies(timer_msec)))
fc_exch_hold(ep); /* hold for timer */
@@ -545,7 +534,7 @@
/* alloc a new xid */
xid = fc_em_alloc_xid(mp, fp);
if (!xid) {
- printk(KERN_ERR "fc_em_alloc_xid() failed\n");
+ printk(KERN_WARNING "libfc: Failed to allocate an exhange\n");
goto err;
}
}
@@ -820,8 +809,8 @@
struct fc_exch *ep = fc_seq_exch(sp);
sp = fc_seq_alloc(ep, ep->seq_id++);
- FC_DEBUG_EXCH("exch %4x f_ctl %6x seq %2x\n",
- ep->xid, ep->f_ctl, sp->id);
+ FC_EXCH_DBG(ep, "f_ctl %6x seq %2x\n",
+ ep->f_ctl, sp->id);
return sp;
}
/*
@@ -901,7 +890,7 @@
fc_exch_els_rec(sp, els_data->fp);
break;
default:
- FC_DBG("Invalid ELS CMD:%x\n", els_cmd);
+ FC_EXCH_DBG(fc_seq_exch(sp), "Invalid ELS CMD:%x\n", els_cmd);
}
}
EXPORT_SYMBOL(fc_seq_els_rsp_send);
@@ -1134,7 +1123,7 @@
lp->tt.lport_recv(lp, sp, fp);
fc_exch_release(ep); /* release from lookup */
} else {
- FC_DEBUG_EXCH("exch/seq lookup failed: reject %x\n", reject);
+ FC_EM_DBG(mp, "exch/seq lookup failed: reject %x\n", reject);
fc_frame_free(fp);
}
}
@@ -1242,10 +1231,10 @@
sp = fc_seq_lookup_orig(mp, fp); /* doesn't hold sequence */
if (!sp) {
atomic_inc(&mp->stats.xid_not_found);
- FC_DEBUG_EXCH("seq lookup failed\n");
+ FC_EM_DBG(mp, "seq lookup failed\n");
} else {
atomic_inc(&mp->stats.non_bls_resp);
- FC_DEBUG_EXCH("non-BLS response to sequence");
+ FC_EM_DBG(mp, "non-BLS response to sequence");
}
fc_frame_free(fp);
}
@@ -1266,8 +1255,8 @@
int rc = 1, has_rec = 0;
fh = fc_frame_header_get(fp);
- FC_DEBUG_EXCH("exch: BLS rctl %x - %s\n",
- fh->fh_r_ctl, fc_exch_rctl_name(fh->fh_r_ctl));
+ FC_EXCH_DBG(ep, "exch: BLS rctl %x - %s\n", fh->fh_r_ctl,
+ fc_exch_rctl_name(fh->fh_r_ctl));
if (cancel_delayed_work_sync(&ep->timeout_work))
fc_exch_release(ep); /* release from pending timer hold */
@@ -1359,9 +1348,9 @@
case FC_RCTL_ACK_0:
break;
default:
- FC_DEBUG_EXCH("BLS rctl %x - %s received",
- fh->fh_r_ctl,
- fc_exch_rctl_name(fh->fh_r_ctl));
+ FC_EXCH_DBG(ep, "BLS rctl %x - %s received",
+ fh->fh_r_ctl,
+ fc_exch_rctl_name(fh->fh_r_ctl));
break;
}
fc_frame_free(fp);
@@ -1599,7 +1588,8 @@
if (err == -FC_EX_CLOSED || err == -FC_EX_TIMEOUT)
goto cleanup;
- FC_DBG("Cannot process RRQ, because of frame error %d\n", err);
+ FC_EXCH_DBG(aborted_ep, "Cannot process RRQ, "
+ "frame error %d\n", err);
return;
}
@@ -1608,12 +1598,13 @@
switch (op) {
case ELS_LS_RJT:
- FC_DBG("LS_RJT for RRQ");
+ FC_EXCH_DBG(aborted_ep, "LS_RJT for RRQ");
/* fall through */
case ELS_LS_ACC:
goto cleanup;
default:
- FC_DBG("unexpected response op %x for RRQ", op);
+ FC_EXCH_DBG(aborted_ep, "unexpected response op %x "
+ "for RRQ", op);
return;
}
@@ -1740,8 +1731,8 @@
size_t len;
if (max_xid <= min_xid || min_xid == 0 || max_xid == FC_XID_UNKNOWN) {
- FC_DBG("Invalid min_xid 0x:%x and max_xid 0x:%x\n",
- min_xid, max_xid);
+ FC_LPORT_DBG(lp, "Invalid min_xid 0x:%x and max_xid 0x:%x\n",
+ min_xid, max_xid);
return NULL;
}
@@ -1878,7 +1869,8 @@
/* lport lock ? */
if (!lp || !mp || (lp->state == LPORT_ST_NONE)) {
- FC_DBG("fc_lport or EM is not allocated and configured");
+ FC_LPORT_DBG(lp, "Receiving frames for an lport that "
+ "has not been initialized correctly\n");
fc_frame_free(fp);
return;
}
@@ -1904,7 +1896,7 @@
fc_exch_recv_req(lp, mp, fp);
break;
default:
- FC_DBG("dropping invalid frame (eof %x)", fr_eof(fp));
+ FC_EM_DBG(mp, "dropping invalid frame (eof %x)", fr_eof(fp));
fc_frame_free(fp);
break;
}
diff --git a/drivers/scsi/libfc/fc_fcp.c b/drivers/scsi/libfc/fc_fcp.c
index ad8b747..e303e0d 100644
--- a/drivers/scsi/libfc/fc_fcp.c
+++ b/drivers/scsi/libfc/fc_fcp.c
@@ -43,13 +43,9 @@
MODULE_DESCRIPTION("libfc");
MODULE_LICENSE("GPL v2");
-static int fc_fcp_debug;
-
-#define FC_DEBUG_FCP(fmt...) \
- do { \
- if (fc_fcp_debug) \
- FC_DBG(fmt); \
- } while (0)
+unsigned int fc_debug_logging;
+module_param_named(debug_logging, fc_debug_logging, int, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(debug_logging, "a bit mask of logging levels");
static struct kmem_cache *scsi_pkt_cachep;
@@ -347,8 +343,8 @@
if ((fr_flags(fp) & FCPHF_CRC_UNCHECKED) &&
fc_frame_crc_check(fp))
goto crc_err;
- FC_DEBUG_FCP("data received past end. len %zx offset %zx "
- "data_len %x\n", len, offset, fsp->data_len);
+ FC_FCP_DBG(fsp, "data received past end. len %zx offset %zx "
+ "data_len %x\n", len, offset, fsp->data_len);
fc_fcp_retry_cmd(fsp);
return;
}
@@ -411,7 +407,8 @@
stats->ErrorFrames++;
/* FIXME - per cpu count, not total count! */
if (stats->InvalidCRCCount++ < 5)
- printk(KERN_WARNING "CRC error on data frame for port (%6x)\n",
+ printk(KERN_WARNING "libfc: CRC error on data "
+ "frame for port (%6x)\n",
fc_host_port_id(lp->host));
/*
* Assume the frame is total garbage.
@@ -475,14 +472,14 @@
WARN_ON(seq_blen <= 0);
if (unlikely(offset + seq_blen > fsp->data_len)) {
/* this should never happen */
- FC_DEBUG_FCP("xfer-ready past end. seq_blen %zx offset %zx\n",
- seq_blen, offset);
+ FC_FCP_DBG(fsp, "xfer-ready past end. seq_blen %zx "
+ "offset %zx\n", seq_blen, offset);
fc_fcp_send_abort(fsp);
return 0;
} else if (offset != fsp->xfer_len) {
/* Out of Order Data Request - no problem, but unexpected. */
- FC_DEBUG_FCP("xfer-ready non-contiguous. "
- "seq_blen %zx offset %zx\n", seq_blen, offset);
+ FC_FCP_DBG(fsp, "xfer-ready non-contiguous. "
+ "seq_blen %zx offset %zx\n", seq_blen, offset);
}
/*
@@ -493,7 +490,7 @@
t_blen = fsp->max_payload;
if (lp->seq_offload) {
t_blen = min(seq_blen, (size_t)lp->lso_max);
- FC_DEBUG_FCP("fsp=%p:lso:blen=%zx lso_max=0x%x t_blen=%zx\n",
+ FC_FCP_DBG(fsp, "fsp=%p:lso:blen=%zx lso_max=0x%x t_blen=%zx\n",
fsp, seq_blen, lp->lso_max, t_blen);
}
@@ -694,7 +691,7 @@
if (!can_queue)
can_queue = 1;
lp->host->can_queue = can_queue;
- shost_printk(KERN_ERR, lp->host, "Could not allocate frame.\n"
+ shost_printk(KERN_ERR, lp->host, "libfc: Could not allocate frame.\n"
"Reducing can_queue to %d.\n", can_queue);
done:
spin_unlock_irqrestore(lp->host->host_lock, flags);
@@ -768,7 +765,7 @@
fc_fcp_resp(fsp, fp);
} else {
- FC_DBG("unexpected frame. r_ctl %x\n", r_ctl);
+ FC_FCP_DBG(fsp, "unexpected frame. r_ctl %x\n", r_ctl);
}
unlock:
fc_fcp_unlock_pkt(fsp);
@@ -877,17 +874,17 @@
return;
}
fsp->status_code = FC_DATA_OVRRUN;
- FC_DBG("tgt %6x xfer len %zx greater than expected len %x. "
- "data len %x\n",
- fsp->rport->port_id,
- fsp->xfer_len, expected_len, fsp->data_len);
+ FC_FCP_DBG(fsp, "tgt %6x xfer len %zx greater than expected, "
+ "len %x, data len %x\n",
+ fsp->rport->port_id,
+ fsp->xfer_len, expected_len, fsp->data_len);
}
fc_fcp_complete_locked(fsp);
return;
len_err:
- FC_DBG("short FCP response. flags 0x%x len %u respl %u snsl %u\n",
- flags, fr_len(fp), respl, snsl);
+ FC_FCP_DBG(fsp, "short FCP response. flags 0x%x len %u respl %u "
+ "snsl %u\n", flags, fr_len(fp), respl, snsl);
err:
fsp->status_code = FC_ERROR;
fc_fcp_complete_locked(fsp);
@@ -1107,13 +1104,11 @@
if (fc_fcp_lock_pkt(fsp))
return;
- switch (error) {
- case -FC_EX_CLOSED:
+ if (error == -FC_EX_CLOSED) {
fc_fcp_retry_cmd(fsp);
goto unlock;
- default:
- FC_DBG("unknown error %ld\n", PTR_ERR(fp));
}
+
/*
* clear abort pending, because the lower layer
* decided to force completion.
@@ -1145,10 +1140,10 @@
fsp->wait_for_comp = 0;
if (!rc) {
- FC_DBG("target abort cmd failed\n");
+ FC_FCP_DBG(fsp, "target abort cmd failed\n");
rc = FAILED;
} else if (fsp->state & FC_SRB_ABORTED) {
- FC_DBG("target abort cmd passed\n");
+ FC_FCP_DBG(fsp, "target abort cmd passed\n");
rc = SUCCESS;
fc_fcp_complete_locked(fsp);
}
@@ -1213,7 +1208,7 @@
spin_unlock_bh(&fsp->scsi_pkt_lock);
if (!rc) {
- FC_DBG("lun reset failed\n");
+ FC_SCSI_DBG(lp, "lun reset failed\n");
return FAILED;
}
@@ -1221,7 +1216,7 @@
if (fsp->cdb_status != FCP_TMF_CMPL)
return FAILED;
- FC_DBG("lun reset to lun %u completed\n", lun);
+ FC_SCSI_DBG(lp, "lun reset to lun %u completed\n", lun);
fc_fcp_cleanup_each_cmd(lp, id, lun, FC_CMD_ABORTED);
return SUCCESS;
}
@@ -1388,13 +1383,13 @@
rjt = fc_frame_payload_get(fp, sizeof(*rjt));
switch (rjt->er_reason) {
default:
- FC_DEBUG_FCP("device %x unexpected REC reject "
- "reason %d expl %d\n",
- fsp->rport->port_id, rjt->er_reason,
- rjt->er_explan);
+ FC_FCP_DBG(fsp, "device %x unexpected REC reject "
+ "reason %d expl %d\n",
+ fsp->rport->port_id, rjt->er_reason,
+ rjt->er_explan);
/* fall through */
case ELS_RJT_UNSUP:
- FC_DEBUG_FCP("device does not support REC\n");
+ FC_FCP_DBG(fsp, "device does not support REC\n");
rp = fsp->rport->dd_data;
/*
* if we do not support RECs or got some bogus
@@ -1514,8 +1509,8 @@
break;
default:
- FC_DBG("REC %p fid %x error unexpected error %d\n",
- fsp, fsp->rport->port_id, error);
+ FC_FCP_DBG(fsp, "REC %p fid %x error unexpected error %d\n",
+ fsp, fsp->rport->port_id, error);
fsp->status_code = FC_CMD_PLOGO;
/* fall through */
@@ -1524,9 +1519,9 @@
* Assume REC or LS_ACC was lost.
* The exchange manager will have aborted REC, so retry.
*/
- FC_DBG("REC fid %x error error %d retry %d/%d\n",
- fsp->rport->port_id, error, fsp->recov_retry,
- FC_MAX_RECOV_RETRY);
+ FC_FCP_DBG(fsp, "REC fid %x error error %d retry %d/%d\n",
+ fsp->rport->port_id, error, fsp->recov_retry,
+ FC_MAX_RECOV_RETRY);
if (fsp->recov_retry++ < FC_MAX_RECOV_RETRY)
fc_fcp_rec(fsp);
else
@@ -2011,9 +2006,11 @@
if (lp->state != LPORT_ST_READY)
return rc;
+ FC_SCSI_DBG(lp, "Resetting rport (%6x)\n", rport->port_id);
+
fsp = fc_fcp_pkt_alloc(lp, GFP_NOIO);
if (fsp == NULL) {
- FC_DBG("could not allocate scsi_pkt\n");
+ printk(KERN_WARNING "libfc: could not allocate scsi_pkt\n");
sc_cmd->result = DID_NO_CONNECT << 16;
goto out;
}
@@ -2048,17 +2045,21 @@
struct fc_lport *lp = shost_priv(shost);
unsigned long wait_tmo;
+ FC_SCSI_DBG(lp, "Resetting host\n");
+
lp->tt.lport_reset(lp);
wait_tmo = jiffies + FC_HOST_RESET_TIMEOUT;
while (!fc_fcp_lport_queue_ready(lp) && time_before(jiffies, wait_tmo))
msleep(1000);
if (fc_fcp_lport_queue_ready(lp)) {
- shost_printk(KERN_INFO, shost, "Host reset succeeded.\n");
+ shost_printk(KERN_INFO, shost, "libfc: Host reset succeeded "
+ "on port (%6x)\n", fc_host_port_id(lp->host));
return SUCCESS;
} else {
- shost_printk(KERN_INFO, shost, "Host reset failed. "
- "lport not ready.\n");
+ shost_printk(KERN_INFO, shost, "libfc: Host reset failed, "
+ "port (%6x) is not ready.\n",
+ fc_host_port_id(lp->host));
return FAILED;
}
}
@@ -2117,7 +2118,8 @@
struct fc_fcp_internal *si = fc_get_scsi_internal(lp);
if (!list_empty(&si->scsi_pkt_queue))
- printk(KERN_ERR "Leaked scsi packets.\n");
+ printk(KERN_ERR "libfc: Leaked SCSI packets when destroying "
+ "port (%6x)\n", fc_host_port_id(lp->host));
mempool_destroy(si->scsi_pkt_pool);
kfree(si);
@@ -2166,7 +2168,8 @@
sizeof(struct fc_fcp_pkt),
0, SLAB_HWCACHE_ALIGN, NULL);
if (scsi_pkt_cachep == NULL) {
- FC_DBG("Unable to allocate SRB cache...module load failed!");
+ printk(KERN_ERR "libfc: Unable to allocate SRB cache, "
+ "module load failed!");
return -ENOMEM;
}
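
The fc_fcp.c changes above drop the bare FC_DBG() calls in favour of per-object macros (FC_FCP_DBG for a packet, FC_SCSI_DBG for an lport). As a rough sketch of how such a macro is typically gated -- the FC_FCP_DBG name comes from this series, but fc_debug_logging, FC_FCP_LOGGING and FC_CHECK_LOGGING below are assumed helper names, not taken from this diff:

/* Sketch only: assumes a module-parameter bit mask named fc_debug_logging
 * and an FC_FCP_LOGGING bit; both names are placeholders. */
extern unsigned int fc_debug_logging;

#define FC_FCP_LOGGING		0x10	/* example bit value */

#define FC_CHECK_LOGGING(LEVEL, CMD)				\
	do {							\
		if (unlikely(fc_debug_logging & (LEVEL)))	\
			CMD;					\
	} while (0)

#define FC_FCP_DBG(pkt, fmt, args...)					\
	FC_CHECK_LOGGING(FC_FCP_LOGGING,				\
			 printk(KERN_INFO "host%u: fcp: %6x: " fmt,	\
				(pkt)->lp->host->host_no,		\
				(pkt)->rport->port_id, ##args))
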
diff --git a/drivers/scsi/libfc/fc_lport.c b/drivers/scsi/libfc/fc_lport.c
index e0c2477..745fa55 100644
--- a/drivers/scsi/libfc/fc_lport.c
+++ b/drivers/scsi/libfc/fc_lport.c
@@ -101,14 +101,6 @@
#define DNS_DELAY 3 /* Discovery delay after RSCN (in seconds)*/
-static int fc_lport_debug;
-
-#define FC_DEBUG_LPORT(fmt...) \
- do { \
- if (fc_lport_debug) \
- FC_DBG(fmt); \
- } while (0)
-
static void fc_lport_error(struct fc_lport *, struct fc_frame *);
static void fc_lport_enter_reset(struct fc_lport *);
@@ -151,8 +143,8 @@
struct fc_rport *rport,
enum fc_rport_event event)
{
- FC_DEBUG_LPORT("Received a %d event for port (%6x)\n", event,
- rport->port_id);
+ FC_LPORT_DBG(lport, "Received a %d event for port (%6x)\n", event,
+ rport->port_id);
switch (event) {
case RPORT_EV_CREATED:
@@ -162,19 +154,19 @@
lport->dns_rp = rport;
fc_lport_enter_rpn_id(lport);
} else {
- FC_DEBUG_LPORT("Received an CREATED event on "
- "port (%6x) for the directory "
- "server, but the lport is not "
- "in the DNS state, it's in the "
- "%d state", rport->port_id,
- lport->state);
+ FC_LPORT_DBG(lport, "Received an CREATED event "
+ "on port (%6x) for the directory "
+ "server, but the lport is not "
+ "in the DNS state, it's in the "
+ "%d state", rport->port_id,
+ lport->state);
lport->tt.rport_logoff(rport);
}
mutex_unlock(&lport->lp_mutex);
} else
- FC_DEBUG_LPORT("Received an event for port (%6x) "
- "which is not the directory server\n",
- rport->port_id);
+ FC_LPORT_DBG(lport, "Received an event for port (%6x) "
+ "which is not the directory server\n",
+ rport->port_id);
break;
case RPORT_EV_LOGO:
case RPORT_EV_FAILED:
@@ -185,9 +177,9 @@
mutex_unlock(&lport->lp_mutex);
} else
- FC_DEBUG_LPORT("Received an event for port (%6x) "
- "which is not the directory server\n",
- rport->port_id);
+ FC_LPORT_DBG(lport, "Received an event for port (%6x) "
+ "which is not the directory server\n",
+ rport->port_id);
break;
case RPORT_EV_NONE:
break;
@@ -363,8 +355,8 @@
static void fc_lport_recv_rlir_req(struct fc_seq *sp, struct fc_frame *fp,
struct fc_lport *lport)
{
- FC_DEBUG_LPORT("Received RLIR request while in state %s\n",
- fc_lport_state(lport));
+ FC_LPORT_DBG(lport, "Received RLIR request while in state %s\n",
+ fc_lport_state(lport));
lport->tt.seq_els_rsp_send(sp, ELS_LS_ACC, NULL);
fc_frame_free(fp);
@@ -389,8 +381,8 @@
void *dp;
u32 f_ctl;
- FC_DEBUG_LPORT("Received RLIR request while in state %s\n",
- fc_lport_state(lport));
+ FC_LPORT_DBG(lport, "Received RLIR request while in state %s\n",
+ fc_lport_state(lport));
len = fr_len(in_fp) - sizeof(struct fc_frame_header);
pp = fc_frame_payload_get(in_fp, len);
@@ -437,8 +429,8 @@
size_t len;
u32 f_ctl;
- FC_DEBUG_LPORT("Received RNID request while in state %s\n",
- fc_lport_state(lport));
+ FC_LPORT_DBG(lport, "Received RNID request while in state %s\n",
+ fc_lport_state(lport));
req = fc_frame_payload_get(in_fp, sizeof(*req));
if (!req) {
@@ -498,8 +490,8 @@
size_t len;
u32 f_ctl;
- FC_DEBUG_LPORT("Received ADISC request while in state %s\n",
- fc_lport_state(lport));
+ FC_LPORT_DBG(lport, "Received ADISC request while in state %s\n",
+ fc_lport_state(lport));
req = fc_frame_payload_get(in_fp, sizeof(*req));
if (!req) {
@@ -574,8 +566,8 @@
*/
void fc_linkup(struct fc_lport *lport)
{
- FC_DEBUG_LPORT("Link is up for port (%6x)\n",
- fc_host_port_id(lport->host));
+ printk(KERN_INFO "libfc: Link up on port (%6x)\n",
+ fc_host_port_id(lport->host));
mutex_lock(&lport->lp_mutex);
if (!lport->link_up) {
@@ -595,8 +587,8 @@
void fc_linkdown(struct fc_lport *lport)
{
mutex_lock(&lport->lp_mutex);
- FC_DEBUG_LPORT("Link is down for port (%6x)\n",
- fc_host_port_id(lport->host));
+ printk(KERN_INFO "libfc: Link down on port (%6x)\n",
+ fc_host_port_id(lport->host));
if (lport->link_up) {
lport->link_up = 0;
@@ -701,12 +693,11 @@
{
switch (event) {
case DISC_EV_SUCCESS:
- FC_DEBUG_LPORT("Got a SUCCESS event for port (%6x)\n",
- fc_host_port_id(lport->host));
+ FC_LPORT_DBG(lport, "Discovery succeeded\n");
break;
case DISC_EV_FAILED:
- FC_DEBUG_LPORT("Got a FAILED event for port (%6x)\n",
- fc_host_port_id(lport->host));
+ printk(KERN_ERR "libfc: Discovery failed for port (%6x)\n",
+ fc_host_port_id(lport->host));
mutex_lock(&lport->lp_mutex);
fc_lport_enter_reset(lport);
mutex_unlock(&lport->lp_mutex);
@@ -726,8 +717,8 @@
*/
static void fc_lport_enter_ready(struct fc_lport *lport)
{
- FC_DEBUG_LPORT("Port (%6x) entered Ready from state %s\n",
- fc_host_port_id(lport->host), fc_lport_state(lport));
+ FC_LPORT_DBG(lport, "Entered READY from state %s\n",
+ fc_lport_state(lport));
fc_lport_state_enter(lport, LPORT_ST_READY);
@@ -762,8 +753,8 @@
u32 local_fid;
u32 f_ctl;
- FC_DEBUG_LPORT("Received FLOGI request while in state %s\n",
- fc_lport_state(lport));
+ FC_LPORT_DBG(lport, "Received FLOGI request while in state %s\n",
+ fc_lport_state(lport));
fh = fc_frame_header_get(rx_fp);
remote_fid = ntoh24(fh->fh_s_id);
@@ -772,12 +763,11 @@
goto out;
remote_wwpn = get_unaligned_be64(&flp->fl_wwpn);
if (remote_wwpn == lport->wwpn) {
- FC_DBG("FLOGI from port with same WWPN %llx "
- "possible configuration error\n",
- (unsigned long long)remote_wwpn);
+ printk(KERN_WARNING "libfc: Received FLOGI from port "
+ "with same WWPN %llx\n", remote_wwpn);
goto out;
}
- FC_DBG("FLOGI from port WWPN %llx\n", (unsigned long long)remote_wwpn);
+ FC_LPORT_DBG(lport, "FLOGI from port WWPN %llx\n", remote_wwpn);
/*
* XXX what is the right thing to do for FIDs?
@@ -909,7 +899,8 @@
}
}
} else {
- FC_DBG("dropping invalid frame (eof %x)\n", fr_eof(fp));
+ FC_LPORT_DBG(lport, "dropping invalid frame (eof %x)\n",
+ fr_eof(fp));
fc_frame_free(fp);
}
mutex_unlock(&lport->lp_mutex);
@@ -947,8 +938,8 @@
*/
static void fc_lport_enter_reset(struct fc_lport *lport)
{
- FC_DEBUG_LPORT("Port (%6x) entered RESET state from %s state\n",
- fc_host_port_id(lport->host), fc_lport_state(lport));
+ FC_LPORT_DBG(lport, "Entered RESET state from %s state\n",
+ fc_lport_state(lport));
fc_lport_state_enter(lport, LPORT_ST_RESET);
@@ -982,9 +973,9 @@
static void fc_lport_error(struct fc_lport *lport, struct fc_frame *fp)
{
unsigned long delay = 0;
- FC_DEBUG_LPORT("Error %ld in state %s, retries %d\n",
- PTR_ERR(fp), fc_lport_state(lport),
- lport->retry_count);
+ FC_LPORT_DBG(lport, "Error %ld in state %s, retries %d\n",
+ PTR_ERR(fp), fc_lport_state(lport),
+ lport->retry_count);
if (!fp || PTR_ERR(fp) == -FC_EX_TIMEOUT) {
/*
@@ -1040,11 +1031,11 @@
mutex_lock(&lport->lp_mutex);
- FC_DEBUG_LPORT("Received a RFT_ID response\n");
+ FC_LPORT_DBG(lport, "Received a RFT_ID response\n");
if (lport->state != LPORT_ST_RFT_ID) {
- FC_DBG("Received a RFT_ID response, but in state %s\n",
- fc_lport_state(lport));
+ FC_LPORT_DBG(lport, "Received a RFT_ID response, but in state "
+ "%s\n", fc_lport_state(lport));
if (IS_ERR(fp))
goto err;
goto out;
@@ -1094,11 +1085,11 @@
mutex_lock(&lport->lp_mutex);
- FC_DEBUG_LPORT("Received a RPN_ID response\n");
+ FC_LPORT_DBG(lport, "Received a RPN_ID response\n");
if (lport->state != LPORT_ST_RPN_ID) {
- FC_DBG("Received a RPN_ID response, but in state %s\n",
- fc_lport_state(lport));
+ FC_LPORT_DBG(lport, "Received a RPN_ID response, but in state "
+ "%s\n", fc_lport_state(lport));
if (IS_ERR(fp))
goto err;
goto out;
@@ -1146,11 +1137,11 @@
mutex_lock(&lport->lp_mutex);
- FC_DEBUG_LPORT("Received a SCR response\n");
+ FC_LPORT_DBG(lport, "Received a SCR response\n");
if (lport->state != LPORT_ST_SCR) {
- FC_DBG("Received a SCR response, but in state %s\n",
- fc_lport_state(lport));
+ FC_LPORT_DBG(lport, "Received a SCR response, but in state "
+ "%s\n", fc_lport_state(lport));
if (IS_ERR(fp))
goto err;
goto out;
@@ -1184,8 +1175,8 @@
{
struct fc_frame *fp;
- FC_DEBUG_LPORT("Port (%6x) entered SCR state from %s state\n",
- fc_host_port_id(lport->host), fc_lport_state(lport));
+ FC_LPORT_DBG(lport, "Entered SCR state from %s state\n",
+ fc_lport_state(lport));
fc_lport_state_enter(lport, LPORT_ST_SCR);
@@ -1213,8 +1204,8 @@
struct fc_ns_fts *lps;
int i;
- FC_DEBUG_LPORT("Port (%6x) entered RFT_ID state from %s state\n",
- fc_host_port_id(lport->host), fc_lport_state(lport));
+ FC_LPORT_DBG(lport, "Entered RFT_ID state from %s state\n",
+ fc_lport_state(lport));
fc_lport_state_enter(lport, LPORT_ST_RFT_ID);
@@ -1253,8 +1244,8 @@
{
struct fc_frame *fp;
- FC_DEBUG_LPORT("Port (%6x) entered RPN_ID state from %s state\n",
- fc_host_port_id(lport->host), fc_lport_state(lport));
+ FC_LPORT_DBG(lport, "Entered RPN_ID state from %s state\n",
+ fc_lport_state(lport));
fc_lport_state_enter(lport, LPORT_ST_RPN_ID);
@@ -1294,8 +1285,8 @@
dp.ids.roles = FC_RPORT_ROLE_UNKNOWN;
dp.lp = lport;
- FC_DEBUG_LPORT("Port (%6x) entered DNS state from %s state\n",
- fc_host_port_id(lport->host), fc_lport_state(lport));
+ FC_LPORT_DBG(lport, "Entered DNS state from %s state\n",
+ fc_lport_state(lport));
fc_lport_state_enter(lport, LPORT_ST_DNS);
@@ -1374,11 +1365,11 @@
mutex_lock(&lport->lp_mutex);
- FC_DEBUG_LPORT("Received a LOGO response\n");
+ FC_LPORT_DBG(lport, "Received a LOGO response\n");
if (lport->state != LPORT_ST_LOGO) {
- FC_DBG("Received a LOGO response, but in state %s\n",
- fc_lport_state(lport));
+ FC_LPORT_DBG(lport, "Received a LOGO response, but in state "
+ "%s\n", fc_lport_state(lport));
if (IS_ERR(fp))
goto err;
goto out;
@@ -1413,8 +1404,8 @@
struct fc_frame *fp;
struct fc_els_logo *logo;
- FC_DEBUG_LPORT("Port (%6x) entered LOGO state from %s state\n",
- fc_host_port_id(lport->host), fc_lport_state(lport));
+ FC_LPORT_DBG(lport, "Entered LOGO state from %s state\n",
+ fc_lport_state(lport));
fc_lport_state_enter(lport, LPORT_ST_LOGO);
@@ -1456,11 +1447,11 @@
mutex_lock(&lport->lp_mutex);
- FC_DEBUG_LPORT("Received a FLOGI response\n");
+ FC_LPORT_DBG(lport, "Received a FLOGI response\n");
if (lport->state != LPORT_ST_FLOGI) {
- FC_DBG("Received a FLOGI response, but in state %s\n",
- fc_lport_state(lport));
+ FC_LPORT_DBG(lport, "Received a FLOGI response, but in state "
+ "%s\n", fc_lport_state(lport));
if (IS_ERR(fp))
goto err;
goto out;
@@ -1475,7 +1466,8 @@
did = ntoh24(fh->fh_d_id);
if (fc_frame_payload_op(fp) == ELS_LS_ACC && did != 0) {
- FC_DEBUG_LPORT("Assigned fid %x\n", did);
+ printk(KERN_INFO "libfc: Assigned FID (%6x) in FLOGI response\n",
+ did);
fc_host_port_id(lport->host) = did;
flp = fc_frame_payload_get(fp, sizeof(*flp));
@@ -1494,7 +1486,8 @@
if (e_d_tov > lport->e_d_tov)
lport->e_d_tov = e_d_tov;
lport->r_a_tov = 2 * e_d_tov;
- FC_DBG("Point-to-Point mode\n");
+ printk(KERN_INFO "libfc: Port (%6x) entered "
+ "point to point mode\n", did);
fc_lport_ptp_setup(lport, ntoh24(fh->fh_s_id),
get_unaligned_be64(
&flp->fl_wwpn),
@@ -1517,7 +1510,7 @@
}
}
} else {
- FC_DBG("bad FLOGI response\n");
+ FC_LPORT_DBG(lport, "Bad FLOGI response\n");
}
out:
@@ -1537,7 +1530,8 @@
{
struct fc_frame *fp;
- FC_DEBUG_LPORT("Processing FLOGI state\n");
+ FC_LPORT_DBG(lport, "Entered FLOGI state from %s state\n",
+ fc_lport_state(lport));
fc_lport_state_enter(lport, LPORT_ST_FLOGI);
diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c
index 7bfbff7..7162385f 100644
--- a/drivers/scsi/libfc/fc_rport.c
+++ b/drivers/scsi/libfc/fc_rport.c
@@ -55,14 +55,6 @@
#include <scsi/libfc.h>
#include <scsi/fc_encode.h>
-static int fc_rport_debug;
-
-#define FC_DEBUG_RPORT(fmt...) \
- do { \
- if (fc_rport_debug) \
- FC_DBG(fmt); \
- } while (0)
-
struct workqueue_struct *rport_event_queue;
static void fc_rport_enter_plogi(struct fc_rport *);
@@ -97,7 +89,7 @@
static void fc_rport_rogue_destroy(struct device *dev)
{
struct fc_rport *rport = dev_to_rport(dev);
- FC_DEBUG_RPORT("Destroying rogue rport (%6x)\n", rport->port_id);
+ FC_RPORT_DBG(rport, "Destroying rogue rport\n");
kfree(rport);
}
@@ -263,8 +255,8 @@
fc_rport_state_enter(new_rport, RPORT_ST_READY);
} else {
- FC_DBG("Failed to create the rport for port "
- "(%6x).\n", ids.port_id);
+ printk(KERN_WARNING "libfc: Failed to allocate "
+ " memory for rport (%6x)\n", ids.port_id);
event = RPORT_EV_FAILED;
}
if (rport->port_id != FC_FID_DIR_SERV)
@@ -309,7 +301,7 @@
mutex_lock(&rdata->rp_mutex);
- FC_DEBUG_RPORT("Login to port (%6x)\n", rport->port_id);
+ FC_RPORT_DBG(rport, "Login to port\n");
fc_rport_enter_plogi(rport);
@@ -329,16 +321,13 @@
int fc_rport_logoff(struct fc_rport *rport)
{
struct fc_rport_libfc_priv *rdata = rport->dd_data;
- struct fc_lport *lport = rdata->local_port;
mutex_lock(&rdata->rp_mutex);
- FC_DEBUG_RPORT("Remove port (%6x)\n", rport->port_id);
+ FC_RPORT_DBG(rport, "Remove port\n");
if (rdata->rp_state == RPORT_ST_NONE) {
- FC_DEBUG_RPORT("(%6x): Port (%6x) in NONE state,"
- " not removing", fc_host_port_id(lport->host),
- rport->port_id);
+ FC_RPORT_DBG(rport, "Port in NONE state, not removing\n");
mutex_unlock(&rdata->rp_mutex);
goto out;
}
@@ -379,7 +368,7 @@
fc_rport_state_enter(rport, RPORT_ST_READY);
- FC_DEBUG_RPORT("Port (%6x) is Ready\n", rport->port_id);
+ FC_RPORT_DBG(rport, "Port is Ready\n");
rdata->event = RPORT_EV_CREATED;
queue_work(rport_event_queue, &rdata->event_work);
@@ -436,8 +425,8 @@
{
struct fc_rport_libfc_priv *rdata = rport->dd_data;
- FC_DEBUG_RPORT("Error %ld in state %s, retries %d\n",
- PTR_ERR(fp), fc_rport_state(rport), rdata->retries);
+ FC_RPORT_DBG(rport, "Error %ld in state %s, retries %d\n",
+ PTR_ERR(fp), fc_rport_state(rport), rdata->retries);
switch (rdata->rp_state) {
case RPORT_ST_PLOGI:
@@ -479,8 +468,8 @@
return fc_rport_error(rport, fp);
if (rdata->retries < rdata->local_port->max_rport_retry_count) {
- FC_DEBUG_RPORT("Error %ld in state %s, retrying\n",
- PTR_ERR(fp), fc_rport_state(rport));
+ FC_RPORT_DBG(rport, "Error %ld in state %s, retrying\n",
+ PTR_ERR(fp), fc_rport_state(rport));
rdata->retries++;
/* no additional delay on exchange timeouts */
if (PTR_ERR(fp) == -FC_EX_TIMEOUT)
@@ -517,12 +506,11 @@
mutex_lock(&rdata->rp_mutex);
- FC_DEBUG_RPORT("Received a PLOGI response from port (%6x)\n",
- rport->port_id);
+ FC_RPORT_DBG(rport, "Received a PLOGI response\n");
if (rdata->rp_state != RPORT_ST_PLOGI) {
- FC_DBG("Received a PLOGI response, but in state %s\n",
- fc_rport_state(rport));
+ FC_RPORT_DBG(rport, "Received a PLOGI response, but in state "
+ "%s\n", fc_rport_state(rport));
if (IS_ERR(fp))
goto err;
goto out;
@@ -583,8 +571,8 @@
struct fc_lport *lport = rdata->local_port;
struct fc_frame *fp;
- FC_DEBUG_RPORT("Port (%6x) entered PLOGI state from %s state\n",
- rport->port_id, fc_rport_state(rport));
+ FC_RPORT_DBG(rport, "Port entered PLOGI state from %s state\n",
+ fc_rport_state(rport));
fc_rport_state_enter(rport, RPORT_ST_PLOGI);
@@ -628,12 +616,11 @@
mutex_lock(&rdata->rp_mutex);
- FC_DEBUG_RPORT("Received a PRLI response from port (%6x)\n",
- rport->port_id);
+ FC_RPORT_DBG(rport, "Received a PRLI response\n");
if (rdata->rp_state != RPORT_ST_PRLI) {
- FC_DBG("Received a PRLI response, but in state %s\n",
- fc_rport_state(rport));
+ FC_RPORT_DBG(rport, "Received a PRLI response, but in state "
+ "%s\n", fc_rport_state(rport));
if (IS_ERR(fp))
goto err;
goto out;
@@ -663,7 +650,7 @@
fc_rport_enter_rtv(rport);
} else {
- FC_DBG("Bad ELS response\n");
+ FC_RPORT_DBG(rport, "Bad ELS response for PRLI command\n");
rdata->event = RPORT_EV_FAILED;
fc_rport_state_enter(rport, RPORT_ST_NONE);
queue_work(rport_event_queue, &rdata->event_work);
@@ -695,12 +682,11 @@
mutex_lock(&rdata->rp_mutex);
- FC_DEBUG_RPORT("Received a LOGO response from port (%6x)\n",
- rport->port_id);
+ FC_RPORT_DBG(rport, "Received a LOGO response\n");
if (rdata->rp_state != RPORT_ST_LOGO) {
- FC_DEBUG_RPORT("Received a LOGO response, but in state %s\n",
- fc_rport_state(rport));
+ FC_RPORT_DBG(rport, "Received a LOGO response, but in state "
+ "%s\n", fc_rport_state(rport));
if (IS_ERR(fp))
goto err;
goto out;
@@ -715,7 +701,7 @@
if (op == ELS_LS_ACC) {
fc_rport_enter_rtv(rport);
} else {
- FC_DBG("Bad ELS response\n");
+ FC_RPORT_DBG(rport, "Bad ELS response for LOGO command\n");
rdata->event = RPORT_EV_LOGO;
fc_rport_state_enter(rport, RPORT_ST_NONE);
queue_work(rport_event_queue, &rdata->event_work);
@@ -745,8 +731,8 @@
} *pp;
struct fc_frame *fp;
- FC_DEBUG_RPORT("Port (%6x) entered PRLI state from %s state\n",
- rport->port_id, fc_rport_state(rport));
+ FC_RPORT_DBG(rport, "Port entered PRLI state from %s state\n",
+ fc_rport_state(rport));
fc_rport_state_enter(rport, RPORT_ST_PRLI);
@@ -784,12 +770,11 @@
mutex_lock(&rdata->rp_mutex);
- FC_DEBUG_RPORT("Received a RTV response from port (%6x)\n",
- rport->port_id);
+ FC_RPORT_DBG(rport, "Received a RTV response\n");
if (rdata->rp_state != RPORT_ST_RTV) {
- FC_DBG("Received a RTV response, but in state %s\n",
- fc_rport_state(rport));
+ FC_RPORT_DBG(rport, "Received a RTV response, but in state "
+ "%s\n", fc_rport_state(rport));
if (IS_ERR(fp))
goto err;
goto out;
@@ -844,8 +829,8 @@
struct fc_rport_libfc_priv *rdata = rport->dd_data;
struct fc_lport *lport = rdata->local_port;
- FC_DEBUG_RPORT("Port (%6x) entered RTV state from %s state\n",
- rport->port_id, fc_rport_state(rport));
+ FC_RPORT_DBG(rport, "Port entered RTV state from %s state\n",
+ fc_rport_state(rport));
fc_rport_state_enter(rport, RPORT_ST_RTV);
@@ -875,8 +860,8 @@
struct fc_lport *lport = rdata->local_port;
struct fc_frame *fp;
- FC_DEBUG_RPORT("Port (%6x) entered LOGO state from %s state\n",
- rport->port_id, fc_rport_state(rport));
+ FC_RPORT_DBG(rport, "Port entered LOGO state from %s state\n",
+ fc_rport_state(rport));
fc_rport_state_enter(rport, RPORT_ST_LOGO);
@@ -983,14 +968,13 @@
fh = fc_frame_header_get(fp);
- FC_DEBUG_RPORT("Received PLOGI request from port (%6x) "
- "while in state %s\n", ntoh24(fh->fh_s_id),
- fc_rport_state(rport));
+ FC_RPORT_DBG(rport, "Received PLOGI request while in state %s\n",
+ fc_rport_state(rport));
sid = ntoh24(fh->fh_s_id);
pl = fc_frame_payload_get(fp, sizeof(*pl));
if (!pl) {
- FC_DBG("incoming PLOGI from %x too short\n", sid);
+ FC_RPORT_DBG(rport, "Received PLOGI too short\n");
WARN_ON(1);
/* XXX TBD: send reject? */
fc_frame_free(fp);
@@ -1012,26 +996,26 @@
*/
switch (rdata->rp_state) {
case RPORT_ST_INIT:
- FC_DEBUG_RPORT("incoming PLOGI from %6x wwpn %llx state INIT "
- "- reject\n", sid, (unsigned long long)wwpn);
+ FC_RPORT_DBG(rport, "Received PLOGI, wwpn %llx state INIT "
+ "- reject\n", (unsigned long long)wwpn);
reject = ELS_RJT_UNSUP;
break;
case RPORT_ST_PLOGI:
- FC_DEBUG_RPORT("incoming PLOGI from %x in PLOGI state %d\n",
- sid, rdata->rp_state);
+ FC_RPORT_DBG(rport, "Received PLOGI in PLOGI state %d\n",
+ rdata->rp_state);
if (wwpn < lport->wwpn)
reject = ELS_RJT_INPROG;
break;
case RPORT_ST_PRLI:
case RPORT_ST_READY:
- FC_DEBUG_RPORT("incoming PLOGI from %x in logged-in state %d "
- "- ignored for now\n", sid, rdata->rp_state);
+ FC_RPORT_DBG(rport, "Received PLOGI in logged-in state %d "
+ "- ignored for now\n", rdata->rp_state);
/* XXX TBD - should reset */
break;
case RPORT_ST_NONE:
default:
- FC_DEBUG_RPORT("incoming PLOGI from %x in unexpected "
- "state %d\n", sid, rdata->rp_state);
+ FC_RPORT_DBG(rport, "Received PLOGI in unexpected "
+ "state %d\n", rdata->rp_state);
fc_frame_free(fp);
return;
break;
@@ -1115,9 +1099,8 @@
fh = fc_frame_header_get(rx_fp);
- FC_DEBUG_RPORT("Received PRLI request from port (%6x) "
- "while in state %s\n", ntoh24(fh->fh_s_id),
- fc_rport_state(rport));
+ FC_RPORT_DBG(rport, "Received PRLI request while in state %s\n",
+ fc_rport_state(rport));
switch (rdata->rp_state) {
case RPORT_ST_PRLI:
@@ -1252,9 +1235,8 @@
fh = fc_frame_header_get(fp);
- FC_DEBUG_RPORT("Received PRLO request from port (%6x) "
- "while in state %s\n", ntoh24(fh->fh_s_id),
- fc_rport_state(rport));
+ FC_RPORT_DBG(rport, "Received PRLO request while in state %s\n",
+ fc_rport_state(rport));
if (rdata->rp_state == RPORT_ST_NONE) {
fc_frame_free(fp);
@@ -1286,9 +1268,8 @@
fh = fc_frame_header_get(fp);
- FC_DEBUG_RPORT("Received LOGO request from port (%6x) "
- "while in state %s\n", ntoh24(fh->fh_s_id),
- fc_rport_state(rport));
+ FC_RPORT_DBG(rport, "Received LOGO request while in state %s\n",
+ fc_rport_state(rport));
if (rdata->rp_state == RPORT_ST_NONE) {
fc_frame_free(fp);
@@ -1308,7 +1289,6 @@
flush_workqueue(rport_event_queue);
}
-
int fc_rport_init(struct fc_lport *lport)
{
if (!lport->tt.rport_create)
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index 59908ae..716cc34 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -38,15 +38,30 @@
#include <scsi/scsi_transport_iscsi.h>
#include <scsi/libiscsi.h>
-static int iscsi_dbg_lib;
-module_param_named(debug_libiscsi, iscsi_dbg_lib, int, S_IRUGO | S_IWUSR);
-MODULE_PARM_DESC(debug_libiscsi, "Turn on debugging for libiscsi module. "
- "Set to 1 to turn on, and zero to turn off. Default "
- "is off.");
+static int iscsi_dbg_lib_conn;
+module_param_named(debug_libiscsi_conn, iscsi_dbg_lib_conn, int,
+ S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(debug_libiscsi_conn,
+ "Turn on debugging for connections in libiscsi module. "
+ "Set to 1 to turn on, and zero to turn off. Default is off.");
+
+static int iscsi_dbg_lib_session;
+module_param_named(debug_libiscsi_session, iscsi_dbg_lib_session, int,
+ S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(debug_libiscsi_session,
+ "Turn on debugging for sessions in libiscsi module. "
+ "Set to 1 to turn on, and zero to turn off. Default is off.");
+
+static int iscsi_dbg_lib_eh;
+module_param_named(debug_libiscsi_eh, iscsi_dbg_lib_eh, int,
+ S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(debug_libiscsi_eh,
+ "Turn on debugging for error handling in libiscsi module. "
+ "Set to 1 to turn on, and zero to turn off. Default is off.");
#define ISCSI_DBG_CONN(_conn, dbg_fmt, arg...) \
do { \
- if (iscsi_dbg_lib) \
+ if (iscsi_dbg_lib_conn) \
iscsi_conn_printk(KERN_INFO, _conn, \
"%s " dbg_fmt, \
__func__, ##arg); \
@@ -54,7 +69,15 @@
#define ISCSI_DBG_SESSION(_session, dbg_fmt, arg...) \
do { \
- if (iscsi_dbg_lib) \
+ if (iscsi_dbg_lib_session) \
+ iscsi_session_printk(KERN_INFO, _session, \
+ "%s " dbg_fmt, \
+ __func__, ##arg); \
+ } while (0);
+
+#define ISCSI_DBG_EH(_session, dbg_fmt, arg...) \
+ do { \
+ if (iscsi_dbg_lib_eh) \
iscsi_session_printk(KERN_INFO, _session, \
"%s " dbg_fmt, \
__func__, ##arg); \
@@ -954,6 +977,7 @@
task = iscsi_itt_to_ctask(conn, hdr->itt);
if (!task)
return ISCSI_ERR_BAD_ITT;
+ task->last_xfer = jiffies;
break;
case ISCSI_OP_R2T:
/*
@@ -1192,10 +1216,12 @@
spin_unlock_bh(&conn->session->lock);
rc = conn->session->tt->xmit_task(task);
spin_lock_bh(&conn->session->lock);
- __iscsi_put_task(task);
- if (!rc)
+ if (!rc) {
/* done with this task */
+ task->last_xfer = jiffies;
conn->task = NULL;
+ }
+ __iscsi_put_task(task);
return rc;
}
@@ -1361,6 +1387,9 @@
task->state = ISCSI_TASK_PENDING;
task->conn = conn;
task->sc = sc;
+ task->have_checked_conn = false;
+ task->last_timeout = jiffies;
+ task->last_xfer = jiffies;
INIT_LIST_HEAD(&task->running);
return task;
}
@@ -1555,10 +1584,10 @@
spin_lock_bh(&session->lock);
if (session->state == ISCSI_STATE_TERMINATE) {
failed:
- iscsi_session_printk(KERN_INFO, session,
- "failing target reset: Could not log "
- "back into target [age %d]\n",
- session->age);
+ ISCSI_DBG_EH(session,
+ "failing target reset: Could not log back into "
+ "target [age %d]\n",
+ session->age);
spin_unlock_bh(&session->lock);
mutex_unlock(&session->eh_mutex);
return FAILED;
@@ -1572,7 +1601,7 @@
*/
iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
- ISCSI_DBG_SESSION(session, "wait for relogin\n");
+ ISCSI_DBG_EH(session, "wait for relogin\n");
wait_event_interruptible(conn->ehwait,
session->state == ISCSI_STATE_TERMINATE ||
session->state == ISCSI_STATE_LOGGED_IN ||
@@ -1582,10 +1611,10 @@
mutex_lock(&session->eh_mutex);
spin_lock_bh(&session->lock);
- if (session->state == ISCSI_STATE_LOGGED_IN)
- iscsi_session_printk(KERN_INFO, session,
- "target reset succeeded\n");
- else
+ if (session->state == ISCSI_STATE_LOGGED_IN) {
+ ISCSI_DBG_EH(session,
+ "target reset succeeded\n");
+ } else
goto failed;
spin_unlock_bh(&session->lock);
mutex_unlock(&session->eh_mutex);
@@ -1601,7 +1630,7 @@
spin_lock(&session->lock);
if (conn->tmf_state == TMF_QUEUED) {
conn->tmf_state = TMF_TIMEDOUT;
- ISCSI_DBG_SESSION(session, "tmf timedout\n");
+ ISCSI_DBG_EH(session, "tmf timedout\n");
/* unblock eh_abort() */
wake_up(&conn->ehwait);
}
@@ -1621,7 +1650,7 @@
spin_unlock_bh(&session->lock);
iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
spin_lock_bh(&session->lock);
- ISCSI_DBG_SESSION(session, "tmf exec failure\n");
+ ISCSI_DBG_EH(session, "tmf exec failure\n");
return -EPERM;
}
conn->tmfcmd_pdus_cnt++;
@@ -1629,7 +1658,7 @@
conn->tmf_timer.function = iscsi_tmf_timedout;
conn->tmf_timer.data = (unsigned long)conn;
add_timer(&conn->tmf_timer);
- ISCSI_DBG_SESSION(session, "tmf set timeout\n");
+ ISCSI_DBG_EH(session, "tmf set timeout\n");
spin_unlock_bh(&session->lock);
mutex_unlock(&session->eh_mutex);
@@ -1716,17 +1745,18 @@
return 0;
}
-static enum blk_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *scmd)
+static enum blk_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *sc)
{
+ enum blk_eh_timer_return rc = BLK_EH_NOT_HANDLED;
+ struct iscsi_task *task = NULL;
struct iscsi_cls_session *cls_session;
struct iscsi_session *session;
struct iscsi_conn *conn;
- enum blk_eh_timer_return rc = BLK_EH_NOT_HANDLED;
- cls_session = starget_to_session(scsi_target(scmd->device));
+ cls_session = starget_to_session(scsi_target(sc->device));
session = cls_session->dd_data;
- ISCSI_DBG_SESSION(session, "scsi cmd %p timedout\n", scmd);
+ ISCSI_DBG_EH(session, "scsi cmd %p timedout\n", sc);
spin_lock(&session->lock);
if (session->state != ISCSI_STATE_LOGGED_IN) {
@@ -1745,6 +1775,26 @@
goto done;
}
+ task = (struct iscsi_task *)sc->SCp.ptr;
+ if (!task)
+ goto done;
+ /*
+ * If we have sent (at least queued to the network layer) a pdu or
+ * recvd one for the task since the last timeout ask for
+ * more time. If on the next timeout we have not made progress
+ * we can check if it is the task or connection when we send the
+ * nop as a ping.
+ */
+ if (time_after_eq(task->last_xfer, task->last_timeout)) {
+ ISCSI_DBG_EH(session, "Command making progress. Asking "
+ "scsi-ml for more time to complete. "
+ "Last data recv at %lu. Last timeout was at "
+ "%lu\n.", task->last_xfer, task->last_timeout);
+ task->have_checked_conn = false;
+ rc = BLK_EH_RESET_TIMER;
+ goto done;
+ }
+
if (!conn->recv_timeout && !conn->ping_timeout)
goto done;
/*
@@ -1755,23 +1805,32 @@
rc = BLK_EH_RESET_TIMER;
goto done;
}
+
+ /* Assumes nop timeout is shorter than scsi cmd timeout */
+ if (task->have_checked_conn)
+ goto done;
+
/*
- * if we are about to check the transport then give the command
- * more time
+ * We are already checking the transport, or a nop sent by a
+ * previous command timeout is still outstanding
*/
- if (time_before_eq(conn->last_recv + (conn->recv_timeout * HZ),
- jiffies)) {
+ if (conn->ping_task) {
+ task->have_checked_conn = true;
rc = BLK_EH_RESET_TIMER;
goto done;
}
- /* if in the middle of checking the transport then give us more time */
- if (conn->ping_task)
- rc = BLK_EH_RESET_TIMER;
+ /* Make sure there is a transport check done */
+ iscsi_send_nopout(conn, NULL);
+ task->have_checked_conn = true;
+ rc = BLK_EH_RESET_TIMER;
+
done:
+ if (task)
+ task->last_timeout = jiffies;
spin_unlock(&session->lock);
- ISCSI_DBG_SESSION(session, "return %s\n", rc == BLK_EH_RESET_TIMER ?
- "timer reset" : "nh");
+ ISCSI_DBG_EH(session, "return %s\n", rc == BLK_EH_RESET_TIMER ?
+ "timer reset" : "nh");
return rc;
}
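
The timeout handler above now checks whether the task has moved data since its previous timeout before escalating: if time_after_eq(task->last_xfer, task->last_timeout) the block layer timer is simply reset and have_checked_conn is cleared; only a stalled task falls through to the ping_task/nopout path. A minimal standalone sketch of the same jiffies comparison (the helper name and parameters are illustrative, not part of the patch):

#include <linux/types.h>
#include <linux/jiffies.h>

/*
 * Progress heuristic used by the hunk above: treat a task as healthy if a
 * PDU was queued or received at or after its previous timeout.
 */
static inline bool iscsi_task_made_progress(unsigned long last_xfer,
					    unsigned long last_timeout)
{
	return time_after_eq(last_xfer, last_timeout);
}
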
@@ -1841,7 +1900,7 @@
cls_session = starget_to_session(scsi_target(sc->device));
session = cls_session->dd_data;
- ISCSI_DBG_SESSION(session, "aborting sc %p\n", sc);
+ ISCSI_DBG_EH(session, "aborting sc %p\n", sc);
mutex_lock(&session->eh_mutex);
spin_lock_bh(&session->lock);
@@ -1850,8 +1909,8 @@
* got the command.
*/
if (!sc->SCp.ptr) {
- ISCSI_DBG_SESSION(session, "sc never reached iscsi layer or "
- "it completed.\n");
+ ISCSI_DBG_EH(session, "sc never reached iscsi layer or "
+ "it completed.\n");
spin_unlock_bh(&session->lock);
mutex_unlock(&session->eh_mutex);
return SUCCESS;
@@ -1865,7 +1924,7 @@
sc->SCp.phase != session->age) {
spin_unlock_bh(&session->lock);
mutex_unlock(&session->eh_mutex);
- ISCSI_DBG_SESSION(session, "failing abort due to dropped "
+ ISCSI_DBG_EH(session, "failing abort due to dropped "
"session.\n");
return FAILED;
}
@@ -1875,13 +1934,12 @@
age = session->age;
task = (struct iscsi_task *)sc->SCp.ptr;
- ISCSI_DBG_SESSION(session, "aborting [sc %p itt 0x%x]\n",
- sc, task->itt);
+ ISCSI_DBG_EH(session, "aborting [sc %p itt 0x%x]\n",
+ sc, task->itt);
/* task completed before time out */
if (!task->sc) {
- ISCSI_DBG_SESSION(session, "sc completed while abort in "
- "progress\n");
+ ISCSI_DBG_EH(session, "sc completed while abort in progress\n");
goto success;
}
@@ -1930,8 +1988,8 @@
if (!sc->SCp.ptr) {
conn->tmf_state = TMF_INITIAL;
/* task completed before tmf abort response */
- ISCSI_DBG_SESSION(session, "sc completed while abort "
- "in progress\n");
+ ISCSI_DBG_EH(session, "sc completed while abort in "
+ "progress\n");
goto success;
}
/* fall through */
@@ -1943,16 +2001,16 @@
success:
spin_unlock_bh(&session->lock);
success_unlocked:
- ISCSI_DBG_SESSION(session, "abort success [sc %p itt 0x%x]\n",
- sc, task->itt);
+ ISCSI_DBG_EH(session, "abort success [sc %p itt 0x%x]\n",
+ sc, task->itt);
mutex_unlock(&session->eh_mutex);
return SUCCESS;
failed:
spin_unlock_bh(&session->lock);
failed_unlocked:
- ISCSI_DBG_SESSION(session, "abort failed [sc %p itt 0x%x]\n", sc,
- task ? task->itt : 0);
+ ISCSI_DBG_EH(session, "abort failed [sc %p itt 0x%x]\n", sc,
+ task ? task->itt : 0);
mutex_unlock(&session->eh_mutex);
return FAILED;
}
@@ -1979,8 +2037,7 @@
cls_session = starget_to_session(scsi_target(sc->device));
session = cls_session->dd_data;
- ISCSI_DBG_SESSION(session, "LU Reset [sc %p lun %u]\n",
- sc, sc->device->lun);
+ ISCSI_DBG_EH(session, "LU Reset [sc %p lun %u]\n", sc, sc->device->lun);
mutex_lock(&session->eh_mutex);
spin_lock_bh(&session->lock);
@@ -2034,8 +2091,8 @@
unlock:
spin_unlock_bh(&session->lock);
done:
- ISCSI_DBG_SESSION(session, "dev reset result = %s\n",
- rc == SUCCESS ? "SUCCESS" : "FAILED");
+ ISCSI_DBG_EH(session, "dev reset result = %s\n",
+ rc == SUCCESS ? "SUCCESS" : "FAILED");
mutex_unlock(&session->eh_mutex);
return rc;
}
diff --git a/drivers/scsi/libiscsi_tcp.c b/drivers/scsi/libiscsi_tcp.c
index 2bc0709..2e0746d 100644
--- a/drivers/scsi/libiscsi_tcp.c
+++ b/drivers/scsi/libiscsi_tcp.c
@@ -686,6 +686,7 @@
"offset=%d, datalen=%d)\n",
tcp_task->data_offset,
tcp_conn->in.datalen);
+ task->last_xfer = jiffies;
rc = iscsi_segment_seek_sg(&tcp_conn->in.segment,
sdb->table.sgl,
sdb->table.nents,
@@ -713,9 +714,10 @@
rc = ISCSI_ERR_BAD_ITT;
else if (ahslen)
rc = ISCSI_ERR_AHSLEN;
- else if (task->sc->sc_data_direction == DMA_TO_DEVICE)
+ else if (task->sc->sc_data_direction == DMA_TO_DEVICE) {
+ task->last_xfer = jiffies;
rc = iscsi_tcp_r2t_rsp(conn, task);
- else
+ } else
rc = ISCSI_ERR_PROTO;
spin_unlock(&conn->session->lock);
break;
diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c
index 4a990f4..cca8e4a 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.c
+++ b/drivers/scsi/qla2xxx/qla_dbg.c
@@ -216,7 +216,7 @@
static int
qla2xxx_dump_ram(struct qla_hw_data *ha, uint32_t addr, uint16_t *ram,
- uint16_t ram_words, void **nxt)
+ uint32_t ram_words, void **nxt)
{
int rval;
uint32_t cnt, stat, timer, words, idx;
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index 2620261..f2ce8e3 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -2301,7 +2301,7 @@
static char *link_speeds[] = { "1", "2", "?", "4", "8", "10" };
char *link_speed;
int rval;
- uint16_t mb[6];
+ uint16_t mb[4];
struct qla_hw_data *ha = vha->hw;
if (!IS_IIDMA_CAPABLE(ha))
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index 451ece0..fe69f30 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -1267,17 +1267,22 @@
mcp->mb[0] = MBC_GET_FIRMWARE_STATE;
mcp->out_mb = MBX_0;
- mcp->in_mb = MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
+ if (IS_FWI2_CAPABLE(vha->hw))
+ mcp->in_mb = MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
+ else
+ mcp->in_mb = MBX_1|MBX_0;
mcp->tov = MBX_TOV_SECONDS;
mcp->flags = 0;
rval = qla2x00_mailbox_command(vha, mcp);
/* Return firmware states. */
states[0] = mcp->mb[1];
- states[1] = mcp->mb[2];
- states[2] = mcp->mb[3];
- states[3] = mcp->mb[4];
- states[4] = mcp->mb[5];
+ if (IS_FWI2_CAPABLE(vha->hw)) {
+ states[1] = mcp->mb[2];
+ states[2] = mcp->mb[3];
+ states[3] = mcp->mb[4];
+ states[4] = mcp->mb[5];
+ }
if (rval != QLA_SUCCESS) {
/*EMPTY*/
@@ -2697,10 +2702,13 @@
mcp->mb[0] = MBC_PORT_PARAMS;
mcp->mb[1] = loop_id;
mcp->mb[2] = BIT_0;
- mcp->mb[3] = port_speed & (BIT_2|BIT_1|BIT_0);
- mcp->mb[4] = mcp->mb[5] = 0;
- mcp->out_mb = MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
- mcp->in_mb = MBX_5|MBX_4|MBX_3|MBX_1|MBX_0;
+ if (IS_QLA81XX(vha->hw))
+ mcp->mb[3] = port_speed & (BIT_5|BIT_4|BIT_3|BIT_2|BIT_1|BIT_0);
+ else
+ mcp->mb[3] = port_speed & (BIT_2|BIT_1|BIT_0);
+ mcp->mb[9] = vha->vp_idx;
+ mcp->out_mb = MBX_9|MBX_3|MBX_2|MBX_1|MBX_0;
+ mcp->in_mb = MBX_3|MBX_1|MBX_0;
mcp->tov = MBX_TOV_SECONDS;
mcp->flags = 0;
rval = qla2x00_mailbox_command(vha, mcp);
@@ -2710,8 +2718,6 @@
mb[0] = mcp->mb[0];
mb[1] = mcp->mb[1];
mb[3] = mcp->mb[3];
- mb[4] = mcp->mb[4];
- mb[5] = mcp->mb[5];
}
if (rval != QLA_SUCCESS) {
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index dcf0116..f0396e7 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -1663,7 +1663,7 @@
/* queue 0 uses two msix vectors */
if (ql2xmultique_tag) {
cpus = num_online_cpus();
- ha->max_rsp_queues = (ha->msix_count - 1 - cpus) ?
+ ha->max_rsp_queues = (ha->msix_count - 1 > cpus) ?
(cpus + 1) : (ha->msix_count - 1);
ha->max_req_queues = 2;
} else if (ql2xmaxqueues > 1) {
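
The qla_os.c hunk above turns a truth-value test into a real comparison: the old expression (ha->msix_count - 1 - cpus) is non-zero whenever the two quantities differ, so it selected cpus + 1 response queues even when fewer MSI-X vectors were available. A small worked example of both expressions (values are illustrative):

#include <stdio.h>

int main(void)
{
	int msix_count = 8, cpus = 16;

	/* old: difference used as a boolean */
	int old_rsp = (msix_count - 1 - cpus) ? (cpus + 1) : (msix_count - 1);
	/* new: explicit greater-than test */
	int new_rsp = (msix_count - 1 > cpus) ? (cpus + 1) : (msix_count - 1);

	/* prints "old 17, new 7": the fix caps queues at the vectors present */
	printf("old %d, new %d\n", old_rsp, new_rsp);
	return 0;
}
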
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h
index b63feaf..8436970 100644
--- a/drivers/scsi/qla2xxx/qla_version.h
+++ b/drivers/scsi/qla2xxx/qla_version.h
@@ -7,7 +7,7 @@
/*
* Driver version
*/
-#define QLA2XXX_VERSION "8.03.01-k3"
+#define QLA2XXX_VERSION "8.03.01-k4"
#define QLA_DRIVER_MAJOR_VER 8
#define QLA_DRIVER_MINOR_VER 3
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index 41a2177..fb9af20 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -101,6 +101,8 @@
#define DEF_DIF 0
#define DEF_GUARD 0
#define DEF_ATO 1
+#define DEF_PHYSBLK_EXP 0
+#define DEF_LOWEST_ALIGNED 0
/* bit mask values for scsi_debug_opts */
#define SCSI_DEBUG_OPT_NOISE 1
@@ -156,6 +158,8 @@
static int scsi_debug_dif = DEF_DIF;
static int scsi_debug_guard = DEF_GUARD;
static int scsi_debug_ato = DEF_ATO;
+static int scsi_debug_physblk_exp = DEF_PHYSBLK_EXP;
+static int scsi_debug_lowest_aligned = DEF_LOWEST_ALIGNED;
static int scsi_debug_cmnd_count = 0;
@@ -657,7 +661,12 @@
static int inquiry_evpd_b0(unsigned char * arr)
{
+ unsigned int gran;
+
memcpy(arr, vpdb0_data, sizeof(vpdb0_data));
+ gran = 1 << scsi_debug_physblk_exp;
+ arr[2] = (gran >> 8) & 0xff;
+ arr[3] = gran & 0xff;
if (sdebug_store_sectors > 0x400) {
arr[4] = (sdebug_store_sectors >> 24) & 0xff;
arr[5] = (sdebug_store_sectors >> 16) & 0xff;
@@ -945,6 +954,9 @@
arr[9] = (scsi_debug_sector_size >> 16) & 0xff;
arr[10] = (scsi_debug_sector_size >> 8) & 0xff;
arr[11] = scsi_debug_sector_size & 0xff;
+ arr[13] = scsi_debug_physblk_exp & 0xf;
+ arr[14] = (scsi_debug_lowest_aligned >> 8) & 0x3f;
+ arr[15] = scsi_debug_lowest_aligned & 0xff;
if (scsi_debug_dif) {
arr[12] = (scsi_debug_dif - 1) << 1; /* P_TYPE */
@@ -2380,6 +2392,8 @@
module_param_named(dif, scsi_debug_dif, int, S_IRUGO);
module_param_named(guard, scsi_debug_guard, int, S_IRUGO);
module_param_named(ato, scsi_debug_ato, int, S_IRUGO);
+module_param_named(physblk_exp, scsi_debug_physblk_exp, int, S_IRUGO);
+module_param_named(lowest_aligned, scsi_debug_lowest_aligned, int, S_IRUGO);
MODULE_AUTHOR("Eric Youngdale + Douglas Gilbert");
MODULE_DESCRIPTION("SCSI debug adapter driver");
@@ -2401,7 +2415,9 @@
MODULE_PARM_DESC(scsi_level, "SCSI level to simulate(def=5[SPC-3])");
MODULE_PARM_DESC(virtual_gb, "virtual gigabyte size (def=0 -> use dev_size_mb)");
MODULE_PARM_DESC(vpd_use_hostno, "0 -> dev ids ignore hostno (def=1 -> unique dev ids)");
-MODULE_PARM_DESC(sector_size, "hardware sector size in bytes (def=512)");
+MODULE_PARM_DESC(sector_size, "logical block size in bytes (def=512)");
+MODULE_PARM_DESC(physblk_exp, "physical block exponent (def=0)");
+MODULE_PARM_DESC(lowest_aligned, "lowest aligned lba (def=0)");
MODULE_PARM_DESC(dix, "data integrity extensions mask (def=0)");
MODULE_PARM_DESC(dif, "data integrity field type: 0-3 (def=0)");
MODULE_PARM_DESC(guard, "protection checksum: 0=crc, 1=ip (def=0)");
@@ -2874,6 +2890,18 @@
return -EINVAL;
}
+ if (scsi_debug_physblk_exp > 15) {
+ printk(KERN_ERR "scsi_debug_init: invalid physblk_exp %u\n",
+ scsi_debug_physblk_exp);
+ return -EINVAL;
+ }
+
+ if (scsi_debug_lowest_aligned > 0x3fff) {
+ printk(KERN_ERR "scsi_debug_init: lowest_aligned too big: %u\n",
+ scsi_debug_lowest_aligned);
+ return -EINVAL;
+ }
+
if (scsi_debug_dev_size_mb < 1)
scsi_debug_dev_size_mb = 1; /* force minimum 1 MB ramdisk */
sz = (unsigned long)scsi_debug_dev_size_mb * 1048576;
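
The two new scsi_debug parameters are reported back through the VPD B0 page and READ CAPACITY(16) exactly as the hunks above show: physblk_exp sets both the optimal transfer granularity (1 << physblk_exp logical blocks) and the low nibble of byte 13, while lowest_aligned fills the low 14 bits of bytes 14-15. For example, "modprobe scsi_debug physblk_exp=3 lowest_aligned=7" simulates a disk with 512-byte logical and 4096-byte physical blocks whose lowest aligned logical block is 7. A small sketch of that encoding, mirroring the array offsets above (illustrative only, not driver code):

#include <stdint.h>

/* Encode the READ CAPACITY(16) geometry bytes the way the hunks above do. */
static void encode_block_geometry(uint8_t *rc16, unsigned int physblk_exp,
				  unsigned int lowest_aligned)
{
	rc16[13] = physblk_exp & 0xf;		 /* logical-per-physical exponent */
	rc16[14] = (lowest_aligned >> 8) & 0x3f; /* lowest aligned LBA, high bits */
	rc16[15] = lowest_aligned & 0xff;	 /* lowest aligned LBA, low byte */
}
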
diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c
index 8821df9..93c2622 100644
--- a/drivers/scsi/scsi_devinfo.c
+++ b/drivers/scsi/scsi_devinfo.c
@@ -24,6 +24,13 @@
unsigned compatible; /* for use with scsi_static_device_list entries */
};
+struct scsi_dev_info_list_table {
+ struct list_head node; /* our node for being on the master list */
+ struct list_head scsi_dev_info_list; /* head of dev info list */
+ const char *name; /* name of list for /proc (NULL for global) */
+ int key; /* unique numeric identifier */
+};
+
static const char spaces[] = " "; /* 16 of them */
static unsigned scsi_default_dev_flags;
@@ -247,6 +254,22 @@
{ NULL, NULL, NULL, 0 },
};
+static struct scsi_dev_info_list_table *scsi_devinfo_lookup_by_key(int key)
+{
+ struct scsi_dev_info_list_table *devinfo_table;
+ int found = 0;
+
+ list_for_each_entry(devinfo_table, &scsi_dev_info_list, node)
+ if (devinfo_table->key == key) {
+ found = 1;
+ break;
+ }
+ if (!found)
+ return ERR_PTR(-EINVAL);
+
+ return devinfo_table;
+}
+
/*
* scsi_strcpy_devinfo: called from scsi_dev_info_list_add to copy into
* devinfo vendor and model strings.
@@ -296,7 +319,38 @@
static int scsi_dev_info_list_add(int compatible, char *vendor, char *model,
char *strflags, int flags)
{
+ return scsi_dev_info_list_add_keyed(compatible, vendor, model,
+ strflags, flags,
+ SCSI_DEVINFO_GLOBAL);
+}
+
+/**
+ * scsi_dev_info_list_add_keyed - add one dev_info list entry.
+ * @compatible: if true, null terminate short strings. Otherwise space pad.
+ * @vendor: vendor string
+ * @model: model (product) string
+ * @strflags: integer string
+ * @flags: if strflags NULL, use this flag value
+ * @key: specify list to use
+ *
+ * Description:
+ * Create and add one dev_info entry for @vendor, @model,
+ * @strflags or @flags in list specified by @key. If @compatible,
+ * add to the tail of the list, do not space pad, and set
+ * devinfo->compatible. The scsi_static_device_list entries are
+ * added with @compatible 1 and @strflags NULL.
+ *
+ * Returns: 0 OK, -error on failure.
+ **/
+int scsi_dev_info_list_add_keyed(int compatible, char *vendor, char *model,
+ char *strflags, int flags, int key)
+{
struct scsi_dev_info_list *devinfo;
+ struct scsi_dev_info_list_table *devinfo_table =
+ scsi_devinfo_lookup_by_key(key);
+
+ if (IS_ERR(devinfo_table))
+ return PTR_ERR(devinfo_table);
devinfo = kmalloc(sizeof(*devinfo), GFP_KERNEL);
if (!devinfo) {
@@ -317,12 +371,15 @@
devinfo->compatible = compatible;
if (compatible)
- list_add_tail(&devinfo->dev_info_list, &scsi_dev_info_list);
+ list_add_tail(&devinfo->dev_info_list,
+ &devinfo_table->scsi_dev_info_list);
else
- list_add(&devinfo->dev_info_list, &scsi_dev_info_list);
+ list_add(&devinfo->dev_info_list,
+ &devinfo_table->scsi_dev_info_list);
return 0;
}
+EXPORT_SYMBOL(scsi_dev_info_list_add_keyed);
/**
* scsi_dev_info_list_add_str - parse dev_list and add to the scsi_dev_info_list.
@@ -382,22 +439,48 @@
* @model: model name
*
* Description:
- * Search the scsi_dev_info_list for an entry matching @vendor and
- * @model, if found, return the matching flags value, else return
- * the host or global default settings. Called during scan time.
+ * Search the global scsi_dev_info_list (specified by list zero)
+ * for an entry matching @vendor and @model, if found, return the
+ * matching flags value, else return the host or global default
+ * settings. Called during scan time.
**/
int scsi_get_device_flags(struct scsi_device *sdev,
const unsigned char *vendor,
const unsigned char *model)
{
+ return scsi_get_device_flags_keyed(sdev, vendor, model,
+ SCSI_DEVINFO_GLOBAL);
+}
+
+
+/**
+ * scsi_get_device_flags_keyed - get device specific flags from the dynamic device list.
+ * @sdev: &scsi_device to get flags for
+ * @vendor: vendor name
+ * @model: model name
+ * @key: list to look up
+ *
+ * Description:
+ * Search the scsi_dev_info_list specified by @key for an entry
+ * matching @vendor and @model, if found, return the matching
+ * flags value, else return the host or global default settings.
+ * Called during scan time.
+ **/
+int scsi_get_device_flags_keyed(struct scsi_device *sdev,
+ const unsigned char *vendor,
+ const unsigned char *model,
+ int key)
+{
struct scsi_dev_info_list *devinfo;
- unsigned int bflags;
+ struct scsi_dev_info_list_table *devinfo_table;
- bflags = sdev->sdev_bflags;
- if (!bflags)
- bflags = scsi_default_dev_flags;
+ devinfo_table = scsi_devinfo_lookup_by_key(key);
- list_for_each_entry(devinfo, &scsi_dev_info_list, dev_info_list) {
+ if (IS_ERR(devinfo_table))
+ return PTR_ERR(devinfo_table);
+
+ list_for_each_entry(devinfo, &devinfo_table->scsi_dev_info_list,
+ dev_info_list) {
if (devinfo->compatible) {
/*
* Behave like the older version of get_device_flags.
@@ -447,32 +530,89 @@
return devinfo->flags;
}
}
- return bflags;
+ /* nothing found, return nothing */
+ if (key != SCSI_DEVINFO_GLOBAL)
+ return 0;
+
+ /* except for the global list, where we have an exception */
+ if (sdev->sdev_bflags)
+ return sdev->sdev_bflags;
+
+ return scsi_default_dev_flags;
}
+EXPORT_SYMBOL(scsi_get_device_flags_keyed);
#ifdef CONFIG_SCSI_PROC_FS
+struct double_list {
+ struct list_head *top;
+ struct list_head *bottom;
+};
+
static int devinfo_seq_show(struct seq_file *m, void *v)
{
+ struct double_list *dl = v;
+ struct scsi_dev_info_list_table *devinfo_table =
+ list_entry(dl->top, struct scsi_dev_info_list_table, node);
struct scsi_dev_info_list *devinfo =
- list_entry(v, struct scsi_dev_info_list, dev_info_list);
+ list_entry(dl->bottom, struct scsi_dev_info_list,
+ dev_info_list);
+
+ if (devinfo_table->scsi_dev_info_list.next == dl->bottom &&
+ devinfo_table->name)
+ seq_printf(m, "[%s]:\n", devinfo_table->name);
seq_printf(m, "'%.8s' '%.16s' 0x%x\n",
- devinfo->vendor, devinfo->model, devinfo->flags);
+ devinfo->vendor, devinfo->model, devinfo->flags);
return 0;
}
-static void * devinfo_seq_start(struct seq_file *m, loff_t *pos)
+static void *devinfo_seq_start(struct seq_file *m, loff_t *ppos)
{
- return seq_list_start(&scsi_dev_info_list, *pos);
+ struct double_list *dl = kmalloc(sizeof(*dl), GFP_KERNEL);
+ loff_t pos = *ppos;
+
+ if (!dl)
+ return NULL;
+
+ list_for_each(dl->top, &scsi_dev_info_list) {
+ struct scsi_dev_info_list_table *devinfo_table =
+ list_entry(dl->top, struct scsi_dev_info_list_table,
+ node);
+ list_for_each(dl->bottom, &devinfo_table->scsi_dev_info_list)
+ if (pos-- == 0)
+ return dl;
+ }
+
+ kfree(dl);
+ return NULL;
}
-static void * devinfo_seq_next(struct seq_file *m, void *v, loff_t *pos)
+static void *devinfo_seq_next(struct seq_file *m, void *v, loff_t *ppos)
{
- return seq_list_next(v, &scsi_dev_info_list, pos);
+ struct double_list *dl = v;
+ struct scsi_dev_info_list_table *devinfo_table =
+ list_entry(dl->top, struct scsi_dev_info_list_table, node);
+
+ ++*ppos;
+ dl->bottom = dl->bottom->next;
+ while (&devinfo_table->scsi_dev_info_list == dl->bottom) {
+ dl->top = dl->top->next;
+ if (dl->top == &scsi_dev_info_list) {
+ kfree(dl);
+ return NULL;
+ }
+ devinfo_table = list_entry(dl->top,
+ struct scsi_dev_info_list_table,
+ node);
+ dl->bottom = devinfo_table->scsi_dev_info_list.next;
+ }
+
+ return dl;
}
static void devinfo_seq_stop(struct seq_file *m, void *v)
{
+ kfree(v);
}
static const struct seq_operations scsi_devinfo_seq_ops = {
@@ -549,19 +689,78 @@
**/
void scsi_exit_devinfo(void)
{
- struct list_head *lh, *lh_next;
- struct scsi_dev_info_list *devinfo;
-
#ifdef CONFIG_SCSI_PROC_FS
remove_proc_entry("scsi/device_info", NULL);
#endif
- list_for_each_safe(lh, lh_next, &scsi_dev_info_list) {
+ scsi_dev_info_remove_list(SCSI_DEVINFO_GLOBAL);
+}
+
+/**
+ * scsi_dev_info_add_list - add a new devinfo list
+ * @key: key of the list to add
+ * @name: Name of the list to add (for /proc/scsi/device_info)
+ *
+ * Adds the requested list, returns zero on success, -EEXIST if the
+ * key is already registered to a list, or other error on failure.
+ */
+int scsi_dev_info_add_list(int key, const char *name)
+{
+ struct scsi_dev_info_list_table *devinfo_table =
+ scsi_devinfo_lookup_by_key(key);
+
+ if (!IS_ERR(devinfo_table))
+ /* list already exists */
+ return -EEXIST;
+
+ devinfo_table = kmalloc(sizeof(*devinfo_table), GFP_KERNEL);
+
+ if (!devinfo_table)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&devinfo_table->node);
+ INIT_LIST_HEAD(&devinfo_table->scsi_dev_info_list);
+ devinfo_table->name = name;
+ devinfo_table->key = key;
+ list_add_tail(&devinfo_table->node, &scsi_dev_info_list);
+
+ return 0;
+}
+EXPORT_SYMBOL(scsi_dev_info_add_list);
+
+/**
+ * scsi_dev_info_remove_list - destroy an added devinfo list
+ * @key: key of the list to destroy
+ *
+ * Iterates over the entire list first, freeing all the values, then
+ * frees the list itself. Returns 0 on success or -EINVAL if the key
+ * can't be found.
+ */
+int scsi_dev_info_remove_list(int key)
+{
+ struct list_head *lh, *lh_next;
+ struct scsi_dev_info_list_table *devinfo_table =
+ scsi_devinfo_lookup_by_key(key);
+
+ if (IS_ERR(devinfo_table))
+ /* no such list */
+ return -EINVAL;
+
+ /* remove from the master list */
+ list_del(&devinfo_table->node);
+
+ list_for_each_safe(lh, lh_next, &devinfo_table->scsi_dev_info_list) {
+ struct scsi_dev_info_list *devinfo;
+
devinfo = list_entry(lh, struct scsi_dev_info_list,
dev_info_list);
kfree(devinfo);
}
+ kfree(devinfo_table);
+
+ return 0;
}
+EXPORT_SYMBOL(scsi_dev_info_remove_list);
/**
* scsi_init_devinfo - set up the dynamic device list.
@@ -577,10 +776,14 @@
#endif
int error, i;
- error = scsi_dev_info_list_add_str(scsi_dev_flags);
+ error = scsi_dev_info_add_list(SCSI_DEVINFO_GLOBAL, NULL);
if (error)
return error;
+ error = scsi_dev_info_list_add_str(scsi_dev_flags);
+ if (error)
+ goto out;
+
for (i = 0; scsi_static_device_list[i].vendor; i++) {
error = scsi_dev_info_list_add(1 /* compatible */,
scsi_static_device_list[i].vendor,
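
The scsi_devinfo.c rework above replaces the single global blacklist with keyed lists; the scsi_transport_spi.c hunk further down is the first user (SCSI_DEVINFO_SPI). A rough sketch of the lifecycle using only the functions added above -- the SCSI_DEVINFO_EXAMPLE key, the vendor/model strings and the 0x1 flag bit are made up for illustration:

/* Illustrative user of the keyed devinfo API; the key, strings and flag
 * value below are placeholders, not from this patch. */
enum { SCSI_DEVINFO_EXAMPLE = 42 };

static int __init example_devinfo_init(void)
{
	int err;

	err = scsi_dev_info_add_list(SCSI_DEVINFO_EXAMPLE, "Example Transport");
	if (err)
		return err;

	/* compatible = 1, strflags = NULL, flags is a private bit mask */
	return scsi_dev_info_list_add_keyed(1, "ACME", "FAKEDISK",
					    NULL, 0x1, SCSI_DEVINFO_EXAMPLE);
}

static void __exit example_devinfo_exit(void)
{
	/* frees every entry on the list, then the list itself */
	scsi_dev_info_remove_list(SCSI_DEVINFO_EXAMPLE);
}

At scan time the per-device flags come back via scsi_get_device_flags_keyed(sdev, &sdev->inquiry[8], &sdev->inquiry[16], SCSI_DEVINFO_EXAMPLE), which is exactly how the SPI transport consults SCSI_DEVINFO_SPI in the hunk below.
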
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 30f3275..f3c4089 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -1207,6 +1207,7 @@
ret = scsi_setup_blk_pc_cmnd(sdev, req);
return scsi_prep_return(q, req, ret);
}
+EXPORT_SYMBOL(scsi_prep_fn);
/*
* scsi_dev_queue_ready: if we can send requests to sdev, return 1 else
diff --git a/drivers/scsi/scsi_priv.h b/drivers/scsi/scsi_priv.h
index fbc83be..021e503 100644
--- a/drivers/scsi/scsi_priv.h
+++ b/drivers/scsi/scsi_priv.h
@@ -39,9 +39,25 @@
#endif
/* scsi_devinfo.c */
+
+/* list of keys for the lists */
+enum {
+ SCSI_DEVINFO_GLOBAL = 0,
+ SCSI_DEVINFO_SPI,
+};
+
extern int scsi_get_device_flags(struct scsi_device *sdev,
const unsigned char *vendor,
const unsigned char *model);
+extern int scsi_get_device_flags_keyed(struct scsi_device *sdev,
+ const unsigned char *vendor,
+ const unsigned char *model, int key);
+extern int scsi_dev_info_list_add_keyed(int compatible, char *vendor,
+ char *model, char *strflags,
+ int flags, int key);
+extern int scsi_dev_info_add_list(int key, const char *name);
+extern int scsi_dev_info_remove_list(int key);
+
extern int __init scsi_init_devinfo(void);
extern void scsi_exit_devinfo(void);
@@ -71,7 +87,6 @@
extern void scsi_exit_queue(void);
struct request_queue;
struct request;
-extern int scsi_prep_fn(struct request_queue *, struct request *);
extern struct kmem_cache *scsi_sdb_cache;
/* scsi_proc.c */
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index fa4711d..91482f2 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -420,29 +420,12 @@
return err;
}
-static int scsi_bus_remove(struct device *dev)
-{
- struct device_driver *drv = dev->driver;
- struct scsi_device *sdev = to_scsi_device(dev);
- int err = 0;
-
- /* reset the prep_fn back to the default since the
- * driver may have altered it and it's being removed */
- blk_queue_prep_rq(sdev->request_queue, scsi_prep_fn);
-
- if (drv && drv->remove)
- err = drv->remove(dev);
-
- return 0;
-}
-
struct bus_type scsi_bus_type = {
.name = "scsi",
.match = scsi_bus_match,
.uevent = scsi_bus_uevent,
.suspend = scsi_bus_suspend,
.resume = scsi_bus_resume,
- .remove = scsi_bus_remove,
};
EXPORT_SYMBOL_GPL(scsi_bus_type);
diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
index 3f64d93..2eee9e6 100644
--- a/drivers/scsi/scsi_transport_fc.c
+++ b/drivers/scsi/scsi_transport_fc.c
@@ -3397,7 +3397,6 @@
kfree(job);
}
-
/**
* fc_bsg_jobdone - completion routine for bsg requests that the LLD has
* completed
@@ -3408,15 +3407,10 @@
{
struct request *req = job->req;
struct request *rsp = req->next_rq;
- unsigned long flags;
int err;
- spin_lock_irqsave(&job->job_lock, flags);
- job->state_flags |= FC_RQST_STATE_DONE;
- job->ref_cnt--;
- spin_unlock_irqrestore(&job->job_lock, flags);
-
err = job->req->errors = job->reply->result;
+
if (err < 0)
/* we're only returning the result field in the reply */
job->req->sense_len = sizeof(uint32_t);
@@ -3433,12 +3427,26 @@
rsp->resid_len -= min(job->reply->reply_payload_rcv_len,
rsp->resid_len);
}
-
- blk_end_request_all(req, err);
-
- fc_destroy_bsgjob(job);
+ blk_complete_request(req);
}
+/**
+ * fc_bsg_softirq_done - softirq done routine for destroying the bsg requests
+ * @rq: BSG request that holds the job to be destroyed
+ */
+static void fc_bsg_softirq_done(struct request *rq)
+{
+ struct fc_bsg_job *job = rq->special;
+ unsigned long flags;
+
+ spin_lock_irqsave(&job->job_lock, flags);
+ job->state_flags |= FC_RQST_STATE_DONE;
+ job->ref_cnt--;
+ spin_unlock_irqrestore(&job->job_lock, flags);
+
+ blk_end_request_all(rq, rq->errors);
+ fc_destroy_bsgjob(job);
+}
/**
* fc_bsg_job_timeout - handler for when a bsg request times out
@@ -3471,19 +3479,13 @@
"abort failed with status %d\n", err);
}
- if (!done) {
- spin_lock_irqsave(&job->job_lock, flags);
- job->ref_cnt--;
- spin_unlock_irqrestore(&job->job_lock, flags);
- fc_destroy_bsgjob(job);
- }
-
/* the blk_end_sync_io() doesn't check the error */
- return BLK_EH_HANDLED;
+ if (done)
+ return BLK_EH_NOT_HANDLED;
+ else
+ return BLK_EH_HANDLED;
}
-
-
static int
fc_bsg_map_buffer(struct fc_bsg_buffer *buf, struct request *req)
{
@@ -3859,7 +3861,7 @@
struct fc_internal *i = to_fc_internal(shost->transportt);
struct request_queue *q;
int err;
- char bsg_name[BUS_ID_SIZE]; /*20*/
+ char bsg_name[20];
fc_host->rqst_q = NULL;
@@ -3879,6 +3881,7 @@
q->queuedata = shost;
queue_flag_set_unlocked(QUEUE_FLAG_BIDI, q);
+ blk_queue_softirq_done(q, fc_bsg_softirq_done);
blk_queue_rq_timed_out(q, fc_bsg_job_timeout);
blk_queue_rq_timeout(q, FC_DEFAULT_BSG_TIMEOUT);
@@ -3924,6 +3927,7 @@
q->queuedata = rport;
queue_flag_set_unlocked(QUEUE_FLAG_BIDI, q);
+ blk_queue_softirq_done(q, fc_bsg_softirq_done);
blk_queue_rq_timed_out(q, fc_bsg_job_timeout);
blk_queue_rq_timeout(q, BLK_DEFAULT_SG_TIMEOUT);
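
The bsg completion path above is split in two: fc_bsg_jobdone() now only fills in the residuals and calls blk_complete_request(), while the new fc_bsg_softirq_done() handler, registered on both the host and rport queues with blk_queue_softirq_done(), marks the job FC_RQST_STATE_DONE and tears it down in softirq context. Stripped of the fc specifics, the pattern looks like this (function names are placeholders):

#include <linux/blkdev.h>

/* Driver/hardirq side: only signal that the request finished. */
static void example_complete(struct request *rq)
{
	blk_complete_request(rq);
}

/* Softirq side: end the request and release per-request state. */
static void example_softirq_done(struct request *rq)
{
	blk_end_request_all(rq, rq->errors);
}

static void example_setup(struct request_queue *q)
{
	blk_queue_softirq_done(q, example_softirq_done);
}
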
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
index f3e6646..783e33c 100644
--- a/drivers/scsi/scsi_transport_iscsi.c
+++ b/drivers/scsi/scsi_transport_iscsi.c
@@ -692,6 +692,7 @@
"Too many iscsi targets. Max "
"number of targets is %d.\n",
ISCSI_MAX_TARGET - 1);
+ err = -EOVERFLOW;
goto release_host;
}
}
diff --git a/drivers/scsi/scsi_transport_sas.c b/drivers/scsi/scsi_transport_sas.c
index d606452..0895d3c 100644
--- a/drivers/scsi/scsi_transport_sas.c
+++ b/drivers/scsi/scsi_transport_sas.c
@@ -173,9 +173,9 @@
ret = handler(shost, rphy, req);
req->errors = ret;
- spin_lock_irq(q->queue_lock);
+ blk_end_request_all(req, ret);
- req->end_io(req, ret);
+ spin_lock_irq(q->queue_lock);
}
}
diff --git a/drivers/scsi/scsi_transport_spi.c b/drivers/scsi/scsi_transport_spi.c
index 654a34f..c25bd9a 100644
--- a/drivers/scsi/scsi_transport_spi.c
+++ b/drivers/scsi/scsi_transport_spi.c
@@ -46,6 +46,22 @@
#define DV_RETRIES 3 /* should only need at most
* two cc/ua clears */
+/* Our blacklist flags */
+enum {
+ SPI_BLIST_NOIUS = 0x1,
+};
+
+/* blacklist table, modelled on scsi_devinfo.c */
+static struct {
+ char *vendor;
+ char *model;
+ unsigned flags;
+} spi_static_device_list[] __initdata = {
+ {"HP", "Ultrium 3-SCSI", SPI_BLIST_NOIUS },
+ {"IBM", "ULTRIUM-TD3", SPI_BLIST_NOIUS },
+ {NULL, NULL, 0}
+};
+
/* Private data accessors (keep these out of the header file) */
#define spi_dv_in_progress(x) (((struct spi_transport_attrs *)&(x)->starget_data)->dv_in_progress)
#define spi_dv_mutex(x) (((struct spi_transport_attrs *)&(x)->starget_data)->dv_mutex)
@@ -207,6 +223,9 @@
{
struct scsi_device *sdev = to_scsi_device(dev);
struct scsi_target *starget = sdev->sdev_target;
+ unsigned bflags = scsi_get_device_flags_keyed(sdev, &sdev->inquiry[8],
+ &sdev->inquiry[16],
+ SCSI_DEVINFO_SPI);
/* Populate the target capability fields with the values
* gleaned from the device inquiry */
@@ -216,6 +235,10 @@
spi_support_dt(starget) = scsi_device_dt(sdev);
spi_support_dt_only(starget) = scsi_device_dt_only(sdev);
spi_support_ius(starget) = scsi_device_ius(sdev);
+ if (bflags & SPI_BLIST_NOIUS) {
+ dev_info(dev, "Information Units disabled by blacklist\n");
+ spi_support_ius(starget) = 0;
+ }
spi_support_qas(starget) = scsi_device_qas(sdev);
return 0;
@@ -833,7 +856,7 @@
return;
}
- if (!scsi_device_wide(sdev)) {
+ if (!spi_support_wide(starget)) {
spi_max_width(starget) = 0;
max_width = 0;
}
@@ -860,7 +883,7 @@
return;
/* device can't handle synchronous */
- if (!scsi_device_sync(sdev) && !scsi_device_dt(sdev))
+ if (!spi_support_sync(starget) && !spi_support_dt(starget))
return;
/* len == -1 is the signal that we need to ascertain the
@@ -876,13 +899,14 @@
/* try QAS requests; this should be harmless to set if the
* target supports it */
- if (scsi_device_qas(sdev) && spi_max_qas(starget)) {
+ if (spi_support_qas(starget) && spi_max_qas(starget)) {
DV_SET(qas, 1);
} else {
DV_SET(qas, 0);
}
- if (scsi_device_ius(sdev) && spi_max_iu(starget) && min_period < 9) {
+ if (spi_support_ius(starget) && spi_max_iu(starget) &&
+ min_period < 9) {
/* This is u320 (or u640). Set IU transfers */
DV_SET(iu, 1);
/* Then set the optional parameters */
@@ -902,7 +926,7 @@
i->f->get_signalling(shost);
if (spi_signalling(shost) == SPI_SIGNAL_SE ||
spi_signalling(shost) == SPI_SIGNAL_HVD ||
- !scsi_device_dt(sdev)) {
+ !spi_support_dt(starget)) {
DV_SET(dt, 0);
} else {
DV_SET(dt, 1);
@@ -1523,7 +1547,21 @@
static __init int spi_transport_init(void)
{
- int error = transport_class_register(&spi_transport_class);
+ int error = scsi_dev_info_add_list(SCSI_DEVINFO_SPI,
+ "SCSI Parallel Transport Class");
+ if (!error) {
+ int i;
+
+ for (i = 0; spi_static_device_list[i].vendor; i++)
+ scsi_dev_info_list_add_keyed(1, /* compatible */
+ spi_static_device_list[i].vendor,
+ spi_static_device_list[i].model,
+ NULL,
+ spi_static_device_list[i].flags,
+ SCSI_DEVINFO_SPI);
+ }
+
+ error = transport_class_register(&spi_transport_class);
if (error)
return error;
error = anon_transport_class_register(&spi_device_class);
@@ -1535,6 +1573,7 @@
transport_class_unregister(&spi_transport_class);
anon_transport_class_unregister(&spi_device_class);
transport_class_unregister(&spi_host_class);
+ scsi_dev_info_remove_list(SCSI_DEVINFO_SPI);
}
MODULE_AUTHOR("Martin Hicks");
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 878b17a..5616cd7 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -1307,6 +1307,7 @@
int sense_valid = 0;
int the_result;
int retries = 3;
+ unsigned int alignment;
unsigned long long lba;
unsigned sector_size;
@@ -1358,6 +1359,16 @@
return -EOVERFLOW;
}
+ /* Logical blocks per physical block exponent */
+ sdkp->hw_sector_size = (1 << (buffer[13] & 0xf)) * sector_size;
+
+ /* Lowest aligned logical block */
+ alignment = ((buffer[14] & 0x3f) << 8 | buffer[15]) * sector_size;
+ blk_queue_alignment_offset(sdp->request_queue, alignment);
+ if (alignment && sdkp->first_scan)
+ sd_printk(KERN_NOTICE, sdkp,
+ "physical block alignment offset: %u\n", alignment);
+
sdkp->capacity = lba + 1;
return sector_size;
}
@@ -1409,6 +1420,7 @@
}
sdkp->capacity = lba + 1;
+ sdkp->hw_sector_size = sector_size;
return sector_size;
}
@@ -1521,11 +1533,17 @@
string_get_size(sz, STRING_UNITS_10, cap_str_10,
sizeof(cap_str_10));
- if (sdkp->first_scan || old_capacity != sdkp->capacity)
+ if (sdkp->first_scan || old_capacity != sdkp->capacity) {
sd_printk(KERN_NOTICE, sdkp,
- "%llu %d-byte hardware sectors: (%s/%s)\n",
+ "%llu %d-byte logical blocks: (%s/%s)\n",
(unsigned long long)sdkp->capacity,
sector_size, cap_str_10, cap_str_2);
+
+ if (sdkp->hw_sector_size != sector_size)
+ sd_printk(KERN_NOTICE, sdkp,
+ "%u-byte physical blocks\n",
+ sdkp->hw_sector_size);
+ }
}
/* Rescale capacity to 512-byte units */
@@ -1538,6 +1556,7 @@
else if (sector_size == 256)
sdkp->capacity >>= 1;
+ blk_queue_physical_block_size(sdp->request_queue, sdkp->hw_sector_size);
sdkp->device->sector_size = sector_size;
}
@@ -1776,6 +1795,52 @@
}
/**
+ * sd_read_block_limits - Query disk device for preferred I/O sizes.
+ * @sdkp: disk to query
+ */
+static void sd_read_block_limits(struct scsi_disk *sdkp)
+{
+ unsigned int sector_sz = sdkp->device->sector_size;
+ char *buffer;
+
+ /* Block Limits VPD */
+ buffer = scsi_get_vpd_page(sdkp->device, 0xb0);
+
+ if (buffer == NULL)
+ return;
+
+ blk_queue_io_min(sdkp->disk->queue,
+ get_unaligned_be16(&buffer[6]) * sector_sz);
+ blk_queue_io_opt(sdkp->disk->queue,
+ get_unaligned_be32(&buffer[12]) * sector_sz);
+
+ kfree(buffer);
+}
+
+/**
+ * sd_read_block_characteristics - Query block dev. characteristics
+ * @disk: disk to query
+ */
+static void sd_read_block_characteristics(struct scsi_disk *sdkp)
+{
+ char *buffer;
+ u16 rot;
+
+ /* Block Device Characteristics VPD */
+ buffer = scsi_get_vpd_page(sdkp->device, 0xb1);
+
+ if (buffer == NULL)
+ return;
+
+ rot = get_unaligned_be16(&buffer[4]);
+
+ if (rot == 1)
+ queue_flag_set_unlocked(QUEUE_FLAG_NONROT, sdkp->disk->queue);
+
+ kfree(buffer);
+}
+
+/**
* sd_revalidate_disk - called the first time a new disk is seen,
* performs disk spin up, read_capacity, etc.
* @disk: struct gendisk we care about
@@ -1812,6 +1877,8 @@
*/
if (sdkp->media_present) {
sd_read_capacity(sdkp, buffer);
+ sd_read_block_limits(sdkp);
+ sd_read_block_characteristics(sdkp);
sd_read_write_protect_flag(sdkp, buffer);
sd_read_cache_type(sdkp, buffer);
sd_read_app_tag_own(sdkp, buffer);
@@ -1934,6 +2001,8 @@
add_disk(gd);
sd_dif_config_host(sdkp);
+ sd_revalidate_disk(gd);
+
sd_printk(KERN_NOTICE, sdkp, "Attached SCSI %sdisk\n",
sdp->removable ? "removable " : "");
}
@@ -2054,6 +2123,7 @@
async_synchronize_full();
sdkp = dev_get_drvdata(dev);
+ blk_queue_prep_rq(sdkp->device->request_queue, scsi_prep_fn);
device_del(&sdkp->dev);
del_gendisk(sdkp->disk);
sd_shutdown(dev);
diff --git a/drivers/scsi/sd.h b/drivers/scsi/sd.h
index 708778c..8474b5b 100644
--- a/drivers/scsi/sd.h
+++ b/drivers/scsi/sd.h
@@ -45,6 +45,7 @@
unsigned int openers; /* protected by BKL for now, yuck */
sector_t capacity; /* size in 512-byte sectors */
u32 index;
+ unsigned short hw_sector_size;
u8 media_present;
u8 write_prot;
u8 protection_type;/* Data Integrity Field */
diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c
index cd350df..cce0fe4 100644
--- a/drivers/scsi/sr.c
+++ b/drivers/scsi/sr.c
@@ -881,6 +881,7 @@
{
struct scsi_cd *cd = dev_get_drvdata(dev);
+ blk_queue_prep_rq(cd->device->request_queue, scsi_prep_fn);
del_gendisk(cd->disk);
mutex_lock(&sr_ref_mutex);
diff --git a/drivers/scsi/sym53c8xx_2/sym_hipd.c b/drivers/scsi/sym53c8xx_2/sym_hipd.c
index 69ad494..297deb8 100644
--- a/drivers/scsi/sym53c8xx_2/sym_hipd.c
+++ b/drivers/scsi/sym53c8xx_2/sym_hipd.c
@@ -2321,8 +2321,9 @@
int phase = cmd & 7;
struct sym_ccb *cp = sym_ccb_from_dsa(np, dsa);
- printf("%s: SCSI parity error detected: SCR1=%d DBC=%x SBCL=%x\n",
- sym_name(np), hsts, dbc, sbcl);
+ if (printk_ratelimit())
+ printf("%s: SCSI parity error detected: SCR1=%d DBC=%x SBCL=%x\n",
+ sym_name(np), hsts, dbc, sbcl);
/*
* Check that the chip is connected to the SCSI BUS.
diff --git a/drivers/serial/8250_pci.c b/drivers/serial/8250_pci.c
index e371a9c..a07015d 100644
--- a/drivers/serial/8250_pci.c
+++ b/drivers/serial/8250_pci.c
@@ -398,8 +398,7 @@
{
u8 __iomem *p;
- p = ioremap_nocache(pci_resource_start(dev, 0),
- pci_resource_len(dev, 0));
+ p = pci_ioremap_bar(dev, 0);
if (p == NULL)
return -ENOMEM;
@@ -423,8 +422,7 @@
{
u8 __iomem *p;
- p = ioremap_nocache(pci_resource_start(dev, 0),
- pci_resource_len(dev, 0));
+ p = pci_ioremap_bar(dev, 0);
/* FIXME: What if resource_len < OCT_REG_CR_OFF */
if (p != NULL)
writeb(0, p + OCT_REG_CR_OFF);
diff --git a/drivers/serial/Kconfig b/drivers/serial/Kconfig
index 1132c5c..037c1e0 100644
--- a/drivers/serial/Kconfig
+++ b/drivers/serial/Kconfig
@@ -1320,6 +1320,16 @@
If you have an SGI Altix with an IOC3 serial card,
say Y or M. Otherwise, say N.
+config SERIAL_MSM
+ bool "MSM on-chip serial port support"
+ depends on ARM && ARCH_MSM
+ select SERIAL_CORE
+
+config SERIAL_MSM_CONSOLE
+ bool "MSM serial console support"
+ depends on SERIAL_MSM=y
+ select SERIAL_CORE_CONSOLE
+
config SERIAL_NETX
tristate "NetX serial port support"
depends on ARM && ARCH_NETX
diff --git a/drivers/serial/Makefile b/drivers/serial/Makefile
index 45a8658..d5a2998 100644
--- a/drivers/serial/Makefile
+++ b/drivers/serial/Makefile
@@ -71,6 +71,7 @@
obj-$(CONFIG_SERIAL_SGI_IOC3) += ioc3_serial.o
obj-$(CONFIG_SERIAL_ATMEL) += atmel_serial.o
obj-$(CONFIG_SERIAL_UARTLITE) += uartlite.o
+obj-$(CONFIG_SERIAL_MSM) += msm_serial.o
obj-$(CONFIG_SERIAL_NETX) += netx-serial.o
obj-$(CONFIG_SERIAL_OF_PLATFORM) += of_serial.o
obj-$(CONFIG_SERIAL_OF_PLATFORM_NWPSERIAL) += nwpserial.o
diff --git a/drivers/serial/bfin_5xx.c b/drivers/serial/bfin_5xx.c
index e2f6b1b..b4a7650 100644
--- a/drivers/serial/bfin_5xx.c
+++ b/drivers/serial/bfin_5xx.c
@@ -38,6 +38,10 @@
#include <asm/cacheflush.h>
#endif
+#ifdef CONFIG_SERIAL_BFIN_MODULE
+# undef CONFIG_EARLY_PRINTK
+#endif
+
/* UART name and device definitions */
#define BFIN_SERIAL_NAME "ttyBF"
#define BFIN_SERIAL_MAJOR 204
@@ -1110,6 +1114,7 @@
bfin_serial_hw_init();
for (i = 0; i < nr_active_ports; i++) {
+ spin_lock_init(&bfin_serial_ports[i].port.lock);
bfin_serial_ports[i].port.uartclk = get_sclk();
bfin_serial_ports[i].port.fifosize = BFIN_UART_TX_FIFO_SIZE;
bfin_serial_ports[i].port.ops = &bfin_serial_pops;
diff --git a/drivers/serial/icom.c b/drivers/serial/icom.c
index 9f2891c..cd1b6a4 100644
--- a/drivers/serial/icom.c
+++ b/drivers/serial/icom.c
@@ -1548,8 +1548,7 @@
goto probe_exit1;
}
- icom_adapter->base_addr = ioremap(icom_adapter->base_addr_pci,
- pci_resource_len(dev, 0));
+ icom_adapter->base_addr = pci_ioremap_bar(dev, 0);
if (!icom_adapter->base_addr)
goto probe_exit1;
diff --git a/drivers/serial/jsm/jsm_tty.c b/drivers/serial/jsm/jsm_tty.c
index 107ce2e..00f4577 100644
--- a/drivers/serial/jsm/jsm_tty.c
+++ b/drivers/serial/jsm/jsm_tty.c
@@ -467,7 +467,7 @@
printk(KERN_INFO "jsm: linemap is full, added device failed\n");
continue;
} else
- set_bit((int)line, linemap);
+ set_bit(line, linemap);
brd->channels[i]->uart_port.line = line;
if (uart_add_one_port (&jsm_uart_driver, &brd->channels[i]->uart_port))
printk(KERN_INFO "jsm: add device failed\n");
@@ -503,7 +503,7 @@
ch = brd->channels[i];
- clear_bit((int)(ch->uart_port.line), linemap);
+ clear_bit(ch->uart_port.line, linemap);
uart_remove_one_port(&jsm_uart_driver, &brd->channels[i]->uart_port);
}
diff --git a/drivers/serial/msm_serial.c b/drivers/serial/msm_serial.c
new file mode 100644
index 0000000..698048f
--- /dev/null
+++ b/drivers/serial/msm_serial.c
@@ -0,0 +1,772 @@
+/*
+ * drivers/serial/msm_serial.c - driver for msm7k serial device and console
+ *
+ * Copyright (C) 2007 Google, Inc.
+ * Author: Robert Love <rlove@google.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#if defined(CONFIG_SERIAL_MSM_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
+# define SUPPORT_SYSRQ
+#endif
+
+#include <linux/hrtimer.h>
+#include <linux/module.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/irq.h>
+#include <linux/init.h>
+#include <linux/console.h>
+#include <linux/tty.h>
+#include <linux/tty_flip.h>
+#include <linux/serial_core.h>
+#include <linux/serial.h>
+#include <linux/clk.h>
+#include <linux/platform_device.h>
+
+#include "msm_serial.h"
+
+struct msm_port {
+ struct uart_port uart;
+ char name[16];
+ struct clk *clk;
+ unsigned int imr;
+};
+
+#define UART_TO_MSM(uart_port) ((struct msm_port *) uart_port)
+
+static inline void msm_write(struct uart_port *port, unsigned int val,
+ unsigned int off)
+{
+ __raw_writel(val, port->membase + off);
+}
+
+static inline unsigned int msm_read(struct uart_port *port, unsigned int off)
+{
+ return __raw_readl(port->membase + off);
+}
+
+static void msm_stop_tx(struct uart_port *port)
+{
+ struct msm_port *msm_port = UART_TO_MSM(port);
+
+ msm_port->imr &= ~UART_IMR_TXLEV;
+ msm_write(port, msm_port->imr, UART_IMR);
+}
+
+static void msm_start_tx(struct uart_port *port)
+{
+ struct msm_port *msm_port = UART_TO_MSM(port);
+
+ msm_port->imr |= UART_IMR_TXLEV;
+ msm_write(port, msm_port->imr, UART_IMR);
+}
+
+static void msm_stop_rx(struct uart_port *port)
+{
+ struct msm_port *msm_port = UART_TO_MSM(port);
+
+ msm_port->imr &= ~(UART_IMR_RXLEV | UART_IMR_RXSTALE);
+ msm_write(port, msm_port->imr, UART_IMR);
+}
+
+static void msm_enable_ms(struct uart_port *port)
+{
+ struct msm_port *msm_port = UART_TO_MSM(port);
+
+ msm_port->imr |= UART_IMR_DELTA_CTS;
+ msm_write(port, msm_port->imr, UART_IMR);
+}
+
+static void handle_rx(struct uart_port *port)
+{
+ struct tty_struct *tty = port->info->port.tty;
+ unsigned int sr;
+
+ /*
+ * Handle overrun. My understanding of the hardware is that overrun
+ * is not tied to the RX buffer, so we handle the case out of band.
+ */
+ if ((msm_read(port, UART_SR) & UART_SR_OVERRUN)) {
+ port->icount.overrun++;
+ tty_insert_flip_char(tty, 0, TTY_OVERRUN);
+ msm_write(port, UART_CR_CMD_RESET_ERR, UART_CR);
+ }
+
+ /* and now the main RX loop */
+ while ((sr = msm_read(port, UART_SR)) & UART_SR_RX_READY) {
+ unsigned int c;
+ char flag = TTY_NORMAL;
+
+ c = msm_read(port, UART_RF);
+
+ if (sr & UART_SR_RX_BREAK) {
+ port->icount.brk++;
+ if (uart_handle_break(port))
+ continue;
+ } else if (sr & UART_SR_PAR_FRAME_ERR) {
+ port->icount.frame++;
+ } else {
+ port->icount.rx++;
+ }
+
+ /* Mask conditions we're ignoring. */
+ sr &= port->read_status_mask;
+
+ if (sr & UART_SR_RX_BREAK) {
+ flag = TTY_BREAK;
+ } else if (sr & UART_SR_PAR_FRAME_ERR) {
+ flag = TTY_FRAME;
+ }
+
+ if (!uart_handle_sysrq_char(port, c))
+ tty_insert_flip_char(tty, c, flag);
+ }
+
+ tty_flip_buffer_push(tty);
+}
+
+static void handle_tx(struct uart_port *port)
+{
+ struct circ_buf *xmit = &port->info->xmit;
+ struct msm_port *msm_port = UART_TO_MSM(port);
+ int sent_tx;
+
+ if (port->x_char) {
+ msm_write(port, port->x_char, UART_TF);
+ port->icount.tx++;
+ port->x_char = 0;
+ }
+
+ while (msm_read(port, UART_SR) & UART_SR_TX_READY) {
+ if (uart_circ_empty(xmit)) {
+ /* disable tx interrupts */
+ msm_port->imr &= ~UART_IMR_TXLEV;
+ msm_write(port, msm_port->imr, UART_IMR);
+ break;
+ }
+
+ msm_write(port, xmit->buf[xmit->tail], UART_TF);
+
+ xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
+ port->icount.tx++;
+ sent_tx = 1;
+ }
+
+ if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
+ uart_write_wakeup(port);
+}
+
+static void handle_delta_cts(struct uart_port *port)
+{
+ msm_write(port, UART_CR_CMD_RESET_CTS, UART_CR);
+ port->icount.cts++;
+ wake_up_interruptible(&port->info->delta_msr_wait);
+}
+
+static irqreturn_t msm_irq(int irq, void *dev_id)
+{
+ struct uart_port *port = dev_id;
+ struct msm_port *msm_port = UART_TO_MSM(port);
+ unsigned int misr;
+
+ spin_lock(&port->lock);
+ misr = msm_read(port, UART_MISR);
+ msm_write(port, 0, UART_IMR); /* disable interrupt */
+
+ if (misr & (UART_IMR_RXLEV | UART_IMR_RXSTALE))
+ handle_rx(port);
+ if (misr & UART_IMR_TXLEV)
+ handle_tx(port);
+ if (misr & UART_IMR_DELTA_CTS)
+ handle_delta_cts(port);
+
+ msm_write(port, msm_port->imr, UART_IMR); /* restore interrupt */
+ spin_unlock(&port->lock);
+
+ return IRQ_HANDLED;
+}
+
+static unsigned int msm_tx_empty(struct uart_port *port)
+{
+ return (msm_read(port, UART_SR) & UART_SR_TX_EMPTY) ? TIOCSER_TEMT : 0;
+}
+
+static unsigned int msm_get_mctrl(struct uart_port *port)
+{
+ return TIOCM_CAR | TIOCM_CTS | TIOCM_DSR | TIOCM_RTS;
+}
+
+static void msm_set_mctrl(struct uart_port *port, unsigned int mctrl)
+{
+ unsigned int mr;
+
+ mr = msm_read(port, UART_MR1);
+
+ if (!(mctrl & TIOCM_RTS)) {
+ mr &= ~UART_MR1_RX_RDY_CTL;
+ msm_write(port, mr, UART_MR1);
+ msm_write(port, UART_CR_CMD_RESET_RFR, UART_CR);
+ } else {
+ mr |= UART_MR1_RX_RDY_CTL;
+ msm_write(port, mr, UART_MR1);
+ }
+}
+
+static void msm_break_ctl(struct uart_port *port, int break_ctl)
+{
+ if (break_ctl)
+ msm_write(port, UART_CR_CMD_START_BREAK, UART_CR);
+ else
+ msm_write(port, UART_CR_CMD_STOP_BREAK, UART_CR);
+}
+
+static int msm_set_baud_rate(struct uart_port *port, unsigned int baud)
+{
+ unsigned int baud_code, rxstale, watermark;
+
+ switch (baud) {
+ case 300:
+ baud_code = UART_CSR_300;
+ rxstale = 1;
+ break;
+ case 600:
+ baud_code = UART_CSR_600;
+ rxstale = 1;
+ break;
+ case 1200:
+ baud_code = UART_CSR_1200;
+ rxstale = 1;
+ break;
+ case 2400:
+ baud_code = UART_CSR_2400;
+ rxstale = 1;
+ break;
+ case 4800:
+ baud_code = UART_CSR_4800;
+ rxstale = 1;
+ break;
+ case 9600:
+ baud_code = UART_CSR_9600;
+ rxstale = 2;
+ break;
+ case 14400:
+ baud_code = UART_CSR_14400;
+ rxstale = 3;
+ break;
+ case 19200:
+ baud_code = UART_CSR_19200;
+ rxstale = 4;
+ break;
+ case 28800:
+ baud_code = UART_CSR_28800;
+ rxstale = 6;
+ break;
+ case 38400:
+ baud_code = UART_CSR_38400;
+ rxstale = 8;
+ break;
+ case 57600:
+ baud_code = UART_CSR_57600;
+ rxstale = 16;
+ break;
+ case 115200:
+ default:
+ baud_code = UART_CSR_115200;
+ baud = 115200;
+ rxstale = 31;
+ break;
+ }
+
+ msm_write(port, baud_code, UART_CSR);
+
+ /* RX stale watermark */
+ watermark = UART_IPR_STALE_LSB & rxstale;
+ watermark |= UART_IPR_RXSTALE_LAST;
+ watermark |= UART_IPR_STALE_TIMEOUT_MSB & (rxstale << 2);
+ msm_write(port, watermark, UART_IPR);
+
+ /* set RX watermark */
+ watermark = (port->fifosize * 3) / 4;
+ msm_write(port, watermark, UART_RFWR);
+
+ /* set TX watermark */
+ msm_write(port, 10, UART_TFWR);
+
+ return baud;
+}
+
+static void msm_reset(struct uart_port *port)
+{
+ /* reset everything */
+ msm_write(port, UART_CR_CMD_RESET_RX, UART_CR);
+ msm_write(port, UART_CR_CMD_RESET_TX, UART_CR);
+ msm_write(port, UART_CR_CMD_RESET_ERR, UART_CR);
+ msm_write(port, UART_CR_CMD_RESET_BREAK_INT, UART_CR);
+ msm_write(port, UART_CR_CMD_RESET_CTS, UART_CR);
+ msm_write(port, UART_CR_CMD_SET_RFR, UART_CR);
+}
+
+static void msm_init_clock(struct uart_port *port)
+{
+ struct msm_port *msm_port = UART_TO_MSM(port);
+
+ clk_enable(msm_port->clk);
+
+ msm_write(port, 0xC0, UART_MREG);
+ msm_write(port, 0xB2, UART_NREG);
+ msm_write(port, 0x7D, UART_DREG);
+ msm_write(port, 0x1C, UART_MNDREG);
+}
+
+static int msm_startup(struct uart_port *port)
+{
+ struct msm_port *msm_port = UART_TO_MSM(port);
+ unsigned int data, rfr_level;
+ int ret;
+
+ snprintf(msm_port->name, sizeof(msm_port->name),
+ "msm_serial%d", port->line);
+
+ ret = request_irq(port->irq, msm_irq, IRQF_TRIGGER_HIGH,
+ msm_port->name, port);
+ if (unlikely(ret))
+ return ret;
+
+ msm_init_clock(port);
+
+ if (likely(port->fifosize > 12))
+ rfr_level = port->fifosize - 12;
+ else
+ rfr_level = port->fifosize;
+
+ /* set automatic RFR level */
+ data = msm_read(port, UART_MR1);
+ data &= ~UART_MR1_AUTO_RFR_LEVEL1;
+ data &= ~UART_MR1_AUTO_RFR_LEVEL0;
+ data |= UART_MR1_AUTO_RFR_LEVEL1 & (rfr_level << 2);
+ data |= UART_MR1_AUTO_RFR_LEVEL0 & rfr_level;
+ msm_write(port, data, UART_MR1);
+
+ /* make sure that RXSTALE count is non-zero */
+ data = msm_read(port, UART_IPR);
+ if (unlikely(!data)) {
+ data |= UART_IPR_RXSTALE_LAST;
+ data |= UART_IPR_STALE_LSB;
+ msm_write(port, data, UART_IPR);
+ }
+
+ msm_reset(port);
+
+ msm_write(port, 0x05, UART_CR); /* enable TX & RX */
+
+ /* turn on RX and CTS interrupts */
+ msm_port->imr = UART_IMR_RXLEV | UART_IMR_RXSTALE |
+ UART_IMR_CURRENT_CTS;
+ msm_write(port, msm_port->imr, UART_IMR);
+
+ return 0;
+}
+
+static void msm_shutdown(struct uart_port *port)
+{
+ struct msm_port *msm_port = UART_TO_MSM(port);
+
+ msm_port->imr = 0;
+ msm_write(port, 0, UART_IMR); /* disable interrupts */
+
+ clk_disable(msm_port->clk);
+
+ free_irq(port->irq, port);
+}
+
+static void msm_set_termios(struct uart_port *port, struct ktermios *termios,
+ struct ktermios *old)
+{
+ unsigned long flags;
+ unsigned int baud, mr;
+
+ spin_lock_irqsave(&port->lock, flags);
+
+ /* calculate and set baud rate */
+ baud = uart_get_baud_rate(port, termios, old, 300, 115200);
+ baud = msm_set_baud_rate(port, baud);
+ if (tty_termios_baud_rate(termios))
+ tty_termios_encode_baud_rate(termios, baud, baud);
+
+ /* calculate parity */
+ mr = msm_read(port, UART_MR2);
+ mr &= ~UART_MR2_PARITY_MODE;
+ if (termios->c_cflag & PARENB) {
+ if (termios->c_cflag & PARODD)
+ mr |= UART_MR2_PARITY_MODE_ODD;
+ else if (termios->c_cflag & CMSPAR)
+ mr |= UART_MR2_PARITY_MODE_SPACE;
+ else
+ mr |= UART_MR2_PARITY_MODE_EVEN;
+ }
+
+ /* calculate bits per char */
+ mr &= ~UART_MR2_BITS_PER_CHAR;
+ switch (termios->c_cflag & CSIZE) {
+ case CS5:
+ mr |= UART_MR2_BITS_PER_CHAR_5;
+ break;
+ case CS6:
+ mr |= UART_MR2_BITS_PER_CHAR_6;
+ break;
+ case CS7:
+ mr |= UART_MR2_BITS_PER_CHAR_7;
+ break;
+ case CS8:
+ default:
+ mr |= UART_MR2_BITS_PER_CHAR_8;
+ break;
+ }
+
+ /* calculate stop bits */
+ mr &= ~(UART_MR2_STOP_BIT_LEN_ONE | UART_MR2_STOP_BIT_LEN_TWO);
+ if (termios->c_cflag & CSTOPB)
+ mr |= UART_MR2_STOP_BIT_LEN_TWO;
+ else
+ mr |= UART_MR2_STOP_BIT_LEN_ONE;
+
+ /* set parity, bits per char, and stop bit */
+ msm_write(port, mr, UART_MR2);
+
+ /* calculate and set hardware flow control */
+ mr = msm_read(port, UART_MR1);
+ mr &= ~(UART_MR1_CTS_CTL | UART_MR1_RX_RDY_CTL);
+ if (termios->c_cflag & CRTSCTS) {
+ mr |= UART_MR1_CTS_CTL;
+ mr |= UART_MR1_RX_RDY_CTL;
+ }
+ msm_write(port, mr, UART_MR1);
+
+ /* Configure status bits to ignore based on termio flags. */
+ port->read_status_mask = 0;
+ if (termios->c_iflag & INPCK)
+ port->read_status_mask |= UART_SR_PAR_FRAME_ERR;
+ if (termios->c_iflag & (BRKINT | PARMRK))
+ port->read_status_mask |= UART_SR_RX_BREAK;
+
+ uart_update_timeout(port, termios->c_cflag, baud);
+
+ spin_unlock_irqrestore(&port->lock, flags);
+}
+
+static const char *msm_type(struct uart_port *port)
+{
+ return "MSM";
+}
+
+static void msm_release_port(struct uart_port *port)
+{
+ struct platform_device *pdev = to_platform_device(port->dev);
+ struct resource *resource;
+ resource_size_t size;
+
+ resource = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (unlikely(!resource))
+ return;
+ size = resource->end - resource->start + 1;
+
+ release_mem_region(port->mapbase, size);
+ iounmap(port->membase);
+ port->membase = NULL;
+}
+
+static int msm_request_port(struct uart_port *port)
+{
+ struct platform_device *pdev = to_platform_device(port->dev);
+ struct resource *resource;
+ resource_size_t size;
+
+ resource = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (unlikely(!resource))
+ return -ENXIO;
+ size = resource->end - resource->start + 1;
+
+ if (unlikely(!request_mem_region(port->mapbase, size, "msm_serial")))
+ return -EBUSY;
+
+ port->membase = ioremap(port->mapbase, size);
+ if (!port->membase) {
+ release_mem_region(port->mapbase, size);
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+static void msm_config_port(struct uart_port *port, int flags)
+{
+ if (flags & UART_CONFIG_TYPE) {
+ port->type = PORT_MSM;
+ msm_request_port(port);
+ }
+}
+
+static int msm_verify_port(struct uart_port *port, struct serial_struct *ser)
+{
+ if (unlikely(ser->type != PORT_UNKNOWN && ser->type != PORT_MSM))
+ return -EINVAL;
+ if (unlikely(port->irq != ser->irq))
+ return -EINVAL;
+ return 0;
+}
+
+static void msm_power(struct uart_port *port, unsigned int state,
+ unsigned int oldstate)
+{
+ struct msm_port *msm_port = UART_TO_MSM(port);
+
+ switch (state) {
+ case 0:
+ clk_enable(msm_port->clk);
+ break;
+ case 3:
+ clk_disable(msm_port->clk);
+ break;
+ default:
+ printk(KERN_ERR "msm_serial: Unknown PM state %d\n", state);
+ }
+}
+
+static struct uart_ops msm_uart_pops = {
+ .tx_empty = msm_tx_empty,
+ .set_mctrl = msm_set_mctrl,
+ .get_mctrl = msm_get_mctrl,
+ .stop_tx = msm_stop_tx,
+ .start_tx = msm_start_tx,
+ .stop_rx = msm_stop_rx,
+ .enable_ms = msm_enable_ms,
+ .break_ctl = msm_break_ctl,
+ .startup = msm_startup,
+ .shutdown = msm_shutdown,
+ .set_termios = msm_set_termios,
+ .type = msm_type,
+ .release_port = msm_release_port,
+ .request_port = msm_request_port,
+ .config_port = msm_config_port,
+ .verify_port = msm_verify_port,
+ .pm = msm_power,
+};
+
+static struct msm_port msm_uart_ports[] = {
+ {
+ .uart = {
+ .iotype = UPIO_MEM,
+ .ops = &msm_uart_pops,
+ .flags = UPF_BOOT_AUTOCONF,
+ .fifosize = 512,
+ .line = 0,
+ },
+ },
+ {
+ .uart = {
+ .iotype = UPIO_MEM,
+ .ops = &msm_uart_pops,
+ .flags = UPF_BOOT_AUTOCONF,
+ .fifosize = 512,
+ .line = 1,
+ },
+ },
+ {
+ .uart = {
+ .iotype = UPIO_MEM,
+ .ops = &msm_uart_pops,
+ .flags = UPF_BOOT_AUTOCONF,
+ .fifosize = 64,
+ .line = 2,
+ },
+ },
+};
+
+#define UART_NR ARRAY_SIZE(msm_uart_ports)
+
+static inline struct uart_port *get_port_from_line(unsigned int line)
+{
+ return &msm_uart_ports[line].uart;
+}
+
+#ifdef CONFIG_SERIAL_MSM_CONSOLE
+
+static void msm_console_putchar(struct uart_port *port, int c)
+{
+ while (!(msm_read(port, UART_SR) & UART_SR_TX_READY))
+ ;
+ msm_write(port, c, UART_TF);
+}
+
+static void msm_console_write(struct console *co, const char *s,
+ unsigned int count)
+{
+ struct uart_port *port;
+ struct msm_port *msm_port;
+
+ BUG_ON(co->index < 0 || co->index >= UART_NR);
+
+ port = get_port_from_line(co->index);
+ msm_port = UART_TO_MSM(port);
+
+ spin_lock(&port->lock);
+ uart_console_write(port, s, count, msm_console_putchar);
+ spin_unlock(&port->lock);
+}
+
+static int __init msm_console_setup(struct console *co, char *options)
+{
+ struct uart_port *port;
+ int baud = 0, flow, bits, parity;
+
+ if (unlikely(co->index >= UART_NR || co->index < 0))
+ return -ENXIO;
+
+ port = get_port_from_line(co->index);
+
+ if (unlikely(!port->membase))
+ return -ENXIO;
+
+ port->cons = co;
+
+ msm_init_clock(port);
+
+ if (options)
+ uart_parse_options(options, &baud, &parity, &bits, &flow);
+
+ bits = 8;
+ parity = 'n';
+ flow = 'n';
+ msm_write(port, UART_MR2_BITS_PER_CHAR_8 | UART_MR2_STOP_BIT_LEN_ONE,
+ UART_MR2); /* 8N1 */
+
+ if (baud < 300 || baud > 115200)
+ baud = 115200;
+ msm_set_baud_rate(port, baud);
+
+ msm_reset(port);
+
+ printk(KERN_INFO "msm_serial: console setup on port #%d\n", port->line);
+
+ return uart_set_options(port, co, baud, parity, bits, flow);
+}
+
+static struct uart_driver msm_uart_driver;
+
+static struct console msm_console = {
+ .name = "ttyMSM",
+ .write = msm_console_write,
+ .device = uart_console_device,
+ .setup = msm_console_setup,
+ .flags = CON_PRINTBUFFER,
+ .index = -1,
+ .data = &msm_uart_driver,
+};
+
+#define MSM_CONSOLE (&msm_console)
+
+#else
+#define MSM_CONSOLE NULL
+#endif
+
+static struct uart_driver msm_uart_driver = {
+ .owner = THIS_MODULE,
+ .driver_name = "msm_serial",
+ .dev_name = "ttyMSM",
+ .nr = UART_NR,
+ .cons = MSM_CONSOLE,
+};
+
+static int __init msm_serial_probe(struct platform_device *pdev)
+{
+ struct msm_port *msm_port;
+ struct resource *resource;
+ struct uart_port *port;
+
+ if (unlikely(pdev->id < 0 || pdev->id >= UART_NR))
+ return -ENXIO;
+
+ printk(KERN_INFO "msm_serial: detected port #%d\n", pdev->id);
+
+ port = get_port_from_line(pdev->id);
+ port->dev = &pdev->dev;
+ msm_port = UART_TO_MSM(port);
+
+ msm_port->clk = clk_get(&pdev->dev, "uart_clk");
+ if (unlikely(IS_ERR(msm_port->clk)))
+ return PTR_ERR(msm_port->clk);
+ port->uartclk = clk_get_rate(msm_port->clk);
+
+ resource = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (unlikely(!resource))
+ return -ENXIO;
+ port->mapbase = resource->start;
+
+ port->irq = platform_get_irq(pdev, 0);
+ if (unlikely(port->irq < 0))
+ return -ENXIO;
+
+ platform_set_drvdata(pdev, port);
+
+ return uart_add_one_port(&msm_uart_driver, port);
+}
+
+static int __devexit msm_serial_remove(struct platform_device *pdev)
+{
+ struct msm_port *msm_port = platform_get_drvdata(pdev);
+
+ clk_put(msm_port->clk);
+
+ return 0;
+}
+
+static struct platform_driver msm_platform_driver = {
+ .probe = msm_serial_probe,
+ .remove = msm_serial_remove,
+ .driver = {
+ .name = "msm_serial",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init msm_serial_init(void)
+{
+ int ret;
+
+ ret = uart_register_driver(&msm_uart_driver);
+ if (unlikely(ret))
+ return ret;
+
+ ret = platform_driver_probe(&msm_platform_driver, msm_serial_probe);
+ if (unlikely(ret))
+ uart_unregister_driver(&msm_uart_driver);
+
+ printk(KERN_INFO "msm_serial: driver initialized\n");
+
+ return ret;
+}
+
+static void __exit msm_serial_exit(void)
+{
+#ifdef CONFIG_SERIAL_MSM_CONSOLE
+ unregister_console(&msm_console);
+#endif
+ platform_driver_unregister(&msm_platform_driver);
+ uart_unregister_driver(&msm_uart_driver);
+}
+
+module_init(msm_serial_init);
+module_exit(msm_serial_exit);
+
+MODULE_AUTHOR("Robert Love <rlove@google.com>");
+MODULE_DESCRIPTION("Driver for msm7x serial device");
+MODULE_LICENSE("GPL");
diff --git a/drivers/serial/msm_serial.h b/drivers/serial/msm_serial.h
new file mode 100644
index 0000000..689f1fa
--- /dev/null
+++ b/drivers/serial/msm_serial.h
@@ -0,0 +1,117 @@
+/*
+ * drivers/serial/msm_serial.h
+ *
+ * Copyright (C) 2007 Google, Inc.
+ * Author: Robert Love <rlove@google.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __DRIVERS_SERIAL_MSM_SERIAL_H
+#define __DRIVERS_SERIAL_MSM_SERIAL_H
+
+#define UART_MR1 0x0000
+
+#define UART_MR1_AUTO_RFR_LEVEL0 0x3F
+#define UART_MR1_AUTO_RFR_LEVEL1 0x3FF00
+#define UART_MR1_RX_RDY_CTL (1 << 7)
+#define UART_MR1_CTS_CTL (1 << 6)
+
+#define UART_MR2 0x0004
+#define UART_MR2_ERROR_MODE (1 << 6)
+#define UART_MR2_BITS_PER_CHAR 0x30
+#define UART_MR2_BITS_PER_CHAR_5 (0x0 << 4)
+#define UART_MR2_BITS_PER_CHAR_6 (0x1 << 4)
+#define UART_MR2_BITS_PER_CHAR_7 (0x2 << 4)
+#define UART_MR2_BITS_PER_CHAR_8 (0x3 << 4)
+#define UART_MR2_STOP_BIT_LEN_ONE (0x1 << 2)
+#define UART_MR2_STOP_BIT_LEN_TWO (0x3 << 2)
+#define UART_MR2_PARITY_MODE_NONE 0x0
+#define UART_MR2_PARITY_MODE_ODD 0x1
+#define UART_MR2_PARITY_MODE_EVEN 0x2
+#define UART_MR2_PARITY_MODE_SPACE 0x3
+#define UART_MR2_PARITY_MODE 0x3
+
+#define UART_CSR 0x0008
+#define UART_CSR_115200 0xFF
+#define UART_CSR_57600 0xEE
+#define UART_CSR_38400 0xDD
+#define UART_CSR_28800 0xCC
+#define UART_CSR_19200 0xBB
+#define UART_CSR_14400 0xAA
+#define UART_CSR_9600 0x99
+#define UART_CSR_4800 0x77
+#define UART_CSR_2400 0x55
+#define UART_CSR_1200 0x44
+#define UART_CSR_600 0x33
+#define UART_CSR_300 0x22
+
+#define UART_TF 0x000C
+
+#define UART_CR 0x0010
+#define UART_CR_CMD_NULL (0 << 4)
+#define UART_CR_CMD_RESET_RX (1 << 4)
+#define UART_CR_CMD_RESET_TX (2 << 4)
+#define UART_CR_CMD_RESET_ERR (3 << 4)
+#define UART_CR_CMD_RESET_BREAK_INT (4 << 4)
+#define UART_CR_CMD_START_BREAK (5 << 4)
+#define UART_CR_CMD_STOP_BREAK (6 << 4)
+#define UART_CR_CMD_RESET_CTS (7 << 4)
+#define UART_CR_CMD_PACKET_MODE (9 << 4)
+#define UART_CR_CMD_MODE_RESET (12 << 4)
+#define UART_CR_CMD_SET_RFR (13 << 4)
+#define UART_CR_CMD_RESET_RFR (14 << 4)
+#define UART_CR_TX_DISABLE (1 << 3)
+#define UART_CR_TX_ENABLE (1 << 2)
+#define UART_CR_RX_DISABLE (1 << 1)
+#define UART_CR_RX_ENABLE (1 << 0)
+
+#define UART_IMR 0x0014
+#define UART_IMR_TXLEV (1 << 0)
+#define UART_IMR_RXSTALE (1 << 3)
+#define UART_IMR_RXLEV (1 << 4)
+#define UART_IMR_DELTA_CTS (1 << 5)
+#define UART_IMR_CURRENT_CTS (1 << 6)
+
+#define UART_IPR_RXSTALE_LAST 0x20
+#define UART_IPR_STALE_LSB 0x1F
+#define UART_IPR_STALE_TIMEOUT_MSB 0x3FF80
+
+#define UART_IPR 0x0018
+#define UART_TFWR 0x001C
+#define UART_RFWR 0x0020
+#define UART_HCR 0x0024
+
+#define UART_MREG 0x0028
+#define UART_NREG 0x002C
+#define UART_DREG 0x0030
+#define UART_MNDREG 0x0034
+#define UART_IRDA 0x0038
+#define UART_MISR_MODE 0x0040
+#define UART_MISR_RESET 0x0044
+#define UART_MISR_EXPORT 0x0048
+#define UART_MISR_VAL 0x004C
+#define UART_TEST_CTRL 0x0050
+
+#define UART_SR 0x0008
+#define UART_SR_HUNT_CHAR (1 << 7)
+#define UART_SR_RX_BREAK (1 << 6)
+#define UART_SR_PAR_FRAME_ERR (1 << 5)
+#define UART_SR_OVERRUN (1 << 4)
+#define UART_SR_TX_EMPTY (1 << 3)
+#define UART_SR_TX_READY (1 << 2)
+#define UART_SR_RX_FULL (1 << 1)
+#define UART_SR_RX_READY (1 << 0)
+
+#define UART_RF 0x000C
+#define UART_MISR 0x0010
+#define UART_ISR 0x0014
+
+#endif /* __DRIVERS_SERIAL_MSM_SERIAL_H */
diff --git a/drivers/serial/s3c2400.c b/drivers/serial/s3c2400.c
index 4873f29..fb00ed5 100644
--- a/drivers/serial/s3c2400.c
+++ b/drivers/serial/s3c2400.c
@@ -78,7 +78,7 @@
static struct platform_driver s3c2400_serial_drv = {
.probe = s3c2400_serial_probe,
- .remove = s3c24xx_serial_remove,
+ .remove = __devexit_p(s3c24xx_serial_remove),
.driver = {
.name = "s3c2400-uart",
.owner = THIS_MODULE,
diff --git a/drivers/serial/s3c2410.c b/drivers/serial/s3c2410.c
index 87c182e..b5d7cbc 100644
--- a/drivers/serial/s3c2410.c
+++ b/drivers/serial/s3c2410.c
@@ -90,7 +90,7 @@
static struct platform_driver s3c2410_serial_drv = {
.probe = s3c2410_serial_probe,
- .remove = s3c24xx_serial_remove,
+ .remove = __devexit_p(s3c24xx_serial_remove),
.driver = {
.name = "s3c2410-uart",
.owner = THIS_MODULE,
diff --git a/drivers/serial/s3c2412.c b/drivers/serial/s3c2412.c
index fd017b3..11dcb90 100644
--- a/drivers/serial/s3c2412.c
+++ b/drivers/serial/s3c2412.c
@@ -123,7 +123,7 @@
static struct platform_driver s3c2412_serial_drv = {
.probe = s3c2412_serial_probe,
- .remove = s3c24xx_serial_remove,
+ .remove = __devexit_p(s3c24xx_serial_remove),
.driver = {
.name = "s3c2412-uart",
.owner = THIS_MODULE,
diff --git a/drivers/serial/s3c2440.c b/drivers/serial/s3c2440.c
index 29cbb0a..06c5b0c 100644
--- a/drivers/serial/s3c2440.c
+++ b/drivers/serial/s3c2440.c
@@ -153,7 +153,7 @@
static struct platform_driver s3c2440_serial_drv = {
.probe = s3c2440_serial_probe,
- .remove = s3c24xx_serial_remove,
+ .remove = __devexit_p(s3c24xx_serial_remove),
.driver = {
.name = "s3c2440-uart",
.owner = THIS_MODULE,
diff --git a/drivers/serial/s3c24a0.c b/drivers/serial/s3c24a0.c
index ebf2fd3..786a067 100644
--- a/drivers/serial/s3c24a0.c
+++ b/drivers/serial/s3c24a0.c
@@ -94,7 +94,7 @@
static struct platform_driver s3c24a0_serial_drv = {
.probe = s3c24a0_serial_probe,
- .remove = s3c24xx_serial_remove,
+ .remove = __devexit_p(s3c24xx_serial_remove),
.driver = {
.name = "s3c24a0-uart",
.owner = THIS_MODULE,
diff --git a/drivers/serial/s3c6400.c b/drivers/serial/s3c6400.c
index 3e37852..48f1a37 100644
--- a/drivers/serial/s3c6400.c
+++ b/drivers/serial/s3c6400.c
@@ -124,7 +124,7 @@
static struct platform_driver s3c6400_serial_drv = {
.probe = s3c6400_serial_probe,
- .remove = s3c24xx_serial_remove,
+ .remove = __devexit_p(s3c24xx_serial_remove),
.driver = {
.name = "s3c6400-uart",
.owner = THIS_MODULE,
diff --git a/drivers/serial/samsung.c b/drivers/serial/samsung.c
index 93b5d75..c8851a0 100644
--- a/drivers/serial/samsung.c
+++ b/drivers/serial/samsung.c
@@ -1174,7 +1174,7 @@
EXPORT_SYMBOL_GPL(s3c24xx_serial_probe);
-int s3c24xx_serial_remove(struct platform_device *dev)
+int __devexit s3c24xx_serial_remove(struct platform_device *dev)
{
struct uart_port *port = s3c24xx_dev_to_port(&dev->dev);
diff --git a/drivers/serial/samsung.h b/drivers/serial/samsung.h
index 7afb948..d3fe315 100644
--- a/drivers/serial/samsung.h
+++ b/drivers/serial/samsung.h
@@ -72,7 +72,7 @@
extern int s3c24xx_serial_probe(struct platform_device *dev,
struct s3c24xx_uart_info *uart);
-extern int s3c24xx_serial_remove(struct platform_device *dev);
+extern int __devexit s3c24xx_serial_remove(struct platform_device *dev);
extern int s3c24xx_serial_initconsole(struct platform_driver *drv,
struct s3c24xx_uart_info *uart);
diff --git a/drivers/serial/sb1250-duart.c b/drivers/serial/sb1250-duart.c
index a4fb343a..319e8b8 100644
--- a/drivers/serial/sb1250-duart.c
+++ b/drivers/serial/sb1250-duart.c
@@ -204,7 +204,7 @@
{
int loops = 10000;
- while (sbd_receive_ready(sport) && loops--)
+ while (sbd_receive_ready(sport) && --loops)
read_sbdchn(sport, R_DUART_RX_HOLD);
return loops;
}
@@ -218,7 +218,7 @@
{
int loops = 10000;
- while (!sbd_transmit_ready(sport) && loops--)
+ while (!sbd_transmit_ready(sport) && --loops)
udelay(2);
return loops;
}
@@ -232,7 +232,7 @@
{
int loops = 10000;
- while (!sbd_transmit_empty(sport) && loops--)
+ while (!sbd_transmit_empty(sport) && --loops)
udelay(2);
return loops;
}
diff --git a/drivers/serial/serial_txx9.c b/drivers/serial/serial_txx9.c
index 7313c2e..54dd16d 100644
--- a/drivers/serial/serial_txx9.c
+++ b/drivers/serial/serial_txx9.c
@@ -461,6 +461,94 @@
spin_unlock_irqrestore(&up->port.lock, flags);
}
+#if defined(CONFIG_SERIAL_TXX9_CONSOLE) || defined(CONFIG_CONSOLE_POLL)
+/*
+ * Wait for transmitter & holding register to empty
+ */
+static void wait_for_xmitr(struct uart_txx9_port *up)
+{
+ unsigned int tmout = 10000;
+
+ /* Wait up to 10ms for the character(s) to be sent. */
+ while (--tmout &&
+ !(sio_in(up, TXX9_SICISR) & TXX9_SICISR_TXALS))
+ udelay(1);
+
+ /* Wait up to 1s for flow control if necessary */
+ if (up->port.flags & UPF_CONS_FLOW) {
+ tmout = 1000000;
+ while (--tmout &&
+ (sio_in(up, TXX9_SICISR) & TXX9_SICISR_CTSS))
+ udelay(1);
+ }
+}
+#endif
+
+#ifdef CONFIG_CONSOLE_POLL
+/*
+ * Console polling routines for writing and reading from the uart while
+ * in an interrupt or debug context.
+ */
+
+static int serial_txx9_get_poll_char(struct uart_port *port)
+{
+ unsigned int ier;
+ unsigned char c;
+ struct uart_txx9_port *up = (struct uart_txx9_port *)port;
+
+ /*
+ * First save the IER then disable the interrupts
+ */
+ ier = sio_in(up, TXX9_SIDICR);
+ sio_out(up, TXX9_SIDICR, 0);
+
+ while (sio_in(up, TXX9_SIDISR) & TXX9_SIDISR_UVALID)
+ ;
+
+ c = sio_in(up, TXX9_SIRFIFO);
+
+ /*
+ * Finally, clear RX interrupt status
+ * and restore the IER
+ */
+ sio_mask(up, TXX9_SIDISR, TXX9_SIDISR_RDIS);
+ sio_out(up, TXX9_SIDICR, ier);
+ return c;
+}
+
+
+static void serial_txx9_put_poll_char(struct uart_port *port, unsigned char c)
+{
+ unsigned int ier;
+ struct uart_txx9_port *up = (struct uart_txx9_port *)port;
+
+ /*
+ * First save the IER then disable the interrupts
+ */
+ ier = sio_in(up, TXX9_SIDICR);
+ sio_out(up, TXX9_SIDICR, 0);
+
+ wait_for_xmitr(up);
+ /*
+ * Send the character out.
+ * If a LF, also do CR...
+ */
+ sio_out(up, TXX9_SITFIFO, c);
+ if (c == 10) {
+ wait_for_xmitr(up);
+ sio_out(up, TXX9_SITFIFO, 13);
+ }
+
+ /*
+ * Finally, wait for transmitter to become empty
+ * and restore the IER
+ */
+ wait_for_xmitr(up);
+ sio_out(up, TXX9_SIDICR, ier);
+}
+
+#endif /* CONFIG_CONSOLE_POLL */
+
static int serial_txx9_startup(struct uart_port *port)
{
struct uart_txx9_port *up = (struct uart_txx9_port *)port;
@@ -781,6 +869,10 @@
.release_port = serial_txx9_release_port,
.request_port = serial_txx9_request_port,
.config_port = serial_txx9_config_port,
+#ifdef CONFIG_CONSOLE_POLL
+ .poll_get_char = serial_txx9_get_poll_char,
+ .poll_put_char = serial_txx9_put_poll_char,
+#endif
};
static struct uart_txx9_port serial_txx9_ports[UART_NR];
@@ -803,27 +895,6 @@
#ifdef CONFIG_SERIAL_TXX9_CONSOLE
-/*
- * Wait for transmitter & holding register to empty
- */
-static inline void wait_for_xmitr(struct uart_txx9_port *up)
-{
- unsigned int tmout = 10000;
-
- /* Wait up to 10ms for the character(s) to be sent. */
- while (--tmout &&
- !(sio_in(up, TXX9_SICISR) & TXX9_SICISR_TXALS))
- udelay(1);
-
- /* Wait up to 1s for flow control if necessary */
- if (up->port.flags & UPF_CONS_FLOW) {
- tmout = 1000000;
- while (--tmout &&
- (sio_in(up, TXX9_SICISR) & TXX9_SICISR_CTSS))
- udelay(1);
- }
-}
-
static void serial_txx9_console_putchar(struct uart_port *port, int ch)
{
struct uart_txx9_port *up = (struct uart_txx9_port *)port;
diff --git a/drivers/serial/sunhv.c b/drivers/serial/sunhv.c
index a94a2ab..1df5325 100644
--- a/drivers/serial/sunhv.c
+++ b/drivers/serial/sunhv.c
@@ -461,7 +461,7 @@
break;
udelay(1);
}
- if (limit <= 0)
+ if (limit < 0)
break;
page_bytes -= written;
ra += written;
diff --git a/drivers/serial/timbuart.c b/drivers/serial/timbuart.c
index ac9e5d5..063a313 100644
--- a/drivers/serial/timbuart.c
+++ b/drivers/serial/timbuart.c
@@ -33,29 +33,29 @@
struct uart_port port;
struct tasklet_struct tasklet;
int usedma;
- u8 last_ier;
+ u32 last_ier;
struct platform_device *dev;
};
static int baudrates[] = {9600, 19200, 38400, 57600, 115200, 230400, 460800,
921600, 1843200, 3250000};
-static void timbuart_mctrl_check(struct uart_port *port, u8 isr, u8 *ier);
+static void timbuart_mctrl_check(struct uart_port *port, u32 isr, u32 *ier);
static irqreturn_t timbuart_handleinterrupt(int irq, void *devid);
static void timbuart_stop_rx(struct uart_port *port)
{
/* spin lock held by upper layer, disable all RX interrupts */
- u8 ier = ioread8(port->membase + TIMBUART_IER) & ~RXFLAGS;
- iowrite8(ier, port->membase + TIMBUART_IER);
+ u32 ier = ioread32(port->membase + TIMBUART_IER) & ~RXFLAGS;
+ iowrite32(ier, port->membase + TIMBUART_IER);
}
static void timbuart_stop_tx(struct uart_port *port)
{
/* spinlock held by upper layer, disable TX interrupt */
- u8 ier = ioread8(port->membase + TIMBUART_IER) & ~TXBAE;
- iowrite8(ier, port->membase + TIMBUART_IER);
+ u32 ier = ioread32(port->membase + TIMBUART_IER) & ~TXBAE;
+ iowrite32(ier, port->membase + TIMBUART_IER);
}
static void timbuart_start_tx(struct uart_port *port)
@@ -72,14 +72,14 @@
u8 ctl = ioread8(port->membase + TIMBUART_CTRL) | TIMBUART_CTRL_FLSHTX;
iowrite8(ctl, port->membase + TIMBUART_CTRL);
- iowrite8(TXBF, port->membase + TIMBUART_ISR);
+ iowrite32(TXBF, port->membase + TIMBUART_ISR);
}
static void timbuart_rx_chars(struct uart_port *port)
{
struct tty_struct *tty = port->info->port.tty;
- while (ioread8(port->membase + TIMBUART_ISR) & RXDP) {
+ while (ioread32(port->membase + TIMBUART_ISR) & RXDP) {
u8 ch = ioread8(port->membase + TIMBUART_RXFIFO);
port->icount.rx++;
tty_insert_flip_char(tty, ch, TTY_NORMAL);
@@ -97,7 +97,7 @@
{
struct circ_buf *xmit = &port->info->xmit;
- while (!(ioread8(port->membase + TIMBUART_ISR) & TXBF) &&
+ while (!(ioread32(port->membase + TIMBUART_ISR) & TXBF) &&
!uart_circ_empty(xmit)) {
iowrite8(xmit->buf[xmit->tail],
port->membase + TIMBUART_TXFIFO);
@@ -114,7 +114,7 @@
ioread8(port->membase + TIMBUART_BAUDRATE));
}
-static void timbuart_handle_tx_port(struct uart_port *port, u8 isr, u8 *ier)
+static void timbuart_handle_tx_port(struct uart_port *port, u32 isr, u32 *ier)
{
struct timbuart_port *uart =
container_of(port, struct timbuart_port, port);
@@ -129,7 +129,7 @@
if (isr & TXFLAGS) {
timbuart_tx_chars(port);
/* clear all TX interrupts */
- iowrite8(TXFLAGS, port->membase + TIMBUART_ISR);
+ iowrite32(TXFLAGS, port->membase + TIMBUART_ISR);
if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
uart_write_wakeup(port);
@@ -148,7 +148,7 @@
dev_dbg(port->dev, "%s - leaving\n", __func__);
}
-void timbuart_handle_rx_port(struct uart_port *port, u8 isr, u8 *ier)
+void timbuart_handle_rx_port(struct uart_port *port, u32 isr, u32 *ier)
{
if (isr & RXFLAGS) {
/* Some RX status is set */
@@ -161,7 +161,7 @@
timbuart_rx_chars(port);
/* ack all RX interrupts */
- iowrite8(RXFLAGS, port->membase + TIMBUART_ISR);
+ iowrite32(RXFLAGS, port->membase + TIMBUART_ISR);
}
/* always have the RX interrupts enabled */
@@ -173,11 +173,11 @@
void timbuart_tasklet(unsigned long arg)
{
struct timbuart_port *uart = (struct timbuart_port *)arg;
- u8 isr, ier = 0;
+ u32 isr, ier = 0;
spin_lock(&uart->port.lock);
- isr = ioread8(uart->port.membase + TIMBUART_ISR);
+ isr = ioread32(uart->port.membase + TIMBUART_ISR);
dev_dbg(uart->port.dev, "%s ISR: %x\n", __func__, isr);
if (!uart->usedma)
@@ -188,7 +188,7 @@
if (!uart->usedma)
timbuart_handle_rx_port(&uart->port, isr, &ier);
- iowrite8(ier, uart->port.membase + TIMBUART_IER);
+ iowrite32(ier, uart->port.membase + TIMBUART_IER);
spin_unlock(&uart->port.lock);
dev_dbg(uart->port.dev, "%s leaving\n", __func__);
@@ -196,9 +196,9 @@
static unsigned int timbuart_tx_empty(struct uart_port *port)
{
- u8 isr = ioread8(port->membase + TIMBUART_ISR);
+ u32 isr = ioread32(port->membase + TIMBUART_ISR);
- return (isr & TXBAE) ? TIOCSER_TEMT : 0;
+ return (isr & TXBE) ? TIOCSER_TEMT : 0;
}
static unsigned int timbuart_get_mctrl(struct uart_port *port)
@@ -222,13 +222,13 @@
iowrite8(TIMBUART_CTRL_RTS, port->membase + TIMBUART_CTRL);
}
-static void timbuart_mctrl_check(struct uart_port *port, u8 isr, u8 *ier)
+static void timbuart_mctrl_check(struct uart_port *port, u32 isr, u32 *ier)
{
unsigned int cts;
if (isr & CTS_DELTA) {
/* ack */
- iowrite8(CTS_DELTA, port->membase + TIMBUART_ISR);
+ iowrite32(CTS_DELTA, port->membase + TIMBUART_ISR);
cts = timbuart_get_mctrl(port);
uart_handle_cts_change(port, cts & TIOCM_CTS);
wake_up_interruptible(&port->info->delta_msr_wait);
@@ -255,9 +255,9 @@
dev_dbg(port->dev, "%s\n", __func__);
iowrite8(TIMBUART_CTRL_FLSHRX, port->membase + TIMBUART_CTRL);
- iowrite8(0xff, port->membase + TIMBUART_ISR);
+ iowrite32(0x1ff, port->membase + TIMBUART_ISR);
/* Enable all but TX interrupts */
- iowrite8(RXBAF | RXBF | RXTT | CTS_DELTA,
+ iowrite32(RXBAF | RXBF | RXTT | CTS_DELTA,
port->membase + TIMBUART_IER);
return request_irq(port->irq, timbuart_handleinterrupt, IRQF_SHARED,
@@ -270,7 +270,7 @@
container_of(port, struct timbuart_port, port);
dev_dbg(port->dev, "%s\n", __func__);
free_irq(port->irq, uart);
- iowrite8(0, port->membase + TIMBUART_IER);
+ iowrite32(0, port->membase + TIMBUART_IER);
}
static int get_bindex(int baud)
@@ -359,10 +359,10 @@
struct timbuart_port *uart = (struct timbuart_port *)devid;
if (ioread8(uart->port.membase + TIMBUART_IPR)) {
- uart->last_ier = ioread8(uart->port.membase + TIMBUART_IER);
+ uart->last_ier = ioread32(uart->port.membase + TIMBUART_IER);
/* disable interrupts, the tasklet enables them again */
- iowrite8(0, uart->port.membase + TIMBUART_IER);
+ iowrite32(0, uart->port.membase + TIMBUART_IER);
/* fire off bottom half */
tasklet_schedule(&uart->tasklet);
diff --git a/drivers/serial/zs.c b/drivers/serial/zs.c
index 9e6a873..d8c2809 100644
--- a/drivers/serial/zs.c
+++ b/drivers/serial/zs.c
@@ -231,7 +231,7 @@
{
int loops = 10000;
- while ((read_zsreg(zport, R0) & Rx_CH_AV) && loops--)
+ while ((read_zsreg(zport, R0) & Rx_CH_AV) && --loops)
read_zsdata(zport);
return loops;
}
@@ -241,7 +241,7 @@
struct zs_scc *scc = zport->scc;
int loops = 10000;
- while (!(read_zsreg(zport, R0) & Tx_BUF_EMP) && loops--) {
+ while (!(read_zsreg(zport, R0) & Tx_BUF_EMP) && --loops) {
zs_spin_unlock_cond_irq(&scc->zlock, irq);
udelay(2);
zs_spin_lock_cond_irq(&scc->zlock, irq);
@@ -254,7 +254,7 @@
struct zs_scc *scc = zport->scc;
int loops = 10000;
- while (!(read_zsreg(zport, R1) & ALL_SNT) && loops--) {
+ while (!(read_zsreg(zport, R1) & ALL_SNT) && --loops) {
zs_spin_unlock_cond_irq(&scc->zlock, irq);
udelay(2);
zs_spin_lock_cond_irq(&scc->zlock, irq);
diff --git a/drivers/staging/octeon/Makefile b/drivers/staging/octeon/Makefile
index 3c839e3..c0a583c 100644
--- a/drivers/staging/octeon/Makefile
+++ b/drivers/staging/octeon/Makefile
@@ -12,7 +12,6 @@
obj-${CONFIG_OCTEON_ETHERNET} := octeon-ethernet.o
octeon-ethernet-objs := ethernet.o
-octeon-ethernet-objs += ethernet-common.o
octeon-ethernet-objs += ethernet-mdio.o
octeon-ethernet-objs += ethernet-mem.o
octeon-ethernet-objs += ethernet-proc.o
diff --git a/drivers/staging/octeon/ethernet-common.c b/drivers/staging/octeon/ethernet-common.c
deleted file mode 100644
index 3e6f5b8..0000000
--- a/drivers/staging/octeon/ethernet-common.c
+++ /dev/null
@@ -1,328 +0,0 @@
-/**********************************************************************
- * Author: Cavium Networks
- *
- * Contact: support@caviumnetworks.com
- * This file is part of the OCTEON SDK
- *
- * Copyright (c) 2003-2007 Cavium Networks
- *
- * This file is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, Version 2, as
- * published by the Free Software Foundation.
- *
- * This file is distributed in the hope that it will be useful, but
- * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
- * NONINFRINGEMENT. See the GNU General Public License for more
- * details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this file; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- * or visit http://www.gnu.org/licenses/.
- *
- * This file may also be available under a different license from Cavium.
- * Contact Cavium Networks for more information
-**********************************************************************/
-#include <linux/kernel.h>
-#include <linux/mii.h>
-#include <net/dst.h>
-
-#include <asm/atomic.h>
-#include <asm/octeon/octeon.h>
-
-#include "ethernet-defines.h"
-#include "ethernet-tx.h"
-#include "ethernet-mdio.h"
-#include "ethernet-util.h"
-#include "octeon-ethernet.h"
-#include "ethernet-common.h"
-
-#include "cvmx-pip.h"
-#include "cvmx-pko.h"
-#include "cvmx-fau.h"
-#include "cvmx-helper.h"
-
-#include "cvmx-gmxx-defs.h"
-
-/**
- * Get the low level ethernet statistics
- *
- * @dev: Device to get the statistics from
- * Returns Pointer to the statistics
- */
-static struct net_device_stats *cvm_oct_common_get_stats(struct net_device *dev)
-{
- cvmx_pip_port_status_t rx_status;
- cvmx_pko_port_status_t tx_status;
- struct octeon_ethernet *priv = netdev_priv(dev);
-
- if (priv->port < CVMX_PIP_NUM_INPUT_PORTS) {
- if (octeon_is_simulation()) {
- /* The simulator doesn't support statistics */
- memset(&rx_status, 0, sizeof(rx_status));
- memset(&tx_status, 0, sizeof(tx_status));
- } else {
- cvmx_pip_get_port_status(priv->port, 1, &rx_status);
- cvmx_pko_get_port_status(priv->port, 1, &tx_status);
- }
-
- priv->stats.rx_packets += rx_status.inb_packets;
- priv->stats.tx_packets += tx_status.packets;
- priv->stats.rx_bytes += rx_status.inb_octets;
- priv->stats.tx_bytes += tx_status.octets;
- priv->stats.multicast += rx_status.multicast_packets;
- priv->stats.rx_crc_errors += rx_status.inb_errors;
- priv->stats.rx_frame_errors += rx_status.fcs_align_err_packets;
-
- /*
- * The drop counter must be incremented atomically
- * since the RX tasklet also increments it.
- */
-#ifdef CONFIG_64BIT
- atomic64_add(rx_status.dropped_packets,
- (atomic64_t *)&priv->stats.rx_dropped);
-#else
- atomic_add(rx_status.dropped_packets,
- (atomic_t *)&priv->stats.rx_dropped);
-#endif
- }
-
- return &priv->stats;
-}
-
-/**
- * Set the multicast list. Currently unimplemented.
- *
- * @dev: Device to work on
- */
-static void cvm_oct_common_set_multicast_list(struct net_device *dev)
-{
- union cvmx_gmxx_prtx_cfg gmx_cfg;
- struct octeon_ethernet *priv = netdev_priv(dev);
- int interface = INTERFACE(priv->port);
- int index = INDEX(priv->port);
-
- if ((interface < 2)
- && (cvmx_helper_interface_get_mode(interface) !=
- CVMX_HELPER_INTERFACE_MODE_SPI)) {
- union cvmx_gmxx_rxx_adr_ctl control;
- control.u64 = 0;
- control.s.bcst = 1; /* Allow broadcast MAC addresses */
-
- if (dev->mc_list || (dev->flags & IFF_ALLMULTI) ||
- (dev->flags & IFF_PROMISC))
- /* Force accept multicast packets */
- control.s.mcst = 2;
- else
- /* Force reject multicat packets */
- control.s.mcst = 1;
-
- if (dev->flags & IFF_PROMISC)
- /*
- * Reject matches if promisc. Since CAM is
- * shut off, should accept everything.
- */
- control.s.cam_mode = 0;
- else
- /* Filter packets based on the CAM */
- control.s.cam_mode = 1;
-
- gmx_cfg.u64 =
- cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
- cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface),
- gmx_cfg.u64 & ~1ull);
-
- cvmx_write_csr(CVMX_GMXX_RXX_ADR_CTL(index, interface),
- control.u64);
- if (dev->flags & IFF_PROMISC)
- cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM_EN
- (index, interface), 0);
- else
- cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM_EN
- (index, interface), 1);
-
- cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface),
- gmx_cfg.u64);
- }
-}
-
-/**
- * Set the hardware MAC address for a device
- *
- * @dev: Device to change the MAC address for
- * @addr: Address structure to change it too. MAC address is addr + 2.
- * Returns Zero on success
- */
-static int cvm_oct_common_set_mac_address(struct net_device *dev, void *addr)
-{
- struct octeon_ethernet *priv = netdev_priv(dev);
- union cvmx_gmxx_prtx_cfg gmx_cfg;
- int interface = INTERFACE(priv->port);
- int index = INDEX(priv->port);
-
- memcpy(dev->dev_addr, addr + 2, 6);
-
- if ((interface < 2)
- && (cvmx_helper_interface_get_mode(interface) !=
- CVMX_HELPER_INTERFACE_MODE_SPI)) {
- int i;
- uint8_t *ptr = addr;
- uint64_t mac = 0;
- for (i = 0; i < 6; i++)
- mac = (mac << 8) | (uint64_t) (ptr[i + 2]);
-
- gmx_cfg.u64 =
- cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
- cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface),
- gmx_cfg.u64 & ~1ull);
-
- cvmx_write_csr(CVMX_GMXX_SMACX(index, interface), mac);
- cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM0(index, interface),
- ptr[2]);
- cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM1(index, interface),
- ptr[3]);
- cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM2(index, interface),
- ptr[4]);
- cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM3(index, interface),
- ptr[5]);
- cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM4(index, interface),
- ptr[6]);
- cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM5(index, interface),
- ptr[7]);
- cvm_oct_common_set_multicast_list(dev);
- cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface),
- gmx_cfg.u64);
- }
- return 0;
-}
-
-/**
- * Change the link MTU. Unimplemented
- *
- * @dev: Device to change
- * @new_mtu: The new MTU
- *
- * Returns Zero on success
- */
-static int cvm_oct_common_change_mtu(struct net_device *dev, int new_mtu)
-{
- struct octeon_ethernet *priv = netdev_priv(dev);
- int interface = INTERFACE(priv->port);
- int index = INDEX(priv->port);
-#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
- int vlan_bytes = 4;
-#else
- int vlan_bytes = 0;
-#endif
-
- /*
- * Limit the MTU to make sure the ethernet packets are between
- * 64 bytes and 65535 bytes.
- */
- if ((new_mtu + 14 + 4 + vlan_bytes < 64)
- || (new_mtu + 14 + 4 + vlan_bytes > 65392)) {
- pr_err("MTU must be between %d and %d.\n",
- 64 - 14 - 4 - vlan_bytes, 65392 - 14 - 4 - vlan_bytes);
- return -EINVAL;
- }
- dev->mtu = new_mtu;
-
- if ((interface < 2)
- && (cvmx_helper_interface_get_mode(interface) !=
- CVMX_HELPER_INTERFACE_MODE_SPI)) {
- /* Add ethernet header and FCS, and VLAN if configured. */
- int max_packet = new_mtu + 14 + 4 + vlan_bytes;
-
- if (OCTEON_IS_MODEL(OCTEON_CN3XXX)
- || OCTEON_IS_MODEL(OCTEON_CN58XX)) {
- /* Signal errors on packets larger than the MTU */
- cvmx_write_csr(CVMX_GMXX_RXX_FRM_MAX(index, interface),
- max_packet);
- } else {
- /*
- * Set the hardware to truncate packets larger
- * than the MTU and smaller the 64 bytes.
- */
- union cvmx_pip_frm_len_chkx frm_len_chk;
- frm_len_chk.u64 = 0;
- frm_len_chk.s.minlen = 64;
- frm_len_chk.s.maxlen = max_packet;
- cvmx_write_csr(CVMX_PIP_FRM_LEN_CHKX(interface),
- frm_len_chk.u64);
- }
- /*
- * Set the hardware to truncate packets larger than
- * the MTU. The jabber register must be set to a
- * multiple of 8 bytes, so round up.
- */
- cvmx_write_csr(CVMX_GMXX_RXX_JABBER(index, interface),
- (max_packet + 7) & ~7u);
- }
- return 0;
-}
-
-/**
- * Per network device initialization
- *
- * @dev: Device to initialize
- * Returns Zero on success
- */
-int cvm_oct_common_init(struct net_device *dev)
-{
- static int count;
- char mac[8] = { 0x00, 0x00,
- octeon_bootinfo->mac_addr_base[0],
- octeon_bootinfo->mac_addr_base[1],
- octeon_bootinfo->mac_addr_base[2],
- octeon_bootinfo->mac_addr_base[3],
- octeon_bootinfo->mac_addr_base[4],
- octeon_bootinfo->mac_addr_base[5] + count
- };
- struct octeon_ethernet *priv = netdev_priv(dev);
-
- /*
- * Force the interface to use the POW send if always_use_pow
- * was specified or it is in the pow send list.
- */
- if ((pow_send_group != -1)
- && (always_use_pow || strstr(pow_send_list, dev->name)))
- priv->queue = -1;
-
- if (priv->queue != -1) {
- dev->hard_start_xmit = cvm_oct_xmit;
- if (USE_HW_TCPUDP_CHECKSUM)
- dev->features |= NETIF_F_IP_CSUM;
- } else
- dev->hard_start_xmit = cvm_oct_xmit_pow;
- count++;
-
- dev->get_stats = cvm_oct_common_get_stats;
- dev->set_mac_address = cvm_oct_common_set_mac_address;
- dev->set_multicast_list = cvm_oct_common_set_multicast_list;
- dev->change_mtu = cvm_oct_common_change_mtu;
- dev->do_ioctl = cvm_oct_ioctl;
- /* We do our own locking, Linux doesn't need to */
- dev->features |= NETIF_F_LLTX;
- SET_ETHTOOL_OPS(dev, &cvm_oct_ethtool_ops);
-#ifdef CONFIG_NET_POLL_CONTROLLER
- dev->poll_controller = cvm_oct_poll_controller;
-#endif
-
- cvm_oct_mdio_setup_device(dev);
- dev->set_mac_address(dev, mac);
- dev->change_mtu(dev, dev->mtu);
-
- /*
- * Zero out stats for port so we won't mistakenly show
- * counters from the bootloader.
- */
- memset(dev->get_stats(dev), 0, sizeof(struct net_device_stats));
-
- return 0;
-}
-
-void cvm_oct_common_uninit(struct net_device *dev)
-{
- /* Currently nothing to do */
-}
diff --git a/drivers/staging/octeon/ethernet-common.h b/drivers/staging/octeon/ethernet-common.h
deleted file mode 100644
index 2bd9cd7..0000000
--- a/drivers/staging/octeon/ethernet-common.h
+++ /dev/null
@@ -1,29 +0,0 @@
-/*********************************************************************
- * Author: Cavium Networks
- *
- * Contact: support@caviumnetworks.com
- * This file is part of the OCTEON SDK
- *
- * Copyright (c) 2003-2007 Cavium Networks
- *
- * This file is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, Version 2, as
- * published by the Free Software Foundation.
- *
- * This file is distributed in the hope that it will be useful, but
- * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
- * NONINFRINGEMENT. See the GNU General Public License for more
- * details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this file; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- * or visit http://www.gnu.org/licenses/.
- *
- * This file may also be available under a different license from Cavium.
- * Contact Cavium Networks for more information
-*********************************************************************/
-
-int cvm_oct_common_init(struct net_device *dev);
-void cvm_oct_common_uninit(struct net_device *dev);
diff --git a/drivers/staging/octeon/ethernet-defines.h b/drivers/staging/octeon/ethernet-defines.h
index 8f7374e..f13131b 100644
--- a/drivers/staging/octeon/ethernet-defines.h
+++ b/drivers/staging/octeon/ethernet-defines.h
@@ -117,6 +117,8 @@
/* Maximum number of packets to process per interrupt. */
#define MAX_RX_PACKETS 120
+/* Maximum number of SKBs to try to free per xmit packet. */
+#define MAX_SKB_TO_FREE 10
#define MAX_OUT_QUEUE_DEPTH 1000
#ifndef CONFIG_SMP
diff --git a/drivers/staging/octeon/ethernet-rgmii.c b/drivers/staging/octeon/ethernet-rgmii.c
index 8579f16..8704133 100644
--- a/drivers/staging/octeon/ethernet-rgmii.c
+++ b/drivers/staging/octeon/ethernet-rgmii.c
@@ -33,7 +33,6 @@
#include "ethernet-defines.h"
#include "octeon-ethernet.h"
-#include "ethernet-common.h"
#include "ethernet-util.h"
#include "cvmx-helper.h"
@@ -265,7 +264,7 @@
return return_status;
}
-static int cvm_oct_rgmii_open(struct net_device *dev)
+int cvm_oct_rgmii_open(struct net_device *dev)
{
union cvmx_gmxx_prtx_cfg gmx_cfg;
struct octeon_ethernet *priv = netdev_priv(dev);
@@ -286,7 +285,7 @@
return 0;
}
-static int cvm_oct_rgmii_stop(struct net_device *dev)
+int cvm_oct_rgmii_stop(struct net_device *dev)
{
union cvmx_gmxx_prtx_cfg gmx_cfg;
struct octeon_ethernet *priv = netdev_priv(dev);
@@ -305,9 +304,7 @@
int r;
cvm_oct_common_init(dev);
- dev->open = cvm_oct_rgmii_open;
- dev->stop = cvm_oct_rgmii_stop;
- dev->stop(dev);
+ dev->netdev_ops->ndo_stop(dev);
/*
* Due to GMX errata in CN3XXX series chips, it is necessary
diff --git a/drivers/staging/octeon/ethernet-sgmii.c b/drivers/staging/octeon/ethernet-sgmii.c
index 58fa39c..2b54996 100644
--- a/drivers/staging/octeon/ethernet-sgmii.c
+++ b/drivers/staging/octeon/ethernet-sgmii.c
@@ -34,13 +34,12 @@
#include "ethernet-defines.h"
#include "octeon-ethernet.h"
#include "ethernet-util.h"
-#include "ethernet-common.h"
#include "cvmx-helper.h"
#include "cvmx-gmxx-defs.h"
-static int cvm_oct_sgmii_open(struct net_device *dev)
+int cvm_oct_sgmii_open(struct net_device *dev)
{
union cvmx_gmxx_prtx_cfg gmx_cfg;
struct octeon_ethernet *priv = netdev_priv(dev);
@@ -61,7 +60,7 @@
return 0;
}
-static int cvm_oct_sgmii_stop(struct net_device *dev)
+int cvm_oct_sgmii_stop(struct net_device *dev)
{
union cvmx_gmxx_prtx_cfg gmx_cfg;
struct octeon_ethernet *priv = netdev_priv(dev);
@@ -113,9 +112,7 @@
{
struct octeon_ethernet *priv = netdev_priv(dev);
cvm_oct_common_init(dev);
- dev->open = cvm_oct_sgmii_open;
- dev->stop = cvm_oct_sgmii_stop;
- dev->stop(dev);
+ dev->netdev_ops->ndo_stop(dev);
if (!octeon_is_simulation())
priv->poll = cvm_oct_sgmii_poll;
diff --git a/drivers/staging/octeon/ethernet-spi.c b/drivers/staging/octeon/ethernet-spi.c
index e0971bb..66190b0 100644
--- a/drivers/staging/octeon/ethernet-spi.c
+++ b/drivers/staging/octeon/ethernet-spi.c
@@ -33,7 +33,6 @@
#include "ethernet-defines.h"
#include "octeon-ethernet.h"
-#include "ethernet-common.h"
#include "ethernet-util.h"
#include "cvmx-spi.h"
diff --git a/drivers/staging/octeon/ethernet-tx.c b/drivers/staging/octeon/ethernet-tx.c
index 77b7122..81a8513 100644
--- a/drivers/staging/octeon/ethernet-tx.c
+++ b/drivers/staging/octeon/ethernet-tx.c
@@ -47,6 +47,7 @@
#include "ethernet-defines.h"
#include "octeon-ethernet.h"
+#include "ethernet-tx.h"
#include "ethernet-util.h"
#include "cvmx-wqe.h"
@@ -82,8 +83,10 @@
uint64_t old_scratch2;
int dropped;
int qos;
+ int queue_it_up;
struct octeon_ethernet *priv = netdev_priv(dev);
- int32_t in_use;
+ int32_t skb_to_free;
+ int32_t undo;
int32_t buffers_to_free;
#if REUSE_SKBUFFS_WITHOUT_FREE
unsigned char *fpa_head;
@@ -120,15 +123,15 @@
old_scratch2 = cvmx_scratch_read64(CVMX_SCR_SCRATCH + 8);
/*
- * Assume we're going to be able t osend this
- * packet. Fetch and increment the number of pending
- * packets for output.
+ * Fetch and increment the number of packets to be
+ * freed.
*/
cvmx_fau_async_fetch_and_add32(CVMX_SCR_SCRATCH + 8,
FAU_NUM_PACKET_BUFFERS_TO_FREE,
0);
cvmx_fau_async_fetch_and_add32(CVMX_SCR_SCRATCH,
- priv->fau + qos * 4, 1);
+ priv->fau + qos * 4,
+ MAX_SKB_TO_FREE);
}
/*
@@ -253,10 +256,10 @@
/*
* The skbuff will be reused without ever being freed. We must
- * cleanup a bunch of Linux stuff.
+	 * clean up a bunch of core networking state.
*/
- dst_release(skb->dst);
- skb->dst = NULL;
+ dst_release(skb_dst(skb));
+ skb_dst_set(skb, NULL);
#ifdef CONFIG_XFRM
secpath_put(skb->sp);
skb->sp = NULL;
@@ -286,16 +289,30 @@
if (USE_ASYNC_IOBDMA) {
/* Get the number of skbuffs in use by the hardware */
CVMX_SYNCIOBDMA;
- in_use = cvmx_scratch_read64(CVMX_SCR_SCRATCH);
+ skb_to_free = cvmx_scratch_read64(CVMX_SCR_SCRATCH);
buffers_to_free = cvmx_scratch_read64(CVMX_SCR_SCRATCH + 8);
} else {
/* Get the number of skbuffs in use by the hardware */
- in_use = cvmx_fau_fetch_and_add32(priv->fau + qos * 4, 1);
+ skb_to_free = cvmx_fau_fetch_and_add32(priv->fau + qos * 4,
+ MAX_SKB_TO_FREE);
buffers_to_free =
cvmx_fau_fetch_and_add32(FAU_NUM_PACKET_BUFFERS_TO_FREE, 0);
}
/*
+ * We try to claim MAX_SKB_TO_FREE buffers. If there were not
+ * that many available, we have to un-claim (undo) any that
+ * were in excess. If skb_to_free is positive we will free
+ * that many buffers.
+ */
+ undo = skb_to_free > 0 ?
+ MAX_SKB_TO_FREE : skb_to_free + MAX_SKB_TO_FREE;
+ if (undo > 0)
+ cvmx_fau_atomic_add32(priv->fau+qos*4, -undo);
+ skb_to_free = -skb_to_free > MAX_SKB_TO_FREE ?
+ MAX_SKB_TO_FREE : -skb_to_free;
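+	/*
+	 * For example, with MAX_SKB_TO_FREE == 10: a pre-add counter of -3
+	 * means only 3 transmitted skbs are waiting, so 7 of the claim is
+	 * returned and 3 are freed; -15 keeps the whole claim and frees 10,
+	 * leaving 5 for the next pass; a positive counter frees nothing.
+	 */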
+
+ /*
* If we're sending faster than the receive can free them then
* don't do the HW free.
*/
@@ -330,38 +347,31 @@
cvmx_scratch_write64(CVMX_SCR_SCRATCH + 8, old_scratch2);
}
+ queue_it_up = 0;
if (unlikely(dropped)) {
dev_kfree_skb_any(skb);
- cvmx_fau_atomic_add32(priv->fau + qos * 4, -1);
priv->stats.tx_dropped++;
} else {
if (USE_SKBUFFS_IN_HW) {
/* Put this packet on the queue to be freed later */
if (pko_command.s.dontfree)
- skb_queue_tail(&priv->tx_free_list[qos], skb);
- else {
+ queue_it_up = 1;
+ else
cvmx_fau_atomic_add32
(FAU_NUM_PACKET_BUFFERS_TO_FREE, -1);
- cvmx_fau_atomic_add32(priv->fau + qos * 4, -1);
- }
} else {
/* Put this packet on the queue to be freed later */
- skb_queue_tail(&priv->tx_free_list[qos], skb);
+ queue_it_up = 1;
}
}
- /* Free skbuffs not in use by the hardware, possibly two at a time */
- if (skb_queue_len(&priv->tx_free_list[qos]) > in_use) {
+ if (queue_it_up) {
spin_lock(&priv->tx_free_list[qos].lock);
- /*
- * Check again now that we have the lock. It might
- * have changed.
- */
- if (skb_queue_len(&priv->tx_free_list[qos]) > in_use)
- dev_kfree_skb(__skb_dequeue(&priv->tx_free_list[qos]));
- if (skb_queue_len(&priv->tx_free_list[qos]) > in_use)
- dev_kfree_skb(__skb_dequeue(&priv->tx_free_list[qos]));
+ __skb_queue_tail(&priv->tx_free_list[qos], skb);
+ cvm_oct_free_tx_skbs(priv, skb_to_free, qos, 0);
spin_unlock(&priv->tx_free_list[qos].lock);
+ } else {
+ cvm_oct_free_tx_skbs(priv, skb_to_free, qos, 1);
}
return 0;
diff --git a/drivers/staging/octeon/ethernet-tx.h b/drivers/staging/octeon/ethernet-tx.h
index 5106236f..c0bebf7 100644
--- a/drivers/staging/octeon/ethernet-tx.h
+++ b/drivers/staging/octeon/ethernet-tx.h
@@ -30,3 +30,28 @@
int cvm_oct_transmit_qos(struct net_device *dev, void *work_queue_entry,
int do_free, int qos);
void cvm_oct_tx_shutdown(struct net_device *dev);
+
+/**
+ * Free dead transmit skbs.
+ *
+ * @priv: The driver data
+ * @skb_to_free: The number of SKBs to free (free none if negative).
+ * @qos: The queue to free from.
+ * @take_lock: If true, acquire the skb list lock.
+ */
+static inline void cvm_oct_free_tx_skbs(struct octeon_ethernet *priv,
+ int skb_to_free,
+ int qos, int take_lock)
+{
+ /* Free skbuffs not in use by the hardware. */
+ if (skb_to_free > 0) {
+ if (take_lock)
+ spin_lock(&priv->tx_free_list[qos].lock);
+ while (skb_to_free > 0) {
+ dev_kfree_skb(__skb_dequeue(&priv->tx_free_list[qos]));
+ skb_to_free--;
+ }
+ if (take_lock)
+ spin_unlock(&priv->tx_free_list[qos].lock);
+ }
+}
diff --git a/drivers/staging/octeon/ethernet-xaui.c b/drivers/staging/octeon/ethernet-xaui.c
index f08eb32..0c2e7cc 100644
--- a/drivers/staging/octeon/ethernet-xaui.c
+++ b/drivers/staging/octeon/ethernet-xaui.c
@@ -33,14 +33,13 @@
#include "ethernet-defines.h"
#include "octeon-ethernet.h"
-#include "ethernet-common.h"
#include "ethernet-util.h"
#include "cvmx-helper.h"
#include "cvmx-gmxx-defs.h"
-static int cvm_oct_xaui_open(struct net_device *dev)
+int cvm_oct_xaui_open(struct net_device *dev)
{
union cvmx_gmxx_prtx_cfg gmx_cfg;
struct octeon_ethernet *priv = netdev_priv(dev);
@@ -60,7 +59,7 @@
return 0;
}
-static int cvm_oct_xaui_stop(struct net_device *dev)
+int cvm_oct_xaui_stop(struct net_device *dev)
{
union cvmx_gmxx_prtx_cfg gmx_cfg;
struct octeon_ethernet *priv = netdev_priv(dev);
@@ -112,9 +111,7 @@
{
struct octeon_ethernet *priv = netdev_priv(dev);
cvm_oct_common_init(dev);
- dev->open = cvm_oct_xaui_open;
- dev->stop = cvm_oct_xaui_stop;
- dev->stop(dev);
+ dev->netdev_ops->ndo_stop(dev);
if (!octeon_is_simulation())
priv->poll = cvm_oct_xaui_poll;
diff --git a/drivers/staging/octeon/ethernet.c b/drivers/staging/octeon/ethernet.c
index e8ef9e0..b847951 100644
--- a/drivers/staging/octeon/ethernet.c
+++ b/drivers/staging/octeon/ethernet.c
@@ -37,13 +37,14 @@
#include <asm/octeon/octeon.h>
#include "ethernet-defines.h"
+#include "octeon-ethernet.h"
#include "ethernet-mem.h"
#include "ethernet-rx.h"
#include "ethernet-tx.h"
+#include "ethernet-mdio.h"
#include "ethernet-util.h"
#include "ethernet-proc.h"
-#include "ethernet-common.h"
-#include "octeon-ethernet.h"
+
#include "cvmx-pip.h"
#include "cvmx-pko.h"
@@ -51,6 +52,7 @@
#include "cvmx-ipd.h"
#include "cvmx-helper.h"
+#include "cvmx-gmxx-defs.h"
#include "cvmx-smix-defs.h"
#if defined(CONFIG_CAVIUM_OCTEON_NUM_PACKET_BUFFERS) \
@@ -129,53 +131,55 @@
*/
static void cvm_do_timer(unsigned long arg)
{
+ int32_t skb_to_free, undo;
+ int queues_per_port;
+ int qos;
+ struct octeon_ethernet *priv;
static int port;
- if (port < CVMX_PIP_NUM_INPUT_PORTS) {
- if (cvm_oct_device[port]) {
- int queues_per_port;
- int qos;
- struct octeon_ethernet *priv =
- netdev_priv(cvm_oct_device[port]);
- if (priv->poll) {
- /* skip polling if we don't get the lock */
- if (!down_trylock(&mdio_sem)) {
- priv->poll(cvm_oct_device[port]);
- up(&mdio_sem);
- }
- }
- queues_per_port = cvmx_pko_get_num_queues(port);
- /* Drain any pending packets in the free list */
- for (qos = 0; qos < queues_per_port; qos++) {
- if (skb_queue_len(&priv->tx_free_list[qos])) {
- spin_lock(&priv->tx_free_list[qos].
- lock);
- while (skb_queue_len
- (&priv->tx_free_list[qos]) >
- cvmx_fau_fetch_and_add32(priv->
- fau +
- qos * 4,
- 0))
- dev_kfree_skb(__skb_dequeue
- (&priv->
- tx_free_list
- [qos]));
- spin_unlock(&priv->tx_free_list[qos].
- lock);
- }
- }
- cvm_oct_device[port]->get_stats(cvm_oct_device[port]);
- }
- port++;
- /* Poll the next port in a 50th of a second.
- This spreads the polling of ports out a little bit */
- mod_timer(&cvm_oct_poll_timer, jiffies + HZ / 50);
- } else {
+ if (port >= CVMX_PIP_NUM_INPUT_PORTS) {
+ /*
+ * All ports have been polled. Start the next
+ * iteration through the ports in one second.
+ */
port = 0;
- /* All ports have been polled. Start the next iteration through
- the ports in one second */
mod_timer(&cvm_oct_poll_timer, jiffies + HZ);
+ return;
}
+ if (!cvm_oct_device[port])
+ goto out;
+
+ priv = netdev_priv(cvm_oct_device[port]);
+ if (priv->poll) {
+ /* skip polling if we don't get the lock */
+ if (!down_trylock(&mdio_sem)) {
+ priv->poll(cvm_oct_device[port]);
+ up(&mdio_sem);
+ }
+ }
+
+ queues_per_port = cvmx_pko_get_num_queues(port);
+ /* Drain any pending packets in the free list */
+ for (qos = 0; qos < queues_per_port; qos++) {
+ if (skb_queue_len(&priv->tx_free_list[qos]) == 0)
+ continue;
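+		/* Claim and undo skbs to free, same scheme as in cvm_oct_xmit(). */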
+ skb_to_free = cvmx_fau_fetch_and_add32(priv->fau + qos * 4,
+ MAX_SKB_TO_FREE);
+ undo = skb_to_free > 0 ?
+ MAX_SKB_TO_FREE : skb_to_free + MAX_SKB_TO_FREE;
+ if (undo > 0)
+ cvmx_fau_atomic_add32(priv->fau+qos*4, -undo);
+ skb_to_free = -skb_to_free > MAX_SKB_TO_FREE ?
+ MAX_SKB_TO_FREE : -skb_to_free;
+ cvm_oct_free_tx_skbs(priv, skb_to_free, qos, 1);
+ }
+ cvm_oct_device[port]->netdev_ops->ndo_get_stats(cvm_oct_device[port]);
+
+out:
+ port++;
+	/*
+	 * Poll the next port in a 50th of a second.  This spreads the
+	 * polling of ports out a little bit.
+	 */
+ mod_timer(&cvm_oct_poll_timer, jiffies + HZ / 50);
}
/**
@@ -246,6 +250,362 @@
EXPORT_SYMBOL(cvm_oct_free_work);
/**
+ * Get the low level ethernet statistics
+ *
+ * @dev: Device to get the statistics from
+ * Returns Pointer to the statistics
+ */
+static struct net_device_stats *cvm_oct_common_get_stats(struct net_device *dev)
+{
+ cvmx_pip_port_status_t rx_status;
+ cvmx_pko_port_status_t tx_status;
+ struct octeon_ethernet *priv = netdev_priv(dev);
+
+ if (priv->port < CVMX_PIP_NUM_INPUT_PORTS) {
+ if (octeon_is_simulation()) {
+ /* The simulator doesn't support statistics */
+ memset(&rx_status, 0, sizeof(rx_status));
+ memset(&tx_status, 0, sizeof(tx_status));
+ } else {
+ cvmx_pip_get_port_status(priv->port, 1, &rx_status);
+ cvmx_pko_get_port_status(priv->port, 1, &tx_status);
+ }
+
+ priv->stats.rx_packets += rx_status.inb_packets;
+ priv->stats.tx_packets += tx_status.packets;
+ priv->stats.rx_bytes += rx_status.inb_octets;
+ priv->stats.tx_bytes += tx_status.octets;
+ priv->stats.multicast += rx_status.multicast_packets;
+ priv->stats.rx_crc_errors += rx_status.inb_errors;
+ priv->stats.rx_frame_errors += rx_status.fcs_align_err_packets;
+
+ /*
+ * The drop counter must be incremented atomically
+ * since the RX tasklet also increments it.
+ */
+#ifdef CONFIG_64BIT
+ atomic64_add(rx_status.dropped_packets,
+ (atomic64_t *)&priv->stats.rx_dropped);
+#else
+ atomic_add(rx_status.dropped_packets,
+ (atomic_t *)&priv->stats.rx_dropped);
+#endif
+ }
+
+ return &priv->stats;
+}
+
+/**
+ * Change the link MTU and reprogram the hardware frame-size checks to match.
+ *
+ * @dev: Device to change
+ * @new_mtu: The new MTU
+ *
+ * Returns Zero on success
+ */
+static int cvm_oct_common_change_mtu(struct net_device *dev, int new_mtu)
+{
+ struct octeon_ethernet *priv = netdev_priv(dev);
+ int interface = INTERFACE(priv->port);
+ int index = INDEX(priv->port);
+#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
+ int vlan_bytes = 4;
+#else
+ int vlan_bytes = 0;
+#endif
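+	/*
+	 * The 14 and 4 below are the Ethernet header and FCS lengths;
+	 * vlan_bytes covers an optional 802.1Q tag.
+	 */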
+
+ /*
+ * Limit the MTU to make sure the ethernet packets are between
+	 * 64 bytes and 65392 bytes.
+ */
+ if ((new_mtu + 14 + 4 + vlan_bytes < 64)
+ || (new_mtu + 14 + 4 + vlan_bytes > 65392)) {
+ pr_err("MTU must be between %d and %d.\n",
+ 64 - 14 - 4 - vlan_bytes, 65392 - 14 - 4 - vlan_bytes);
+ return -EINVAL;
+ }
+ dev->mtu = new_mtu;
+
+ if ((interface < 2)
+ && (cvmx_helper_interface_get_mode(interface) !=
+ CVMX_HELPER_INTERFACE_MODE_SPI)) {
+ /* Add ethernet header and FCS, and VLAN if configured. */
+ int max_packet = new_mtu + 14 + 4 + vlan_bytes;
+
+ if (OCTEON_IS_MODEL(OCTEON_CN3XXX)
+ || OCTEON_IS_MODEL(OCTEON_CN58XX)) {
+ /* Signal errors on packets larger than the MTU */
+ cvmx_write_csr(CVMX_GMXX_RXX_FRM_MAX(index, interface),
+ max_packet);
+ } else {
+ /*
+ * Set the hardware to truncate packets larger
+			 * than the MTU and smaller than 64 bytes.
+ */
+ union cvmx_pip_frm_len_chkx frm_len_chk;
+ frm_len_chk.u64 = 0;
+ frm_len_chk.s.minlen = 64;
+ frm_len_chk.s.maxlen = max_packet;
+ cvmx_write_csr(CVMX_PIP_FRM_LEN_CHKX(interface),
+ frm_len_chk.u64);
+ }
+ /*
+ * Set the hardware to truncate packets larger than
+ * the MTU. The jabber register must be set to a
+ * multiple of 8 bytes, so round up.
+ */
+ cvmx_write_csr(CVMX_GMXX_RXX_JABBER(index, interface),
+ (max_packet + 7) & ~7u);
+ }
+ return 0;
+}
+
+/**
+ * Set the multicast filtering mode. The individual list entries are not
+ * programmed; a non-empty list simply makes the port accept all multicast.
+ *
+ * @dev: Device to work on
+ */
+static void cvm_oct_common_set_multicast_list(struct net_device *dev)
+{
+ union cvmx_gmxx_prtx_cfg gmx_cfg;
+ struct octeon_ethernet *priv = netdev_priv(dev);
+ int interface = INTERFACE(priv->port);
+ int index = INDEX(priv->port);
+
+ if ((interface < 2)
+ && (cvmx_helper_interface_get_mode(interface) !=
+ CVMX_HELPER_INTERFACE_MODE_SPI)) {
+ union cvmx_gmxx_rxx_adr_ctl control;
+ control.u64 = 0;
+ control.s.bcst = 1; /* Allow broadcast MAC addresses */
+
+ if (dev->mc_list || (dev->flags & IFF_ALLMULTI) ||
+ (dev->flags & IFF_PROMISC))
+ /* Force accept multicast packets */
+ control.s.mcst = 2;
+ else
+			/* Force reject multicast packets */
+ control.s.mcst = 1;
+
+ if (dev->flags & IFF_PROMISC)
+ /*
+ * Reject matches if promisc. Since CAM is
+ * shut off, should accept everything.
+ */
+ control.s.cam_mode = 0;
+ else
+ /* Filter packets based on the CAM */
+ control.s.cam_mode = 1;
+
+ gmx_cfg.u64 =
+ cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
+ cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface),
+ gmx_cfg.u64 & ~1ull);
+
+ cvmx_write_csr(CVMX_GMXX_RXX_ADR_CTL(index, interface),
+ control.u64);
+ if (dev->flags & IFF_PROMISC)
+ cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM_EN
+ (index, interface), 0);
+ else
+ cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM_EN
+ (index, interface), 1);
+
+ cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface),
+ gmx_cfg.u64);
+ }
+}
+
+/**
+ * Set the hardware MAC address for a device
+ *
+ * @dev: Device to change the MAC address for
+ * @addr: Address structure to change it to. The MAC address starts at addr + 2.
+ * Returns Zero on success
+ */
+static int cvm_oct_common_set_mac_address(struct net_device *dev, void *addr)
+{
+ struct octeon_ethernet *priv = netdev_priv(dev);
+ union cvmx_gmxx_prtx_cfg gmx_cfg;
+ int interface = INTERFACE(priv->port);
+ int index = INDEX(priv->port);
+
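+	/* addr is a struct sockaddr; the MAC itself starts at sa_data (offset 2). */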
+ memcpy(dev->dev_addr, addr + 2, 6);
+
+ if ((interface < 2)
+ && (cvmx_helper_interface_get_mode(interface) !=
+ CVMX_HELPER_INTERFACE_MODE_SPI)) {
+ int i;
+ uint8_t *ptr = addr;
+ uint64_t mac = 0;
+ for (i = 0; i < 6; i++)
+ mac = (mac << 8) | (uint64_t) (ptr[i + 2]);
+
+ gmx_cfg.u64 =
+ cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
+ cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface),
+ gmx_cfg.u64 & ~1ull);
+
+ cvmx_write_csr(CVMX_GMXX_SMACX(index, interface), mac);
+ cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM0(index, interface),
+ ptr[2]);
+ cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM1(index, interface),
+ ptr[3]);
+ cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM2(index, interface),
+ ptr[4]);
+ cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM3(index, interface),
+ ptr[5]);
+ cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM4(index, interface),
+ ptr[6]);
+ cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM5(index, interface),
+ ptr[7]);
+ cvm_oct_common_set_multicast_list(dev);
+ cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface),
+ gmx_cfg.u64);
+ }
+ return 0;
+}
+
+/**
+ * Per network device initialization
+ *
+ * @dev: Device to initialize
+ * Returns Zero on success
+ */
+int cvm_oct_common_init(struct net_device *dev)
+{
+ static int count;
+ char mac[8] = { 0x00, 0x00,
+ octeon_bootinfo->mac_addr_base[0],
+ octeon_bootinfo->mac_addr_base[1],
+ octeon_bootinfo->mac_addr_base[2],
+ octeon_bootinfo->mac_addr_base[3],
+ octeon_bootinfo->mac_addr_base[4],
+ octeon_bootinfo->mac_addr_base[5] + count
+ };
+ struct octeon_ethernet *priv = netdev_priv(dev);
+
+ /*
+ * Force the interface to use the POW send if always_use_pow
+ * was specified or it is in the pow send list.
+ */
+ if ((pow_send_group != -1)
+ && (always_use_pow || strstr(pow_send_list, dev->name)))
+ priv->queue = -1;
+
+ if (priv->queue != -1 && USE_HW_TCPUDP_CHECKSUM)
+ dev->features |= NETIF_F_IP_CSUM;
+
+ count++;
+
+ /* We do our own locking, Linux doesn't need to */
+ dev->features |= NETIF_F_LLTX;
+ SET_ETHTOOL_OPS(dev, &cvm_oct_ethtool_ops);
+
+ cvm_oct_mdio_setup_device(dev);
+ dev->netdev_ops->ndo_set_mac_address(dev, mac);
+ dev->netdev_ops->ndo_change_mtu(dev, dev->mtu);
+
+ /*
+ * Zero out stats for port so we won't mistakenly show
+ * counters from the bootloader.
+ */
+ memset(dev->netdev_ops->ndo_get_stats(dev), 0,
+ sizeof(struct net_device_stats));
+
+ return 0;
+}
+
+void cvm_oct_common_uninit(struct net_device *dev)
+{
+ /* Currently nothing to do */
+}
+
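+/*
+ * One net_device_ops table per interface type. They share the common
+ * MTU/MAC/multicast/stats helpers above and differ in init/uninit,
+ * open/stop and the transmit routine.
+ */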
+static const struct net_device_ops cvm_oct_npi_netdev_ops = {
+ .ndo_init = cvm_oct_common_init,
+ .ndo_uninit = cvm_oct_common_uninit,
+ .ndo_start_xmit = cvm_oct_xmit,
+ .ndo_set_multicast_list = cvm_oct_common_set_multicast_list,
+ .ndo_set_mac_address = cvm_oct_common_set_mac_address,
+ .ndo_do_ioctl = cvm_oct_ioctl,
+ .ndo_change_mtu = cvm_oct_common_change_mtu,
+ .ndo_get_stats = cvm_oct_common_get_stats,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = cvm_oct_poll_controller,
+#endif
+};
+static const struct net_device_ops cvm_oct_xaui_netdev_ops = {
+ .ndo_init = cvm_oct_xaui_init,
+ .ndo_uninit = cvm_oct_xaui_uninit,
+ .ndo_open = cvm_oct_xaui_open,
+ .ndo_stop = cvm_oct_xaui_stop,
+ .ndo_start_xmit = cvm_oct_xmit,
+ .ndo_set_multicast_list = cvm_oct_common_set_multicast_list,
+ .ndo_set_mac_address = cvm_oct_common_set_mac_address,
+ .ndo_do_ioctl = cvm_oct_ioctl,
+ .ndo_change_mtu = cvm_oct_common_change_mtu,
+ .ndo_get_stats = cvm_oct_common_get_stats,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = cvm_oct_poll_controller,
+#endif
+};
+static const struct net_device_ops cvm_oct_sgmii_netdev_ops = {
+ .ndo_init = cvm_oct_sgmii_init,
+ .ndo_uninit = cvm_oct_sgmii_uninit,
+ .ndo_open = cvm_oct_sgmii_open,
+ .ndo_stop = cvm_oct_sgmii_stop,
+ .ndo_start_xmit = cvm_oct_xmit,
+ .ndo_set_multicast_list = cvm_oct_common_set_multicast_list,
+ .ndo_set_mac_address = cvm_oct_common_set_mac_address,
+ .ndo_do_ioctl = cvm_oct_ioctl,
+ .ndo_change_mtu = cvm_oct_common_change_mtu,
+ .ndo_get_stats = cvm_oct_common_get_stats,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = cvm_oct_poll_controller,
+#endif
+};
+static const struct net_device_ops cvm_oct_spi_netdev_ops = {
+ .ndo_init = cvm_oct_spi_init,
+ .ndo_uninit = cvm_oct_spi_uninit,
+ .ndo_start_xmit = cvm_oct_xmit,
+ .ndo_set_multicast_list = cvm_oct_common_set_multicast_list,
+ .ndo_set_mac_address = cvm_oct_common_set_mac_address,
+ .ndo_do_ioctl = cvm_oct_ioctl,
+ .ndo_change_mtu = cvm_oct_common_change_mtu,
+ .ndo_get_stats = cvm_oct_common_get_stats,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = cvm_oct_poll_controller,
+#endif
+};
+static const struct net_device_ops cvm_oct_rgmii_netdev_ops = {
+ .ndo_init = cvm_oct_rgmii_init,
+ .ndo_uninit = cvm_oct_rgmii_uninit,
+ .ndo_open = cvm_oct_rgmii_open,
+ .ndo_stop = cvm_oct_rgmii_stop,
+ .ndo_start_xmit = cvm_oct_xmit,
+ .ndo_set_multicast_list = cvm_oct_common_set_multicast_list,
+ .ndo_set_mac_address = cvm_oct_common_set_mac_address,
+ .ndo_do_ioctl = cvm_oct_ioctl,
+ .ndo_change_mtu = cvm_oct_common_change_mtu,
+ .ndo_get_stats = cvm_oct_common_get_stats,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = cvm_oct_poll_controller,
+#endif
+};
+static const struct net_device_ops cvm_oct_pow_netdev_ops = {
+ .ndo_init = cvm_oct_common_init,
+ .ndo_start_xmit = cvm_oct_xmit_pow,
+ .ndo_set_multicast_list = cvm_oct_common_set_multicast_list,
+ .ndo_set_mac_address = cvm_oct_common_set_mac_address,
+ .ndo_do_ioctl = cvm_oct_ioctl,
+ .ndo_change_mtu = cvm_oct_common_change_mtu,
+ .ndo_get_stats = cvm_oct_common_get_stats,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = cvm_oct_poll_controller,
+#endif
+};
+
+/**
* Module/ driver initialization. Creates the linux network
* devices.
*
@@ -303,7 +663,7 @@
struct octeon_ethernet *priv = netdev_priv(dev);
memset(priv, 0, sizeof(struct octeon_ethernet));
- dev->init = cvm_oct_common_init;
+ dev->netdev_ops = &cvm_oct_pow_netdev_ops;
priv->imode = CVMX_HELPER_INTERFACE_MODE_DISABLED;
priv->port = CVMX_PIP_NUM_INPUT_PORTS;
priv->queue = -1;
@@ -372,44 +732,38 @@
break;
case CVMX_HELPER_INTERFACE_MODE_NPI:
- dev->init = cvm_oct_common_init;
- dev->uninit = cvm_oct_common_uninit;
+ dev->netdev_ops = &cvm_oct_npi_netdev_ops;
strcpy(dev->name, "npi%d");
break;
case CVMX_HELPER_INTERFACE_MODE_XAUI:
- dev->init = cvm_oct_xaui_init;
- dev->uninit = cvm_oct_xaui_uninit;
+ dev->netdev_ops = &cvm_oct_xaui_netdev_ops;
strcpy(dev->name, "xaui%d");
break;
case CVMX_HELPER_INTERFACE_MODE_LOOP:
- dev->init = cvm_oct_common_init;
- dev->uninit = cvm_oct_common_uninit;
+ dev->netdev_ops = &cvm_oct_npi_netdev_ops;
strcpy(dev->name, "loop%d");
break;
case CVMX_HELPER_INTERFACE_MODE_SGMII:
- dev->init = cvm_oct_sgmii_init;
- dev->uninit = cvm_oct_sgmii_uninit;
+ dev->netdev_ops = &cvm_oct_sgmii_netdev_ops;
strcpy(dev->name, "eth%d");
break;
case CVMX_HELPER_INTERFACE_MODE_SPI:
- dev->init = cvm_oct_spi_init;
- dev->uninit = cvm_oct_spi_uninit;
+ dev->netdev_ops = &cvm_oct_spi_netdev_ops;
strcpy(dev->name, "spi%d");
break;
case CVMX_HELPER_INTERFACE_MODE_RGMII:
case CVMX_HELPER_INTERFACE_MODE_GMII:
- dev->init = cvm_oct_rgmii_init;
- dev->uninit = cvm_oct_rgmii_uninit;
+ dev->netdev_ops = &cvm_oct_rgmii_netdev_ops;
strcpy(dev->name, "eth%d");
break;
}
- if (!dev->init) {
+ if (!dev->netdev_ops) {
kfree(dev);
} else if (register_netdev(dev) < 0) {
pr_err("Failed to register ethernet device "
diff --git a/drivers/staging/octeon/octeon-ethernet.h b/drivers/staging/octeon/octeon-ethernet.h
index b319907..3aef987 100644
--- a/drivers/staging/octeon/octeon-ethernet.h
+++ b/drivers/staging/octeon/octeon-ethernet.h
@@ -111,12 +111,23 @@
extern int cvm_oct_rgmii_init(struct net_device *dev);
extern void cvm_oct_rgmii_uninit(struct net_device *dev);
+extern int cvm_oct_rgmii_open(struct net_device *dev);
+extern int cvm_oct_rgmii_stop(struct net_device *dev);
+
extern int cvm_oct_sgmii_init(struct net_device *dev);
extern void cvm_oct_sgmii_uninit(struct net_device *dev);
+extern int cvm_oct_sgmii_open(struct net_device *dev);
+extern int cvm_oct_sgmii_stop(struct net_device *dev);
+
extern int cvm_oct_spi_init(struct net_device *dev);
extern void cvm_oct_spi_uninit(struct net_device *dev);
extern int cvm_oct_xaui_init(struct net_device *dev);
extern void cvm_oct_xaui_uninit(struct net_device *dev);
+extern int cvm_oct_xaui_open(struct net_device *dev);
+extern int cvm_oct_xaui_stop(struct net_device *dev);
+
+extern int cvm_oct_common_init(struct net_device *dev);
+extern void cvm_oct_common_uninit(struct net_device *dev);
extern int always_use_pow;
extern int pow_send_group;
diff --git a/drivers/staging/serqt_usb2/serqt_usb2.c b/drivers/staging/serqt_usb2/serqt_usb2.c
index 581232b..90b29b5 100644
--- a/drivers/staging/serqt_usb2/serqt_usb2.c
+++ b/drivers/staging/serqt_usb2/serqt_usb2.c
@@ -284,21 +284,12 @@
return;
}
-static void ProcessRxChar(struct usb_serial_port *port, unsigned char Data)
+static void ProcessRxChar(struct tty_struct *tty, struct usb_serial_port *port,
+ unsigned char data)
{
- struct tty_struct *tty;
struct urb *urb = port->read_urb;
- tty = tty_port_tty_get(&port->port);
-
- /* if we insert more than TTY_FLIPBUF_SIZE characters, we drop them. */
-
- if (tty && urb->actual_length) {
- tty_buffer_request_room(tty, 1);
- tty_insert_flip_string(tty, &Data, 1);
- /* tty_flip_buffer_push(tty); */
- }
-
- return;
+ if (urb->actual_length)
+ tty_insert_flip_char(tty, data, TTY_NORMAL);
}
static void qt_write_bulk_callback(struct urb *urb)
@@ -435,8 +426,10 @@
case 0xff:
dbg("No status sequence. \n");
- ProcessRxChar(port, data[i]);
- ProcessRxChar(port, data[i + 1]);
+ if (tty) {
+ ProcessRxChar(tty, port, data[i]);
+ ProcessRxChar(tty, port, data[i + 1]);
+ }
i += 2;
break;
}
@@ -444,10 +437,8 @@
continue;
}
- if (tty && urb->actual_length) {
- tty_buffer_request_room(tty, 1);
- tty_insert_flip_string(tty, (data + i), 1);
- }
+ if (tty && urb->actual_length)
+ tty_insert_flip_char(tty, data[i], TTY_NORMAL);
}
tty_flip_buffer_push(tty);
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index 38bfdb0..3f10459 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -550,7 +550,7 @@
static int acm_tty_open(struct tty_struct *tty, struct file *filp)
{
struct acm *acm;
- int rv = -EINVAL;
+ int rv = -ENODEV;
int i;
dbg("Entering acm_tty_open.");
@@ -677,7 +677,7 @@
/* Perform the closing process and see if we need to do the hardware
shutdown */
- if (tty_port_close_start(&acm->port, tty, filp) == 0)
+ if (!acm || tty_port_close_start(&acm->port, tty, filp) == 0)
return;
acm_port_down(acm, 0);
tty_port_close_end(&acm->port, tty);
diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c
index d595aa5..a842164 100644
--- a/drivers/usb/serial/usb-serial.c
+++ b/drivers/usb/serial/usb-serial.c
@@ -333,6 +333,9 @@
{
struct usb_serial_port *port = tty->driver_data;
+ if (!port)
+ return;
+
dbg("%s - port %d", __func__, port->number);
diff --git a/drivers/uwb/hwa-rc.c b/drivers/uwb/hwa-rc.c
index 559f878..9052bcb 100644
--- a/drivers/uwb/hwa-rc.c
+++ b/drivers/uwb/hwa-rc.c
@@ -501,7 +501,7 @@
int result = -ENOANO;
struct uwb_rceb *rceb = *header;
int event = le16_to_cpu(rceb->wEvent);
- size_t event_size;
+ ssize_t event_size;
size_t core_size, offset;
if (rceb->bEventType != UWB_RC_CET_GENERAL)
diff --git a/drivers/uwb/wlp/txrx.c b/drivers/uwb/wlp/txrx.c
index cd20357..86a853b 100644
--- a/drivers/uwb/wlp/txrx.c
+++ b/drivers/uwb/wlp/txrx.c
@@ -326,7 +326,7 @@
int result = -EINVAL;
struct ethhdr *eth_hdr = (void *) skb->data;
- if (is_broadcast_ether_addr(eth_hdr->h_dest)) {
+ if (is_multicast_ether_addr(eth_hdr->h_dest)) {
result = wlp_eda_for_each(&wlp->eda, wlp_wss_send_copy, skb);
if (result < 0) {
if (printk_ratelimit())
diff --git a/drivers/video/nvidia/nv_setup.c b/drivers/video/nvidia/nv_setup.c
index 135ae18..eef2bb2 100644
--- a/drivers/video/nvidia/nv_setup.c
+++ b/drivers/video/nvidia/nv_setup.c
@@ -543,8 +543,7 @@
} else if (analog_on_B) {
CRTCnumber = outputBfromCRTC;
FlatPanel = 0;
- printk("nvidiafb: CRTC %i"
- "appears to have a "
+ printk("nvidiafb: CRTC %i appears to have a "
"CRT attached\n", CRTCnumber);
} else if (slaved_on_A) {
CRTCnumber = 0;
diff --git a/drivers/w1/slaves/w1_ds2760.c b/drivers/w1/slaves/w1_ds2760.c
index 1f09d4e..59f708e 100644
--- a/drivers/w1/slaves/w1_ds2760.c
+++ b/drivers/w1/slaves/w1_ds2760.c
@@ -68,6 +68,34 @@
return w1_ds2760_io(dev, buf, addr, count, 1);
}
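+/*
+ * Issue a one-shot EEPROM command (copy or recall) for the block that
+ * contains @addr.
+ */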
+static int w1_ds2760_eeprom_cmd(struct device *dev, int addr, int cmd)
+{
+ struct w1_slave *sl = container_of(dev, struct w1_slave, dev);
+
+ if (!dev)
+ return -EINVAL;
+
+ mutex_lock(&sl->master->mutex);
+
+ if (w1_reset_select_slave(sl) == 0) {
+ w1_write_8(sl->master, cmd);
+ w1_write_8(sl->master, addr);
+ }
+
+ mutex_unlock(&sl->master->mutex);
+ return 0;
+}
+
+int w1_ds2760_store_eeprom(struct device *dev, int addr)
+{
+ return w1_ds2760_eeprom_cmd(dev, addr, W1_DS2760_COPY_DATA);
+}
+
+int w1_ds2760_recall_eeprom(struct device *dev, int addr)
+{
+ return w1_ds2760_eeprom_cmd(dev, addr, W1_DS2760_RECALL_DATA);
+}
+
static ssize_t w1_ds2760_read_bin(struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
@@ -200,6 +228,8 @@
EXPORT_SYMBOL(w1_ds2760_read);
EXPORT_SYMBOL(w1_ds2760_write);
+EXPORT_SYMBOL(w1_ds2760_store_eeprom);
+EXPORT_SYMBOL(w1_ds2760_recall_eeprom);
module_init(w1_ds2760_init);
module_exit(w1_ds2760_exit);
diff --git a/drivers/w1/slaves/w1_ds2760.h b/drivers/w1/slaves/w1_ds2760.h
index f130242..58e7741 100644
--- a/drivers/w1/slaves/w1_ds2760.h
+++ b/drivers/w1/slaves/w1_ds2760.h
@@ -25,6 +25,10 @@
#define DS2760_PROTECTION_REG 0x00
#define DS2760_STATUS_REG 0x01
+ #define DS2760_STATUS_IE (1 << 2)
+ #define DS2760_STATUS_SWEN (1 << 3)
+ #define DS2760_STATUS_RNAOP (1 << 4)
+ #define DS2760_STATUS_PMOD (1 << 5)
#define DS2760_EEPROM_REG 0x07
#define DS2760_SPECIAL_FEATURE_REG 0x08
#define DS2760_VOLTAGE_MSB 0x0c
@@ -38,6 +42,7 @@
#define DS2760_EEPROM_BLOCK0 0x20
#define DS2760_ACTIVE_FULL 0x20
#define DS2760_EEPROM_BLOCK1 0x30
+#define DS2760_STATUS_WRITE_REG 0x31
#define DS2760_RATED_CAPACITY 0x32
#define DS2760_CURRENT_OFFSET_BIAS 0x33
#define DS2760_ACTIVE_EMPTY 0x3b
@@ -46,5 +51,7 @@
size_t count);
extern int w1_ds2760_write(struct device *dev, char *buf, int addr,
size_t count);
+extern int w1_ds2760_store_eeprom(struct device *dev, int addr);
+extern int w1_ds2760_recall_eeprom(struct device *dev, int addr);
#endif /* !__w1_ds2760_h__ */
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig
index b166f28..b1ccc04 100644
--- a/drivers/watchdog/Kconfig
+++ b/drivers/watchdog/Kconfig
@@ -240,6 +240,32 @@
To compile this driver as a module, choose M here: the
module will be called orion_wdt.
+config COH901327_WATCHDOG
+ bool "ST-Ericsson COH 901 327 watchdog"
+ depends on ARCH_U300
+ default y if MACH_U300
+ help
+ Say Y here to include Watchdog timer support for the
+ watchdog embedded into the ST-Ericsson U300 series platforms.
+ This watchdog is used to reset the system and thus cannot be
+ compiled as a module.
+
+config TWL4030_WATCHDOG
+ tristate "TWL4030 Watchdog"
+ depends on TWL4030_CORE
+ help
+ Support for TI TWL4030 watchdog. Say 'Y' here to enable the
+ watchdog timer support for TWL4030 chips.
+
+config STMP3XXX_WATCHDOG
+ tristate "Freescale STMP3XXX watchdog"
+ depends on ARCH_STMP3XXX
+ help
+	  Say Y here to include support for the watchdog timer
+ for the Sigmatel STMP37XX/378X SoC.
+ To compile this driver as a module, choose M here: the
+ module will be called stmp3xxx_wdt.
+
# AVR32 Architecture
config AT32AP700X_WDT
@@ -703,6 +729,12 @@
# MIPS Architecture
+config BCM47XX_WDT
+ tristate "Broadcom BCM47xx Watchdog Timer"
+ depends on BCM47XX
+ help
+	  Hardware driver for the Broadcom BCM47xx Watchdog Timer.
+
config RC32434_WDT
tristate "IDT RC32434 SoC Watchdog Timer"
depends on MIKROTIK_RB532
@@ -729,6 +761,15 @@
Hardware driver for the MTX-1 boards. This is a watchdog timer that
will reboot the machine after a 100 seconds timer expired.
+config PNX833X_WDT
+ tristate "PNX833x Hardware Watchdog"
+ depends on SOC_PNX8335
+ help
+ Hardware driver for the PNX833x's watchdog. This is a
+	  watchdog timer that will reboot the machine after a programmable
+ timer has expired and no process has written to /dev/watchdog during
+ that time.
+
config WDT_RM9K_GPI
tristate "RM9000/GPI hardware watchdog"
depends on CPU_RM9000
@@ -966,24 +1007,16 @@
---help---
If you have a PCI-WDT500/501 watchdog board, say Y here, otherwise N.
- To compile this driver as a module, choose M here: the
- module will be called wdt_pci.
-
-config WDT_501_PCI
- bool "PCI-WDT501 features"
- depends on WDTPCI
- help
- Saying Y here and creating a character special file /dev/temperature
- with major number 10 and minor number 131 ("man mknod") will give
- you a thermometer inside your computer: reading from
- /dev/temperature yields one byte, the temperature in degrees
- Fahrenheit. This works only if you have a PCI-WDT501 watchdog board
- installed.
+ If you have a PCI-WDT501 watchdog board then you can enable the
+ temperature sensor by setting the type parameter to 501.
If you want to enable the Fan Tachometer on the PCI-WDT501, then you
can do this via the tachometer parameter. Only do this if you have a
fan tachometer actually set up.
+ To compile this driver as a module, choose M here: the
+ module will be called wdt_pci.
+
#
# USB-based Watchdog Cards
#
diff --git a/drivers/watchdog/Makefile b/drivers/watchdog/Makefile
index c3afa14..3d77429 100644
--- a/drivers/watchdog/Makefile
+++ b/drivers/watchdog/Makefile
@@ -28,6 +28,7 @@
obj-$(CONFIG_AT91RM9200_WATCHDOG) += at91rm9200_wdt.o
obj-$(CONFIG_AT91SAM9X_WATCHDOG) += at91sam9_wdt.o
obj-$(CONFIG_OMAP_WATCHDOG) += omap_wdt.o
+obj-$(CONFIG_TWL4030_WATCHDOG) += twl4030_wdt.o
obj-$(CONFIG_21285_WATCHDOG) += wdt285.o
obj-$(CONFIG_977_WATCHDOG) += wdt977.o
obj-$(CONFIG_IXP2000_WATCHDOG) += ixp2000_wdt.o
@@ -41,6 +42,8 @@
obj-$(CONFIG_IOP_WATCHDOG) += iop_wdt.o
obj-$(CONFIG_DAVINCI_WATCHDOG) += davinci_wdt.o
obj-$(CONFIG_ORION_WATCHDOG) += orion_wdt.o
+obj-$(CONFIG_COH901327_WATCHDOG) += coh901327_wdt.o
+obj-$(CONFIG_STMP3XXX_WATCHDOG) += stmp3xxx_wdt.o
# AVR32 Architecture
obj-$(CONFIG_AT32AP700X_WDT) += at32ap700x_wdt.o
@@ -98,9 +101,11 @@
# M68KNOMMU Architecture
# MIPS Architecture
+obj-$(CONFIG_BCM47XX_WDT) += bcm47xx_wdt.o
obj-$(CONFIG_RC32434_WDT) += rc32434_wdt.o
obj-$(CONFIG_INDYDOG) += indydog.o
obj-$(CONFIG_WDT_MTX1) += mtx-1_wdt.o
+obj-$(CONFIG_PNX833X_WDT) += pnx833x_wdt.o
obj-$(CONFIG_WDT_RM9K_GPI) += rm9k_wdt.o
obj-$(CONFIG_SIBYTE_WDOG) += sb_wdog.o
obj-$(CONFIG_AR7_WDT) += ar7_wdt.o
diff --git a/drivers/watchdog/bcm47xx_wdt.c b/drivers/watchdog/bcm47xx_wdt.c
new file mode 100644
index 0000000..5c7011c
--- /dev/null
+++ b/drivers/watchdog/bcm47xx_wdt.c
@@ -0,0 +1,286 @@
+/*
+ * Watchdog driver for Broadcom BCM47XX
+ *
+ * Copyright (C) 2008 Aleksandar Radovanovic <biblbroks@sezampro.rs>
+ * Copyright (C) 2009 Matthieu CASTET <castet.matthieu@free.fr>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/bitops.h>
+#include <linux/errno.h>
+#include <linux/fs.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/miscdevice.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/reboot.h>
+#include <linux/types.h>
+#include <linux/uaccess.h>
+#include <linux/watchdog.h>
+#include <linux/timer.h>
+#include <linux/jiffies.h>
+#include <linux/ssb/ssb_embedded.h>
+#include <asm/mach-bcm47xx/bcm47xx.h>
+
+#define DRV_NAME "bcm47xx_wdt"
+
+#define WDT_DEFAULT_TIME 30 /* seconds */
+#define WDT_MAX_TIME 255 /* seconds */
+
+static int wdt_time = WDT_DEFAULT_TIME;
+static int nowayout = WATCHDOG_NOWAYOUT;
+
+module_param(wdt_time, int, 0);
+MODULE_PARM_DESC(wdt_time, "Watchdog time in seconds. (default="
+ __MODULE_STRING(WDT_DEFAULT_TIME) ")");
+
+#ifdef CONFIG_WATCHDOG_NOWAYOUT
+module_param(nowayout, int, 0);
+MODULE_PARM_DESC(nowayout,
+ "Watchdog cannot be stopped once started (default="
+ __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
+#endif
+
+static unsigned long bcm47xx_wdt_busy;
+static char expect_release;
+static struct timer_list wdt_timer;
+static atomic_t ticks;
+
+static inline void bcm47xx_wdt_hw_start(void)
+{
+	/* this is 2.5s on a 100 MHz clock and 2s on a 133 MHz clock */
+ ssb_watchdog_timer_set(&ssb_bcm47xx, 0xfffffff);
+}
+
+static inline int bcm47xx_wdt_hw_stop(void)
+{
+ return ssb_watchdog_timer_set(&ssb_bcm47xx, 0);
+}
+
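+/*
+ * The SSB hardware watchdog only counts a couple of seconds, so a kernel
+ * timer re-arms it once per second while the 'ticks' soft counter is
+ * positive. Once userspace stops petting the device the counter runs out
+ * and the hardware watchdog is left to expire.
+ */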
+static void bcm47xx_timer_tick(unsigned long unused)
+{
+ if (!atomic_dec_and_test(&ticks)) {
+ bcm47xx_wdt_hw_start();
+ mod_timer(&wdt_timer, jiffies + HZ);
+ } else {
+		printk(KERN_CRIT DRV_NAME ": Watchdog will fire soon!!!\n");
+ }
+}
+
+static inline void bcm47xx_wdt_pet(void)
+{
+ atomic_set(&ticks, wdt_time);
+}
+
+static void bcm47xx_wdt_start(void)
+{
+ bcm47xx_wdt_pet();
+ bcm47xx_timer_tick(0);
+}
+
+static void bcm47xx_wdt_pause(void)
+{
+ del_timer_sync(&wdt_timer);
+ bcm47xx_wdt_hw_stop();
+}
+
+static void bcm47xx_wdt_stop(void)
+{
+ bcm47xx_wdt_pause();
+}
+
+static int bcm47xx_wdt_settimeout(int new_time)
+{
+ if ((new_time <= 0) || (new_time > WDT_MAX_TIME))
+ return -EINVAL;
+
+ wdt_time = new_time;
+ return 0;
+}
+
+static int bcm47xx_wdt_open(struct inode *inode, struct file *file)
+{
+ if (test_and_set_bit(0, &bcm47xx_wdt_busy))
+ return -EBUSY;
+
+ bcm47xx_wdt_start();
+ return nonseekable_open(inode, file);
+}
+
+static int bcm47xx_wdt_release(struct inode *inode, struct file *file)
+{
+ if (expect_release == 42) {
+ bcm47xx_wdt_stop();
+ } else {
+ printk(KERN_CRIT DRV_NAME
+ ": Unexpected close, not stopping watchdog!\n");
+ bcm47xx_wdt_start();
+ }
+
+ clear_bit(0, &bcm47xx_wdt_busy);
+ expect_release = 0;
+ return 0;
+}
+
+static ssize_t bcm47xx_wdt_write(struct file *file, const char __user *data,
+ size_t len, loff_t *ppos)
+{
+ if (len) {
+ if (!nowayout) {
+ size_t i;
+
+ expect_release = 0;
+
+ for (i = 0; i != len; i++) {
+ char c;
+ if (get_user(c, data + i))
+ return -EFAULT;
+ if (c == 'V')
+ expect_release = 42;
+ }
+ }
+ bcm47xx_wdt_pet();
+ }
+ return len;
+}
+
+static struct watchdog_info bcm47xx_wdt_info = {
+ .identity = DRV_NAME,
+ .options = WDIOF_SETTIMEOUT |
+ WDIOF_KEEPALIVEPING |
+ WDIOF_MAGICCLOSE,
+};
+
+static long bcm47xx_wdt_ioctl(struct file *file,
+ unsigned int cmd, unsigned long arg)
+{
+ void __user *argp = (void __user *)arg;
+ int __user *p = argp;
+	int new_value, retval = -EINVAL;
+
+ switch (cmd) {
+ case WDIOC_GETSUPPORT:
+ return copy_to_user(argp, &bcm47xx_wdt_info,
+ sizeof(bcm47xx_wdt_info)) ? -EFAULT : 0;
+
+ case WDIOC_GETSTATUS:
+ case WDIOC_GETBOOTSTATUS:
+ return put_user(0, p);
+
+ case WDIOC_SETOPTIONS:
+ if (get_user(new_value, p))
+ return -EFAULT;
+
+ if (new_value & WDIOS_DISABLECARD) {
+ bcm47xx_wdt_stop();
+ retval = 0;
+ }
+
+ if (new_value & WDIOS_ENABLECARD) {
+ bcm47xx_wdt_start();
+ retval = 0;
+ }
+
+ return retval;
+
+ case WDIOC_KEEPALIVE:
+ bcm47xx_wdt_pet();
+ return 0;
+
+ case WDIOC_SETTIMEOUT:
+ if (get_user(new_value, p))
+ return -EFAULT;
+
+ if (bcm47xx_wdt_settimeout(new_value))
+ return -EINVAL;
+
+ bcm47xx_wdt_pet();
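+		/* Fall through to return the new timeout */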
+
+ case WDIOC_GETTIMEOUT:
+ return put_user(wdt_time, p);
+
+ default:
+ return -ENOTTY;
+ }
+}
+
+static int bcm47xx_wdt_notify_sys(struct notifier_block *this,
+ unsigned long code, void *unused)
+{
+ if (code == SYS_DOWN || code == SYS_HALT)
+ bcm47xx_wdt_stop();
+ return NOTIFY_DONE;
+}
+
+static const struct file_operations bcm47xx_wdt_fops = {
+ .owner = THIS_MODULE,
+ .llseek = no_llseek,
+ .unlocked_ioctl = bcm47xx_wdt_ioctl,
+ .open = bcm47xx_wdt_open,
+ .release = bcm47xx_wdt_release,
+ .write = bcm47xx_wdt_write,
+};
+
+static struct miscdevice bcm47xx_wdt_miscdev = {
+ .minor = WATCHDOG_MINOR,
+ .name = "watchdog",
+ .fops = &bcm47xx_wdt_fops,
+};
+
+static struct notifier_block bcm47xx_wdt_notifier = {
+ .notifier_call = bcm47xx_wdt_notify_sys,
+};
+
+static int __init bcm47xx_wdt_init(void)
+{
+ int ret;
+
+ if (bcm47xx_wdt_hw_stop() < 0)
+ return -ENODEV;
+
+ setup_timer(&wdt_timer, bcm47xx_timer_tick, 0L);
+
+ if (bcm47xx_wdt_settimeout(wdt_time)) {
+ bcm47xx_wdt_settimeout(WDT_DEFAULT_TIME);
+ printk(KERN_INFO DRV_NAME ": "
+ "wdt_time value must be 0 < wdt_time < %d, using %d\n",
+ (WDT_MAX_TIME + 1), wdt_time);
+ }
+
+ ret = register_reboot_notifier(&bcm47xx_wdt_notifier);
+ if (ret)
+ return ret;
+
+ ret = misc_register(&bcm47xx_wdt_miscdev);
+ if (ret) {
+ unregister_reboot_notifier(&bcm47xx_wdt_notifier);
+ return ret;
+ }
+
+ printk(KERN_INFO "BCM47xx Watchdog Timer enabled (%d seconds%s)\n",
+ wdt_time, nowayout ? ", nowayout" : "");
+ return 0;
+}
+
+static void __exit bcm47xx_wdt_exit(void)
+{
+ if (!nowayout)
+ bcm47xx_wdt_stop();
+
+ misc_deregister(&bcm47xx_wdt_miscdev);
+
+ unregister_reboot_notifier(&bcm47xx_wdt_notifier);
+}
+
+module_init(bcm47xx_wdt_init);
+module_exit(bcm47xx_wdt_exit);
+
+MODULE_AUTHOR("Aleksandar Radovanovic");
+MODULE_DESCRIPTION("Watchdog driver for Broadcom BCM47xx");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
diff --git a/drivers/watchdog/coh901327_wdt.c b/drivers/watchdog/coh901327_wdt.c
new file mode 100644
index 0000000..fecb307
--- /dev/null
+++ b/drivers/watchdog/coh901327_wdt.c
@@ -0,0 +1,537 @@
+/*
+ * coh901327_wdt.c
+ *
+ * Copyright (C) 2008-2009 ST-Ericsson AB
+ * License terms: GNU General Public License (GPL) version 2
+ * Watchdog driver for the ST-Ericsson AB COH 901 327 IP core
+ * Author: Linus Walleij <linus.walleij@stericsson.com>
+ */
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/fs.h>
+#include <linux/miscdevice.h>
+#include <linux/watchdog.h>
+#include <linux/interrupt.h>
+#include <linux/pm.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/bitops.h>
+#include <linux/uaccess.h>
+#include <linux/clk.h>
+
+#define DRV_NAME "WDOG COH 901 327"
+
+/*
+ * COH 901 327 register definitions
+ */
+
+/* WDOG_FEED Register 32bit (-/W) */
+#define U300_WDOG_FR 0x00
+#define U300_WDOG_FR_FEED_RESTART_TIMER 0xFEEDU
+/* WDOG_TIMEOUT Register 32bit (R/W) */
+#define U300_WDOG_TR 0x04
+#define U300_WDOG_TR_TIMEOUT_MASK 0x7FFFU
+/* WDOG_DISABLE1 Register 32bit (-/W) */
+#define U300_WDOG_D1R 0x08
+#define U300_WDOG_D1R_DISABLE1_DISABLE_TIMER 0x2BADU
+/* WDOG_DISABLE2 Register 32bit (R/W) */
+#define U300_WDOG_D2R 0x0C
+#define U300_WDOG_D2R_DISABLE2_DISABLE_TIMER 0xCAFEU
+#define U300_WDOG_D2R_DISABLE_STATUS_DISABLED 0xDABEU
+#define U300_WDOG_D2R_DISABLE_STATUS_ENABLED 0x0000U
+/* WDOG_STATUS Register 32bit (R/W) */
+#define U300_WDOG_SR 0x10
+#define U300_WDOG_SR_STATUS_TIMED_OUT 0xCFE8U
+#define U300_WDOG_SR_STATUS_NORMAL 0x0000U
+#define U300_WDOG_SR_RESET_STATUS_RESET 0xE8B4U
+/* WDOG_COUNT Register 32bit (R/-) */
+#define U300_WDOG_CR 0x14
+#define U300_WDOG_CR_VALID_IND 0x8000U
+#define U300_WDOG_CR_VALID_STABLE 0x0000U
+#define U300_WDOG_CR_COUNT_VALUE_MASK 0x7FFFU
+/* WDOG_JTAGOVR Register 32bit (R/W) */
+#define U300_WDOG_JOR 0x18
+#define U300_WDOG_JOR_JTAG_MODE_IND 0x0002U
+#define U300_WDOG_JOR_JTAG_WATCHDOG_ENABLE 0x0001U
+/* WDOG_RESTART Register 32bit (-/W) */
+#define U300_WDOG_RR 0x1C
+#define U300_WDOG_RR_RESTART_VALUE_RESUME 0xACEDU
+/* WDOG_IRQ_EVENT Register 32bit (R/W) */
+#define U300_WDOG_IER 0x20
+#define U300_WDOG_IER_WILL_BARK_IRQ_EVENT_IND 0x0001U
+#define U300_WDOG_IER_WILL_BARK_IRQ_ACK_ENABLE 0x0001U
+/* WDOG_IRQ_MASK Register 32bit (R/W) */
+#define U300_WDOG_IMR 0x24
+#define U300_WDOG_IMR_WILL_BARK_IRQ_ENABLE 0x0001U
+/* WDOG_IRQ_FORCE Register 32bit (R/W) */
+#define U300_WDOG_IFR 0x28
+#define U300_WDOG_IFR_WILL_BARK_IRQ_FORCE_ENABLE 0x0001U
+
+/* Default timeout in seconds = 1 minute */
+static int margin = 60;
+static resource_size_t phybase;
+static resource_size_t physize;
+static int irq;
+static void __iomem *virtbase;
+static unsigned long coh901327_users;
+static unsigned long boot_status;
+static u16 wdogenablestore;
+static u16 irqmaskstore;
+static struct device *parent;
+
+/*
+ * The watchdog block is of course always clocked, the
+ * clk_enable()/clk_disable() calls are mainly for performing reference
+ * counting higher up in the clock hierarchy.
+ */
+static struct clk *clk;
+
+/*
+ * Enabling and disabling functions.
+ */
+static void coh901327_enable(u16 timeout)
+{
+ u16 val;
+
+ clk_enable(clk);
+ /* Restart timer if it is disabled */
+ val = readw(virtbase + U300_WDOG_D2R);
+ if (val == U300_WDOG_D2R_DISABLE_STATUS_DISABLED)
+ writew(U300_WDOG_RR_RESTART_VALUE_RESUME,
+ virtbase + U300_WDOG_RR);
+ /* Acknowledge any pending interrupt so it doesn't just fire off */
+ writew(U300_WDOG_IER_WILL_BARK_IRQ_ACK_ENABLE,
+ virtbase + U300_WDOG_IER);
+ /* Enable the watchdog interrupt */
+ writew(U300_WDOG_IMR_WILL_BARK_IRQ_ENABLE, virtbase + U300_WDOG_IMR);
+ /* Activate the watchdog timer */
+ writew(timeout, virtbase + U300_WDOG_TR);
+ /* Start the watchdog timer */
+ writew(U300_WDOG_FR_FEED_RESTART_TIMER, virtbase + U300_WDOG_FR);
+ /*
+	 * Extra read so that this change propagates to the watchdog.
+ */
+ (void) readw(virtbase + U300_WDOG_CR);
+ val = readw(virtbase + U300_WDOG_D2R);
+ clk_disable(clk);
+ if (val != U300_WDOG_D2R_DISABLE_STATUS_ENABLED)
+ dev_err(parent,
+ "%s(): watchdog not enabled! D2R value %04x\n",
+ __func__, val);
+}
+
+static void coh901327_disable(void)
+{
+ u16 val;
+
+ clk_enable(clk);
+ /* Disable the watchdog interrupt if it is active */
+ writew(0x0000U, virtbase + U300_WDOG_IMR);
+ /* If the watchdog is currently enabled, attempt to disable it */
+ val = readw(virtbase + U300_WDOG_D2R);
+ if (val != U300_WDOG_D2R_DISABLE_STATUS_DISABLED) {
+ writew(U300_WDOG_D1R_DISABLE1_DISABLE_TIMER,
+ virtbase + U300_WDOG_D1R);
+ writew(U300_WDOG_D2R_DISABLE2_DISABLE_TIMER,
+ virtbase + U300_WDOG_D2R);
+ /* Write this twice (else problems occur) */
+ writew(U300_WDOG_D2R_DISABLE2_DISABLE_TIMER,
+ virtbase + U300_WDOG_D2R);
+ }
+ val = readw(virtbase + U300_WDOG_D2R);
+ clk_disable(clk);
+ if (val != U300_WDOG_D2R_DISABLE_STATUS_DISABLED)
+ dev_err(parent,
+ "%s(): watchdog not disabled! D2R value %04x\n",
+ __func__, val);
+}
+
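+/* The timeout register counts 10 ms ticks, so seconds are multiplied by 100. */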
+static void coh901327_start(void)
+{
+ coh901327_enable(margin * 100);
+}
+
+static void coh901327_keepalive(void)
+{
+ clk_enable(clk);
+ /* Feed the watchdog */
+ writew(U300_WDOG_FR_FEED_RESTART_TIMER,
+ virtbase + U300_WDOG_FR);
+ clk_disable(clk);
+}
+
+static int coh901327_settimeout(int time)
+{
+ /*
+ * Max margin is 327 since the 10ms
+ * timeout register is max
+ * 0x7FFF = 327670ms ~= 327s.
+ */
+ if (time <= 0 || time > 327)
+ return -EINVAL;
+
+ margin = time;
+ clk_enable(clk);
+ /* Set new timeout value */
+ writew(margin * 100, virtbase + U300_WDOG_TR);
+ /* Feed the dog */
+ writew(U300_WDOG_FR_FEED_RESTART_TIMER,
+ virtbase + U300_WDOG_FR);
+ clk_disable(clk);
+ return 0;
+}
+
+/*
+ * This interrupt occurs 10 ms before the watchdog WILL bark.
+ */
+static irqreturn_t coh901327_interrupt(int irq, void *data)
+{
+ u16 val;
+
+ /*
+ * Ack IRQ? If this occurs we're FUBAR anyway, so
+ * just acknowledge, disable the interrupt and await the imminent end.
+ * If you at some point need a host of callbacks to be called
+ * when the system is about to watchdog-reset, add them here!
+ *
+ * NOTE: on future versions of this IP-block, it will be possible
+ * to prevent a watchdog reset by feeding the watchdog at this
+ * point.
+ */
+ clk_enable(clk);
+ val = readw(virtbase + U300_WDOG_IER);
+ if (val == U300_WDOG_IER_WILL_BARK_IRQ_EVENT_IND)
+ writew(U300_WDOG_IER_WILL_BARK_IRQ_ACK_ENABLE,
+ virtbase + U300_WDOG_IER);
+ writew(0x0000U, virtbase + U300_WDOG_IMR);
+ clk_disable(clk);
+ dev_crit(parent, "watchdog is barking!\n");
+ return IRQ_HANDLED;
+}
+
+/*
+ * Allow only one user (daemon) to open the watchdog
+ */
+static int coh901327_open(struct inode *inode, struct file *file)
+{
+ if (test_and_set_bit(1, &coh901327_users))
+ return -EBUSY;
+ coh901327_start();
+ return nonseekable_open(inode, file);
+}
+
+static int coh901327_release(struct inode *inode, struct file *file)
+{
+ clear_bit(1, &coh901327_users);
+ coh901327_disable();
+ return 0;
+}
+
+static ssize_t coh901327_write(struct file *file, const char __user *data,
+ size_t len, loff_t *ppos)
+{
+ if (len)
+ coh901327_keepalive();
+ return len;
+}
+
+static long coh901327_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ int ret = -ENOTTY;
+ u16 val;
+ int time;
+ int new_options;
+ union {
+ struct watchdog_info __user *ident;
+ int __user *i;
+ } uarg;
+ static struct watchdog_info ident = {
+ .options = WDIOF_CARDRESET |
+ WDIOF_SETTIMEOUT |
+ WDIOF_KEEPALIVEPING,
+ .identity = "COH 901 327 Watchdog",
+ .firmware_version = 1,
+ };
+ uarg.i = (int __user *)arg;
+
+ switch (cmd) {
+ case WDIOC_GETSUPPORT:
+ ret = copy_to_user(uarg.ident, &ident,
+ sizeof(ident)) ? -EFAULT : 0;
+ break;
+
+ case WDIOC_GETSTATUS:
+ ret = put_user(0, uarg.i);
+ break;
+
+ case WDIOC_GETBOOTSTATUS:
+ ret = put_user(boot_status, uarg.i);
+ break;
+
+ case WDIOC_SETOPTIONS:
+ ret = get_user(new_options, uarg.i);
+ if (ret)
+ break;
+ if (new_options & WDIOS_DISABLECARD)
+ coh901327_disable();
+ if (new_options & WDIOS_ENABLECARD)
+ coh901327_start();
+ ret = 0;
+ break;
+
+ case WDIOC_KEEPALIVE:
+ coh901327_keepalive();
+ ret = 0;
+ break;
+
+ case WDIOC_SETTIMEOUT:
+ ret = get_user(time, uarg.i);
+ if (ret)
+ break;
+
+ ret = coh901327_settimeout(time);
+ if (ret)
+ break;
+ /* Then fall through to return set value */
+
+ case WDIOC_GETTIMEOUT:
+ ret = put_user(margin, uarg.i);
+ break;
+
+ case WDIOC_GETTIMELEFT:
+ clk_enable(clk);
+ /* Read repeatedly until the value is stable! */
+ val = readw(virtbase + U300_WDOG_CR);
+ while (val & U300_WDOG_CR_VALID_IND)
+ val = readw(virtbase + U300_WDOG_CR);
+ val &= U300_WDOG_CR_COUNT_VALUE_MASK;
+ clk_disable(clk);
+ if (val != 0)
+ val /= 100;
+ ret = put_user(val, uarg.i);
+ break;
+ }
+ return ret;
+}
+
+static const struct file_operations coh901327_fops = {
+ .owner = THIS_MODULE,
+ .llseek = no_llseek,
+ .write = coh901327_write,
+ .unlocked_ioctl = coh901327_ioctl,
+ .open = coh901327_open,
+ .release = coh901327_release,
+};
+
+static struct miscdevice coh901327_miscdev = {
+ .minor = WATCHDOG_MINOR,
+ .name = "watchdog",
+ .fops = &coh901327_fops,
+};
+
+static int __exit coh901327_remove(struct platform_device *pdev)
+{
+ misc_deregister(&coh901327_miscdev);
+ coh901327_disable();
+ free_irq(irq, pdev);
+ clk_put(clk);
+ iounmap(virtbase);
+ release_mem_region(phybase, physize);
+ return 0;
+}
+
+
+static int __init coh901327_probe(struct platform_device *pdev)
+{
+ int ret;
+ u16 val;
+ struct resource *res;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -ENOENT;
+
+ parent = &pdev->dev;
+ physize = resource_size(res);
+ phybase = res->start;
+
+ if (request_mem_region(phybase, physize, DRV_NAME) == NULL) {
+ ret = -EBUSY;
+ goto out;
+ }
+
+ virtbase = ioremap(phybase, physize);
+ if (!virtbase) {
+ ret = -ENOMEM;
+ goto out_no_remap;
+ }
+
+ clk = clk_get(&pdev->dev, NULL);
+ if (IS_ERR(clk)) {
+ ret = PTR_ERR(clk);
+ dev_err(&pdev->dev, "could not get clock\n");
+ goto out_no_clk;
+ }
+ ret = clk_enable(clk);
+ if (ret) {
+ dev_err(&pdev->dev, "could not enable clock\n");
+ goto out_no_clk_enable;
+ }
+
+ val = readw(virtbase + U300_WDOG_SR);
+ switch (val) {
+ case U300_WDOG_SR_STATUS_TIMED_OUT:
+ dev_info(&pdev->dev,
+ "watchdog timed out since last chip reset!\n");
+ boot_status = WDIOF_CARDRESET;
+ /* Status will be cleared below */
+ break;
+ case U300_WDOG_SR_STATUS_NORMAL:
+ dev_info(&pdev->dev,
+ "in normal status, no timeouts have occurred.\n");
+ break;
+ default:
+ dev_info(&pdev->dev,
+ "contains an illegal status code (%08x)\n", val);
+ break;
+ }
+
+ val = readw(virtbase + U300_WDOG_D2R);
+ switch (val) {
+ case U300_WDOG_D2R_DISABLE_STATUS_DISABLED:
+ dev_info(&pdev->dev, "currently disabled.\n");
+ break;
+ case U300_WDOG_D2R_DISABLE_STATUS_ENABLED:
+ dev_info(&pdev->dev,
+ "currently enabled! (disabling it now)\n");
+ coh901327_disable();
+ break;
+ default:
+ dev_err(&pdev->dev,
+ "contains an illegal enable/disable code (%08x)\n",
+ val);
+ break;
+ }
+
+ /* Reset the watchdog */
+ writew(U300_WDOG_SR_RESET_STATUS_RESET, virtbase + U300_WDOG_SR);
+
+ irq = platform_get_irq(pdev, 0);
+ if (request_irq(irq, coh901327_interrupt, IRQF_DISABLED,
+ DRV_NAME " Bark", pdev)) {
+ ret = -EIO;
+ goto out_no_irq;
+ }
+
+ clk_disable(clk);
+
+ ret = misc_register(&coh901327_miscdev);
+ if (ret == 0)
+ dev_info(&pdev->dev,
+ "initialized. timer margin=%d sec\n", margin);
+ else
+ goto out_no_wdog;
+
+ return 0;
+
+out_no_wdog:
+ free_irq(irq, pdev);
+out_no_irq:
+ clk_disable(clk);
+out_no_clk_enable:
+ clk_put(clk);
+out_no_clk:
+ iounmap(virtbase);
+out_no_remap:
+	release_mem_region(phybase, physize);
+out:
+ return ret;
+}
+
+#ifdef CONFIG_PM
+static int coh901327_suspend(struct platform_device *pdev, pm_message_t state)
+{
+ irqmaskstore = readw(virtbase + U300_WDOG_IMR) & 0x0001U;
+ wdogenablestore = readw(virtbase + U300_WDOG_D2R);
+ /* If watchdog is on, disable it here and now */
+ if (wdogenablestore == U300_WDOG_D2R_DISABLE_STATUS_ENABLED)
+ coh901327_disable();
+ return 0;
+}
+
+static int coh901327_resume(struct platform_device *pdev)
+{
+ /* Restore the watchdog interrupt */
+ writew(irqmaskstore, virtbase + U300_WDOG_IMR);
+ if (wdogenablestore == U300_WDOG_D2R_DISABLE_STATUS_ENABLED) {
+ /* Restart the watchdog timer */
+ writew(U300_WDOG_RR_RESTART_VALUE_RESUME,
+ virtbase + U300_WDOG_RR);
+ writew(U300_WDOG_FR_FEED_RESTART_TIMER,
+ virtbase + U300_WDOG_FR);
+ }
+ return 0;
+}
+#else
+#define coh901327_suspend NULL
+#define coh901327_resume NULL
+#endif
+
+/*
+ * Mistreating the watchdog is the only way to perform a software reset of the
+ * system on EMP platforms. So we implement this and export a symbol for it.
+ */
+void coh901327_watchdog_reset(void)
+{
+ /* Enable even if on JTAG too */
+ writew(U300_WDOG_JOR_JTAG_WATCHDOG_ENABLE,
+ virtbase + U300_WDOG_JOR);
+ /*
+ * Timeout = 5s, we have to wait for the watchdog reset to
+ * actually take place: the watchdog will be reloaded with the
+ * default value immediately, so we HAVE to reboot and get back
+ * into the kernel in 30s, or the device will reboot again!
+ * The boot loader will typically deactivate the watchdog, so we
+ * need enough time for the boot loader to get to the point of
+ * deactivating the watchdog before the system is reset by it.
+ *
+ * NOTE: on future versions of the watchdog, this restriction is
+ * gone: the watchdog will be reloaded with a default value (1 min)
+ * instead of last value, and you can conveniently set the watchdog
+ * timeout to 10ms (value = 1) without any problems.
+ */
+ coh901327_enable(500);
+ /* Return and await doom */
+}
+
+static struct platform_driver coh901327_driver = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "coh901327_wdog",
+ },
+ .remove = __exit_p(coh901327_remove),
+ .suspend = coh901327_suspend,
+ .resume = coh901327_resume,
+};
+
+static int __init coh901327_init(void)
+{
+ return platform_driver_probe(&coh901327_driver, coh901327_probe);
+}
+module_init(coh901327_init);
+
+static void __exit coh901327_exit(void)
+{
+ platform_driver_unregister(&coh901327_driver);
+}
+module_exit(coh901327_exit);
+
+MODULE_AUTHOR("Linus Walleij <linus.walleij@stericsson.com>");
+MODULE_DESCRIPTION("COH 901 327 Watchdog");
+
+module_param(margin, int, 0);
+MODULE_PARM_DESC(margin, "Watchdog margin in seconds (default 60s)");
+
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
diff --git a/drivers/watchdog/hpwdt.c b/drivers/watchdog/hpwdt.c
index c0b9169..a6c5674 100644
--- a/drivers/watchdog/hpwdt.c
+++ b/drivers/watchdog/hpwdt.c
@@ -120,7 +120,8 @@
static char expect_release;
static unsigned long hpwdt_is_open;
static unsigned int allow_kdump;
-static int hpwdt_nmi_sourcing;
+static unsigned int hpwdt_nmi_sourcing;
+static unsigned int priority; /* hpwdt at end of die_notify list */
static void __iomem *pci_mem_addr; /* the PCI-memory address */
static unsigned long __iomem *hpwdt_timer_reg;
@@ -623,7 +624,7 @@
static struct notifier_block die_notifier = {
.notifier_call = hpwdt_pretimeout,
- .priority = 0x7FFFFFFF,
+ .priority = 0,
};
/*
@@ -641,7 +642,8 @@
hpwdt_nmi_sourcing = 1;
else
dev_warn(&dev->dev, "NMI sourcing is disabled. To enable this "
- "functionality you must reboot with nmi_watchdog=0.\n");
+ "functionality you must reboot with nmi_watchdog=0 "
+ "and load the hpwdt driver with priority=1.\n");
}
#else
static void __devinit hpwdt_check_nmi_sourcing(struct pci_dev *dev)
@@ -714,6 +716,14 @@
cmn_regs.u1.rah = 0x0D;
cmn_regs.u1.ral = 0x02;
+ /*
+ * If the priority is set to 1, then we will be put first on the
+ * die notify list to handle a critical NMI. The default is to
+ * be last so other users of the NMI signal can function.
+ */
+ if (priority)
+ die_notifier.priority = 0x7FFFFFFF;
+
retval = register_die_notifier(&die_notifier);
if (retval != 0) {
dev_warn(&dev->dev,
@@ -733,9 +743,11 @@
printk(KERN_INFO
"hp Watchdog Timer Driver: %s"
", timer margin: %d seconds (nowayout=%d)"
- ", allow kernel dump: %s (default = 0/OFF).\n",
+ ", allow kernel dump: %s (default = 0/OFF)"
+ ", priority: %s (default = 0/LAST).\n",
HPWDT_VERSION, soft_margin, nowayout,
- (allow_kdump == 0) ? "OFF" : "ON");
+ (allow_kdump == 0) ? "OFF" : "ON",
+ (priority == 0) ? "LAST" : "FIRST");
return 0;
@@ -798,5 +810,9 @@
MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default="
__MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
+module_param(priority, int, 0);
+MODULE_PARM_DESC(priority, "The hpwdt driver handles NMIs first or last"
+ " (default = 0/Last)\n");
+
module_init(hpwdt_init);
module_exit(hpwdt_cleanup);
diff --git a/drivers/watchdog/omap_wdt.c b/drivers/watchdog/omap_wdt.c
index f271385..3ed571a 100644
--- a/drivers/watchdog/omap_wdt.c
+++ b/drivers/watchdog/omap_wdt.c
@@ -159,6 +159,7 @@
file->private_data = (void *) wdev;
omap_wdt_set_timeout(wdev);
+ omap_wdt_ping(wdev); /* trigger loading of new timeout value */
omap_wdt_enable(wdev);
return nonseekable_open(inode, file);
@@ -313,6 +314,9 @@
platform_set_drvdata(pdev, wdev);
+ clk_enable(wdev->ick);
+ clk_enable(wdev->fck);
+
omap_wdt_disable(wdev);
omap_wdt_adjust_timeout(timer_margin);
@@ -332,6 +336,9 @@
/* autogate OCP interface clock */
__raw_writel(0x01, wdev->base + OMAP_WATCHDOG_SYS_CONFIG);
+ clk_disable(wdev->ick);
+ clk_disable(wdev->fck);
+
omap_wdt_dev = pdev;
return 0;
diff --git a/drivers/watchdog/pnx833x_wdt.c b/drivers/watchdog/pnx833x_wdt.c
new file mode 100644
index 0000000..538ec2c
--- /dev/null
+++ b/drivers/watchdog/pnx833x_wdt.c
@@ -0,0 +1,282 @@
+/*
+ * PNX833x Hardware Watchdog Driver
+ * Copyright 2008 NXP Semiconductors
+ * Daniel Laird <daniel.j.laird@nxp.com>
+ * Andre McCurdy <andre.mccurdy@nxp.com>
+ *
+ * Heavily based upon - IndyDog 0.3
+ * A Hardware Watchdog Device for SGI IP22
+ *
+ * (c) Copyright 2002 Guido Guenther <agx@sigxcpu.org>, All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * based on softdog.c by Alan Cox <alan@redhat.com>
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/fs.h>
+#include <linux/mm.h>
+#include <linux/miscdevice.h>
+#include <linux/watchdog.h>
+#include <linux/notifier.h>
+#include <linux/reboot.h>
+#include <linux/init.h>
+#include <asm/mach-pnx833x/pnx833x.h>
+
+#define PFX "pnx833x: "
+#define WATCHDOG_TIMEOUT 30 /* 30 sec Maximum timeout */
+#define WATCHDOG_COUNT_FREQUENCY 68000000U /* Watchdog counts at 68 MHz. */
+
+/** CONFIG block */
+#define PNX833X_CONFIG (0x07000U)
+#define PNX833X_CONFIG_CPU_WATCHDOG (0x54)
+#define PNX833X_CONFIG_CPU_WATCHDOG_COMPARE (0x58)
+#define PNX833X_CONFIG_CPU_COUNTERS_CONTROL (0x1c)
+
+/** RESET block */
+#define PNX833X_RESET (0x08000U)
+#define PNX833X_RESET_CONFIG (0x08)
+
+static unsigned long pnx833x_wdt_alive;
+
+/* Default timeout in 68 MHz clock ticks (30 seconds). */
+static int pnx833x_wdt_timeout = (WATCHDOG_TIMEOUT * WATCHDOG_COUNT_FREQUENCY);
+module_param(pnx833x_wdt_timeout, int, 0);
+MODULE_PARM_DESC(pnx833x_wdt_timeout, "Watchdog timeout in 68 MHz clock ticks, default="
+ __MODULE_STRING(pnx833x_wdt_timeout) " (30 seconds).");
+
+static int nowayout = WATCHDOG_NOWAYOUT;
+module_param(nowayout, int, 0);
+MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default="
+ __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
+
+static int start_enabled = 1;
+module_param(start_enabled, int, 0);
+MODULE_PARM_DESC(start_enabled, "Watchdog is started on module insertion "
+ "(default=" __MODULE_STRING(start_enabled) ")");
+
+static void pnx833x_wdt_start(void)
+{
+ /* Enable watchdog causing reset. */
+ PNX833X_REG(PNX833X_RESET + PNX833X_RESET_CONFIG) |= 0x1;
+ /* Set timeout.*/
+ PNX833X_REG(PNX833X_CONFIG +
+ PNX833X_CONFIG_CPU_WATCHDOG_COMPARE) = pnx833x_wdt_timeout;
+ /* Enable watchdog. */
+ PNX833X_REG(PNX833X_CONFIG +
+ PNX833X_CONFIG_CPU_COUNTERS_CONTROL) |= 0x1;
+
+ printk(KERN_INFO PFX "Started watchdog timer.\n");
+}
+
+static void pnx833x_wdt_stop(void)
+{
+ /* Disable watchdog causing reset. */
+ PNX833X_REG(PNX833X_RESET + PNX833X_RESET_CONFIG) &= 0xFFFFFFFE;
+ /* Disable watchdog.*/
+ PNX833X_REG(PNX833X_CONFIG +
+ PNX833X_CONFIG_CPU_COUNTERS_CONTROL) &= 0xFFFFFFFE;
+
+ printk(KERN_INFO PFX "Stopped watchdog timer.\n");
+}
+
+static void pnx833x_wdt_ping(void)
+{
+ PNX833X_REG(PNX833X_CONFIG +
+ PNX833X_CONFIG_CPU_WATCHDOG_COMPARE) = pnx833x_wdt_timeout;
+}
+
+/*
+ * Allow only one person to hold it open
+ */
+static int pnx833x_wdt_open(struct inode *inode, struct file *file)
+{
+ if (test_and_set_bit(0, &pnx833x_wdt_alive))
+ return -EBUSY;
+
+ if (nowayout)
+ __module_get(THIS_MODULE);
+
+ /* Activate timer */
+ if (!start_enabled)
+ pnx833x_wdt_start();
+
+ pnx833x_wdt_ping();
+
+ printk(KERN_INFO "Started watchdog timer.\n");
+
+ return nonseekable_open(inode, file);
+}
+
+static int pnx833x_wdt_release(struct inode *inode, struct file *file)
+{
+ /* Shut off the timer.
+ * Lock it in if it's a module and we defined ...NOWAYOUT */
+ if (!nowayout)
+ pnx833x_wdt_stop(); /* Turn the WDT off */
+
+ clear_bit(0, &pnx833x_wdt_alive);
+ return 0;
+}
+
+static ssize_t pnx833x_wdt_write(struct file *file, const char *data, size_t len, loff_t *ppos)
+{
+ /* Refresh the timer. */
+ if (len)
+ pnx833x_wdt_ping();
+
+ return len;
+}
+
+static long pnx833x_wdt_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ int options, new_timeout = 0;
+ uint32_t timeout, timeout_left = 0;
+
+ static struct watchdog_info ident = {
+ .options = WDIOF_KEEPALIVEPING | WDIOF_SETTIMEOUT,
+ .firmware_version = 0,
+ .identity = "Hardware Watchdog for PNX833x",
+ };
+
+ switch (cmd) {
+ default:
+ return -ENOTTY;
+
+ case WDIOC_GETSUPPORT:
+ if (copy_to_user((struct watchdog_info *)arg,
+ &ident, sizeof(ident)))
+ return -EFAULT;
+ return 0;
+
+ case WDIOC_GETSTATUS:
+ case WDIOC_GETBOOTSTATUS:
+ return put_user(0, (int *)arg);
+
+ case WDIOC_SETOPTIONS:
+ if (get_user(options, (int *)arg))
+ return -EFAULT;
+
+ if (options & WDIOS_DISABLECARD)
+ pnx833x_wdt_stop();
+
+ if (options & WDIOS_ENABLECARD)
+ pnx833x_wdt_start();
+
+ return 0;
+
+ case WDIOC_KEEPALIVE:
+ pnx833x_wdt_ping();
+ return 0;
+
+ case WDIOC_SETTIMEOUT:
+ {
+ if (get_user(new_timeout, (int *)arg))
+ return -EFAULT;
+
+ pnx833x_wdt_timeout = new_timeout;
+ PNX833X_REG(PNX833X_CONFIG +
+ PNX833X_CONFIG_CPU_WATCHDOG_COMPARE) = new_timeout;
+ return put_user(new_timeout, (int *)arg);
+ }
+
+ case WDIOC_GETTIMEOUT:
+ timeout = PNX833X_REG(PNX833X_CONFIG +
+ PNX833X_CONFIG_CPU_WATCHDOG_COMPARE);
+ return put_user(timeout, (int *)arg);
+
+ case WDIOC_GETTIMELEFT:
+ timeout_left = PNX833X_REG(PNX833X_CONFIG +
+ PNX833X_CONFIG_CPU_WATCHDOG);
+ return put_user(timeout_left, (int *)arg);
+
+ }
+}
+
+static int pnx833x_wdt_notify_sys(struct notifier_block *this,
+ unsigned long code, void *unused)
+{
+ if (code == SYS_DOWN || code == SYS_HALT)
+ pnx833x_wdt_stop(); /* Turn the WDT off */
+
+ return NOTIFY_DONE;
+}
+
+static const struct file_operations pnx833x_wdt_fops = {
+ .owner = THIS_MODULE,
+ .llseek = no_llseek,
+ .write = pnx833x_wdt_write,
+ .unlocked_ioctl = pnx833x_wdt_ioctl,
+ .open = pnx833x_wdt_open,
+ .release = pnx833x_wdt_release,
+};
+
+static struct miscdevice pnx833x_wdt_miscdev = {
+ .minor = WATCHDOG_MINOR,
+ .name = "watchdog",
+ .fops = &pnx833x_wdt_fops,
+};
+
+static struct notifier_block pnx833x_wdt_notifier = {
+ .notifier_call = pnx833x_wdt_notify_sys,
+};
+
+static char banner[] __initdata =
+ KERN_INFO PFX "Hardware Watchdog Timer for PNX833x: Version 0.1\n";
+
+static int __init watchdog_init(void)
+{
+ int ret, cause;
+
+ /* Let's check the reason for the reset. */
+ cause = PNX833X_REG(PNX833X_RESET);
+ /* If bit 31 is set then the watchdog was the cause of the reset. */
+ if (cause & 0x80000000) {
+ printk(KERN_INFO PFX "The system was previously reset due to "
+ "the watchdog firing - please investigate...\n");
+ }
+
+ ret = register_reboot_notifier(&pnx833x_wdt_notifier);
+ if (ret) {
+ printk(KERN_ERR PFX
+ "cannot register reboot notifier (err=%d)\n", ret);
+ return ret;
+ }
+
+ ret = misc_register(&pnx833x_wdt_miscdev);
+ if (ret) {
+ printk(KERN_ERR PFX
+ "cannot register miscdev on minor=%d (err=%d)\n",
+ WATCHDOG_MINOR, ret);
+ unregister_reboot_notifier(&pnx833x_wdt_notifier);
+ return ret;
+ }
+
+ printk(banner);
+ if (start_enabled)
+ pnx833x_wdt_start();
+
+ return 0;
+}
+
+static void __exit watchdog_exit(void)
+{
+ misc_deregister(&pnx833x_wdt_miscdev);
+ unregister_reboot_notifier(&pnx833x_wdt_notifier);
+}
+
+module_init(watchdog_init);
+module_exit(watchdog_exit);
+
+MODULE_AUTHOR("Daniel Laird/Andre McCurdy");
+MODULE_DESCRIPTION("Hardware Watchdog Device for PNX833x");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
diff --git a/drivers/watchdog/stmp3xxx_wdt.c b/drivers/watchdog/stmp3xxx_wdt.c
new file mode 100644
index 0000000..5dd9526
--- /dev/null
+++ b/drivers/watchdog/stmp3xxx_wdt.c
@@ -0,0 +1,296 @@
+/*
+ * Watchdog driver for Freescale STMP37XX/STMP378X
+ *
+ * Author: Vitaly Wool <vital@embeddedalley.com>
+ *
+ * Copyright 2008 Freescale Semiconductor, Inc. All Rights Reserved.
+ * Copyright 2008 Embedded Alley Solutions, Inc All Rights Reserved.
+ */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/fs.h>
+#include <linux/miscdevice.h>
+#include <linux/watchdog.h>
+#include <linux/platform_device.h>
+#include <linux/spinlock.h>
+#include <linux/uaccess.h>
+
+#include <mach/platform.h>
+#include <mach/regs-rtc.h>
+
+#define DEFAULT_HEARTBEAT 19
+#define MAX_HEARTBEAT (0x10000000 >> 6)
+
+/* missing bitmask in headers */
+#define BV_RTC_PERSISTENT1_GENERAL__RTC_FORCE_UPDATER 0x80000000
+
+#define WDT_IN_USE 0
+#define WDT_OK_TO_CLOSE 1
+
+#define WDOG_COUNTER_RATE 1000 /* 1 kHz clock */
+
+static DEFINE_SPINLOCK(stmp3xxx_wdt_io_lock);
+static unsigned long wdt_status;
+static const int nowayout = WATCHDOG_NOWAYOUT;
+static int heartbeat = DEFAULT_HEARTBEAT;
+static unsigned long boot_status;
+
+static void wdt_enable(u32 value)
+{
+ spin_lock(&stmp3xxx_wdt_io_lock);
+ __raw_writel(value, REGS_RTC_BASE + HW_RTC_WATCHDOG);
+ stmp3xxx_setl(BM_RTC_CTRL_WATCHDOGEN, REGS_RTC_BASE + HW_RTC_CTRL);
+ stmp3xxx_setl(BV_RTC_PERSISTENT1_GENERAL__RTC_FORCE_UPDATER,
+ REGS_RTC_BASE + HW_RTC_PERSISTENT1);
+ spin_unlock(&stmp3xxx_wdt_io_lock);
+}
+
+static void wdt_disable(void)
+{
+ spin_lock(&stmp3xxx_wdt_io_lock);
+ stmp3xxx_clearl(BV_RTC_PERSISTENT1_GENERAL__RTC_FORCE_UPDATER,
+ REGS_RTC_BASE + HW_RTC_PERSISTENT1);
+ stmp3xxx_clearl(BM_RTC_CTRL_WATCHDOGEN, REGS_RTC_BASE + HW_RTC_CTRL);
+ spin_unlock(&stmp3xxx_wdt_io_lock);
+}
+
+static void wdt_ping(void)
+{
+ wdt_enable(heartbeat * WDOG_COUNTER_RATE);
+}
+
+static int stmp3xxx_wdt_open(struct inode *inode, struct file *file)
+{
+ if (test_and_set_bit(WDT_IN_USE, &wdt_status))
+ return -EBUSY;
+
+ clear_bit(WDT_OK_TO_CLOSE, &wdt_status);
+ wdt_ping();
+
+ return nonseekable_open(inode, file);
+}
+
+static ssize_t stmp3xxx_wdt_write(struct file *file, const char __user *data,
+ size_t len, loff_t *ppos)
+{
+ if (len) {
+ if (!nowayout) {
+ size_t i;
+
+ clear_bit(WDT_OK_TO_CLOSE, &wdt_status);
+
+ for (i = 0; i != len; i++) {
+ char c;
+
+ if (get_user(c, data + i))
+ return -EFAULT;
+ if (c == 'V')
+ set_bit(WDT_OK_TO_CLOSE, &wdt_status);
+ }
+ }
+ wdt_ping();
+ }
+
+ return len;
+}
+
+static struct watchdog_info ident = {
+ .options = WDIOF_CARDRESET |
+ WDIOF_MAGICCLOSE |
+ WDIOF_SETTIMEOUT |
+ WDIOF_KEEPALIVEPING,
+ .identity = "STMP3XXX Watchdog",
+};
+
+static long stmp3xxx_wdt_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ void __user *argp = (void __user *)arg;
+ int __user *p = argp;
+ int new_heartbeat, opts;
+ int ret = -ENOTTY;
+
+ switch (cmd) {
+ case WDIOC_GETSUPPORT:
+ ret = copy_to_user(argp, &ident, sizeof(ident)) ? -EFAULT : 0;
+ break;
+
+ case WDIOC_GETSTATUS:
+ ret = put_user(0, p);
+ break;
+
+ case WDIOC_GETBOOTSTATUS:
+ ret = put_user(boot_status, p);
+ break;
+
+ case WDIOC_SETOPTIONS:
+ if (get_user(opts, p)) {
+ ret = -EFAULT;
+ break;
+ }
+ if (opts & WDIOS_DISABLECARD)
+ wdt_disable();
+ else if (opts & WDIOS_ENABLECARD)
+ wdt_ping();
+ else {
+ pr_debug("%s: unknown option 0x%x\n", __func__, opts);
+ ret = -EINVAL;
+ break;
+ }
+ ret = 0;
+ break;
+
+ case WDIOC_KEEPALIVE:
+ wdt_ping();
+ ret = 0;
+ break;
+
+ case WDIOC_SETTIMEOUT:
+ if (get_user(new_heartbeat, p)) {
+ ret = -EFAULT;
+ break;
+ }
+ if (new_heartbeat <= 0 || new_heartbeat > MAX_HEARTBEAT) {
+ ret = -EINVAL;
+ break;
+ }
+
+ heartbeat = new_heartbeat;
+ wdt_ping();
+ /* Fall through */
+
+ case WDIOC_GETTIMEOUT:
+ ret = put_user(heartbeat, p);
+ break;
+ }
+ return ret;
+}
+
+static int stmp3xxx_wdt_release(struct inode *inode, struct file *file)
+{
+ int ret = 0;
+
+ if (!nowayout) {
+ if (!test_bit(WDT_OK_TO_CLOSE, &wdt_status)) {
+ wdt_ping();
+ pr_debug("%s: Device closed unexpectedly\n", __func__);
+ ret = -EINVAL;
+ } else {
+ wdt_disable();
+ clear_bit(WDT_OK_TO_CLOSE, &wdt_status);
+ }
+ }
+ clear_bit(WDT_IN_USE, &wdt_status);
+
+ return ret;
+}
+
+static const struct file_operations stmp3xxx_wdt_fops = {
+ .owner = THIS_MODULE,
+ .llseek = no_llseek,
+ .write = stmp3xxx_wdt_write,
+ .unlocked_ioctl = stmp3xxx_wdt_ioctl,
+ .open = stmp3xxx_wdt_open,
+ .release = stmp3xxx_wdt_release,
+};
+
+static struct miscdevice stmp3xxx_wdt_miscdev = {
+ .minor = WATCHDOG_MINOR,
+ .name = "watchdog",
+ .fops = &stmp3xxx_wdt_fops,
+};
+
+static int __devinit stmp3xxx_wdt_probe(struct platform_device *pdev)
+{
+ int ret = 0;
+
+ if (heartbeat < 1 || heartbeat > MAX_HEARTBEAT)
+ heartbeat = DEFAULT_HEARTBEAT;
+
+ boot_status = __raw_readl(REGS_RTC_BASE + HW_RTC_PERSISTENT1) &
+ BV_RTC_PERSISTENT1_GENERAL__RTC_FORCE_UPDATER;
+ boot_status = !!boot_status;
+ stmp3xxx_clearl(BV_RTC_PERSISTENT1_GENERAL__RTC_FORCE_UPDATER,
+ REGS_RTC_BASE + HW_RTC_PERSISTENT1);
+ wdt_disable(); /* disable for now */
+
+ ret = misc_register(&stmp3xxx_wdt_miscdev);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "cannot register misc device\n");
+ return ret;
+ }
+
+ printk(KERN_INFO "stmp3xxx watchdog: initialized, heartbeat %d sec\n",
+ heartbeat);
+
+ return ret;
+}
+
+static int __devexit stmp3xxx_wdt_remove(struct platform_device *pdev)
+{
+ misc_deregister(&stmp3xxx_wdt_miscdev);
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int wdt_suspended;
+static u32 wdt_saved_time;
+
+static int stmp3xxx_wdt_suspend(struct platform_device *pdev,
+ pm_message_t state)
+{
+ if (__raw_readl(REGS_RTC_BASE + HW_RTC_CTRL) &
+ BM_RTC_CTRL_WATCHDOGEN) {
+ wdt_suspended = 1;
+ wdt_saved_time = __raw_readl(REGS_RTC_BASE + HW_RTC_WATCHDOG);
+ wdt_disable();
+ }
+ return 0;
+}
+
+static int stmp3xxx_wdt_resume(struct platform_device *pdev)
+{
+ if (wdt_suspended) {
+ wdt_enable(wdt_saved_time);
+ wdt_suspended = 0;
+ }
+ return 0;
+}
+#else
+#define stmp3xxx_wdt_suspend NULL
+#define stmp3xxx_wdt_resume NULL
+#endif
+
+static struct platform_driver platform_wdt_driver = {
+ .driver = {
+ .name = "stmp3xxx_wdt",
+ },
+ .probe = stmp3xxx_wdt_probe,
+ .remove = __devexit_p(stmp3xxx_wdt_remove),
+ .suspend = stmp3xxx_wdt_suspend,
+ .resume = stmp3xxx_wdt_resume,
+};
+
+static int __init stmp3xxx_wdt_init(void)
+{
+ return platform_driver_register(&platform_wdt_driver);
+}
+
+static void __exit stmp3xxx_wdt_exit(void)
+{
+ return platform_driver_unregister(&platform_wdt_driver);
+}
+
+module_init(stmp3xxx_wdt_init);
+module_exit(stmp3xxx_wdt_exit);
+
+MODULE_DESCRIPTION("STMP3XXX Watchdog Driver");
+MODULE_LICENSE("GPL");
+
+module_param(heartbeat, int, 0);
+MODULE_PARM_DESC(heartbeat,
+ "Watchdog heartbeat period in seconds from 1 to "
+ __MODULE_STRING(MAX_HEARTBEAT) ", default "
+ __MODULE_STRING(DEFAULT_HEARTBEAT));
+
+MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
diff --git a/drivers/watchdog/twl4030_wdt.c b/drivers/watchdog/twl4030_wdt.c
new file mode 100644
index 0000000..cb46556
--- /dev/null
+++ b/drivers/watchdog/twl4030_wdt.c
@@ -0,0 +1,272 @@
+/*
+ * Copyright (C) Nokia Corporation
+ *
+ * Written by Timo Kokkonen <timo.t.kokkonen at nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/fs.h>
+#include <linux/watchdog.h>
+#include <linux/platform_device.h>
+#include <linux/miscdevice.h>
+#include <linux/uaccess.h>
+#include <linux/i2c/twl4030.h>
+
+#define TWL4030_WATCHDOG_CFG_REG_OFFS 0x3
+
+#define TWL4030_WDT_STATE_OPEN 0x1
+#define TWL4030_WDT_STATE_ACTIVE 0x8
+
+static struct platform_device *twl4030_wdt_dev;
+
+struct twl4030_wdt {
+ struct miscdevice miscdev;
+ int timer_margin;
+ unsigned long state;
+};
+
+static int nowayout = WATCHDOG_NOWAYOUT;
+module_param(nowayout, int, 0);
+MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started "
+ "(default=" __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
+
+static int twl4030_wdt_write(unsigned char val)
+{
+ return twl4030_i2c_write_u8(TWL4030_MODULE_PM_RECEIVER, val,
+ TWL4030_WATCHDOG_CFG_REG_OFFS);
+}
+
+static int twl4030_wdt_enable(struct twl4030_wdt *wdt)
+{
+ return twl4030_wdt_write(wdt->timer_margin + 1);
+}
+
+static int twl4030_wdt_disable(struct twl4030_wdt *wdt)
+{
+ return twl4030_wdt_write(0);
+}
+
+static int twl4030_wdt_set_timeout(struct twl4030_wdt *wdt, int timeout)
+{
+ if (timeout < 0 || timeout > 30) {
+ dev_warn(wdt->miscdev.parent,
+ "Timeout can only be in the range [0-30] seconds");
+ return -EINVAL;
+ }
+ wdt->timer_margin = timeout;
+ return twl4030_wdt_enable(wdt);
+}
+
+static ssize_t twl4030_wdt_write_fop(struct file *file,
+ const char __user *data, size_t len, loff_t *ppos)
+{
+ struct twl4030_wdt *wdt = file->private_data;
+
+ if (len)
+ twl4030_wdt_enable(wdt);
+
+ return len;
+}
+
+static long twl4030_wdt_ioctl(struct file *file,
+ unsigned int cmd, unsigned long arg)
+{
+ void __user *argp = (void __user *)arg;
+ int __user *p = argp;
+ int new_margin;
+ struct twl4030_wdt *wdt = file->private_data;
+
+ static const struct watchdog_info twl4030_wd_ident = {
+ .identity = "TWL4030 Watchdog",
+ .options = WDIOF_SETTIMEOUT,
+ .firmware_version = 0,
+ };
+
+ switch (cmd) {
+ case WDIOC_GETSUPPORT:
+ return copy_to_user(argp, &twl4030_wd_ident,
+ sizeof(twl4030_wd_ident)) ? -EFAULT : 0;
+
+ case WDIOC_GETSTATUS:
+ case WDIOC_GETBOOTSTATUS:
+ return put_user(0, p);
+
+ case WDIOC_KEEPALIVE:
+ twl4030_wdt_enable(wdt);
+ break;
+
+ case WDIOC_SETTIMEOUT:
+ if (get_user(new_margin, p))
+ return -EFAULT;
+ if (twl4030_wdt_set_timeout(wdt, new_margin))
+ return -EINVAL;
+ return put_user(wdt->timer_margin, p);
+
+ case WDIOC_GETTIMEOUT:
+ return put_user(wdt->timer_margin, p);
+
+ default:
+ return -ENOTTY;
+ }
+
+ return 0;
+}
+
+static int twl4030_wdt_open(struct inode *inode, struct file *file)
+{
+ struct twl4030_wdt *wdt = platform_get_drvdata(twl4030_wdt_dev);
+
+ /* /dev/watchdog can only be opened once */
+ if (test_and_set_bit(0, &wdt->state))
+ return -EBUSY;
+
+ wdt->state |= TWL4030_WDT_STATE_ACTIVE;
+ file->private_data = (void *) wdt;
+
+ twl4030_wdt_enable(wdt);
+ return nonseekable_open(inode, file);
+}
+
+static int twl4030_wdt_release(struct inode *inode, struct file *file)
+{
+ struct twl4030_wdt *wdt = file->private_data;
+ if (nowayout) {
+ dev_alert(wdt->miscdev.parent,
+ "Unexpected close, watchdog still running!\n");
+ twl4030_wdt_enable(wdt);
+ } else {
+ if (twl4030_wdt_disable(wdt))
+ return -EFAULT;
+ wdt->state &= ~TWL4030_WDT_STATE_ACTIVE;
+ }
+
+ clear_bit(0, &wdt->state);
+ return 0;
+}
+
+static const struct file_operations twl4030_wdt_fops = {
+ .owner = THIS_MODULE,
+ .llseek = no_llseek,
+ .open = twl4030_wdt_open,
+ .release = twl4030_wdt_release,
+ .unlocked_ioctl = twl4030_wdt_ioctl,
+ .write = twl4030_wdt_write_fop,
+};
+
+static int __devinit twl4030_wdt_probe(struct platform_device *pdev)
+{
+ int ret = 0;
+ struct twl4030_wdt *wdt;
+
+ wdt = kzalloc(sizeof(struct twl4030_wdt), GFP_KERNEL);
+ if (!wdt)
+ return -ENOMEM;
+
+ wdt->state = 0;
+ wdt->timer_margin = 30;
+ wdt->miscdev.parent = &pdev->dev;
+ wdt->miscdev.fops = &twl4030_wdt_fops;
+ wdt->miscdev.minor = WATCHDOG_MINOR;
+ wdt->miscdev.name = "watchdog";
+
+ platform_set_drvdata(pdev, wdt);
+
+ twl4030_wdt_dev = pdev;
+
+ ret = misc_register(&wdt->miscdev);
+ if (ret) {
+ dev_err(wdt->miscdev.parent,
+ "Failed to register misc device\n");
+ platform_set_drvdata(pdev, NULL);
+ kfree(wdt);
+ twl4030_wdt_dev = NULL;
+ return ret;
+ }
+ return 0;
+}
+
+static int __devexit twl4030_wdt_remove(struct platform_device *pdev)
+{
+ struct twl4030_wdt *wdt = platform_get_drvdata(pdev);
+
+ if (wdt->state & TWL4030_WDT_STATE_ACTIVE)
+ if (twl4030_wdt_disable(wdt))
+ return -EFAULT;
+
+ wdt->state &= ~TWL4030_WDT_STATE_ACTIVE;
+ misc_deregister(&wdt->miscdev);
+
+ platform_set_drvdata(pdev, NULL);
+ kfree(wdt);
+ twl4030_wdt_dev = NULL;
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int twl4030_wdt_suspend(struct platform_device *pdev, pm_message_t state)
+{
+ struct twl4030_wdt *wdt = platform_get_drvdata(pdev);
+ if (wdt->state & TWL4030_WDT_STATE_ACTIVE)
+ return twl4030_wdt_disable(wdt);
+
+ return 0;
+}
+
+static int twl4030_wdt_resume(struct platform_device *pdev)
+{
+ struct twl4030_wdt *wdt = platform_get_drvdata(pdev);
+ if (wdt->state & TWL4030_WDT_STATE_ACTIVE)
+ return twl4030_wdt_enable(wdt);
+
+ return 0;
+}
+#else
+#define twl4030_wdt_suspend NULL
+#define twl4030_wdt_resume NULL
+#endif
+
+static struct platform_driver twl4030_wdt_driver = {
+ .probe = twl4030_wdt_probe,
+ .remove = __devexit_p(twl4030_wdt_remove),
+ .suspend = twl4030_wdt_suspend,
+ .resume = twl4030_wdt_resume,
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "twl4030_wdt",
+ },
+};
+
+static int __devinit twl4030_wdt_init(void)
+{
+ return platform_driver_register(&twl4030_wdt_driver);
+}
+module_init(twl4030_wdt_init);
+
+static void __devexit twl4030_wdt_exit(void)
+{
+ platform_driver_unregister(&twl4030_wdt_driver);
+}
+module_exit(twl4030_wdt_exit);
+
+MODULE_AUTHOR("Nokia Corporation");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
+MODULE_ALIAS("platform:twl4030_wdt");
+
diff --git a/drivers/watchdog/wdrtas.c b/drivers/watchdog/wdrtas.c
index a4fe7a3..3bde56b 100644
--- a/drivers/watchdog/wdrtas.c
+++ b/drivers/watchdog/wdrtas.c
@@ -218,16 +218,14 @@
*/
static int wdrtas_get_temperature(void)
{
- long result;
+ int result;
int temperature = 0;
- result = rtas_call(wdrtas_token_get_sensor_state, 2, 2,
- (void *)__pa(&temperature),
- WDRTAS_THERMAL_SENSOR, 0);
+ result = rtas_get_sensor(WDRTAS_THERMAL_SENSOR, 0, &temperature);
if (result < 0)
printk(KERN_WARNING "wdrtas: reading the thermal sensor "
- "faild: %li\n", result);
+ "failed: %i\n", result);
else
temperature = ((temperature * 9) / 5) + 32; /* fahrenheit */
diff --git a/drivers/watchdog/wdt_pci.c b/drivers/watchdog/wdt_pci.c
index c45839a..7a1bdc7 100644
--- a/drivers/watchdog/wdt_pci.c
+++ b/drivers/watchdog/wdt_pci.c
@@ -2,7 +2,7 @@
* Industrial Computer Source PCI-WDT500/501 driver
*
* (c) Copyright 1996-1997 Alan Cox <alan@lxorguk.ukuu.org.uk>,
- * All Rights Reserved.
+ * All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
@@ -99,14 +99,16 @@
"Watchdog cannot be stopped once started (default="
__MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
-#ifdef CONFIG_WDT_501_PCI
/* Support for the Fan Tachometer on the PCI-WDT501 */
static int tachometer;
-
module_param(tachometer, int, 0);
MODULE_PARM_DESC(tachometer,
- "PCI-WDT501 Fan Tachometer support (0=disable, default=0)");
-#endif /* CONFIG_WDT_501_PCI */
+ "PCI-WDT501 Fan Tachometer support (0=disable, default=0)");
+
+static int type = 500;
+module_param(type, int, 0);
+MODULE_PARM_DESC(type,
+ "PCI-WDT501 Card type (500 or 501, default=500)");
/*
* Programming support
@@ -266,22 +268,21 @@
*status |= WDIOF_EXTERN1;
if (new_status & WDC_SR_ISII1)
*status |= WDIOF_EXTERN2;
-#ifdef CONFIG_WDT_501_PCI
- if (!(new_status & WDC_SR_TGOOD))
- *status |= WDIOF_OVERHEAT;
- if (!(new_status & WDC_SR_PSUOVER))
- *status |= WDIOF_POWEROVER;
- if (!(new_status & WDC_SR_PSUUNDR))
- *status |= WDIOF_POWERUNDER;
- if (tachometer) {
- if (!(new_status & WDC_SR_FANGOOD))
- *status |= WDIOF_FANFAULT;
+ if (type == 501) {
+ if (!(new_status & WDC_SR_TGOOD))
+ *status |= WDIOF_OVERHEAT;
+ if (!(new_status & WDC_SR_PSUOVER))
+ *status |= WDIOF_POWEROVER;
+ if (!(new_status & WDC_SR_PSUUNDR))
+ *status |= WDIOF_POWERUNDER;
+ if (tachometer) {
+ if (!(new_status & WDC_SR_FANGOOD))
+ *status |= WDIOF_FANFAULT;
+ }
}
-#endif /* CONFIG_WDT_501_PCI */
return 0;
}
-#ifdef CONFIG_WDT_501_PCI
/**
* wdtpci_get_temperature:
*
@@ -300,7 +301,6 @@
*temperature = (c * 11 / 15) + 7;
return 0;
}
-#endif /* CONFIG_WDT_501_PCI */
/**
* wdtpci_interrupt:
@@ -327,22 +327,22 @@
printk(KERN_CRIT PFX "status %d\n", status);
-#ifdef CONFIG_WDT_501_PCI
- if (!(status & WDC_SR_TGOOD)) {
- u8 alarm = inb(WDT_RT);
- printk(KERN_CRIT PFX "Overheat alarm.(%d)\n", alarm);
- udelay(8);
+ if (type == 501) {
+ if (!(status & WDC_SR_TGOOD)) {
+ printk(KERN_CRIT PFX "Overheat alarm.(%d)\n",
+ inb(WDT_RT));
+ udelay(8);
+ }
+ if (!(status & WDC_SR_PSUOVER))
+ printk(KERN_CRIT PFX "PSU over voltage.\n");
+ if (!(status & WDC_SR_PSUUNDR))
+ printk(KERN_CRIT PFX "PSU under voltage.\n");
+ if (tachometer) {
+ if (!(status & WDC_SR_FANGOOD))
+ printk(KERN_CRIT PFX "Possible fan fault.\n");
+ }
}
- if (!(status & WDC_SR_PSUOVER))
- printk(KERN_CRIT PFX "PSU over voltage.\n");
- if (!(status & WDC_SR_PSUUNDR))
- printk(KERN_CRIT PFX "PSU under voltage.\n");
- if (tachometer) {
- if (!(status & WDC_SR_FANGOOD))
- printk(KERN_CRIT PFX "Possible fan fault.\n");
- }
-#endif /* CONFIG_WDT_501_PCI */
- if (!(status&WDC_SR_WCCR)) {
+ if (!(status & WDC_SR_WCCR)) {
#ifdef SOFTWARE_REBOOT
#ifdef ONLY_TESTING
printk(KERN_CRIT PFX "Would Reboot.\n");
@@ -371,12 +371,13 @@
*/
static ssize_t wdtpci_write(struct file *file, const char __user *buf,
- size_t count, loff_t *ppos)
+ size_t count, loff_t *ppos)
{
if (count) {
if (!nowayout) {
size_t i;
+ /* In case it was set long ago */
expect_close = 0;
for (i = 0; i != count; i++) {
@@ -406,10 +407,10 @@
static long wdtpci_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{
- int new_heartbeat;
- int status;
void __user *argp = (void __user *)arg;
int __user *p = argp;
+ int new_heartbeat;
+ int status;
static struct watchdog_info ident = {
.options = WDIOF_SETTIMEOUT|
@@ -421,11 +422,12 @@
/* Add options according to the card we have */
ident.options |= (WDIOF_EXTERN1|WDIOF_EXTERN2);
-#ifdef CONFIG_WDT_501_PCI
- ident.options |= (WDIOF_OVERHEAT|WDIOF_POWERUNDER|WDIOF_POWEROVER);
- if (tachometer)
- ident.options |= WDIOF_FANFAULT;
-#endif /* CONFIG_WDT_501_PCI */
+ if (type == 501) {
+ ident.options |= (WDIOF_OVERHEAT|WDIOF_POWERUNDER|
+ WDIOF_POWEROVER);
+ if (tachometer)
+ ident.options |= WDIOF_FANFAULT;
+ }
switch (cmd) {
case WDIOC_GETSUPPORT:
@@ -503,7 +505,6 @@
return 0;
}
-#ifdef CONFIG_WDT_501_PCI
/**
* wdtpci_temp_read:
* @file: file handle to the watchdog board
@@ -554,7 +555,6 @@
{
return 0;
}
-#endif /* CONFIG_WDT_501_PCI */
/**
* notify_sys:
@@ -596,7 +596,6 @@
.fops = &wdtpci_fops,
};
-#ifdef CONFIG_WDT_501_PCI
static const struct file_operations wdtpci_temp_fops = {
.owner = THIS_MODULE,
.llseek = no_llseek,
@@ -610,7 +609,6 @@
.name = "temperature",
.fops = &wdtpci_temp_fops,
};
-#endif /* CONFIG_WDT_501_PCI */
/*
* The WDT card needs to learn about soft shutdowns in order to
@@ -633,6 +631,11 @@
return -ENODEV;
}
+ if (type != 500 && type != 501) {
+ printk(KERN_ERR PFX "unknown card type '%d'.\n", type);
+ return -ENODEV;
+ }
+
if (pci_enable_device(dev)) {
printk(KERN_ERR PFX "Not possible to enable PCI Device\n");
return -ENODEV;
@@ -678,15 +681,15 @@
goto out_irq;
}
-#ifdef CONFIG_WDT_501_PCI
- ret = misc_register(&temp_miscdev);
- if (ret) {
- printk(KERN_ERR PFX
+ if (type == 501) {
+ ret = misc_register(&temp_miscdev);
+ if (ret) {
+ printk(KERN_ERR PFX
"cannot register miscdev on minor=%d (err=%d)\n",
- TEMP_MINOR, ret);
- goto out_rbt;
+ TEMP_MINOR, ret);
+ goto out_rbt;
+ }
}
-#endif /* CONFIG_WDT_501_PCI */
ret = misc_register(&wdtpci_miscdev);
if (ret) {
@@ -698,20 +701,18 @@
printk(KERN_INFO PFX "initialized. heartbeat=%d sec (nowayout=%d)\n",
heartbeat, nowayout);
-#ifdef CONFIG_WDT_501_PCI
- printk(KERN_INFO "wdt: Fan Tachometer is %s\n",
+ if (type == 501)
+ printk(KERN_INFO "wdt: Fan Tachometer is %s\n",
(tachometer ? "Enabled" : "Disabled"));
-#endif /* CONFIG_WDT_501_PCI */
ret = 0;
out:
return ret;
out_misc:
-#ifdef CONFIG_WDT_501_PCI
- misc_deregister(&temp_miscdev);
+ if (type == 501)
+ misc_deregister(&temp_miscdev);
out_rbt:
-#endif /* CONFIG_WDT_501_PCI */
unregister_reboot_notifier(&wdtpci_notifier);
out_irq:
free_irq(irq, &wdtpci_miscdev);
@@ -728,9 +729,8 @@
/* here we assume only one device will ever have
* been picked up and registered by probe function */
misc_deregister(&wdtpci_miscdev);
-#ifdef CONFIG_WDT_501_PCI
- misc_deregister(&temp_miscdev);
-#endif /* CONFIG_WDT_501_PCI */
+ if (type == 501)
+ misc_deregister(&temp_miscdev);
unregister_reboot_notifier(&wdtpci_notifier);
free_irq(irq, &wdtpci_miscdev);
release_region(io, 16);
diff --git a/fs/Kconfig b/fs/Kconfig
index d78e950..a97263b 100644
--- a/fs/Kconfig
+++ b/fs/Kconfig
@@ -236,10 +236,12 @@
config LOCKD
tristate
+ depends on FILE_LOCKING
config LOCKD_V4
bool
depends on NFSD_V3 || NFS_V3
+ depends on FILE_LOCKING
default y
config EXPORTFS
diff --git a/fs/btrfs/acl.c b/fs/btrfs/acl.c
index 6039725..f128427 100644
--- a/fs/btrfs/acl.c
+++ b/fs/btrfs/acl.c
@@ -29,51 +29,28 @@
#ifdef CONFIG_FS_POSIX_ACL
-static void btrfs_update_cached_acl(struct inode *inode,
- struct posix_acl **p_acl,
- struct posix_acl *acl)
-{
- spin_lock(&inode->i_lock);
- if (*p_acl && *p_acl != BTRFS_ACL_NOT_CACHED)
- posix_acl_release(*p_acl);
- *p_acl = posix_acl_dup(acl);
- spin_unlock(&inode->i_lock);
-}
-
static struct posix_acl *btrfs_get_acl(struct inode *inode, int type)
{
int size;
const char *name;
char *value = NULL;
- struct posix_acl *acl = NULL, **p_acl;
+ struct posix_acl *acl;
+
+ acl = get_cached_acl(inode, type);
+ if (acl != ACL_NOT_CACHED)
+ return acl;
switch (type) {
case ACL_TYPE_ACCESS:
name = POSIX_ACL_XATTR_ACCESS;
- p_acl = &BTRFS_I(inode)->i_acl;
break;
case ACL_TYPE_DEFAULT:
name = POSIX_ACL_XATTR_DEFAULT;
- p_acl = &BTRFS_I(inode)->i_default_acl;
break;
default:
- return ERR_PTR(-EINVAL);
+ BUG();
}
- /* Handle the cached NULL acl case without locking */
- acl = ACCESS_ONCE(*p_acl);
- if (!acl)
- return acl;
-
- spin_lock(&inode->i_lock);
- acl = *p_acl;
- if (acl != BTRFS_ACL_NOT_CACHED)
- acl = posix_acl_dup(acl);
- spin_unlock(&inode->i_lock);
-
- if (acl != BTRFS_ACL_NOT_CACHED)
- return acl;
-
size = __btrfs_getxattr(inode, name, "", 0);
if (size > 0) {
value = kzalloc(size, GFP_NOFS);
@@ -82,13 +59,13 @@
size = __btrfs_getxattr(inode, name, value, size);
if (size > 0) {
acl = posix_acl_from_xattr(value, size);
- btrfs_update_cached_acl(inode, p_acl, acl);
+ set_cached_acl(inode, type, acl);
}
kfree(value);
} else if (size == -ENOENT || size == -ENODATA || size == 0) {
/* FIXME, who returns -ENOENT? I think nobody */
acl = NULL;
- btrfs_update_cached_acl(inode, p_acl, acl);
+ set_cached_acl(inode, type, acl);
} else {
acl = ERR_PTR(-EIO);
}
@@ -121,7 +98,6 @@
{
int ret, size = 0;
const char *name;
- struct posix_acl **p_acl;
char *value = NULL;
mode_t mode;
@@ -141,13 +117,11 @@
ret = 0;
inode->i_mode = mode;
name = POSIX_ACL_XATTR_ACCESS;
- p_acl = &BTRFS_I(inode)->i_acl;
break;
case ACL_TYPE_DEFAULT:
if (!S_ISDIR(inode->i_mode))
return acl ? -EINVAL : 0;
name = POSIX_ACL_XATTR_DEFAULT;
- p_acl = &BTRFS_I(inode)->i_default_acl;
break;
default:
return -EINVAL;
@@ -172,7 +146,7 @@
kfree(value);
if (!ret)
- btrfs_update_cached_acl(inode, p_acl, acl);
+ set_cached_acl(inode, type, acl);
return ret;
}
diff --git a/fs/btrfs/btrfs_inode.h b/fs/btrfs/btrfs_inode.h
index acb4f35..ea1ea0a 100644
--- a/fs/btrfs/btrfs_inode.h
+++ b/fs/btrfs/btrfs_inode.h
@@ -53,10 +53,6 @@
/* used to order data wrt metadata */
struct btrfs_ordered_inode_tree ordered_tree;
- /* standard acl pointers */
- struct posix_acl *i_acl;
- struct posix_acl *i_default_acl;
-
/* for keeping track of orphaned inodes */
struct list_head i_orphan;
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index 03441a9..2779c2f 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -41,8 +41,6 @@
#define BTRFS_MAGIC "_BHRfS_M"
-#define BTRFS_ACL_NOT_CACHED ((void *)-1)
-
#define BTRFS_MAX_LEVEL 8
#define BTRFS_COMPAT_EXTENT_TREE_V0
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 8612b3a..dbe1aab 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -2122,10 +2122,8 @@
* any xattrs or acls
*/
maybe_acls = acls_after_inode_item(leaf, path->slots[0], inode->i_ino);
- if (!maybe_acls) {
- BTRFS_I(inode)->i_acl = NULL;
- BTRFS_I(inode)->i_default_acl = NULL;
- }
+ if (!maybe_acls)
+ cache_no_acl(inode);
BTRFS_I(inode)->block_group = btrfs_find_block_group(root, 0,
alloc_group_block, 0);
@@ -3141,9 +3139,6 @@
{
struct btrfs_inode *bi = BTRFS_I(inode);
- bi->i_acl = BTRFS_ACL_NOT_CACHED;
- bi->i_default_acl = BTRFS_ACL_NOT_CACHED;
-
bi->generation = 0;
bi->sequence = 0;
bi->last_trans = 0;
@@ -4640,8 +4635,6 @@
ei->last_trans = 0;
ei->logged_trans = 0;
btrfs_ordered_inode_tree_init(&ei->ordered_tree);
- ei->i_acl = BTRFS_ACL_NOT_CACHED;
- ei->i_default_acl = BTRFS_ACL_NOT_CACHED;
INIT_LIST_HEAD(&ei->i_orphan);
INIT_LIST_HEAD(&ei->ordered_operations);
return &ei->vfs_inode;
@@ -4655,13 +4648,6 @@
WARN_ON(!list_empty(&inode->i_dentry));
WARN_ON(inode->i_data.nrpages);
- if (BTRFS_I(inode)->i_acl &&
- BTRFS_I(inode)->i_acl != BTRFS_ACL_NOT_CACHED)
- posix_acl_release(BTRFS_I(inode)->i_acl);
- if (BTRFS_I(inode)->i_default_acl &&
- BTRFS_I(inode)->i_default_acl != BTRFS_ACL_NOT_CACHED)
- posix_acl_release(BTRFS_I(inode)->i_default_acl);
-
/*
* Make sure we're properly removed from the ordered operation
* lists.
diff --git a/fs/cifs/CHANGES b/fs/cifs/CHANGES
index b486898..3a9b7a5 100644
--- a/fs/cifs/CHANGES
+++ b/fs/cifs/CHANGES
@@ -5,7 +5,7 @@
on by default if server supports it). Add forceuid and forcegid
mount options (so that when negotiating unix extensions specifying
which uid mounted does not immediately force the server's reported
-uids to be overridden).
+uids to be overridden). Add support for scope mount parm.
Version 1.58
------------
diff --git a/fs/cifs/asn1.c b/fs/cifs/asn1.c
index 1b09f16..20692fb 100644
--- a/fs/cifs/asn1.c
+++ b/fs/cifs/asn1.c
@@ -49,6 +49,7 @@
#define ASN1_OJI 6 /* Object Identifier */
#define ASN1_OJD 7 /* Object Description */
#define ASN1_EXT 8 /* External */
+#define ASN1_ENUM 10 /* Enumerated */
#define ASN1_SEQ 16 /* Sequence */
#define ASN1_SET 17 /* Set */
#define ASN1_NUMSTR 18 /* Numerical String */
@@ -78,10 +79,12 @@
#define SPNEGO_OID_LEN 7
#define NTLMSSP_OID_LEN 10
#define KRB5_OID_LEN 7
+#define KRB5U2U_OID_LEN 8
#define MSKRB5_OID_LEN 7
static unsigned long SPNEGO_OID[7] = { 1, 3, 6, 1, 5, 5, 2 };
static unsigned long NTLMSSP_OID[10] = { 1, 3, 6, 1, 4, 1, 311, 2, 2, 10 };
static unsigned long KRB5_OID[7] = { 1, 2, 840, 113554, 1, 2, 2 };
+static unsigned long KRB5U2U_OID[8] = { 1, 2, 840, 113554, 1, 2, 2, 3 };
static unsigned long MSKRB5_OID[7] = { 1, 2, 840, 48018, 1, 2, 2 };
/*
@@ -122,6 +125,28 @@
return 1;
}
+#if 0 /* will be needed later by spnego decoding/encoding of ntlmssp */
+static unsigned char
+asn1_enum_decode(struct asn1_ctx *ctx, __le32 *val)
+{
+ unsigned char ch;
+
+ if (ctx->pointer >= ctx->end) {
+ ctx->error = ASN1_ERR_DEC_EMPTY;
+ return 0;
+ }
+
+ ch = *(ctx->pointer)++; /* ch has 0xa, ptr points to length octet */
+ if ((ch) == ASN1_ENUM) /* if ch value is ENUM, 0xa */
+ *val = *(++(ctx->pointer)); /* value has enum value */
+ else
+ return 0;
+
+ ctx->pointer++;
+ return 1;
+}
+#endif
+
static unsigned char
asn1_tag_decode(struct asn1_ctx *ctx, unsigned int *tag)
{
@@ -476,10 +501,9 @@
unsigned int cls, con, tag, oidlen, rc;
bool use_ntlmssp = false;
bool use_kerberos = false;
+ bool use_kerberosu2u = false;
bool use_mskerberos = false;
- *secType = NTLM; /* BB eventually make Kerberos or NLTMSSP the default*/
-
/* cifs_dump_mem(" Received SecBlob ", security_blob, length); */
asn1_open(&ctx, security_blob, length);
@@ -515,6 +539,7 @@
return 0;
}
+ /* SPNEGO */
if (asn1_header_decode(&ctx, &end, &cls, &con, &tag) == 0) {
cFYI(1, ("Error decoding negTokenInit"));
return 0;
@@ -526,6 +551,7 @@
return 0;
}
+ /* negTokenInit */
if (asn1_header_decode(&ctx, &end, &cls, &con, &tag) == 0) {
cFYI(1, ("Error decoding negTokenInit"));
return 0;
@@ -537,6 +563,7 @@
return 0;
}
+ /* sequence */
if (asn1_header_decode(&ctx, &end, &cls, &con, &tag) == 0) {
cFYI(1, ("Error decoding 2nd part of negTokenInit"));
return 0;
@@ -548,6 +575,7 @@
return 0;
}
+ /* sequence of */
if (asn1_header_decode
(&ctx, &sequence_end, &cls, &con, &tag) == 0) {
cFYI(1, ("Error decoding 2nd part of negTokenInit"));
@@ -560,6 +588,7 @@
return 0;
}
+ /* list of security mechanisms */
while (!asn1_eoc_decode(&ctx, sequence_end)) {
rc = asn1_header_decode(&ctx, &end, &cls, &con, &tag);
if (!rc) {
@@ -576,11 +605,15 @@
if (compare_oid(oid, oidlen, MSKRB5_OID,
MSKRB5_OID_LEN) &&
- !use_kerberos)
+ !use_mskerberos)
use_mskerberos = true;
+ else if (compare_oid(oid, oidlen, KRB5U2U_OID,
+ KRB5U2U_OID_LEN) &&
+ !use_kerberosu2u)
+ use_kerberosu2u = true;
else if (compare_oid(oid, oidlen, KRB5_OID,
KRB5_OID_LEN) &&
- !use_mskerberos)
+ !use_kerberos)
use_kerberos = true;
else if (compare_oid(oid, oidlen, NTLMSSP_OID,
NTLMSSP_OID_LEN))
@@ -593,7 +626,12 @@
}
}
+ /* mechlistMIC */
if (asn1_header_decode(&ctx, &end, &cls, &con, &tag) == 0) {
+ /* Check if we have reached the end of the blob, but with
+ no mechListMic (e.g. NTLMSSP instead of KRB5) */
+ if (ctx.error == ASN1_ERR_DEC_EMPTY)
+ goto decode_negtoken_exit;
cFYI(1, ("Error decoding last part negTokenInit exit3"));
return 0;
} else if ((cls != ASN1_CTX) || (con != ASN1_CON)) {
@@ -602,6 +640,8 @@
cls, con, tag, end, *end));
return 0;
}
+
+ /* sequence */
if (asn1_header_decode(&ctx, &end, &cls, &con, &tag) == 0) {
cFYI(1, ("Error decoding last part negTokenInit exit5"));
return 0;
@@ -611,6 +651,7 @@
cls, con, tag, end, *end));
}
+ /* sequence of */
if (asn1_header_decode(&ctx, &end, &cls, &con, &tag) == 0) {
cFYI(1, ("Error decoding last part negTokenInit exit 7"));
return 0;
@@ -619,6 +660,8 @@
cls, con, tag, end, *end));
return 0;
}
+
+ /* general string */
if (asn1_header_decode(&ctx, &end, &cls, &con, &tag) == 0) {
cFYI(1, ("Error decoding last part negTokenInit exit9"));
return 0;
@@ -630,13 +673,13 @@
}
cFYI(1, ("Need to call asn1_octets_decode() function for %s",
ctx.pointer)); /* is this UTF-8 or ASCII? */
-
+decode_negtoken_exit:
if (use_kerberos)
*secType = Kerberos;
else if (use_mskerberos)
*secType = MSKerberos;
else if (use_ntlmssp)
- *secType = NTLMSSP;
+ *secType = RawNTLMSSP;
return 1;
}
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
index 0d92114..9f669f9 100644
--- a/fs/cifs/cifsfs.c
+++ b/fs/cifs/cifsfs.c
@@ -333,6 +333,27 @@
kmem_cache_free(cifs_inode_cachep, CIFS_I(inode));
}
+static void
+cifs_show_address(struct seq_file *s, struct TCP_Server_Info *server)
+{
+ seq_printf(s, ",addr=");
+
+ switch (server->addr.sockAddr.sin_family) {
+ case AF_INET:
+ seq_printf(s, "%pI4", &server->addr.sockAddr.sin_addr.s_addr);
+ break;
+ case AF_INET6:
+ seq_printf(s, "%pI6",
+ &server->addr.sockAddr6.sin6_addr.s6_addr);
+ if (server->addr.sockAddr6.sin6_scope_id)
+ seq_printf(s, "%%%u",
+ server->addr.sockAddr6.sin6_scope_id);
+ break;
+ default:
+ seq_printf(s, "(unknown)");
+ }
+}
+
/*
* cifs_show_options() is for displaying mount options in /proc/mounts.
* Not all settable options are displayed but most of the important
@@ -343,83 +364,64 @@
{
struct cifs_sb_info *cifs_sb;
struct cifsTconInfo *tcon;
- struct TCP_Server_Info *server;
cifs_sb = CIFS_SB(m->mnt_sb);
+ tcon = cifs_sb->tcon;
- if (cifs_sb) {
- tcon = cifs_sb->tcon;
- if (tcon) {
- seq_printf(s, ",unc=%s", cifs_sb->tcon->treeName);
- if (tcon->ses) {
- if (tcon->ses->userName)
- seq_printf(s, ",username=%s",
- tcon->ses->userName);
- if (tcon->ses->domainName)
- seq_printf(s, ",domain=%s",
- tcon->ses->domainName);
- server = tcon->ses->server;
- if (server) {
- seq_printf(s, ",addr=");
- switch (server->addr.sockAddr6.
- sin6_family) {
- case AF_INET6:
- seq_printf(s, "%pI6",
- &server->addr.sockAddr6.sin6_addr);
- break;
- case AF_INET:
- seq_printf(s, "%pI4",
- &server->addr.sockAddr.sin_addr.s_addr);
- break;
- }
- }
- }
- if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_UID) ||
- !(tcon->unix_ext))
- seq_printf(s, ",uid=%d", cifs_sb->mnt_uid);
- if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_GID) ||
- !(tcon->unix_ext))
- seq_printf(s, ",gid=%d", cifs_sb->mnt_gid);
- if (!tcon->unix_ext) {
- seq_printf(s, ",file_mode=0%o,dir_mode=0%o",
+ seq_printf(s, ",unc=%s", cifs_sb->tcon->treeName);
+ if (tcon->ses->userName)
+ seq_printf(s, ",username=%s", tcon->ses->userName);
+ if (tcon->ses->domainName)
+ seq_printf(s, ",domain=%s", tcon->ses->domainName);
+
+ seq_printf(s, ",uid=%d", cifs_sb->mnt_uid);
+ if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_UID)
+ seq_printf(s, ",forceuid");
+
+ seq_printf(s, ",gid=%d", cifs_sb->mnt_gid);
+ if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_GID)
+ seq_printf(s, ",forcegid");
+
+ cifs_show_address(s, tcon->ses->server);
+
+ if (!tcon->unix_ext)
+ seq_printf(s, ",file_mode=0%o,dir_mode=0%o",
cifs_sb->mnt_file_mode,
cifs_sb->mnt_dir_mode);
- }
- if (tcon->seal)
- seq_printf(s, ",seal");
- if (tcon->nocase)
- seq_printf(s, ",nocase");
- if (tcon->retry)
- seq_printf(s, ",hard");
- }
- if (cifs_sb->prepath)
- seq_printf(s, ",prepath=%s", cifs_sb->prepath);
- if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS)
- seq_printf(s, ",posixpaths");
- if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SET_UID)
- seq_printf(s, ",setuids");
- if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM)
- seq_printf(s, ",serverino");
- if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO)
- seq_printf(s, ",directio");
- if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_XATTR)
- seq_printf(s, ",nouser_xattr");
- if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR)
- seq_printf(s, ",mapchars");
- if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL)
- seq_printf(s, ",sfu");
- if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL)
- seq_printf(s, ",nobrl");
- if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_ACL)
- seq_printf(s, ",cifsacl");
- if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DYNPERM)
- seq_printf(s, ",dynperm");
- if (m->mnt_sb->s_flags & MS_POSIXACL)
- seq_printf(s, ",acl");
+ if (tcon->seal)
+ seq_printf(s, ",seal");
+ if (tcon->nocase)
+ seq_printf(s, ",nocase");
+ if (tcon->retry)
+ seq_printf(s, ",hard");
+ if (cifs_sb->prepath)
+ seq_printf(s, ",prepath=%s", cifs_sb->prepath);
+ if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS)
+ seq_printf(s, ",posixpaths");
+ if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SET_UID)
+ seq_printf(s, ",setuids");
+ if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM)
+ seq_printf(s, ",serverino");
+ if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO)
+ seq_printf(s, ",directio");
+ if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_XATTR)
+ seq_printf(s, ",nouser_xattr");
+ if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR)
+ seq_printf(s, ",mapchars");
+ if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL)
+ seq_printf(s, ",sfu");
+ if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL)
+ seq_printf(s, ",nobrl");
+ if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_ACL)
+ seq_printf(s, ",cifsacl");
+ if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DYNPERM)
+ seq_printf(s, ",dynperm");
+ if (m->mnt_sb->s_flags & MS_POSIXACL)
+ seq_printf(s, ",acl");
- seq_printf(s, ",rsize=%d", cifs_sb->rsize);
- seq_printf(s, ",wsize=%d", cifs_sb->wsize);
- }
+ seq_printf(s, ",rsize=%d", cifs_sb->rsize);
+ seq_printf(s, ",wsize=%d", cifs_sb->wsize);
+
return 0;
}
@@ -535,9 +537,14 @@
if (tcon == NULL)
return;
- lock_kernel();
read_lock(&cifs_tcp_ses_lock);
- if (tcon->tc_count == 1)
+ if ((tcon->tc_count > 1) || (tcon->tidStatus == CifsExiting)) {
+ /* we have other mounts to same share or we have
+ already tried to force umount this and woken up
+ all waiting network requests, nothing to do */
+ read_unlock(&cifs_tcp_ses_lock);
+ return;
+ } else if (tcon->tc_count == 1)
tcon->tidStatus = CifsExiting;
read_unlock(&cifs_tcp_ses_lock);
@@ -552,9 +559,7 @@
wake_up_all(&tcon->ses->server->response_q);
msleep(1);
}
-/* BB FIXME - finish add checks for tidStatus BB */
- unlock_kernel();
return;
}
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
index a61ab77..e1225e6 100644
--- a/fs/cifs/cifsglob.h
+++ b/fs/cifs/cifsglob.h
@@ -83,7 +83,7 @@
NTLM, /* Legacy NTLM012 auth with NTLM hash */
NTLMv2, /* Legacy NTLM auth with NTLMv2 hash */
RawNTLMSSP, /* NTLMSSP without SPNEGO, NTLMv2 hash */
- NTLMSSP, /* NTLMSSP via SPNEGO, NTLMv2 hash */
+/* NTLMSSP, */ /* can use rawNTLMSSP instead of NTLMSSP via SPNEGO */
Kerberos, /* Kerberos via SPNEGO */
MSKerberos, /* MS Kerberos via SPNEGO */
};
diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h
index f945232..c419416 100644
--- a/fs/cifs/cifsproto.h
+++ b/fs/cifs/cifsproto.h
@@ -74,7 +74,7 @@
extern unsigned int smbCalcSize_LE(struct smb_hdr *ptr);
extern int decode_negTokenInit(unsigned char *security_blob, int length,
enum securityEnum *secType);
-extern int cifs_inet_pton(const int, const char *source, void *dst);
+extern int cifs_convert_address(char *src, void *dst);
extern int map_smb_to_linux_error(struct smb_hdr *smb, int logErr);
extern void header_assemble(struct smb_hdr *, char /* command */ ,
const struct cifsTconInfo *, int /* length of
diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
index b84c61d..61007c6 100644
--- a/fs/cifs/cifssmb.c
+++ b/fs/cifs/cifssmb.c
@@ -594,7 +594,7 @@
else if (secFlags & CIFSSEC_MAY_KRB5)
server->secType = Kerberos;
else if (secFlags & CIFSSEC_MAY_NTLMSSP)
- server->secType = NTLMSSP;
+ server->secType = RawNTLMSSP;
else if (secFlags & CIFSSEC_MAY_LANMAN)
server->secType = LANMAN;
/* #ifdef CONFIG_CIFS_EXPERIMENTAL
@@ -729,7 +729,7 @@
* the tcon is no longer on the list, so no need to take lock before
* checking this.
*/
- if (tcon->need_reconnect)
+ if ((tcon->need_reconnect) || (tcon->ses->need_reconnect))
return 0;
rc = small_smb_init(SMB_COM_TREE_DISCONNECT, 0, tcon,
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index 97f4311..e16d759 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -70,7 +70,6 @@
mode_t file_mode;
mode_t dir_mode;
unsigned secFlg;
- bool rw:1;
bool retry:1;
bool intr:1;
bool setuids:1;
@@ -832,7 +831,6 @@
vol->dir_mode = vol->file_mode = S_IRUGO | S_IXUGO | S_IWUSR;
/* vol->retry default is 0 (i.e. "soft" limited retry not hard retry) */
- vol->rw = true;
/* default is always to request posix paths. */
vol->posix_paths = 1;
/* default to using server inode numbers where available */
@@ -1199,7 +1197,9 @@
} else if (strnicmp(data, "guest", 5) == 0) {
/* ignore */
} else if (strnicmp(data, "rw", 2) == 0) {
- vol->rw = true;
+ /* ignore */
+ } else if (strnicmp(data, "ro", 2) == 0) {
+ /* ignore */
} else if (strnicmp(data, "noblocksend", 11) == 0) {
vol->noblocksnd = 1;
} else if (strnicmp(data, "noautotune", 10) == 0) {
@@ -1218,8 +1218,6 @@
parse these options again and set anything and it
is ok to just ignore them */
continue;
- } else if (strnicmp(data, "ro", 2) == 0) {
- vol->rw = false;
} else if (strnicmp(data, "hard", 4) == 0) {
vol->retry = 1;
} else if (strnicmp(data, "soft", 4) == 0) {
@@ -1386,8 +1384,10 @@
server->addr.sockAddr.sin_addr.s_addr))
continue;
else if (addr->ss_family == AF_INET6 &&
- !ipv6_addr_equal(&server->addr.sockAddr6.sin6_addr,
- &addr6->sin6_addr))
+ (!ipv6_addr_equal(&server->addr.sockAddr6.sin6_addr,
+ &addr6->sin6_addr) ||
+ server->addr.sockAddr6.sin6_scope_id !=
+ addr6->sin6_scope_id))
continue;
++server->srv_count;
@@ -1433,28 +1433,15 @@
memset(&addr, 0, sizeof(struct sockaddr_storage));
+ cFYI(1, ("UNC: %s ip: %s", volume_info->UNC, volume_info->UNCip));
+
if (volume_info->UNCip && volume_info->UNC) {
- rc = cifs_inet_pton(AF_INET, volume_info->UNCip,
- &sin_server->sin_addr.s_addr);
-
- if (rc <= 0) {
- /* not ipv4 address, try ipv6 */
- rc = cifs_inet_pton(AF_INET6, volume_info->UNCip,
- &sin_server6->sin6_addr.in6_u);
- if (rc > 0)
- addr.ss_family = AF_INET6;
- } else {
- addr.ss_family = AF_INET;
- }
-
- if (rc <= 0) {
+ rc = cifs_convert_address(volume_info->UNCip, &addr);
+ if (!rc) {
/* we failed translating address */
rc = -EINVAL;
goto out_err;
}
-
- cFYI(1, ("UNC: %s ip: %s", volume_info->UNC,
- volume_info->UNCip));
} else if (volume_info->UNCip) {
/* BB using ip addr as tcp_ses name to connect to the
DFS root below */
@@ -1513,14 +1500,14 @@
cFYI(1, ("attempting ipv6 connect"));
/* BB should we allow ipv6 on port 139? */
/* other OS never observed in Wild doing 139 with v6 */
+ sin_server6->sin6_port = htons(volume_info->port);
memcpy(&tcp_ses->addr.sockAddr6, sin_server6,
sizeof(struct sockaddr_in6));
- sin_server6->sin6_port = htons(volume_info->port);
rc = ipv6_connect(tcp_ses);
} else {
+ sin_server->sin_port = htons(volume_info->port);
memcpy(&tcp_ses->addr.sockAddr, sin_server,
sizeof(struct sockaddr_in));
- sin_server->sin_port = htons(volume_info->port);
rc = ipv4_connect(tcp_ses);
}
if (rc < 0) {
diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c
index 3758965..7dc6b74 100644
--- a/fs/cifs/dir.c
+++ b/fs/cifs/dir.c
@@ -307,8 +307,9 @@
full_path = build_path_from_dentry(direntry);
if (full_path == NULL) {
+ rc = -ENOMEM;
FreeXid(xid);
- return -ENOMEM;
+ return rc;
}
if (oplockEnabled)
@@ -540,8 +541,9 @@
buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL);
if (buf == NULL) {
kfree(full_path);
+ rc = -ENOMEM;
FreeXid(xid);
- return -ENOMEM;
+ return rc;
}
rc = CIFSSMBOpen(xid, pTcon, full_path,
diff --git a/fs/cifs/dns_resolve.c b/fs/cifs/dns_resolve.c
index df4a306..8794814 100644
--- a/fs/cifs/dns_resolve.c
+++ b/fs/cifs/dns_resolve.c
@@ -35,26 +35,11 @@
* 0 - name is not IP
*/
static int
-is_ip(const char *name)
+is_ip(char *name)
{
- int rc;
- struct sockaddr_in sin_server;
- struct sockaddr_in6 sin_server6;
+ struct sockaddr_storage ss;
- rc = cifs_inet_pton(AF_INET, name,
- &sin_server.sin_addr.s_addr);
-
- if (rc <= 0) {
- /* not ipv4 address, try ipv6 */
- rc = cifs_inet_pton(AF_INET6, name,
- &sin_server6.sin6_addr.in6_u);
- if (rc > 0)
- return 1;
- } else {
- return 1;
- }
- /* we failed translating address */
- return 0;
+ return cifs_convert_address(name, &ss);
}
static int
@@ -72,7 +57,7 @@
ip[datalen] = '\0';
/* make sure this looks like an address */
- if (!is_ip((const char *) ip)) {
+ if (!is_ip(ip)) {
kfree(ip);
return -EINVAL;
}
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index 0686684..ebdbe62 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -300,14 +300,16 @@
pCifsInode = CIFS_I(file->f_path.dentry->d_inode);
pCifsFile = cifs_fill_filedata(file);
if (pCifsFile) {
+ rc = 0;
FreeXid(xid);
- return 0;
+ return rc;
}
full_path = build_path_from_dentry(file->f_path.dentry);
if (full_path == NULL) {
+ rc = -ENOMEM;
FreeXid(xid);
- return -ENOMEM;
+ return rc;
}
cFYI(1, ("inode = 0x%p file flags are 0x%x for %s",
@@ -494,8 +496,9 @@
mutex_unlock(&pCifsFile->fh_mutex);
if (!pCifsFile->invalidHandle) {
mutex_lock(&pCifsFile->fh_mutex);
+ rc = 0;
FreeXid(xid);
- return 0;
+ return rc;
}
if (file->f_path.dentry == NULL) {
@@ -845,8 +848,9 @@
tcon = cifs_sb->tcon;
if (file->private_data == NULL) {
+ rc = -EBADF;
FreeXid(xid);
- return -EBADF;
+ return rc;
}
netfid = ((struct cifsFileInfo *)file->private_data)->netfid;
@@ -1805,8 +1809,9 @@
pTcon = cifs_sb->tcon;
if (file->private_data == NULL) {
+ rc = -EBADF;
FreeXid(xid);
- return -EBADF;
+ return rc;
}
open_file = (struct cifsFileInfo *)file->private_data;
@@ -1885,8 +1890,9 @@
pTcon = cifs_sb->tcon;
if (file->private_data == NULL) {
+ rc = -EBADF;
FreeXid(xid);
- return -EBADF;
+ return rc;
}
open_file = (struct cifsFileInfo *)file->private_data;
@@ -2019,8 +2025,9 @@
xid = GetXid();
if (file->private_data == NULL) {
+ rc = -EBADF;
FreeXid(xid);
- return -EBADF;
+ return rc;
}
open_file = (struct cifsFileInfo *)file->private_data;
cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
@@ -2185,8 +2192,9 @@
xid = GetXid();
if (file->private_data == NULL) {
+ rc = -EBADF;
FreeXid(xid);
- return -EBADF;
+ return rc;
}
cFYI(1, ("readpage %p at offset %d 0x%x\n",
diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
index fad882b..155c9e7 100644
--- a/fs/cifs/inode.c
+++ b/fs/cifs/inode.c
@@ -988,8 +988,9 @@
* sb->s_vfs_rename_mutex here */
full_path = build_path_from_dentry(dentry);
if (full_path == NULL) {
+ rc = -ENOMEM;
FreeXid(xid);
- return -ENOMEM;
+ return rc;
}
if ((tcon->ses->capabilities & CAP_UNIX) &&
@@ -1118,8 +1119,9 @@
full_path = build_path_from_dentry(direntry);
if (full_path == NULL) {
+ rc = -ENOMEM;
FreeXid(xid);
- return -ENOMEM;
+ return rc;
}
if ((pTcon->ses->capabilities & CAP_UNIX) &&
@@ -1303,8 +1305,9 @@
full_path = build_path_from_dentry(direntry);
if (full_path == NULL) {
+ rc = -ENOMEM;
FreeXid(xid);
- return -ENOMEM;
+ return rc;
}
rc = CIFSSMBRmDir(xid, pTcon, full_path, cifs_sb->local_nls,
@@ -1508,8 +1511,9 @@
since that would deadlock */
full_path = build_path_from_dentry(direntry);
if (full_path == NULL) {
+ rc = -ENOMEM;
FreeXid(xid);
- return -ENOMEM;
+ return rc;
}
cFYI(1, ("Revalidate: %s inode 0x%p count %d dentry: 0x%p d_time %ld "
"jiffies %ld", full_path, direntry->d_inode,
@@ -1911,8 +1915,9 @@
full_path = build_path_from_dentry(direntry);
if (full_path == NULL) {
+ rc = -ENOMEM;
FreeXid(xid);
- return -ENOMEM;
+ return rc;
}
/*
diff --git a/fs/cifs/link.c b/fs/cifs/link.c
index cd83c53..fc1e048 100644
--- a/fs/cifs/link.c
+++ b/fs/cifs/link.c
@@ -172,8 +172,9 @@
full_path = build_path_from_dentry(direntry);
if (full_path == NULL) {
+ rc = -ENOMEM;
FreeXid(xid);
- return -ENOMEM;
+ return rc;
}
cFYI(1, ("Full path: %s", full_path));
diff --git a/fs/cifs/netmisc.c b/fs/cifs/netmisc.c
index 32d6baa..bd6d689 100644
--- a/fs/cifs/netmisc.c
+++ b/fs/cifs/netmisc.c
@@ -133,10 +133,12 @@
{0, 0}
};
-/* Convert string containing dotted ip address to binary form */
-/* returns 0 if invalid address */
-
-int
+/*
+ * Convert a string containing a text IPv4 or IPv6 address to binary form.
+ *
+ * Returns 0 on failure.
+ */
+static int
cifs_inet_pton(const int address_family, const char *cp, void *dst)
{
int ret = 0;
@@ -153,6 +155,52 @@
return ret;
}
+/*
+ * Try to convert a string to an IPv4 address and then attempt to convert
+ * it to an IPv6 address if that fails. Set the family field if either
+ * succeeds. If it's an IPv6 address and it has a '%' sign in it, try to
+ * treat the part following it as a numeric sin6_scope_id.
+ *
+ * Returns 0 on failure.
+ */
+int
+cifs_convert_address(char *src, void *dst)
+{
+ int rc;
+ char *pct, *endp;
+ struct sockaddr_in *s4 = (struct sockaddr_in *) dst;
+ struct sockaddr_in6 *s6 = (struct sockaddr_in6 *) dst;
+
+ /* IPv4 address */
+ if (cifs_inet_pton(AF_INET, src, &s4->sin_addr.s_addr)) {
+ s4->sin_family = AF_INET;
+ return 1;
+ }
+
+ /* temporarily terminate string */
+ pct = strchr(src, '%');
+ if (pct)
+ *pct = '\0';
+
+ rc = cifs_inet_pton(AF_INET6, src, &s6->sin6_addr.s6_addr);
+
+ /* repair temp termination (if any) and make pct point to scopeid */
+ if (pct)
+ *pct++ = '%';
+
+ if (!rc)
+ return rc;
+
+ s6->sin6_family = AF_INET6;
+ if (pct) {
+ s6->sin6_scope_id = (u32) simple_strtoul(pct, &endp, 0);
+ if (!*pct || *endp)
+ return 0;
+ }
+
+ return rc;
+}
+
/*****************************************************************************
convert a NT status code to a dos class/code
*****************************************************************************/
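The cifs_convert_address() helper introduced above tries IPv4 first, falls back to IPv6, and treats anything after a '%' as a numeric scope id. A minimal userspace sketch of the same parsing (illustrative only; parse_addr() and the sample address are invented, and inet_pton()/strtoul() stand in for the kernel helpers):

    /* Userspace sketch of the try-IPv4-then-IPv6 parsing, including the
     * "%scopeid" suffix handling; parse_addr() is a hypothetical name. */
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>
    #include <arpa/inet.h>

    static int parse_addr(char *src, struct sockaddr_storage *dst)
    {
    	struct sockaddr_in *s4 = (struct sockaddr_in *)dst;
    	struct sockaddr_in6 *s6 = (struct sockaddr_in6 *)dst;
    	char *pct, *endp;

    	if (inet_pton(AF_INET, src, &s4->sin_addr) == 1) {
    		s4->sin_family = AF_INET;
    		return 1;
    	}

    	/* temporarily cut off a trailing %scopeid, as the kernel code does */
    	pct = strchr(src, '%');
    	if (pct)
    		*pct = '\0';

    	if (inet_pton(AF_INET6, src, &s6->sin6_addr) != 1) {
    		if (pct)
    			*pct = '%';
    		return 0;
    	}

    	s6->sin6_family = AF_INET6;
    	if (pct) {
    		*pct++ = '%';		/* restore the original string */
    		s6->sin6_scope_id = (unsigned int)strtoul(pct, &endp, 0);
    		if (!*pct || *endp)
    			return 0;	/* empty or non-numeric scope id */
    	}
    	return 1;
    }

    int main(void)
    {
    	struct sockaddr_storage ss;
    	char buf[] = "fe80::1%2";

    	printf("parsed: %d scope: %u\n", parse_addr(buf, &ss),
    	       ((struct sockaddr_in6 *)&ss)->sin6_scope_id);
    	return 0;
    }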
diff --git a/fs/cifs/sess.c b/fs/cifs/sess.c
index 897a052..7085a62 100644
--- a/fs/cifs/sess.c
+++ b/fs/cifs/sess.c
@@ -802,7 +802,7 @@
#endif /* CONFIG_CIFS_UPCALL */
} else {
#ifdef CONFIG_CIFS_EXPERIMENTAL
- if ((experimEnabled > 1) && (type == RawNTLMSSP)) {
+ if (type == RawNTLMSSP) {
if ((pSMB->req.hdr.Flags2 & SMBFLG2_UNICODE) == 0) {
cERROR(1, ("NTLMSSP requires Unicode support"));
rc = -ENOSYS;
diff --git a/fs/cifs/xattr.c b/fs/cifs/xattr.c
index e9527ee..a75afa3 100644
--- a/fs/cifs/xattr.c
+++ b/fs/cifs/xattr.c
@@ -64,8 +64,9 @@
full_path = build_path_from_dentry(direntry);
if (full_path == NULL) {
+ rc = -ENOMEM;
FreeXid(xid);
- return -ENOMEM;
+ return rc;
}
if (ea_name == NULL) {
cFYI(1, ("Null xattr names not supported"));
@@ -118,8 +119,9 @@
full_path = build_path_from_dentry(direntry);
if (full_path == NULL) {
+ rc = -ENOMEM;
FreeXid(xid);
- return -ENOMEM;
+ return rc;
}
/* return dos attributes as pseudo xattr */
/* return alt name if available as pseudo attr */
@@ -225,8 +227,9 @@
full_path = build_path_from_dentry(direntry);
if (full_path == NULL) {
+ rc = -ENOMEM;
FreeXid(xid);
- return -ENOMEM;
+ return rc;
}
/* return dos attributes as pseudo xattr */
/* return alt name if available as pseudo attr */
@@ -351,8 +354,9 @@
full_path = build_path_from_dentry(direntry);
if (full_path == NULL) {
+ rc = -ENOMEM;
FreeXid(xid);
- return -ENOMEM;
+ return rc;
}
/* return dos attributes as pseudo xattr */
/* return alt name if available as pseudo attr */
diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c
index c5ded5f..626c748 100644
--- a/fs/compat_ioctl.c
+++ b/fs/compat_ioctl.c
@@ -31,6 +31,7 @@
#include <linux/skbuff.h>
#include <linux/netlink.h>
#include <linux/vt.h>
+#include <linux/falloc.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/ppp_defs.h>
@@ -94,7 +95,6 @@
#include <linux/atm_tcp.h>
#include <linux/sonet.h>
#include <linux/atm_suni.h>
-#include <linux/mtd/mtd.h>
#include <linux/usb.h>
#include <linux/usbdevice_fs.h>
@@ -1405,46 +1405,6 @@
#define HIDPGETCONNLIST _IOR('H', 210, int)
#define HIDPGETCONNINFO _IOR('H', 211, int)
-struct mtd_oob_buf32 {
- u_int32_t start;
- u_int32_t length;
- compat_caddr_t ptr; /* unsigned char* */
-};
-
-#define MEMWRITEOOB32 _IOWR('M',3,struct mtd_oob_buf32)
-#define MEMREADOOB32 _IOWR('M',4,struct mtd_oob_buf32)
-
-static int mtd_rw_oob(unsigned int fd, unsigned int cmd, unsigned long arg)
-{
- struct mtd_oob_buf __user *buf = compat_alloc_user_space(sizeof(*buf));
- struct mtd_oob_buf32 __user *buf32 = compat_ptr(arg);
- u32 data;
- char __user *datap;
- unsigned int real_cmd;
- int err;
-
- real_cmd = (cmd == MEMREADOOB32) ?
- MEMREADOOB : MEMWRITEOOB;
-
- if (copy_in_user(&buf->start, &buf32->start,
- 2 * sizeof(u32)) ||
- get_user(data, &buf32->ptr))
- return -EFAULT;
- datap = compat_ptr(data);
- if (put_user(datap, &buf->ptr))
- return -EFAULT;
-
- err = sys_ioctl(fd, real_cmd, (unsigned long) buf);
-
- if (!err) {
- if (copy_in_user(&buf32->start, &buf->start,
- 2 * sizeof(u32)))
- err = -EFAULT;
- }
-
- return err;
-}
-
#ifdef CONFIG_BLOCK
struct raw32_config_request
{
@@ -1820,6 +1780,41 @@
return sys_ioctl(fd, cmd, (unsigned long)tn);
}
+/* on ia32 l_start is on a 32-bit boundary */
+#if defined(CONFIG_IA64) || defined(CONFIG_X86_64)
+struct space_resv_32 {
+ __s16 l_type;
+ __s16 l_whence;
+ __s64 l_start __attribute__((packed));
+ /* len == 0 means until end of file */
+ __s64 l_len __attribute__((packed));
+ __s32 l_sysid;
+ __u32 l_pid;
+ __s32 l_pad[4]; /* reserve area */
+};
+
+#define FS_IOC_RESVSP_32 _IOW ('X', 40, struct space_resv_32)
+#define FS_IOC_RESVSP64_32 _IOW ('X', 42, struct space_resv_32)
+
+/* just account for different alignment */
+static int compat_ioctl_preallocate(struct file *file, unsigned long arg)
+{
+ struct space_resv_32 __user *p32 = (void __user *)arg;
+ struct space_resv __user *p = compat_alloc_user_space(sizeof(*p));
+
+ if (copy_in_user(&p->l_type, &p32->l_type, sizeof(s16)) ||
+ copy_in_user(&p->l_whence, &p32->l_whence, sizeof(s16)) ||
+ copy_in_user(&p->l_start, &p32->l_start, sizeof(s64)) ||
+ copy_in_user(&p->l_len, &p32->l_len, sizeof(s64)) ||
+ copy_in_user(&p->l_sysid, &p32->l_sysid, sizeof(s32)) ||
+ copy_in_user(&p->l_pid, &p32->l_pid, sizeof(u32)) ||
+ copy_in_user(&p->l_pad, &p32->l_pad, 4*sizeof(u32)))
+ return -EFAULT;
+
+ return ioctl_preallocate(file, p);
+}
+#endif
+
typedef int (*ioctl_trans_handler_t)(unsigned int, unsigned int,
unsigned long, struct file *);
@@ -2426,15 +2421,6 @@
COMPATIBLE_IOCTL(USBDEVFS_REAPURB32)
COMPATIBLE_IOCTL(USBDEVFS_REAPURBNDELAY32)
COMPATIBLE_IOCTL(USBDEVFS_CLEAR_HALT)
-/* MTD */
-COMPATIBLE_IOCTL(MEMGETINFO)
-COMPATIBLE_IOCTL(MEMERASE)
-COMPATIBLE_IOCTL(MEMLOCK)
-COMPATIBLE_IOCTL(MEMUNLOCK)
-COMPATIBLE_IOCTL(MEMGETREGIONCOUNT)
-COMPATIBLE_IOCTL(MEMGETREGIONINFO)
-COMPATIBLE_IOCTL(MEMGETBADBLOCK)
-COMPATIBLE_IOCTL(MEMSETBADBLOCK)
/* NBD */
ULONG_IOCTL(NBD_SET_SOCK)
ULONG_IOCTL(NBD_SET_BLKSIZE)
@@ -2544,8 +2530,6 @@
COMPATIBLE_IOCTL(JSIOCGNAME(0))
/* now things that need handlers */
-HANDLE_IOCTL(MEMREADOOB32, mtd_rw_oob)
-HANDLE_IOCTL(MEMWRITEOOB32, mtd_rw_oob)
#ifdef CONFIG_NET
HANDLE_IOCTL(SIOCGIFNAME, dev_ifname32)
HANDLE_IOCTL(SIOCGIFCONF, dev_ifconf)
@@ -2808,6 +2792,18 @@
case FIOQSIZE:
break;
+#if defined(CONFIG_IA64) || defined(CONFIG_X86_64)
+ case FS_IOC_RESVSP_32:
+ case FS_IOC_RESVSP64_32:
+ error = compat_ioctl_preallocate(filp, arg);
+ goto out_fput;
+#else
+ case FS_IOC_RESVSP:
+ case FS_IOC_RESVSP64:
+ error = ioctl_preallocate(filp, (void __user *)arg);
+ goto out_fput;
+#endif
+
case FIBMAP:
case FIGETBSZ:
case FIONREAD:
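The field-by-field copy in compat_ioctl_preallocate() above exists because a 32-bit compiler places the 64-bit l_start/l_len members on a 4-byte boundary. A small host-side sketch (illustrative only; both struct names are invented) that prints the two layouts:

    /* Shows why 'struct space_resv' laid out for ia32 differs from the
     * natural 64-bit layout, which the compat handler compensates for. */
    #include <stdio.h>
    #include <stddef.h>
    #include <stdint.h>

    struct space_resv_64 {		/* natural x86-64 alignment: 64-bit fields on 8 bytes */
    	int16_t  l_type;
    	int16_t  l_whence;
    	int64_t  l_start;
    	int64_t  l_len;
    	int32_t  l_sysid;
    	uint32_t l_pid;
    	int32_t  l_pad[4];
    };

    struct space_resv_ia32 {	/* ia32 layout: 64-bit members on a 4-byte boundary */
    	int16_t  l_type;
    	int16_t  l_whence;
    	int64_t  l_start __attribute__((packed));
    	int64_t  l_len __attribute__((packed));
    	int32_t  l_sysid;
    	uint32_t l_pid;
    	int32_t  l_pad[4];
    };

    int main(void)
    {
    	printf("64-bit ABI: sizeof=%zu offsetof(l_start)=%zu\n",
    	       sizeof(struct space_resv_64),
    	       offsetof(struct space_resv_64, l_start));
    	printf("ia32 ABI:   sizeof=%zu offsetof(l_start)=%zu\n",
    	       sizeof(struct space_resv_ia32),
    	       offsetof(struct space_resv_ia32, l_start));
    	return 0;
    }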
diff --git a/fs/devpts/inode.c b/fs/devpts/inode.c
index 9b1d285..75efb02 100644
--- a/fs/devpts/inode.c
+++ b/fs/devpts/inode.c
@@ -423,7 +423,6 @@
}
static struct file_system_type devpts_fs_type = {
- .owner = THIS_MODULE,
.name = "devpts",
.get_sb = devpts_get_sb,
.kill_sb = devpts_kill_sb,
@@ -564,13 +563,4 @@
}
return err;
}
-
-static void __exit exit_devpts_fs(void)
-{
- unregister_filesystem(&devpts_fs_type);
- mntput(devpts_mnt);
-}
-
module_init(init_devpts_fs)
-module_exit(exit_devpts_fs)
-MODULE_LICENSE("GPL");
diff --git a/fs/ext2/acl.c b/fs/ext2/acl.c
index d46e38c..d636e12 100644
--- a/fs/ext2/acl.c
+++ b/fs/ext2/acl.c
@@ -125,37 +125,12 @@
return ERR_PTR(-EINVAL);
}
-static inline struct posix_acl *
-ext2_iget_acl(struct inode *inode, struct posix_acl **i_acl)
-{
- struct posix_acl *acl = EXT2_ACL_NOT_CACHED;
-
- spin_lock(&inode->i_lock);
- if (*i_acl != EXT2_ACL_NOT_CACHED)
- acl = posix_acl_dup(*i_acl);
- spin_unlock(&inode->i_lock);
-
- return acl;
-}
-
-static inline void
-ext2_iset_acl(struct inode *inode, struct posix_acl **i_acl,
- struct posix_acl *acl)
-{
- spin_lock(&inode->i_lock);
- if (*i_acl != EXT2_ACL_NOT_CACHED)
- posix_acl_release(*i_acl);
- *i_acl = posix_acl_dup(acl);
- spin_unlock(&inode->i_lock);
-}
-
/*
* inode->i_mutex: don't care
*/
static struct posix_acl *
ext2_get_acl(struct inode *inode, int type)
{
- struct ext2_inode_info *ei = EXT2_I(inode);
int name_index;
char *value = NULL;
struct posix_acl *acl;
@@ -164,23 +139,19 @@
if (!test_opt(inode->i_sb, POSIX_ACL))
return NULL;
- switch(type) {
- case ACL_TYPE_ACCESS:
- acl = ext2_iget_acl(inode, &ei->i_acl);
- if (acl != EXT2_ACL_NOT_CACHED)
- return acl;
- name_index = EXT2_XATTR_INDEX_POSIX_ACL_ACCESS;
- break;
+ acl = get_cached_acl(inode, type);
+ if (acl != ACL_NOT_CACHED)
+ return acl;
- case ACL_TYPE_DEFAULT:
- acl = ext2_iget_acl(inode, &ei->i_default_acl);
- if (acl != EXT2_ACL_NOT_CACHED)
- return acl;
- name_index = EXT2_XATTR_INDEX_POSIX_ACL_DEFAULT;
- break;
-
- default:
- return ERR_PTR(-EINVAL);
+ switch (type) {
+ case ACL_TYPE_ACCESS:
+ name_index = EXT2_XATTR_INDEX_POSIX_ACL_ACCESS;
+ break;
+ case ACL_TYPE_DEFAULT:
+ name_index = EXT2_XATTR_INDEX_POSIX_ACL_DEFAULT;
+ break;
+ default:
+ BUG();
}
retval = ext2_xattr_get(inode, name_index, "", NULL, 0);
if (retval > 0) {
@@ -197,17 +168,9 @@
acl = ERR_PTR(retval);
kfree(value);
- if (!IS_ERR(acl)) {
- switch(type) {
- case ACL_TYPE_ACCESS:
- ext2_iset_acl(inode, &ei->i_acl, acl);
- break;
+ if (!IS_ERR(acl))
+ set_cached_acl(inode, type, acl);
- case ACL_TYPE_DEFAULT:
- ext2_iset_acl(inode, &ei->i_default_acl, acl);
- break;
- }
- }
return acl;
}
@@ -217,7 +180,6 @@
static int
ext2_set_acl(struct inode *inode, int type, struct posix_acl *acl)
{
- struct ext2_inode_info *ei = EXT2_I(inode);
int name_index;
void *value = NULL;
size_t size = 0;
@@ -263,17 +225,8 @@
error = ext2_xattr_set(inode, name_index, "", value, size, 0);
kfree(value);
- if (!error) {
- switch(type) {
- case ACL_TYPE_ACCESS:
- ext2_iset_acl(inode, &ei->i_acl, acl);
- break;
-
- case ACL_TYPE_DEFAULT:
- ext2_iset_acl(inode, &ei->i_default_acl, acl);
- break;
- }
- }
+ if (!error)
+ set_cached_acl(inode, type, acl);
return error;
}
diff --git a/fs/ext2/acl.h b/fs/ext2/acl.h
index b42cf57..ecefe47 100644
--- a/fs/ext2/acl.h
+++ b/fs/ext2/acl.h
@@ -53,10 +53,6 @@
#ifdef CONFIG_EXT2_FS_POSIX_ACL
-/* Value for inode->u.ext2_i.i_acl and inode->u.ext2_i.i_default_acl
- if the ACL has not been cached */
-#define EXT2_ACL_NOT_CACHED ((void *)-1)
-
/* acl.c */
extern int ext2_permission (struct inode *, int);
extern int ext2_acl_chmod (struct inode *);
diff --git a/fs/ext2/ext2.h b/fs/ext2/ext2.h
index d988a71..9a8a8e2 100644
--- a/fs/ext2/ext2.h
+++ b/fs/ext2/ext2.h
@@ -47,10 +47,6 @@
*/
struct rw_semaphore xattr_sem;
#endif
-#ifdef CONFIG_EXT2_FS_POSIX_ACL
- struct posix_acl *i_acl;
- struct posix_acl *i_default_acl;
-#endif
rwlock_t i_meta_lock;
/*
diff --git a/fs/ext2/inode.c b/fs/ext2/inode.c
index 29ed682..e271303 100644
--- a/fs/ext2/inode.c
+++ b/fs/ext2/inode.c
@@ -1224,10 +1224,6 @@
return inode;
ei = EXT2_I(inode);
-#ifdef CONFIG_EXT2_FS_POSIX_ACL
- ei->i_acl = EXT2_ACL_NOT_CACHED;
- ei->i_default_acl = EXT2_ACL_NOT_CACHED;
-#endif
ei->i_block_alloc_info = NULL;
raw_inode = ext2_get_inode(inode->i_sb, ino, &bh);
diff --git a/fs/ext2/super.c b/fs/ext2/super.c
index 4589996..1a9ffee 100644
--- a/fs/ext2/super.c
+++ b/fs/ext2/super.c
@@ -152,10 +152,6 @@
ei = (struct ext2_inode_info *)kmem_cache_alloc(ext2_inode_cachep, GFP_KERNEL);
if (!ei)
return NULL;
-#ifdef CONFIG_EXT2_FS_POSIX_ACL
- ei->i_acl = EXT2_ACL_NOT_CACHED;
- ei->i_default_acl = EXT2_ACL_NOT_CACHED;
-#endif
ei->i_block_alloc_info = NULL;
ei->vfs_inode.i_version = 1;
return &ei->vfs_inode;
@@ -198,18 +194,6 @@
static void ext2_clear_inode(struct inode *inode)
{
struct ext2_block_alloc_info *rsv = EXT2_I(inode)->i_block_alloc_info;
-#ifdef CONFIG_EXT2_FS_POSIX_ACL
- struct ext2_inode_info *ei = EXT2_I(inode);
-
- if (ei->i_acl && ei->i_acl != EXT2_ACL_NOT_CACHED) {
- posix_acl_release(ei->i_acl);
- ei->i_acl = EXT2_ACL_NOT_CACHED;
- }
- if (ei->i_default_acl && ei->i_default_acl != EXT2_ACL_NOT_CACHED) {
- posix_acl_release(ei->i_default_acl);
- ei->i_default_acl = EXT2_ACL_NOT_CACHED;
- }
-#endif
ext2_discard_reservation(inode);
EXT2_I(inode)->i_block_alloc_info = NULL;
if (unlikely(rsv))
diff --git a/fs/ext3/acl.c b/fs/ext3/acl.c
index e0c7454..e167bae 100644
--- a/fs/ext3/acl.c
+++ b/fs/ext3/acl.c
@@ -126,33 +126,6 @@
return ERR_PTR(-EINVAL);
}
-static inline struct posix_acl *
-ext3_iget_acl(struct inode *inode, struct posix_acl **i_acl)
-{
- struct posix_acl *acl = ACCESS_ONCE(*i_acl);
-
- if (acl) {
- spin_lock(&inode->i_lock);
- acl = *i_acl;
- if (acl != EXT3_ACL_NOT_CACHED)
- acl = posix_acl_dup(acl);
- spin_unlock(&inode->i_lock);
- }
-
- return acl;
-}
-
-static inline void
-ext3_iset_acl(struct inode *inode, struct posix_acl **i_acl,
- struct posix_acl *acl)
-{
- spin_lock(&inode->i_lock);
- if (*i_acl != EXT3_ACL_NOT_CACHED)
- posix_acl_release(*i_acl);
- *i_acl = posix_acl_dup(acl);
- spin_unlock(&inode->i_lock);
-}
-
/*
* Inode operation get_posix_acl().
*
@@ -161,7 +134,6 @@
static struct posix_acl *
ext3_get_acl(struct inode *inode, int type)
{
- struct ext3_inode_info *ei = EXT3_I(inode);
int name_index;
char *value = NULL;
struct posix_acl *acl;
@@ -170,24 +142,21 @@
if (!test_opt(inode->i_sb, POSIX_ACL))
return NULL;
- switch(type) {
- case ACL_TYPE_ACCESS:
- acl = ext3_iget_acl(inode, &ei->i_acl);
- if (acl != EXT3_ACL_NOT_CACHED)
- return acl;
- name_index = EXT3_XATTR_INDEX_POSIX_ACL_ACCESS;
- break;
+ acl = get_cached_acl(inode, type);
+ if (acl != ACL_NOT_CACHED)
+ return acl;
- case ACL_TYPE_DEFAULT:
- acl = ext3_iget_acl(inode, &ei->i_default_acl);
- if (acl != EXT3_ACL_NOT_CACHED)
- return acl;
- name_index = EXT3_XATTR_INDEX_POSIX_ACL_DEFAULT;
- break;
-
- default:
- return ERR_PTR(-EINVAL);
+ switch (type) {
+ case ACL_TYPE_ACCESS:
+ name_index = EXT3_XATTR_INDEX_POSIX_ACL_ACCESS;
+ break;
+ case ACL_TYPE_DEFAULT:
+ name_index = EXT3_XATTR_INDEX_POSIX_ACL_DEFAULT;
+ break;
+ default:
+ BUG();
}
+
retval = ext3_xattr_get(inode, name_index, "", NULL, 0);
if (retval > 0) {
value = kmalloc(retval, GFP_NOFS);
@@ -203,17 +172,9 @@
acl = ERR_PTR(retval);
kfree(value);
- if (!IS_ERR(acl)) {
- switch(type) {
- case ACL_TYPE_ACCESS:
- ext3_iset_acl(inode, &ei->i_acl, acl);
- break;
+ if (!IS_ERR(acl))
+ set_cached_acl(inode, type, acl);
- case ACL_TYPE_DEFAULT:
- ext3_iset_acl(inode, &ei->i_default_acl, acl);
- break;
- }
- }
return acl;
}
@@ -226,7 +187,6 @@
ext3_set_acl(handle_t *handle, struct inode *inode, int type,
struct posix_acl *acl)
{
- struct ext3_inode_info *ei = EXT3_I(inode);
int name_index;
void *value = NULL;
size_t size = 0;
@@ -271,17 +231,10 @@
value, size, 0);
kfree(value);
- if (!error) {
- switch(type) {
- case ACL_TYPE_ACCESS:
- ext3_iset_acl(inode, &ei->i_acl, acl);
- break;
- case ACL_TYPE_DEFAULT:
- ext3_iset_acl(inode, &ei->i_default_acl, acl);
- break;
- }
- }
+ if (!error)
+ set_cached_acl(inode, type, acl);
+
return error;
}
diff --git a/fs/ext3/acl.h b/fs/ext3/acl.h
index 42da16b..07d15a3 100644
--- a/fs/ext3/acl.h
+++ b/fs/ext3/acl.h
@@ -53,10 +53,6 @@
#ifdef CONFIG_EXT3_FS_POSIX_ACL
-/* Value for inode->u.ext3_i.i_acl and inode->u.ext3_i.i_default_acl
- if the ACL has not been cached */
-#define EXT3_ACL_NOT_CACHED ((void *)-1)
-
/* acl.c */
extern int ext3_permission (struct inode *, int);
extern int ext3_acl_chmod (struct inode *);
diff --git a/fs/ext3/inode.c b/fs/ext3/inode.c
index 05dea81..5f51fed 100644
--- a/fs/ext3/inode.c
+++ b/fs/ext3/inode.c
@@ -2752,10 +2752,6 @@
return inode;
ei = EXT3_I(inode);
-#ifdef CONFIG_EXT3_FS_POSIX_ACL
- ei->i_acl = EXT3_ACL_NOT_CACHED;
- ei->i_default_acl = EXT3_ACL_NOT_CACHED;
-#endif
ei->i_block_alloc_info = NULL;
ret = __ext3_get_inode_loc(inode, &iloc, 0);
diff --git a/fs/ext3/super.c b/fs/ext3/super.c
index 601e881..524b349 100644
--- a/fs/ext3/super.c
+++ b/fs/ext3/super.c
@@ -464,10 +464,6 @@
ei = kmem_cache_alloc(ext3_inode_cachep, GFP_NOFS);
if (!ei)
return NULL;
-#ifdef CONFIG_EXT3_FS_POSIX_ACL
- ei->i_acl = EXT3_ACL_NOT_CACHED;
- ei->i_default_acl = EXT3_ACL_NOT_CACHED;
-#endif
ei->i_block_alloc_info = NULL;
ei->vfs_inode.i_version = 1;
return &ei->vfs_inode;
@@ -518,18 +514,6 @@
static void ext3_clear_inode(struct inode *inode)
{
struct ext3_block_alloc_info *rsv = EXT3_I(inode)->i_block_alloc_info;
-#ifdef CONFIG_EXT3_FS_POSIX_ACL
- if (EXT3_I(inode)->i_acl &&
- EXT3_I(inode)->i_acl != EXT3_ACL_NOT_CACHED) {
- posix_acl_release(EXT3_I(inode)->i_acl);
- EXT3_I(inode)->i_acl = EXT3_ACL_NOT_CACHED;
- }
- if (EXT3_I(inode)->i_default_acl &&
- EXT3_I(inode)->i_default_acl != EXT3_ACL_NOT_CACHED) {
- posix_acl_release(EXT3_I(inode)->i_default_acl);
- EXT3_I(inode)->i_default_acl = EXT3_ACL_NOT_CACHED;
- }
-#endif
ext3_discard_reservation(inode);
EXT3_I(inode)->i_block_alloc_info = NULL;
if (unlikely(rsv))
diff --git a/fs/ext4/acl.c b/fs/ext4/acl.c
index 605aeed..f6d8967 100644
--- a/fs/ext4/acl.c
+++ b/fs/ext4/acl.c
@@ -126,33 +126,6 @@
return ERR_PTR(-EINVAL);
}
-static inline struct posix_acl *
-ext4_iget_acl(struct inode *inode, struct posix_acl **i_acl)
-{
- struct posix_acl *acl = ACCESS_ONCE(*i_acl);
-
- if (acl) {
- spin_lock(&inode->i_lock);
- acl = *i_acl;
- if (acl != EXT4_ACL_NOT_CACHED)
- acl = posix_acl_dup(acl);
- spin_unlock(&inode->i_lock);
- }
-
- return acl;
-}
-
-static inline void
-ext4_iset_acl(struct inode *inode, struct posix_acl **i_acl,
- struct posix_acl *acl)
-{
- spin_lock(&inode->i_lock);
- if (*i_acl != EXT4_ACL_NOT_CACHED)
- posix_acl_release(*i_acl);
- *i_acl = posix_acl_dup(acl);
- spin_unlock(&inode->i_lock);
-}
-
/*
* Inode operation get_posix_acl().
*
@@ -161,7 +134,6 @@
static struct posix_acl *
ext4_get_acl(struct inode *inode, int type)
{
- struct ext4_inode_info *ei = EXT4_I(inode);
int name_index;
char *value = NULL;
struct posix_acl *acl;
@@ -170,23 +142,19 @@
if (!test_opt(inode->i_sb, POSIX_ACL))
return NULL;
+ acl = get_cached_acl(inode, type);
+ if (acl != ACL_NOT_CACHED)
+ return acl;
+
switch (type) {
case ACL_TYPE_ACCESS:
- acl = ext4_iget_acl(inode, &ei->i_acl);
- if (acl != EXT4_ACL_NOT_CACHED)
- return acl;
name_index = EXT4_XATTR_INDEX_POSIX_ACL_ACCESS;
break;
-
case ACL_TYPE_DEFAULT:
- acl = ext4_iget_acl(inode, &ei->i_default_acl);
- if (acl != EXT4_ACL_NOT_CACHED)
- return acl;
name_index = EXT4_XATTR_INDEX_POSIX_ACL_DEFAULT;
break;
-
default:
- return ERR_PTR(-EINVAL);
+ BUG();
}
retval = ext4_xattr_get(inode, name_index, "", NULL, 0);
if (retval > 0) {
@@ -203,17 +171,9 @@
acl = ERR_PTR(retval);
kfree(value);
- if (!IS_ERR(acl)) {
- switch (type) {
- case ACL_TYPE_ACCESS:
- ext4_iset_acl(inode, &ei->i_acl, acl);
- break;
+ if (!IS_ERR(acl))
+ set_cached_acl(inode, type, acl);
- case ACL_TYPE_DEFAULT:
- ext4_iset_acl(inode, &ei->i_default_acl, acl);
- break;
- }
- }
return acl;
}
@@ -226,7 +186,6 @@
ext4_set_acl(handle_t *handle, struct inode *inode, int type,
struct posix_acl *acl)
{
- struct ext4_inode_info *ei = EXT4_I(inode);
int name_index;
void *value = NULL;
size_t size = 0;
@@ -271,17 +230,9 @@
value, size, 0);
kfree(value);
- if (!error) {
- switch (type) {
- case ACL_TYPE_ACCESS:
- ext4_iset_acl(inode, &ei->i_acl, acl);
- break;
+ if (!error)
+ set_cached_acl(inode, type, acl);
- case ACL_TYPE_DEFAULT:
- ext4_iset_acl(inode, &ei->i_default_acl, acl);
- break;
- }
- }
return error;
}
diff --git a/fs/ext4/acl.h b/fs/ext4/acl.h
index cb45257..949789d 100644
--- a/fs/ext4/acl.h
+++ b/fs/ext4/acl.h
@@ -53,10 +53,6 @@
#ifdef CONFIG_EXT4_FS_POSIX_ACL
-/* Value for inode->u.ext4_i.i_acl and inode->u.ext4_i.i_default_acl
- if the ACL has not been cached */
-#define EXT4_ACL_NOT_CACHED ((void *)-1)
-
/* acl.c */
extern int ext4_permission(struct inode *, int);
extern int ext4_acl_chmod(struct inode *);
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
index 17b9998..0ddf7e5 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -595,10 +595,6 @@
*/
struct rw_semaphore xattr_sem;
#endif
-#ifdef CONFIG_EXT4_FS_POSIX_ACL
- struct posix_acl *i_acl;
- struct posix_acl *i_default_acl;
-#endif
struct list_head i_orphan; /* unlinked but open inodes */
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 7c17ae2..60a26f3 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -4453,10 +4453,6 @@
return inode;
ei = EXT4_I(inode);
-#ifdef CONFIG_EXT4_FS_POSIX_ACL
- ei->i_acl = EXT4_ACL_NOT_CACHED;
- ei->i_default_acl = EXT4_ACL_NOT_CACHED;
-#endif
ret = __ext4_get_inode_loc(inode, &iloc, 0);
if (ret < 0)
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index 8bb9e2d..8f4f079 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -666,10 +666,6 @@
if (!ei)
return NULL;
-#ifdef CONFIG_EXT4_FS_POSIX_ACL
- ei->i_acl = EXT4_ACL_NOT_CACHED;
- ei->i_default_acl = EXT4_ACL_NOT_CACHED;
-#endif
ei->vfs_inode.i_version = 1;
ei->vfs_inode.i_data.writeback_index = 0;
memset(&ei->i_cached_extent, 0, sizeof(struct ext4_ext_cache));
@@ -735,18 +731,6 @@
static void ext4_clear_inode(struct inode *inode)
{
-#ifdef CONFIG_EXT4_FS_POSIX_ACL
- if (EXT4_I(inode)->i_acl &&
- EXT4_I(inode)->i_acl != EXT4_ACL_NOT_CACHED) {
- posix_acl_release(EXT4_I(inode)->i_acl);
- EXT4_I(inode)->i_acl = EXT4_ACL_NOT_CACHED;
- }
- if (EXT4_I(inode)->i_default_acl &&
- EXT4_I(inode)->i_default_acl != EXT4_ACL_NOT_CACHED) {
- posix_acl_release(EXT4_I(inode)->i_default_acl);
- EXT4_I(inode)->i_default_acl = EXT4_ACL_NOT_CACHED;
- }
-#endif
ext4_discard_preallocations(inode);
if (EXT4_JOURNAL(inode))
jbd2_journal_release_jbd_inode(EXT4_SB(inode->i_sb)->s_journal,
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index caf0491..c54226b 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -278,7 +278,26 @@
EXPORT_SYMBOL(sb_has_dirty_inodes);
/*
- * Write a single inode's dirty pages and inode data out to disk.
+ * Wait for writeback on an inode to complete.
+ */
+static void inode_wait_for_writeback(struct inode *inode)
+{
+ DEFINE_WAIT_BIT(wq, &inode->i_state, __I_SYNC);
+ wait_queue_head_t *wqh;
+
+ wqh = bit_waitqueue(&inode->i_state, __I_SYNC);
+ do {
+ spin_unlock(&inode_lock);
+ __wait_on_bit(wqh, &wq, inode_wait, TASK_UNINTERRUPTIBLE);
+ spin_lock(&inode_lock);
+ } while (inode->i_state & I_SYNC);
+}
+
+/*
+ * Write out an inode's dirty pages. Called under inode_lock. Either the
+ * caller has ref on the inode (either via __iget or via syscall against an fd)
+ * or the inode has I_WILL_FREE set (via generic_forget_inode)
+ *
* If `wait' is set, wait on the writeout.
*
* The whole writeout design is quite complex and fragile. We want to avoid
@@ -288,13 +307,38 @@
* Called under inode_lock.
*/
static int
-__sync_single_inode(struct inode *inode, struct writeback_control *wbc)
+writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
{
- unsigned dirty;
struct address_space *mapping = inode->i_mapping;
int wait = wbc->sync_mode == WB_SYNC_ALL;
+ unsigned dirty;
int ret;
+ if (!atomic_read(&inode->i_count))
+ WARN_ON(!(inode->i_state & (I_WILL_FREE|I_FREEING)));
+ else
+ WARN_ON(inode->i_state & I_WILL_FREE);
+
+ if (inode->i_state & I_SYNC) {
+ /*
+ * If this inode is locked for writeback and we are not doing
+ * writeback-for-data-integrity, move it to s_more_io so that
+ * writeback can proceed with the other inodes on s_io.
+ *
+ * We'll have another go at writing back this inode when we
+ * completed a full scan of s_io.
+ */
+ if (!wait) {
+ requeue_io(inode);
+ return 0;
+ }
+
+ /*
+ * It's a data-integrity sync. We must wait.
+ */
+ inode_wait_for_writeback(inode);
+ }
+
BUG_ON(inode->i_state & I_SYNC);
/* Set I_SYNC, reset I_DIRTY */
@@ -390,50 +434,6 @@
}
/*
- * Write out an inode's dirty pages. Called under inode_lock. Either the
- * caller has ref on the inode (either via __iget or via syscall against an fd)
- * or the inode has I_WILL_FREE set (via generic_forget_inode)
- */
-static int
-__writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
-{
- wait_queue_head_t *wqh;
-
- if (!atomic_read(&inode->i_count))
- WARN_ON(!(inode->i_state & (I_WILL_FREE|I_FREEING)));
- else
- WARN_ON(inode->i_state & I_WILL_FREE);
-
- if ((wbc->sync_mode != WB_SYNC_ALL) && (inode->i_state & I_SYNC)) {
- /*
- * We're skipping this inode because it's locked, and we're not
- * doing writeback-for-data-integrity. Move it to s_more_io so
- * that writeback can proceed with the other inodes on s_io.
- * We'll have another go at writing back this inode when we
- * completed a full scan of s_io.
- */
- requeue_io(inode);
- return 0;
- }
-
- /*
- * It's a data-integrity sync. We must wait.
- */
- if (inode->i_state & I_SYNC) {
- DEFINE_WAIT_BIT(wq, &inode->i_state, __I_SYNC);
-
- wqh = bit_waitqueue(&inode->i_state, __I_SYNC);
- do {
- spin_unlock(&inode_lock);
- __wait_on_bit(wqh, &wq, inode_wait,
- TASK_UNINTERRUPTIBLE);
- spin_lock(&inode_lock);
- } while (inode->i_state & I_SYNC);
- }
- return __sync_single_inode(inode, wbc);
-}
-
-/*
* Write out a superblock's list of dirty inodes. A wait will be performed
* upon no inodes, all inodes or the final one, depending upon sync_mode.
*
@@ -526,7 +526,7 @@
BUG_ON(inode->i_state & (I_FREEING | I_CLEAR));
__iget(inode);
pages_skipped = wbc->pages_skipped;
- __writeback_single_inode(inode, wbc);
+ writeback_single_inode(inode, wbc);
if (current_is_pdflush())
writeback_release(bdi);
if (wbc->pages_skipped != pages_skipped) {
@@ -708,7 +708,7 @@
might_sleep();
spin_lock(&inode_lock);
- ret = __writeback_single_inode(inode, &wbc);
+ ret = writeback_single_inode(inode, &wbc);
spin_unlock(&inode_lock);
if (sync)
inode_sync_wait(inode);
@@ -732,7 +732,7 @@
int ret;
spin_lock(&inode_lock);
- ret = __writeback_single_inode(inode, wbc);
+ ret = writeback_single_inode(inode, wbc);
spin_unlock(&inode_lock);
return ret;
}
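The writeback rework above folds the old wait-for-I_SYNC logic into inode_wait_for_writeback(): drop inode_lock, sleep until the in-flight writeback clears, retake the lock and re-check. A rough userspace analogue of that wait pattern (illustrative only; all names are invented and pthreads stand in for the kernel's wait-on-bit machinery):

    #include <pthread.h>
    #include <stdio.h>
    #include <unistd.h>

    #define OBJ_SYNC 0x1

    static pthread_mutex_t obj_lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t  obj_done = PTHREAD_COND_INITIALIZER;
    static unsigned int obj_state = OBJ_SYNC;

    /* caller holds obj_lock, just as the kernel caller holds inode_lock */
    static void obj_wait_for_writeback(void)
    {
    	while (obj_state & OBJ_SYNC)
    		pthread_cond_wait(&obj_done, &obj_lock); /* drops + retakes lock */
    }

    static void *writeback_thread(void *arg)
    {
    	sleep(1);			/* pretend to write the pages out */
    	pthread_mutex_lock(&obj_lock);
    	obj_state &= ~OBJ_SYNC;
    	pthread_cond_broadcast(&obj_done);
    	pthread_mutex_unlock(&obj_lock);
    	return NULL;
    }

    int main(void)
    {
    	pthread_t t;

    	pthread_create(&t, NULL, writeback_thread, NULL);
    	pthread_mutex_lock(&obj_lock);
    	obj_wait_for_writeback();	/* data-integrity sync: we must wait */
    	pthread_mutex_unlock(&obj_lock);
    	pthread_join(t, NULL);
    	printf("writeback complete\n");
    	return 0;
    }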
diff --git a/fs/inode.c b/fs/inode.c
index f643be5..901bad1 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -25,6 +25,7 @@
#include <linux/fsnotify.h>
#include <linux/mount.h>
#include <linux/async.h>
+#include <linux/posix_acl.h>
/*
* This is needed for the following functions:
@@ -189,6 +190,9 @@
}
inode->i_private = NULL;
inode->i_mapping = mapping;
+#ifdef CONFIG_FS_POSIX_ACL
+ inode->i_acl = inode->i_default_acl = ACL_NOT_CACHED;
+#endif
#ifdef CONFIG_FSNOTIFY
inode->i_fsnotify_mask = 0;
@@ -227,6 +231,12 @@
ima_inode_free(inode);
security_inode_free(inode);
fsnotify_inode_delete(inode);
+#ifdef CONFIG_FS_POSIX_ACL
+ if (inode->i_acl && inode->i_acl != ACL_NOT_CACHED)
+ posix_acl_release(inode->i_acl);
+ if (inode->i_default_acl && inode->i_default_acl != ACL_NOT_CACHED)
+ posix_acl_release(inode->i_default_acl);
+#endif
if (inode->i_sb->s_op->destroy_inode)
inode->i_sb->s_op->destroy_inode(inode);
else
@@ -665,12 +675,17 @@
if (inode->i_mode & S_IFDIR) {
struct file_system_type *type = inode->i_sb->s_type;
- /*
- * ensure nobody is actually holding i_mutex
- */
- mutex_destroy(&inode->i_mutex);
- mutex_init(&inode->i_mutex);
- lockdep_set_class(&inode->i_mutex, &type->i_mutex_dir_key);
+ /* Set new key only if filesystem hasn't already changed it */
+ if (!lockdep_match_class(&inode->i_mutex,
+ &type->i_mutex_key)) {
+ /*
+ * ensure nobody is actually holding i_mutex
+ */
+ mutex_destroy(&inode->i_mutex);
+ mutex_init(&inode->i_mutex);
+ lockdep_set_class(&inode->i_mutex,
+ &type->i_mutex_dir_key);
+ }
}
#endif
/*
diff --git a/fs/ioctl.c b/fs/ioctl.c
index 001f8d3..5612880 100644
--- a/fs/ioctl.c
+++ b/fs/ioctl.c
@@ -15,6 +15,7 @@
#include <linux/uaccess.h>
#include <linux/writeback.h>
#include <linux/buffer_head.h>
+#include <linux/falloc.h>
#include <asm/ioctls.h>
@@ -403,6 +404,37 @@
#endif /* CONFIG_BLOCK */
+/*
+ * This provides compatibility with legacy XFS pre-allocation ioctls
+ * which predate the fallocate syscall.
+ *
+ * Only the l_start, l_len and l_whence fields of the 'struct space_resv'
+ * are used here; the rest are ignored.
+ */
+int ioctl_preallocate(struct file *filp, void __user *argp)
+{
+ struct inode *inode = filp->f_path.dentry->d_inode;
+ struct space_resv sr;
+
+ if (copy_from_user(&sr, argp, sizeof(sr)))
+ return -EFAULT;
+
+ switch (sr.l_whence) {
+ case SEEK_SET:
+ break;
+ case SEEK_CUR:
+ sr.l_start += filp->f_pos;
+ break;
+ case SEEK_END:
+ sr.l_start += i_size_read(inode);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return do_fallocate(filp, FALLOC_FL_KEEP_SIZE, sr.l_start, sr.l_len);
+}
+
static int file_ioctl(struct file *filp, unsigned int cmd,
unsigned long arg)
{
@@ -414,6 +446,9 @@
return ioctl_fibmap(filp, p);
case FIONREAD:
return put_user(i_size_read(inode) - filp->f_pos, p);
+ case FS_IOC_RESVSP:
+ case FS_IOC_RESVSP64:
+ return ioctl_preallocate(filp, p);
}
return vfs_ioctl(filp, cmd, arg);
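Since the legacy reservation ioctls above are now routed through do_fallocate(FALLOC_FL_KEEP_SIZE, ...), the modern userspace equivalent is a plain fallocate(2) call. A minimal sketch (the file name is arbitrary):

    #define _GNU_SOURCE
    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>
    #include <linux/falloc.h>

    int main(void)
    {
    	int fd = open("testfile", O_RDWR | O_CREAT, 0644);

    	if (fd < 0) {
    		perror("open");
    		return 1;
    	}
    	/* reserve 1 MiB at offset 0 without growing i_size */
    	if (fallocate(fd, FALLOC_FL_KEEP_SIZE, 0, 1 << 20) < 0)
    		perror("fallocate");
    	close(fd);
    	return 0;
    }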
diff --git a/fs/jffs2/acl.c b/fs/jffs2/acl.c
index 043740d..8fcb623 100644
--- a/fs/jffs2/acl.c
+++ b/fs/jffs2/acl.c
@@ -156,48 +156,25 @@
return ERR_PTR(-EINVAL);
}
-static struct posix_acl *jffs2_iget_acl(struct inode *inode, struct posix_acl **i_acl)
-{
- struct posix_acl *acl = JFFS2_ACL_NOT_CACHED;
-
- spin_lock(&inode->i_lock);
- if (*i_acl != JFFS2_ACL_NOT_CACHED)
- acl = posix_acl_dup(*i_acl);
- spin_unlock(&inode->i_lock);
- return acl;
-}
-
-static void jffs2_iset_acl(struct inode *inode, struct posix_acl **i_acl, struct posix_acl *acl)
-{
- spin_lock(&inode->i_lock);
- if (*i_acl != JFFS2_ACL_NOT_CACHED)
- posix_acl_release(*i_acl);
- *i_acl = posix_acl_dup(acl);
- spin_unlock(&inode->i_lock);
-}
-
static struct posix_acl *jffs2_get_acl(struct inode *inode, int type)
{
- struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode);
struct posix_acl *acl;
char *value = NULL;
int rc, xprefix;
+ acl = get_cached_acl(inode, type);
+ if (acl != ACL_NOT_CACHED)
+ return acl;
+
switch (type) {
case ACL_TYPE_ACCESS:
- acl = jffs2_iget_acl(inode, &f->i_acl_access);
- if (acl != JFFS2_ACL_NOT_CACHED)
- return acl;
xprefix = JFFS2_XPREFIX_ACL_ACCESS;
break;
case ACL_TYPE_DEFAULT:
- acl = jffs2_iget_acl(inode, &f->i_acl_default);
- if (acl != JFFS2_ACL_NOT_CACHED)
- return acl;
xprefix = JFFS2_XPREFIX_ACL_DEFAULT;
break;
default:
- return ERR_PTR(-EINVAL);
+ BUG();
}
rc = do_jffs2_getxattr(inode, xprefix, "", NULL, 0);
if (rc > 0) {
@@ -215,16 +192,8 @@
}
if (value)
kfree(value);
- if (!IS_ERR(acl)) {
- switch (type) {
- case ACL_TYPE_ACCESS:
- jffs2_iset_acl(inode, &f->i_acl_access, acl);
- break;
- case ACL_TYPE_DEFAULT:
- jffs2_iset_acl(inode, &f->i_acl_default, acl);
- break;
- }
- }
+ if (!IS_ERR(acl))
+ set_cached_acl(inode, type, acl);
return acl;
}
@@ -249,7 +218,6 @@
static int jffs2_set_acl(struct inode *inode, int type, struct posix_acl *acl)
{
- struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode);
int rc, xprefix;
if (S_ISLNK(inode->i_mode))
@@ -285,16 +253,8 @@
return -EINVAL;
}
rc = __jffs2_set_acl(inode, xprefix, acl);
- if (!rc) {
- switch(type) {
- case ACL_TYPE_ACCESS:
- jffs2_iset_acl(inode, &f->i_acl_access, acl);
- break;
- case ACL_TYPE_DEFAULT:
- jffs2_iset_acl(inode, &f->i_acl_default, acl);
- break;
- }
- }
+ if (!rc)
+ set_cached_acl(inode, type, acl);
return rc;
}
@@ -321,12 +281,10 @@
int jffs2_init_acl_pre(struct inode *dir_i, struct inode *inode, int *i_mode)
{
- struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode);
struct posix_acl *acl, *clone;
int rc;
- f->i_acl_default = NULL;
- f->i_acl_access = NULL;
+ cache_no_acl(inode);
if (S_ISLNK(*i_mode))
return 0; /* Symlink always has no-ACL */
@@ -339,7 +297,7 @@
*i_mode &= ~current_umask();
} else {
if (S_ISDIR(*i_mode))
- jffs2_iset_acl(inode, &f->i_acl_default, acl);
+ set_cached_acl(inode, ACL_TYPE_DEFAULT, acl);
clone = posix_acl_clone(acl, GFP_KERNEL);
if (!clone)
@@ -350,7 +308,7 @@
return rc;
}
if (rc > 0)
- jffs2_iset_acl(inode, &f->i_acl_access, clone);
+ set_cached_acl(inode, ACL_TYPE_ACCESS, clone);
posix_acl_release(clone);
}
@@ -359,17 +317,16 @@
int jffs2_init_acl_post(struct inode *inode)
{
- struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode);
int rc;
- if (f->i_acl_default) {
- rc = __jffs2_set_acl(inode, JFFS2_XPREFIX_ACL_DEFAULT, f->i_acl_default);
+ if (inode->i_default_acl) {
+ rc = __jffs2_set_acl(inode, JFFS2_XPREFIX_ACL_DEFAULT, inode->i_default_acl);
if (rc)
return rc;
}
- if (f->i_acl_access) {
- rc = __jffs2_set_acl(inode, JFFS2_XPREFIX_ACL_ACCESS, f->i_acl_access);
+ if (inode->i_acl) {
+ rc = __jffs2_set_acl(inode, JFFS2_XPREFIX_ACL_ACCESS, inode->i_acl);
if (rc)
return rc;
}
@@ -377,18 +334,6 @@
return 0;
}
-void jffs2_clear_acl(struct jffs2_inode_info *f)
-{
- if (f->i_acl_access && f->i_acl_access != JFFS2_ACL_NOT_CACHED) {
- posix_acl_release(f->i_acl_access);
- f->i_acl_access = JFFS2_ACL_NOT_CACHED;
- }
- if (f->i_acl_default && f->i_acl_default != JFFS2_ACL_NOT_CACHED) {
- posix_acl_release(f->i_acl_default);
- f->i_acl_default = JFFS2_ACL_NOT_CACHED;
- }
-}
-
int jffs2_acl_chmod(struct inode *inode)
{
struct posix_acl *acl, *clone;
diff --git a/fs/jffs2/acl.h b/fs/jffs2/acl.h
index 8ca058a..fc929f2 100644
--- a/fs/jffs2/acl.h
+++ b/fs/jffs2/acl.h
@@ -26,13 +26,10 @@
#ifdef CONFIG_JFFS2_FS_POSIX_ACL
-#define JFFS2_ACL_NOT_CACHED ((void *)-1)
-
extern int jffs2_permission(struct inode *, int);
extern int jffs2_acl_chmod(struct inode *);
extern int jffs2_init_acl_pre(struct inode *, struct inode *, int *);
extern int jffs2_init_acl_post(struct inode *);
-extern void jffs2_clear_acl(struct jffs2_inode_info *);
extern struct xattr_handler jffs2_acl_access_xattr_handler;
extern struct xattr_handler jffs2_acl_default_xattr_handler;
@@ -43,6 +40,5 @@
#define jffs2_acl_chmod(inode) (0)
#define jffs2_init_acl_pre(dir_i,inode,mode) (0)
#define jffs2_init_acl_post(inode) (0)
-#define jffs2_clear_acl(f)
#endif /* CONFIG_JFFS2_FS_POSIX_ACL */
diff --git a/fs/jffs2/jffs2_fs_i.h b/fs/jffs2/jffs2_fs_i.h
index 4c41db9..c6923da 100644
--- a/fs/jffs2/jffs2_fs_i.h
+++ b/fs/jffs2/jffs2_fs_i.h
@@ -50,10 +50,6 @@
uint16_t flags;
uint8_t usercompr;
struct inode vfs_inode;
-#ifdef CONFIG_JFFS2_FS_POSIX_ACL
- struct posix_acl *i_acl_access;
- struct posix_acl *i_acl_default;
-#endif
};
#endif /* _JFFS2_FS_I */
diff --git a/fs/jffs2/os-linux.h b/fs/jffs2/os-linux.h
index 2228380..a7f03b7 100644
--- a/fs/jffs2/os-linux.h
+++ b/fs/jffs2/os-linux.h
@@ -56,10 +56,6 @@
f->target = NULL;
f->flags = 0;
f->usercompr = 0;
-#ifdef CONFIG_JFFS2_FS_POSIX_ACL
- f->i_acl_access = JFFS2_ACL_NOT_CACHED;
- f->i_acl_default = JFFS2_ACL_NOT_CACHED;
-#endif
}
diff --git a/fs/jffs2/readinode.c b/fs/jffs2/readinode.c
index 1fc1e92..1a80301 100644
--- a/fs/jffs2/readinode.c
+++ b/fs/jffs2/readinode.c
@@ -1424,7 +1424,6 @@
struct jffs2_full_dirent *fd, *fds;
int deleted;
- jffs2_clear_acl(f);
jffs2_xattr_delete_inode(c, f->inocache);
mutex_lock(&f->sem);
deleted = f->inocache && !f->inocache->pino_nlink;
diff --git a/fs/jffs2/scan.c b/fs/jffs2/scan.c
index 1d437de..7515e73 100644
--- a/fs/jffs2/scan.c
+++ b/fs/jffs2/scan.c
@@ -196,7 +196,7 @@
if (c->nextblock) {
ret = file_dirty(c, c->nextblock);
if (ret)
- return ret;
+ goto out;
/* deleting summary information of the old nextblock */
jffs2_sum_reset_collected(c->summary);
}
@@ -207,7 +207,7 @@
} else {
ret = file_dirty(c, jeb);
if (ret)
- return ret;
+ goto out;
}
break;
diff --git a/fs/jfs/acl.c b/fs/jfs/acl.c
index 06ca1b8..91fa3ad 100644
--- a/fs/jfs/acl.c
+++ b/fs/jfs/acl.c
@@ -31,27 +31,24 @@
{
struct posix_acl *acl;
char *ea_name;
- struct jfs_inode_info *ji = JFS_IP(inode);
- struct posix_acl **p_acl;
int size;
char *value = NULL;
+ acl = get_cached_acl(inode, type);
+ if (acl != ACL_NOT_CACHED)
+ return acl;
+
switch(type) {
case ACL_TYPE_ACCESS:
ea_name = POSIX_ACL_XATTR_ACCESS;
- p_acl = &ji->i_acl;
break;
case ACL_TYPE_DEFAULT:
ea_name = POSIX_ACL_XATTR_DEFAULT;
- p_acl = &ji->i_default_acl;
break;
default:
return ERR_PTR(-EINVAL);
}
- if (*p_acl != JFS_ACL_NOT_CACHED)
- return posix_acl_dup(*p_acl);
-
size = __jfs_getxattr(inode, ea_name, NULL, 0);
if (size > 0) {
@@ -62,17 +59,18 @@
}
if (size < 0) {
- if (size == -ENODATA) {
- *p_acl = NULL;
+ if (size == -ENODATA)
acl = NULL;
- } else
+ else
acl = ERR_PTR(size);
} else {
acl = posix_acl_from_xattr(value, size);
- if (!IS_ERR(acl))
- *p_acl = posix_acl_dup(acl);
}
kfree(value);
+ if (!IS_ERR(acl)) {
+ set_cached_acl(inode, type, acl);
+ posix_acl_release(acl);
+ }
return acl;
}
@@ -80,8 +78,6 @@
struct posix_acl *acl)
{
char *ea_name;
- struct jfs_inode_info *ji = JFS_IP(inode);
- struct posix_acl **p_acl;
int rc;
int size = 0;
char *value = NULL;
@@ -92,11 +88,9 @@
switch(type) {
case ACL_TYPE_ACCESS:
ea_name = POSIX_ACL_XATTR_ACCESS;
- p_acl = &ji->i_acl;
break;
case ACL_TYPE_DEFAULT:
ea_name = POSIX_ACL_XATTR_DEFAULT;
- p_acl = &ji->i_default_acl;
if (!S_ISDIR(inode->i_mode))
return acl ? -EACCES : 0;
break;
@@ -116,27 +110,24 @@
out:
kfree(value);
- if (!rc) {
- if (*p_acl && (*p_acl != JFS_ACL_NOT_CACHED))
- posix_acl_release(*p_acl);
- *p_acl = posix_acl_dup(acl);
- }
+ if (!rc)
+ set_cached_acl(inode, type, acl);
+
return rc;
}
static int jfs_check_acl(struct inode *inode, int mask)
{
- struct jfs_inode_info *ji = JFS_IP(inode);
+ struct posix_acl *acl = jfs_get_acl(inode, ACL_TYPE_ACCESS);
- if (ji->i_acl == JFS_ACL_NOT_CACHED) {
- struct posix_acl *acl = jfs_get_acl(inode, ACL_TYPE_ACCESS);
- if (IS_ERR(acl))
- return PTR_ERR(acl);
+ if (IS_ERR(acl))
+ return PTR_ERR(acl);
+ if (acl) {
+ int error = posix_acl_permission(inode, acl, mask);
posix_acl_release(acl);
+ return error;
}
- if (ji->i_acl)
- return posix_acl_permission(inode, ji->i_acl, mask);
return -EAGAIN;
}
diff --git a/fs/jfs/jfs_incore.h b/fs/jfs/jfs_incore.h
index 439901d..1439f11 100644
--- a/fs/jfs/jfs_incore.h
+++ b/fs/jfs/jfs_incore.h
@@ -74,10 +74,6 @@
/* xattr_sem allows us to access the xattrs without taking i_mutex */
struct rw_semaphore xattr_sem;
lid_t xtlid; /* lid of xtree lock on directory */
-#ifdef CONFIG_JFS_POSIX_ACL
- struct posix_acl *i_acl;
- struct posix_acl *i_default_acl;
-#endif
union {
struct {
xtpage_t _xtroot; /* 288: xtree root */
@@ -107,8 +103,6 @@
#define i_inline u.link._inline
#define i_inline_ea u.link._inline_ea
-#define JFS_ACL_NOT_CACHED ((void *)-1)
-
#define IREAD_LOCK(ip, subclass) \
down_read_nested(&JFS_IP(ip)->rdwrlock, subclass)
#define IREAD_UNLOCK(ip) up_read(&JFS_IP(ip)->rdwrlock)
diff --git a/fs/jfs/super.c b/fs/jfs/super.c
index 09b1b6e..37e6dcd 100644
--- a/fs/jfs/super.c
+++ b/fs/jfs/super.c
@@ -128,18 +128,6 @@
ji->active_ag = -1;
}
spin_unlock_irq(&ji->ag_lock);
-
-#ifdef CONFIG_JFS_POSIX_ACL
- if (ji->i_acl != JFS_ACL_NOT_CACHED) {
- posix_acl_release(ji->i_acl);
- ji->i_acl = JFS_ACL_NOT_CACHED;
- }
- if (ji->i_default_acl != JFS_ACL_NOT_CACHED) {
- posix_acl_release(ji->i_default_acl);
- ji->i_default_acl = JFS_ACL_NOT_CACHED;
- }
-#endif
-
kmem_cache_free(jfs_inode_cachep, ji);
}
@@ -798,10 +786,6 @@
init_rwsem(&jfs_ip->xattr_sem);
spin_lock_init(&jfs_ip->ag_lock);
jfs_ip->active_ag = -1;
-#ifdef CONFIG_JFS_POSIX_ACL
- jfs_ip->i_acl = JFS_ACL_NOT_CACHED;
- jfs_ip->i_default_acl = JFS_ACL_NOT_CACHED;
-#endif
inode_init_once(&jfs_ip->vfs_inode);
}
diff --git a/fs/jfs/xattr.c b/fs/jfs/xattr.c
index 61dfa81..fad3645 100644
--- a/fs/jfs/xattr.c
+++ b/fs/jfs/xattr.c
@@ -727,10 +727,7 @@
/*
* We're changing the ACL. Get rid of the cached one
*/
- acl =JFS_IP(inode)->i_acl;
- if (acl != JFS_ACL_NOT_CACHED)
- posix_acl_release(acl);
- JFS_IP(inode)->i_acl = JFS_ACL_NOT_CACHED;
+ forget_cached_acl(inode, ACL_TYPE_ACCESS);
return 0;
} else if (strcmp(name, POSIX_ACL_XATTR_DEFAULT) == 0) {
@@ -746,10 +743,7 @@
/*
* We're changing the default ACL. Get rid of the cached one
*/
- acl =JFS_IP(inode)->i_default_acl;
- if (acl && (acl != JFS_ACL_NOT_CACHED))
- posix_acl_release(acl);
- JFS_IP(inode)->i_default_acl = JFS_ACL_NOT_CACHED;
+ forget_cached_acl(inode, ACL_TYPE_DEFAULT);
return 0;
}
diff --git a/fs/lockd/clntproc.c b/fs/lockd/clntproc.c
index dd79570..f2fdcbc 100644
--- a/fs/lockd/clntproc.c
+++ b/fs/lockd/clntproc.c
@@ -126,7 +126,6 @@
struct nlm_lock *lock = &argp->lock;
nlmclnt_next_cookie(&argp->cookie);
- argp->state = nsm_local_state;
memcpy(&lock->fh, NFS_FH(fl->fl_file->f_path.dentry->d_inode), sizeof(struct nfs_fh));
lock->caller = utsname()->nodename;
lock->oh.data = req->a_owner;
@@ -165,6 +164,7 @@
/* Set up the argument struct */
nlmclnt_setlockargs(call, fl);
+ lock_kernel();
if (IS_SETLK(cmd) || IS_SETLKW(cmd)) {
if (fl->fl_type != F_UNLCK) {
call->a_args.block = IS_SETLKW(cmd) ? 1 : 0;
@@ -178,6 +178,7 @@
fl->fl_ops->fl_release_private(fl);
fl->fl_ops = NULL;
+ unlock_kernel();
dprintk("lockd: clnt proc returns %d\n", status);
return status;
@@ -519,6 +520,7 @@
if (nsm_monitor(host) < 0)
goto out;
+ req->a_args.state = nsm_local_state;
fl->fl_flags |= FL_ACCESS;
status = do_vfs_lock(fl);
diff --git a/fs/lockd/mon.c b/fs/lockd/mon.c
index 6d5d4a4..7fce1b5 100644
--- a/fs/lockd/mon.c
+++ b/fs/lockd/mon.c
@@ -53,7 +53,7 @@
/*
* Local NSM state
*/
-int __read_mostly nsm_local_state;
+u32 __read_mostly nsm_local_state;
int __read_mostly nsm_use_hostnames;
static inline struct sockaddr *nsm_addr(const struct nsm_handle *nsm)
@@ -112,6 +112,7 @@
.program = &nsm_program,
.version = NSM_VERSION,
.authflavor = RPC_AUTH_NULL,
+ .flags = RPC_CLNT_CREATE_NOPING,
};
return rpc_create(&args);
@@ -184,13 +185,19 @@
nsm->sm_mon_name = nsm_use_hostnames ? nsm->sm_name : nsm->sm_addrbuf;
status = nsm_mon_unmon(nsm, NSMPROC_MON, &res);
- if (res.status != 0)
+ if (unlikely(res.status != 0))
status = -EIO;
- if (status < 0)
+ if (unlikely(status < 0)) {
printk(KERN_NOTICE "lockd: cannot monitor %s\n", nsm->sm_name);
- else
- nsm->sm_monitored = 1;
- return status;
+ return status;
+ }
+
+ nsm->sm_monitored = 1;
+ if (unlikely(nsm_local_state != res.state)) {
+ nsm_local_state = res.state;
+ dprintk("lockd: NSM state changed to %d\n", nsm_local_state);
+ }
+ return 0;
}
/**
diff --git a/fs/lockd/svclock.c b/fs/lockd/svclock.c
index 83ee342..e577a78 100644
--- a/fs/lockd/svclock.c
+++ b/fs/lockd/svclock.c
@@ -326,6 +326,8 @@
{
if (call->a_args.lock.oh.data != call->a_owner)
kfree(call->a_args.lock.oh.data);
+
+ locks_release_private(&call->a_args.lock.fl);
}
/*
diff --git a/fs/locks.c b/fs/locks.c
index ec3deea..b6440f5 100644
--- a/fs/locks.c
+++ b/fs/locks.c
@@ -151,7 +151,7 @@
return kmem_cache_alloc(filelock_cache, GFP_KERNEL);
}
-static void locks_release_private(struct file_lock *fl)
+void locks_release_private(struct file_lock *fl)
{
if (fl->fl_ops) {
if (fl->fl_ops->fl_release_private)
@@ -165,6 +165,7 @@
}
}
+EXPORT_SYMBOL_GPL(locks_release_private);
/* Free a lock which is not in use. */
static void locks_free_lock(struct file_lock *fl)
diff --git a/fs/minix/minix.h b/fs/minix/minix.h
index cb7fdd1..9dcf95b4 100644
--- a/fs/minix/minix.h
+++ b/fs/minix/minix.h
@@ -1,3 +1,6 @@
+#ifndef FS_MINIX_H
+#define FS_MINIX_H
+
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/minix_fs.h>
@@ -86,3 +89,5 @@
{
return list_entry(inode, struct minix_inode_info, vfs_inode);
}
+
+#endif /* FS_MINIX_H */
diff --git a/fs/namei.c b/fs/namei.c
index 527119a..5b961eb 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -1698,8 +1698,11 @@
if (error)
return ERR_PTR(error);
error = path_walk(pathname, &nd);
- if (error)
+ if (error) {
+ if (nd.root.mnt)
+ path_put(&nd.root);
return ERR_PTR(error);
+ }
if (unlikely(!audit_dummy_context()))
audit_inode(pathname, nd.path.dentry);
@@ -1759,6 +1762,8 @@
}
filp = nameidata_to_filp(&nd, open_flag);
mnt_drop_write(nd.path.mnt);
+ if (nd.root.mnt)
+ path_put(&nd.root);
return filp;
}
@@ -1819,6 +1824,8 @@
*/
if (will_write)
mnt_drop_write(nd.path.mnt);
+ if (nd.root.mnt)
+ path_put(&nd.root);
return filp;
exit_mutex_unlock:
@@ -1859,6 +1866,8 @@
* with "intent.open".
*/
release_open_intent(&nd);
+ if (nd.root.mnt)
+ path_put(&nd.root);
return ERR_PTR(error);
}
nd.flags &= ~LOOKUP_PARENT;
diff --git a/fs/namespace.c b/fs/namespace.c
index 2dd333b..3dc283f 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -42,6 +42,8 @@
static int event;
static DEFINE_IDA(mnt_id_ida);
static DEFINE_IDA(mnt_group_ida);
+static int mnt_id_start = 0;
+static int mnt_group_start = 1;
static struct list_head *mount_hashtable __read_mostly;
static struct kmem_cache *mnt_cache __read_mostly;
@@ -69,7 +71,9 @@
retry:
ida_pre_get(&mnt_id_ida, GFP_KERNEL);
spin_lock(&vfsmount_lock);
- res = ida_get_new(&mnt_id_ida, &mnt->mnt_id);
+ res = ida_get_new_above(&mnt_id_ida, mnt_id_start, &mnt->mnt_id);
+ if (!res)
+ mnt_id_start = mnt->mnt_id + 1;
spin_unlock(&vfsmount_lock);
if (res == -EAGAIN)
goto retry;
@@ -79,8 +83,11 @@
static void mnt_free_id(struct vfsmount *mnt)
{
+ int id = mnt->mnt_id;
spin_lock(&vfsmount_lock);
- ida_remove(&mnt_id_ida, mnt->mnt_id);
+ ida_remove(&mnt_id_ida, id);
+ if (mnt_id_start > id)
+ mnt_id_start = id;
spin_unlock(&vfsmount_lock);
}
@@ -91,10 +98,18 @@
*/
static int mnt_alloc_group_id(struct vfsmount *mnt)
{
+ int res;
+
if (!ida_pre_get(&mnt_group_ida, GFP_KERNEL))
return -ENOMEM;
- return ida_get_new_above(&mnt_group_ida, 1, &mnt->mnt_group_id);
+ res = ida_get_new_above(&mnt_group_ida,
+ mnt_group_start,
+ &mnt->mnt_group_id);
+ if (!res)
+ mnt_group_start = mnt->mnt_group_id + 1;
+
+ return res;
}
/*
@@ -102,7 +117,10 @@
*/
void mnt_release_group_id(struct vfsmount *mnt)
{
- ida_remove(&mnt_group_ida, mnt->mnt_group_id);
+ int id = mnt->mnt_group_id;
+ ida_remove(&mnt_group_ida, id);
+ if (mnt_group_start > id)
+ mnt_group_start = id;
mnt->mnt_group_id = 0;
}
@@ -1937,6 +1955,21 @@
return retval;
}
+static struct mnt_namespace *alloc_mnt_ns(void)
+{
+ struct mnt_namespace *new_ns;
+
+ new_ns = kmalloc(sizeof(struct mnt_namespace), GFP_KERNEL);
+ if (!new_ns)
+ return ERR_PTR(-ENOMEM);
+ atomic_set(&new_ns->count, 1);
+ new_ns->root = NULL;
+ INIT_LIST_HEAD(&new_ns->list);
+ init_waitqueue_head(&new_ns->poll);
+ new_ns->event = 0;
+ return new_ns;
+}
+
/*
* Allocate a new namespace structure and populate it with contents
* copied from the namespace of the passed in task structure.
@@ -1948,14 +1981,9 @@
struct vfsmount *rootmnt = NULL, *pwdmnt = NULL;
struct vfsmount *p, *q;
- new_ns = kmalloc(sizeof(struct mnt_namespace), GFP_KERNEL);
- if (!new_ns)
- return ERR_PTR(-ENOMEM);
-
- atomic_set(&new_ns->count, 1);
- INIT_LIST_HEAD(&new_ns->list);
- init_waitqueue_head(&new_ns->poll);
- new_ns->event = 0;
+ new_ns = alloc_mnt_ns();
+ if (IS_ERR(new_ns))
+ return new_ns;
down_write(&namespace_sem);
/* First pass: copy the tree topology */
@@ -2019,6 +2047,24 @@
return new_ns;
}
+/**
+ * create_mnt_ns - creates a private namespace and adds a root filesystem
+ * @mnt: pointer to the new root filesystem mountpoint
+ */
+struct mnt_namespace *create_mnt_ns(struct vfsmount *mnt)
+{
+ struct mnt_namespace *new_ns;
+
+ new_ns = alloc_mnt_ns();
+ if (!IS_ERR(new_ns)) {
+ mnt->mnt_ns = new_ns;
+ new_ns->root = mnt;
+ list_add(&new_ns->list, &new_ns->root->mnt_list);
+ }
+ return new_ns;
+}
+EXPORT_SYMBOL(create_mnt_ns);
+
SYSCALL_DEFINE5(mount, char __user *, dev_name, char __user *, dir_name,
char __user *, type, unsigned long, flags, void __user *, data)
{
@@ -2194,16 +2240,9 @@
mnt = do_kern_mount("rootfs", 0, "rootfs", NULL);
if (IS_ERR(mnt))
panic("Can't create rootfs");
- ns = kmalloc(sizeof(*ns), GFP_KERNEL);
- if (!ns)
+ ns = create_mnt_ns(mnt);
+ if (IS_ERR(ns))
panic("Can't allocate initial namespace");
- atomic_set(&ns->count, 1);
- INIT_LIST_HEAD(&ns->list);
- init_waitqueue_head(&ns->poll);
- ns->event = 0;
- list_add(&mnt->mnt_list, &ns->list);
- ns->root = mnt;
- mnt->mnt_ns = ns;
init_task.nsproxy->mnt_ns = ns;
get_mnt_ns(ns);
@@ -2246,10 +2285,14 @@
init_mount_tree();
}
-void __put_mnt_ns(struct mnt_namespace *ns)
+void put_mnt_ns(struct mnt_namespace *ns)
{
- struct vfsmount *root = ns->root;
+ struct vfsmount *root;
LIST_HEAD(umount_list);
+
+ if (!atomic_dec_and_lock(&ns->count, &vfsmount_lock))
+ return;
+ root = ns->root;
ns->root = NULL;
spin_unlock(&vfsmount_lock);
down_write(&namespace_sem);
@@ -2260,3 +2303,4 @@
release_mounts(&umount_list);
kfree(ns);
}
+EXPORT_SYMBOL(put_mnt_ns);
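The mnt_id/mnt_group_id changes above pair ida_get_new_above() with a cached start hint so IDs stay small and dense. A tiny userspace sketch of that hint trick (illustrative only; the bitmap allocator and its names are invented):

    #include <stdio.h>

    #define MAX_IDS 64

    static unsigned long long used;	/* bit i set => id i allocated */
    static int id_start;		/* lowest id that might be free */

    static int id_alloc(void)
    {
    	for (int id = id_start; id < MAX_IDS; id++) {
    		if (!(used & (1ULL << id))) {
    			used |= 1ULL << id;
    			id_start = id + 1;
    			return id;
    		}
    	}
    	return -1;		/* space exhausted */
    }

    static void id_free(int id)
    {
    	used &= ~(1ULL << id);
    	if (id_start > id)
    		id_start = id;	/* lower the hint, as mnt_free_id() does */
    }

    int main(void)
    {
    	int a = id_alloc(), b = id_alloc(), c = id_alloc();

    	printf("got %d %d %d\n", a, b, c);
    	id_free(b);
    	printf("after freeing %d, next alloc gives %d\n", b, id_alloc());
    	return 0;
    }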
diff --git a/fs/nfs/Kconfig b/fs/nfs/Kconfig
index e67f3ec..2a77bc2 100644
--- a/fs/nfs/Kconfig
+++ b/fs/nfs/Kconfig
@@ -1,6 +1,6 @@
config NFS_FS
tristate "NFS client support"
- depends on INET
+ depends on INET && FILE_LOCKING
select LOCKD
select SUNRPC
select NFS_ACL_SUPPORT if NFS_V3_ACL
@@ -74,6 +74,15 @@
If unsure, say N.
+config NFS_V4_1
+ bool "NFS client support for NFSv4.1 (DEVELOPER ONLY)"
+ depends on NFS_V4 && EXPERIMENTAL
+ help
+ This option enables support for minor version 1 of the NFSv4 protocol
+ (draft-ietf-nfsv4-minorversion1) in the kernel's NFS client.
+
+ Unless you're an NFS developer, say N.
+
config ROOT_NFS
bool "Root file system on NFS"
depends on NFS_FS=y && IP_PNP
diff --git a/fs/nfs/callback.c b/fs/nfs/callback.c
index a886e69..7f604c7 100644
--- a/fs/nfs/callback.c
+++ b/fs/nfs/callback.c
@@ -17,6 +17,9 @@
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/sunrpc/svcauth_gss.h>
+#if defined(CONFIG_NFS_V4_1)
+#include <linux/sunrpc/bc_xprt.h>
+#endif
#include <net/inet_sock.h>
@@ -28,11 +31,12 @@
struct nfs_callback_data {
unsigned int users;
+ struct svc_serv *serv;
struct svc_rqst *rqst;
struct task_struct *task;
};
-static struct nfs_callback_data nfs_callback_info;
+static struct nfs_callback_data nfs_callback_info[NFS4_MAX_MINOR_VERSION + 1];
static DEFINE_MUTEX(nfs_callback_mutex);
static struct svc_program nfs4_callback_program;
@@ -56,10 +60,10 @@
&nfs_callback_set_tcpport, 0644);
/*
- * This is the callback kernel thread.
+ * This is the NFSv4 callback kernel thread.
*/
static int
-nfs_callback_svc(void *vrqstp)
+nfs4_callback_svc(void *vrqstp)
{
int err, preverr = 0;
struct svc_rqst *rqstp = vrqstp;
@@ -97,20 +101,12 @@
}
/*
- * Bring up the callback thread if it is not already up.
+ * Prepare to bring up the NFSv4 callback service
*/
-int nfs_callback_up(void)
+struct svc_rqst *
+nfs4_callback_up(struct svc_serv *serv)
{
- struct svc_serv *serv = NULL;
- int ret = 0;
-
- mutex_lock(&nfs_callback_mutex);
- if (nfs_callback_info.users++ || nfs_callback_info.task != NULL)
- goto out;
- serv = svc_create(&nfs4_callback_program, NFS4_CALLBACK_BUFSIZE, NULL);
- ret = -ENOMEM;
- if (!serv)
- goto out_err;
+ int ret;
ret = svc_create_xprt(serv, "tcp", PF_INET,
nfs_callback_set_tcpport, SVC_SOCK_ANONYMOUS);
@@ -127,27 +123,174 @@
nfs_callback_tcpport6 = ret;
dprintk("NFS: Callback listener port = %u (af %u)\n",
nfs_callback_tcpport6, PF_INET6);
- } else if (ret != -EAFNOSUPPORT)
+ } else if (ret == -EAFNOSUPPORT)
+ ret = 0;
+ else
goto out_err;
#endif /* defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) */
- nfs_callback_info.rqst = svc_prepare_thread(serv, &serv->sv_pools[0]);
- if (IS_ERR(nfs_callback_info.rqst)) {
- ret = PTR_ERR(nfs_callback_info.rqst);
- nfs_callback_info.rqst = NULL;
+ return svc_prepare_thread(serv, &serv->sv_pools[0]);
+
+out_err:
+ if (ret == 0)
+ ret = -ENOMEM;
+ return ERR_PTR(ret);
+}
+
+#if defined(CONFIG_NFS_V4_1)
+/*
+ * The callback service for NFSv4.1 callbacks
+ */
+static int
+nfs41_callback_svc(void *vrqstp)
+{
+ struct svc_rqst *rqstp = vrqstp;
+ struct svc_serv *serv = rqstp->rq_server;
+ struct rpc_rqst *req;
+ int error;
+ DEFINE_WAIT(wq);
+
+ set_freezable();
+
+ /*
+ * FIXME: do we really need to run this under the BKL? If so, please
+ * add a comment about what it's intended to protect.
+ */
+ lock_kernel();
+ while (!kthread_should_stop()) {
+ prepare_to_wait(&serv->sv_cb_waitq, &wq, TASK_INTERRUPTIBLE);
+ spin_lock_bh(&serv->sv_cb_lock);
+ if (!list_empty(&serv->sv_cb_list)) {
+ req = list_first_entry(&serv->sv_cb_list,
+ struct rpc_rqst, rq_bc_list);
+ list_del(&req->rq_bc_list);
+ spin_unlock_bh(&serv->sv_cb_lock);
+ dprintk("Invoking bc_svc_process()\n");
+ error = bc_svc_process(serv, req, rqstp);
+ dprintk("bc_svc_process() returned w/ error code= %d\n",
+ error);
+ } else {
+ spin_unlock_bh(&serv->sv_cb_lock);
+ schedule();
+ }
+ finish_wait(&serv->sv_cb_waitq, &wq);
+ }
+ unlock_kernel();
+ return 0;
+}
+
+/*
+ * Bring up the NFSv4.1 callback service
+ */
+struct svc_rqst *
+nfs41_callback_up(struct svc_serv *serv, struct rpc_xprt *xprt)
+{
+ struct svc_xprt *bc_xprt;
+ struct svc_rqst *rqstp = ERR_PTR(-ENOMEM);
+
+ dprintk("--> %s\n", __func__);
+ /* Create a svc_sock for the service */
+ bc_xprt = svc_sock_create(serv, xprt->prot);
+ if (!bc_xprt)
+ goto out;
+
+ /*
+ * Save the svc_serv in the transport so that it can
+ * be referenced when the session backchannel is initialized
+ */
+ serv->bc_xprt = bc_xprt;
+ xprt->bc_serv = serv;
+
+ INIT_LIST_HEAD(&serv->sv_cb_list);
+ spin_lock_init(&serv->sv_cb_lock);
+ init_waitqueue_head(&serv->sv_cb_waitq);
+ rqstp = svc_prepare_thread(serv, &serv->sv_pools[0]);
+ if (IS_ERR(rqstp))
+ svc_sock_destroy(bc_xprt);
+out:
+ dprintk("--> %s return %p\n", __func__, rqstp);
+ return rqstp;
+}
+
+static inline int nfs_minorversion_callback_svc_setup(u32 minorversion,
+ struct svc_serv *serv, struct rpc_xprt *xprt,
+ struct svc_rqst **rqstpp, int (**callback_svc)(void *vrqstp))
+{
+ if (minorversion) {
+ *rqstpp = nfs41_callback_up(serv, xprt);
+ *callback_svc = nfs41_callback_svc;
+ }
+ return minorversion;
+}
+
+static inline void nfs_callback_bc_serv(u32 minorversion, struct rpc_xprt *xprt,
+ struct nfs_callback_data *cb_info)
+{
+ if (minorversion)
+ xprt->bc_serv = cb_info->serv;
+}
+#else
+static inline int nfs_minorversion_callback_svc_setup(u32 minorversion,
+ struct svc_serv *serv, struct rpc_xprt *xprt,
+ struct svc_rqst **rqstpp, int (**callback_svc)(void *vrqstp))
+{
+ return 0;
+}
+
+static inline void nfs_callback_bc_serv(u32 minorversion, struct rpc_xprt *xprt,
+ struct nfs_callback_data *cb_info)
+{
+}
+#endif /* CONFIG_NFS_V4_1 */
+
+/*
+ * Bring up the callback thread if it is not already up.
+ */
+int nfs_callback_up(u32 minorversion, struct rpc_xprt *xprt)
+{
+ struct svc_serv *serv = NULL;
+ struct svc_rqst *rqstp;
+ int (*callback_svc)(void *vrqstp);
+ struct nfs_callback_data *cb_info = &nfs_callback_info[minorversion];
+ char svc_name[12];
+ int ret = 0;
+ int minorversion_setup;
+
+ mutex_lock(&nfs_callback_mutex);
+ if (cb_info->users++ || cb_info->task != NULL) {
+ nfs_callback_bc_serv(minorversion, xprt, cb_info);
+ goto out;
+ }
+ serv = svc_create(&nfs4_callback_program, NFS4_CALLBACK_BUFSIZE, NULL);
+ if (!serv) {
+ ret = -ENOMEM;
+ goto out_err;
+ }
+
+ minorversion_setup = nfs_minorversion_callback_svc_setup(minorversion,
+ serv, xprt, &rqstp, &callback_svc);
+ if (!minorversion_setup) {
+ /* v4.0 callback setup */
+ rqstp = nfs4_callback_up(serv);
+ callback_svc = nfs4_callback_svc;
+ }
+
+ if (IS_ERR(rqstp)) {
+ ret = PTR_ERR(rqstp);
goto out_err;
}
svc_sock_update_bufs(serv);
- nfs_callback_info.task = kthread_run(nfs_callback_svc,
- nfs_callback_info.rqst,
- "nfsv4-svc");
- if (IS_ERR(nfs_callback_info.task)) {
- ret = PTR_ERR(nfs_callback_info.task);
- svc_exit_thread(nfs_callback_info.rqst);
- nfs_callback_info.rqst = NULL;
- nfs_callback_info.task = NULL;
+ sprintf(svc_name, "nfsv4.%u-svc", minorversion);
+ cb_info->serv = serv;
+ cb_info->rqst = rqstp;
+ cb_info->task = kthread_run(callback_svc, cb_info->rqst, svc_name);
+ if (IS_ERR(cb_info->task)) {
+ ret = PTR_ERR(cb_info->task);
+ svc_exit_thread(cb_info->rqst);
+ cb_info->rqst = NULL;
+ cb_info->task = NULL;
goto out_err;
}
out:
@@ -164,22 +307,25 @@
out_err:
dprintk("NFS: Couldn't create callback socket or server thread; "
"err = %d\n", ret);
- nfs_callback_info.users--;
+ cb_info->users--;
goto out;
}
/*
* Kill the callback thread if it's no longer being used.
*/
-void nfs_callback_down(void)
+void nfs_callback_down(int minorversion)
{
+ struct nfs_callback_data *cb_info = &nfs_callback_info[minorversion];
+
mutex_lock(&nfs_callback_mutex);
- nfs_callback_info.users--;
- if (nfs_callback_info.users == 0 && nfs_callback_info.task != NULL) {
- kthread_stop(nfs_callback_info.task);
- svc_exit_thread(nfs_callback_info.rqst);
- nfs_callback_info.rqst = NULL;
- nfs_callback_info.task = NULL;
+ cb_info->users--;
+ if (cb_info->users == 0 && cb_info->task != NULL) {
+ kthread_stop(cb_info->task);
+ svc_exit_thread(cb_info->rqst);
+ cb_info->serv = NULL;
+ cb_info->rqst = NULL;
+ cb_info->task = NULL;
}
mutex_unlock(&nfs_callback_mutex);
}
diff --git a/fs/nfs/callback.h b/fs/nfs/callback.h
index e110e28..07baa82 100644
--- a/fs/nfs/callback.h
+++ b/fs/nfs/callback.h
@@ -20,13 +20,24 @@
enum nfs4_callback_opnum {
OP_CB_GETATTR = 3,
OP_CB_RECALL = 4,
+/* Callback operations new to NFSv4.1 */
+ OP_CB_LAYOUTRECALL = 5,
+ OP_CB_NOTIFY = 6,
+ OP_CB_PUSH_DELEG = 7,
+ OP_CB_RECALL_ANY = 8,
+ OP_CB_RECALLABLE_OBJ_AVAIL = 9,
+ OP_CB_RECALL_SLOT = 10,
+ OP_CB_SEQUENCE = 11,
+ OP_CB_WANTS_CANCELLED = 12,
+ OP_CB_NOTIFY_LOCK = 13,
+ OP_CB_NOTIFY_DEVICEID = 14,
OP_CB_ILLEGAL = 10044,
};
struct cb_compound_hdr_arg {
unsigned int taglen;
const char *tag;
- unsigned int callback_ident;
+ unsigned int minorversion;
unsigned nops;
};
@@ -59,16 +70,59 @@
uint32_t truncate;
};
+#if defined(CONFIG_NFS_V4_1)
+
+struct referring_call {
+ uint32_t rc_sequenceid;
+ uint32_t rc_slotid;
+};
+
+struct referring_call_list {
+ struct nfs4_sessionid rcl_sessionid;
+ uint32_t rcl_nrefcalls;
+ struct referring_call *rcl_refcalls;
+};
+
+struct cb_sequenceargs {
+ struct sockaddr *csa_addr;
+ struct nfs4_sessionid csa_sessionid;
+ uint32_t csa_sequenceid;
+ uint32_t csa_slotid;
+ uint32_t csa_highestslotid;
+ uint32_t csa_cachethis;
+ uint32_t csa_nrclists;
+ struct referring_call_list *csa_rclists;
+};
+
+struct cb_sequenceres {
+ __be32 csr_status;
+ struct nfs4_sessionid csr_sessionid;
+ uint32_t csr_sequenceid;
+ uint32_t csr_slotid;
+ uint32_t csr_highestslotid;
+ uint32_t csr_target_highestslotid;
+};
+
+extern unsigned nfs4_callback_sequence(struct cb_sequenceargs *args,
+ struct cb_sequenceres *res);
+
+#endif /* CONFIG_NFS_V4_1 */
+
extern __be32 nfs4_callback_getattr(struct cb_getattrargs *args, struct cb_getattrres *res);
extern __be32 nfs4_callback_recall(struct cb_recallargs *args, void *dummy);
#ifdef CONFIG_NFS_V4
-extern int nfs_callback_up(void);
-extern void nfs_callback_down(void);
-#else
-#define nfs_callback_up() (0)
-#define nfs_callback_down() do {} while(0)
-#endif
+extern int nfs_callback_up(u32 minorversion, struct rpc_xprt *xprt);
+extern void nfs_callback_down(int minorversion);
+#endif /* CONFIG_NFS_V4 */
+
+/*
+ * nfs41: Callbacks are expected to not cause substantial latency,
+ * so we limit their concurrency to 1 by setting up the maximum number
+ * of slots for the backchannel.
+ */
+#define NFS41_BC_MIN_CALLBACKS 1
+#define NFS41_BC_MAX_CALLBACKS 1
extern unsigned int nfs_callback_set_tcpport;
extern unsigned short nfs_callback_tcpport;
diff --git a/fs/nfs/callback_proc.c b/fs/nfs/callback_proc.c
index f7e83e2..b7da1f5 100644
--- a/fs/nfs/callback_proc.c
+++ b/fs/nfs/callback_proc.c
@@ -101,3 +101,130 @@
dprintk("%s: exit with status = %d\n", __func__, ntohl(res));
return res;
}
+
+#if defined(CONFIG_NFS_V4_1)
+
+/*
+ * Validate the sequenceID sent by the server.
+ * Return success if the sequenceID is one more than what we last saw on
+ * this slot, accounting for wraparound. Increments the slot's sequence.
+ *
+ * We don't yet implement a duplicate request cache, so at this time
+ * we will log replays, and process them as if we had not seen them before,
+ * but we don't bump the sequence in the slot. Not too worried about it,
+ * since we only currently implement idempotent callbacks anyway.
+ *
+ * We have a single slot backchannel at this time, so we don't bother
+ * checking the used_slots bit array on the table. The lower layer guarantees
+ * a single outstanding callback request at a time.
+ */
+static int
+validate_seqid(struct nfs4_slot_table *tbl, u32 slotid, u32 seqid)
+{
+ struct nfs4_slot *slot;
+
+ dprintk("%s enter. slotid %d seqid %d\n",
+ __func__, slotid, seqid);
+
+ if (slotid >= NFS41_BC_MAX_CALLBACKS)
+ return htonl(NFS4ERR_BADSLOT);
+
+ slot = tbl->slots + slotid;
+ dprintk("%s slot table seqid: %d\n", __func__, slot->seq_nr);
+
+ /* Normal */
+ if (likely(seqid == slot->seq_nr + 1)) {
+ slot->seq_nr++;
+ return htonl(NFS4_OK);
+ }
+
+ /* Replay */
+ if (seqid == slot->seq_nr) {
+ dprintk("%s seqid %d is a replay - no DRC available\n",
+ __func__, seqid);
+ return htonl(NFS4_OK);
+ }
+
+ /* Wraparound */
+ if (seqid == 1 && (slot->seq_nr + 1) == 0) {
+ slot->seq_nr = 1;
+ return htonl(NFS4_OK);
+ }
+
+ /* Misordered request */
+ return htonl(NFS4ERR_SEQ_MISORDERED);
+}
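
An illustrative, kernel-independent sketch of the sequencing rules described in the comment above; the struct and names here are made up, and only the normal/replay/wraparound/misordered decisions mirror validate_seqid().

	#include <stdio.h>
	#include <stdint.h>

	/* Hypothetical stand-in for the single backchannel slot used above. */
	struct bc_slot {
		uint32_t seq_nr;
	};

	enum seq_check { SEQ_OK, SEQ_REPLAY, SEQ_MISORDERED };

	/* Same decision tree as validate_seqid(), minus the slot table lookup. */
	static enum seq_check check_seqid(struct bc_slot *slot, uint32_t seqid)
	{
		if (seqid == slot->seq_nr + 1) {		/* normal case */
			slot->seq_nr++;
			return SEQ_OK;
		}
		if (seqid == slot->seq_nr)			/* replay, no DRC */
			return SEQ_REPLAY;
		if (seqid == 1 && slot->seq_nr + 1 == 0) {	/* 32-bit wraparound */
			slot->seq_nr = 1;
			return SEQ_OK;
		}
		return SEQ_MISORDERED;
	}

	int main(void)
	{
		struct bc_slot slot = { .seq_nr = 1 };

		printf("%d\n", check_seqid(&slot, 2));	/* SEQ_OK: 1 -> 2 */
		printf("%d\n", check_seqid(&slot, 2));	/* SEQ_REPLAY */
		printf("%d\n", check_seqid(&slot, 5));	/* SEQ_MISORDERED */
		return 0;
	}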
+
+/*
+ * Returns a pointer to a held 'struct nfs_client' that matches the server's
+ * address, major version number, and session ID. It is the caller's
+ * responsibility to release the returned reference.
+ *
+ * Returns NULL if there are no connections with sessions, or if no session
+ * matches the one of interest.
+ */
+ static struct nfs_client *find_client_with_session(
+ const struct sockaddr *addr, u32 nfsversion,
+ struct nfs4_sessionid *sessionid)
+{
+ struct nfs_client *clp;
+
+ clp = nfs_find_client(addr, 4);
+ if (clp == NULL)
+ return NULL;
+
+ do {
+ struct nfs_client *prev = clp;
+
+ if (clp->cl_session != NULL) {
+ if (memcmp(clp->cl_session->sess_id.data,
+ sessionid->data,
+ NFS4_MAX_SESSIONID_LEN) == 0) {
+ /* Returns a held reference to clp */
+ return clp;
+ }
+ }
+ clp = nfs_find_client_next(prev);
+ nfs_put_client(prev);
+ } while (clp != NULL);
+
+ return NULL;
+}
+
+/* FIXME: referring calls should be processed */
+unsigned nfs4_callback_sequence(struct cb_sequenceargs *args,
+ struct cb_sequenceres *res)
+{
+ struct nfs_client *clp;
+ int i, status;
+
+ for (i = 0; i < args->csa_nrclists; i++)
+ kfree(args->csa_rclists[i].rcl_refcalls);
+ kfree(args->csa_rclists);
+
+ status = htonl(NFS4ERR_BADSESSION);
+ clp = find_client_with_session(args->csa_addr, 4, &args->csa_sessionid);
+ if (clp == NULL)
+ goto out;
+
+ status = validate_seqid(&clp->cl_session->bc_slot_table,
+ args->csa_slotid, args->csa_sequenceid);
+ if (status)
+ goto out_putclient;
+
+ memcpy(&res->csr_sessionid, &args->csa_sessionid,
+ sizeof(res->csr_sessionid));
+ res->csr_sequenceid = args->csa_sequenceid;
+ res->csr_slotid = args->csa_slotid;
+ res->csr_highestslotid = NFS41_BC_MAX_CALLBACKS - 1;
+ res->csr_target_highestslotid = NFS41_BC_MAX_CALLBACKS - 1;
+
+out_putclient:
+ nfs_put_client(clp);
+out:
+ dprintk("%s: exit with status = %d\n", __func__, ntohl(status));
+ res->csr_status = status;
+ return res->csr_status;
+}
+
+#endif /* CONFIG_NFS_V4_1 */
diff --git a/fs/nfs/callback_xdr.c b/fs/nfs/callback_xdr.c
index dd0ef34..e5a2dac 100644
--- a/fs/nfs/callback_xdr.c
+++ b/fs/nfs/callback_xdr.c
@@ -20,6 +20,11 @@
2 + 2 + 3 + 3)
#define CB_OP_RECALL_RES_MAXSZ (CB_OP_HDR_RES_MAXSZ)
+#if defined(CONFIG_NFS_V4_1)
+#define CB_OP_SEQUENCE_RES_MAXSZ (CB_OP_HDR_RES_MAXSZ + \
+ 4 + 1 + 3)
+#endif /* CONFIG_NFS_V4_1 */
+
#define NFSDBG_FACILITY NFSDBG_CALLBACK
typedef __be32 (*callback_process_op_t)(void *, void *);
@@ -132,7 +137,6 @@
static __be32 decode_compound_hdr_arg(struct xdr_stream *xdr, struct cb_compound_hdr_arg *hdr)
{
__be32 *p;
- unsigned int minor_version;
__be32 status;
status = decode_string(xdr, &hdr->taglen, &hdr->tag);
@@ -147,15 +151,19 @@
p = read_buf(xdr, 12);
if (unlikely(p == NULL))
return htonl(NFS4ERR_RESOURCE);
- minor_version = ntohl(*p++);
- /* Check minor version is zero. */
- if (minor_version != 0) {
- printk(KERN_WARNING "%s: NFSv4 server callback with illegal minor version %u!\n",
- __func__, minor_version);
+ hdr->minorversion = ntohl(*p++);
+ /* Check minor version is zero or one. */
+ if (hdr->minorversion <= 1) {
+ p++; /* skip callback_ident */
+ } else {
+ printk(KERN_WARNING "%s: NFSv4 server callback with "
+ "illegal minor version %u!\n",
+ __func__, hdr->minorversion);
return htonl(NFS4ERR_MINOR_VERS_MISMATCH);
}
- hdr->callback_ident = ntohl(*p++);
hdr->nops = ntohl(*p);
+ dprintk("%s: minorversion %d nops %d\n", __func__,
+ hdr->minorversion, hdr->nops);
return 0;
}
@@ -204,6 +212,122 @@
return status;
}
+#if defined(CONFIG_NFS_V4_1)
+
+static unsigned decode_sessionid(struct xdr_stream *xdr,
+ struct nfs4_sessionid *sid)
+{
+ uint32_t *p;
+ int len = NFS4_MAX_SESSIONID_LEN;
+
+ p = read_buf(xdr, len);
+ if (unlikely(p == NULL))
+ return htonl(NFS4ERR_RESOURCE);
+
+ memcpy(sid->data, p, len);
+ return 0;
+}
+
+static unsigned decode_rc_list(struct xdr_stream *xdr,
+ struct referring_call_list *rc_list)
+{
+ uint32_t *p;
+ int i;
+ unsigned status;
+
+ status = decode_sessionid(xdr, &rc_list->rcl_sessionid);
+ if (status)
+ goto out;
+
+ status = htonl(NFS4ERR_RESOURCE);
+ p = read_buf(xdr, sizeof(uint32_t));
+ if (unlikely(p == NULL))
+ goto out;
+
+ rc_list->rcl_nrefcalls = ntohl(*p++);
+ if (rc_list->rcl_nrefcalls) {
+ p = read_buf(xdr,
+ rc_list->rcl_nrefcalls * 2 * sizeof(uint32_t));
+ if (unlikely(p == NULL))
+ goto out;
+ rc_list->rcl_refcalls = kmalloc(rc_list->rcl_nrefcalls *
+ sizeof(*rc_list->rcl_refcalls),
+ GFP_KERNEL);
+ if (unlikely(rc_list->rcl_refcalls == NULL))
+ goto out;
+ for (i = 0; i < rc_list->rcl_nrefcalls; i++) {
+ rc_list->rcl_refcalls[i].rc_sequenceid = ntohl(*p++);
+ rc_list->rcl_refcalls[i].rc_slotid = ntohl(*p++);
+ }
+ }
+ status = 0;
+
+out:
+ return status;
+}
+
+static unsigned decode_cb_sequence_args(struct svc_rqst *rqstp,
+ struct xdr_stream *xdr,
+ struct cb_sequenceargs *args)
+{
+ uint32_t *p;
+ int i;
+ unsigned status;
+
+ status = decode_sessionid(xdr, &args->csa_sessionid);
+ if (status)
+ goto out;
+
+ status = htonl(NFS4ERR_RESOURCE);
+ p = read_buf(xdr, 5 * sizeof(uint32_t));
+ if (unlikely(p == NULL))
+ goto out;
+
+ args->csa_addr = svc_addr(rqstp);
+ args->csa_sequenceid = ntohl(*p++);
+ args->csa_slotid = ntohl(*p++);
+ args->csa_highestslotid = ntohl(*p++);
+ args->csa_cachethis = ntohl(*p++);
+ args->csa_nrclists = ntohl(*p++);
+ args->csa_rclists = NULL;
+ if (args->csa_nrclists) {
+ args->csa_rclists = kmalloc(args->csa_nrclists *
+ sizeof(*args->csa_rclists),
+ GFP_KERNEL);
+ if (unlikely(args->csa_rclists == NULL))
+ goto out;
+
+ for (i = 0; i < args->csa_nrclists; i++) {
+ status = decode_rc_list(xdr, &args->csa_rclists[i]);
+ if (status)
+ goto out_free;
+ }
+ }
+ status = 0;
+
+ dprintk("%s: sessionid %x:%x:%x:%x sequenceid %u slotid %u "
+ "highestslotid %u cachethis %d nrclists %u\n",
+ __func__,
+ ((u32 *)&args->csa_sessionid)[0],
+ ((u32 *)&args->csa_sessionid)[1],
+ ((u32 *)&args->csa_sessionid)[2],
+ ((u32 *)&args->csa_sessionid)[3],
+ args->csa_sequenceid, args->csa_slotid,
+ args->csa_highestslotid, args->csa_cachethis,
+ args->csa_nrclists);
+out:
+ dprintk("%s: exit with status = %d\n", __func__, ntohl(status));
+ return status;
+
+out_free:
+ for (i = 0; i < args->csa_nrclists; i++)
+ kfree(args->csa_rclists[i].rcl_refcalls);
+ kfree(args->csa_rclists);
+ goto out;
+}
+
+#endif /* CONFIG_NFS_V4_1 */
+
static __be32 encode_string(struct xdr_stream *xdr, unsigned int len, const char *str)
{
__be32 *p;
@@ -353,31 +477,134 @@
return status;
}
-static __be32 process_op(struct svc_rqst *rqstp,
+#if defined(CONFIG_NFS_V4_1)
+
+static unsigned encode_sessionid(struct xdr_stream *xdr,
+ const struct nfs4_sessionid *sid)
+{
+ uint32_t *p;
+ int len = NFS4_MAX_SESSIONID_LEN;
+
+ p = xdr_reserve_space(xdr, len);
+ if (unlikely(p == NULL))
+ return htonl(NFS4ERR_RESOURCE);
+
+ memcpy(p, sid, len);
+ return 0;
+}
+
+static unsigned encode_cb_sequence_res(struct svc_rqst *rqstp,
+ struct xdr_stream *xdr,
+ const struct cb_sequenceres *res)
+{
+ uint32_t *p;
+ unsigned status = res->csr_status;
+
+ if (unlikely(status != 0))
+ goto out;
+
+ encode_sessionid(xdr, &res->csr_sessionid);
+
+ p = xdr_reserve_space(xdr, 4 * sizeof(uint32_t));
+ if (unlikely(p == NULL))
+ return htonl(NFS4ERR_RESOURCE);
+
+ *p++ = htonl(res->csr_sequenceid);
+ *p++ = htonl(res->csr_slotid);
+ *p++ = htonl(res->csr_highestslotid);
+ *p++ = htonl(res->csr_target_highestslotid);
+out:
+ dprintk("%s: exit with status = %d\n", __func__, ntohl(status));
+ return status;
+}
+
+static __be32
+preprocess_nfs41_op(int nop, unsigned int op_nr, struct callback_op **op)
+{
+ if (op_nr == OP_CB_SEQUENCE) {
+ if (nop != 0)
+ return htonl(NFS4ERR_SEQUENCE_POS);
+ } else {
+ if (nop == 0)
+ return htonl(NFS4ERR_OP_NOT_IN_SESSION);
+ }
+
+ switch (op_nr) {
+ case OP_CB_GETATTR:
+ case OP_CB_RECALL:
+ case OP_CB_SEQUENCE:
+ *op = &callback_ops[op_nr];
+ break;
+
+ case OP_CB_LAYOUTRECALL:
+ case OP_CB_NOTIFY_DEVICEID:
+ case OP_CB_NOTIFY:
+ case OP_CB_PUSH_DELEG:
+ case OP_CB_RECALL_ANY:
+ case OP_CB_RECALLABLE_OBJ_AVAIL:
+ case OP_CB_RECALL_SLOT:
+ case OP_CB_WANTS_CANCELLED:
+ case OP_CB_NOTIFY_LOCK:
+ return htonl(NFS4ERR_NOTSUPP);
+
+ default:
+ return htonl(NFS4ERR_OP_ILLEGAL);
+ }
+
+ return htonl(NFS_OK);
+}
+
+#else /* CONFIG_NFS_V4_1 */
+
+static __be32
+preprocess_nfs41_op(int nop, unsigned int op_nr, struct callback_op **op)
+{
+ return htonl(NFS4ERR_MINOR_VERS_MISMATCH);
+}
+
+#endif /* CONFIG_NFS_V4_1 */
+
+static __be32
+preprocess_nfs4_op(unsigned int op_nr, struct callback_op **op)
+{
+ switch (op_nr) {
+ case OP_CB_GETATTR:
+ case OP_CB_RECALL:
+ *op = &callback_ops[op_nr];
+ break;
+ default:
+ return htonl(NFS4ERR_OP_ILLEGAL);
+ }
+
+ return htonl(NFS_OK);
+}
+
+static __be32 process_op(uint32_t minorversion, int nop,
+ struct svc_rqst *rqstp,
struct xdr_stream *xdr_in, void *argp,
struct xdr_stream *xdr_out, void *resp)
{
struct callback_op *op = &callback_ops[0];
unsigned int op_nr = OP_CB_ILLEGAL;
- __be32 status = 0;
+ __be32 status;
long maxlen;
__be32 res;
dprintk("%s: start\n", __func__);
status = decode_op_hdr(xdr_in, &op_nr);
- if (likely(status == 0)) {
- switch (op_nr) {
- case OP_CB_GETATTR:
- case OP_CB_RECALL:
- op = &callback_ops[op_nr];
- break;
- default:
- op_nr = OP_CB_ILLEGAL;
- op = &callback_ops[0];
- status = htonl(NFS4ERR_OP_ILLEGAL);
- }
+ if (unlikely(status)) {
+ status = htonl(NFS4ERR_OP_ILLEGAL);
+ goto out;
}
+ dprintk("%s: minorversion=%d nop=%d op_nr=%u\n",
+ __func__, minorversion, nop, op_nr);
+
+ status = minorversion ? preprocess_nfs41_op(nop, op_nr, &op) :
+ preprocess_nfs4_op(op_nr, &op);
+ if (status == htonl(NFS4ERR_OP_ILLEGAL))
+ op_nr = OP_CB_ILLEGAL;
+out:
maxlen = xdr_out->end - xdr_out->p;
if (maxlen > 0 && maxlen < PAGE_SIZE) {
if (likely(status == 0 && op->decode_args != NULL))
@@ -425,7 +652,8 @@
return rpc_system_err;
while (status == 0 && nops != hdr_arg.nops) {
- status = process_op(rqstp, &xdr_in, argp, &xdr_out, resp);
+ status = process_op(hdr_arg.minorversion, nops,
+ rqstp, &xdr_in, argp, &xdr_out, resp);
nops++;
}
@@ -452,7 +680,15 @@
.process_op = (callback_process_op_t)nfs4_callback_recall,
.decode_args = (callback_decode_arg_t)decode_recall_args,
.res_maxsize = CB_OP_RECALL_RES_MAXSZ,
- }
+ },
+#if defined(CONFIG_NFS_V4_1)
+ [OP_CB_SEQUENCE] = {
+ .process_op = (callback_process_op_t)nfs4_callback_sequence,
+ .decode_args = (callback_decode_arg_t)decode_cb_sequence_args,
+ .encode_res = (callback_encode_res_t)encode_cb_sequence_res,
+ .res_maxsize = CB_OP_SEQUENCE_RES_MAXSZ,
+ },
+#endif /* CONFIG_NFS_V4_1 */
};
/*
diff --git a/fs/nfs/client.c b/fs/nfs/client.c
index 75c9cd2..c2d0616 100644
--- a/fs/nfs/client.c
+++ b/fs/nfs/client.c
@@ -37,6 +37,7 @@
#include <linux/in6.h>
#include <net/ipv6.h>
#include <linux/nfs_xdr.h>
+#include <linux/sunrpc/bc_xprt.h>
#include <asm/system.h>
@@ -102,6 +103,7 @@
size_t addrlen;
const struct nfs_rpc_ops *rpc_ops;
int proto;
+ u32 minorversion;
};
/*
@@ -114,18 +116,13 @@
{
struct nfs_client *clp;
struct rpc_cred *cred;
+ int err = -ENOMEM;
if ((clp = kzalloc(sizeof(*clp), GFP_KERNEL)) == NULL)
goto error_0;
clp->rpc_ops = cl_init->rpc_ops;
- if (cl_init->rpc_ops->version == 4) {
- if (nfs_callback_up() < 0)
- goto error_2;
- __set_bit(NFS_CS_CALLBACK, &clp->cl_res_state);
- }
-
atomic_set(&clp->cl_count, 1);
clp->cl_cons_state = NFS_CS_INITING;
@@ -133,9 +130,10 @@
clp->cl_addrlen = cl_init->addrlen;
if (cl_init->hostname) {
+ err = -ENOMEM;
clp->cl_hostname = kstrdup(cl_init->hostname, GFP_KERNEL);
if (!clp->cl_hostname)
- goto error_3;
+ goto error_cleanup;
}
INIT_LIST_HEAD(&clp->cl_superblocks);
@@ -150,6 +148,7 @@
rpc_init_wait_queue(&clp->cl_rpcwaitq, "NFS client");
clp->cl_boot_time = CURRENT_TIME;
clp->cl_state = 1 << NFS4CLNT_LEASE_EXPIRED;
+ clp->cl_minorversion = cl_init->minorversion;
#endif
cred = rpc_lookup_machine_cred();
if (!IS_ERR(cred))
@@ -159,13 +158,10 @@
return clp;
-error_3:
- if (__test_and_clear_bit(NFS_CS_CALLBACK, &clp->cl_res_state))
- nfs_callback_down();
-error_2:
+error_cleanup:
kfree(clp);
error_0:
- return NULL;
+ return ERR_PTR(err);
}
static void nfs4_shutdown_client(struct nfs_client *clp)
@@ -182,12 +178,42 @@
}
/*
+ * Destroy the NFS4 callback service
+ */
+static void nfs4_destroy_callback(struct nfs_client *clp)
+{
+#ifdef CONFIG_NFS_V4
+ if (__test_and_clear_bit(NFS_CS_CALLBACK, &clp->cl_res_state))
+ nfs_callback_down(clp->cl_minorversion);
+#endif /* CONFIG_NFS_V4 */
+}
+
+/*
+ * Clears/puts all minor version specific parts from an nfs_client struct
+ * reverting it to minorversion 0.
+ */
+static void nfs4_clear_client_minor_version(struct nfs_client *clp)
+{
+#ifdef CONFIG_NFS_V4_1
+ if (nfs4_has_session(clp)) {
+ nfs4_destroy_session(clp->cl_session);
+ clp->cl_session = NULL;
+ }
+
+ clp->cl_call_sync = _nfs4_call_sync;
+#endif /* CONFIG_NFS_V4_1 */
+
+ nfs4_destroy_callback(clp);
+}
+
+/*
* Destroy a shared client record
*/
static void nfs_free_client(struct nfs_client *clp)
{
dprintk("--> nfs_free_client(%u)\n", clp->rpc_ops->version);
+ nfs4_clear_client_minor_version(clp);
nfs4_shutdown_client(clp);
nfs_fscache_release_client_cookie(clp);
@@ -196,9 +222,6 @@
if (!IS_ERR(clp->cl_rpcclient))
rpc_shutdown_client(clp->cl_rpcclient);
- if (__test_and_clear_bit(NFS_CS_CALLBACK, &clp->cl_res_state))
- nfs_callback_down();
-
if (clp->cl_machine_cred != NULL)
put_rpccred(clp->cl_machine_cred);
@@ -347,7 +370,8 @@
struct sockaddr *clap = (struct sockaddr *)&clp->cl_addr;
/* Don't match clients that failed to initialise properly */
- if (clp->cl_cons_state != NFS_CS_READY)
+ if (!(clp->cl_cons_state == NFS_CS_READY ||
+ clp->cl_cons_state == NFS_CS_SESSION_INITING))
continue;
/* Different NFS versions cannot share the same nfs_client */
@@ -420,7 +444,9 @@
if (clp->cl_proto != data->proto)
continue;
-
+ /* Match nfsv4 minorversion */
+ if (clp->cl_minorversion != data->minorversion)
+ continue;
/* Match the full socket address */
if (!nfs_sockaddr_cmp(sap, clap))
continue;
@@ -456,9 +482,10 @@
spin_unlock(&nfs_client_lock);
new = nfs_alloc_client(cl_init);
- } while (new);
+ } while (!IS_ERR(new));
- return ERR_PTR(-ENOMEM);
+ dprintk("--> nfs_get_client() = %ld [failed]\n", PTR_ERR(new));
+ return new;
/* install a new client and return with it unready */
install_client:
@@ -478,7 +505,7 @@
nfs_free_client(new);
error = wait_event_killable(nfs_client_active_wq,
- clp->cl_cons_state != NFS_CS_INITING);
+ clp->cl_cons_state < NFS_CS_INITING);
if (error < 0) {
nfs_put_client(clp);
return ERR_PTR(-ERESTARTSYS);
@@ -499,13 +526,29 @@
/*
* Mark a server as ready or failed
*/
-static void nfs_mark_client_ready(struct nfs_client *clp, int state)
+void nfs_mark_client_ready(struct nfs_client *clp, int state)
{
clp->cl_cons_state = state;
wake_up_all(&nfs_client_active_wq);
}
/*
+ * With sessions, the client is not marked ready until after a
+ * successful EXCHANGE_ID and CREATE_SESSION.
+ *
+ * Map cl_cons_state errors to EPROTONOSUPPORT to indicate that
+ * other versions of NFS can be tried.
+ */
+int nfs4_check_client_ready(struct nfs_client *clp)
+{
+ if (!nfs4_has_session(clp))
+ return 0;
+ if (clp->cl_cons_state < NFS_CS_READY)
+ return -EPROTONOSUPPORT;
+ return 0;
+}
+
+/*
* Initialise the timeout values for a connection
*/
static void nfs_init_timeout_values(struct rpc_timeout *to, int proto,
@@ -1050,6 +1093,61 @@
#ifdef CONFIG_NFS_V4
/*
+ * Initialize the NFS4 callback service
+ */
+static int nfs4_init_callback(struct nfs_client *clp)
+{
+ int error;
+
+ if (clp->rpc_ops->version == 4) {
+ if (nfs4_has_session(clp)) {
+ error = xprt_setup_backchannel(
+ clp->cl_rpcclient->cl_xprt,
+ NFS41_BC_MIN_CALLBACKS);
+ if (error < 0)
+ return error;
+ }
+
+ error = nfs_callback_up(clp->cl_minorversion,
+ clp->cl_rpcclient->cl_xprt);
+ if (error < 0) {
+ dprintk("%s: failed to start callback. Error = %d\n",
+ __func__, error);
+ return error;
+ }
+ __set_bit(NFS_CS_CALLBACK, &clp->cl_res_state);
+ }
+ return 0;
+}
+
+/*
+ * Initialize the minor version specific parts of an NFS4 client record
+ */
+static int nfs4_init_client_minor_version(struct nfs_client *clp)
+{
+ clp->cl_call_sync = _nfs4_call_sync;
+
+#if defined(CONFIG_NFS_V4_1)
+ if (clp->cl_minorversion) {
+ struct nfs4_session *session = NULL;
+ /*
+ * Create the session and mark it expired.
+ * When a SEQUENCE operation encounters the expired session
+ * it will do session recovery to initialize it.
+ */
+ session = nfs4_alloc_session(clp);
+ if (!session)
+ return -ENOMEM;
+
+ clp->cl_session = session;
+ clp->cl_call_sync = _nfs4_call_sync_session;
+ }
+#endif /* CONFIG_NFS_V4_1 */
+
+ return nfs4_init_callback(clp);
+}
+
+/*
* Initialise an NFS4 client record
*/
static int nfs4_init_client(struct nfs_client *clp,
@@ -1083,7 +1181,12 @@
}
__set_bit(NFS_CS_IDMAP, &clp->cl_res_state);
- nfs_mark_client_ready(clp, NFS_CS_READY);
+ error = nfs4_init_client_minor_version(clp);
+ if (error < 0)
+ goto error;
+
+ if (!nfs4_has_session(clp))
+ nfs_mark_client_ready(clp, NFS_CS_READY);
return 0;
error:
@@ -1101,7 +1204,8 @@
const size_t addrlen,
const char *ip_addr,
rpc_authflavor_t authflavour,
- int proto, const struct rpc_timeout *timeparms)
+ int proto, const struct rpc_timeout *timeparms,
+ u32 minorversion)
{
struct nfs_client_initdata cl_init = {
.hostname = hostname,
@@ -1109,6 +1213,7 @@
.addrlen = addrlen,
.rpc_ops = &nfs_v4_clientops,
.proto = proto,
+ .minorversion = minorversion,
};
struct nfs_client *clp;
int error;
@@ -1138,6 +1243,36 @@
}
/*
+ * Initialize a session.
+ * Note: save the mount rsize and wsize for create_server negotiation.
+ */
+static void nfs4_init_session(struct nfs_client *clp,
+ unsigned int wsize, unsigned int rsize)
+{
+#if defined(CONFIG_NFS_V4_1)
+ if (nfs4_has_session(clp)) {
+ clp->cl_session->fc_attrs.max_rqst_sz = wsize;
+ clp->cl_session->fc_attrs.max_resp_sz = rsize;
+ }
+#endif /* CONFIG_NFS_V4_1 */
+}
+
+/*
+ * Session has been established, and the client marked ready.
+ * Set the mount rsize and wsize with negotiated fore channel
+ * attributes, which will be bounds-checked in nfs_server_set_fsinfo.
+ */
+static void nfs4_session_set_rwsize(struct nfs_server *server)
+{
+#ifdef CONFIG_NFS_V4_1
+ if (!nfs4_has_session(server->nfs_client))
+ return;
+ server->rsize = server->nfs_client->cl_session->fc_attrs.max_resp_sz;
+ server->wsize = server->nfs_client->cl_session->fc_attrs.max_rqst_sz;
+#endif /* CONFIG_NFS_V4_1 */
+}
+
+/*
* Create a version 4 volume record
*/
static int nfs4_init_server(struct nfs_server *server,
@@ -1164,7 +1299,8 @@
data->client_address,
data->auth_flavors[0],
data->nfs_server.protocol,
- &timeparms);
+ &timeparms,
+ data->minorversion);
if (error < 0)
goto error;
@@ -1214,6 +1350,8 @@
BUG_ON(!server->nfs_client->rpc_ops);
BUG_ON(!server->nfs_client->rpc_ops->file_inode_ops);
+ nfs4_init_session(server->nfs_client, server->wsize, server->rsize);
+
/* Probe the root fh to retrieve its FSID */
error = nfs4_path_walk(server, mntfh, data->nfs_server.export_path);
if (error < 0)
@@ -1224,6 +1362,8 @@
(unsigned long long) server->fsid.minor);
dprintk("Mount FH: %d\n", mntfh->size);
+ nfs4_session_set_rwsize(server);
+
error = nfs_probe_fsinfo(server, mntfh, &fattr);
if (error < 0)
goto error;
@@ -1282,7 +1422,8 @@
parent_client->cl_ipaddr,
data->authflavor,
parent_server->client->cl_xprt->prot,
- parent_server->client->cl_timeout);
+ parent_server->client->cl_timeout,
+ parent_client->cl_minorversion);
if (error < 0)
goto error;
diff --git a/fs/nfs/delegation.c b/fs/nfs/delegation.c
index 968225a..af05b91 100644
--- a/fs/nfs/delegation.c
+++ b/fs/nfs/delegation.c
@@ -68,29 +68,26 @@
{
struct inode *inode = state->inode;
struct file_lock *fl;
- int status;
+ int status = 0;
+ if (inode->i_flock == NULL)
+ goto out;
+
+ /* Protect inode->i_flock using the BKL */
+ lock_kernel();
for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) {
if (!(fl->fl_flags & (FL_POSIX|FL_FLOCK)))
continue;
if (nfs_file_open_context(fl->fl_file) != ctx)
continue;
+ unlock_kernel();
status = nfs4_lock_delegation_recall(state, fl);
- if (status >= 0)
- continue;
- switch (status) {
- default:
- printk(KERN_ERR "%s: unhandled error %d.\n",
- __func__, status);
- case -NFS4ERR_EXPIRED:
- /* kill_proc(fl->fl_pid, SIGLOST, 1); */
- case -NFS4ERR_STALE_CLIENTID:
- nfs4_schedule_state_recovery(NFS_SERVER(inode)->nfs_client);
- goto out_err;
- }
+ if (status < 0)
+ goto out;
+ lock_kernel();
}
- return 0;
-out_err:
+ unlock_kernel();
+out:
return status;
}
@@ -268,7 +265,10 @@
struct nfs_inode *nfsi = NFS_I(inode);
nfs_msync_inode(inode);
- /* Guard against new delegated open calls */
+ /*
+ * Guard against new delegated open/lock/unlock calls and against
+ * state recovery
+ */
down_write(&nfsi->rwsem);
nfs_delegation_claim_opens(inode, &delegation->stateid);
up_write(&nfsi->rwsem);
diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c
index 08f6b04..489fc01 100644
--- a/fs/nfs/direct.c
+++ b/fs/nfs/direct.c
@@ -259,6 +259,9 @@
}
static const struct rpc_call_ops nfs_read_direct_ops = {
+#if defined(CONFIG_NFS_V4_1)
+ .rpc_call_prepare = nfs_read_prepare,
+#endif /* CONFIG_NFS_V4_1 */
.rpc_call_done = nfs_direct_read_result,
.rpc_release = nfs_direct_read_release,
};
@@ -535,6 +538,9 @@
}
static const struct rpc_call_ops nfs_commit_direct_ops = {
+#if defined(CONFIG_NFS_V4_1)
+ .rpc_call_prepare = nfs_write_prepare,
+#endif /* CONFIG_NFS_V4_1 */
.rpc_call_done = nfs_direct_commit_result,
.rpc_release = nfs_direct_commit_release,
};
@@ -673,6 +679,9 @@
}
static const struct rpc_call_ops nfs_write_direct_ops = {
+#if defined(CONFIG_NFS_V4_1)
+ .rpc_call_prepare = nfs_write_prepare,
+#endif /* CONFIG_NFS_V4_1 */
.rpc_call_done = nfs_direct_write_result,
.rpc_release = nfs_direct_write_release,
};
diff --git a/fs/nfs/file.c b/fs/nfs/file.c
index ec7e27d..0055b81 100644
--- a/fs/nfs/file.c
+++ b/fs/nfs/file.c
@@ -48,6 +48,9 @@
size_t count, unsigned int flags);
static ssize_t nfs_file_read(struct kiocb *, const struct iovec *iov,
unsigned long nr_segs, loff_t pos);
+static ssize_t nfs_file_splice_write(struct pipe_inode_info *pipe,
+ struct file *filp, loff_t *ppos,
+ size_t count, unsigned int flags);
static ssize_t nfs_file_write(struct kiocb *, const struct iovec *iov,
unsigned long nr_segs, loff_t pos);
static int nfs_file_flush(struct file *, fl_owner_t id);
@@ -73,6 +76,7 @@
.lock = nfs_lock,
.flock = nfs_flock,
.splice_read = nfs_file_splice_read,
+ .splice_write = nfs_file_splice_write,
.check_flags = nfs_check_flags,
.setlease = nfs_setlease,
};
@@ -587,12 +591,38 @@
goto out;
}
+static ssize_t nfs_file_splice_write(struct pipe_inode_info *pipe,
+ struct file *filp, loff_t *ppos,
+ size_t count, unsigned int flags)
+{
+ struct dentry *dentry = filp->f_path.dentry;
+ struct inode *inode = dentry->d_inode;
+ ssize_t ret;
+
+ dprintk("NFS splice_write(%s/%s, %lu@%llu)\n",
+ dentry->d_parent->d_name.name, dentry->d_name.name,
+ (unsigned long) count, (unsigned long long) *ppos);
+
+ /*
+ * The combination of splice and an O_APPEND destination is disallowed.
+ */
+
+ nfs_add_stats(inode, NFSIOS_NORMALWRITTENBYTES, count);
+
+ ret = generic_file_splice_write(pipe, filp, ppos, count, flags);
+ if (ret >= 0 && nfs_need_sync_write(filp, inode)) {
+ int err = nfs_do_fsync(nfs_file_open_context(filp), inode);
+ if (err < 0)
+ ret = err;
+ }
+ return ret;
+}
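
For context, a user-space sketch of what exercises this path: splicing pipe data into a file on an NFS mount. The mount path is a placeholder, the producer of the pipe data is assumed, and the trailing fsync() merely stands in for the sync-write handling above; no O_APPEND is used, since splice to an O_APPEND destination is disallowed.

	#define _GNU_SOURCE
	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	/*
	 * Splice everything from stdin (which must be a pipe, e.g.
	 * "producer | ./splice_out") into a file on an NFS mount.
	 */
	int main(void)
	{
		int fd = open("/mnt/nfs/out.dat",
			      O_WRONLY | O_CREAT | O_TRUNC, 0644);
		ssize_t n;

		if (fd < 0) {
			perror("open");
			return 1;
		}
		while ((n = splice(STDIN_FILENO, NULL, fd, NULL, 65536, 0)) > 0)
			;		/* file offset advances automatically */
		if (n < 0)
			perror("splice");
		if (fsync(fd) < 0)	/* push data out, as the sync case above does */
			perror("fsync");
		close(fd);
		return n < 0 ? 1 : 0;
	}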
+
static int do_getlk(struct file *filp, int cmd, struct file_lock *fl)
{
struct inode *inode = filp->f_mapping->host;
int status = 0;
- lock_kernel();
/* Try local locking first */
posix_test_lock(filp, fl);
if (fl->fl_type != F_UNLCK) {
@@ -608,7 +638,6 @@
status = NFS_PROTO(inode)->lock(filp, cmd, fl);
out:
- unlock_kernel();
return status;
out_noconflict:
fl->fl_type = F_UNLCK;
@@ -650,13 +679,11 @@
* If we're signalled while cleaning up locks on process exit, we
* still need to complete the unlock.
*/
- lock_kernel();
/* Use local locking if mounted with "-onolock" */
if (!(NFS_SERVER(inode)->flags & NFS_MOUNT_NONLM))
status = NFS_PROTO(inode)->lock(filp, cmd, fl);
else
status = do_vfs_lock(filp, fl);
- unlock_kernel();
return status;
}
@@ -673,13 +700,11 @@
if (status != 0)
goto out;
- lock_kernel();
/* Use local locking if mounted with "-onolock" */
if (!(NFS_SERVER(inode)->flags & NFS_MOUNT_NONLM))
status = NFS_PROTO(inode)->lock(filp, cmd, fl);
else
status = do_vfs_lock(filp, fl);
- unlock_kernel();
if (status < 0)
goto out;
/*
diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
index e4d6a83..7dd90a6 100644
--- a/fs/nfs/internal.h
+++ b/fs/nfs/internal.h
@@ -2,6 +2,7 @@
* NFS internal definitions
*/
+#include "nfs4_fs.h"
#include <linux/mount.h>
#include <linux/security.h>
@@ -17,6 +18,18 @@
*/
#define NFS_MAX_READAHEAD (RPC_DEF_SLOT_TABLE - 1)
+/*
+ * Determine if sessions are in use.
+ */
+static inline int nfs4_has_session(const struct nfs_client *clp)
+{
+#ifdef CONFIG_NFS_V4_1
+ if (clp->cl_session)
+ return 1;
+#endif /* CONFIG_NFS_V4_1 */
+ return 0;
+}
+
struct nfs_clone_mount {
const struct super_block *sb;
const struct dentry *dentry;
@@ -30,6 +43,12 @@
};
/*
+ * Note: RFC 1813 doesn't limit the number of auth flavors that
+ * a server can return, so make something up.
+ */
+#define NFS_MAX_SECFLAVORS (12)
+
+/*
* In-kernel mount arguments
*/
struct nfs_parsed_mount_data {
@@ -44,6 +63,7 @@
unsigned int auth_flavor_len;
rpc_authflavor_t auth_flavors[1];
char *client_address;
+ unsigned int minorversion;
char *fscache_uniq;
struct {
@@ -77,6 +97,8 @@
unsigned short protocol;
struct nfs_fh *fh;
int noresvport;
+ unsigned int *auth_flav_len;
+ rpc_authflavor_t *auth_flavs;
};
extern int nfs_mount(struct nfs_mount_request *info);
@@ -99,6 +121,8 @@
extern struct nfs_server *nfs_clone_server(struct nfs_server *,
struct nfs_fh *,
struct nfs_fattr *);
+extern void nfs_mark_client_ready(struct nfs_client *clp, int state);
+extern int nfs4_check_client_ready(struct nfs_client *clp);
#ifdef CONFIG_PROC_FS
extern int __init nfs_fs_proc_init(void);
extern void nfs_fs_proc_exit(void);
@@ -146,6 +170,20 @@
extern struct rpc_procinfo nfs3_procedures[];
extern __be32 *nfs3_decode_dirent(__be32 *, struct nfs_entry *, int);
+/* nfs4proc.c */
+static inline void nfs4_restart_rpc(struct rpc_task *task,
+ const struct nfs_client *clp)
+{
+#ifdef CONFIG_NFS_V4_1
+ if (nfs4_has_session(clp) &&
+ test_bit(NFS4CLNT_SESSION_SETUP, &clp->cl_state)) {
+ rpc_restart_call_prepare(task);
+ return;
+ }
+#endif /* CONFIG_NFS_V4_1 */
+ rpc_restart_call(task);
+}
+
/* nfs4xdr.c */
#ifdef CONFIG_NFS_V4
extern __be32 *nfs4_decode_dirent(__be32 *p, struct nfs_entry *entry, int plus);
@@ -205,6 +243,38 @@
const char *path);
#endif
+/* read.c */
+extern void nfs_read_prepare(struct rpc_task *task, void *calldata);
+
+/* write.c */
+extern void nfs_write_prepare(struct rpc_task *task, void *calldata);
+
+/* nfs4proc.c */
+extern int _nfs4_call_sync(struct nfs_server *server,
+ struct rpc_message *msg,
+ struct nfs4_sequence_args *args,
+ struct nfs4_sequence_res *res,
+ int cache_reply);
+extern int _nfs4_call_sync_session(struct nfs_server *server,
+ struct rpc_message *msg,
+ struct nfs4_sequence_args *args,
+ struct nfs4_sequence_res *res,
+ int cache_reply);
+
+#ifdef CONFIG_NFS_V4_1
+extern void nfs41_sequence_free_slot(const struct nfs_client *,
+ struct nfs4_sequence_res *res);
+#endif /* CONFIG_NFS_V4_1 */
+
+static inline void nfs4_sequence_free_slot(const struct nfs_client *clp,
+ struct nfs4_sequence_res *res)
+{
+#ifdef CONFIG_NFS_V4_1
+ if (nfs4_has_session(clp))
+ nfs41_sequence_free_slot(clp, res);
+#endif /* CONFIG_NFS_V4_1 */
+}
+
/*
* Determine the device name as a string
*/
diff --git a/fs/nfs/mount_clnt.c b/fs/nfs/mount_clnt.c
index ca905a5..38ef9ea 100644
--- a/fs/nfs/mount_clnt.c
+++ b/fs/nfs/mount_clnt.c
@@ -20,8 +20,116 @@
# define NFSDBG_FACILITY NFSDBG_MOUNT
#endif
+/*
+ * Defined by RFC 1094, section A.3; and RFC 1813, section 5.1.4
+ */
+#define MNTPATHLEN (1024)
+
+/*
+ * XDR data type sizes
+ */
+#define encode_dirpath_sz (1 + XDR_QUADLEN(MNTPATHLEN))
+#define MNT_status_sz (1)
+#define MNT_fhs_status_sz (1)
+#define MNT_fhandle_sz XDR_QUADLEN(NFS2_FHSIZE)
+#define MNT_fhandle3_sz (1 + XDR_QUADLEN(NFS3_FHSIZE))
+#define MNT_authflav3_sz (1 + NFS_MAX_SECFLAVORS)
+
+/*
+ * XDR argument and result sizes
+ */
+#define MNT_enc_dirpath_sz encode_dirpath_sz
+#define MNT_dec_mountres_sz (MNT_status_sz + MNT_fhandle_sz)
+#define MNT_dec_mountres3_sz (MNT_status_sz + MNT_fhandle_sz + \
+ MNT_authflav3_sz)
+
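Side note on the arithmetic: these sizes count 4-byte XDR words, not bytes. A standalone check of the totals, re-deriving the quad-length rounding locally rather than pulling in the sunrpc headers, and assuming the usual 32/64-byte v2/v3 file handle limits:

	#include <stdio.h>

	/* Round a byte length up to 4-byte XDR words (the XDR_QUADLEN idea). */
	#define QUADLEN(len)	(((len) + 3) >> 2)

	#define V2_FHSIZE	32	/* assumed NFS2_FHSIZE */
	#define V3_FHSIZE	64	/* assumed NFS3_FHSIZE */
	#define MAX_SECFLAVORS	12	/* NFS_MAX_SECFLAVORS from this patch */

	int main(void)
	{
		unsigned int status	= 1;			/* MNT_status_sz */
		unsigned int fhandle	= QUADLEN(V2_FHSIZE);	/* MNT_fhandle_sz */
		unsigned int fhandle3	= 1 + QUADLEN(V3_FHSIZE);
		unsigned int authflav3	= 1 + MAX_SECFLAVORS;

		printf("MNT reply:  %u words\n", status + fhandle);
		printf("MNT3 reply: %u words\n", status + fhandle + authflav3);
		printf("(a v3 file handle alone is up to %u words)\n", fhandle3);
		return 0;
	}
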
+/*
+ * Defined by RFC 1094, section A.5
+ */
+enum {
+ MOUNTPROC_NULL = 0,
+ MOUNTPROC_MNT = 1,
+ MOUNTPROC_DUMP = 2,
+ MOUNTPROC_UMNT = 3,
+ MOUNTPROC_UMNTALL = 4,
+ MOUNTPROC_EXPORT = 5,
+};
+
+/*
+ * Defined by RFC 1813, section 5.2
+ */
+enum {
+ MOUNTPROC3_NULL = 0,
+ MOUNTPROC3_MNT = 1,
+ MOUNTPROC3_DUMP = 2,
+ MOUNTPROC3_UMNT = 3,
+ MOUNTPROC3_UMNTALL = 4,
+ MOUNTPROC3_EXPORT = 5,
+};
+
static struct rpc_program mnt_program;
+/*
+ * Defined by OpenGroup XNFS Version 3W, chapter 8
+ */
+enum mountstat {
+ MNT_OK = 0,
+ MNT_EPERM = 1,
+ MNT_ENOENT = 2,
+ MNT_EACCES = 13,
+ MNT_EINVAL = 22,
+};
+
+static struct {
+ u32 status;
+ int errno;
+} mnt_errtbl[] = {
+ { .status = MNT_OK, .errno = 0, },
+ { .status = MNT_EPERM, .errno = -EPERM, },
+ { .status = MNT_ENOENT, .errno = -ENOENT, },
+ { .status = MNT_EACCES, .errno = -EACCES, },
+ { .status = MNT_EINVAL, .errno = -EINVAL, },
+};
+
+/*
+ * Defined by RFC 1813, section 5.1.5
+ */
+enum mountstat3 {
+ MNT3_OK = 0, /* no error */
+ MNT3ERR_PERM = 1, /* Not owner */
+ MNT3ERR_NOENT = 2, /* No such file or directory */
+ MNT3ERR_IO = 5, /* I/O error */
+ MNT3ERR_ACCES = 13, /* Permission denied */
+ MNT3ERR_NOTDIR = 20, /* Not a directory */
+ MNT3ERR_INVAL = 22, /* Invalid argument */
+ MNT3ERR_NAMETOOLONG = 63, /* Filename too long */
+ MNT3ERR_NOTSUPP = 10004, /* Operation not supported */
+ MNT3ERR_SERVERFAULT = 10006, /* A failure on the server */
+};
+
+static struct {
+ u32 status;
+ int errno;
+} mnt3_errtbl[] = {
+ { .status = MNT3_OK, .errno = 0, },
+ { .status = MNT3ERR_PERM, .errno = -EPERM, },
+ { .status = MNT3ERR_NOENT, .errno = -ENOENT, },
+ { .status = MNT3ERR_IO, .errno = -EIO, },
+ { .status = MNT3ERR_ACCES, .errno = -EACCES, },
+ { .status = MNT3ERR_NOTDIR, .errno = -ENOTDIR, },
+ { .status = MNT3ERR_INVAL, .errno = -EINVAL, },
+ { .status = MNT3ERR_NAMETOOLONG, .errno = -ENAMETOOLONG, },
+ { .status = MNT3ERR_NOTSUPP, .errno = -ENOTSUPP, },
+ { .status = MNT3ERR_SERVERFAULT, .errno = -ESERVERFAULT, },
+};
+
+struct mountres {
+ int errno;
+ struct nfs_fh *fh;
+ unsigned int *auth_count;
+ rpc_authflavor_t *auth_flavors;
+};
+
struct mnt_fhstatus {
u32 status;
struct nfs_fh *fh;
@@ -35,8 +143,10 @@
*/
int nfs_mount(struct nfs_mount_request *info)
{
- struct mnt_fhstatus result = {
- .fh = info->fh
+ struct mountres result = {
+ .fh = info->fh,
+ .auth_count = info->auth_flav_len,
+ .auth_flavors = info->auth_flavs,
};
struct rpc_message msg = {
.rpc_argp = info->dirpath,
@@ -68,14 +178,14 @@
if (info->version == NFS_MNT3_VERSION)
msg.rpc_proc = &mnt_clnt->cl_procinfo[MOUNTPROC3_MNT];
else
- msg.rpc_proc = &mnt_clnt->cl_procinfo[MNTPROC_MNT];
+ msg.rpc_proc = &mnt_clnt->cl_procinfo[MOUNTPROC_MNT];
status = rpc_call_sync(mnt_clnt, &msg, 0);
rpc_shutdown_client(mnt_clnt);
if (status < 0)
goto out_call_err;
- if (result.status != 0)
+ if (result.errno != 0)
goto out_mnt_err;
dprintk("NFS: MNT request succeeded\n");
@@ -86,72 +196,215 @@
out_clnt_err:
status = PTR_ERR(mnt_clnt);
- dprintk("NFS: failed to create RPC client, status=%d\n", status);
+ dprintk("NFS: failed to create MNT RPC client, status=%d\n", status);
goto out;
out_call_err:
- dprintk("NFS: failed to start MNT request, status=%d\n", status);
+ dprintk("NFS: MNT request failed, status=%d\n", status);
goto out;
out_mnt_err:
- dprintk("NFS: MNT server returned result %d\n", result.status);
- status = nfs_stat_to_errno(result.status);
+ dprintk("NFS: MNT server returned result %d\n", result.errno);
+ status = result.errno;
goto out;
}
/*
* XDR encode/decode functions for MOUNT
*/
-static int xdr_encode_dirpath(struct rpc_rqst *req, __be32 *p,
- const char *path)
-{
- p = xdr_encode_string(p, path);
- req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
+static int encode_mntdirpath(struct xdr_stream *xdr, const char *pathname)
+{
+ const u32 pathname_len = strlen(pathname);
+ __be32 *p;
+
+ if (unlikely(pathname_len > MNTPATHLEN))
+ return -EIO;
+
+ p = xdr_reserve_space(xdr, sizeof(u32) + pathname_len);
+ if (unlikely(p == NULL))
+ return -EIO;
+ xdr_encode_opaque(p, pathname, pathname_len);
+
return 0;
}
-static int xdr_decode_fhstatus(struct rpc_rqst *req, __be32 *p,
- struct mnt_fhstatus *res)
+static int mnt_enc_dirpath(struct rpc_rqst *req, __be32 *p,
+ const char *dirpath)
+{
+ struct xdr_stream xdr;
+
+ xdr_init_encode(&xdr, &req->rq_snd_buf, p);
+ return encode_mntdirpath(&xdr, dirpath);
+}
+
+/*
+ * RFC 1094: "A non-zero status indicates some sort of error. In this
+ * case, the status is a UNIX error number." This can be problematic
+ * if the server and client use different errno values for the same
+ * error.
+ *
+ * However, the OpenGroup XNFS spec provides a simple mapping that is
+ * independent of local errno values on the server and the client.
+ */
+static int decode_status(struct xdr_stream *xdr, struct mountres *res)
+{
+ unsigned int i;
+ u32 status;
+ __be32 *p;
+
+ p = xdr_inline_decode(xdr, sizeof(status));
+ if (unlikely(p == NULL))
+ return -EIO;
+ status = ntohl(*p);
+
+ for (i = 0; i < ARRAY_SIZE(mnt_errtbl); i++) {
+ if (mnt_errtbl[i].status == status) {
+ res->errno = mnt_errtbl[i].errno;
+ return 0;
+ }
+ }
+
+ dprintk("NFS: unrecognized MNT status code: %u\n", status);
+ res->errno = -EACCES;
+ return 0;
+}
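
The same idea outside the kernel: a minimal table-driven mapper from on-the-wire MNT status codes to local errno values. The table contents are copied from mnt_errtbl above, and anything unrecognized falls back to -EACCES, as in decode_status(); the member is named errno_val only because user-space <errno.h> reserves errno.

	#include <errno.h>
	#include <stdio.h>

	#define ARRAY_SIZE(a)	(sizeof(a) / sizeof((a)[0]))

	static const struct {
		unsigned int status;
		int errno_val;
	} mnt_errtbl[] = {
		{ 0,  0       },	/* MNT_OK */
		{ 1,  -EPERM  },	/* MNT_EPERM */
		{ 2,  -ENOENT },	/* MNT_ENOENT */
		{ 13, -EACCES },	/* MNT_EACCES */
		{ 22, -EINVAL },	/* MNT_EINVAL */
	};

	/* Map a wire status to a local errno; note the i < ARRAY_SIZE() bound. */
	static int mnt_status_to_errno(unsigned int status)
	{
		unsigned int i;

		for (i = 0; i < ARRAY_SIZE(mnt_errtbl); i++)
			if (mnt_errtbl[i].status == status)
				return mnt_errtbl[i].errno_val;
		return -EACCES;		/* unrecognized status code */
	}

	int main(void)
	{
		printf("%d %d %d\n", mnt_status_to_errno(0),
		       mnt_status_to_errno(13), mnt_status_to_errno(99));
		return 0;
	}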
+
+static int decode_fhandle(struct xdr_stream *xdr, struct mountres *res)
{
struct nfs_fh *fh = res->fh;
+ __be32 *p;
- if ((res->status = ntohl(*p++)) == 0) {
- fh->size = NFS2_FHSIZE;
- memcpy(fh->data, p, NFS2_FHSIZE);
- }
+ p = xdr_inline_decode(xdr, NFS2_FHSIZE);
+ if (unlikely(p == NULL))
+ return -EIO;
+
+ fh->size = NFS2_FHSIZE;
+ memcpy(fh->data, p, NFS2_FHSIZE);
return 0;
}
-static int xdr_decode_fhstatus3(struct rpc_rqst *req, __be32 *p,
- struct mnt_fhstatus *res)
+static int mnt_dec_mountres(struct rpc_rqst *req, __be32 *p,
+ struct mountres *res)
+{
+ struct xdr_stream xdr;
+ int status;
+
+ xdr_init_decode(&xdr, &req->rq_rcv_buf, p);
+
+ status = decode_status(&xdr, res);
+ if (unlikely(status != 0 || res->errno != 0))
+ return status;
+ return decode_fhandle(&xdr, res);
+}
+
+static int decode_fhs_status(struct xdr_stream *xdr, struct mountres *res)
+{
+ unsigned int i;
+ u32 status;
+ __be32 *p;
+
+ p = xdr_inline_decode(xdr, sizeof(status));
+ if (unlikely(p == NULL))
+ return -EIO;
+ status = ntohl(*p);
+
+ for (i = 0; i < ARRAY_SIZE(mnt3_errtbl); i++) {
+ if (mnt3_errtbl[i].status == status) {
+ res->errno = mnt3_errtbl[i].errno;
+ return 0;
+ }
+ }
+
+ dprintk("NFS: unrecognized MNT3 status code: %u\n", status);
+ res->errno = -EACCES;
+ return 0;
+}
+
+static int decode_fhandle3(struct xdr_stream *xdr, struct mountres *res)
{
struct nfs_fh *fh = res->fh;
- unsigned size;
+ u32 size;
+ __be32 *p;
- if ((res->status = ntohl(*p++)) == 0) {
- size = ntohl(*p++);
- if (size <= NFS3_FHSIZE && size != 0) {
- fh->size = size;
- memcpy(fh->data, p, size);
- } else
- res->status = -EBADHANDLE;
- }
+ p = xdr_inline_decode(xdr, sizeof(size));
+ if (unlikely(p == NULL))
+ return -EIO;
+
+ size = ntohl(*p++);
+ if (size > NFS3_FHSIZE || size == 0)
+ return -EIO;
+
+ p = xdr_inline_decode(xdr, size);
+ if (unlikely(p == NULL))
+ return -EIO;
+
+ fh->size = size;
+ memcpy(fh->data, p, size);
return 0;
}
-#define MNT_dirpath_sz (1 + 256)
-#define MNT_fhstatus_sz (1 + 8)
-#define MNT_fhstatus3_sz (1 + 16)
+static int decode_auth_flavors(struct xdr_stream *xdr, struct mountres *res)
+{
+ rpc_authflavor_t *flavors = res->auth_flavors;
+ unsigned int *count = res->auth_count;
+ u32 entries, i;
+ __be32 *p;
+
+ if (*count == 0)
+ return 0;
+
+ p = xdr_inline_decode(xdr, sizeof(entries));
+ if (unlikely(p == NULL))
+ return -EIO;
+ entries = ntohl(*p);
+ dprintk("NFS: received %u auth flavors\n", entries);
+ if (entries > NFS_MAX_SECFLAVORS)
+ entries = NFS_MAX_SECFLAVORS;
+
+ p = xdr_inline_decode(xdr, sizeof(u32) * entries);
+ if (unlikely(p == NULL))
+ return -EIO;
+
+ if (entries > *count)
+ entries = *count;
+
+ for (i = 0; i < entries; i++) {
+ flavors[i] = ntohl(*p++);
+ dprintk("NFS:\tflavor %u: %d\n", i, flavors[i]);
+ }
+ *count = i;
+
+ return 0;
+}
+
+static int mnt_dec_mountres3(struct rpc_rqst *req, __be32 *p,
+ struct mountres *res)
+{
+ struct xdr_stream xdr;
+ int status;
+
+ xdr_init_decode(&xdr, &req->rq_rcv_buf, p);
+
+ status = decode_fhs_status(&xdr, res);
+ if (unlikely(status != 0 || res->errno != 0))
+ return status;
+ status = decode_fhandle3(&xdr, res);
+ if (unlikely(status != 0)) {
+ res->errno = -EBADHANDLE;
+ return 0;
+ }
+ return decode_auth_flavors(&xdr, res);
+}
static struct rpc_procinfo mnt_procedures[] = {
- [MNTPROC_MNT] = {
- .p_proc = MNTPROC_MNT,
- .p_encode = (kxdrproc_t) xdr_encode_dirpath,
- .p_decode = (kxdrproc_t) xdr_decode_fhstatus,
- .p_arglen = MNT_dirpath_sz,
- .p_replen = MNT_fhstatus_sz,
- .p_statidx = MNTPROC_MNT,
+ [MOUNTPROC_MNT] = {
+ .p_proc = MOUNTPROC_MNT,
+ .p_encode = (kxdrproc_t)mnt_enc_dirpath,
+ .p_decode = (kxdrproc_t)mnt_dec_mountres,
+ .p_arglen = MNT_enc_dirpath_sz,
+ .p_replen = MNT_dec_mountres_sz,
+ .p_statidx = MOUNTPROC_MNT,
.p_name = "MOUNT",
},
};
@@ -159,10 +412,10 @@
static struct rpc_procinfo mnt3_procedures[] = {
[MOUNTPROC3_MNT] = {
.p_proc = MOUNTPROC3_MNT,
- .p_encode = (kxdrproc_t) xdr_encode_dirpath,
- .p_decode = (kxdrproc_t) xdr_decode_fhstatus3,
- .p_arglen = MNT_dirpath_sz,
- .p_replen = MNT_fhstatus3_sz,
+ .p_encode = (kxdrproc_t)mnt_enc_dirpath,
+ .p_decode = (kxdrproc_t)mnt_dec_mountres3,
+ .p_arglen = MNT_enc_dirpath_sz,
+ .p_replen = MNT_dec_mountres3_sz,
.p_statidx = MOUNTPROC3_MNT,
.p_name = "MOUNT",
},
diff --git a/fs/nfs/namespace.c b/fs/nfs/namespace.c
index f01caec..40c7667 100644
--- a/fs/nfs/namespace.c
+++ b/fs/nfs/namespace.c
@@ -65,6 +65,11 @@
dentry = dentry->d_parent;
}
spin_unlock(&dcache_lock);
+ if (*end != '/') {
+ if (--buflen < 0)
+ goto Elong;
+ *--end = '/';
+ }
namelen = strlen(base);
/* Strip off excess slashes in base string */
while (namelen > 0 && base[namelen - 1] == '/')
diff --git a/fs/nfs/nfs3acl.c b/fs/nfs/nfs3acl.c
index 6bbf0e6..bac6051 100644
--- a/fs/nfs/nfs3acl.c
+++ b/fs/nfs/nfs3acl.c
@@ -207,8 +207,6 @@
status = nfs_revalidate_inode(server, inode);
if (status < 0)
return ERR_PTR(status);
- if (NFS_I(inode)->cache_validity & NFS_INO_INVALID_ACL)
- nfs_zap_acl_cache(inode);
acl = nfs3_get_cached_acl(inode, type);
if (acl != ERR_PTR(-EAGAIN))
return acl;
diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h
index 84345de..61bc3a3 100644
--- a/fs/nfs/nfs4_fs.h
+++ b/fs/nfs/nfs4_fs.h
@@ -44,6 +44,7 @@
NFS4CLNT_RECLAIM_REBOOT,
NFS4CLNT_RECLAIM_NOGRACE,
NFS4CLNT_DELEGRETURN,
+ NFS4CLNT_SESSION_SETUP,
};
/*
@@ -177,6 +178,14 @@
int state_flag_bit;
int (*recover_open)(struct nfs4_state_owner *, struct nfs4_state *);
int (*recover_lock)(struct nfs4_state *, struct file_lock *);
+ int (*establish_clid)(struct nfs_client *, struct rpc_cred *);
+ struct rpc_cred * (*get_clid_cred)(struct nfs_client *);
+};
+
+struct nfs4_state_maintenance_ops {
+ int (*sched_state_renewal)(struct nfs_client *, struct rpc_cred *);
+ struct rpc_cred * (*get_state_renewal_cred_locked)(struct nfs_client *);
+ int (*renew_lease)(struct nfs_client *, struct rpc_cred *);
};
extern const struct dentry_operations nfs4_dentry_operations;
@@ -193,6 +202,7 @@
extern int nfs4_proc_setclientid_confirm(struct nfs_client *, struct rpc_cred *);
extern int nfs4_proc_async_renew(struct nfs_client *, struct rpc_cred *);
extern int nfs4_proc_renew(struct nfs_client *, struct rpc_cred *);
+extern int nfs4_init_clientid(struct nfs_client *, struct rpc_cred *);
extern int nfs4_do_close(struct path *path, struct nfs4_state *state, int wait);
extern struct dentry *nfs4_atomic_open(struct inode *, struct dentry *, struct nameidata *);
extern int nfs4_open_revalidate(struct inode *, struct dentry *, int, struct nameidata *);
@@ -200,8 +210,26 @@
extern int nfs4_proc_fs_locations(struct inode *dir, const struct qstr *name,
struct nfs4_fs_locations *fs_locations, struct page *page);
-extern struct nfs4_state_recovery_ops nfs4_reboot_recovery_ops;
-extern struct nfs4_state_recovery_ops nfs4_nograce_recovery_ops;
+extern struct nfs4_state_recovery_ops *nfs4_reboot_recovery_ops[];
+extern struct nfs4_state_recovery_ops *nfs4_nograce_recovery_ops[];
+#if defined(CONFIG_NFS_V4_1)
+extern int nfs4_setup_sequence(struct nfs_client *clp,
+ struct nfs4_sequence_args *args, struct nfs4_sequence_res *res,
+ int cache_reply, struct rpc_task *task);
+extern void nfs4_destroy_session(struct nfs4_session *session);
+extern struct nfs4_session *nfs4_alloc_session(struct nfs_client *clp);
+extern int nfs4_proc_create_session(struct nfs_client *, int reset);
+extern int nfs4_proc_destroy_session(struct nfs4_session *);
+#else /* CONFIG_NFS_V4_1 */
+static inline int nfs4_setup_sequence(struct nfs_client *clp,
+ struct nfs4_sequence_args *args, struct nfs4_sequence_res *res,
+ int cache_reply, struct rpc_task *task)
+{
+ return 0;
+}
+#endif /* CONFIG_NFS_V4_1 */
+
+extern struct nfs4_state_maintenance_ops *nfs4_state_renewal_ops[];
extern const u32 nfs4_fattr_bitmap[2];
extern const u32 nfs4_statfs_bitmap[2];
@@ -216,7 +244,12 @@
extern void nfs4_renew_state(struct work_struct *);
/* nfs4state.c */
+struct rpc_cred *nfs4_get_setclientid_cred(struct nfs_client *clp);
struct rpc_cred *nfs4_get_renew_cred_locked(struct nfs_client *clp);
+#if defined(CONFIG_NFS_V4_1)
+struct rpc_cred *nfs4_get_machine_cred_locked(struct nfs_client *clp);
+struct rpc_cred *nfs4_get_exchange_id_cred(struct nfs_client *clp);
+#endif /* CONFIG_NFS_V4_1 */
extern struct nfs4_state_owner * nfs4_get_state_owner(struct nfs_server *, struct rpc_cred *);
extern void nfs4_put_state_owner(struct nfs4_state_owner *);
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index 4674f80..92ce435 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -48,11 +48,14 @@
#include <linux/smp_lock.h>
#include <linux/namei.h>
#include <linux/mount.h>
+#include <linux/module.h>
+#include <linux/sunrpc/bc_xprt.h>
#include "nfs4_fs.h"
#include "delegation.h"
#include "internal.h"
#include "iostat.h"
+#include "callback.h"
#define NFSDBG_FACILITY NFSDBG_PROC
@@ -247,7 +250,25 @@
ret = nfs4_wait_clnt_recover(clp);
if (ret == 0)
exception->retry = 1;
+#if !defined(CONFIG_NFS_V4_1)
break;
+#else /* !defined(CONFIG_NFS_V4_1) */
+ if (!nfs4_has_session(server->nfs_client))
+ break;
+ /* FALLTHROUGH */
+ case -NFS4ERR_BADSESSION:
+ case -NFS4ERR_BADSLOT:
+ case -NFS4ERR_BAD_HIGH_SLOT:
+ case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
+ case -NFS4ERR_DEADSESSION:
+ case -NFS4ERR_SEQ_FALSE_RETRY:
+ case -NFS4ERR_SEQ_MISORDERED:
+ dprintk("%s ERROR: %d Reset session\n", __func__,
+ errorcode);
+ set_bit(NFS4CLNT_SESSION_SETUP, &clp->cl_state);
+ exception->retry = 1;
+ /* FALLTHROUGH */
+#endif /* !defined(CONFIG_NFS_V4_1) */
case -NFS4ERR_FILE_OPEN:
case -NFS4ERR_GRACE:
case -NFS4ERR_DELAY:
@@ -271,6 +292,353 @@
spin_unlock(&clp->cl_lock);
}
+#if defined(CONFIG_NFS_V4_1)
+
+/*
+ * nfs4_free_slot - free a slot and efficiently update slot table.
+ *
+ * freeing a slot is trivially done by clearing its respective bit
+ * in the bitmap.
+ * If the freed slotid equals highest_used_slotid we want to update it
+ * so that the server would be able to size down the slot table if needed,
+ * otherwise we know that the highest_used_slotid is still in use.
+ * When updating highest_used_slotid there may be "holes" in the bitmap
+ * so we need to scan down from highest_used_slotid to 0 looking for the now
+ * highest slotid in use.
+ * If none found, highest_used_slotid is set to -1.
+ */
+static void
+nfs4_free_slot(struct nfs4_slot_table *tbl, u8 free_slotid)
+{
+ int slotid = free_slotid;
+
+ spin_lock(&tbl->slot_tbl_lock);
+ /* clear used bit in bitmap */
+ __clear_bit(slotid, tbl->used_slots);
+
+ /* update highest_used_slotid when it is freed */
+ if (slotid == tbl->highest_used_slotid) {
+ slotid = find_last_bit(tbl->used_slots, tbl->max_slots);
+ if (slotid >= 0 && slotid < tbl->max_slots)
+ tbl->highest_used_slotid = slotid;
+ else
+ tbl->highest_used_slotid = -1;
+ }
+ rpc_wake_up_next(&tbl->slot_tbl_waitq);
+ spin_unlock(&tbl->slot_tbl_lock);
+ dprintk("%s: free_slotid %u highest_used_slotid %d\n", __func__,
+ free_slotid, tbl->highest_used_slotid);
+}
+
+void nfs41_sequence_free_slot(const struct nfs_client *clp,
+ struct nfs4_sequence_res *res)
+{
+ struct nfs4_slot_table *tbl;
+
+ if (!nfs4_has_session(clp)) {
+ dprintk("%s: No session\n", __func__);
+ return;
+ }
+ tbl = &clp->cl_session->fc_slot_table;
+ if (res->sr_slotid == NFS4_MAX_SLOT_TABLE) {
+ dprintk("%s: No slot\n", __func__);
+ /* just wake up the next guy waiting since
+ * we may have not consumed a slot after all */
+ rpc_wake_up_next(&tbl->slot_tbl_waitq);
+ return;
+ }
+ nfs4_free_slot(tbl, res->sr_slotid);
+ res->sr_slotid = NFS4_MAX_SLOT_TABLE;
+}
+
+static void nfs41_sequence_done(struct nfs_client *clp,
+ struct nfs4_sequence_res *res,
+ int rpc_status)
+{
+ unsigned long timestamp;
+ struct nfs4_slot_table *tbl;
+ struct nfs4_slot *slot;
+
+ /*
+ * sr_status remains 1 if an RPC level error occurred. The server
+ * may or may not have processed the sequence operation.
+ * Proceed as if the server received and processed the sequence
+ * operation.
+ */
+ if (res->sr_status == 1)
+ res->sr_status = NFS_OK;
+
+ /* -ERESTARTSYS can result in skipping nfs41_sequence_setup */
+ if (res->sr_slotid == NFS4_MAX_SLOT_TABLE)
+ goto out;
+
+ tbl = &clp->cl_session->fc_slot_table;
+ slot = tbl->slots + res->sr_slotid;
+
+ if (res->sr_status == 0) {
+ /* Update the slot's sequence and clientid lease timer */
+ ++slot->seq_nr;
+ timestamp = res->sr_renewal_time;
+ spin_lock(&clp->cl_lock);
+ if (time_before(clp->cl_last_renewal, timestamp))
+ clp->cl_last_renewal = timestamp;
+ spin_unlock(&clp->cl_lock);
+ return;
+ }
+out:
+ /* The session may be reset by one of the error handlers. */
+ dprintk("%s: Error %d free the slot \n", __func__, res->sr_status);
+ nfs41_sequence_free_slot(clp, res);
+}
+
+/*
+ * nfs4_find_slot - efficiently look for a free slot
+ *
+ * nfs4_find_slot looks for an unset bit in the used_slots bitmap.
+ * If found, we mark the slot as used, update the highest_used_slotid,
+ * and respectively set up the sequence operation args.
+ * The slot number is returned if found, or NFS4_MAX_SLOT_TABLE otherwise.
+ *
+ * Note: must be called while holding the slot_tbl_lock.
+ */
+static u8
+nfs4_find_slot(struct nfs4_slot_table *tbl, struct rpc_task *task)
+{
+ int slotid;
+ u8 ret_id = NFS4_MAX_SLOT_TABLE;
+ BUILD_BUG_ON((u8)NFS4_MAX_SLOT_TABLE != (int)NFS4_MAX_SLOT_TABLE);
+
+ dprintk("--> %s used_slots=%04lx highest_used=%d max_slots=%d\n",
+ __func__, tbl->used_slots[0], tbl->highest_used_slotid,
+ tbl->max_slots);
+ slotid = find_first_zero_bit(tbl->used_slots, tbl->max_slots);
+ if (slotid >= tbl->max_slots)
+ goto out;
+ __set_bit(slotid, tbl->used_slots);
+ if (slotid > tbl->highest_used_slotid)
+ tbl->highest_used_slotid = slotid;
+ ret_id = slotid;
+out:
+ dprintk("<-- %s used_slots=%04lx highest_used=%d slotid=%d \n",
+ __func__, tbl->used_slots[0], tbl->highest_used_slotid, ret_id);
+ return ret_id;
+}
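
A kernel-independent sketch of the bookkeeping described in the two comments above: a used-slot map plus a highest-used watermark, marked on allocation and rescanned downward on free. Sizes and names are illustrative, a plain byte array stands in for the bitmap, and the locking and RPC wait queue are omitted.

	#include <stdio.h>

	#define MAX_SLOTS	16	/* illustrative; the patch uses NFS4_MAX_SLOT_TABLE */

	struct slot_table {
		unsigned char	used[MAX_SLOTS];	/* 1 = in use */
		int		highest_used;		/* -1 when table is empty */
	};

	/* Find the lowest free slot, mark it used, and track the watermark. */
	static int slot_alloc(struct slot_table *tbl)
	{
		int i;

		for (i = 0; i < MAX_SLOTS; i++) {
			if (!tbl->used[i]) {
				tbl->used[i] = 1;
				if (i > tbl->highest_used)
					tbl->highest_used = i;
				return i;
			}
		}
		return -1;		/* no free slot: caller must wait */
	}

	/* Free a slot; if it was the watermark, scan down for the new one. */
	static void slot_free(struct slot_table *tbl, int slotid)
	{
		tbl->used[slotid] = 0;
		if (slotid == tbl->highest_used) {
			while (slotid >= 0 && !tbl->used[slotid])
				slotid--;
			tbl->highest_used = slotid;	/* -1 if nothing left in use */
		}
	}

	int main(void)
	{
		struct slot_table tbl = { .highest_used = -1 };
		int a = slot_alloc(&tbl);
		int b = slot_alloc(&tbl);

		slot_free(&tbl, b);	/* watermark drops back to slot 'a' */
		printf("a=%d b=%d highest=%d\n", a, b, tbl.highest_used);
		slot_free(&tbl, a);
		printf("highest=%d\n", tbl.highest_used);
		return 0;
	}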
+
+static int nfs4_recover_session(struct nfs4_session *session)
+{
+ struct nfs_client *clp = session->clp;
+ int ret;
+
+ for (;;) {
+ ret = nfs4_wait_clnt_recover(clp);
+ if (ret != 0)
+ return ret;
+ if (!test_bit(NFS4CLNT_SESSION_SETUP, &clp->cl_state))
+ break;
+ nfs4_schedule_state_manager(clp);
+ }
+ return 0;
+}
+
+static int nfs41_setup_sequence(struct nfs4_session *session,
+ struct nfs4_sequence_args *args,
+ struct nfs4_sequence_res *res,
+ int cache_reply,
+ struct rpc_task *task)
+{
+ struct nfs4_slot *slot;
+ struct nfs4_slot_table *tbl;
+ int status = 0;
+ u8 slotid;
+
+ dprintk("--> %s\n", __func__);
+ /* slot already allocated? */
+ if (res->sr_slotid != NFS4_MAX_SLOT_TABLE)
+ return 0;
+
+ memset(res, 0, sizeof(*res));
+ res->sr_slotid = NFS4_MAX_SLOT_TABLE;
+ tbl = &session->fc_slot_table;
+
+ spin_lock(&tbl->slot_tbl_lock);
+ if (test_bit(NFS4CLNT_SESSION_SETUP, &session->clp->cl_state)) {
+ if (tbl->highest_used_slotid != -1) {
+ rpc_sleep_on(&tbl->slot_tbl_waitq, task, NULL);
+ spin_unlock(&tbl->slot_tbl_lock);
+ dprintk("<-- %s: Session reset: draining\n", __func__);
+ return -EAGAIN;
+ }
+
+ /* The slot table is empty; start the reset thread */
+ dprintk("%s Session Reset\n", __func__);
+ spin_unlock(&tbl->slot_tbl_lock);
+ status = nfs4_recover_session(session);
+ if (status)
+ return status;
+ spin_lock(&tbl->slot_tbl_lock);
+ }
+
+ slotid = nfs4_find_slot(tbl, task);
+ if (slotid == NFS4_MAX_SLOT_TABLE) {
+ rpc_sleep_on(&tbl->slot_tbl_waitq, task, NULL);
+ spin_unlock(&tbl->slot_tbl_lock);
+ dprintk("<-- %s: no free slots\n", __func__);
+ return -EAGAIN;
+ }
+ spin_unlock(&tbl->slot_tbl_lock);
+
+ slot = tbl->slots + slotid;
+ args->sa_session = session;
+ args->sa_slotid = slotid;
+ args->sa_cache_this = cache_reply;
+
+ dprintk("<-- %s slotid=%d seqid=%d\n", __func__, slotid, slot->seq_nr);
+
+ res->sr_session = session;
+ res->sr_slotid = slotid;
+ res->sr_renewal_time = jiffies;
+ /*
+ * sr_status is only set in decode_sequence, and so will remain
+ * set to 1 if an rpc level failure occurs.
+ */
+ res->sr_status = 1;
+ return 0;
+}
+
+int nfs4_setup_sequence(struct nfs_client *clp,
+ struct nfs4_sequence_args *args,
+ struct nfs4_sequence_res *res,
+ int cache_reply,
+ struct rpc_task *task)
+{
+ int ret = 0;
+
+ dprintk("--> %s clp %p session %p sr_slotid %d\n",
+ __func__, clp, clp->cl_session, res->sr_slotid);
+
+ if (!nfs4_has_session(clp))
+ goto out;
+ ret = nfs41_setup_sequence(clp->cl_session, args, res, cache_reply,
+ task);
+ if (ret != -EAGAIN) {
+ /* terminate rpc task */
+ task->tk_status = ret;
+ task->tk_action = NULL;
+ }
+out:
+ dprintk("<-- %s status=%d\n", __func__, ret);
+ return ret;
+}
+
+struct nfs41_call_sync_data {
+ struct nfs_client *clp;
+ struct nfs4_sequence_args *seq_args;
+ struct nfs4_sequence_res *seq_res;
+ int cache_reply;
+};
+
+static void nfs41_call_sync_prepare(struct rpc_task *task, void *calldata)
+{
+ struct nfs41_call_sync_data *data = calldata;
+
+ dprintk("--> %s data->clp->cl_session %p\n", __func__,
+ data->clp->cl_session);
+ if (nfs4_setup_sequence(data->clp, data->seq_args,
+ data->seq_res, data->cache_reply, task))
+ return;
+ rpc_call_start(task);
+}
+
+static void nfs41_call_sync_done(struct rpc_task *task, void *calldata)
+{
+ struct nfs41_call_sync_data *data = calldata;
+
+ nfs41_sequence_done(data->clp, data->seq_res, task->tk_status);
+ nfs41_sequence_free_slot(data->clp, data->seq_res);
+}
+
+struct rpc_call_ops nfs41_call_sync_ops = {
+ .rpc_call_prepare = nfs41_call_sync_prepare,
+ .rpc_call_done = nfs41_call_sync_done,
+};
+
+static int nfs4_call_sync_sequence(struct nfs_client *clp,
+ struct rpc_clnt *clnt,
+ struct rpc_message *msg,
+ struct nfs4_sequence_args *args,
+ struct nfs4_sequence_res *res,
+ int cache_reply)
+{
+ int ret;
+ struct rpc_task *task;
+ struct nfs41_call_sync_data data = {
+ .clp = clp,
+ .seq_args = args,
+ .seq_res = res,
+ .cache_reply = cache_reply,
+ };
+ struct rpc_task_setup task_setup = {
+ .rpc_client = clnt,
+ .rpc_message = msg,
+ .callback_ops = &nfs41_call_sync_ops,
+ .callback_data = &data
+ };
+
+ res->sr_slotid = NFS4_MAX_SLOT_TABLE;
+ task = rpc_run_task(&task_setup);
+ if (IS_ERR(task))
+ ret = PTR_ERR(task);
+ else {
+ ret = task->tk_status;
+ rpc_put_task(task);
+ }
+ return ret;
+}
+
+int _nfs4_call_sync_session(struct nfs_server *server,
+ struct rpc_message *msg,
+ struct nfs4_sequence_args *args,
+ struct nfs4_sequence_res *res,
+ int cache_reply)
+{
+ return nfs4_call_sync_sequence(server->nfs_client, server->client,
+ msg, args, res, cache_reply);
+}
+
+#endif /* CONFIG_NFS_V4_1 */
+
+int _nfs4_call_sync(struct nfs_server *server,
+ struct rpc_message *msg,
+ struct nfs4_sequence_args *args,
+ struct nfs4_sequence_res *res,
+ int cache_reply)
+{
+ args->sa_session = res->sr_session = NULL;
+ return rpc_call_sync(server->client, msg, 0);
+}
+
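+/*
+ * cl_call_sync points at the minor-version specific routine:
+ * _nfs4_call_sync for v4.0, _nfs4_call_sync_session for v4.1.
+ */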
+#define nfs4_call_sync(server, msg, args, res, cache_reply) \
+ (server)->nfs_client->cl_call_sync((server), (msg), &(args)->seq_args, \
+ &(res)->seq_res, (cache_reply))
+
+static void nfs4_sequence_done(const struct nfs_server *server,
+ struct nfs4_sequence_res *res, int rpc_status)
+{
+#ifdef CONFIG_NFS_V4_1
+ if (nfs4_has_session(server->nfs_client))
+ nfs41_sequence_done(server->nfs_client, res, rpc_status);
+#endif /* CONFIG_NFS_V4_1 */
+}
+
+/* no restart, therefore free slot here */
+static void nfs4_sequence_done_free_slot(const struct nfs_server *server,
+ struct nfs4_sequence_res *res,
+ int rpc_status)
+{
+ nfs4_sequence_done(server, res, rpc_status);
+ nfs4_sequence_free_slot(server->nfs_client, res);
+}
+
static void update_changeattr(struct inode *dir, struct nfs4_change_info *cinfo)
{
struct nfs_inode *nfsi = NFS_I(dir);
@@ -312,6 +680,7 @@
p->o_res.server = p->o_arg.server;
nfs_fattr_init(&p->f_attr);
nfs_fattr_init(&p->dir_attr);
+ p->o_res.seq_res.sr_slotid = NFS4_MAX_SLOT_TABLE;
}
static struct nfs4_opendata *nfs4_opendata_alloc(struct path *path,
@@ -804,16 +1173,30 @@
err = _nfs4_open_delegation_recall(ctx, state, stateid);
switch (err) {
case 0:
- return err;
+ case -ENOENT:
+ case -ESTALE:
+ goto out;
case -NFS4ERR_STALE_CLIENTID:
case -NFS4ERR_STALE_STATEID:
case -NFS4ERR_EXPIRED:
/* Don't recall a delegation if it was lost */
nfs4_schedule_state_recovery(server->nfs_client);
- return err;
+ goto out;
+ case -ERESTARTSYS:
+ /*
+ * The show must go on: exit, but mark the
+ * stateid as needing recovery.
+ */
+ case -NFS4ERR_ADMIN_REVOKED:
+ case -NFS4ERR_BAD_STATEID:
+ nfs4_state_mark_reclaim_nograce(server->nfs_client, state);
+ case -ENOMEM:
+ err = 0;
+ goto out;
}
err = nfs4_handle_exception(server, err, &exception);
} while (exception.retry);
+out:
return err;
}
@@ -929,6 +1312,10 @@
nfs_copy_fh(&data->o_res.fh, data->o_arg.fh);
}
data->timestamp = jiffies;
+ if (nfs4_setup_sequence(data->o_arg.server->nfs_client,
+ &data->o_arg.seq_args,
+ &data->o_res.seq_res, 1, task))
+ return;
rpc_call_start(task);
return;
out_no_action:
@@ -941,6 +1328,10 @@
struct nfs4_opendata *data = calldata;
data->rpc_status = task->tk_status;
+
+ nfs4_sequence_done_free_slot(data->o_arg.server, &data->o_res.seq_res,
+ task->tk_status);
+
if (RPC_ASSASSINATED(task))
return;
if (task->tk_status == 0) {
@@ -1269,7 +1660,7 @@
} else
memcpy(&arg.stateid, &zero_stateid, sizeof(arg.stateid));
- status = rpc_call_sync(server->client, &msg, 0);
+ status = nfs4_call_sync(server, &msg, &arg, &res, 1);
if (status == 0 && state != NULL)
renew_lease(server, timestamp);
return status;
@@ -1318,6 +1709,7 @@
struct nfs4_state *state = calldata->state;
struct nfs_server *server = NFS_SERVER(calldata->inode);
+ nfs4_sequence_done(server, &calldata->res.seq_res, task->tk_status);
if (RPC_ASSASSINATED(task))
return;
/* hmm. we are done with the inode, and in the process of freeing
@@ -1336,10 +1728,11 @@
break;
default:
if (nfs4_async_handle_error(task, server, state) == -EAGAIN) {
- rpc_restart_call(task);
+ nfs4_restart_rpc(task, server->nfs_client);
return;
}
}
+ nfs4_sequence_free_slot(server->nfs_client, &calldata->res.seq_res);
nfs_refresh_inode(calldata->inode, calldata->res.fattr);
}
@@ -1380,6 +1773,10 @@
calldata->arg.fmode = FMODE_WRITE;
}
calldata->timestamp = jiffies;
+ if (nfs4_setup_sequence((NFS_SERVER(calldata->inode))->nfs_client,
+ &calldata->arg.seq_args, &calldata->res.seq_res,
+ 1, task))
+ return;
rpc_call_start(task);
}
@@ -1419,13 +1816,15 @@
};
int status = -ENOMEM;
- calldata = kmalloc(sizeof(*calldata), GFP_KERNEL);
+ calldata = kzalloc(sizeof(*calldata), GFP_KERNEL);
if (calldata == NULL)
goto out;
calldata->inode = state->inode;
calldata->state = state;
calldata->arg.fh = NFS_FH(state->inode);
calldata->arg.stateid = &state->open_stateid;
+ if (nfs4_has_session(server->nfs_client))
+ memset(calldata->arg.stateid->data, 0, 4); /* clear seqid */
/* Serialization for the sequence id */
calldata->arg.seqid = nfs_alloc_seqid(&state->owner->so_seqid);
if (calldata->arg.seqid == NULL)
@@ -1435,6 +1834,7 @@
calldata->res.fattr = &calldata->fattr;
calldata->res.seqid = calldata->arg.seqid;
calldata->res.server = server;
+ calldata->res.seq_res.sr_slotid = NFS4_MAX_SLOT_TABLE;
calldata->path.mnt = mntget(path->mnt);
calldata->path.dentry = dget(path->dentry);
@@ -1584,15 +1984,18 @@
static int _nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *fhandle)
{
+ struct nfs4_server_caps_arg args = {
+ .fhandle = fhandle,
+ };
struct nfs4_server_caps_res res = {};
struct rpc_message msg = {
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SERVER_CAPS],
- .rpc_argp = fhandle,
+ .rpc_argp = &args,
.rpc_resp = &res,
};
int status;
- status = rpc_call_sync(server->client, &msg, 0);
+ status = nfs4_call_sync(server, &msg, &args, &res, 0);
if (status == 0) {
memcpy(server->attr_bitmask, res.attr_bitmask, sizeof(server->attr_bitmask));
if (res.attr_bitmask[0] & FATTR4_WORD0_ACL)
@@ -1606,6 +2009,7 @@
server->cache_consistency_bitmask[1] &= FATTR4_WORD1_TIME_METADATA|FATTR4_WORD1_TIME_MODIFY;
server->acl_bitmask = res.acl_bitmask;
}
+
return status;
}
@@ -1637,8 +2041,15 @@
.rpc_argp = &args,
.rpc_resp = &res,
};
+ int status;
+
nfs_fattr_init(info->fattr);
- return rpc_call_sync(server->client, &msg, 0);
+ status = nfs4_recover_expired_lease(server);
+ if (!status)
+ status = nfs4_check_client_ready(server->nfs_client);
+ if (!status)
+ status = nfs4_call_sync(server, &msg, &args, &res, 0);
+ return status;
}
static int nfs4_lookup_root(struct nfs_server *server, struct nfs_fh *fhandle,
@@ -1728,7 +2139,7 @@
};
nfs_fattr_init(fattr);
- return rpc_call_sync(server->client, &msg, 0);
+ return nfs4_call_sync(server, &msg, &args, &res, 0);
}
static int nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fattr *fattr)
@@ -1812,7 +2223,7 @@
nfs_fattr_init(fattr);
dprintk("NFS call lookupfh %s\n", name->name);
- status = rpc_call_sync(server->client, &msg, 0);
+ status = nfs4_call_sync(server, &msg, &args, &res, 0);
dprintk("NFS reply lookupfh: %d\n", status);
return status;
}
@@ -1898,7 +2309,7 @@
args.access |= NFS4_ACCESS_EXECUTE;
}
nfs_fattr_init(&fattr);
- status = rpc_call_sync(NFS_CLIENT(inode), &msg, 0);
+ status = nfs4_call_sync(server, &msg, &args, &res, 0);
if (!status) {
entry->mask = 0;
if (res.access & NFS4_ACCESS_READ)
@@ -1957,13 +2368,14 @@
.pglen = pglen,
.pages = &page,
};
+ struct nfs4_readlink_res res;
struct rpc_message msg = {
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READLINK],
.rpc_argp = &args,
- .rpc_resp = NULL,
+ .rpc_resp = &res,
};
- return rpc_call_sync(NFS_CLIENT(inode), &msg, 0);
+ return nfs4_call_sync(NFS_SERVER(inode), &msg, &args, &res, 0);
}
static int nfs4_proc_readlink(struct inode *inode, struct page *page,
@@ -2057,7 +2469,7 @@
int status;
nfs_fattr_init(&res.dir_attr);
- status = rpc_call_sync(server->client, &msg, 0);
+ status = nfs4_call_sync(server, &msg, &args, &res, 1);
if (status == 0) {
update_changeattr(dir, &res.cinfo);
nfs_post_op_update_inode(dir, &res.dir_attr);
@@ -2092,8 +2504,10 @@
{
struct nfs_removeres *res = task->tk_msg.rpc_resp;
+ nfs4_sequence_done(res->server, &res->seq_res, task->tk_status);
if (nfs4_async_handle_error(task, res->server, NULL) == -EAGAIN)
return 0;
+ nfs4_sequence_free_slot(res->server->nfs_client, &res->seq_res);
update_changeattr(dir, &res->cinfo);
nfs_post_op_update_inode(dir, &res->dir_attr);
return 1;
@@ -2125,7 +2539,7 @@
nfs_fattr_init(res.old_fattr);
nfs_fattr_init(res.new_fattr);
- status = rpc_call_sync(server->client, &msg, 0);
+ status = nfs4_call_sync(server, &msg, &arg, &res, 1);
if (!status) {
update_changeattr(old_dir, &res.old_cinfo);
@@ -2174,7 +2588,7 @@
nfs_fattr_init(res.fattr);
nfs_fattr_init(res.dir_attr);
- status = rpc_call_sync(server->client, &msg, 0);
+ status = nfs4_call_sync(server, &msg, &arg, &res, 1);
if (!status) {
update_changeattr(dir, &res.cinfo);
nfs_post_op_update_inode(dir, res.dir_attr);
@@ -2235,7 +2649,8 @@
static int nfs4_do_create(struct inode *dir, struct dentry *dentry, struct nfs4_createdata *data)
{
- int status = rpc_call_sync(NFS_CLIENT(dir), &data->msg, 0);
+ int status = nfs4_call_sync(NFS_SERVER(dir), &data->msg,
+ &data->arg, &data->res, 1);
if (status == 0) {
update_changeattr(dir, &data->res.dir_cinfo);
nfs_post_op_update_inode(dir, data->res.dir_fattr);
@@ -2344,7 +2759,7 @@
(unsigned long long)cookie);
nfs4_setup_readdir(cookie, NFS_COOKIEVERF(dir), dentry, &args);
res.pgbase = args.pgbase;
- status = rpc_call_sync(NFS_CLIENT(dir), &msg, 0);
+ status = nfs4_call_sync(NFS_SERVER(dir), &msg, &args, &res, 0);
if (status == 0)
memcpy(NFS_COOKIEVERF(dir), res.verifier.data, NFS4_VERIFIER_SIZE);
@@ -2422,14 +2837,17 @@
.fh = fhandle,
.bitmask = server->attr_bitmask,
};
+ struct nfs4_statfs_res res = {
+ .fsstat = fsstat,
+ };
struct rpc_message msg = {
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_STATFS],
.rpc_argp = &args,
- .rpc_resp = fsstat,
+ .rpc_resp = &res,
};
nfs_fattr_init(fsstat->fattr);
- return rpc_call_sync(server->client, &msg, 0);
+ return nfs4_call_sync(server, &msg, &args, &res, 0);
}
static int nfs4_proc_statfs(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsstat *fsstat)
@@ -2451,13 +2869,16 @@
.fh = fhandle,
.bitmask = server->attr_bitmask,
};
+ struct nfs4_fsinfo_res res = {
+ .fsinfo = fsinfo,
+ };
struct rpc_message msg = {
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FSINFO],
.rpc_argp = &args,
- .rpc_resp = fsinfo,
+ .rpc_resp = &res,
};
- return rpc_call_sync(server->client, &msg, 0);
+ return nfs4_call_sync(server, &msg, &args, &res, 0);
}
static int nfs4_do_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsinfo *fsinfo)
@@ -2486,10 +2907,13 @@
.fh = fhandle,
.bitmask = server->attr_bitmask,
};
+ struct nfs4_pathconf_res res = {
+ .pathconf = pathconf,
+ };
struct rpc_message msg = {
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_PATHCONF],
.rpc_argp = &args,
- .rpc_resp = pathconf,
+ .rpc_resp = &res,
};
/* None of the pathconf attributes are mandatory to implement */
@@ -2499,7 +2923,7 @@
}
nfs_fattr_init(pathconf->fattr);
- return rpc_call_sync(server->client, &msg, 0);
+ return nfs4_call_sync(server, &msg, &args, &res, 0);
}
static int nfs4_proc_pathconf(struct nfs_server *server, struct nfs_fh *fhandle,
@@ -2520,8 +2944,13 @@
{
struct nfs_server *server = NFS_SERVER(data->inode);
+ dprintk("--> %s\n", __func__);
+
+ /* nfs4_sequence_free_slot called in the read rpc_call_done */
+ nfs4_sequence_done(server, &data->res.seq_res, task->tk_status);
+
if (nfs4_async_handle_error(task, server, data->args.context->state) == -EAGAIN) {
- rpc_restart_call(task);
+ nfs4_restart_rpc(task, server->nfs_client);
return -EAGAIN;
}
@@ -2541,8 +2970,12 @@
{
struct inode *inode = data->inode;
+ /* slot is freed in nfs_writeback_done */
+ nfs4_sequence_done(NFS_SERVER(inode), &data->res.seq_res,
+ task->tk_status);
+
if (nfs4_async_handle_error(task, NFS_SERVER(inode), data->args.context->state) == -EAGAIN) {
- rpc_restart_call(task);
+ nfs4_restart_rpc(task, NFS_SERVER(inode)->nfs_client);
return -EAGAIN;
}
if (task->tk_status >= 0) {
@@ -2567,10 +3000,14 @@
{
struct inode *inode = data->inode;
+ nfs4_sequence_done(NFS_SERVER(inode), &data->res.seq_res,
+ task->tk_status);
if (nfs4_async_handle_error(task, NFS_SERVER(inode), NULL) == -EAGAIN) {
- rpc_restart_call(task);
+ nfs4_restart_rpc(task, NFS_SERVER(inode)->nfs_client);
return -EAGAIN;
}
+ nfs4_sequence_free_slot(NFS_SERVER(inode)->nfs_client,
+ &data->res.seq_res);
nfs_refresh_inode(inode, data->res.fattr);
return 0;
}
@@ -2603,6 +3040,9 @@
if (time_before(clp->cl_last_renewal,timestamp))
clp->cl_last_renewal = timestamp;
spin_unlock(&clp->cl_lock);
+ dprintk("%s calling put_rpccred on rpc_cred %p\n", __func__,
+ task->tk_msg.rpc_cred);
+ put_rpccred(task->tk_msg.rpc_cred);
}
static const struct rpc_call_ops nfs4_renew_ops = {
@@ -2742,12 +3182,14 @@
.acl_pages = pages,
.acl_len = buflen,
};
- size_t resp_len = buflen;
+ struct nfs_getaclres res = {
+ .acl_len = buflen,
+ };
void *resp_buf;
struct rpc_message msg = {
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETACL],
.rpc_argp = &args,
- .rpc_resp = &resp_len,
+ .rpc_resp = &res,
};
struct page *localpage = NULL;
int ret;
@@ -2761,26 +3203,26 @@
return -ENOMEM;
args.acl_pages[0] = localpage;
args.acl_pgbase = 0;
- resp_len = args.acl_len = PAGE_SIZE;
+ args.acl_len = PAGE_SIZE;
} else {
resp_buf = buf;
buf_to_pages(buf, buflen, args.acl_pages, &args.acl_pgbase);
}
- ret = rpc_call_sync(NFS_CLIENT(inode), &msg, 0);
+ ret = nfs4_call_sync(NFS_SERVER(inode), &msg, &args, &res, 0);
if (ret)
goto out_free;
- if (resp_len > args.acl_len)
- nfs4_write_cached_acl(inode, NULL, resp_len);
+ if (res.acl_len > args.acl_len)
+ nfs4_write_cached_acl(inode, NULL, res.acl_len);
else
- nfs4_write_cached_acl(inode, resp_buf, resp_len);
+ nfs4_write_cached_acl(inode, resp_buf, res.acl_len);
if (buf) {
ret = -ERANGE;
- if (resp_len > buflen)
+ if (res.acl_len > buflen)
goto out_free;
if (localpage)
- memcpy(buf, resp_buf, resp_len);
+ memcpy(buf, resp_buf, res.acl_len);
}
- ret = resp_len;
+ ret = res.acl_len;
out_free:
if (localpage)
__free_page(localpage);
@@ -2810,8 +3252,6 @@
ret = nfs_revalidate_inode(server, inode);
if (ret < 0)
return ret;
- if (NFS_I(inode)->cache_validity & NFS_INO_INVALID_ACL)
- nfs_zap_acl_cache(inode);
ret = nfs4_read_cached_acl(inode, buf, buflen);
if (ret != -ENOENT)
return ret;
@@ -2827,10 +3267,11 @@
.acl_pages = pages,
.acl_len = buflen,
};
+ struct nfs_setaclres res;
struct rpc_message msg = {
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETACL],
.rpc_argp = &arg,
- .rpc_resp = NULL,
+ .rpc_resp = &res,
};
int ret;
@@ -2838,7 +3279,7 @@
return -EOPNOTSUPP;
nfs_inode_return_delegation(inode);
buf_to_pages(buf, buflen, arg.acl_pages, &arg.acl_pgbase);
- ret = rpc_call_sync(NFS_CLIENT(inode), &msg, 0);
+ ret = nfs4_call_sync(server, &msg, &arg, &res, 1);
nfs_access_zap_cache(inode);
nfs_zap_acl_cache(inode);
return ret;
@@ -2857,10 +3298,8 @@
}
static int
-nfs4_async_handle_error(struct rpc_task *task, const struct nfs_server *server, struct nfs4_state *state)
+_nfs4_async_handle_error(struct rpc_task *task, const struct nfs_server *server, struct nfs_client *clp, struct nfs4_state *state)
{
- struct nfs_client *clp = server->nfs_client;
-
if (!clp || task->tk_status >= 0)
return 0;
switch(task->tk_status) {
@@ -2879,8 +3318,23 @@
rpc_wake_up_queued_task(&clp->cl_rpcwaitq, task);
task->tk_status = 0;
return -EAGAIN;
+#if defined(CONFIG_NFS_V4_1)
+ case -NFS4ERR_BADSESSION:
+ case -NFS4ERR_BADSLOT:
+ case -NFS4ERR_BAD_HIGH_SLOT:
+ case -NFS4ERR_DEADSESSION:
+ case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
+ case -NFS4ERR_SEQ_FALSE_RETRY:
+ case -NFS4ERR_SEQ_MISORDERED:
+ dprintk("%s ERROR %d, Reset session\n", __func__,
+ task->tk_status);
+ set_bit(NFS4CLNT_SESSION_SETUP, &clp->cl_state);
+ task->tk_status = 0;
+ return -EAGAIN;
+#endif /* CONFIG_NFS_V4_1 */
case -NFS4ERR_DELAY:
- nfs_inc_server_stats(server, NFSIOS_DELAY);
+ if (server)
+ nfs_inc_server_stats(server, NFSIOS_DELAY);
case -NFS4ERR_GRACE:
rpc_delay(task, NFS4_POLL_RETRY_MAX);
task->tk_status = 0;
@@ -2893,6 +3347,12 @@
return 0;
}
+static int
+nfs4_async_handle_error(struct rpc_task *task, const struct nfs_server *server, struct nfs4_state *state)
+{
+ return _nfs4_async_handle_error(task, server, server->nfs_client, state);
+}
+
int nfs4_proc_setclientid(struct nfs_client *clp, u32 program, unsigned short port, struct rpc_cred *cred)
{
nfs4_verifier sc_verifier;
@@ -3000,6 +3460,10 @@
static void nfs4_delegreturn_done(struct rpc_task *task, void *calldata)
{
struct nfs4_delegreturndata *data = calldata;
+
+ nfs4_sequence_done_free_slot(data->res.server, &data->res.seq_res,
+ task->tk_status);
+
data->rpc_status = task->tk_status;
if (data->rpc_status == 0)
renew_lease(data->res.server, data->timestamp);
@@ -3010,7 +3474,25 @@
kfree(calldata);
}
+#if defined(CONFIG_NFS_V4_1)
+static void nfs4_delegreturn_prepare(struct rpc_task *task, void *data)
+{
+ struct nfs4_delegreturndata *d_data;
+
+ d_data = (struct nfs4_delegreturndata *)data;
+
+ if (nfs4_setup_sequence(d_data->res.server->nfs_client,
+ &d_data->args.seq_args,
+ &d_data->res.seq_res, 1, task))
+ return;
+ rpc_call_start(task);
+}
+#endif /* CONFIG_NFS_V4_1 */
+
static const struct rpc_call_ops nfs4_delegreturn_ops = {
+#if defined(CONFIG_NFS_V4_1)
+ .rpc_call_prepare = nfs4_delegreturn_prepare,
+#endif /* CONFIG_NFS_V4_1 */
.rpc_call_done = nfs4_delegreturn_done,
.rpc_release = nfs4_delegreturn_release,
};
@@ -3032,7 +3514,7 @@
};
int status = 0;
- data = kmalloc(sizeof(*data), GFP_KERNEL);
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
if (data == NULL)
return -ENOMEM;
data->args.fhandle = &data->fh;
@@ -3042,6 +3524,7 @@
memcpy(&data->stateid, stateid, sizeof(data->stateid));
data->res.fattr = &data->fattr;
data->res.server = server;
+ data->res.seq_res.sr_slotid = NFS4_MAX_SLOT_TABLE;
nfs_fattr_init(data->res.fattr);
data->timestamp = jiffies;
data->rpc_status = 0;
@@ -3127,7 +3610,7 @@
goto out;
lsp = request->fl_u.nfs4_fl.owner;
arg.lock_owner.id = lsp->ls_id.id;
- status = rpc_call_sync(server->client, &msg, 0);
+ status = nfs4_call_sync(server, &msg, &arg, &res, 1);
switch (status) {
case 0:
request->fl_type = F_UNLCK;
@@ -3187,13 +3670,14 @@
struct nfs4_unlockdata *p;
struct inode *inode = lsp->ls_state->inode;
- p = kmalloc(sizeof(*p), GFP_KERNEL);
+ p = kzalloc(sizeof(*p), GFP_KERNEL);
if (p == NULL)
return NULL;
p->arg.fh = NFS_FH(inode);
p->arg.fl = &p->fl;
p->arg.seqid = seqid;
p->res.seqid = seqid;
+ p->res.seq_res.sr_slotid = NFS4_MAX_SLOT_TABLE;
p->arg.stateid = &lsp->ls_stateid;
p->lsp = lsp;
atomic_inc(&lsp->ls_count);
@@ -3217,6 +3701,8 @@
{
struct nfs4_unlockdata *calldata = data;
+ nfs4_sequence_done(calldata->server, &calldata->res.seq_res,
+ task->tk_status);
if (RPC_ASSASSINATED(task))
return;
switch (task->tk_status) {
@@ -3233,8 +3719,11 @@
break;
default:
if (nfs4_async_handle_error(task, calldata->server, NULL) == -EAGAIN)
- rpc_restart_call(task);
+ nfs4_restart_rpc(task,
+ calldata->server->nfs_client);
}
+ nfs4_sequence_free_slot(calldata->server->nfs_client,
+ &calldata->res.seq_res);
}
static void nfs4_locku_prepare(struct rpc_task *task, void *data)
@@ -3249,6 +3738,10 @@
return;
}
calldata->timestamp = jiffies;
+ if (nfs4_setup_sequence(calldata->server->nfs_client,
+ &calldata->arg.seq_args,
+ &calldata->res.seq_res, 1, task))
+ return;
rpc_call_start(task);
}
@@ -3341,6 +3834,7 @@
unsigned long timestamp;
int rpc_status;
int cancelled;
+ struct nfs_server *server;
};
static struct nfs4_lockdata *nfs4_alloc_lockdata(struct file_lock *fl,
@@ -3366,7 +3860,9 @@
p->arg.lock_owner.clientid = server->nfs_client->cl_clientid;
p->arg.lock_owner.id = lsp->ls_id.id;
p->res.lock_seqid = p->arg.lock_seqid;
+ p->res.seq_res.sr_slotid = NFS4_MAX_SLOT_TABLE;
p->lsp = lsp;
+ p->server = server;
atomic_inc(&lsp->ls_count);
p->ctx = get_nfs_open_context(ctx);
memcpy(&p->fl, fl, sizeof(p->fl));
@@ -3396,6 +3892,9 @@
} else
data->arg.new_lock_owner = 0;
data->timestamp = jiffies;
+ if (nfs4_setup_sequence(data->server->nfs_client, &data->arg.seq_args,
+ &data->res.seq_res, 1, task))
+ return;
rpc_call_start(task);
dprintk("%s: done!, ret = %d\n", __func__, data->rpc_status);
}
@@ -3406,6 +3905,9 @@
dprintk("%s: begin!\n", __func__);
+ nfs4_sequence_done_free_slot(data->server, &data->res.seq_res,
+ task->tk_status);
+
data->rpc_status = task->tk_status;
if (RPC_ASSASSINATED(task))
goto out;
@@ -3487,8 +3989,6 @@
ret = nfs4_wait_for_completion_rpc_task(task);
if (ret == 0) {
ret = data->rpc_status;
- if (ret == -NFS4ERR_DENIED)
- ret = -EAGAIN;
} else
data->cancelled = 1;
rpc_put_task(task);
@@ -3576,9 +4076,11 @@
int err;
do {
+ err = _nfs4_proc_setlk(state, cmd, request);
+ if (err == -NFS4ERR_DENIED)
+ err = -EAGAIN;
err = nfs4_handle_exception(NFS_SERVER(state->inode),
- _nfs4_proc_setlk(state, cmd, request),
- &exception);
+ err, &exception);
} while (exception.retry);
return err;
}
@@ -3630,8 +4132,37 @@
goto out;
do {
err = _nfs4_do_setlk(state, F_SETLK, fl, 0);
- if (err != -NFS4ERR_DELAY)
- break;
+ switch (err) {
+ default:
+ printk(KERN_ERR "%s: unhandled error %d.\n",
+ __func__, err);
+ case 0:
+ case -ESTALE:
+ goto out;
+ case -NFS4ERR_EXPIRED:
+ case -NFS4ERR_STALE_CLIENTID:
+ case -NFS4ERR_STALE_STATEID:
+ nfs4_schedule_state_recovery(server->nfs_client);
+ goto out;
+ case -ERESTARTSYS:
+ /*
+ * The show must go on: exit, but mark the
+ * stateid as needing recovery.
+ */
+ case -NFS4ERR_ADMIN_REVOKED:
+ case -NFS4ERR_BAD_STATEID:
+ case -NFS4ERR_OPENMODE:
+ nfs4_state_mark_reclaim_nograce(server->nfs_client, state);
+ err = 0;
+ goto out;
+ case -ENOMEM:
+ case -NFS4ERR_DENIED:
+ /* kill_proc(fl->fl_pid, SIGLOST, 1); */
+ err = 0;
+ goto out;
+ case -NFS4ERR_DELAY:
+ break;
+ }
err = nfs4_handle_exception(server, err, &exception);
} while (exception.retry);
out:
@@ -3706,10 +4237,13 @@
.page = page,
.bitmask = bitmask,
};
+ struct nfs4_fs_locations_res res = {
+ .fs_locations = fs_locations,
+ };
struct rpc_message msg = {
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FS_LOCATIONS],
.rpc_argp = &args,
- .rpc_resp = fs_locations,
+ .rpc_resp = &res,
};
int status;
@@ -3717,24 +4251,720 @@
nfs_fattr_init(&fs_locations->fattr);
fs_locations->server = server;
fs_locations->nlocations = 0;
- status = rpc_call_sync(server->client, &msg, 0);
+ status = nfs4_call_sync(server, &msg, &args, &res, 0);
nfs_fixup_referral_attributes(&fs_locations->fattr);
dprintk("%s: returned status = %d\n", __func__, status);
return status;
}
-struct nfs4_state_recovery_ops nfs4_reboot_recovery_ops = {
+#ifdef CONFIG_NFS_V4_1
+/*
+ * nfs4_proc_exchange_id()
+ *
+ * Since the clientid has expired, all compounds using sessions
+ * associated with the stale clientid will be returning
+ * NFS4ERR_BADSESSION in the sequence operation, and will therefore
+ * be in some phase of session reset.
+ */
+static int nfs4_proc_exchange_id(struct nfs_client *clp, struct rpc_cred *cred)
+{
+ nfs4_verifier verifier;
+ struct nfs41_exchange_id_args args = {
+ .client = clp,
+ .flags = clp->cl_exchange_flags,
+ };
+ struct nfs41_exchange_id_res res = {
+ .client = clp,
+ };
+ int status;
+ struct rpc_message msg = {
+ .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_EXCHANGE_ID],
+ .rpc_argp = &args,
+ .rpc_resp = &res,
+ .rpc_cred = cred,
+ };
+ __be32 *p;
+
+ dprintk("--> %s\n", __func__);
+ BUG_ON(clp == NULL);
+
+ p = (u32 *)verifier.data;
+ *p++ = htonl((u32)clp->cl_boot_time.tv_sec);
+ *p = htonl((u32)clp->cl_boot_time.tv_nsec);
+ args.verifier = &verifier;
+
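+ /* Retry with a bumped uniquifier while the server reports the
+    client owner id as already in use */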
+ while (1) {
+ args.id_len = scnprintf(args.id, sizeof(args.id),
+ "%s/%s %u",
+ clp->cl_ipaddr,
+ rpc_peeraddr2str(clp->cl_rpcclient,
+ RPC_DISPLAY_ADDR),
+ clp->cl_id_uniquifier);
+
+ status = rpc_call_sync(clp->cl_rpcclient, &msg, 0);
+
+ if (status != -NFS4ERR_CLID_INUSE)
+ break;
+
+ if (signalled())
+ break;
+
+ if (++clp->cl_id_uniquifier == 0)
+ break;
+ }
+
+ dprintk("<-- %s status= %d\n", __func__, status);
+ return status;
+}
+
+struct nfs4_get_lease_time_data {
+ struct nfs4_get_lease_time_args *args;
+ struct nfs4_get_lease_time_res *res;
+ struct nfs_client *clp;
+};
+
+static void nfs4_get_lease_time_prepare(struct rpc_task *task,
+ void *calldata)
+{
+ int ret;
+ struct nfs4_get_lease_time_data *data =
+ (struct nfs4_get_lease_time_data *)calldata;
+
+ dprintk("--> %s\n", __func__);
+ /* just set up the sequence; do not trigger session recovery
+ since we're invoked within one */
+ ret = nfs41_setup_sequence(data->clp->cl_session,
+ &data->args->la_seq_args,
+ &data->res->lr_seq_res, 0, task);
+
+ BUG_ON(ret == -EAGAIN);
+ rpc_call_start(task);
+ dprintk("<-- %s\n", __func__);
+}
+
+/*
+ * Called from nfs4_state_manager thread for session setup, so don't recover
+ * from sequence operation or clientid errors.
+ */
+static void nfs4_get_lease_time_done(struct rpc_task *task, void *calldata)
+{
+ struct nfs4_get_lease_time_data *data =
+ (struct nfs4_get_lease_time_data *)calldata;
+
+ dprintk("--> %s\n", __func__);
+ nfs41_sequence_done(data->clp, &data->res->lr_seq_res, task->tk_status);
+ switch (task->tk_status) {
+ case -NFS4ERR_DELAY:
+ case -NFS4ERR_GRACE:
+ dprintk("%s Retry: tk_status %d\n", __func__, task->tk_status);
+ rpc_delay(task, NFS4_POLL_RETRY_MIN);
+ task->tk_status = 0;
+ nfs4_restart_rpc(task, data->clp);
+ return;
+ }
+ nfs41_sequence_free_slot(data->clp, &data->res->lr_seq_res);
+ dprintk("<-- %s\n", __func__);
+}
+
+struct rpc_call_ops nfs4_get_lease_time_ops = {
+ .rpc_call_prepare = nfs4_get_lease_time_prepare,
+ .rpc_call_done = nfs4_get_lease_time_done,
+};
+
+int nfs4_proc_get_lease_time(struct nfs_client *clp, struct nfs_fsinfo *fsinfo)
+{
+ struct rpc_task *task;
+ struct nfs4_get_lease_time_args args;
+ struct nfs4_get_lease_time_res res = {
+ .lr_fsinfo = fsinfo,
+ };
+ struct nfs4_get_lease_time_data data = {
+ .args = &args,
+ .res = &res,
+ .clp = clp,
+ };
+ struct rpc_message msg = {
+ .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GET_LEASE_TIME],
+ .rpc_argp = &args,
+ .rpc_resp = &res,
+ };
+ struct rpc_task_setup task_setup = {
+ .rpc_client = clp->cl_rpcclient,
+ .rpc_message = &msg,
+ .callback_ops = &nfs4_get_lease_time_ops,
+ .callback_data = &data
+ };
+ int status;
+
+ res.lr_seq_res.sr_slotid = NFS4_MAX_SLOT_TABLE;
+ dprintk("--> %s\n", __func__);
+ task = rpc_run_task(&task_setup);
+
+ if (IS_ERR(task))
+ status = PTR_ERR(task);
+ else {
+ status = task->tk_status;
+ rpc_put_task(task);
+ }
+ dprintk("<-- %s return %d\n", __func__, status);
+
+ return status;
+}
+
+/*
+ * Reset a slot table
+ */
+static int nfs4_reset_slot_table(struct nfs4_slot_table *tbl, int max_slots,
+ int old_max_slots, int ivalue)
+{
+ int i;
+ int ret = 0;
+
+ dprintk("--> %s: max_reqs=%u, tbl %p\n", __func__, max_slots, tbl);
+
+ /*
+ * Until we have dynamic slot table adjustment, insist
+ * upon the same slot table size
+ */
+ if (max_slots != old_max_slots) {
+ dprintk("%s reset slot table does't match old\n",
+ __func__);
+ ret = -EINVAL; /* XXX NFS4ERR_REQ_TOO_BIG? */
+ goto out;
+ }
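+ /* Reset each slot's sequence number and mark every slot unused */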
+ spin_lock(&tbl->slot_tbl_lock);
+ for (i = 0; i < max_slots; ++i)
+ tbl->slots[i].seq_nr = ivalue;
+ tbl->highest_used_slotid = -1;
+ spin_unlock(&tbl->slot_tbl_lock);
+ dprintk("%s: tbl=%p slots=%p max_slots=%d\n", __func__,
+ tbl, tbl->slots, tbl->max_slots);
+out:
+ dprintk("<-- %s: return %d\n", __func__, ret);
+ return ret;
+}
+
+/*
+ * Reset the forechannel and backchannel slot tables
+ */
+static int nfs4_reset_slot_tables(struct nfs4_session *session)
+{
+ int status;
+
+ status = nfs4_reset_slot_table(&session->fc_slot_table,
+ session->fc_attrs.max_reqs,
+ session->fc_slot_table.max_slots,
+ 1);
+ if (status)
+ return status;
+
+ status = nfs4_reset_slot_table(&session->bc_slot_table,
+ session->bc_attrs.max_reqs,
+ session->bc_slot_table.max_slots,
+ 0);
+ return status;
+}
+
+/* Destroy the slot table */
+static void nfs4_destroy_slot_tables(struct nfs4_session *session)
+{
+ if (session->fc_slot_table.slots != NULL) {
+ kfree(session->fc_slot_table.slots);
+ session->fc_slot_table.slots = NULL;
+ }
+ if (session->bc_slot_table.slots != NULL) {
+ kfree(session->bc_slot_table.slots);
+ session->bc_slot_table.slots = NULL;
+ }
+ return;
+}
+
+/*
+ * Initialize slot table
+ */
+static int nfs4_init_slot_table(struct nfs4_slot_table *tbl,
+ int max_slots, int ivalue)
+{
+ int i;
+ struct nfs4_slot *slot;
+ int ret = -ENOMEM;
+
+ BUG_ON(max_slots > NFS4_MAX_SLOT_TABLE);
+
+ dprintk("--> %s: max_reqs=%u\n", __func__, max_slots);
+
+ slot = kcalloc(max_slots, sizeof(struct nfs4_slot), GFP_KERNEL);
+ if (!slot)
+ goto out;
+ for (i = 0; i < max_slots; ++i)
+ slot[i].seq_nr = ivalue;
+ ret = 0;
+
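+ /* Publish the new slot array under the table lock; back out if the
+    table was already initialized */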
+ spin_lock(&tbl->slot_tbl_lock);
+ if (tbl->slots != NULL) {
+ spin_unlock(&tbl->slot_tbl_lock);
+ dprintk("%s: slot table already initialized. tbl=%p slots=%p\n",
+ __func__, tbl, tbl->slots);
+ WARN_ON(1);
+ goto out_free;
+ }
+ tbl->max_slots = max_slots;
+ tbl->slots = slot;
+ tbl->highest_used_slotid = -1; /* no slot is currently used */
+ spin_unlock(&tbl->slot_tbl_lock);
+ dprintk("%s: tbl=%p slots=%p max_slots=%d\n", __func__,
+ tbl, tbl->slots, tbl->max_slots);
+out:
+ dprintk("<-- %s: return %d\n", __func__, ret);
+ return ret;
+
+out_free:
+ kfree(slot);
+ goto out;
+}
+
+/*
+ * Initialize the forechannel and backchannel tables
+ */
+static int nfs4_init_slot_tables(struct nfs4_session *session)
+{
+ int status;
+
+ status = nfs4_init_slot_table(&session->fc_slot_table,
+ session->fc_attrs.max_reqs, 1);
+ if (status)
+ return status;
+
+ status = nfs4_init_slot_table(&session->bc_slot_table,
+ session->bc_attrs.max_reqs, 0);
+ if (status)
+ nfs4_destroy_slot_tables(session);
+
+ return status;
+}
+
+struct nfs4_session *nfs4_alloc_session(struct nfs_client *clp)
+{
+ struct nfs4_session *session;
+ struct nfs4_slot_table *tbl;
+
+ session = kzalloc(sizeof(struct nfs4_session), GFP_KERNEL);
+ if (!session)
+ return NULL;
+
+ set_bit(NFS4CLNT_SESSION_SETUP, &clp->cl_state);
+ /*
+ * The create session reply races with the server back
+ * channel probe. Mark the client NFS_CS_SESSION_INITING
+ * so that the client back channel can find the
+ * nfs_client struct
+ */
+ clp->cl_cons_state = NFS_CS_SESSION_INITING;
+
+ tbl = &session->fc_slot_table;
+ spin_lock_init(&tbl->slot_tbl_lock);
+ rpc_init_wait_queue(&tbl->slot_tbl_waitq, "ForeChannel Slot table");
+
+ tbl = &session->bc_slot_table;
+ spin_lock_init(&tbl->slot_tbl_lock);
+ rpc_init_wait_queue(&tbl->slot_tbl_waitq, "BackChannel Slot table");
+
+ session->clp = clp;
+ return session;
+}
+
+void nfs4_destroy_session(struct nfs4_session *session)
+{
+ nfs4_proc_destroy_session(session);
+ dprintk("%s Destroy backchannel for xprt %p\n",
+ __func__, session->clp->cl_rpcclient->cl_xprt);
+ xprt_destroy_backchannel(session->clp->cl_rpcclient->cl_xprt,
+ NFS41_BC_MIN_CALLBACKS);
+ nfs4_destroy_slot_tables(session);
+ kfree(session);
+}
+
+/*
+ * Initialize the values to be used by the client in CREATE_SESSION
+ * If nfs4_init_session set the fore channel request and response sizes,
+ * use them.
+ *
+ * Set the back channel max_resp_sz_cached to zero to force the client to
+ * always set csa_cachethis to FALSE because the current implementation
+ * of the back channel DRC only supports caching the CB_SEQUENCE operation.
+ */
+static void nfs4_init_channel_attrs(struct nfs41_create_session_args *args)
+{
+ struct nfs4_session *session = args->client->cl_session;
+ unsigned int mxrqst_sz = session->fc_attrs.max_rqst_sz,
+ mxresp_sz = session->fc_attrs.max_resp_sz;
+
+ if (mxrqst_sz == 0)
+ mxrqst_sz = NFS_MAX_FILE_IO_SIZE;
+ if (mxresp_sz == 0)
+ mxresp_sz = NFS_MAX_FILE_IO_SIZE;
+ /* Fore channel attributes */
+ args->fc_attrs.headerpadsz = 0;
+ args->fc_attrs.max_rqst_sz = mxrqst_sz;
+ args->fc_attrs.max_resp_sz = mxresp_sz;
+ args->fc_attrs.max_resp_sz_cached = mxresp_sz;
+ args->fc_attrs.max_ops = NFS4_MAX_OPS;
+ args->fc_attrs.max_reqs = session->clp->cl_rpcclient->cl_xprt->max_reqs;
+
+ dprintk("%s: Fore Channel : max_rqst_sz=%u max_resp_sz=%u "
+ "max_resp_sz_cached=%u max_ops=%u max_reqs=%u\n",
+ __func__,
+ args->fc_attrs.max_rqst_sz, args->fc_attrs.max_resp_sz,
+ args->fc_attrs.max_resp_sz_cached, args->fc_attrs.max_ops,
+ args->fc_attrs.max_reqs);
+
+ /* Back channel attributes */
+ args->bc_attrs.headerpadsz = 0;
+ args->bc_attrs.max_rqst_sz = PAGE_SIZE;
+ args->bc_attrs.max_resp_sz = PAGE_SIZE;
+ args->bc_attrs.max_resp_sz_cached = 0;
+ args->bc_attrs.max_ops = NFS4_MAX_BACK_CHANNEL_OPS;
+ args->bc_attrs.max_reqs = 1;
+
+ dprintk("%s: Back Channel : max_rqst_sz=%u max_resp_sz=%u "
+ "max_resp_sz_cached=%u max_ops=%u max_reqs=%u\n",
+ __func__,
+ args->bc_attrs.max_rqst_sz, args->bc_attrs.max_resp_sz,
+ args->bc_attrs.max_resp_sz_cached, args->bc_attrs.max_ops,
+ args->bc_attrs.max_reqs);
+}
+
+static int _verify_channel_attr(char *chan, char *attr_name, u32 sent, u32 rcvd)
+{
+ if (rcvd <= sent)
+ return 0;
+ printk(KERN_WARNING "%s: Session INVALID: %s channel %s increased. "
+ "sent=%u rcvd=%u\n", __func__, chan, attr_name, sent, rcvd);
+ return -EINVAL;
+}
+
+#define _verify_fore_channel_attr(_name_) \
+ _verify_channel_attr("fore", #_name_, \
+ args->fc_attrs._name_, \
+ session->fc_attrs._name_)
+
+#define _verify_back_channel_attr(_name_) \
+ _verify_channel_attr("back", #_name_, \
+ args->bc_attrs._name_, \
+ session->bc_attrs._name_)
+
+/*
+ * The server is not allowed to increase the fore channel header pad size,
+ * maximum response size, or maximum number of operations.
+ *
+ * The back channel attributes are only negotiated down: we send what the
+ * (back channel) server insists upon.
+ */
+static int nfs4_verify_channel_attrs(struct nfs41_create_session_args *args,
+ struct nfs4_session *session)
+{
+ int ret = 0;
+
+ ret |= _verify_fore_channel_attr(headerpadsz);
+ ret |= _verify_fore_channel_attr(max_resp_sz);
+ ret |= _verify_fore_channel_attr(max_ops);
+
+ ret |= _verify_back_channel_attr(headerpadsz);
+ ret |= _verify_back_channel_attr(max_rqst_sz);
+ ret |= _verify_back_channel_attr(max_resp_sz);
+ ret |= _verify_back_channel_attr(max_resp_sz_cached);
+ ret |= _verify_back_channel_attr(max_ops);
+ ret |= _verify_back_channel_attr(max_reqs);
+
+ return ret;
+}
+
+static int _nfs4_proc_create_session(struct nfs_client *clp)
+{
+ struct nfs4_session *session = clp->cl_session;
+ struct nfs41_create_session_args args = {
+ .client = clp,
+ .cb_program = NFS4_CALLBACK,
+ };
+ struct nfs41_create_session_res res = {
+ .client = clp,
+ };
+ struct rpc_message msg = {
+ .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CREATE_SESSION],
+ .rpc_argp = &args,
+ .rpc_resp = &res,
+ };
+ int status;
+
+ nfs4_init_channel_attrs(&args);
+ args.flags = (SESSION4_PERSIST | SESSION4_BACK_CHAN);
+
+ status = rpc_call_sync(session->clp->cl_rpcclient, &msg, 0);
+
+ if (!status)
+ /* Verify the session's negotiated channel_attrs values */
+ status = nfs4_verify_channel_attrs(&args, session);
+ if (!status) {
+ /* Increment the clientid slot sequence id */
+ clp->cl_seqid++;
+ }
+
+ return status;
+}
+
+/*
+ * Issues a CREATE_SESSION operation to the server.
+ * It is the responsibility of the caller to verify the session is
+ * expired before calling this routine.
+ */
+int nfs4_proc_create_session(struct nfs_client *clp, int reset)
+{
+ int status;
+ unsigned *ptr;
+ struct nfs_fsinfo fsinfo;
+ struct nfs4_session *session = clp->cl_session;
+
+ dprintk("--> %s clp=%p session=%p\n", __func__, clp, session);
+
+ status = _nfs4_proc_create_session(clp);
+ if (status)
+ goto out;
+
+ /* Init or reset the fore channel */
+ if (reset)
+ status = nfs4_reset_slot_tables(session);
+ else
+ status = nfs4_init_slot_tables(session);
+ dprintk("fore channel slot table initialization returned %d\n", status);
+ if (status)
+ goto out;
+
+ ptr = (unsigned *)&session->sess_id.data[0];
+ dprintk("%s client>seqid %d sessionid %u:%u:%u:%u\n", __func__,
+ clp->cl_seqid, ptr[0], ptr[1], ptr[2], ptr[3]);
+
+ if (reset)
+ /* Lease time is already set */
+ goto out;
+
+ /* Get the lease time */
+ status = nfs4_proc_get_lease_time(clp, &fsinfo);
+ if (status == 0) {
+ /* Update lease time and schedule renewal */
+ spin_lock(&clp->cl_lock);
+ clp->cl_lease_time = fsinfo.lease_time * HZ;
+ clp->cl_last_renewal = jiffies;
+ clear_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state);
+ spin_unlock(&clp->cl_lock);
+
+ nfs4_schedule_state_renewal(clp);
+ }
+out:
+ dprintk("<-- %s\n", __func__);
+ return status;
+}
+
+/*
+ * Issue the over-the-wire RPC DESTROY_SESSION.
+ * The caller must serialize access to this routine.
+ */
+int nfs4_proc_destroy_session(struct nfs4_session *session)
+{
+ int status = 0;
+ struct rpc_message msg;
+
+ dprintk("--> nfs4_proc_destroy_session\n");
+
+ /* session is still being setup */
+ if (session->clp->cl_cons_state != NFS_CS_READY)
+ return status;
+
+ msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_DESTROY_SESSION];
+ msg.rpc_argp = session;
+ msg.rpc_resp = NULL;
+ msg.rpc_cred = NULL;
+ status = rpc_call_sync(session->clp->cl_rpcclient, &msg, 0);
+
+ if (status)
+ printk(KERN_WARNING
+ "Got error %d from the server on DESTROY_SESSION. "
+ "Session has been destroyed regardless...\n", status);
+
+ dprintk("<-- nfs4_proc_destroy_session\n");
+ return status;
+}
+
+/*
+ * Renew the cl_session lease.
+ */
+static int nfs4_proc_sequence(struct nfs_client *clp, struct rpc_cred *cred)
+{
+ struct nfs4_sequence_args args;
+ struct nfs4_sequence_res res;
+
+ struct rpc_message msg = {
+ .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SEQUENCE],
+ .rpc_argp = &args,
+ .rpc_resp = &res,
+ .rpc_cred = cred,
+ };
+
+ args.sa_cache_this = 0;
+
+ return nfs4_call_sync_sequence(clp, clp->cl_rpcclient, &msg, &args,
+ &res, 0);
+}
+
+void nfs41_sequence_call_done(struct rpc_task *task, void *data)
+{
+ struct nfs_client *clp = (struct nfs_client *)data;
+
+ nfs41_sequence_done(clp, task->tk_msg.rpc_resp, task->tk_status);
+
+ if (task->tk_status < 0) {
+ dprintk("%s ERROR %d\n", __func__, task->tk_status);
+
+ if (_nfs4_async_handle_error(task, NULL, clp, NULL)
+ == -EAGAIN) {
+ nfs4_restart_rpc(task, clp);
+ return;
+ }
+ }
+ nfs41_sequence_free_slot(clp, task->tk_msg.rpc_resp);
+ dprintk("%s rpc_cred %p\n", __func__, task->tk_msg.rpc_cred);
+
+ put_rpccred(task->tk_msg.rpc_cred);
+ kfree(task->tk_msg.rpc_argp);
+ kfree(task->tk_msg.rpc_resp);
+
+ dprintk("<-- %s\n", __func__);
+}
+
+static void nfs41_sequence_prepare(struct rpc_task *task, void *data)
+{
+ struct nfs_client *clp;
+ struct nfs4_sequence_args *args;
+ struct nfs4_sequence_res *res;
+
+ clp = (struct nfs_client *)data;
+ args = task->tk_msg.rpc_argp;
+ res = task->tk_msg.rpc_resp;
+
+ if (nfs4_setup_sequence(clp, args, res, 0, task))
+ return;
+ rpc_call_start(task);
+}
+
+static const struct rpc_call_ops nfs41_sequence_ops = {
+ .rpc_call_done = nfs41_sequence_call_done,
+ .rpc_call_prepare = nfs41_sequence_prepare,
+};
+
+static int nfs41_proc_async_sequence(struct nfs_client *clp,
+ struct rpc_cred *cred)
+{
+ struct nfs4_sequence_args *args;
+ struct nfs4_sequence_res *res;
+ struct rpc_message msg = {
+ .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SEQUENCE],
+ .rpc_cred = cred,
+ };
+
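+ /* args and res are freed in nfs41_sequence_call_done() */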
+ args = kzalloc(sizeof(*args), GFP_KERNEL);
+ if (!args)
+ return -ENOMEM;
+ res = kzalloc(sizeof(*res), GFP_KERNEL);
+ if (!res) {
+ kfree(args);
+ return -ENOMEM;
+ }
+ res->sr_slotid = NFS4_MAX_SLOT_TABLE;
+ msg.rpc_argp = args;
+ msg.rpc_resp = res;
+
+ return rpc_call_async(clp->cl_rpcclient, &msg, RPC_TASK_SOFT,
+ &nfs41_sequence_ops, (void *)clp);
+}
+
+#endif /* CONFIG_NFS_V4_1 */
+
+struct nfs4_state_recovery_ops nfs40_reboot_recovery_ops = {
.owner_flag_bit = NFS_OWNER_RECLAIM_REBOOT,
.state_flag_bit = NFS_STATE_RECLAIM_REBOOT,
.recover_open = nfs4_open_reclaim,
.recover_lock = nfs4_lock_reclaim,
+ .establish_clid = nfs4_init_clientid,
+ .get_clid_cred = nfs4_get_setclientid_cred,
};
-struct nfs4_state_recovery_ops nfs4_nograce_recovery_ops = {
+#if defined(CONFIG_NFS_V4_1)
+struct nfs4_state_recovery_ops nfs41_reboot_recovery_ops = {
+ .owner_flag_bit = NFS_OWNER_RECLAIM_REBOOT,
+ .state_flag_bit = NFS_STATE_RECLAIM_REBOOT,
+ .recover_open = nfs4_open_reclaim,
+ .recover_lock = nfs4_lock_reclaim,
+ .establish_clid = nfs4_proc_exchange_id,
+ .get_clid_cred = nfs4_get_exchange_id_cred,
+};
+#endif /* CONFIG_NFS_V4_1 */
+
+struct nfs4_state_recovery_ops nfs40_nograce_recovery_ops = {
.owner_flag_bit = NFS_OWNER_RECLAIM_NOGRACE,
.state_flag_bit = NFS_STATE_RECLAIM_NOGRACE,
.recover_open = nfs4_open_expired,
.recover_lock = nfs4_lock_expired,
+ .establish_clid = nfs4_init_clientid,
+ .get_clid_cred = nfs4_get_setclientid_cred,
+};
+
+#if defined(CONFIG_NFS_V4_1)
+struct nfs4_state_recovery_ops nfs41_nograce_recovery_ops = {
+ .owner_flag_bit = NFS_OWNER_RECLAIM_NOGRACE,
+ .state_flag_bit = NFS_STATE_RECLAIM_NOGRACE,
+ .recover_open = nfs4_open_expired,
+ .recover_lock = nfs4_lock_expired,
+ .establish_clid = nfs4_proc_exchange_id,
+ .get_clid_cred = nfs4_get_exchange_id_cred,
+};
+#endif /* CONFIG_NFS_V4_1 */
+
+struct nfs4_state_maintenance_ops nfs40_state_renewal_ops = {
+ .sched_state_renewal = nfs4_proc_async_renew,
+ .get_state_renewal_cred_locked = nfs4_get_renew_cred_locked,
+ .renew_lease = nfs4_proc_renew,
+};
+
+#if defined(CONFIG_NFS_V4_1)
+struct nfs4_state_maintenance_ops nfs41_state_renewal_ops = {
+ .sched_state_renewal = nfs41_proc_async_sequence,
+ .get_state_renewal_cred_locked = nfs4_get_machine_cred_locked,
+ .renew_lease = nfs4_proc_sequence,
+};
+#endif
+
+/*
+ * Per minor version reboot and network partition recovery ops
+ */
+
+struct nfs4_state_recovery_ops *nfs4_reboot_recovery_ops[] = {
+ &nfs40_reboot_recovery_ops,
+#if defined(CONFIG_NFS_V4_1)
+ &nfs41_reboot_recovery_ops,
+#endif
+};
+
+struct nfs4_state_recovery_ops *nfs4_nograce_recovery_ops[] = {
+ &nfs40_nograce_recovery_ops,
+#if defined(CONFIG_NFS_V4_1)
+ &nfs41_nograce_recovery_ops,
+#endif
+};
+
+struct nfs4_state_maintenance_ops *nfs4_state_renewal_ops[] = {
+ &nfs40_state_renewal_ops,
+#if defined(CONFIG_NFS_V4_1)
+ &nfs41_state_renewal_ops,
+#endif
};
static const struct inode_operations nfs4_file_inode_operations = {
diff --git a/fs/nfs/nfs4renewd.c b/fs/nfs/nfs4renewd.c
index f524e93..e27c6ce 100644
--- a/fs/nfs/nfs4renewd.c
+++ b/fs/nfs/nfs4renewd.c
@@ -59,12 +59,14 @@
void
nfs4_renew_state(struct work_struct *work)
{
+ struct nfs4_state_maintenance_ops *ops;
struct nfs_client *clp =
container_of(work, struct nfs_client, cl_renewd.work);
struct rpc_cred *cred;
long lease, timeout;
unsigned long last, now;
+ ops = nfs4_state_renewal_ops[clp->cl_minorversion];
dprintk("%s: start\n", __func__);
/* Are there any active superblocks? */
if (list_empty(&clp->cl_superblocks))
@@ -76,7 +78,7 @@
timeout = (2 * lease) / 3 + (long)last - (long)now;
/* Are we close to a lease timeout? */
if (time_after(now, last + lease/3)) {
- cred = nfs4_get_renew_cred_locked(clp);
+ cred = ops->get_state_renewal_cred_locked(clp);
spin_unlock(&clp->cl_lock);
if (cred == NULL) {
if (list_empty(&clp->cl_delegations)) {
@@ -86,7 +88,7 @@
nfs_expire_all_delegations(clp);
} else {
/* Queue an asynchronous RENEW. */
- nfs4_proc_async_renew(clp, cred);
+ ops->sched_state_renewal(clp, cred);
put_rpccred(cred);
}
timeout = (2 * lease) / 3;
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
index 0298e90..b73c5a7 100644
--- a/fs/nfs/nfs4state.c
+++ b/fs/nfs/nfs4state.c
@@ -60,7 +60,7 @@
static LIST_HEAD(nfs4_clientid_list);
-static int nfs4_init_client(struct nfs_client *clp, struct rpc_cred *cred)
+int nfs4_init_clientid(struct nfs_client *clp, struct rpc_cred *cred)
{
unsigned short port;
int status;
@@ -77,7 +77,7 @@
return status;
}
-static struct rpc_cred *nfs4_get_machine_cred_locked(struct nfs_client *clp)
+struct rpc_cred *nfs4_get_machine_cred_locked(struct nfs_client *clp)
{
struct rpc_cred *cred = NULL;
@@ -114,17 +114,21 @@
return cred;
}
-static struct rpc_cred *nfs4_get_renew_cred(struct nfs_client *clp)
+#if defined(CONFIG_NFS_V4_1)
+
+struct rpc_cred *nfs4_get_exchange_id_cred(struct nfs_client *clp)
{
struct rpc_cred *cred;
spin_lock(&clp->cl_lock);
- cred = nfs4_get_renew_cred_locked(clp);
+ cred = nfs4_get_machine_cred_locked(clp);
spin_unlock(&clp->cl_lock);
return cred;
}
-static struct rpc_cred *nfs4_get_setclientid_cred(struct nfs_client *clp)
+#endif /* CONFIG_NFS_V4_1 */
+
+struct rpc_cred *nfs4_get_setclientid_cred(struct nfs_client *clp)
{
struct nfs4_state_owner *sp;
struct rb_node *pos;
@@ -738,12 +742,14 @@
void nfs_increment_open_seqid(int status, struct nfs_seqid *seqid)
{
- if (status == -NFS4ERR_BAD_SEQID) {
- struct nfs4_state_owner *sp = container_of(seqid->sequence,
- struct nfs4_state_owner, so_seqid);
+ struct nfs4_state_owner *sp = container_of(seqid->sequence,
+ struct nfs4_state_owner, so_seqid);
+ struct nfs_server *server = sp->so_server;
+
+ if (status == -NFS4ERR_BAD_SEQID)
nfs4_drop_state_owner(sp);
- }
- nfs_increment_seqid(status, seqid);
+ if (!nfs4_has_session(server->nfs_client))
+ nfs_increment_seqid(status, seqid);
}
/*
@@ -847,32 +853,45 @@
struct file_lock *fl;
int status = 0;
+ if (inode->i_flock == NULL)
+ return 0;
+
+ /* Guard against delegation returns and new lock/unlock calls */
down_write(&nfsi->rwsem);
+ /* Protect inode->i_flock using the BKL */
+ lock_kernel();
for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) {
if (!(fl->fl_flags & (FL_POSIX|FL_FLOCK)))
continue;
if (nfs_file_open_context(fl->fl_file)->state != state)
continue;
+ unlock_kernel();
status = ops->recover_lock(state, fl);
- if (status >= 0)
- continue;
switch (status) {
+ case 0:
+ break;
+ case -ESTALE:
+ case -NFS4ERR_ADMIN_REVOKED:
+ case -NFS4ERR_STALE_STATEID:
+ case -NFS4ERR_BAD_STATEID:
+ case -NFS4ERR_EXPIRED:
+ case -NFS4ERR_NO_GRACE:
+ case -NFS4ERR_STALE_CLIENTID:
+ goto out;
default:
printk(KERN_ERR "%s: unhandled error %d. Zeroing state\n",
__func__, status);
- case -NFS4ERR_EXPIRED:
- case -NFS4ERR_NO_GRACE:
+ case -ENOMEM:
+ case -NFS4ERR_DENIED:
case -NFS4ERR_RECLAIM_BAD:
case -NFS4ERR_RECLAIM_CONFLICT:
/* kill_proc(fl->fl_pid, SIGLOST, 1); */
- break;
- case -NFS4ERR_STALE_CLIENTID:
- goto out_err;
+ status = 0;
}
+ lock_kernel();
}
- up_write(&nfsi->rwsem);
- return 0;
-out_err:
+ unlock_kernel();
+out:
up_write(&nfsi->rwsem);
return status;
}
@@ -918,6 +937,7 @@
printk(KERN_ERR "%s: unhandled error %d. Zeroing state\n",
__func__, status);
case -ENOENT:
+ case -ENOMEM:
case -ESTALE:
/*
* Open state on this file cannot be recovered
@@ -928,6 +948,9 @@
/* Mark the file as being 'closed' */
state->state = 0;
break;
+ case -NFS4ERR_ADMIN_REVOKED:
+ case -NFS4ERR_STALE_STATEID:
+ case -NFS4ERR_BAD_STATEID:
case -NFS4ERR_RECLAIM_BAD:
case -NFS4ERR_RECLAIM_CONFLICT:
nfs4_state_mark_reclaim_nograce(sp->so_client, state);
@@ -1042,6 +1065,14 @@
case -NFS4ERR_EXPIRED:
set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state);
nfs4_state_start_reclaim_nograce(clp);
+ case -NFS4ERR_BADSESSION:
+ case -NFS4ERR_BADSLOT:
+ case -NFS4ERR_BAD_HIGH_SLOT:
+ case -NFS4ERR_DEADSESSION:
+ case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
+ case -NFS4ERR_SEQ_FALSE_RETRY:
+ case -NFS4ERR_SEQ_MISORDERED:
+ set_bit(NFS4CLNT_SESSION_SETUP, &clp->cl_state);
}
}
@@ -1075,18 +1106,22 @@
static int nfs4_check_lease(struct nfs_client *clp)
{
struct rpc_cred *cred;
+ struct nfs4_state_maintenance_ops *ops =
+ nfs4_state_renewal_ops[clp->cl_minorversion];
int status = -NFS4ERR_EXPIRED;
/* Is the client already known to have an expired lease? */
if (test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state))
return 0;
- cred = nfs4_get_renew_cred(clp);
+ spin_lock(&clp->cl_lock);
+ cred = ops->get_state_renewal_cred_locked(clp);
+ spin_unlock(&clp->cl_lock);
if (cred == NULL) {
cred = nfs4_get_setclientid_cred(clp);
if (cred == NULL)
goto out;
}
- status = nfs4_proc_renew(clp, cred);
+ status = ops->renew_lease(clp, cred);
put_rpccred(cred);
out:
nfs4_recovery_handle_error(clp, status);
@@ -1096,21 +1131,98 @@
static int nfs4_reclaim_lease(struct nfs_client *clp)
{
struct rpc_cred *cred;
+ struct nfs4_state_recovery_ops *ops =
+ nfs4_reboot_recovery_ops[clp->cl_minorversion];
int status = -ENOENT;
- cred = nfs4_get_setclientid_cred(clp);
+ cred = ops->get_clid_cred(clp);
if (cred != NULL) {
- status = nfs4_init_client(clp, cred);
+ status = ops->establish_clid(clp, cred);
put_rpccred(cred);
/* Handle case where the user hasn't set up machine creds */
if (status == -EACCES && cred == clp->cl_machine_cred) {
nfs4_clear_machine_cred(clp);
status = -EAGAIN;
}
+ if (status == -NFS4ERR_MINOR_VERS_MISMATCH)
+ status = -EPROTONOSUPPORT;
}
return status;
}
+#ifdef CONFIG_NFS_V4_1
+static void nfs4_session_recovery_handle_error(struct nfs_client *clp, int err)
+{
+ switch (err) {
+ case -NFS4ERR_STALE_CLIENTID:
+ set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state);
+ set_bit(NFS4CLNT_SESSION_SETUP, &clp->cl_state);
+ }
+}
+
+static int nfs4_reset_session(struct nfs_client *clp)
+{
+ int status;
+
+ status = nfs4_proc_destroy_session(clp->cl_session);
+ if (status && status != -NFS4ERR_BADSESSION &&
+ status != -NFS4ERR_DEADSESSION) {
+ nfs4_session_recovery_handle_error(clp, status);
+ goto out;
+ }
+
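+ /* Zero out the old sessionid before asking the server for a new session */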
+ memset(clp->cl_session->sess_id.data, 0, NFS4_MAX_SESSIONID_LEN);
+ status = nfs4_proc_create_session(clp, 1);
+ if (status)
+ nfs4_session_recovery_handle_error(clp, status);
+ /* fall through */
+out:
+ /* Wake up the next rpc task even on error */
+ rpc_wake_up_next(&clp->cl_session->fc_slot_table.slot_tbl_waitq);
+ return status;
+}
+
+static int nfs4_initialize_session(struct nfs_client *clp)
+{
+ int status;
+
+ status = nfs4_proc_create_session(clp, 0);
+ if (!status) {
+ nfs_mark_client_ready(clp, NFS_CS_READY);
+ } else if (status == -NFS4ERR_STALE_CLIENTID) {
+ set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state);
+ set_bit(NFS4CLNT_SESSION_SETUP, &clp->cl_state);
+ } else {
+ nfs_mark_client_ready(clp, status);
+ }
+ return status;
+}
+#else /* CONFIG_NFS_V4_1 */
+static int nfs4_reset_session(struct nfs_client *clp) { return 0; }
+static int nfs4_initialize_session(struct nfs_client *clp) { return 0; }
+#endif /* CONFIG_NFS_V4_1 */
+
+/* Set NFS4CLNT_LEASE_EXPIRED for all v4.0 errors and for recoverable errors
+ * on EXCHANGE_ID for v4.1
+ */
+static void nfs4_set_lease_expired(struct nfs_client *clp, int status)
+{
+ if (nfs4_has_session(clp)) {
+ switch (status) {
+ case -NFS4ERR_DELAY:
+ case -NFS4ERR_CLID_INUSE:
+ case -EAGAIN:
+ break;
+
+ case -NFS4ERR_NOT_SAME: /* FixMe: implement recovery
+ * in nfs4_exchange_id */
+ default:
+ return;
+ }
+ }
+ set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state);
+}
+
static void nfs4_state_manager(struct nfs_client *clp)
{
int status = 0;
@@ -1121,9 +1233,12 @@
/* We're going to have to re-establish a clientid */
status = nfs4_reclaim_lease(clp);
if (status) {
- set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state);
+ nfs4_set_lease_expired(clp, status);
if (status == -EAGAIN)
continue;
+ if (clp->cl_cons_state ==
+ NFS_CS_SESSION_INITING)
+ nfs_mark_client_ready(clp, status);
goto out_error;
}
clear_bit(NFS4CLNT_CHECK_LEASE, &clp->cl_state);
@@ -1134,25 +1249,44 @@
if (status != 0)
continue;
}
-
+ /* Initialize or reset the session */
+ if (nfs4_has_session(clp) &&
+ test_and_clear_bit(NFS4CLNT_SESSION_SETUP, &clp->cl_state)) {
+ if (clp->cl_cons_state == NFS_CS_SESSION_INITING)
+ status = nfs4_initialize_session(clp);
+ else
+ status = nfs4_reset_session(clp);
+ if (status) {
+ if (status == -NFS4ERR_STALE_CLIENTID)
+ continue;
+ goto out_error;
+ }
+ }
/* First recover reboot state... */
if (test_and_clear_bit(NFS4CLNT_RECLAIM_REBOOT, &clp->cl_state)) {
- status = nfs4_do_reclaim(clp, &nfs4_reboot_recovery_ops);
+ status = nfs4_do_reclaim(clp,
+ nfs4_reboot_recovery_ops[clp->cl_minorversion]);
if (status == -NFS4ERR_STALE_CLIENTID)
continue;
+ if (test_bit(NFS4CLNT_SESSION_SETUP, &clp->cl_state))
+ continue;
nfs4_state_end_reclaim_reboot(clp);
continue;
}
/* Now recover expired state... */
if (test_and_clear_bit(NFS4CLNT_RECLAIM_NOGRACE, &clp->cl_state)) {
- status = nfs4_do_reclaim(clp, &nfs4_nograce_recovery_ops);
+ status = nfs4_do_reclaim(clp,
+ nfs4_nograce_recovery_ops[clp->cl_minorversion]);
if (status < 0) {
set_bit(NFS4CLNT_RECLAIM_NOGRACE, &clp->cl_state);
if (status == -NFS4ERR_STALE_CLIENTID)
continue;
if (status == -NFS4ERR_EXPIRED)
continue;
+ if (test_bit(NFS4CLNT_SESSION_SETUP,
+ &clp->cl_state))
+ continue;
goto out_error;
} else
nfs4_state_end_reclaim_nograce(clp);
diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c
index 1690f0e..617273e 100644
--- a/fs/nfs/nfs4xdr.c
+++ b/fs/nfs/nfs4xdr.c
@@ -192,12 +192,16 @@
decode_verifier_maxsz)
#define encode_remove_maxsz (op_encode_hdr_maxsz + \
nfs4_name_maxsz)
+#define decode_remove_maxsz (op_decode_hdr_maxsz + \
+ decode_change_info_maxsz)
#define encode_rename_maxsz (op_encode_hdr_maxsz + \
2 * nfs4_name_maxsz)
-#define decode_rename_maxsz (op_decode_hdr_maxsz + 5 + 5)
+#define decode_rename_maxsz (op_decode_hdr_maxsz + \
+ decode_change_info_maxsz + \
+ decode_change_info_maxsz)
#define encode_link_maxsz (op_encode_hdr_maxsz + \
nfs4_name_maxsz)
-#define decode_link_maxsz (op_decode_hdr_maxsz + 5)
+#define decode_link_maxsz (op_decode_hdr_maxsz + decode_change_info_maxsz)
#define encode_lock_maxsz (op_encode_hdr_maxsz + \
7 + \
1 + encode_stateid_maxsz + 8)
@@ -240,43 +244,115 @@
(encode_getattr_maxsz)
#define decode_fs_locations_maxsz \
(0)
+
+#if defined(CONFIG_NFS_V4_1)
+#define NFS4_MAX_MACHINE_NAME_LEN (64)
+
+#define encode_exchange_id_maxsz (op_encode_hdr_maxsz + \
+ encode_verifier_maxsz + \
+ 1 /* co_ownerid.len */ + \
+ XDR_QUADLEN(NFS4_EXCHANGE_ID_LEN) + \
+ 1 /* flags */ + \
+ 1 /* spa_how */ + \
+ 0 /* SP4_NONE (for now) */ + \
+ 1 /* zero implementation id array */)
+#define decode_exchange_id_maxsz (op_decode_hdr_maxsz + \
+ 2 /* eir_clientid */ + \
+ 1 /* eir_sequenceid */ + \
+ 1 /* eir_flags */ + \
+ 1 /* spr_how */ + \
+ 0 /* SP4_NONE (for now) */ + \
+ 2 /* eir_server_owner.so_minor_id */ + \
+ /* eir_server_owner.so_major_id<> */ \
+ XDR_QUADLEN(NFS4_OPAQUE_LIMIT) + 1 + \
+ /* eir_server_scope<> */ \
+ XDR_QUADLEN(NFS4_OPAQUE_LIMIT) + 1 + \
+ 1 /* eir_server_impl_id array length */ + \
+ 0 /* ignored eir_server_impl_id contents */)
+#define encode_channel_attrs_maxsz (6 + 1 /* ca_rdma_ird.len (0) */)
+#define decode_channel_attrs_maxsz (6 + \
+ 1 /* ca_rdma_ird.len */ + \
+ 1 /* ca_rdma_ird */)
+#define encode_create_session_maxsz (op_encode_hdr_maxsz + \
+ 2 /* csa_clientid */ + \
+ 1 /* csa_sequence */ + \
+ 1 /* csa_flags */ + \
+ encode_channel_attrs_maxsz + \
+ encode_channel_attrs_maxsz + \
+ 1 /* csa_cb_program */ + \
+ 1 /* csa_sec_parms.len (1) */ + \
+ 1 /* cb_secflavor (AUTH_SYS) */ + \
+ 1 /* stamp */ + \
+ 1 /* machinename.len */ + \
+ XDR_QUADLEN(NFS4_MAX_MACHINE_NAME_LEN) + \
+ 1 /* uid */ + \
+ 1 /* gid */ + \
+ 1 /* gids.len (0) */)
+#define decode_create_session_maxsz (op_decode_hdr_maxsz + \
+ XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN) + \
+ 1 /* csr_sequence */ + \
+ 1 /* csr_flags */ + \
+ decode_channel_attrs_maxsz + \
+ decode_channel_attrs_maxsz)
+#define encode_destroy_session_maxsz (op_encode_hdr_maxsz + 4)
+#define decode_destroy_session_maxsz (op_decode_hdr_maxsz)
+#define encode_sequence_maxsz (op_encode_hdr_maxsz + \
+ XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN) + 4)
+#define decode_sequence_maxsz (op_decode_hdr_maxsz + \
+ XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN) + 5)
+#else /* CONFIG_NFS_V4_1 */
+#define encode_sequence_maxsz 0
+#define decode_sequence_maxsz 0
+#endif /* CONFIG_NFS_V4_1 */
+
#define NFS4_enc_compound_sz (1024) /* XXX: large enough? */
#define NFS4_dec_compound_sz (1024) /* XXX: large enough? */
#define NFS4_enc_read_sz (compound_encode_hdr_maxsz + \
+ encode_sequence_maxsz + \
encode_putfh_maxsz + \
encode_read_maxsz)
#define NFS4_dec_read_sz (compound_decode_hdr_maxsz + \
+ decode_sequence_maxsz + \
decode_putfh_maxsz + \
decode_read_maxsz)
#define NFS4_enc_readlink_sz (compound_encode_hdr_maxsz + \
+ encode_sequence_maxsz + \
encode_putfh_maxsz + \
encode_readlink_maxsz)
#define NFS4_dec_readlink_sz (compound_decode_hdr_maxsz + \
+ decode_sequence_maxsz + \
decode_putfh_maxsz + \
decode_readlink_maxsz)
#define NFS4_enc_readdir_sz (compound_encode_hdr_maxsz + \
+ encode_sequence_maxsz + \
encode_putfh_maxsz + \
encode_readdir_maxsz)
#define NFS4_dec_readdir_sz (compound_decode_hdr_maxsz + \
+ decode_sequence_maxsz + \
decode_putfh_maxsz + \
decode_readdir_maxsz)
#define NFS4_enc_write_sz (compound_encode_hdr_maxsz + \
+ encode_sequence_maxsz + \
encode_putfh_maxsz + \
encode_write_maxsz + \
encode_getattr_maxsz)
#define NFS4_dec_write_sz (compound_decode_hdr_maxsz + \
+ decode_sequence_maxsz + \
decode_putfh_maxsz + \
decode_write_maxsz + \
decode_getattr_maxsz)
#define NFS4_enc_commit_sz (compound_encode_hdr_maxsz + \
+ encode_sequence_maxsz + \
encode_putfh_maxsz + \
encode_commit_maxsz + \
encode_getattr_maxsz)
#define NFS4_dec_commit_sz (compound_decode_hdr_maxsz + \
+ decode_sequence_maxsz + \
decode_putfh_maxsz + \
decode_commit_maxsz + \
decode_getattr_maxsz)
#define NFS4_enc_open_sz (compound_encode_hdr_maxsz + \
+ encode_sequence_maxsz + \
encode_putfh_maxsz + \
encode_savefh_maxsz + \
encode_open_maxsz + \
@@ -285,6 +361,7 @@
encode_restorefh_maxsz + \
encode_getattr_maxsz)
#define NFS4_dec_open_sz (compound_decode_hdr_maxsz + \
+ decode_sequence_maxsz + \
decode_putfh_maxsz + \
decode_savefh_maxsz + \
decode_open_maxsz + \
@@ -301,43 +378,53 @@
decode_putfh_maxsz + \
decode_open_confirm_maxsz)
#define NFS4_enc_open_noattr_sz (compound_encode_hdr_maxsz + \
+ encode_sequence_maxsz + \
encode_putfh_maxsz + \
encode_open_maxsz + \
encode_getattr_maxsz)
#define NFS4_dec_open_noattr_sz (compound_decode_hdr_maxsz + \
+ decode_sequence_maxsz + \
decode_putfh_maxsz + \
decode_open_maxsz + \
decode_getattr_maxsz)
#define NFS4_enc_open_downgrade_sz \
(compound_encode_hdr_maxsz + \
+ encode_sequence_maxsz + \
encode_putfh_maxsz + \
encode_open_downgrade_maxsz + \
encode_getattr_maxsz)
#define NFS4_dec_open_downgrade_sz \
(compound_decode_hdr_maxsz + \
+ decode_sequence_maxsz + \
decode_putfh_maxsz + \
decode_open_downgrade_maxsz + \
decode_getattr_maxsz)
#define NFS4_enc_close_sz (compound_encode_hdr_maxsz + \
+ encode_sequence_maxsz + \
encode_putfh_maxsz + \
encode_close_maxsz + \
encode_getattr_maxsz)
#define NFS4_dec_close_sz (compound_decode_hdr_maxsz + \
+ decode_sequence_maxsz + \
decode_putfh_maxsz + \
decode_close_maxsz + \
decode_getattr_maxsz)
#define NFS4_enc_setattr_sz (compound_encode_hdr_maxsz + \
+ encode_sequence_maxsz + \
encode_putfh_maxsz + \
encode_setattr_maxsz + \
encode_getattr_maxsz)
#define NFS4_dec_setattr_sz (compound_decode_hdr_maxsz + \
+ decode_sequence_maxsz + \
decode_putfh_maxsz + \
decode_setattr_maxsz + \
decode_getattr_maxsz)
#define NFS4_enc_fsinfo_sz (compound_encode_hdr_maxsz + \
+ encode_sequence_maxsz + \
encode_putfh_maxsz + \
encode_fsinfo_maxsz)
#define NFS4_dec_fsinfo_sz (compound_decode_hdr_maxsz + \
+ decode_sequence_maxsz + \
decode_putfh_maxsz + \
decode_fsinfo_maxsz)
#define NFS4_enc_renew_sz (compound_encode_hdr_maxsz + \
@@ -359,64 +446,81 @@
decode_putrootfh_maxsz + \
decode_fsinfo_maxsz)
#define NFS4_enc_lock_sz (compound_encode_hdr_maxsz + \
+ encode_sequence_maxsz + \
encode_putfh_maxsz + \
encode_lock_maxsz)
#define NFS4_dec_lock_sz (compound_decode_hdr_maxsz + \
+ decode_sequence_maxsz + \
decode_putfh_maxsz + \
decode_lock_maxsz)
#define NFS4_enc_lockt_sz (compound_encode_hdr_maxsz + \
+ encode_sequence_maxsz + \
encode_putfh_maxsz + \
encode_lockt_maxsz)
#define NFS4_dec_lockt_sz (compound_decode_hdr_maxsz + \
+ decode_sequence_maxsz + \
decode_putfh_maxsz + \
decode_lockt_maxsz)
#define NFS4_enc_locku_sz (compound_encode_hdr_maxsz + \
+ encode_sequence_maxsz + \
encode_putfh_maxsz + \
encode_locku_maxsz)
#define NFS4_dec_locku_sz (compound_decode_hdr_maxsz + \
+ decode_sequence_maxsz + \
decode_putfh_maxsz + \
decode_locku_maxsz)
#define NFS4_enc_access_sz (compound_encode_hdr_maxsz + \
+ encode_sequence_maxsz + \
encode_putfh_maxsz + \
encode_access_maxsz + \
encode_getattr_maxsz)
#define NFS4_dec_access_sz (compound_decode_hdr_maxsz + \
+ decode_sequence_maxsz + \
decode_putfh_maxsz + \
decode_access_maxsz + \
decode_getattr_maxsz)
#define NFS4_enc_getattr_sz (compound_encode_hdr_maxsz + \
+ encode_sequence_maxsz + \
encode_putfh_maxsz + \
encode_getattr_maxsz)
#define NFS4_dec_getattr_sz (compound_decode_hdr_maxsz + \
+ decode_sequence_maxsz + \
decode_putfh_maxsz + \
decode_getattr_maxsz)
#define NFS4_enc_lookup_sz (compound_encode_hdr_maxsz + \
+ encode_sequence_maxsz + \
encode_putfh_maxsz + \
encode_lookup_maxsz + \
encode_getattr_maxsz + \
encode_getfh_maxsz)
#define NFS4_dec_lookup_sz (compound_decode_hdr_maxsz + \
+ decode_sequence_maxsz + \
decode_putfh_maxsz + \
decode_lookup_maxsz + \
decode_getattr_maxsz + \
decode_getfh_maxsz)
#define NFS4_enc_lookup_root_sz (compound_encode_hdr_maxsz + \
+ encode_sequence_maxsz + \
encode_putrootfh_maxsz + \
encode_getattr_maxsz + \
encode_getfh_maxsz)
#define NFS4_dec_lookup_root_sz (compound_decode_hdr_maxsz + \
+ decode_sequence_maxsz + \
decode_putrootfh_maxsz + \
decode_getattr_maxsz + \
decode_getfh_maxsz)
#define NFS4_enc_remove_sz (compound_encode_hdr_maxsz + \
+ encode_sequence_maxsz + \
encode_putfh_maxsz + \
encode_remove_maxsz + \
encode_getattr_maxsz)
#define NFS4_dec_remove_sz (compound_decode_hdr_maxsz + \
+ decode_sequence_maxsz + \
decode_putfh_maxsz + \
- op_decode_hdr_maxsz + 5 + \
+ decode_remove_maxsz + \
decode_getattr_maxsz)
#define NFS4_enc_rename_sz (compound_encode_hdr_maxsz + \
+ encode_sequence_maxsz + \
encode_putfh_maxsz + \
encode_savefh_maxsz + \
encode_putfh_maxsz + \
@@ -425,6 +529,7 @@
encode_restorefh_maxsz + \
encode_getattr_maxsz)
#define NFS4_dec_rename_sz (compound_decode_hdr_maxsz + \
+ decode_sequence_maxsz + \
decode_putfh_maxsz + \
decode_savefh_maxsz + \
decode_putfh_maxsz + \
@@ -433,6 +538,7 @@
decode_restorefh_maxsz + \
decode_getattr_maxsz)
#define NFS4_enc_link_sz (compound_encode_hdr_maxsz + \
+ encode_sequence_maxsz + \
encode_putfh_maxsz + \
encode_savefh_maxsz + \
encode_putfh_maxsz + \
@@ -441,6 +547,7 @@
encode_restorefh_maxsz + \
decode_getattr_maxsz)
#define NFS4_dec_link_sz (compound_decode_hdr_maxsz + \
+ decode_sequence_maxsz + \
decode_putfh_maxsz + \
decode_savefh_maxsz + \
decode_putfh_maxsz + \
@@ -449,16 +556,19 @@
decode_restorefh_maxsz + \
decode_getattr_maxsz)
#define NFS4_enc_symlink_sz (compound_encode_hdr_maxsz + \
+ encode_sequence_maxsz + \
encode_putfh_maxsz + \
encode_symlink_maxsz + \
encode_getattr_maxsz + \
encode_getfh_maxsz)
#define NFS4_dec_symlink_sz (compound_decode_hdr_maxsz + \
+ decode_sequence_maxsz + \
decode_putfh_maxsz + \
decode_symlink_maxsz + \
decode_getattr_maxsz + \
decode_getfh_maxsz)
#define NFS4_enc_create_sz (compound_encode_hdr_maxsz + \
+ encode_sequence_maxsz + \
encode_putfh_maxsz + \
encode_savefh_maxsz + \
encode_create_maxsz + \
@@ -467,6 +577,7 @@
encode_restorefh_maxsz + \
encode_getattr_maxsz)
#define NFS4_dec_create_sz (compound_decode_hdr_maxsz + \
+ decode_sequence_maxsz + \
decode_putfh_maxsz + \
decode_savefh_maxsz + \
decode_create_maxsz + \
@@ -475,52 +586,98 @@
decode_restorefh_maxsz + \
decode_getattr_maxsz)
#define NFS4_enc_pathconf_sz (compound_encode_hdr_maxsz + \
+ encode_sequence_maxsz + \
encode_putfh_maxsz + \
encode_getattr_maxsz)
#define NFS4_dec_pathconf_sz (compound_decode_hdr_maxsz + \
+ decode_sequence_maxsz + \
decode_putfh_maxsz + \
decode_getattr_maxsz)
#define NFS4_enc_statfs_sz (compound_encode_hdr_maxsz + \
+ encode_sequence_maxsz + \
encode_putfh_maxsz + \
encode_statfs_maxsz)
#define NFS4_dec_statfs_sz (compound_decode_hdr_maxsz + \
+ decode_sequence_maxsz + \
decode_putfh_maxsz + \
decode_statfs_maxsz)
#define NFS4_enc_server_caps_sz (compound_encode_hdr_maxsz + \
+ encode_sequence_maxsz + \
encode_putfh_maxsz + \
encode_getattr_maxsz)
#define NFS4_dec_server_caps_sz (compound_decode_hdr_maxsz + \
+ decode_sequence_maxsz + \
decode_putfh_maxsz + \
decode_getattr_maxsz)
#define NFS4_enc_delegreturn_sz (compound_encode_hdr_maxsz + \
+ encode_sequence_maxsz + \
encode_putfh_maxsz + \
encode_delegreturn_maxsz + \
encode_getattr_maxsz)
#define NFS4_dec_delegreturn_sz (compound_decode_hdr_maxsz + \
+ decode_sequence_maxsz + \
decode_delegreturn_maxsz + \
decode_getattr_maxsz)
#define NFS4_enc_getacl_sz (compound_encode_hdr_maxsz + \
+ encode_sequence_maxsz + \
encode_putfh_maxsz + \
encode_getacl_maxsz)
#define NFS4_dec_getacl_sz (compound_decode_hdr_maxsz + \
+ decode_sequence_maxsz + \
decode_putfh_maxsz + \
decode_getacl_maxsz)
#define NFS4_enc_setacl_sz (compound_encode_hdr_maxsz + \
+ encode_sequence_maxsz + \
encode_putfh_maxsz + \
encode_setacl_maxsz)
#define NFS4_dec_setacl_sz (compound_decode_hdr_maxsz + \
+ decode_sequence_maxsz + \
decode_putfh_maxsz + \
decode_setacl_maxsz)
#define NFS4_enc_fs_locations_sz \
(compound_encode_hdr_maxsz + \
+ encode_sequence_maxsz + \
encode_putfh_maxsz + \
encode_lookup_maxsz + \
encode_fs_locations_maxsz)
#define NFS4_dec_fs_locations_sz \
(compound_decode_hdr_maxsz + \
+ decode_sequence_maxsz + \
decode_putfh_maxsz + \
decode_lookup_maxsz + \
decode_fs_locations_maxsz)
+#if defined(CONFIG_NFS_V4_1)
+#define NFS4_enc_exchange_id_sz \
+ (compound_encode_hdr_maxsz + \
+ encode_exchange_id_maxsz)
+#define NFS4_dec_exchange_id_sz \
+ (compound_decode_hdr_maxsz + \
+ decode_exchange_id_maxsz)
+#define NFS4_enc_create_session_sz \
+ (compound_encode_hdr_maxsz + \
+ encode_create_session_maxsz)
+#define NFS4_dec_create_session_sz \
+ (compound_decode_hdr_maxsz + \
+ decode_create_session_maxsz)
+#define NFS4_enc_destroy_session_sz (compound_encode_hdr_maxsz + \
+ encode_destroy_session_maxsz)
+#define NFS4_dec_destroy_session_sz (compound_decode_hdr_maxsz + \
+ decode_destroy_session_maxsz)
+#define NFS4_enc_sequence_sz \
+ (compound_decode_hdr_maxsz + \
+ encode_sequence_maxsz)
+#define NFS4_dec_sequence_sz \
+ (compound_decode_hdr_maxsz + \
+ decode_sequence_maxsz)
+#define NFS4_enc_get_lease_time_sz (compound_encode_hdr_maxsz + \
+ encode_sequence_maxsz + \
+ encode_putrootfh_maxsz + \
+ encode_fsinfo_maxsz)
+#define NFS4_dec_get_lease_time_sz (compound_decode_hdr_maxsz + \
+ decode_sequence_maxsz + \
+ decode_putrootfh_maxsz + \
+ decode_fsinfo_maxsz)
+#endif /* CONFIG_NFS_V4_1 */
static const umode_t nfs_type2fmt[] = {
[NF4BAD] = 0,
@@ -541,6 +698,8 @@
__be32 * nops_p;
uint32_t taglen;
char * tag;
+ uint32_t replen; /* expected reply words */
+ u32 minorversion;
};
/*
@@ -576,22 +735,31 @@
xdr_encode_opaque(p, str, len);
}
-static void encode_compound_hdr(struct xdr_stream *xdr, struct compound_hdr *hdr)
+static void encode_compound_hdr(struct xdr_stream *xdr,
+ struct rpc_rqst *req,
+ struct compound_hdr *hdr)
{
__be32 *p;
+ struct rpc_auth *auth = req->rq_task->tk_msg.rpc_cred->cr_auth;
+
+ /* initialize running count of expected bytes in reply.
+ * NOTE: the replied tag SHOULD be the same as the one sent,
+ * but the server is not REQUIRED to do so. */
+ hdr->replen = RPC_REPHDRSIZE + auth->au_rslack + 3 + hdr->taglen;
dprintk("encode_compound: tag=%.*s\n", (int)hdr->taglen, hdr->tag);
BUG_ON(hdr->taglen > NFS4_MAXTAGLEN);
RESERVE_SPACE(12+(XDR_QUADLEN(hdr->taglen)<<2));
WRITE32(hdr->taglen);
WRITEMEM(hdr->tag, hdr->taglen);
- WRITE32(NFS4_MINOR_VERSION);
+ WRITE32(hdr->minorversion);
hdr->nops_p = p;
WRITE32(hdr->nops);
}
static void encode_nops(struct compound_hdr *hdr)
{
+ BUG_ON(hdr->nops > NFS4_MAX_OPS);
*hdr->nops_p = htonl(hdr->nops);
}
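encode_compound_hdr() cannot know the operation count when the header is written, so nops_p remembers the reserved slot and encode_nops() backpatches it once every operation has been encoded. A minimal sketch of that reserve-then-backpatch pattern, using a plain buffer instead of the real xdr_stream API (demo_* names are illustrative):

#include <stdint.h>
#include <arpa/inet.h>

struct demo_hdr {
        uint32_t *nops_p;       /* reserved slot for the op count */
        uint32_t  nops;         /* running count of encoded ops */
};

static void demo_encode_hdr(uint32_t *slot, struct demo_hdr *hdr)
{
        hdr->nops_p = slot;     /* remember the slot; value not known yet */
        hdr->nops = 0;
}

static void demo_encode_nops(struct demo_hdr *hdr)
{
        *hdr->nops_p = htonl(hdr->nops);  /* backpatch the final count */
}

int main(void)
{
        uint32_t wire[1];
        struct demo_hdr hdr;

        demo_encode_hdr(wire, &hdr);
        hdr.nops += 3;          /* pretend three ops were encoded */
        demo_encode_nops(&hdr); /* wire[0] now holds htonl(3) */
        return 0;
}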
@@ -736,6 +904,7 @@
WRITE32(OP_ACCESS);
WRITE32(access);
hdr->nops++;
+ hdr->replen += decode_access_maxsz;
}
static void encode_close(struct xdr_stream *xdr, const struct nfs_closeargs *arg, struct compound_hdr *hdr)
@@ -747,6 +916,7 @@
WRITE32(arg->seqid->sequence->counter);
WRITEMEM(arg->stateid->data, NFS4_STATEID_SIZE);
hdr->nops++;
+ hdr->replen += decode_close_maxsz;
}
static void encode_commit(struct xdr_stream *xdr, const struct nfs_writeargs *args, struct compound_hdr *hdr)
@@ -758,6 +928,7 @@
WRITE64(args->offset);
WRITE32(args->count);
hdr->nops++;
+ hdr->replen += decode_commit_maxsz;
}
static void encode_create(struct xdr_stream *xdr, const struct nfs4_create_arg *create, struct compound_hdr *hdr)
@@ -789,6 +960,7 @@
WRITE32(create->name->len);
WRITEMEM(create->name->name, create->name->len);
hdr->nops++;
+ hdr->replen += decode_create_maxsz;
encode_attrs(xdr, create->attrs, create->server);
}
@@ -802,6 +974,7 @@
WRITE32(1);
WRITE32(bitmap);
hdr->nops++;
+ hdr->replen += decode_getattr_maxsz;
}
static void encode_getattr_two(struct xdr_stream *xdr, uint32_t bm0, uint32_t bm1, struct compound_hdr *hdr)
@@ -814,6 +987,7 @@
WRITE32(bm0);
WRITE32(bm1);
hdr->nops++;
+ hdr->replen += decode_getattr_maxsz;
}
static void encode_getfattr(struct xdr_stream *xdr, const u32* bitmask, struct compound_hdr *hdr)
@@ -841,6 +1015,7 @@
RESERVE_SPACE(4);
WRITE32(OP_GETFH);
hdr->nops++;
+ hdr->replen += decode_getfh_maxsz;
}
static void encode_link(struct xdr_stream *xdr, const struct qstr *name, struct compound_hdr *hdr)
@@ -852,6 +1027,7 @@
WRITE32(name->len);
WRITEMEM(name->name, name->len);
hdr->nops++;
+ hdr->replen += decode_link_maxsz;
}
static inline int nfs4_lock_type(struct file_lock *fl, int block)
@@ -899,6 +1075,7 @@
WRITE32(args->lock_seqid->sequence->counter);
}
hdr->nops++;
+ hdr->replen += decode_lock_maxsz;
}
static void encode_lockt(struct xdr_stream *xdr, const struct nfs_lockt_args *args, struct compound_hdr *hdr)
@@ -915,6 +1092,7 @@
WRITEMEM("lock id:", 8);
WRITE64(args->lock_owner.id);
hdr->nops++;
+ hdr->replen += decode_lockt_maxsz;
}
static void encode_locku(struct xdr_stream *xdr, const struct nfs_locku_args *args, struct compound_hdr *hdr)
@@ -929,6 +1107,7 @@
WRITE64(args->fl->fl_start);
WRITE64(nfs4_lock_length(args->fl));
hdr->nops++;
+ hdr->replen += decode_locku_maxsz;
}
static void encode_lookup(struct xdr_stream *xdr, const struct qstr *name, struct compound_hdr *hdr)
@@ -941,6 +1120,7 @@
WRITE32(len);
WRITEMEM(name->name, len);
hdr->nops++;
+ hdr->replen += decode_lookup_maxsz;
}
static void encode_share_access(struct xdr_stream *xdr, fmode_t fmode)
@@ -1080,6 +1260,7 @@
BUG();
}
hdr->nops++;
+ hdr->replen += decode_open_maxsz;
}
static void encode_open_confirm(struct xdr_stream *xdr, const struct nfs_open_confirmargs *arg, struct compound_hdr *hdr)
@@ -1091,6 +1272,7 @@
WRITEMEM(arg->stateid->data, NFS4_STATEID_SIZE);
WRITE32(arg->seqid->sequence->counter);
hdr->nops++;
+ hdr->replen += decode_open_confirm_maxsz;
}
static void encode_open_downgrade(struct xdr_stream *xdr, const struct nfs_closeargs *arg, struct compound_hdr *hdr)
@@ -1103,6 +1285,7 @@
WRITE32(arg->seqid->sequence->counter);
encode_share_access(xdr, arg->fmode);
hdr->nops++;
+ hdr->replen += decode_open_downgrade_maxsz;
}
static void
@@ -1116,6 +1299,7 @@
WRITE32(len);
WRITEMEM(fh->data, len);
hdr->nops++;
+ hdr->replen += decode_putfh_maxsz;
}
static void encode_putrootfh(struct xdr_stream *xdr, struct compound_hdr *hdr)
@@ -1125,6 +1309,7 @@
RESERVE_SPACE(4);
WRITE32(OP_PUTROOTFH);
hdr->nops++;
+ hdr->replen += decode_putrootfh_maxsz;
}
static void encode_stateid(struct xdr_stream *xdr, const struct nfs_open_context *ctx)
@@ -1153,6 +1338,7 @@
WRITE64(args->offset);
WRITE32(args->count);
hdr->nops++;
+ hdr->replen += decode_read_maxsz;
}
static void encode_readdir(struct xdr_stream *xdr, const struct nfs4_readdir_arg *readdir, struct rpc_rqst *req, struct compound_hdr *hdr)
@@ -1178,6 +1364,7 @@
WRITE32(attrs[0] & readdir->bitmask[0]);
WRITE32(attrs[1] & readdir->bitmask[1]);
hdr->nops++;
+ hdr->replen += decode_readdir_maxsz;
dprintk("%s: cookie = %Lu, verifier = %08x:%08x, bitmap = %08x:%08x\n",
__func__,
(unsigned long long)readdir->cookie,
@@ -1194,6 +1381,7 @@
RESERVE_SPACE(4);
WRITE32(OP_READLINK);
hdr->nops++;
+ hdr->replen += decode_readlink_maxsz;
}
static void encode_remove(struct xdr_stream *xdr, const struct qstr *name, struct compound_hdr *hdr)
@@ -1205,6 +1393,7 @@
WRITE32(name->len);
WRITEMEM(name->name, name->len);
hdr->nops++;
+ hdr->replen += decode_remove_maxsz;
}
static void encode_rename(struct xdr_stream *xdr, const struct qstr *oldname, const struct qstr *newname, struct compound_hdr *hdr)
@@ -1220,6 +1409,7 @@
WRITE32(newname->len);
WRITEMEM(newname->name, newname->len);
hdr->nops++;
+ hdr->replen += decode_rename_maxsz;
}
static void encode_renew(struct xdr_stream *xdr, const struct nfs_client *client_stateid, struct compound_hdr *hdr)
@@ -1230,6 +1420,7 @@
WRITE32(OP_RENEW);
WRITE64(client_stateid->cl_clientid);
hdr->nops++;
+ hdr->replen += decode_renew_maxsz;
}
static void
@@ -1240,6 +1431,7 @@
RESERVE_SPACE(4);
WRITE32(OP_RESTOREFH);
hdr->nops++;
+ hdr->replen += decode_restorefh_maxsz;
}
static int
@@ -1259,6 +1451,7 @@
WRITE32(arg->acl_len);
xdr_write_pages(xdr, arg->acl_pages, arg->acl_pgbase, arg->acl_len);
hdr->nops++;
+ hdr->replen += decode_setacl_maxsz;
return 0;
}
@@ -1270,6 +1463,7 @@
RESERVE_SPACE(4);
WRITE32(OP_SAVEFH);
hdr->nops++;
+ hdr->replen += decode_savefh_maxsz;
}
static void encode_setattr(struct xdr_stream *xdr, const struct nfs_setattrargs *arg, const struct nfs_server *server, struct compound_hdr *hdr)
@@ -1280,6 +1474,7 @@
WRITE32(OP_SETATTR);
WRITEMEM(arg->stateid.data, NFS4_STATEID_SIZE);
hdr->nops++;
+ hdr->replen += decode_setattr_maxsz;
encode_attrs(xdr, arg->iap, server);
}
@@ -1299,6 +1494,7 @@
RESERVE_SPACE(4);
WRITE32(setclientid->sc_cb_ident);
hdr->nops++;
+ hdr->replen += decode_setclientid_maxsz;
}
static void encode_setclientid_confirm(struct xdr_stream *xdr, const struct nfs_client *client_state, struct compound_hdr *hdr)
@@ -1310,6 +1506,7 @@
WRITE64(client_state->cl_clientid);
WRITEMEM(client_state->cl_confirm.data, NFS4_VERIFIER_SIZE);
hdr->nops++;
+ hdr->replen += decode_setclientid_confirm_maxsz;
}
static void encode_write(struct xdr_stream *xdr, const struct nfs_writeargs *args, struct compound_hdr *hdr)
@@ -1328,6 +1525,7 @@
xdr_write_pages(xdr, args->pages, args->pgbase, args->count);
hdr->nops++;
+ hdr->replen += decode_write_maxsz;
}
static void encode_delegreturn(struct xdr_stream *xdr, const nfs4_stateid *stateid, struct compound_hdr *hdr)
@@ -1339,11 +1537,163 @@
WRITE32(OP_DELEGRETURN);
WRITEMEM(stateid->data, NFS4_STATEID_SIZE);
hdr->nops++;
+ hdr->replen += decode_delegreturn_maxsz;
}
+
+#if defined(CONFIG_NFS_V4_1)
+/* NFSv4.1 operations */
+static void encode_exchange_id(struct xdr_stream *xdr,
+ struct nfs41_exchange_id_args *args,
+ struct compound_hdr *hdr)
+{
+ __be32 *p;
+
+ RESERVE_SPACE(4 + sizeof(args->verifier->data));
+ WRITE32(OP_EXCHANGE_ID);
+ WRITEMEM(args->verifier->data, sizeof(args->verifier->data));
+
+ encode_string(xdr, args->id_len, args->id);
+
+ RESERVE_SPACE(12);
+ WRITE32(args->flags);
+ WRITE32(0); /* zero length state_protect4_a */
+ WRITE32(0); /* zero length implementation id array */
+ hdr->nops++;
+ hdr->replen += decode_exchange_id_maxsz;
+}
+
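Every per-operation encoder in this patch follows the same shape: write the opcode and arguments, bump hdr->nops, and grow hdr->replen by the operation's decode_*_maxsz so the reply buffer can be sized up front. A sketch of that pattern with a hypothetical opcode and maxsz value (both assumptions, not real NFSv4 constants):

#include <stdint.h>
#include <arpa/inet.h>

struct demo_compound_hdr {
        uint32_t nops;          /* operations encoded so far */
        uint32_t replen;        /* expected reply size, in 32-bit words */
};

#define DEMO_OP_CODE            10000   /* hypothetical opcode */
#define demo_decode_op_maxsz    2       /* hypothetical worst-case reply words */

static void demo_encode_op(uint32_t *buf, struct demo_compound_hdr *hdr)
{
        *buf = htonl(DEMO_OP_CODE);             /* opcode on the wire */
        hdr->nops++;                            /* one more op in the compound */
        hdr->replen += demo_decode_op_maxsz;    /* grow the reply budget */
}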
+static void encode_create_session(struct xdr_stream *xdr,
+ struct nfs41_create_session_args *args,
+ struct compound_hdr *hdr)
+{
+ __be32 *p;
+ char machine_name[NFS4_MAX_MACHINE_NAME_LEN];
+ uint32_t len;
+ struct nfs_client *clp = args->client;
+
+ RESERVE_SPACE(4);
+ WRITE32(OP_CREATE_SESSION);
+
+ RESERVE_SPACE(8);
+ WRITE64(clp->cl_ex_clid);
+
+ RESERVE_SPACE(8);
+ WRITE32(clp->cl_seqid); /* Sequence id */
+ WRITE32(args->flags); /* flags */
+
+ RESERVE_SPACE(2*28); /* 2 channel_attrs */
+ /* Fore Channel */
+ WRITE32(args->fc_attrs.headerpadsz); /* header padding size */
+ WRITE32(args->fc_attrs.max_rqst_sz); /* max req size */
+ WRITE32(args->fc_attrs.max_resp_sz); /* max resp size */
+ WRITE32(args->fc_attrs.max_resp_sz_cached); /* Max resp sz cached */
+ WRITE32(args->fc_attrs.max_ops); /* max operations */
+ WRITE32(args->fc_attrs.max_reqs); /* max requests */
+ WRITE32(0); /* rdmachannel_attrs */
+
+ /* Back Channel */
+ WRITE32(args->bc_attrs.headerpadsz); /* header padding size */
+ WRITE32(args->bc_attrs.max_rqst_sz); /* max req size */
+ WRITE32(args->bc_attrs.max_resp_sz); /* max resp size */
+ WRITE32(args->bc_attrs.max_resp_sz_cached); /* Max resp sz cached */
+ WRITE32(args->bc_attrs.max_ops); /* max operations */
+ WRITE32(args->bc_attrs.max_reqs); /* max requests */
+ WRITE32(0); /* rdmachannel_attrs */
+
+ RESERVE_SPACE(4);
+ WRITE32(args->cb_program); /* cb_program */
+
+ RESERVE_SPACE(4); /* # of security flavors */
+ WRITE32(1);
+
+ RESERVE_SPACE(4);
+ WRITE32(RPC_AUTH_UNIX); /* auth_sys */
+
+ /* authsys_parms rfc1831 */
+ RESERVE_SPACE(4);
+ WRITE32((u32)clp->cl_boot_time.tv_nsec); /* stamp */
+ len = scnprintf(machine_name, sizeof(machine_name), "%s",
+ clp->cl_ipaddr);
+ RESERVE_SPACE(16 + len);
+ WRITE32(len);
+ WRITEMEM(machine_name, len);
+ WRITE32(0); /* UID */
+ WRITE32(0); /* GID */
+ WRITE32(0); /* No more gids */
+ hdr->nops++;
+ hdr->replen += decode_create_session_maxsz;
+}
+
+static void encode_destroy_session(struct xdr_stream *xdr,
+ struct nfs4_session *session,
+ struct compound_hdr *hdr)
+{
+ __be32 *p;
+ RESERVE_SPACE(4 + NFS4_MAX_SESSIONID_LEN);
+ WRITE32(OP_DESTROY_SESSION);
+ WRITEMEM(session->sess_id.data, NFS4_MAX_SESSIONID_LEN);
+ hdr->nops++;
+ hdr->replen += decode_destroy_session_maxsz;
+}
+#endif /* CONFIG_NFS_V4_1 */
+
+static void encode_sequence(struct xdr_stream *xdr,
+ const struct nfs4_sequence_args *args,
+ struct compound_hdr *hdr)
+{
+#if defined(CONFIG_NFS_V4_1)
+ struct nfs4_session *session = args->sa_session;
+ struct nfs4_slot_table *tp;
+ struct nfs4_slot *slot;
+ __be32 *p;
+
+ if (!session)
+ return;
+
+ tp = &session->fc_slot_table;
+
+ WARN_ON(args->sa_slotid == NFS4_MAX_SLOT_TABLE);
+ slot = tp->slots + args->sa_slotid;
+
+ RESERVE_SPACE(4);
+ WRITE32(OP_SEQUENCE);
+
+ /*
+ * Sessionid + seqid + slotid + max slotid + cache_this
+ */
+ dprintk("%s: sessionid=%u:%u:%u:%u seqid=%d slotid=%d "
+ "max_slotid=%d cache_this=%d\n",
+ __func__,
+ ((u32 *)session->sess_id.data)[0],
+ ((u32 *)session->sess_id.data)[1],
+ ((u32 *)session->sess_id.data)[2],
+ ((u32 *)session->sess_id.data)[3],
+ slot->seq_nr, args->sa_slotid,
+ tp->highest_used_slotid, args->sa_cache_this);
+ RESERVE_SPACE(NFS4_MAX_SESSIONID_LEN + 16);
+ WRITEMEM(session->sess_id.data, NFS4_MAX_SESSIONID_LEN);
+ WRITE32(slot->seq_nr);
+ WRITE32(args->sa_slotid);
+ WRITE32(tp->highest_used_slotid);
+ WRITE32(args->sa_cache_this);
+ hdr->nops++;
+ hdr->replen += decode_sequence_maxsz;
+#endif /* CONFIG_NFS_V4_1 */
+}
+
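The RESERVE_SPACE(NFS4_MAX_SESSIONID_LEN + 16) above corresponds to a 16-byte session id followed by four 32-bit fields. An illustrative view of that layout (a sketch only, not how the kernel represents the arguments):

#include <stdint.h>

struct demo_sequence_args_wire {
        uint8_t  sessionid[16];  /* NFS4_MAX_SESSIONID_LEN */
        uint32_t sequenceid;     /* slot->seq_nr */
        uint32_t slotid;         /* args->sa_slotid */
        uint32_t highest_slotid; /* tp->highest_used_slotid */
        uint32_t cachethis;      /* args->sa_cache_this */
};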
/*
* END OF "GENERIC" ENCODE ROUTINES.
*/
+static u32 nfs4_xdr_minorversion(const struct nfs4_sequence_args *args)
+{
+#if defined(CONFIG_NFS_V4_1)
+ if (args->sa_session)
+ return args->sa_session->clp->cl_minorversion;
+#endif /* CONFIG_NFS_V4_1 */
+ return 0;
+}
+
/*
* Encode an ACCESS request
*/
@@ -1351,11 +1701,12 @@
{
struct xdr_stream xdr;
struct compound_hdr hdr = {
- .nops = 0,
+ .minorversion = nfs4_xdr_minorversion(&args->seq_args),
};
xdr_init_encode(&xdr, &req->rq_snd_buf, p);
- encode_compound_hdr(&xdr, &hdr);
+ encode_compound_hdr(&xdr, req, &hdr);
+ encode_sequence(&xdr, &args->seq_args, &hdr);
encode_putfh(&xdr, args->fh, &hdr);
encode_access(&xdr, args->access, &hdr);
encode_getfattr(&xdr, args->bitmask, &hdr);
@@ -1370,11 +1721,12 @@
{
struct xdr_stream xdr;
struct compound_hdr hdr = {
- .nops = 0,
+ .minorversion = nfs4_xdr_minorversion(&args->seq_args),
};
xdr_init_encode(&xdr, &req->rq_snd_buf, p);
- encode_compound_hdr(&xdr, &hdr);
+ encode_compound_hdr(&xdr, req, &hdr);
+ encode_sequence(&xdr, &args->seq_args, &hdr);
encode_putfh(&xdr, args->dir_fh, &hdr);
encode_lookup(&xdr, args->name, &hdr);
encode_getfh(&xdr, &hdr);
@@ -1390,11 +1742,12 @@
{
struct xdr_stream xdr;
struct compound_hdr hdr = {
- .nops = 0,
+ .minorversion = nfs4_xdr_minorversion(&args->seq_args),
};
xdr_init_encode(&xdr, &req->rq_snd_buf, p);
- encode_compound_hdr(&xdr, &hdr);
+ encode_compound_hdr(&xdr, req, &hdr);
+ encode_sequence(&xdr, &args->seq_args, &hdr);
encode_putrootfh(&xdr, &hdr);
encode_getfh(&xdr, &hdr);
encode_getfattr(&xdr, args->bitmask, &hdr);
@@ -1409,11 +1762,12 @@
{
struct xdr_stream xdr;
struct compound_hdr hdr = {
- .nops = 0,
+ .minorversion = nfs4_xdr_minorversion(&args->seq_args),
};
xdr_init_encode(&xdr, &req->rq_snd_buf, p);
- encode_compound_hdr(&xdr, &hdr);
+ encode_compound_hdr(&xdr, req, &hdr);
+ encode_sequence(&xdr, &args->seq_args, &hdr);
encode_putfh(&xdr, args->fh, &hdr);
encode_remove(&xdr, &args->name, &hdr);
encode_getfattr(&xdr, args->bitmask, &hdr);
@@ -1428,11 +1782,12 @@
{
struct xdr_stream xdr;
struct compound_hdr hdr = {
- .nops = 0,
+ .minorversion = nfs4_xdr_minorversion(&args->seq_args),
};
xdr_init_encode(&xdr, &req->rq_snd_buf, p);
- encode_compound_hdr(&xdr, &hdr);
+ encode_compound_hdr(&xdr, req, &hdr);
+ encode_sequence(&xdr, &args->seq_args, &hdr);
encode_putfh(&xdr, args->old_dir, &hdr);
encode_savefh(&xdr, &hdr);
encode_putfh(&xdr, args->new_dir, &hdr);
@@ -1451,11 +1806,12 @@
{
struct xdr_stream xdr;
struct compound_hdr hdr = {
- .nops = 0,
+ .minorversion = nfs4_xdr_minorversion(&args->seq_args),
};
xdr_init_encode(&xdr, &req->rq_snd_buf, p);
- encode_compound_hdr(&xdr, &hdr);
+ encode_compound_hdr(&xdr, req, &hdr);
+ encode_sequence(&xdr, &args->seq_args, &hdr);
encode_putfh(&xdr, args->fh, &hdr);
encode_savefh(&xdr, &hdr);
encode_putfh(&xdr, args->dir_fh, &hdr);
@@ -1474,11 +1830,12 @@
{
struct xdr_stream xdr;
struct compound_hdr hdr = {
- .nops = 0,
+ .minorversion = nfs4_xdr_minorversion(&args->seq_args),
};
xdr_init_encode(&xdr, &req->rq_snd_buf, p);
- encode_compound_hdr(&xdr, &hdr);
+ encode_compound_hdr(&xdr, req, &hdr);
+ encode_sequence(&xdr, &args->seq_args, &hdr);
encode_putfh(&xdr, args->dir_fh, &hdr);
encode_savefh(&xdr, &hdr);
encode_create(&xdr, args, &hdr);
@@ -1505,11 +1862,12 @@
{
struct xdr_stream xdr;
struct compound_hdr hdr = {
- .nops = 0,
+ .minorversion = nfs4_xdr_minorversion(&args->seq_args),
};
xdr_init_encode(&xdr, &req->rq_snd_buf, p);
- encode_compound_hdr(&xdr, &hdr);
+ encode_compound_hdr(&xdr, req, &hdr);
+ encode_sequence(&xdr, &args->seq_args, &hdr);
encode_putfh(&xdr, args->fh, &hdr);
encode_getfattr(&xdr, args->bitmask, &hdr);
encode_nops(&hdr);
@@ -1523,11 +1881,12 @@
{
struct xdr_stream xdr;
struct compound_hdr hdr = {
- .nops = 0,
+ .minorversion = nfs4_xdr_minorversion(&args->seq_args),
};
xdr_init_encode(&xdr, &req->rq_snd_buf, p);
- encode_compound_hdr(&xdr, &hdr);
+ encode_compound_hdr(&xdr, req, &hdr);
+ encode_sequence(&xdr, &args->seq_args, &hdr);
encode_putfh(&xdr, args->fh, &hdr);
encode_close(&xdr, args, &hdr);
encode_getfattr(&xdr, args->bitmask, &hdr);
@@ -1542,11 +1901,12 @@
{
struct xdr_stream xdr;
struct compound_hdr hdr = {
- .nops = 0,
+ .minorversion = nfs4_xdr_minorversion(&args->seq_args),
};
xdr_init_encode(&xdr, &req->rq_snd_buf, p);
- encode_compound_hdr(&xdr, &hdr);
+ encode_compound_hdr(&xdr, req, &hdr);
+ encode_sequence(&xdr, &args->seq_args, &hdr);
encode_putfh(&xdr, args->fh, &hdr);
encode_savefh(&xdr, &hdr);
encode_open(&xdr, args, &hdr);
@@ -1569,7 +1929,7 @@
};
xdr_init_encode(&xdr, &req->rq_snd_buf, p);
- encode_compound_hdr(&xdr, &hdr);
+ encode_compound_hdr(&xdr, req, &hdr);
encode_putfh(&xdr, args->fh, &hdr);
encode_open_confirm(&xdr, args, &hdr);
encode_nops(&hdr);
@@ -1583,11 +1943,12 @@
{
struct xdr_stream xdr;
struct compound_hdr hdr = {
- .nops = 0,
+ .minorversion = nfs4_xdr_minorversion(&args->seq_args),
};
xdr_init_encode(&xdr, &req->rq_snd_buf, p);
- encode_compound_hdr(&xdr, &hdr);
+ encode_compound_hdr(&xdr, req, &hdr);
+ encode_sequence(&xdr, &args->seq_args, &hdr);
encode_putfh(&xdr, args->fh, &hdr);
encode_open(&xdr, args, &hdr);
encode_getfattr(&xdr, args->bitmask, &hdr);
@@ -1602,11 +1963,12 @@
{
struct xdr_stream xdr;
struct compound_hdr hdr = {
- .nops = 0,
+ .minorversion = nfs4_xdr_minorversion(&args->seq_args),
};
xdr_init_encode(&xdr, &req->rq_snd_buf, p);
- encode_compound_hdr(&xdr, &hdr);
+ encode_compound_hdr(&xdr, req, &hdr);
+ encode_sequence(&xdr, &args->seq_args, &hdr);
encode_putfh(&xdr, args->fh, &hdr);
encode_open_downgrade(&xdr, args, &hdr);
encode_getfattr(&xdr, args->bitmask, &hdr);
@@ -1621,11 +1983,12 @@
{
struct xdr_stream xdr;
struct compound_hdr hdr = {
- .nops = 0,
+ .minorversion = nfs4_xdr_minorversion(&args->seq_args),
};
xdr_init_encode(&xdr, &req->rq_snd_buf, p);
- encode_compound_hdr(&xdr, &hdr);
+ encode_compound_hdr(&xdr, req, &hdr);
+ encode_sequence(&xdr, &args->seq_args, &hdr);
encode_putfh(&xdr, args->fh, &hdr);
encode_lock(&xdr, args, &hdr);
encode_nops(&hdr);
@@ -1639,11 +2002,12 @@
{
struct xdr_stream xdr;
struct compound_hdr hdr = {
- .nops = 0,
+ .minorversion = nfs4_xdr_minorversion(&args->seq_args),
};
xdr_init_encode(&xdr, &req->rq_snd_buf, p);
- encode_compound_hdr(&xdr, &hdr);
+ encode_compound_hdr(&xdr, req, &hdr);
+ encode_sequence(&xdr, &args->seq_args, &hdr);
encode_putfh(&xdr, args->fh, &hdr);
encode_lockt(&xdr, args, &hdr);
encode_nops(&hdr);
@@ -1657,11 +2021,12 @@
{
struct xdr_stream xdr;
struct compound_hdr hdr = {
- .nops = 0,
+ .minorversion = nfs4_xdr_minorversion(&args->seq_args),
};
xdr_init_encode(&xdr, &req->rq_snd_buf, p);
- encode_compound_hdr(&xdr, &hdr);
+ encode_compound_hdr(&xdr, req, &hdr);
+ encode_sequence(&xdr, &args->seq_args, &hdr);
encode_putfh(&xdr, args->fh, &hdr);
encode_locku(&xdr, args, &hdr);
encode_nops(&hdr);
@@ -1675,22 +2040,16 @@
{
struct xdr_stream xdr;
struct compound_hdr hdr = {
- .nops = 0,
+ .minorversion = nfs4_xdr_minorversion(&args->seq_args),
};
- struct rpc_auth *auth = req->rq_task->tk_msg.rpc_cred->cr_auth;
- unsigned int replen;
xdr_init_encode(&xdr, &req->rq_snd_buf, p);
- encode_compound_hdr(&xdr, &hdr);
+ encode_compound_hdr(&xdr, req, &hdr);
+ encode_sequence(&xdr, &args->seq_args, &hdr);
encode_putfh(&xdr, args->fh, &hdr);
encode_readlink(&xdr, args, req, &hdr);
- /* set up reply kvec
- * toplevel_status + taglen + rescount + OP_PUTFH + status
- * + OP_READLINK + status + string length = 8
- */
- replen = (RPC_REPHDRSIZE + auth->au_rslack + NFS4_dec_readlink_sz) << 2;
- xdr_inline_pages(&req->rq_rcv_buf, replen, args->pages,
+ xdr_inline_pages(&req->rq_rcv_buf, hdr.replen << 2, args->pages,
args->pgbase, args->pglen);
encode_nops(&hdr);
return 0;
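With the hand-counted replen gone, the byte offset handed to xdr_inline_pages() is simply the accumulated word count shifted left by two. A worked example with assumed values for the RPC reply header size, au_rslack and the per-op maxsz constants (none of these numbers are authoritative):

#include <stdio.h>

int main(void)
{
        unsigned int rpc_rephdrsize = 2;  /* assumed reply header size, in words */
        unsigned int au_rslack      = 2;  /* assumed credential slack, in words */
        unsigned int taglen         = 0;  /* empty tag */
        unsigned int replen;

        /* start as encode_compound_hdr() does ... */
        replen = rpc_rephdrsize + au_rslack + 3 + taglen;
        /* ... then each encode_*() call adds its decode_*_maxsz: */
        replen += 11;   /* decode_sequence_maxsz (assumed value) */
        replen += 5;    /* decode_putfh_maxsz (assumed value) */
        replen += 7;    /* decode_readlink_maxsz (assumed value) */

        /* xdr_inline_pages() wants a byte offset, hence the << 2 */
        printf("page data starts at byte offset %u\n", replen << 2);
        return 0;
}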
@@ -1703,25 +2062,19 @@
{
struct xdr_stream xdr;
struct compound_hdr hdr = {
- .nops = 0,
+ .minorversion = nfs4_xdr_minorversion(&args->seq_args),
};
- struct rpc_auth *auth = req->rq_task->tk_msg.rpc_cred->cr_auth;
- int replen;
xdr_init_encode(&xdr, &req->rq_snd_buf, p);
- encode_compound_hdr(&xdr, &hdr);
+ encode_compound_hdr(&xdr, req, &hdr);
+ encode_sequence(&xdr, &args->seq_args, &hdr);
encode_putfh(&xdr, args->fh, &hdr);
encode_readdir(&xdr, args, req, &hdr);
- /* set up reply kvec
- * toplevel_status + taglen + rescount + OP_PUTFH + status
- * + OP_READDIR + status + verifer(2) = 9
- */
- replen = (RPC_REPHDRSIZE + auth->au_rslack + NFS4_dec_readdir_sz) << 2;
- xdr_inline_pages(&req->rq_rcv_buf, replen, args->pages,
+ xdr_inline_pages(&req->rq_rcv_buf, hdr.replen << 2, args->pages,
args->pgbase, args->count);
dprintk("%s: inlined page args = (%u, %p, %u, %u)\n",
- __func__, replen, args->pages,
+ __func__, hdr.replen << 2, args->pages,
args->pgbase, args->count);
encode_nops(&hdr);
return 0;
@@ -1732,24 +2085,18 @@
*/
static int nfs4_xdr_enc_read(struct rpc_rqst *req, __be32 *p, struct nfs_readargs *args)
{
- struct rpc_auth *auth = req->rq_task->tk_msg.rpc_cred->cr_auth;
struct xdr_stream xdr;
struct compound_hdr hdr = {
- .nops = 0,
+ .minorversion = nfs4_xdr_minorversion(&args->seq_args),
};
- int replen;
xdr_init_encode(&xdr, &req->rq_snd_buf, p);
- encode_compound_hdr(&xdr, &hdr);
+ encode_compound_hdr(&xdr, req, &hdr);
+ encode_sequence(&xdr, &args->seq_args, &hdr);
encode_putfh(&xdr, args->fh, &hdr);
encode_read(&xdr, args, &hdr);
- /* set up reply kvec
- * toplevel status + taglen=0 + rescount + OP_PUTFH + status
- * + OP_READ + status + eof + datalen = 9
- */
- replen = (RPC_REPHDRSIZE + auth->au_rslack + NFS4_dec_read_sz) << 2;
- xdr_inline_pages(&req->rq_rcv_buf, replen,
+ xdr_inline_pages(&req->rq_rcv_buf, hdr.replen << 2,
args->pages, args->pgbase, args->count);
req->rq_rcv_buf.flags |= XDRBUF_READ;
encode_nops(&hdr);
@@ -1763,11 +2110,12 @@
{
struct xdr_stream xdr;
struct compound_hdr hdr = {
- .nops = 0,
+ .minorversion = nfs4_xdr_minorversion(&args->seq_args),
};
xdr_init_encode(&xdr, &req->rq_snd_buf, p);
- encode_compound_hdr(&xdr, &hdr);
+ encode_compound_hdr(&xdr, req, &hdr);
+ encode_sequence(&xdr, &args->seq_args, &hdr);
encode_putfh(&xdr, args->fh, &hdr);
encode_setattr(&xdr, args, args->server, &hdr);
encode_getfattr(&xdr, args->bitmask, &hdr);
@@ -1783,20 +2131,19 @@
struct nfs_getaclargs *args)
{
struct xdr_stream xdr;
- struct rpc_auth *auth = req->rq_task->tk_msg.rpc_cred->cr_auth;
struct compound_hdr hdr = {
- .nops = 0,
+ .minorversion = nfs4_xdr_minorversion(&args->seq_args),
};
- int replen;
+ uint32_t replen;
xdr_init_encode(&xdr, &req->rq_snd_buf, p);
- encode_compound_hdr(&xdr, &hdr);
+ encode_compound_hdr(&xdr, req, &hdr);
+ encode_sequence(&xdr, &args->seq_args, &hdr);
encode_putfh(&xdr, args->fh, &hdr);
+ replen = hdr.replen + nfs4_fattr_bitmap_maxsz + 1;
encode_getattr_two(&xdr, FATTR4_WORD0_ACL, 0, &hdr);
- /* set up reply buffer: */
- replen = (RPC_REPHDRSIZE + auth->au_rslack + NFS4_dec_getacl_sz) << 2;
- xdr_inline_pages(&req->rq_rcv_buf, replen,
+ xdr_inline_pages(&req->rq_rcv_buf, replen << 2,
args->acl_pages, args->acl_pgbase, args->acl_len);
encode_nops(&hdr);
return 0;
@@ -1809,11 +2156,12 @@
{
struct xdr_stream xdr;
struct compound_hdr hdr = {
- .nops = 0,
+ .minorversion = nfs4_xdr_minorversion(&args->seq_args),
};
xdr_init_encode(&xdr, &req->rq_snd_buf, p);
- encode_compound_hdr(&xdr, &hdr);
+ encode_compound_hdr(&xdr, req, &hdr);
+ encode_sequence(&xdr, &args->seq_args, &hdr);
encode_putfh(&xdr, args->fh, &hdr);
encode_write(&xdr, args, &hdr);
req->rq_snd_buf.flags |= XDRBUF_WRITE;
@@ -1829,11 +2177,12 @@
{
struct xdr_stream xdr;
struct compound_hdr hdr = {
- .nops = 0,
+ .minorversion = nfs4_xdr_minorversion(&args->seq_args),
};
xdr_init_encode(&xdr, &req->rq_snd_buf, p);
- encode_compound_hdr(&xdr, &hdr);
+ encode_compound_hdr(&xdr, req, &hdr);
+ encode_sequence(&xdr, &args->seq_args, &hdr);
encode_putfh(&xdr, args->fh, &hdr);
encode_commit(&xdr, args, &hdr);
encode_getfattr(&xdr, args->bitmask, &hdr);
@@ -1848,11 +2197,12 @@
{
struct xdr_stream xdr;
struct compound_hdr hdr = {
- .nops = 0,
+ .minorversion = nfs4_xdr_minorversion(&args->seq_args),
};
xdr_init_encode(&xdr, &req->rq_snd_buf, p);
- encode_compound_hdr(&xdr, &hdr);
+ encode_compound_hdr(&xdr, req, &hdr);
+ encode_sequence(&xdr, &args->seq_args, &hdr);
encode_putfh(&xdr, args->fh, &hdr);
encode_fsinfo(&xdr, args->bitmask, &hdr);
encode_nops(&hdr);
@@ -1866,11 +2216,12 @@
{
struct xdr_stream xdr;
struct compound_hdr hdr = {
- .nops = 0,
+ .minorversion = nfs4_xdr_minorversion(&args->seq_args),
};
xdr_init_encode(&xdr, &req->rq_snd_buf, p);
- encode_compound_hdr(&xdr, &hdr);
+ encode_compound_hdr(&xdr, req, &hdr);
+ encode_sequence(&xdr, &args->seq_args, &hdr);
encode_putfh(&xdr, args->fh, &hdr);
encode_getattr_one(&xdr, args->bitmask[0] & nfs4_pathconf_bitmap[0],
&hdr);
@@ -1885,11 +2236,12 @@
{
struct xdr_stream xdr;
struct compound_hdr hdr = {
- .nops = 0,
+ .minorversion = nfs4_xdr_minorversion(&args->seq_args),
};
xdr_init_encode(&xdr, &req->rq_snd_buf, p);
- encode_compound_hdr(&xdr, &hdr);
+ encode_compound_hdr(&xdr, req, &hdr);
+ encode_sequence(&xdr, &args->seq_args, &hdr);
encode_putfh(&xdr, args->fh, &hdr);
encode_getattr_two(&xdr, args->bitmask[0] & nfs4_statfs_bitmap[0],
args->bitmask[1] & nfs4_statfs_bitmap[1], &hdr);
@@ -1900,16 +2252,18 @@
/*
* GETATTR_BITMAP request
*/
-static int nfs4_xdr_enc_server_caps(struct rpc_rqst *req, __be32 *p, const struct nfs_fh *fhandle)
+static int nfs4_xdr_enc_server_caps(struct rpc_rqst *req, __be32 *p,
+ struct nfs4_server_caps_arg *args)
{
struct xdr_stream xdr;
struct compound_hdr hdr = {
- .nops = 0,
+ .minorversion = nfs4_xdr_minorversion(&args->seq_args),
};
xdr_init_encode(&xdr, &req->rq_snd_buf, p);
- encode_compound_hdr(&xdr, &hdr);
- encode_putfh(&xdr, fhandle, &hdr);
+ encode_compound_hdr(&xdr, req, &hdr);
+ encode_sequence(&xdr, &args->seq_args, &hdr);
+ encode_putfh(&xdr, args->fhandle, &hdr);
encode_getattr_one(&xdr, FATTR4_WORD0_SUPPORTED_ATTRS|
FATTR4_WORD0_LINK_SUPPORT|
FATTR4_WORD0_SYMLINK_SUPPORT|
@@ -1929,7 +2283,7 @@
};
xdr_init_encode(&xdr, &req->rq_snd_buf, p);
- encode_compound_hdr(&xdr, &hdr);
+ encode_compound_hdr(&xdr, req, &hdr);
encode_renew(&xdr, clp, &hdr);
encode_nops(&hdr);
return 0;
@@ -1946,7 +2300,7 @@
};
xdr_init_encode(&xdr, &req->rq_snd_buf, p);
- encode_compound_hdr(&xdr, &hdr);
+ encode_compound_hdr(&xdr, req, &hdr);
encode_setclientid(&xdr, sc, &hdr);
encode_nops(&hdr);
return 0;
@@ -1964,7 +2318,7 @@
const u32 lease_bitmap[2] = { FATTR4_WORD0_LEASE_TIME, 0 };
xdr_init_encode(&xdr, &req->rq_snd_buf, p);
- encode_compound_hdr(&xdr, &hdr);
+ encode_compound_hdr(&xdr, req, &hdr);
encode_setclientid_confirm(&xdr, clp, &hdr);
encode_putrootfh(&xdr, &hdr);
encode_fsinfo(&xdr, lease_bitmap, &hdr);
@@ -1979,11 +2333,12 @@
{
struct xdr_stream xdr;
struct compound_hdr hdr = {
- .nops = 0,
+ .minorversion = nfs4_xdr_minorversion(&args->seq_args),
};
xdr_init_encode(&xdr, &req->rq_snd_buf, p);
- encode_compound_hdr(&xdr, &hdr);
+ encode_compound_hdr(&xdr, req, &hdr);
+ encode_sequence(&xdr, &args->seq_args, &hdr);
encode_putfh(&xdr, args->fhandle, &hdr);
encode_delegreturn(&xdr, args->stateid, &hdr);
encode_getfattr(&xdr, args->bitmask, &hdr);
@@ -1998,28 +2353,119 @@
{
struct xdr_stream xdr;
struct compound_hdr hdr = {
- .nops = 0,
+ .minorversion = nfs4_xdr_minorversion(&args->seq_args),
};
- struct rpc_auth *auth = req->rq_task->tk_msg.rpc_cred->cr_auth;
- int replen;
+ uint32_t replen;
xdr_init_encode(&xdr, &req->rq_snd_buf, p);
- encode_compound_hdr(&xdr, &hdr);
+ encode_compound_hdr(&xdr, req, &hdr);
+ encode_sequence(&xdr, &args->seq_args, &hdr);
encode_putfh(&xdr, args->dir_fh, &hdr);
encode_lookup(&xdr, args->name, &hdr);
+ replen = hdr.replen; /* get the attribute into args->page */
encode_fs_locations(&xdr, args->bitmask, &hdr);
- /* set up reply
- * toplevel_status + OP_PUTFH + status
- * + OP_LOOKUP + status + OP_GETATTR + status = 7
- */
- replen = (RPC_REPHDRSIZE + auth->au_rslack + 7) << 2;
- xdr_inline_pages(&req->rq_rcv_buf, replen, &args->page,
+ xdr_inline_pages(&req->rq_rcv_buf, replen << 2, &args->page,
0, PAGE_SIZE);
encode_nops(&hdr);
return 0;
}
+#if defined(CONFIG_NFS_V4_1)
+/*
+ * EXCHANGE_ID request
+ */
+static int nfs4_xdr_enc_exchange_id(struct rpc_rqst *req, uint32_t *p,
+ struct nfs41_exchange_id_args *args)
+{
+ struct xdr_stream xdr;
+ struct compound_hdr hdr = {
+ .minorversion = args->client->cl_minorversion,
+ };
+
+ xdr_init_encode(&xdr, &req->rq_snd_buf, p);
+ encode_compound_hdr(&xdr, req, &hdr);
+ encode_exchange_id(&xdr, args, &hdr);
+ encode_nops(&hdr);
+ return 0;
+}
+
+/*
+ * a CREATE_SESSION request
+ */
+static int nfs4_xdr_enc_create_session(struct rpc_rqst *req, uint32_t *p,
+ struct nfs41_create_session_args *args)
+{
+ struct xdr_stream xdr;
+ struct compound_hdr hdr = {
+ .minorversion = args->client->cl_minorversion,
+ };
+
+ xdr_init_encode(&xdr, &req->rq_snd_buf, p);
+ encode_compound_hdr(&xdr, req, &hdr);
+ encode_create_session(&xdr, args, &hdr);
+ encode_nops(&hdr);
+ return 0;
+}
+
+/*
+ * a DESTROY_SESSION request
+ */
+static int nfs4_xdr_enc_destroy_session(struct rpc_rqst *req, uint32_t *p,
+ struct nfs4_session *session)
+{
+ struct xdr_stream xdr;
+ struct compound_hdr hdr = {
+ .minorversion = session->clp->cl_minorversion,
+ };
+
+ xdr_init_encode(&xdr, &req->rq_snd_buf, p);
+ encode_compound_hdr(&xdr, req, &hdr);
+ encode_destroy_session(&xdr, session, &hdr);
+ encode_nops(&hdr);
+ return 0;
+}
+
+/*
+ * a SEQUENCE request
+ */
+static int nfs4_xdr_enc_sequence(struct rpc_rqst *req, uint32_t *p,
+ struct nfs4_sequence_args *args)
+{
+ struct xdr_stream xdr;
+ struct compound_hdr hdr = {
+ .minorversion = nfs4_xdr_minorversion(args),
+ };
+
+ xdr_init_encode(&xdr, &req->rq_snd_buf, p);
+ encode_compound_hdr(&xdr, req, &hdr);
+ encode_sequence(&xdr, args, &hdr);
+ encode_nops(&hdr);
+ return 0;
+}
+
+/*
+ * a GET_LEASE_TIME request
+ */
+static int nfs4_xdr_enc_get_lease_time(struct rpc_rqst *req, uint32_t *p,
+ struct nfs4_get_lease_time_args *args)
+{
+ struct xdr_stream xdr;
+ struct compound_hdr hdr = {
+ .minorversion = nfs4_xdr_minorversion(&args->la_seq_args),
+ };
+ const u32 lease_bitmap[2] = { FATTR4_WORD0_LEASE_TIME, 0 };
+
+ xdr_init_encode(&xdr, &req->rq_snd_buf, p);
+ encode_compound_hdr(&xdr, req, &hdr);
+ encode_sequence(&xdr, &args->la_seq_args, &hdr);
+ encode_putrootfh(&xdr, &hdr);
+ encode_fsinfo(&xdr, lease_bitmap, &hdr);
+ encode_nops(&hdr);
+ return 0;
+}
+#endif /* CONFIG_NFS_V4_1 */
+
/*
* START OF "GENERIC" DECODE ROUTINES.
* These may look a little ugly since they are imported from a "generic"
@@ -3657,7 +4103,7 @@
return decode_op_hdr(xdr, OP_SAVEFH);
}
-static int decode_setattr(struct xdr_stream *xdr, struct nfs_setattrres *res)
+static int decode_setattr(struct xdr_stream *xdr)
{
__be32 *p;
uint32_t bmlen;
@@ -3735,6 +4181,169 @@
return decode_op_hdr(xdr, OP_DELEGRETURN);
}
+#if defined(CONFIG_NFS_V4_1)
+static int decode_exchange_id(struct xdr_stream *xdr,
+ struct nfs41_exchange_id_res *res)
+{
+ __be32 *p;
+ uint32_t dummy;
+ int status;
+ struct nfs_client *clp = res->client;
+
+ status = decode_op_hdr(xdr, OP_EXCHANGE_ID);
+ if (status)
+ return status;
+
+ READ_BUF(8);
+ READ64(clp->cl_ex_clid);
+ READ_BUF(12);
+ READ32(clp->cl_seqid);
+ READ32(clp->cl_exchange_flags);
+
+ /* We ask for SP4_NONE */
+ READ32(dummy);
+ if (dummy != SP4_NONE)
+ return -EIO;
+
+ /* Throw away minor_id */
+ READ_BUF(8);
+
+ /* Throw away Major id */
+ READ_BUF(4);
+ READ32(dummy);
+ READ_BUF(dummy);
+
+ /* Throw away server_scope */
+ READ_BUF(4);
+ READ32(dummy);
+ READ_BUF(dummy);
+
+ /* Throw away Implementation id array */
+ READ_BUF(4);
+ READ32(dummy);
+ READ_BUF(dummy);
+
+ return 0;
+}
+
+static int decode_chan_attrs(struct xdr_stream *xdr,
+ struct nfs4_channel_attrs *attrs)
+{
+ __be32 *p;
+ u32 nr_attrs;
+
+ READ_BUF(28);
+ READ32(attrs->headerpadsz);
+ READ32(attrs->max_rqst_sz);
+ READ32(attrs->max_resp_sz);
+ READ32(attrs->max_resp_sz_cached);
+ READ32(attrs->max_ops);
+ READ32(attrs->max_reqs);
+ READ32(nr_attrs);
+ if (unlikely(nr_attrs > 1)) {
+ printk(KERN_WARNING "%s: Invalid rdma channel attrs count %u\n",
+ __func__, nr_attrs);
+ return -EINVAL;
+ }
+ if (nr_attrs == 1)
+ READ_BUF(4); /* skip rdma_attrs */
+ return 0;
+}
+
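The READ_BUF(28) above pulls in seven 32-bit words, optionally followed by one rdma_ird word when the attribute count is 1. An illustrative view of that layout (a layout sketch, not a kernel structure):

#include <stdint.h>

struct demo_channel_attrs_wire {
        uint32_t headerpadsz;
        uint32_t max_rqst_sz;
        uint32_t max_resp_sz;
        uint32_t max_resp_sz_cached;
        uint32_t max_ops;
        uint32_t max_reqs;
        uint32_t nr_rdma_attrs; /* 0 or 1 */
        /* uint32_t rdma_ird;      present only when nr_rdma_attrs == 1 */
};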
+static int decode_create_session(struct xdr_stream *xdr,
+ struct nfs41_create_session_res *res)
+{
+ __be32 *p;
+ int status;
+ struct nfs_client *clp = res->client;
+ struct nfs4_session *session = clp->cl_session;
+
+ status = decode_op_hdr(xdr, OP_CREATE_SESSION);
+
+ if (status)
+ return status;
+
+ /* sessionid */
+ READ_BUF(NFS4_MAX_SESSIONID_LEN);
+ COPYMEM(&session->sess_id, NFS4_MAX_SESSIONID_LEN);
+
+ /* seqid, flags */
+ READ_BUF(8);
+ READ32(clp->cl_seqid);
+ READ32(session->flags);
+
+ /* Channel attributes */
+ status = decode_chan_attrs(xdr, &session->fc_attrs);
+ if (!status)
+ status = decode_chan_attrs(xdr, &session->bc_attrs);
+ return status;
+}
+
+static int decode_destroy_session(struct xdr_stream *xdr, void *dummy)
+{
+ return decode_op_hdr(xdr, OP_DESTROY_SESSION);
+}
+#endif /* CONFIG_NFS_V4_1 */
+
+static int decode_sequence(struct xdr_stream *xdr,
+ struct nfs4_sequence_res *res,
+ struct rpc_rqst *rqstp)
+{
+#if defined(CONFIG_NFS_V4_1)
+ struct nfs4_slot *slot;
+ struct nfs4_sessionid id;
+ u32 dummy;
+ int status;
+ __be32 *p;
+
+ if (!res->sr_session)
+ return 0;
+
+ status = decode_op_hdr(xdr, OP_SEQUENCE);
+ if (status)
+ goto out_err;
+
+ /*
+ * If the server returns different values for sessionID, slotID or
+ * sequence number, the server is looney tunes.
+ */
+ status = -ESERVERFAULT;
+
+ slot = &res->sr_session->fc_slot_table.slots[res->sr_slotid];
+ READ_BUF(NFS4_MAX_SESSIONID_LEN + 20);
+ COPYMEM(id.data, NFS4_MAX_SESSIONID_LEN);
+ if (memcmp(id.data, res->sr_session->sess_id.data,
+ NFS4_MAX_SESSIONID_LEN)) {
+ dprintk("%s Invalid session id\n", __func__);
+ goto out_err;
+ }
+ /* seqid */
+ READ32(dummy);
+ if (dummy != slot->seq_nr) {
+ dprintk("%s Invalid sequence number\n", __func__);
+ goto out_err;
+ }
+ /* slot id */
+ READ32(dummy);
+ if (dummy != res->sr_slotid) {
+ dprintk("%s Invalid slot id\n", __func__);
+ goto out_err;
+ }
+ /* highest slot id - currently not processed */
+ READ32(dummy);
+ /* target highest slot id - currently not processed */
+ READ32(dummy);
+ /* result flags - currently not processed */
+ READ32(dummy);
+ status = 0;
+out_err:
+ res->sr_status = status;
+ return status;
+#else /* CONFIG_NFS_V4_1 */
+ return 0;
+#endif /* CONFIG_NFS_V4_1 */
+}
+
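The checks in decode_sequence() reduce to "the reply must echo what was sent": session id, sequence number and slot id all have to match. A standalone sketch of that validation in plain C with hypothetical field names:

#include <string.h>
#include <stdint.h>

#define DEMO_SESSIONID_LEN 16

struct demo_seq_state {
        uint8_t  sessionid[DEMO_SESSIONID_LEN];
        uint32_t seq_nr;
        uint32_t slotid;
};

/* Returns 0 when the reply echoes what was sent, -1 otherwise */
static int demo_check_sequence(const struct demo_seq_state *sent,
                               const struct demo_seq_state *got)
{
        if (memcmp(sent->sessionid, got->sessionid, DEMO_SESSIONID_LEN))
                return -1;      /* wrong session */
        if (got->seq_nr != sent->seq_nr)
                return -1;      /* wrong sequence number */
        if (got->slotid != sent->slotid)
                return -1;      /* wrong slot */
        return 0;
}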
/*
* END OF "GENERIC" DECODE ROUTINES.
*/
@@ -3752,6 +4361,9 @@
status = decode_compound_hdr(&xdr, &hdr);
if (status)
goto out;
+ status = decode_sequence(&xdr, &res->seq_res, rqstp);
+ if (status)
+ goto out;
status = decode_putfh(&xdr);
if (status)
goto out;
@@ -3773,7 +4385,11 @@
int status;
xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
- if ((status = decode_compound_hdr(&xdr, &hdr)) != 0)
+ status = decode_compound_hdr(&xdr, &hdr);
+ if (status)
+ goto out;
+ status = decode_sequence(&xdr, &res->seq_res, rqstp);
+ if (status)
goto out;
status = decode_putfh(&xdr);
if (status != 0)
@@ -3796,7 +4412,11 @@
int status;
xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
- if ((status = decode_compound_hdr(&xdr, &hdr)) != 0)
+ status = decode_compound_hdr(&xdr, &hdr);
+ if (status)
+ goto out;
+ status = decode_sequence(&xdr, &res->seq_res, rqstp);
+ if (status)
goto out;
if ((status = decode_putfh(&xdr)) != 0)
goto out;
@@ -3819,7 +4439,11 @@
int status;
xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
- if ((status = decode_compound_hdr(&xdr, &hdr)) != 0)
+ status = decode_compound_hdr(&xdr, &hdr);
+ if (status)
+ goto out;
+ status = decode_sequence(&xdr, &res->seq_res, rqstp);
+ if (status)
goto out;
if ((status = decode_putrootfh(&xdr)) != 0)
goto out;
@@ -3839,7 +4463,11 @@
int status;
xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
- if ((status = decode_compound_hdr(&xdr, &hdr)) != 0)
+ status = decode_compound_hdr(&xdr, &hdr);
+ if (status)
+ goto out;
+ status = decode_sequence(&xdr, &res->seq_res, rqstp);
+ if (status)
goto out;
if ((status = decode_putfh(&xdr)) != 0)
goto out;
@@ -3860,7 +4488,11 @@
int status;
xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
- if ((status = decode_compound_hdr(&xdr, &hdr)) != 0)
+ status = decode_compound_hdr(&xdr, &hdr);
+ if (status)
+ goto out;
+ status = decode_sequence(&xdr, &res->seq_res, rqstp);
+ if (status)
goto out;
if ((status = decode_putfh(&xdr)) != 0)
goto out;
@@ -3890,7 +4522,11 @@
int status;
xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
- if ((status = decode_compound_hdr(&xdr, &hdr)) != 0)
+ status = decode_compound_hdr(&xdr, &hdr);
+ if (status)
+ goto out;
+ status = decode_sequence(&xdr, &res->seq_res, rqstp);
+ if (status)
goto out;
if ((status = decode_putfh(&xdr)) != 0)
goto out;
@@ -3923,7 +4559,11 @@
int status;
xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
- if ((status = decode_compound_hdr(&xdr, &hdr)) != 0)
+ status = decode_compound_hdr(&xdr, &hdr);
+ if (status)
+ goto out;
+ status = decode_sequence(&xdr, &res->seq_res, rqstp);
+ if (status)
goto out;
if ((status = decode_putfh(&xdr)) != 0)
goto out;
@@ -3963,6 +4603,9 @@
status = decode_compound_hdr(&xdr, &hdr);
if (status)
goto out;
+ status = decode_sequence(&xdr, &res->seq_res, rqstp);
+ if (status)
+ goto out;
status = decode_putfh(&xdr);
if (status)
goto out;
@@ -3979,12 +4622,13 @@
{
struct xdr_stream xdr;
struct compound_hdr hdr = {
- .nops = 0,
+ .minorversion = nfs4_xdr_minorversion(&args->seq_args),
};
int status;
xdr_init_encode(&xdr, &req->rq_snd_buf, p);
- encode_compound_hdr(&xdr, &hdr);
+ encode_compound_hdr(&xdr, req, &hdr);
+ encode_sequence(&xdr, &args->seq_args, &hdr);
encode_putfh(&xdr, args->fh, &hdr);
status = encode_setacl(&xdr, args, &hdr);
encode_nops(&hdr);
@@ -3995,7 +4639,8 @@
* Decode SETACL response
*/
static int
-nfs4_xdr_dec_setacl(struct rpc_rqst *rqstp, __be32 *p, void *res)
+nfs4_xdr_dec_setacl(struct rpc_rqst *rqstp, __be32 *p,
+ struct nfs_setaclres *res)
{
struct xdr_stream xdr;
struct compound_hdr hdr;
@@ -4005,10 +4650,13 @@
status = decode_compound_hdr(&xdr, &hdr);
if (status)
goto out;
+ status = decode_sequence(&xdr, &res->seq_res, rqstp);
+ if (status)
+ goto out;
status = decode_putfh(&xdr);
if (status)
goto out;
- status = decode_setattr(&xdr, res);
+ status = decode_setattr(&xdr);
out:
return status;
}
@@ -4017,7 +4665,8 @@
* Decode GETACL response
*/
static int
-nfs4_xdr_dec_getacl(struct rpc_rqst *rqstp, __be32 *p, size_t *acl_len)
+nfs4_xdr_dec_getacl(struct rpc_rqst *rqstp, __be32 *p,
+ struct nfs_getaclres *res)
{
struct xdr_stream xdr;
struct compound_hdr hdr;
@@ -4027,10 +4676,13 @@
status = decode_compound_hdr(&xdr, &hdr);
if (status)
goto out;
+ status = decode_sequence(&xdr, &res->seq_res, rqstp);
+ if (status)
+ goto out;
status = decode_putfh(&xdr);
if (status)
goto out;
- status = decode_getacl(&xdr, rqstp, acl_len);
+ status = decode_getacl(&xdr, rqstp, &res->acl_len);
out:
return status;
@@ -4049,6 +4701,9 @@
status = decode_compound_hdr(&xdr, &hdr);
if (status)
goto out;
+ status = decode_sequence(&xdr, &res->seq_res, rqstp);
+ if (status)
+ goto out;
status = decode_putfh(&xdr);
if (status)
goto out;
@@ -4079,6 +4734,9 @@
status = decode_compound_hdr(&xdr, &hdr);
if (status)
goto out;
+ status = decode_sequence(&xdr, &res->seq_res, rqstp);
+ if (status)
+ goto out;
status = decode_putfh(&xdr);
if (status)
goto out;
@@ -4133,6 +4791,9 @@
status = decode_compound_hdr(&xdr, &hdr);
if (status)
goto out;
+ status = decode_sequence(&xdr, &res->seq_res, rqstp);
+ if (status)
+ goto out;
status = decode_putfh(&xdr);
if (status)
goto out;
@@ -4157,10 +4818,13 @@
status = decode_compound_hdr(&xdr, &hdr);
if (status)
goto out;
+ status = decode_sequence(&xdr, &res->seq_res, rqstp);
+ if (status)
+ goto out;
status = decode_putfh(&xdr);
if (status)
goto out;
- status = decode_setattr(&xdr, res);
+ status = decode_setattr(&xdr);
if (status)
goto out;
decode_getfattr(&xdr, res->fattr, res->server);
@@ -4181,6 +4845,9 @@
status = decode_compound_hdr(&xdr, &hdr);
if (status)
goto out;
+ status = decode_sequence(&xdr, &res->seq_res, rqstp);
+ if (status)
+ goto out;
status = decode_putfh(&xdr);
if (status)
goto out;
@@ -4202,6 +4869,9 @@
status = decode_compound_hdr(&xdr, &hdr);
if (status)
goto out;
+ status = decode_sequence(&xdr, &res->seq_res, rqstp);
+ if (status)
+ goto out;
status = decode_putfh(&xdr);
if (status)
goto out;
@@ -4223,6 +4893,9 @@
status = decode_compound_hdr(&xdr, &hdr);
if (status)
goto out;
+ status = decode_sequence(&xdr, &res->seq_res, rqstp);
+ if (status)
+ goto out;
status = decode_putfh(&xdr);
if (status)
goto out;
@@ -4234,7 +4907,8 @@
/*
* Decode READLINK response
*/
-static int nfs4_xdr_dec_readlink(struct rpc_rqst *rqstp, __be32 *p, void *res)
+static int nfs4_xdr_dec_readlink(struct rpc_rqst *rqstp, __be32 *p,
+ struct nfs4_readlink_res *res)
{
struct xdr_stream xdr;
struct compound_hdr hdr;
@@ -4244,6 +4918,9 @@
status = decode_compound_hdr(&xdr, &hdr);
if (status)
goto out;
+ status = decode_sequence(&xdr, &res->seq_res, rqstp);
+ if (status)
+ goto out;
status = decode_putfh(&xdr);
if (status)
goto out;
@@ -4265,6 +4942,9 @@
status = decode_compound_hdr(&xdr, &hdr);
if (status)
goto out;
+ status = decode_sequence(&xdr, &res->seq_res, rqstp);
+ if (status)
+ goto out;
status = decode_putfh(&xdr);
if (status)
goto out;
@@ -4286,6 +4966,9 @@
status = decode_compound_hdr(&xdr, &hdr);
if (status)
goto out;
+ status = decode_sequence(&xdr, &res->seq_res, rqstp);
+ if (status)
+ goto out;
status = decode_putfh(&xdr);
if (status)
goto out;
@@ -4309,6 +4992,9 @@
status = decode_compound_hdr(&xdr, &hdr);
if (status)
goto out;
+ status = decode_sequence(&xdr, &res->seq_res, rqstp);
+ if (status)
+ goto out;
status = decode_putfh(&xdr);
if (status)
goto out;
@@ -4335,6 +5021,9 @@
status = decode_compound_hdr(&xdr, &hdr);
if (status)
goto out;
+ status = decode_sequence(&xdr, &res->seq_res, rqstp);
+ if (status)
+ goto out;
status = decode_putfh(&xdr);
if (status)
goto out;
@@ -4349,7 +5038,8 @@
/*
* FSINFO request
*/
-static int nfs4_xdr_dec_fsinfo(struct rpc_rqst *req, __be32 *p, struct nfs_fsinfo *fsinfo)
+static int nfs4_xdr_dec_fsinfo(struct rpc_rqst *req, __be32 *p,
+ struct nfs4_fsinfo_res *res)
{
struct xdr_stream xdr;
struct compound_hdr hdr;
@@ -4358,16 +5048,19 @@
xdr_init_decode(&xdr, &req->rq_rcv_buf, p);
status = decode_compound_hdr(&xdr, &hdr);
if (!status)
+ status = decode_sequence(&xdr, &res->seq_res, req);
+ if (!status)
status = decode_putfh(&xdr);
if (!status)
- status = decode_fsinfo(&xdr, fsinfo);
+ status = decode_fsinfo(&xdr, res->fsinfo);
return status;
}
/*
* PATHCONF request
*/
-static int nfs4_xdr_dec_pathconf(struct rpc_rqst *req, __be32 *p, struct nfs_pathconf *pathconf)
+static int nfs4_xdr_dec_pathconf(struct rpc_rqst *req, __be32 *p,
+ struct nfs4_pathconf_res *res)
{
struct xdr_stream xdr;
struct compound_hdr hdr;
@@ -4376,16 +5069,19 @@
xdr_init_decode(&xdr, &req->rq_rcv_buf, p);
status = decode_compound_hdr(&xdr, &hdr);
if (!status)
+ status = decode_sequence(&xdr, &res->seq_res, req);
+ if (!status)
status = decode_putfh(&xdr);
if (!status)
- status = decode_pathconf(&xdr, pathconf);
+ status = decode_pathconf(&xdr, res->pathconf);
return status;
}
/*
* STATFS request
*/
-static int nfs4_xdr_dec_statfs(struct rpc_rqst *req, __be32 *p, struct nfs_fsstat *fsstat)
+static int nfs4_xdr_dec_statfs(struct rpc_rqst *req, __be32 *p,
+ struct nfs4_statfs_res *res)
{
struct xdr_stream xdr;
struct compound_hdr hdr;
@@ -4394,9 +5090,11 @@
xdr_init_decode(&xdr, &req->rq_rcv_buf, p);
status = decode_compound_hdr(&xdr, &hdr);
if (!status)
+ status = decode_sequence(&xdr, &res->seq_res, req);
+ if (!status)
status = decode_putfh(&xdr);
if (!status)
- status = decode_statfs(&xdr, fsstat);
+ status = decode_statfs(&xdr, res->fsstat);
return status;
}
@@ -4410,7 +5108,11 @@
int status;
xdr_init_decode(&xdr, &req->rq_rcv_buf, p);
- if ((status = decode_compound_hdr(&xdr, &hdr)) != 0)
+ status = decode_compound_hdr(&xdr, &hdr);
+ if (status)
+ goto out;
+ status = decode_sequence(&xdr, &res->seq_res, req);
+ if (status)
goto out;
if ((status = decode_putfh(&xdr)) != 0)
goto out;
@@ -4483,7 +5185,10 @@
xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
status = decode_compound_hdr(&xdr, &hdr);
- if (status != 0)
+ if (status)
+ goto out;
+ status = decode_sequence(&xdr, &res->seq_res, rqstp);
+ if (status)
goto out;
status = decode_putfh(&xdr);
if (status != 0)
@@ -4497,7 +5202,8 @@
/*
* FS_LOCATIONS request
*/
-static int nfs4_xdr_dec_fs_locations(struct rpc_rqst *req, __be32 *p, struct nfs4_fs_locations *res)
+static int nfs4_xdr_dec_fs_locations(struct rpc_rqst *req, __be32 *p,
+ struct nfs4_fs_locations_res *res)
{
struct xdr_stream xdr;
struct compound_hdr hdr;
@@ -4505,18 +5211,113 @@
xdr_init_decode(&xdr, &req->rq_rcv_buf, p);
status = decode_compound_hdr(&xdr, &hdr);
- if (status != 0)
+ if (status)
+ goto out;
+ status = decode_sequence(&xdr, &res->seq_res, req);
+ if (status)
goto out;
if ((status = decode_putfh(&xdr)) != 0)
goto out;
if ((status = decode_lookup(&xdr)) != 0)
goto out;
xdr_enter_page(&xdr, PAGE_SIZE);
- status = decode_getfattr(&xdr, &res->fattr, res->server);
+ status = decode_getfattr(&xdr, &res->fs_locations->fattr,
+ res->fs_locations->server);
out:
return status;
}
+#if defined(CONFIG_NFS_V4_1)
+/*
+ * EXCHANGE_ID request
+ */
+static int nfs4_xdr_dec_exchange_id(struct rpc_rqst *rqstp, uint32_t *p,
+ void *res)
+{
+ struct xdr_stream xdr;
+ struct compound_hdr hdr;
+ int status;
+
+ xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
+ status = decode_compound_hdr(&xdr, &hdr);
+ if (!status)
+ status = decode_exchange_id(&xdr, res);
+ return status;
+}
+
+/*
+ * a CREATE_SESSION request
+ */
+static int nfs4_xdr_dec_create_session(struct rpc_rqst *rqstp, uint32_t *p,
+ struct nfs41_create_session_res *res)
+{
+ struct xdr_stream xdr;
+ struct compound_hdr hdr;
+ int status;
+
+ xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
+ status = decode_compound_hdr(&xdr, &hdr);
+ if (!status)
+ status = decode_create_session(&xdr, res);
+ return status;
+}
+
+/*
+ * a DESTROY_SESSION request
+ */
+static int nfs4_xdr_dec_destroy_session(struct rpc_rqst *rqstp, uint32_t *p,
+ void *dummy)
+{
+ struct xdr_stream xdr;
+ struct compound_hdr hdr;
+ int status;
+
+ xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
+ status = decode_compound_hdr(&xdr, &hdr);
+ if (!status)
+ status = decode_destroy_session(&xdr, dummy);
+ return status;
+}
+
+/*
+ * a SEQUENCE request
+ */
+static int nfs4_xdr_dec_sequence(struct rpc_rqst *rqstp, uint32_t *p,
+ struct nfs4_sequence_res *res)
+{
+ struct xdr_stream xdr;
+ struct compound_hdr hdr;
+ int status;
+
+ xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
+ status = decode_compound_hdr(&xdr, &hdr);
+ if (!status)
+ status = decode_sequence(&xdr, res, rqstp);
+ return status;
+}
+
+/*
+ * a GET_LEASE_TIME request
+ */
+static int nfs4_xdr_dec_get_lease_time(struct rpc_rqst *rqstp, uint32_t *p,
+ struct nfs4_get_lease_time_res *res)
+{
+ struct xdr_stream xdr;
+ struct compound_hdr hdr;
+ int status;
+
+ xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
+ status = decode_compound_hdr(&xdr, &hdr);
+ if (!status)
+ status = decode_sequence(&xdr, &res->lr_seq_res, rqstp);
+ if (!status)
+ status = decode_putrootfh(&xdr);
+ if (!status)
+ status = decode_fsinfo(&xdr, res->lr_fsinfo);
+ return status;
+}
+#endif /* CONFIG_NFS_V4_1 */
+
__be32 *nfs4_decode_dirent(__be32 *p, struct nfs_entry *entry, int plus)
{
uint32_t bitmap[2] = {0};
@@ -4686,6 +5487,13 @@
PROC(GETACL, enc_getacl, dec_getacl),
PROC(SETACL, enc_setacl, dec_setacl),
PROC(FS_LOCATIONS, enc_fs_locations, dec_fs_locations),
+#if defined(CONFIG_NFS_V4_1)
+ PROC(EXCHANGE_ID, enc_exchange_id, dec_exchange_id),
+ PROC(CREATE_SESSION, enc_create_session, dec_create_session),
+ PROC(DESTROY_SESSION, enc_destroy_session, dec_destroy_session),
+ PROC(SEQUENCE, enc_sequence, dec_sequence),
+ PROC(GET_LEASE_TIME, enc_get_lease_time, dec_get_lease_time),
+#endif /* CONFIG_NFS_V4_1 */
};
struct rpc_version nfs_version4 = {
diff --git a/fs/nfs/nfsroot.c b/fs/nfs/nfsroot.c
index e3ed590..8c55b27 100644
--- a/fs/nfs/nfsroot.c
+++ b/fs/nfs/nfsroot.c
@@ -92,6 +92,9 @@
#undef NFSROOT_DEBUG
#define NFSDBG_FACILITY NFSDBG_ROOT
+/* Default port to use if server is not running a portmapper */
+#define NFS_MNT_PORT 627
+
/* Default path we try to mount. "%s" gets replaced by our IP address */
#define NFS_ROOT "/tftpboot/%s"
@@ -487,6 +490,7 @@
{
struct nfs_fh fh;
struct sockaddr_in sin;
+ unsigned int auth_flav_len = 0;
struct nfs_mount_request request = {
.sap = (struct sockaddr *)&sin,
.salen = sizeof(sin),
@@ -496,6 +500,7 @@
.protocol = (nfs_data.flags & NFS_MOUNT_TCP) ?
XPRT_TRANSPORT_TCP : XPRT_TRANSPORT_UDP,
.fh = &fh,
+ .auth_flav_len = &auth_flav_len,
};
int status;
diff --git a/fs/nfs/read.c b/fs/nfs/read.c
index 4ace3c5..96c4ebf 100644
--- a/fs/nfs/read.c
+++ b/fs/nfs/read.c
@@ -22,6 +22,7 @@
#include <asm/system.h>
+#include "nfs4_fs.h"
#include "internal.h"
#include "iostat.h"
#include "fscache.h"
@@ -46,6 +47,7 @@
memset(p, 0, sizeof(*p));
INIT_LIST_HEAD(&p->pages);
p->npages = pagecount;
+ p->res.seq_res.sr_slotid = NFS4_MAX_SLOT_TABLE;
if (pagecount <= ARRAY_SIZE(p->page_array))
p->pagevec = p->page_array;
else {
@@ -357,19 +359,25 @@
struct nfs_readres *resp = &data->res;
if (resp->eof || resp->count == argp->count)
- return;
+ goto out;
/* This is a short read! */
nfs_inc_stats(data->inode, NFSIOS_SHORTREAD);
/* Has the server at least made some progress? */
if (resp->count == 0)
- return;
+ goto out;
/* Yes, so retry the read at the end of the data */
argp->offset += resp->count;
argp->pgbase += resp->count;
argp->count -= resp->count;
- rpc_restart_call(task);
+ nfs4_restart_rpc(task, NFS_SERVER(data->inode)->nfs_client);
+ return;
+out:
+ nfs4_sequence_free_slot(NFS_SERVER(data->inode)->nfs_client,
+ &data->res.seq_res);
+ return;
+
}
/*
@@ -406,7 +414,23 @@
nfs_readdata_release(calldata);
}
+#if defined(CONFIG_NFS_V4_1)
+void nfs_read_prepare(struct rpc_task *task, void *calldata)
+{
+ struct nfs_read_data *data = calldata;
+
+ if (nfs4_setup_sequence(NFS_SERVER(data->inode)->nfs_client,
+ &data->args.seq_args, &data->res.seq_res,
+ 0, task))
+ return;
+ rpc_call_start(task);
+}
+#endif /* CONFIG_NFS_V4_1 */
+
static const struct rpc_call_ops nfs_read_partial_ops = {
+#if defined(CONFIG_NFS_V4_1)
+ .rpc_call_prepare = nfs_read_prepare,
+#endif /* CONFIG_NFS_V4_1 */
.rpc_call_done = nfs_readpage_result_partial,
.rpc_release = nfs_readpage_release_partial,
};
@@ -470,6 +494,9 @@
}
static const struct rpc_call_ops nfs_read_full_ops = {
+#if defined(CONFIG_NFS_V4_1)
+ .rpc_call_prepare = nfs_read_prepare,
+#endif /* CONFIG_NFS_V4_1 */
.rpc_call_done = nfs_readpage_result_full,
.rpc_release = nfs_readpage_release_full,
};
diff --git a/fs/nfs/super.c b/fs/nfs/super.c
index 26127b6..0b4cbdc 100644
--- a/fs/nfs/super.c
+++ b/fs/nfs/super.c
@@ -42,6 +42,8 @@
#include <linux/smp_lock.h>
#include <linux/seq_file.h>
#include <linux/mount.h>
+#include <linux/mnt_namespace.h>
+#include <linux/namei.h>
#include <linux/nfs_idmap.h>
#include <linux/vfs.h>
#include <linux/inet.h>
@@ -90,6 +92,7 @@
Opt_mountport,
Opt_mountvers,
Opt_nfsvers,
+ Opt_minorversion,
/* Mount options that take string arguments */
Opt_sec, Opt_proto, Opt_mountproto, Opt_mounthost,
@@ -139,22 +142,23 @@
{ Opt_fscache_uniq, "fsc=%s" },
{ Opt_nofscache, "nofsc" },
- { Opt_port, "port=%u" },
- { Opt_rsize, "rsize=%u" },
- { Opt_wsize, "wsize=%u" },
- { Opt_bsize, "bsize=%u" },
- { Opt_timeo, "timeo=%u" },
- { Opt_retrans, "retrans=%u" },
- { Opt_acregmin, "acregmin=%u" },
- { Opt_acregmax, "acregmax=%u" },
- { Opt_acdirmin, "acdirmin=%u" },
- { Opt_acdirmax, "acdirmax=%u" },
- { Opt_actimeo, "actimeo=%u" },
- { Opt_namelen, "namlen=%u" },
- { Opt_mountport, "mountport=%u" },
- { Opt_mountvers, "mountvers=%u" },
- { Opt_nfsvers, "nfsvers=%u" },
- { Opt_nfsvers, "vers=%u" },
+ { Opt_port, "port=%s" },
+ { Opt_rsize, "rsize=%s" },
+ { Opt_wsize, "wsize=%s" },
+ { Opt_bsize, "bsize=%s" },
+ { Opt_timeo, "timeo=%s" },
+ { Opt_retrans, "retrans=%s" },
+ { Opt_acregmin, "acregmin=%s" },
+ { Opt_acregmax, "acregmax=%s" },
+ { Opt_acdirmin, "acdirmin=%s" },
+ { Opt_acdirmax, "acdirmax=%s" },
+ { Opt_actimeo, "actimeo=%s" },
+ { Opt_namelen, "namlen=%s" },
+ { Opt_mountport, "mountport=%s" },
+ { Opt_mountvers, "mountvers=%s" },
+ { Opt_nfsvers, "nfsvers=%s" },
+ { Opt_nfsvers, "vers=%s" },
+ { Opt_minorversion, "minorversion=%u" },
{ Opt_sec, "sec=%s" },
{ Opt_proto, "proto=%s" },
@@ -270,10 +274,14 @@
#ifdef CONFIG_NFS_V4
static int nfs4_get_sb(struct file_system_type *fs_type,
int flags, const char *dev_name, void *raw_data, struct vfsmount *mnt);
+static int nfs4_remote_get_sb(struct file_system_type *fs_type,
+ int flags, const char *dev_name, void *raw_data, struct vfsmount *mnt);
static int nfs4_xdev_get_sb(struct file_system_type *fs_type,
int flags, const char *dev_name, void *raw_data, struct vfsmount *mnt);
static int nfs4_referral_get_sb(struct file_system_type *fs_type,
int flags, const char *dev_name, void *raw_data, struct vfsmount *mnt);
+static int nfs4_remote_referral_get_sb(struct file_system_type *fs_type,
+ int flags, const char *dev_name, void *raw_data, struct vfsmount *mnt);
static void nfs4_kill_super(struct super_block *sb);
static struct file_system_type nfs4_fs_type = {
@@ -284,6 +292,14 @@
.fs_flags = FS_RENAME_DOES_D_MOVE|FS_REVAL_DOT|FS_BINARY_MOUNTDATA,
};
+static struct file_system_type nfs4_remote_fs_type = {
+ .owner = THIS_MODULE,
+ .name = "nfs4",
+ .get_sb = nfs4_remote_get_sb,
+ .kill_sb = nfs4_kill_super,
+ .fs_flags = FS_RENAME_DOES_D_MOVE|FS_REVAL_DOT|FS_BINARY_MOUNTDATA,
+};
+
struct file_system_type nfs4_xdev_fs_type = {
.owner = THIS_MODULE,
.name = "nfs4",
@@ -292,6 +308,14 @@
.fs_flags = FS_RENAME_DOES_D_MOVE|FS_REVAL_DOT|FS_BINARY_MOUNTDATA,
};
+static struct file_system_type nfs4_remote_referral_fs_type = {
+ .owner = THIS_MODULE,
+ .name = "nfs4",
+ .get_sb = nfs4_remote_referral_get_sb,
+ .kill_sb = nfs4_kill_super,
+ .fs_flags = FS_RENAME_DOES_D_MOVE|FS_REVAL_DOT|FS_BINARY_MOUNTDATA,
+};
+
struct file_system_type nfs4_referral_fs_type = {
.owner = THIS_MODULE,
.name = "nfs4",
@@ -514,7 +538,6 @@
const char *nostr;
} nfs_info[] = {
{ NFS_MOUNT_SOFT, ",soft", ",hard" },
- { NFS_MOUNT_INTR, ",intr", ",nointr" },
{ NFS_MOUNT_POSIX, ",posix", "" },
{ NFS_MOUNT_NOCTO, ",nocto", "" },
{ NFS_MOUNT_NOAC, ",noac", "" },
@@ -943,11 +966,6 @@
return 1;
}
-static void nfs_parse_invalid_value(const char *option)
-{
- dfprintk(MOUNT, "NFS: bad value specified for %s option\n", option);
-}
-
/*
* Error-check and convert a string of mount options from user space into
* a data structure. The whole mount string is processed; bad options are
@@ -958,7 +976,7 @@
struct nfs_parsed_mount_data *mnt)
{
char *p, *string, *secdata;
- int rc, sloppy = 0, errors = 0;
+ int rc, sloppy = 0, invalid_option = 0;
if (!raw) {
dfprintk(MOUNT, "NFS: mount options string was NULL.\n");
@@ -982,7 +1000,9 @@
while ((p = strsep(&raw, ",")) != NULL) {
substring_t args[MAX_OPT_ARGS];
- int option, token;
+ unsigned long option;
+ int int_option;
+ int token;
if (!*p)
continue;
@@ -1091,114 +1111,156 @@
* options that take numeric values
*/
case Opt_port:
- if (match_int(args, &option) ||
- option < 0 || option > USHORT_MAX) {
- errors++;
- nfs_parse_invalid_value("port");
- } else
- mnt->nfs_server.port = option;
+ string = match_strdup(args);
+ if (string == NULL)
+ goto out_nomem;
+ rc = strict_strtoul(string, 10, &option);
+ kfree(string);
+ if (rc != 0 || option > USHORT_MAX)
+ goto out_invalid_value;
+ mnt->nfs_server.port = option;
break;
case Opt_rsize:
- if (match_int(args, &option) || option < 0) {
- errors++;
- nfs_parse_invalid_value("rsize");
- } else
- mnt->rsize = option;
+ string = match_strdup(args);
+ if (string == NULL)
+ goto out_nomem;
+ rc = strict_strtoul(string, 10, &option);
+ kfree(string);
+ if (rc != 0)
+ goto out_invalid_value;
+ mnt->rsize = option;
break;
case Opt_wsize:
- if (match_int(args, &option) || option < 0) {
- errors++;
- nfs_parse_invalid_value("wsize");
- } else
- mnt->wsize = option;
+ string = match_strdup(args);
+ if (string == NULL)
+ goto out_nomem;
+ rc = strict_strtoul(string, 10, &option);
+ kfree(string);
+ if (rc != 0)
+ goto out_invalid_value;
+ mnt->wsize = option;
break;
case Opt_bsize:
- if (match_int(args, &option) || option < 0) {
- errors++;
- nfs_parse_invalid_value("bsize");
- } else
- mnt->bsize = option;
+ string = match_strdup(args);
+ if (string == NULL)
+ goto out_nomem;
+ rc = strict_strtoul(string, 10, &option);
+ kfree(string);
+ if (rc != 0)
+ goto out_invalid_value;
+ mnt->bsize = option;
break;
case Opt_timeo:
- if (match_int(args, &option) || option <= 0) {
- errors++;
- nfs_parse_invalid_value("timeo");
- } else
- mnt->timeo = option;
+ string = match_strdup(args);
+ if (string == NULL)
+ goto out_nomem;
+ rc = strict_strtoul(string, 10, &option);
+ kfree(string);
+ if (rc != 0 || option == 0)
+ goto out_invalid_value;
+ mnt->timeo = option;
break;
case Opt_retrans:
- if (match_int(args, &option) || option <= 0) {
- errors++;
- nfs_parse_invalid_value("retrans");
- } else
- mnt->retrans = option;
+ string = match_strdup(args);
+ if (string == NULL)
+ goto out_nomem;
+ rc = strict_strtoul(string, 10, &option);
+ kfree(string);
+ if (rc != 0 || option == 0)
+ goto out_invalid_value;
+ mnt->retrans = option;
break;
case Opt_acregmin:
- if (match_int(args, &option) || option < 0) {
- errors++;
- nfs_parse_invalid_value("acregmin");
- } else
- mnt->acregmin = option;
+ string = match_strdup(args);
+ if (string == NULL)
+ goto out_nomem;
+ rc = strict_strtoul(string, 10, &option);
+ kfree(string);
+ if (rc != 0)
+ goto out_invalid_value;
+ mnt->acregmin = option;
break;
case Opt_acregmax:
- if (match_int(args, &option) || option < 0) {
- errors++;
- nfs_parse_invalid_value("acregmax");
- } else
- mnt->acregmax = option;
+ string = match_strdup(args);
+ if (string == NULL)
+ goto out_nomem;
+ rc = strict_strtoul(string, 10, &option);
+ kfree(string);
+ if (rc != 0)
+ goto out_invalid_value;
+ mnt->acregmax = option;
break;
case Opt_acdirmin:
- if (match_int(args, &option) || option < 0) {
- errors++;
- nfs_parse_invalid_value("acdirmin");
- } else
- mnt->acdirmin = option;
+ string = match_strdup(args);
+ if (string == NULL)
+ goto out_nomem;
+ rc = strict_strtoul(string, 10, &option);
+ kfree(string);
+ if (rc != 0)
+ goto out_invalid_value;
+ mnt->acdirmin = option;
break;
case Opt_acdirmax:
- if (match_int(args, &option) || option < 0) {
- errors++;
- nfs_parse_invalid_value("acdirmax");
- } else
- mnt->acdirmax = option;
+ string = match_strdup(args);
+ if (string == NULL)
+ goto out_nomem;
+ rc = strict_strtoul(string, 10, &option);
+ kfree(string);
+ if (rc != 0)
+ goto out_invalid_value;
+ mnt->acdirmax = option;
break;
case Opt_actimeo:
- if (match_int(args, &option) || option < 0) {
- errors++;
- nfs_parse_invalid_value("actimeo");
- } else
- mnt->acregmin = mnt->acregmax =
- mnt->acdirmin = mnt->acdirmax = option;
+ string = match_strdup(args);
+ if (string == NULL)
+ goto out_nomem;
+ rc = strict_strtoul(string, 10, &option);
+ kfree(string);
+ if (rc != 0)
+ goto out_invalid_value;
+ mnt->acregmin = mnt->acregmax =
+ mnt->acdirmin = mnt->acdirmax = option;
break;
case Opt_namelen:
- if (match_int(args, &option) || option < 0) {
- errors++;
- nfs_parse_invalid_value("namlen");
- } else
- mnt->namlen = option;
+ string = match_strdup(args);
+ if (string == NULL)
+ goto out_nomem;
+ rc = strict_strtoul(string, 10, &option);
+ kfree(string);
+ if (rc != 0)
+ goto out_invalid_value;
+ mnt->namlen = option;
break;
case Opt_mountport:
- if (match_int(args, &option) ||
- option < 0 || option > USHORT_MAX) {
- errors++;
- nfs_parse_invalid_value("mountport");
- } else
- mnt->mount_server.port = option;
+ string = match_strdup(args);
+ if (string == NULL)
+ goto out_nomem;
+ rc = strict_strtoul(string, 10, &option);
+ kfree(string);
+ if (rc != 0 || option > USHORT_MAX)
+ goto out_invalid_value;
+ mnt->mount_server.port = option;
break;
case Opt_mountvers:
- if (match_int(args, &option) ||
+ string = match_strdup(args);
+ if (string == NULL)
+ goto out_nomem;
+ rc = strict_strtoul(string, 10, &option);
+ kfree(string);
+ if (rc != 0 ||
option < NFS_MNT_VERSION ||
- option > NFS_MNT3_VERSION) {
- errors++;
- nfs_parse_invalid_value("mountvers");
- } else
- mnt->mount_server.version = option;
+ option > NFS_MNT3_VERSION)
+ goto out_invalid_value;
+ mnt->mount_server.version = option;
break;
case Opt_nfsvers:
- if (match_int(args, &option)) {
- errors++;
- nfs_parse_invalid_value("nfsvers");
- break;
- }
+ string = match_strdup(args);
+ if (string == NULL)
+ goto out_nomem;
+ rc = strict_strtoul(string, 10, &option);
+ kfree(string);
+ if (rc != 0)
+ goto out_invalid_value;
switch (option) {
case NFS2_VERSION:
mnt->flags &= ~NFS_MOUNT_VER3;
@@ -1207,10 +1269,16 @@
mnt->flags |= NFS_MOUNT_VER3;
break;
default:
- errors++;
- nfs_parse_invalid_value("nfsvers");
+ goto out_invalid_value;
}
break;
+ case Opt_minorversion:
+ if (match_int(args, &int_option))
+ return 0;
+ if (int_option < 0 || int_option > NFS4_MAX_MINOR_VERSION)
+ return 0;
+ mnt->minorversion = int_option;
+ break;
/*
* options that take text values
@@ -1222,9 +1290,9 @@
rc = nfs_parse_security_flavors(string, mnt);
kfree(string);
if (!rc) {
- errors++;
dfprintk(MOUNT, "NFS: unrecognized "
"security flavor\n");
+ return 0;
}
break;
case Opt_proto:
@@ -1238,23 +1306,25 @@
case Opt_xprt_udp:
mnt->flags &= ~NFS_MOUNT_TCP;
mnt->nfs_server.protocol = XPRT_TRANSPORT_UDP;
+ kfree(string);
break;
case Opt_xprt_tcp:
mnt->flags |= NFS_MOUNT_TCP;
mnt->nfs_server.protocol = XPRT_TRANSPORT_TCP;
+ kfree(string);
break;
case Opt_xprt_rdma:
/* vector side protocols to TCP */
mnt->flags |= NFS_MOUNT_TCP;
mnt->nfs_server.protocol = XPRT_TRANSPORT_RDMA;
xprt_load_transport(string);
+ kfree(string);
break;
default:
- errors++;
dfprintk(MOUNT, "NFS: unrecognized "
"transport protocol\n");
+ return 0;
}
- kfree(string);
break;
case Opt_mountproto:
string = match_strdup(args);
@@ -1273,9 +1343,9 @@
break;
case Opt_xprt_rdma: /* not used for side protocols */
default:
- errors++;
dfprintk(MOUNT, "NFS: unrecognized "
"transport protocol\n");
+ return 0;
}
break;
case Opt_addr:
@@ -1331,9 +1401,9 @@
mnt->flags |= NFS_MOUNT_LOOKUP_CACHE_NONEG|NFS_MOUNT_LOOKUP_CACHE_NONE;
break;
default:
- errors++;
dfprintk(MOUNT, "NFS: invalid "
"lookupcache argument\n");
+ return 0;
};
break;
@@ -1351,20 +1421,20 @@
break;
default:
- errors++;
+ invalid_option = 1;
dfprintk(MOUNT, "NFS: unrecognized mount option "
"'%s'\n", p);
}
}
- if (errors > 0) {
- dfprintk(MOUNT, "NFS: parsing encountered %d error%s\n",
- errors, (errors == 1 ? "" : "s"));
- if (!sloppy)
- return 0;
- }
+ if (!sloppy && invalid_option)
+ return 0;
+
return 1;
+out_invalid_value:
+ printk(KERN_INFO "NFS: bad mount option value specified: %s \n", p);
+ return 0;
out_nomem:
printk(KERN_INFO "NFS: not enough memory to parse option\n");
return 0;
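
[Editor's note, not part of the patch] The hunks above replace match_int() with a match_strdup()/strict_strtoul() pair so option values are parsed as unsigned longs (dropping the spurious negative-value checks) and any malformed value now aborts the parse via out_invalid_value instead of merely being counted. A minimal userspace sketch of that idiom, for illustration only (parse_u32_option and its callers are made up for the example):

#include <errno.h>
#include <limits.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Illustrative only: parse a decimal option value the way the hunks above
 * do -- duplicate the token, convert it strictly, range-check, then free. */
static int parse_u32_option(const char *token, unsigned long max,
                            unsigned long *result)
{
        char *dup, *end;
        unsigned long val;

        dup = strdup(token);            /* match_strdup() analogue */
        if (dup == NULL)
                return -ENOMEM;         /* the "goto out_nomem" path */

        errno = 0;
        val = strtoul(dup, &end, 10);   /* strict_strtoul() analogue */
        if (errno != 0 || end == dup || *end != '\0' || val > max) {
                free(dup);
                return -EINVAL;         /* the "goto out_invalid_value" path */
        }

        free(dup);
        *result = val;
        return 0;
}

int main(void)
{
        unsigned long port;

        if (parse_u32_option("2049", USHRT_MAX, &port) == 0)
                printf("port=%lu\n", port);
        if (parse_u32_option("70000", USHRT_MAX, &port) != 0)
                printf("70000 rejected: out of range for a port\n");
        return 0;
}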
@@ -1381,6 +1451,7 @@
static int nfs_try_mount(struct nfs_parsed_mount_data *args,
struct nfs_fh *root_fh)
{
+ unsigned int auth_flavor_len = 0;
struct nfs_mount_request request = {
.sap = (struct sockaddr *)
&args->mount_server.address,
@@ -1388,6 +1459,7 @@
.protocol = args->mount_server.protocol,
.fh = root_fh,
.noresvport = args->flags & NFS_MOUNT_NORESVPORT,
+ .auth_flav_len = &auth_flavor_len,
};
int status;
@@ -2240,6 +2312,11 @@
nfs_initialise_sb(sb);
}
+static void nfs4_validate_mount_flags(struct nfs_parsed_mount_data *args)
+{
+ args->flags &= ~(NFS_MOUNT_NONLM|NFS_MOUNT_NOACL|NFS_MOUNT_VER3);
+}
+
/*
* Validate NFSv4 mount options
*/
@@ -2263,6 +2340,7 @@
args->nfs_server.port = NFS_PORT; /* 2049 unless user set port= */
args->auth_flavors[0] = RPC_AUTH_UNIX;
args->auth_flavor_len = 0;
+ args->minorversion = 0;
switch (data->version) {
case 1:
@@ -2336,6 +2414,8 @@
nfs_validate_transport_protocol(args);
+ nfs4_validate_mount_flags(args);
+
if (args->auth_flavor_len > 1)
goto out_inval_auth;
@@ -2375,12 +2455,12 @@
}
/*
- * Get the superblock for an NFS4 mountpoint
+ * Get the superblock for the NFS4 root partition
*/
-static int nfs4_get_sb(struct file_system_type *fs_type,
+static int nfs4_remote_get_sb(struct file_system_type *fs_type,
int flags, const char *dev_name, void *raw_data, struct vfsmount *mnt)
{
- struct nfs_parsed_mount_data *data;
+ struct nfs_parsed_mount_data *data = raw_data;
struct super_block *s;
struct nfs_server *server;
struct nfs_fh *mntfh;
@@ -2391,18 +2471,12 @@
};
int error = -ENOMEM;
- data = kzalloc(sizeof(*data), GFP_KERNEL);
mntfh = kzalloc(sizeof(*mntfh), GFP_KERNEL);
if (data == NULL || mntfh == NULL)
goto out_free_fh;
security_init_mnt_opts(&data->lsm_opts);
- /* Validate the mount data */
- error = nfs4_validate_mount_data(raw_data, data, dev_name);
- if (error < 0)
- goto out;
-
/* Get a volume representation */
server = nfs4_create_server(data, mntfh);
if (IS_ERR(server)) {
@@ -2415,7 +2489,7 @@
compare_super = NULL;
/* Get a superblock - note that we may end up sharing one that already exists */
- s = sget(fs_type, compare_super, nfs_set_super, &sb_mntdata);
+ s = sget(&nfs4_fs_type, compare_super, nfs_set_super, &sb_mntdata);
if (IS_ERR(s)) {
error = PTR_ERR(s);
goto out_free;
@@ -2452,14 +2526,9 @@
error = 0;
out:
- kfree(data->client_address);
- kfree(data->nfs_server.export_path);
- kfree(data->nfs_server.hostname);
- kfree(data->fscache_uniq);
security_free_mnt_opts(&data->lsm_opts);
out_free_fh:
kfree(mntfh);
- kfree(data);
return error;
out_free:
@@ -2473,16 +2542,137 @@
goto out;
}
+static struct vfsmount *nfs_do_root_mount(struct file_system_type *fs_type,
+ int flags, void *data, const char *hostname)
+{
+ struct vfsmount *root_mnt;
+ char *root_devname;
+ size_t len;
+
+ len = strlen(hostname) + 3;
+ root_devname = kmalloc(len, GFP_KERNEL);
+ if (root_devname == NULL)
+ return ERR_PTR(-ENOMEM);
+ snprintf(root_devname, len, "%s:/", hostname);
+ root_mnt = vfs_kern_mount(fs_type, flags, root_devname, data);
+ kfree(root_devname);
+ return root_mnt;
+}
+
+static void nfs_fix_devname(const struct path *path, struct vfsmount *mnt)
+{
+ char *page = (char *) __get_free_page(GFP_KERNEL);
+ char *devname, *tmp;
+
+ if (page == NULL)
+ return;
+ devname = nfs_path(path->mnt->mnt_devname,
+ path->mnt->mnt_root, path->dentry,
+ page, PAGE_SIZE);
+ if (devname == NULL)
+ goto out_freepage;
+ tmp = kstrdup(devname, GFP_KERNEL);
+ if (tmp == NULL)
+ goto out_freepage;
+ kfree(mnt->mnt_devname);
+ mnt->mnt_devname = tmp;
+out_freepage:
+ free_page((unsigned long)page);
+}
+
+static int nfs_follow_remote_path(struct vfsmount *root_mnt,
+ const char *export_path, struct vfsmount *mnt_target)
+{
+ struct mnt_namespace *ns_private;
+ struct nameidata nd;
+ struct super_block *s;
+ int ret;
+
+ ns_private = create_mnt_ns(root_mnt);
+ ret = PTR_ERR(ns_private);
+ if (IS_ERR(ns_private))
+ goto out_mntput;
+
+ ret = vfs_path_lookup(root_mnt->mnt_root, root_mnt,
+ export_path, LOOKUP_FOLLOW, &nd);
+
+ put_mnt_ns(ns_private);
+
+ if (ret != 0)
+ goto out_err;
+
+ s = nd.path.mnt->mnt_sb;
+ atomic_inc(&s->s_active);
+ mnt_target->mnt_sb = s;
+ mnt_target->mnt_root = dget(nd.path.dentry);
+
+ /* Correct the device pathname */
+ nfs_fix_devname(&nd.path, mnt_target);
+
+ path_put(&nd.path);
+ down_write(&s->s_umount);
+ return 0;
+out_mntput:
+ mntput(root_mnt);
+out_err:
+ return ret;
+}
+
+/*
+ * Get the superblock for an NFS4 mountpoint
+ */
+static int nfs4_get_sb(struct file_system_type *fs_type,
+ int flags, const char *dev_name, void *raw_data, struct vfsmount *mnt)
+{
+ struct nfs_parsed_mount_data *data;
+ char *export_path;
+ struct vfsmount *root_mnt;
+ int error = -ENOMEM;
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (data == NULL)
+ goto out_free_data;
+
+ /* Validate the mount data */
+ error = nfs4_validate_mount_data(raw_data, data, dev_name);
+ if (error < 0)
+ goto out;
+
+ export_path = data->nfs_server.export_path;
+ data->nfs_server.export_path = "/";
+ root_mnt = nfs_do_root_mount(&nfs4_remote_fs_type, flags, data,
+ data->nfs_server.hostname);
+ data->nfs_server.export_path = export_path;
+
+ error = PTR_ERR(root_mnt);
+ if (IS_ERR(root_mnt))
+ goto out;
+
+ error = nfs_follow_remote_path(root_mnt, export_path, mnt);
+
+out:
+ kfree(data->client_address);
+ kfree(data->nfs_server.export_path);
+ kfree(data->nfs_server.hostname);
+ kfree(data->fscache_uniq);
+out_free_data:
+ kfree(data);
+ dprintk("<-- nfs4_get_sb() = %d%s\n", error,
+ error != 0 ? " [error]" : "");
+ return error;
+}
+
static void nfs4_kill_super(struct super_block *sb)
{
struct nfs_server *server = NFS_SB(sb);
+ dprintk("--> %s\n", __func__);
nfs_super_return_all_delegations(sb);
kill_anon_super(sb);
-
nfs4_renewd_prepare_shutdown(server);
nfs_fscache_release_super_cookie(sb);
nfs_free_server(server);
+ dprintk("<-- %s\n", __func__);
}
/*
@@ -2568,12 +2758,9 @@
return error;
}
-/*
- * Create an NFS4 server record on referral traversal
- */
-static int nfs4_referral_get_sb(struct file_system_type *fs_type, int flags,
- const char *dev_name, void *raw_data,
- struct vfsmount *mnt)
+static int nfs4_remote_referral_get_sb(struct file_system_type *fs_type,
+ int flags, const char *dev_name, void *raw_data,
+ struct vfsmount *mnt)
{
struct nfs_clone_mount *data = raw_data;
struct super_block *s;
@@ -2652,4 +2839,36 @@
return error;
}
+/*
+ * Create an NFS4 server record on referral traversal
+ */
+static int nfs4_referral_get_sb(struct file_system_type *fs_type,
+ int flags, const char *dev_name, void *raw_data,
+ struct vfsmount *mnt)
+{
+ struct nfs_clone_mount *data = raw_data;
+ char *export_path;
+ struct vfsmount *root_mnt;
+ int error;
+
+ dprintk("--> nfs4_referral_get_sb()\n");
+
+ export_path = data->mnt_path;
+ data->mnt_path = "/";
+
+ root_mnt = nfs_do_root_mount(&nfs4_remote_referral_fs_type,
+ flags, data, data->hostname);
+ data->mnt_path = export_path;
+
+ error = PTR_ERR(root_mnt);
+ if (IS_ERR(root_mnt))
+ goto out;
+
+ error = nfs_follow_remote_path(root_mnt, export_path, mnt);
+out:
+ dprintk("<-- nfs4_referral_get_sb() = %d%s\n", error,
+ error != 0 ? " [error]" : "");
+ return error;
+}
+
#endif /* CONFIG_NFS_V4 */
diff --git a/fs/nfs/unlink.c b/fs/nfs/unlink.c
index ecc2953..1064c91 100644
--- a/fs/nfs/unlink.c
+++ b/fs/nfs/unlink.c
@@ -15,6 +15,7 @@
#include <linux/wait.h>
#include "internal.h"
+#include "nfs4_fs.h"
struct nfs_unlinkdata {
struct hlist_node list;
@@ -82,7 +83,7 @@
struct inode *dir = data->dir;
if (!NFS_PROTO(dir)->unlink_done(task, dir))
- rpc_restart_call(task);
+ nfs4_restart_rpc(task, NFS_SERVER(dir)->nfs_client);
}
/**
@@ -102,9 +103,25 @@
nfs_sb_deactive(sb);
}
+#if defined(CONFIG_NFS_V4_1)
+void nfs_unlink_prepare(struct rpc_task *task, void *calldata)
+{
+ struct nfs_unlinkdata *data = calldata;
+ struct nfs_server *server = NFS_SERVER(data->dir);
+
+ if (nfs4_setup_sequence(server->nfs_client, &data->args.seq_args,
+ &data->res.seq_res, 1, task))
+ return;
+ rpc_call_start(task);
+}
+#endif /* CONFIG_NFS_V4_1 */
+
static const struct rpc_call_ops nfs_unlink_ops = {
.rpc_call_done = nfs_async_unlink_done,
.rpc_release = nfs_async_unlink_release,
+#if defined(CONFIG_NFS_V4_1)
+ .rpc_call_prepare = nfs_unlink_prepare,
+#endif /* CONFIG_NFS_V4_1 */
};
static int nfs_do_call_unlink(struct dentry *parent, struct inode *dir, struct nfs_unlinkdata *data)
@@ -241,6 +258,7 @@
status = PTR_ERR(data->cred);
goto out_free;
}
+ data->res.seq_res.sr_slotid = NFS4_MAX_SLOT_TABLE;
status = -EBUSY;
spin_lock(&dentry->d_lock);
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index e560a78..ce72882 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -25,6 +25,7 @@
#include "delegation.h"
#include "internal.h"
#include "iostat.h"
+#include "nfs4_fs.h"
#define NFSDBG_FACILITY NFSDBG_PAGECACHE
@@ -52,6 +53,7 @@
if (p) {
memset(p, 0, sizeof(*p));
INIT_LIST_HEAD(&p->pages);
+ p->res.seq_res.sr_slotid = NFS4_MAX_SLOT_TABLE;
}
return p;
}
@@ -71,6 +73,7 @@
memset(p, 0, sizeof(*p));
INIT_LIST_HEAD(&p->pages);
p->npages = pagecount;
+ p->res.seq_res.sr_slotid = NFS4_MAX_SLOT_TABLE;
if (pagecount <= ARRAY_SIZE(p->page_array))
p->pagevec = p->page_array;
else {
@@ -1048,7 +1051,23 @@
nfs_writedata_release(calldata);
}
+#if defined(CONFIG_NFS_V4_1)
+void nfs_write_prepare(struct rpc_task *task, void *calldata)
+{
+ struct nfs_write_data *data = calldata;
+ struct nfs_client *clp = (NFS_SERVER(data->inode))->nfs_client;
+
+ if (nfs4_setup_sequence(clp, &data->args.seq_args,
+ &data->res.seq_res, 1, task))
+ return;
+ rpc_call_start(task);
+}
+#endif /* CONFIG_NFS_V4_1 */
+
static const struct rpc_call_ops nfs_write_partial_ops = {
+#if defined(CONFIG_NFS_V4_1)
+ .rpc_call_prepare = nfs_write_prepare,
+#endif /* CONFIG_NFS_V4_1 */
.rpc_call_done = nfs_writeback_done_partial,
.rpc_release = nfs_writeback_release_partial,
};
@@ -1111,6 +1130,9 @@
}
static const struct rpc_call_ops nfs_write_full_ops = {
+#if defined(CONFIG_NFS_V4_1)
+ .rpc_call_prepare = nfs_write_prepare,
+#endif /* CONFIG_NFS_V4_1 */
.rpc_call_done = nfs_writeback_done_full,
.rpc_release = nfs_writeback_release_full,
};
@@ -1123,6 +1145,7 @@
{
struct nfs_writeargs *argp = &data->args;
struct nfs_writeres *resp = &data->res;
+ struct nfs_server *server = NFS_SERVER(data->inode);
int status;
dprintk("NFS: %5u nfs_writeback_done (status %d)\n",
@@ -1155,7 +1178,7 @@
if (time_before(complain, jiffies)) {
dprintk("NFS: faulty NFS server %s:"
" (committed = %d) != (stable = %d)\n",
- NFS_SERVER(data->inode)->nfs_client->cl_hostname,
+ server->nfs_client->cl_hostname,
resp->verf->committed, argp->stable);
complain = jiffies + 300 * HZ;
}
@@ -1181,7 +1204,7 @@
*/
argp->stable = NFS_FILE_SYNC;
}
- rpc_restart_call(task);
+ nfs4_restart_rpc(task, server->nfs_client);
return -EAGAIN;
}
if (time_before(complain, jiffies)) {
@@ -1193,6 +1216,7 @@
/* Can't do anything about it except throw an error. */
task->tk_status = -EIO;
}
+ nfs4_sequence_free_slot(server->nfs_client, &data->res.seq_res);
return 0;
}
@@ -1349,6 +1373,9 @@
}
static const struct rpc_call_ops nfs_commit_ops = {
+#if defined(CONFIG_NFS_V4_1)
+ .rpc_call_prepare = nfs_write_prepare,
+#endif /* CONFIG_NFS_V4_1 */
.rpc_call_done = nfs_commit_done,
.rpc_release = nfs_commit_release,
};
diff --git a/fs/nfsd/export.c b/fs/nfsd/export.c
index 8b1f8ef..b92a276 100644
--- a/fs/nfsd/export.c
+++ b/fs/nfsd/export.c
@@ -464,16 +464,11 @@
if (err)
return err;
/*
- * Just a quick sanity check; we could also try to check
- * whether this pseudoflavor is supported, but at worst
- * an unsupported pseudoflavor on the export would just
- * be a pseudoflavor that won't match the flavor of any
- * authenticated request. The administrator will
- * probably discover the problem when someone fails to
- * authenticate.
+ * XXX: It would be nice to also check whether this
+ * pseudoflavor is supported, so we can discover the
+ * problem at export time instead of when a client fails
+ * to authenticate.
*/
- if (f->pseudoflavor < 0)
- return -EINVAL;
err = get_int(mesg, &f->flags);
if (err)
return err;
diff --git a/fs/nfsd/nfs3proc.c b/fs/nfsd/nfs3proc.c
index 7c9fe83..a713c41 100644
--- a/fs/nfsd/nfs3proc.c
+++ b/fs/nfsd/nfs3proc.c
@@ -652,8 +652,6 @@
* NFSv3 Server procedures.
* Only the results of non-idempotent operations are cached.
*/
-#define nfs3svc_decode_voidargs NULL
-#define nfs3svc_release_void NULL
#define nfs3svc_decode_fhandleargs nfs3svc_decode_fhandle
#define nfs3svc_encode_attrstatres nfs3svc_encode_attrstat
#define nfs3svc_encode_wccstatres nfs3svc_encode_wccstat
@@ -686,28 +684,219 @@
#define WC (7+pAT) /* WCC attributes */
static struct svc_procedure nfsd_procedures3[22] = {
- PROC(null, void, void, void, RC_NOCACHE, ST),
- PROC(getattr, fhandle, attrstat, fhandle, RC_NOCACHE, ST+AT),
- PROC(setattr, sattr, wccstat, fhandle, RC_REPLBUFF, ST+WC),
- PROC(lookup, dirop, dirop, fhandle2, RC_NOCACHE, ST+FH+pAT+pAT),
- PROC(access, access, access, fhandle, RC_NOCACHE, ST+pAT+1),
- PROC(readlink, readlink, readlink, fhandle, RC_NOCACHE, ST+pAT+1+NFS3_MAXPATHLEN/4),
- PROC(read, read, read, fhandle, RC_NOCACHE, ST+pAT+4+NFSSVC_MAXBLKSIZE/4),
- PROC(write, write, write, fhandle, RC_REPLBUFF, ST+WC+4),
- PROC(create, create, create, fhandle2, RC_REPLBUFF, ST+(1+FH+pAT)+WC),
- PROC(mkdir, mkdir, create, fhandle2, RC_REPLBUFF, ST+(1+FH+pAT)+WC),
- PROC(symlink, symlink, create, fhandle2, RC_REPLBUFF, ST+(1+FH+pAT)+WC),
- PROC(mknod, mknod, create, fhandle2, RC_REPLBUFF, ST+(1+FH+pAT)+WC),
- PROC(remove, dirop, wccstat, fhandle, RC_REPLBUFF, ST+WC),
- PROC(rmdir, dirop, wccstat, fhandle, RC_REPLBUFF, ST+WC),
- PROC(rename, rename, rename, fhandle2, RC_REPLBUFF, ST+WC+WC),
- PROC(link, link, link, fhandle2, RC_REPLBUFF, ST+pAT+WC),
- PROC(readdir, readdir, readdir, fhandle, RC_NOCACHE, 0),
- PROC(readdirplus,readdirplus, readdir, fhandle, RC_NOCACHE, 0),
- PROC(fsstat, fhandle, fsstat, void, RC_NOCACHE, ST+pAT+2*6+1),
- PROC(fsinfo, fhandle, fsinfo, void, RC_NOCACHE, ST+pAT+12),
- PROC(pathconf, fhandle, pathconf, void, RC_NOCACHE, ST+pAT+6),
- PROC(commit, commit, commit, fhandle, RC_NOCACHE, ST+WC+2),
+ [NFS3PROC_NULL] = {
+ .pc_func = (svc_procfunc) nfsd3_proc_null,
+ .pc_encode = (kxdrproc_t) nfs3svc_encode_voidres,
+ .pc_argsize = sizeof(struct nfsd3_voidargs),
+ .pc_ressize = sizeof(struct nfsd3_voidres),
+ .pc_cachetype = RC_NOCACHE,
+ .pc_xdrressize = ST,
+ },
+ [NFS3PROC_GETATTR] = {
+ .pc_func = (svc_procfunc) nfsd3_proc_getattr,
+ .pc_decode = (kxdrproc_t) nfs3svc_decode_fhandleargs,
+ .pc_encode = (kxdrproc_t) nfs3svc_encode_attrstatres,
+ .pc_release = (kxdrproc_t) nfs3svc_release_fhandle,
+ .pc_argsize = sizeof(struct nfsd3_fhandleargs),
+ .pc_ressize = sizeof(struct nfsd3_attrstatres),
+ .pc_cachetype = RC_NOCACHE,
+ .pc_xdrressize = ST+AT,
+ },
+ [NFS3PROC_SETATTR] = {
+ .pc_func = (svc_procfunc) nfsd3_proc_setattr,
+ .pc_decode = (kxdrproc_t) nfs3svc_decode_sattrargs,
+ .pc_encode = (kxdrproc_t) nfs3svc_encode_wccstatres,
+ .pc_release = (kxdrproc_t) nfs3svc_release_fhandle,
+ .pc_argsize = sizeof(struct nfsd3_sattrargs),
+ .pc_ressize = sizeof(struct nfsd3_wccstatres),
+ .pc_cachetype = RC_REPLBUFF,
+ .pc_xdrressize = ST+WC,
+ },
+ [NFS3PROC_LOOKUP] = {
+ .pc_func = (svc_procfunc) nfsd3_proc_lookup,
+ .pc_decode = (kxdrproc_t) nfs3svc_decode_diropargs,
+ .pc_encode = (kxdrproc_t) nfs3svc_encode_diropres,
+ .pc_release = (kxdrproc_t) nfs3svc_release_fhandle2,
+ .pc_argsize = sizeof(struct nfsd3_diropargs),
+ .pc_ressize = sizeof(struct nfsd3_diropres),
+ .pc_cachetype = RC_NOCACHE,
+ .pc_xdrressize = ST+FH+pAT+pAT,
+ },
+ [NFS3PROC_ACCESS] = {
+ .pc_func = (svc_procfunc) nfsd3_proc_access,
+ .pc_decode = (kxdrproc_t) nfs3svc_decode_accessargs,
+ .pc_encode = (kxdrproc_t) nfs3svc_encode_accessres,
+ .pc_release = (kxdrproc_t) nfs3svc_release_fhandle,
+ .pc_argsize = sizeof(struct nfsd3_accessargs),
+ .pc_ressize = sizeof(struct nfsd3_accessres),
+ .pc_cachetype = RC_NOCACHE,
+ .pc_xdrressize = ST+pAT+1,
+ },
+ [NFS3PROC_READLINK] = {
+ .pc_func = (svc_procfunc) nfsd3_proc_readlink,
+ .pc_decode = (kxdrproc_t) nfs3svc_decode_readlinkargs,
+ .pc_encode = (kxdrproc_t) nfs3svc_encode_readlinkres,
+ .pc_release = (kxdrproc_t) nfs3svc_release_fhandle,
+ .pc_argsize = sizeof(struct nfsd3_readlinkargs),
+ .pc_ressize = sizeof(struct nfsd3_readlinkres),
+ .pc_cachetype = RC_NOCACHE,
+ .pc_xdrressize = ST+pAT+1+NFS3_MAXPATHLEN/4,
+ },
+ [NFS3PROC_READ] = {
+ .pc_func = (svc_procfunc) nfsd3_proc_read,
+ .pc_decode = (kxdrproc_t) nfs3svc_decode_readargs,
+ .pc_encode = (kxdrproc_t) nfs3svc_encode_readres,
+ .pc_release = (kxdrproc_t) nfs3svc_release_fhandle,
+ .pc_argsize = sizeof(struct nfsd3_readargs),
+ .pc_ressize = sizeof(struct nfsd3_readres),
+ .pc_cachetype = RC_NOCACHE,
+ .pc_xdrressize = ST+pAT+4+NFSSVC_MAXBLKSIZE/4,
+ },
+ [NFS3PROC_WRITE] = {
+ .pc_func = (svc_procfunc) nfsd3_proc_write,
+ .pc_decode = (kxdrproc_t) nfs3svc_decode_writeargs,
+ .pc_encode = (kxdrproc_t) nfs3svc_encode_writeres,
+ .pc_release = (kxdrproc_t) nfs3svc_release_fhandle,
+ .pc_argsize = sizeof(struct nfsd3_writeargs),
+ .pc_ressize = sizeof(struct nfsd3_writeres),
+ .pc_cachetype = RC_REPLBUFF,
+ .pc_xdrressize = ST+WC+4,
+ },
+ [NFS3PROC_CREATE] = {
+ .pc_func = (svc_procfunc) nfsd3_proc_create,
+ .pc_decode = (kxdrproc_t) nfs3svc_decode_createargs,
+ .pc_encode = (kxdrproc_t) nfs3svc_encode_createres,
+ .pc_release = (kxdrproc_t) nfs3svc_release_fhandle2,
+ .pc_argsize = sizeof(struct nfsd3_createargs),
+ .pc_ressize = sizeof(struct nfsd3_createres),
+ .pc_cachetype = RC_REPLBUFF,
+ .pc_xdrressize = ST+(1+FH+pAT)+WC,
+ },
+ [NFS3PROC_MKDIR] = {
+ .pc_func = (svc_procfunc) nfsd3_proc_mkdir,
+ .pc_decode = (kxdrproc_t) nfs3svc_decode_mkdirargs,
+ .pc_encode = (kxdrproc_t) nfs3svc_encode_createres,
+ .pc_release = (kxdrproc_t) nfs3svc_release_fhandle2,
+ .pc_argsize = sizeof(struct nfsd3_mkdirargs),
+ .pc_ressize = sizeof(struct nfsd3_createres),
+ .pc_cachetype = RC_REPLBUFF,
+ .pc_xdrressize = ST+(1+FH+pAT)+WC,
+ },
+ [NFS3PROC_SYMLINK] = {
+ .pc_func = (svc_procfunc) nfsd3_proc_symlink,
+ .pc_decode = (kxdrproc_t) nfs3svc_decode_symlinkargs,
+ .pc_encode = (kxdrproc_t) nfs3svc_encode_createres,
+ .pc_release = (kxdrproc_t) nfs3svc_release_fhandle2,
+ .pc_argsize = sizeof(struct nfsd3_symlinkargs),
+ .pc_ressize = sizeof(struct nfsd3_createres),
+ .pc_cachetype = RC_REPLBUFF,
+ .pc_xdrressize = ST+(1+FH+pAT)+WC,
+ },
+ [NFS3PROC_MKNOD] = {
+ .pc_func = (svc_procfunc) nfsd3_proc_mknod,
+ .pc_decode = (kxdrproc_t) nfs3svc_decode_mknodargs,
+ .pc_encode = (kxdrproc_t) nfs3svc_encode_createres,
+ .pc_release = (kxdrproc_t) nfs3svc_release_fhandle2,
+ .pc_argsize = sizeof(struct nfsd3_mknodargs),
+ .pc_ressize = sizeof(struct nfsd3_createres),
+ .pc_cachetype = RC_REPLBUFF,
+ .pc_xdrressize = ST+(1+FH+pAT)+WC,
+ },
+ [NFS3PROC_REMOVE] = {
+ .pc_func = (svc_procfunc) nfsd3_proc_remove,
+ .pc_decode = (kxdrproc_t) nfs3svc_decode_diropargs,
+ .pc_encode = (kxdrproc_t) nfs3svc_encode_wccstatres,
+ .pc_release = (kxdrproc_t) nfs3svc_release_fhandle,
+ .pc_argsize = sizeof(struct nfsd3_diropargs),
+ .pc_ressize = sizeof(struct nfsd3_wccstatres),
+ .pc_cachetype = RC_REPLBUFF,
+ .pc_xdrressize = ST+WC,
+ },
+ [NFS3PROC_RMDIR] = {
+ .pc_func = (svc_procfunc) nfsd3_proc_rmdir,
+ .pc_decode = (kxdrproc_t) nfs3svc_decode_diropargs,
+ .pc_encode = (kxdrproc_t) nfs3svc_encode_wccstatres,
+ .pc_release = (kxdrproc_t) nfs3svc_release_fhandle,
+ .pc_argsize = sizeof(struct nfsd3_diropargs),
+ .pc_ressize = sizeof(struct nfsd3_wccstatres),
+ .pc_cachetype = RC_REPLBUFF,
+ .pc_xdrressize = ST+WC,
+ },
+ [NFS3PROC_RENAME] = {
+ .pc_func = (svc_procfunc) nfsd3_proc_rename,
+ .pc_decode = (kxdrproc_t) nfs3svc_decode_renameargs,
+ .pc_encode = (kxdrproc_t) nfs3svc_encode_renameres,
+ .pc_release = (kxdrproc_t) nfs3svc_release_fhandle2,
+ .pc_argsize = sizeof(struct nfsd3_renameargs),
+ .pc_ressize = sizeof(struct nfsd3_renameres),
+ .pc_cachetype = RC_REPLBUFF,
+ .pc_xdrressize = ST+WC+WC,
+ },
+ [NFS3PROC_LINK] = {
+ .pc_func = (svc_procfunc) nfsd3_proc_link,
+ .pc_decode = (kxdrproc_t) nfs3svc_decode_linkargs,
+ .pc_encode = (kxdrproc_t) nfs3svc_encode_linkres,
+ .pc_release = (kxdrproc_t) nfs3svc_release_fhandle2,
+ .pc_argsize = sizeof(struct nfsd3_linkargs),
+ .pc_ressize = sizeof(struct nfsd3_linkres),
+ .pc_cachetype = RC_REPLBUFF,
+ .pc_xdrressize = ST+pAT+WC,
+ },
+ [NFS3PROC_READDIR] = {
+ .pc_func = (svc_procfunc) nfsd3_proc_readdir,
+ .pc_decode = (kxdrproc_t) nfs3svc_decode_readdirargs,
+ .pc_encode = (kxdrproc_t) nfs3svc_encode_readdirres,
+ .pc_release = (kxdrproc_t) nfs3svc_release_fhandle,
+ .pc_argsize = sizeof(struct nfsd3_readdirargs),
+ .pc_ressize = sizeof(struct nfsd3_readdirres),
+ .pc_cachetype = RC_NOCACHE,
+ },
+ [NFS3PROC_READDIRPLUS] = {
+ .pc_func = (svc_procfunc) nfsd3_proc_readdirplus,
+ .pc_decode = (kxdrproc_t) nfs3svc_decode_readdirplusargs,
+ .pc_encode = (kxdrproc_t) nfs3svc_encode_readdirres,
+ .pc_release = (kxdrproc_t) nfs3svc_release_fhandle,
+ .pc_argsize = sizeof(struct nfsd3_readdirplusargs),
+ .pc_ressize = sizeof(struct nfsd3_readdirres),
+ .pc_cachetype = RC_NOCACHE,
+ },
+ [NFS3PROC_FSSTAT] = {
+ .pc_func = (svc_procfunc) nfsd3_proc_fsstat,
+ .pc_decode = (kxdrproc_t) nfs3svc_decode_fhandleargs,
+ .pc_encode = (kxdrproc_t) nfs3svc_encode_fsstatres,
+ .pc_argsize = sizeof(struct nfsd3_fhandleargs),
+ .pc_ressize = sizeof(struct nfsd3_fsstatres),
+ .pc_cachetype = RC_NOCACHE,
+ .pc_xdrressize = ST+pAT+2*6+1,
+ },
+ [NFS3PROC_FSINFO] = {
+ .pc_func = (svc_procfunc) nfsd3_proc_fsinfo,
+ .pc_decode = (kxdrproc_t) nfs3svc_decode_fhandleargs,
+ .pc_encode = (kxdrproc_t) nfs3svc_encode_fsinfores,
+ .pc_argsize = sizeof(struct nfsd3_fhandleargs),
+ .pc_ressize = sizeof(struct nfsd3_fsinfores),
+ .pc_cachetype = RC_NOCACHE,
+ .pc_xdrressize = ST+pAT+12,
+ },
+ [NFS3PROC_PATHCONF] = {
+ .pc_func = (svc_procfunc) nfsd3_proc_pathconf,
+ .pc_decode = (kxdrproc_t) nfs3svc_decode_fhandleargs,
+ .pc_encode = (kxdrproc_t) nfs3svc_encode_pathconfres,
+ .pc_argsize = sizeof(struct nfsd3_fhandleargs),
+ .pc_ressize = sizeof(struct nfsd3_pathconfres),
+ .pc_cachetype = RC_NOCACHE,
+ .pc_xdrressize = ST+pAT+6,
+ },
+ [NFS3PROC_COMMIT] = {
+ .pc_func = (svc_procfunc) nfsd3_proc_commit,
+ .pc_decode = (kxdrproc_t) nfs3svc_decode_commitargs,
+ .pc_encode = (kxdrproc_t) nfs3svc_encode_commitres,
+ .pc_release = (kxdrproc_t) nfs3svc_release_fhandle,
+ .pc_argsize = sizeof(struct nfsd3_commitargs),
+ .pc_ressize = sizeof(struct nfsd3_commitres),
+ .pc_cachetype = RC_NOCACHE,
+ .pc_xdrressize = ST+WC+2,
+ },
};
struct svc_version nfsd_version3 = {
diff --git a/fs/nfsd/nfs3xdr.c b/fs/nfsd/nfs3xdr.c
index 17d0dd9..01d4ec1 100644
--- a/fs/nfsd/nfs3xdr.c
+++ b/fs/nfsd/nfs3xdr.c
@@ -272,6 +272,7 @@
err = vfs_getattr(fhp->fh_export->ex_path.mnt, fhp->fh_dentry,
&fhp->fh_post_attr);
+ fhp->fh_post_change = fhp->fh_dentry->d_inode->i_version;
if (err)
fhp->fh_post_saved = 0;
else
diff --git a/fs/nfsd/nfs4callback.c b/fs/nfsd/nfs4callback.c
index 290289b..3fd23f7 100644
--- a/fs/nfsd/nfs4callback.c
+++ b/fs/nfsd/nfs4callback.c
@@ -140,8 +140,10 @@
int status;
u32 ident;
u32 nops;
+ __be32 *nops_p;
+ u32 minorversion;
u32 taglen;
- char * tag;
+ char *tag;
};
static struct {
@@ -201,33 +203,39 @@
* XDR encode
*/
-static int
+static void
encode_cb_compound_hdr(struct xdr_stream *xdr, struct nfs4_cb_compound_hdr *hdr)
{
__be32 * p;
RESERVE_SPACE(16);
WRITE32(0); /* tag length is always 0 */
- WRITE32(NFS4_MINOR_VERSION);
+ WRITE32(hdr->minorversion);
WRITE32(hdr->ident);
+ hdr->nops_p = p;
WRITE32(hdr->nops);
- return 0;
}
-static int
-encode_cb_recall(struct xdr_stream *xdr, struct nfs4_cb_recall *cb_rec)
+static void encode_cb_nops(struct nfs4_cb_compound_hdr *hdr)
+{
+ *hdr->nops_p = htonl(hdr->nops);
+}
+
+static void
+encode_cb_recall(struct xdr_stream *xdr, struct nfs4_delegation *dp,
+ struct nfs4_cb_compound_hdr *hdr)
{
__be32 *p;
- int len = cb_rec->cbr_fh.fh_size;
+ int len = dp->dl_fh.fh_size;
- RESERVE_SPACE(12+sizeof(cb_rec->cbr_stateid) + len);
+ RESERVE_SPACE(12+sizeof(dp->dl_stateid) + len);
WRITE32(OP_CB_RECALL);
- WRITE32(cb_rec->cbr_stateid.si_generation);
- WRITEMEM(&cb_rec->cbr_stateid.si_opaque, sizeof(stateid_opaque_t));
- WRITE32(cb_rec->cbr_trunc);
+ WRITE32(dp->dl_stateid.si_generation);
+ WRITEMEM(&dp->dl_stateid.si_opaque, sizeof(stateid_opaque_t));
+ WRITE32(0); /* truncate optimization not implemented */
WRITE32(len);
- WRITEMEM(&cb_rec->cbr_fh.fh_base, len);
- return 0;
+ WRITEMEM(&dp->dl_fh.fh_base, len);
+ hdr->nops++;
}
static int
@@ -241,17 +249,18 @@
}
static int
-nfs4_xdr_enc_cb_recall(struct rpc_rqst *req, __be32 *p, struct nfs4_cb_recall *args)
+nfs4_xdr_enc_cb_recall(struct rpc_rqst *req, __be32 *p, struct nfs4_delegation *args)
{
struct xdr_stream xdr;
struct nfs4_cb_compound_hdr hdr = {
- .ident = args->cbr_ident,
- .nops = 1,
+ .ident = args->dl_ident,
};
xdr_init_encode(&xdr, &req->rq_snd_buf, p);
encode_cb_compound_hdr(&xdr, &hdr);
- return (encode_cb_recall(&xdr, args));
+ encode_cb_recall(&xdr, args, &hdr);
+ encode_cb_nops(&hdr);
+ return 0;
}
@@ -358,18 +367,21 @@
.pipe_dir_name = "/nfsd4_cb",
};
+static int max_cb_time(void)
+{
+ return max(NFSD_LEASE_TIME/10, (time_t)1) * HZ;
+}
+
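
[Editor's note, not part of the patch] max_cb_time() above caps the callback RPC at one tenth of the lease period, with a one-second floor and no retries, replacing the old exponential-backoff timeout. A tiny userspace sketch of the same arithmetic; HZ and the 90-second default lease are assumptions for the example:

#include <stdio.h>

#define HZ 1000                 /* illustrative tick rate, not the kernel's */

/* Illustrative only: userspace copy of the max_cb_time() calculation. */
static long max_cb_time(long lease_seconds)
{
        long t = lease_seconds / 10;

        if (t < 1)
                t = 1;
        return t * HZ;
}

int main(void)
{
        /* With a 90-second lease the probe gets 9 seconds; a very short
         * 5-second lease still gets the 1-second floor. */
        printf("lease 90s -> %ld jiffies\n", max_cb_time(90));
        printf("lease  5s -> %ld jiffies\n", max_cb_time(5));
        return 0;
}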
/* Reference counting, callback cleanup, etc., all look racy as heck.
* And why is cb_set an atomic? */
-static struct rpc_clnt *setup_callback_client(struct nfs4_client *clp)
+int setup_callback_client(struct nfs4_client *clp)
{
struct sockaddr_in addr;
- struct nfs4_callback *cb = &clp->cl_callback;
+ struct nfs4_cb_conn *cb = &clp->cl_cb_conn;
struct rpc_timeout timeparms = {
- .to_initval = (NFSD_LEASE_TIME/4) * HZ,
- .to_retries = 5,
- .to_maxval = (NFSD_LEASE_TIME/2) * HZ,
- .to_exponential = 1,
+ .to_initval = max_cb_time(),
+ .to_retries = 0,
};
struct rpc_create_args args = {
.protocol = IPPROTO_TCP,
@@ -386,7 +398,7 @@
struct rpc_clnt *client;
if (!clp->cl_principal && (clp->cl_flavor >= RPC_AUTH_GSS_KRB5))
- return ERR_PTR(-EINVAL);
+ return -EINVAL;
/* Initialize address */
memset(&addr, 0, sizeof(addr));
@@ -396,48 +408,77 @@
/* Create RPC client */
client = rpc_create(&args);
- if (IS_ERR(client))
+ if (IS_ERR(client)) {
dprintk("NFSD: couldn't create callback client: %ld\n",
PTR_ERR(client));
- return client;
+ return PTR_ERR(client);
+ }
+ cb->cb_client = client;
+ return 0;
}
-static int do_probe_callback(void *data)
+static void warn_no_callback_path(struct nfs4_client *clp, int reason)
{
- struct nfs4_client *clp = data;
- struct nfs4_callback *cb = &clp->cl_callback;
+ dprintk("NFSD: warning: no callback path to client %.*s: error %d\n",
+ (int)clp->cl_name.len, clp->cl_name.data, reason);
+}
+
+static void nfsd4_cb_probe_done(struct rpc_task *task, void *calldata)
+{
+ struct nfs4_client *clp = calldata;
+
+ if (task->tk_status)
+ warn_no_callback_path(clp, task->tk_status);
+ else
+ atomic_set(&clp->cl_cb_conn.cb_set, 1);
+ put_nfs4_client(clp);
+}
+
+static const struct rpc_call_ops nfsd4_cb_probe_ops = {
+ .rpc_call_done = nfsd4_cb_probe_done,
+};
+
+static struct rpc_cred *lookup_cb_cred(struct nfs4_cb_conn *cb)
+{
+ struct auth_cred acred = {
+ .machine_cred = 1
+ };
+
+ /*
+ * Note in the gss case this doesn't actually have to wait for a
+ * gss upcall (or any calls to the client); this just creates a
+ * non-uptodate cred which the rpc state machine will fill in with
+ * a refresh_upcall later.
+ */
+ return rpcauth_lookup_credcache(cb->cb_client->cl_auth, &acred,
+ RPCAUTH_LOOKUP_NEW);
+}
+
+void do_probe_callback(struct nfs4_client *clp)
+{
+ struct nfs4_cb_conn *cb = &clp->cl_cb_conn;
struct rpc_message msg = {
.rpc_proc = &nfs4_cb_procedures[NFSPROC4_CLNT_CB_NULL],
.rpc_argp = clp,
};
- struct rpc_clnt *client;
+ struct rpc_cred *cred;
int status;
- client = setup_callback_client(clp);
- if (IS_ERR(client)) {
- status = PTR_ERR(client);
- dprintk("NFSD: couldn't create callback client: %d\n",
- status);
- goto out_err;
+ cred = lookup_cb_cred(cb);
+ if (IS_ERR(cred)) {
+ status = PTR_ERR(cred);
+ goto out;
}
-
- status = rpc_call_sync(client, &msg, RPC_TASK_SOFT);
-
- if (status)
- goto out_release_client;
-
- cb->cb_client = client;
- atomic_set(&cb->cb_set, 1);
- put_nfs4_client(clp);
- return 0;
-out_release_client:
- rpc_shutdown_client(client);
-out_err:
- dprintk("NFSD: warning: no callback path to client %.*s: error %d\n",
- (int)clp->cl_name.len, clp->cl_name.data, status);
- put_nfs4_client(clp);
- return 0;
+ cb->cb_cred = cred;
+ msg.rpc_cred = cb->cb_cred;
+ status = rpc_call_async(cb->cb_client, &msg, RPC_TASK_SOFT,
+ &nfsd4_cb_probe_ops, (void *)clp);
+out:
+ if (status) {
+ warn_no_callback_path(clp, status);
+ put_nfs4_client(clp);
+ }
}
/*
@@ -446,21 +487,65 @@
void
nfsd4_probe_callback(struct nfs4_client *clp)
{
- struct task_struct *t;
+ int status;
- BUG_ON(atomic_read(&clp->cl_callback.cb_set));
+ BUG_ON(atomic_read(&clp->cl_cb_conn.cb_set));
+
+ status = setup_callback_client(clp);
+ if (status) {
+ warn_no_callback_path(clp, status);
+ return;
+ }
/* the task holds a reference to the nfs4_client struct */
atomic_inc(&clp->cl_count);
- t = kthread_run(do_probe_callback, clp, "nfs4_cb_probe");
-
- if (IS_ERR(t))
- atomic_dec(&clp->cl_count);
-
- return;
+ do_probe_callback(clp);
}
+static void nfsd4_cb_recall_done(struct rpc_task *task, void *calldata)
+{
+ struct nfs4_delegation *dp = calldata;
+ struct nfs4_client *clp = dp->dl_client;
+
+ switch (task->tk_status) {
+ case -EIO:
+ /* Network partition? */
+ atomic_set(&clp->cl_cb_conn.cb_set, 0);
+ warn_no_callback_path(clp, task->tk_status);
+ case -EBADHANDLE:
+ case -NFS4ERR_BAD_STATEID:
+ /* Race: client probably got cb_recall
+ * before open reply granting delegation */
+ break;
+ default:
+ /* success, or error we can't handle */
+ return;
+ }
+ if (dp->dl_retries--) {
+ rpc_delay(task, 2*HZ);
+ task->tk_status = 0;
+ rpc_restart_call(task);
+ } else {
+ atomic_set(&clp->cl_cb_conn.cb_set, 0);
+ warn_no_callback_path(clp, task->tk_status);
+ }
+}
+
+static void nfsd4_cb_recall_release(void *calldata)
+{
+ struct nfs4_delegation *dp = calldata;
+ struct nfs4_client *clp = dp->dl_client;
+
+ nfs4_put_delegation(dp);
+ put_nfs4_client(clp);
+}
+
+static const struct rpc_call_ops nfsd4_cb_recall_ops = {
+ .rpc_call_done = nfsd4_cb_recall_done,
+ .rpc_release = nfsd4_cb_recall_release,
+};
+
/*
* called with dp->dl_count inc'ed.
*/
@@ -468,41 +553,19 @@
nfsd4_cb_recall(struct nfs4_delegation *dp)
{
struct nfs4_client *clp = dp->dl_client;
- struct rpc_clnt *clnt = clp->cl_callback.cb_client;
- struct nfs4_cb_recall *cbr = &dp->dl_recall;
+ struct rpc_clnt *clnt = clp->cl_cb_conn.cb_client;
struct rpc_message msg = {
.rpc_proc = &nfs4_cb_procedures[NFSPROC4_CLNT_CB_RECALL],
- .rpc_argp = cbr,
+ .rpc_argp = dp,
+ .rpc_cred = clp->cl_cb_conn.cb_cred
};
- int retries = 1;
- int status = 0;
+ int status;
- cbr->cbr_trunc = 0; /* XXX need to implement truncate optimization */
- cbr->cbr_dp = dp;
-
- status = rpc_call_sync(clnt, &msg, RPC_TASK_SOFT);
- while (retries--) {
- switch (status) {
- case -EIO:
- /* Network partition? */
- atomic_set(&clp->cl_callback.cb_set, 0);
- case -EBADHANDLE:
- case -NFS4ERR_BAD_STATEID:
- /* Race: client probably got cb_recall
- * before open reply granting delegation */
- break;
- default:
- goto out_put_cred;
- }
- ssleep(2);
- status = rpc_call_sync(clnt, &msg, RPC_TASK_SOFT);
+ dp->dl_retries = 1;
+ status = rpc_call_async(clnt, &msg, RPC_TASK_SOFT,
+ &nfsd4_cb_recall_ops, dp);
+ if (status) {
+ put_nfs4_client(clp);
+ nfs4_put_delegation(dp);
}
-out_put_cred:
- /*
- * Success or failure, now we're either waiting for lease expiration
- * or deleg_return.
- */
- put_nfs4_client(clp);
- nfs4_put_delegation(dp);
- return;
}
diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
index b2883e9..7c88017 100644
--- a/fs/nfsd/nfs4proc.c
+++ b/fs/nfsd/nfs4proc.c
@@ -51,6 +51,78 @@
#define NFSDDBG_FACILITY NFSDDBG_PROC
+static u32 nfsd_attrmask[] = {
+ NFSD_WRITEABLE_ATTRS_WORD0,
+ NFSD_WRITEABLE_ATTRS_WORD1,
+ NFSD_WRITEABLE_ATTRS_WORD2
+};
+
+static u32 nfsd41_ex_attrmask[] = {
+ NFSD_SUPPATTR_EXCLCREAT_WORD0,
+ NFSD_SUPPATTR_EXCLCREAT_WORD1,
+ NFSD_SUPPATTR_EXCLCREAT_WORD2
+};
+
+static __be32
+check_attr_support(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
+ u32 *bmval, u32 *writable)
+{
+ struct dentry *dentry = cstate->current_fh.fh_dentry;
+ struct svc_export *exp = cstate->current_fh.fh_export;
+
+ /*
+ * Check whether the requested attributes are supported by this NFSv4
+ * server. According to the spec, unsupported attributes return
+ * ERR_ATTRNOTSUPP.
+ */
+ if ((bmval[0] & ~nfsd_suppattrs0(cstate->minorversion)) ||
+ (bmval[1] & ~nfsd_suppattrs1(cstate->minorversion)) ||
+ (bmval[2] & ~nfsd_suppattrs2(cstate->minorversion)))
+ return nfserr_attrnotsupp;
+
+ /*
+ * Check whether FATTR4_WORD0_ACL and FATTR4_WORD0_FS_LOCATIONS can be
+ * supported in the current environment.
+ */
+ if (bmval[0] & FATTR4_WORD0_ACL) {
+ if (!IS_POSIXACL(dentry->d_inode))
+ return nfserr_attrnotsupp;
+ }
+ if (bmval[0] & FATTR4_WORD0_FS_LOCATIONS) {
+ if (exp->ex_fslocs.locations == NULL)
+ return nfserr_attrnotsupp;
+ }
+
+ /*
+ * According to the spec, attempts to set read-only attributes return ERR_INVAL.
+ */
+ if (writable) {
+ if ((bmval[0] & ~writable[0]) || (bmval[1] & ~writable[1]) ||
+ (bmval[2] & ~writable[2]))
+ return nfserr_inval;
+ }
+
+ return nfs_ok;
+}
+
+static __be32
+nfsd4_check_open_attributes(struct svc_rqst *rqstp,
+ struct nfsd4_compound_state *cstate, struct nfsd4_open *open)
+{
+ __be32 status = nfs_ok;
+
+ if (open->op_create == NFS4_OPEN_CREATE) {
+ if (open->op_createmode == NFS4_CREATE_UNCHECKED
+ || open->op_createmode == NFS4_CREATE_GUARDED)
+ status = check_attr_support(rqstp, cstate,
+ open->op_bmval, nfsd_attrmask);
+ else if (open->op_createmode == NFS4_CREATE_EXCLUSIVE4_1)
+ status = check_attr_support(rqstp, cstate,
+ open->op_bmval, nfsd41_ex_attrmask);
+ }
+
+ return status;
+}
+
static inline void
fh_dup2(struct svc_fh *dst, struct svc_fh *src)
{
@@ -225,6 +297,10 @@
if (status)
goto out;
+ status = nfsd4_check_open_attributes(rqstp, cstate, open);
+ if (status)
+ goto out;
+
/* Openowner is now set, so sequence id will get bumped. Now we need
* these checks before we do any creates: */
status = nfserr_grace;
@@ -395,6 +471,11 @@
if (status)
return status;
+ status = check_attr_support(rqstp, cstate, create->cr_bmval,
+ nfsd_attrmask);
+ if (status)
+ return status;
+
switch (create->cr_type) {
case NF4LNK:
/* ugh! we have to null-terminate the linktext, or
@@ -689,6 +770,12 @@
if (status)
return status;
status = nfs_ok;
+
+ status = check_attr_support(rqstp, cstate, setattr->sa_bmval,
+ nfsd_attrmask);
+ if (status)
+ goto out;
+
if (setattr->sa_acl != NULL)
status = nfsd4_set_nfs4_acl(rqstp, &cstate->current_fh,
setattr->sa_acl);
@@ -763,10 +850,10 @@
if (status)
return status;
- if ((verify->ve_bmval[0] & ~nfsd_suppattrs0(cstate->minorversion))
- || (verify->ve_bmval[1] & ~nfsd_suppattrs1(cstate->minorversion))
- || (verify->ve_bmval[2] & ~nfsd_suppattrs2(cstate->minorversion)))
- return nfserr_attrnotsupp;
+ status = check_attr_support(rqstp, cstate, verify->ve_bmval, NULL);
+ if (status)
+ return status;
+
if ((verify->ve_bmval[0] & FATTR4_WORD0_RDATTR_ERROR)
|| (verify->ve_bmval[1] & NFSD_WRITEONLY_ATTRS_WORD1))
return nfserr_inval;
@@ -1226,24 +1313,9 @@
return "unknown_operation";
}
-#define nfs4svc_decode_voidargs NULL
-#define nfs4svc_release_void NULL
#define nfsd4_voidres nfsd4_voidargs
-#define nfs4svc_release_compound NULL
struct nfsd4_voidargs { int dummy; };
-#define PROC(name, argt, rest, relt, cache, respsize) \
- { (svc_procfunc) nfsd4_proc_##name, \
- (kxdrproc_t) nfs4svc_decode_##argt##args, \
- (kxdrproc_t) nfs4svc_encode_##rest##res, \
- (kxdrproc_t) nfs4svc_release_##relt, \
- sizeof(struct nfsd4_##argt##args), \
- sizeof(struct nfsd4_##rest##res), \
- 0, \
- cache, \
- respsize, \
- }
-
/*
* TODO: At the present time, the NFSv4 server does not do XID caching
* of requests. Implementing XID caching would not be a serious problem,
@@ -1255,8 +1327,23 @@
* better XID's.
*/
static struct svc_procedure nfsd_procedures4[2] = {
- PROC(null, void, void, void, RC_NOCACHE, 1),
- PROC(compound, compound, compound, compound, RC_NOCACHE, NFSD_BUFSIZE/4)
+ [NFSPROC4_NULL] = {
+ .pc_func = (svc_procfunc) nfsd4_proc_null,
+ .pc_encode = (kxdrproc_t) nfs4svc_encode_voidres,
+ .pc_argsize = sizeof(struct nfsd4_voidargs),
+ .pc_ressize = sizeof(struct nfsd4_voidres),
+ .pc_cachetype = RC_NOCACHE,
+ .pc_xdrressize = 1,
+ },
+ [NFSPROC4_COMPOUND] = {
+ .pc_func = (svc_procfunc) nfsd4_proc_compound,
+ .pc_decode = (kxdrproc_t) nfs4svc_decode_compoundargs,
+ .pc_encode = (kxdrproc_t) nfs4svc_encode_compoundres,
+ .pc_argsize = sizeof(struct nfsd4_compoundargs),
+ .pc_ressize = sizeof(struct nfsd4_compoundres),
+ .pc_cachetype = RC_NOCACHE,
+ .pc_xdrressize = NFSD_BUFSIZE/4,
+ },
};
struct svc_version nfsd_version4 = {
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index 3b711f5..980a216 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -182,7 +182,7 @@
{
struct nfs4_delegation *dp;
struct nfs4_file *fp = stp->st_file;
- struct nfs4_callback *cb = &stp->st_stateowner->so_client->cl_callback;
+ struct nfs4_cb_conn *cb = &stp->st_stateowner->so_client->cl_cb_conn;
dprintk("NFSD alloc_init_deleg\n");
if (fp->fi_had_conflict)
@@ -203,10 +203,8 @@
get_file(stp->st_vfs_file);
dp->dl_vfs_file = stp->st_vfs_file;
dp->dl_type = type;
- dp->dl_recall.cbr_dp = NULL;
- dp->dl_recall.cbr_ident = cb->cb_ident;
- dp->dl_recall.cbr_trunc = 0;
- dp->dl_stateid.si_boot = boot_time;
+ dp->dl_ident = cb->cb_ident;
+ dp->dl_stateid.si_boot = get_seconds();
dp->dl_stateid.si_stateownerid = current_delegid++;
dp->dl_stateid.si_fileid = 0;
dp->dl_stateid.si_generation = 0;
@@ -427,6 +425,11 @@
{
int status = 0, np = fchan->maxreqs * NFSD_PAGES_PER_SLOT;
+ if (fchan->maxreqs < 1)
+ return nfserr_inval;
+ else if (fchan->maxreqs > NFSD_MAX_SLOTS_PER_SESSION)
+ fchan->maxreqs = NFSD_MAX_SLOTS_PER_SESSION;
+
spin_lock(&nfsd_serv->sv_lock);
if (np + nfsd_serv->sv_drc_pages_used > nfsd_serv->sv_drc_max_pages)
np = nfsd_serv->sv_drc_max_pages - nfsd_serv->sv_drc_pages_used;
@@ -446,8 +449,8 @@
* fchan holds the client values on input, and the server values on output
*/
static int init_forechannel_attrs(struct svc_rqst *rqstp,
- struct nfsd4_session *session,
- struct nfsd4_channel_attrs *fchan)
+ struct nfsd4_channel_attrs *session_fchan,
+ struct nfsd4_channel_attrs *fchan)
{
int status = 0;
__u32 maxcount = svc_max_payload(rqstp);
@@ -457,21 +460,21 @@
/* Use the client's max request and max response size if possible */
if (fchan->maxreq_sz > maxcount)
fchan->maxreq_sz = maxcount;
- session->se_fmaxreq_sz = fchan->maxreq_sz;
+ session_fchan->maxreq_sz = fchan->maxreq_sz;
if (fchan->maxresp_sz > maxcount)
fchan->maxresp_sz = maxcount;
- session->se_fmaxresp_sz = fchan->maxresp_sz;
+ session_fchan->maxresp_sz = fchan->maxresp_sz;
/* Set the max response cached size our default which is
* a multiple of PAGE_SIZE and small */
- session->se_fmaxresp_cached = NFSD_PAGES_PER_SLOT * PAGE_SIZE;
- fchan->maxresp_cached = session->se_fmaxresp_cached;
+ session_fchan->maxresp_cached = NFSD_PAGES_PER_SLOT * PAGE_SIZE;
+ fchan->maxresp_cached = session_fchan->maxresp_cached;
/* Use the client's maxops if possible */
if (fchan->maxops > NFSD_MAX_OPS_PER_COMPOUND)
fchan->maxops = NFSD_MAX_OPS_PER_COMPOUND;
- session->se_fmaxops = fchan->maxops;
+ session_fchan->maxops = fchan->maxops;
/* try to use the client requested number of slots */
if (fchan->maxreqs > NFSD_MAX_SLOTS_PER_SESSION)
@@ -483,7 +486,7 @@
*/
status = set_forechannel_maxreqs(fchan);
- session->se_fnumslots = fchan->maxreqs;
+ session_fchan->maxreqs = fchan->maxreqs;
return status;
}
@@ -497,12 +500,14 @@
memset(&tmp, 0, sizeof(tmp));
/* FIXME: For now, we just accept the client back channel attributes. */
- status = init_forechannel_attrs(rqstp, &tmp, &cses->fore_channel);
+ tmp.se_bchannel = cses->back_channel;
+ status = init_forechannel_attrs(rqstp, &tmp.se_fchannel,
+ &cses->fore_channel);
if (status)
goto out;
/* allocate struct nfsd4_session and slot table in one piece */
- slotsize = tmp.se_fnumslots * sizeof(struct nfsd4_slot);
+ slotsize = tmp.se_fchannel.maxreqs * sizeof(struct nfsd4_slot);
new = kzalloc(sizeof(*new) + slotsize, GFP_KERNEL);
if (!new)
goto out;
@@ -576,7 +581,7 @@
int i;
ses = container_of(kref, struct nfsd4_session, se_ref);
- for (i = 0; i < ses->se_fnumslots; i++) {
+ for (i = 0; i < ses->se_fchannel.maxreqs; i++) {
struct nfsd4_cache_entry *e = &ses->se_slots[i].sl_cache_entry;
nfsd4_release_respages(e->ce_respages, e->ce_resused);
}
@@ -632,16 +637,20 @@
static void
shutdown_callback_client(struct nfs4_client *clp)
{
- struct rpc_clnt *clnt = clp->cl_callback.cb_client;
+ struct rpc_clnt *clnt = clp->cl_cb_conn.cb_client;
if (clnt) {
/*
* Callback threads take a reference on the client, so there
* should be no outstanding callbacks at this point.
*/
- clp->cl_callback.cb_client = NULL;
+ clp->cl_cb_conn.cb_client = NULL;
rpc_shutdown_client(clnt);
}
+ if (clp->cl_cb_conn.cb_cred) {
+ put_rpccred(clp->cl_cb_conn.cb_cred);
+ clp->cl_cb_conn.cb_cred = NULL;
+ }
}
static inline void
@@ -714,7 +723,7 @@
return NULL;
memcpy(clp->cl_recdir, recdir, HEXDIR_LEN);
atomic_set(&clp->cl_count, 1);
- atomic_set(&clp->cl_callback.cb_set, 0);
+ atomic_set(&clp->cl_cb_conn.cb_set, 0);
INIT_LIST_HEAD(&clp->cl_idhash);
INIT_LIST_HEAD(&clp->cl_strhash);
INIT_LIST_HEAD(&clp->cl_openowners);
@@ -966,7 +975,7 @@
static void
gen_callback(struct nfs4_client *clp, struct nfsd4_setclientid *se)
{
- struct nfs4_callback *cb = &clp->cl_callback;
+ struct nfs4_cb_conn *cb = &clp->cl_cb_conn;
/* Currently, we only support tcp for the callback channel */
if ((se->se_callback_netid_len != 3) || memcmp((char *)se->se_callback_netid_val, "tcp", 3))
@@ -975,6 +984,7 @@
if ( !(parse_ipv4(se->se_callback_addr_len, se->se_callback_addr_val,
&cb->cb_addr, &cb->cb_port)))
goto out_err;
+ cb->cb_minorversion = 0;
cb->cb_prog = se->se_callback_prog;
cb->cb_ident = se->se_callback_ident;
return;
@@ -1128,7 +1138,7 @@
* is sent (lease renewal).
*/
if (seq && nfsd4_not_cached(resp)) {
- seq->maxslots = resp->cstate.session->se_fnumslots;
+ seq->maxslots = resp->cstate.session->se_fchannel.maxreqs;
return nfs_ok;
}
@@ -1238,12 +1248,6 @@
expire_client(conf);
goto out_new;
}
- if (ip_addr != conf->cl_addr &&
- !(exid->flags & EXCHGID4_FLAG_UPD_CONFIRMED_REC_A)) {
- /* Client collision. 18.35.4 case 3 */
- status = nfserr_clid_inuse;
- goto out;
- }
/*
* Set bit when the owner id and verifier map to an already
* confirmed client id (18.35.3).
@@ -1257,12 +1261,12 @@
copy_verf(conf, &verf);
new = conf;
goto out_copy;
- } else {
- /* 18.35.4 case 7 */
- if (exid->flags & EXCHGID4_FLAG_UPD_CONFIRMED_REC_A) {
- status = nfserr_noent;
- goto out;
- }
+ }
+
+ /* 18.35.4 case 7 */
+ if (exid->flags & EXCHGID4_FLAG_UPD_CONFIRMED_REC_A) {
+ status = nfserr_noent;
+ goto out;
}
unconf = find_unconfirmed_client_by_str(dname, strhashval, true);
@@ -1471,7 +1475,7 @@
goto out;
status = nfserr_badslot;
- if (seq->slotid >= session->se_fnumslots)
+ if (seq->slotid >= session->se_fchannel.maxreqs)
goto out;
slot = &session->se_slots[seq->slotid];
@@ -1686,9 +1690,7 @@
else {
/* XXX: We just turn off callbacks until we can handle
* change request correctly. */
- atomic_set(&conf->cl_callback.cb_set, 0);
- gen_confirm(conf);
- nfsd4_remove_clid_dir(unconf);
+ atomic_set(&conf->cl_cb_conn.cb_set, 0);
expire_client(unconf);
status = nfs_ok;
@@ -1882,7 +1884,7 @@
stp->st_stateowner = sop;
get_nfs4_file(fp);
stp->st_file = fp;
- stp->st_stateid.si_boot = boot_time;
+ stp->st_stateid.si_boot = get_seconds();
stp->st_stateid.si_stateownerid = sop->so_id;
stp->st_stateid.si_fileid = fp->fi_id;
stp->st_stateid.si_generation = 0;
@@ -2059,19 +2061,6 @@
}
/*
- * Recall a delegation
- */
-static int
-do_recall(void *__dp)
-{
- struct nfs4_delegation *dp = __dp;
-
- dp->dl_file->fi_had_conflict = true;
- nfsd4_cb_recall(dp);
- return 0;
-}
-
-/*
* Spawn a thread to perform a recall on the delegation represented
* by the lease (file_lock)
*
@@ -2082,8 +2071,7 @@
static
void nfsd_break_deleg_cb(struct file_lock *fl)
{
- struct nfs4_delegation *dp= (struct nfs4_delegation *)fl->fl_owner;
- struct task_struct *t;
+ struct nfs4_delegation *dp = (struct nfs4_delegation *)fl->fl_owner;
dprintk("NFSD nfsd_break_deleg_cb: dp %p fl %p\n",dp,fl);
if (!dp)
@@ -2111,16 +2099,8 @@
*/
fl->fl_break_time = 0;
- t = kthread_run(do_recall, dp, "%s", "nfs4_cb_recall");
- if (IS_ERR(t)) {
- struct nfs4_client *clp = dp->dl_client;
-
- printk(KERN_INFO "NFSD: Callback thread failed for "
- "for client (clientid %08x/%08x)\n",
- clp->cl_clientid.cl_boot, clp->cl_clientid.cl_id);
- put_nfs4_client(dp->dl_client);
- nfs4_put_delegation(dp);
- }
+ dp->dl_file->fi_had_conflict = true;
+ nfsd4_cb_recall(dp);
}
/*
@@ -2422,7 +2402,7 @@
{
struct nfs4_delegation *dp;
struct nfs4_stateowner *sop = stp->st_stateowner;
- struct nfs4_callback *cb = &sop->so_client->cl_callback;
+ struct nfs4_cb_conn *cb = &sop->so_client->cl_cb_conn;
struct file_lock fl, *flp = &fl;
int status, flag = 0;
@@ -2614,7 +2594,7 @@
renew_client(clp);
status = nfserr_cb_path_down;
if (!list_empty(&clp->cl_delegations)
- && !atomic_read(&clp->cl_callback.cb_set))
+ && !atomic_read(&clp->cl_cb_conn.cb_set))
goto out;
status = nfs_ok;
out:
@@ -2738,12 +2718,42 @@
static int
STALE_STATEID(stateid_t *stateid)
{
- if (stateid->si_boot == boot_time)
- return 0;
- dprintk("NFSD: stale stateid (%08x/%08x/%08x/%08x)!\n",
- stateid->si_boot, stateid->si_stateownerid, stateid->si_fileid,
- stateid->si_generation);
- return 1;
+ if (time_after((unsigned long)boot_time,
+ (unsigned long)stateid->si_boot)) {
+ dprintk("NFSD: stale stateid (%08x/%08x/%08x/%08x)!\n",
+ stateid->si_boot, stateid->si_stateownerid,
+ stateid->si_fileid, stateid->si_generation);
+ return 1;
+ }
+ return 0;
+}
+
+static int
+EXPIRED_STATEID(stateid_t *stateid)
+{
+ if (time_before((unsigned long)boot_time,
+ ((unsigned long)stateid->si_boot)) &&
+ time_before((unsigned long)(stateid->si_boot + lease_time), get_seconds())) {
+ dprintk("NFSD: expired stateid (%08x/%08x/%08x/%08x)!\n",
+ stateid->si_boot, stateid->si_stateownerid,
+ stateid->si_fileid, stateid->si_generation);
+ return 1;
+ }
+ return 0;
+}
+
+static __be32
+stateid_error_map(stateid_t *stateid)
+{
+ if (STALE_STATEID(stateid))
+ return nfserr_stale_stateid;
+ if (EXPIRED_STATEID(stateid))
+ return nfserr_expired;
+
+ dprintk("NFSD: bad stateid (%08x/%08x/%08x/%08x)!\n",
+ stateid->si_boot, stateid->si_stateownerid,
+ stateid->si_fileid, stateid->si_generation);
+ return nfserr_bad_stateid;
}
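
A worked example of the classification above may help; the numbers are illustrative only and assume the boot_time and lease_time globals used by these helpers:

/*
 * Illustrative only, assuming boot_time = 1000 and lease_time = 90:
 *
 *   si_boot =  900  (older than this boot)           -> STALE_STATEID()   -> nfserr_stale_stateid
 *   si_boot = 1050, get_seconds() = 1200 (> 1050+90) -> EXPIRED_STATEID() -> nfserr_expired
 *   si_boot = 1000  (this boot, still within lease)  -> neither           -> nfserr_bad_stateid
 */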
static inline int
@@ -2867,8 +2877,10 @@
status = nfserr_bad_stateid;
if (is_delegation_stateid(stateid)) {
dp = find_delegation_stateid(ino, stateid);
- if (!dp)
+ if (!dp) {
+ status = stateid_error_map(stateid);
goto out;
+ }
status = check_stateid_generation(stateid, &dp->dl_stateid,
flags);
if (status)
@@ -2881,8 +2893,10 @@
*filpp = dp->dl_vfs_file;
} else { /* open or lock stateid */
stp = find_stateid(stateid, flags);
- if (!stp)
+ if (!stp) {
+ status = stateid_error_map(stateid);
goto out;
+ }
if (nfs4_check_fh(current_fh, stp))
goto out;
if (!stp->st_stateowner->so_confirmed)
@@ -2956,7 +2970,7 @@
*/
sop = search_close_lru(stateid->si_stateownerid, flags);
if (sop == NULL)
- return nfserr_bad_stateid;
+ return stateid_error_map(stateid);
*sopp = sop;
goto check_replay;
}
@@ -3227,8 +3241,10 @@
if (!is_delegation_stateid(stateid))
goto out;
dp = find_delegation_stateid(inode, stateid);
- if (!dp)
+ if (!dp) {
+ status = stateid_error_map(stateid);
goto out;
+ }
status = check_stateid_generation(stateid, &dp->dl_stateid, flags);
if (status)
goto out;
@@ -3455,7 +3471,7 @@
stp->st_stateowner = sop;
get_nfs4_file(fp);
stp->st_file = fp;
- stp->st_stateid.si_boot = boot_time;
+ stp->st_stateid.si_boot = get_seconds();
stp->st_stateid.si_stateownerid = sop->so_id;
stp->st_stateid.si_fileid = fp->fi_id;
stp->st_stateid.si_generation = 0;
@@ -3987,6 +4003,7 @@
INIT_LIST_HEAD(&conf_str_hashtbl[i]);
INIT_LIST_HEAD(&unconf_str_hashtbl[i]);
INIT_LIST_HEAD(&unconf_id_hashtbl[i]);
+ INIT_LIST_HEAD(&reclaim_str_hashtbl[i]);
}
for (i = 0; i < SESSION_HASH_SIZE; i++)
INIT_LIST_HEAD(&sessionid_hashtbl[i]);
@@ -4009,8 +4026,6 @@
INIT_LIST_HEAD(&close_lru);
INIT_LIST_HEAD(&client_lru);
INIT_LIST_HEAD(&del_recall_lru);
- for (i = 0; i < CLIENT_HASH_SIZE; i++)
- INIT_LIST_HEAD(&reclaim_str_hashtbl[i]);
reclaim_str_hashtbl_size = 0;
return 0;
}
diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
index b73549d..2dcc7fe 100644
--- a/fs/nfsd/nfs4xdr.c
+++ b/fs/nfsd/nfs4xdr.c
@@ -83,16 +83,6 @@
return 0;
}
-/*
- * START OF "GENERIC" DECODE ROUTINES.
- * These may look a little ugly since they are imported from a "generic"
- * set of XDR encode/decode routines which are intended to be shared by
- * all of our NFSv4 implementations (OpenBSD, MacOS X...).
- *
- * If the pain of reading these is too great, it should be a straightforward
- * task to translate them into Linux-specific versions which are more
- * consistent with the style used in NFSv2/v3...
- */
#define DECODE_HEAD \
__be32 *p; \
__be32 status
@@ -254,20 +244,8 @@
DECODE_TAIL;
}
-static u32 nfsd_attrmask[] = {
- NFSD_WRITEABLE_ATTRS_WORD0,
- NFSD_WRITEABLE_ATTRS_WORD1,
- NFSD_WRITEABLE_ATTRS_WORD2
-};
-
-static u32 nfsd41_ex_attrmask[] = {
- NFSD_SUPPATTR_EXCLCREAT_WORD0,
- NFSD_SUPPATTR_EXCLCREAT_WORD1,
- NFSD_SUPPATTR_EXCLCREAT_WORD2
-};
-
static __be32
-nfsd4_decode_fattr(struct nfsd4_compoundargs *argp, u32 *bmval, u32 *writable,
+nfsd4_decode_fattr(struct nfsd4_compoundargs *argp, u32 *bmval,
struct iattr *iattr, struct nfs4_acl **acl)
{
int expected_len, len = 0;
@@ -280,18 +258,6 @@
if ((status = nfsd4_decode_bitmap(argp, bmval)))
return status;
- /*
- * According to spec, unsupported attributes return ERR_ATTRNOTSUPP;
- * read-only attributes return ERR_INVAL.
- */
- if ((bmval[0] & ~nfsd_suppattrs0(argp->minorversion)) ||
- (bmval[1] & ~nfsd_suppattrs1(argp->minorversion)) ||
- (bmval[2] & ~nfsd_suppattrs2(argp->minorversion)))
- return nfserr_attrnotsupp;
- if ((bmval[0] & ~writable[0]) || (bmval[1] & ~writable[1]) ||
- (bmval[2] & ~writable[2]))
- return nfserr_inval;
-
READ_BUF(4);
READ32(expected_len);
@@ -424,8 +390,11 @@
goto xdr_error;
}
}
- BUG_ON(bmval[2]); /* no such writeable attr supported yet */
- if (len != expected_len)
+ if (bmval[0] & ~NFSD_WRITEABLE_ATTRS_WORD0
+ || bmval[1] & ~NFSD_WRITEABLE_ATTRS_WORD1
+ || bmval[2] & ~NFSD_WRITEABLE_ATTRS_WORD2)
+ READ_BUF(expected_len - len);
+ else if (len != expected_len)
goto xdr_error;
DECODE_TAIL;
@@ -518,8 +487,8 @@
if ((status = check_filename(create->cr_name, create->cr_namelen, nfserr_inval)))
return status;
- status = nfsd4_decode_fattr(argp, create->cr_bmval, nfsd_attrmask,
- &create->cr_iattr, &create->cr_acl);
+ status = nfsd4_decode_fattr(argp, create->cr_bmval, &create->cr_iattr,
+ &create->cr_acl);
if (status)
goto out;
@@ -682,7 +651,7 @@
case NFS4_CREATE_UNCHECKED:
case NFS4_CREATE_GUARDED:
status = nfsd4_decode_fattr(argp, open->op_bmval,
- nfsd_attrmask, &open->op_iattr, &open->op_acl);
+ &open->op_iattr, &open->op_acl);
if (status)
goto out;
break;
@@ -696,8 +665,7 @@
READ_BUF(8);
COPYMEM(open->op_verf.data, 8);
status = nfsd4_decode_fattr(argp, open->op_bmval,
- nfsd41_ex_attrmask, &open->op_iattr,
- &open->op_acl);
+ &open->op_iattr, &open->op_acl);
if (status)
goto out;
break;
@@ -893,8 +861,8 @@
status = nfsd4_decode_stateid(argp, &setattr->sa_stateid);
if (status)
return status;
- return nfsd4_decode_fattr(argp, setattr->sa_bmval, nfsd_attrmask,
- &setattr->sa_iattr, &setattr->sa_acl);
+ return nfsd4_decode_fattr(argp, setattr->sa_bmval, &setattr->sa_iattr,
+ &setattr->sa_acl);
}
static __be32
@@ -1328,64 +1296,64 @@
};
static nfsd4_dec nfsd41_dec_ops[] = {
- [OP_ACCESS] (nfsd4_dec)nfsd4_decode_access,
- [OP_CLOSE] (nfsd4_dec)nfsd4_decode_close,
- [OP_COMMIT] (nfsd4_dec)nfsd4_decode_commit,
- [OP_CREATE] (nfsd4_dec)nfsd4_decode_create,
- [OP_DELEGPURGE] (nfsd4_dec)nfsd4_decode_notsupp,
- [OP_DELEGRETURN] (nfsd4_dec)nfsd4_decode_delegreturn,
- [OP_GETATTR] (nfsd4_dec)nfsd4_decode_getattr,
- [OP_GETFH] (nfsd4_dec)nfsd4_decode_noop,
- [OP_LINK] (nfsd4_dec)nfsd4_decode_link,
- [OP_LOCK] (nfsd4_dec)nfsd4_decode_lock,
- [OP_LOCKT] (nfsd4_dec)nfsd4_decode_lockt,
- [OP_LOCKU] (nfsd4_dec)nfsd4_decode_locku,
- [OP_LOOKUP] (nfsd4_dec)nfsd4_decode_lookup,
- [OP_LOOKUPP] (nfsd4_dec)nfsd4_decode_noop,
- [OP_NVERIFY] (nfsd4_dec)nfsd4_decode_verify,
- [OP_OPEN] (nfsd4_dec)nfsd4_decode_open,
- [OP_OPENATTR] (nfsd4_dec)nfsd4_decode_notsupp,
- [OP_OPEN_CONFIRM] (nfsd4_dec)nfsd4_decode_notsupp,
- [OP_OPEN_DOWNGRADE] (nfsd4_dec)nfsd4_decode_open_downgrade,
- [OP_PUTFH] (nfsd4_dec)nfsd4_decode_putfh,
- [OP_PUTPUBFH] (nfsd4_dec)nfsd4_decode_notsupp,
- [OP_PUTROOTFH] (nfsd4_dec)nfsd4_decode_noop,
- [OP_READ] (nfsd4_dec)nfsd4_decode_read,
- [OP_READDIR] (nfsd4_dec)nfsd4_decode_readdir,
- [OP_READLINK] (nfsd4_dec)nfsd4_decode_noop,
- [OP_REMOVE] (nfsd4_dec)nfsd4_decode_remove,
- [OP_RENAME] (nfsd4_dec)nfsd4_decode_rename,
- [OP_RENEW] (nfsd4_dec)nfsd4_decode_notsupp,
- [OP_RESTOREFH] (nfsd4_dec)nfsd4_decode_noop,
- [OP_SAVEFH] (nfsd4_dec)nfsd4_decode_noop,
- [OP_SECINFO] (nfsd4_dec)nfsd4_decode_secinfo,
- [OP_SETATTR] (nfsd4_dec)nfsd4_decode_setattr,
- [OP_SETCLIENTID] (nfsd4_dec)nfsd4_decode_notsupp,
- [OP_SETCLIENTID_CONFIRM](nfsd4_dec)nfsd4_decode_notsupp,
- [OP_VERIFY] (nfsd4_dec)nfsd4_decode_verify,
- [OP_WRITE] (nfsd4_dec)nfsd4_decode_write,
- [OP_RELEASE_LOCKOWNER] (nfsd4_dec)nfsd4_decode_notsupp,
+ [OP_ACCESS] = (nfsd4_dec)nfsd4_decode_access,
+ [OP_CLOSE] = (nfsd4_dec)nfsd4_decode_close,
+ [OP_COMMIT] = (nfsd4_dec)nfsd4_decode_commit,
+ [OP_CREATE] = (nfsd4_dec)nfsd4_decode_create,
+ [OP_DELEGPURGE] = (nfsd4_dec)nfsd4_decode_notsupp,
+ [OP_DELEGRETURN] = (nfsd4_dec)nfsd4_decode_delegreturn,
+ [OP_GETATTR] = (nfsd4_dec)nfsd4_decode_getattr,
+ [OP_GETFH] = (nfsd4_dec)nfsd4_decode_noop,
+ [OP_LINK] = (nfsd4_dec)nfsd4_decode_link,
+ [OP_LOCK] = (nfsd4_dec)nfsd4_decode_lock,
+ [OP_LOCKT] = (nfsd4_dec)nfsd4_decode_lockt,
+ [OP_LOCKU] = (nfsd4_dec)nfsd4_decode_locku,
+ [OP_LOOKUP] = (nfsd4_dec)nfsd4_decode_lookup,
+ [OP_LOOKUPP] = (nfsd4_dec)nfsd4_decode_noop,
+ [OP_NVERIFY] = (nfsd4_dec)nfsd4_decode_verify,
+ [OP_OPEN] = (nfsd4_dec)nfsd4_decode_open,
+ [OP_OPENATTR] = (nfsd4_dec)nfsd4_decode_notsupp,
+ [OP_OPEN_CONFIRM] = (nfsd4_dec)nfsd4_decode_notsupp,
+ [OP_OPEN_DOWNGRADE] = (nfsd4_dec)nfsd4_decode_open_downgrade,
+ [OP_PUTFH] = (nfsd4_dec)nfsd4_decode_putfh,
+ [OP_PUTPUBFH] = (nfsd4_dec)nfsd4_decode_notsupp,
+ [OP_PUTROOTFH] = (nfsd4_dec)nfsd4_decode_noop,
+ [OP_READ] = (nfsd4_dec)nfsd4_decode_read,
+ [OP_READDIR] = (nfsd4_dec)nfsd4_decode_readdir,
+ [OP_READLINK] = (nfsd4_dec)nfsd4_decode_noop,
+ [OP_REMOVE] = (nfsd4_dec)nfsd4_decode_remove,
+ [OP_RENAME] = (nfsd4_dec)nfsd4_decode_rename,
+ [OP_RENEW] = (nfsd4_dec)nfsd4_decode_notsupp,
+ [OP_RESTOREFH] = (nfsd4_dec)nfsd4_decode_noop,
+ [OP_SAVEFH] = (nfsd4_dec)nfsd4_decode_noop,
+ [OP_SECINFO] = (nfsd4_dec)nfsd4_decode_secinfo,
+ [OP_SETATTR] = (nfsd4_dec)nfsd4_decode_setattr,
+ [OP_SETCLIENTID] = (nfsd4_dec)nfsd4_decode_notsupp,
+ [OP_SETCLIENTID_CONFIRM]= (nfsd4_dec)nfsd4_decode_notsupp,
+ [OP_VERIFY] = (nfsd4_dec)nfsd4_decode_verify,
+ [OP_WRITE] = (nfsd4_dec)nfsd4_decode_write,
+ [OP_RELEASE_LOCKOWNER] = (nfsd4_dec)nfsd4_decode_notsupp,
/* new operations for NFSv4.1 */
- [OP_BACKCHANNEL_CTL] (nfsd4_dec)nfsd4_decode_notsupp,
- [OP_BIND_CONN_TO_SESSION](nfsd4_dec)nfsd4_decode_notsupp,
- [OP_EXCHANGE_ID] (nfsd4_dec)nfsd4_decode_exchange_id,
- [OP_CREATE_SESSION] (nfsd4_dec)nfsd4_decode_create_session,
- [OP_DESTROY_SESSION] (nfsd4_dec)nfsd4_decode_destroy_session,
- [OP_FREE_STATEID] (nfsd4_dec)nfsd4_decode_notsupp,
- [OP_GET_DIR_DELEGATION] (nfsd4_dec)nfsd4_decode_notsupp,
- [OP_GETDEVICEINFO] (nfsd4_dec)nfsd4_decode_notsupp,
- [OP_GETDEVICELIST] (nfsd4_dec)nfsd4_decode_notsupp,
- [OP_LAYOUTCOMMIT] (nfsd4_dec)nfsd4_decode_notsupp,
- [OP_LAYOUTGET] (nfsd4_dec)nfsd4_decode_notsupp,
- [OP_LAYOUTRETURN] (nfsd4_dec)nfsd4_decode_notsupp,
- [OP_SECINFO_NO_NAME] (nfsd4_dec)nfsd4_decode_notsupp,
- [OP_SEQUENCE] (nfsd4_dec)nfsd4_decode_sequence,
- [OP_SET_SSV] (nfsd4_dec)nfsd4_decode_notsupp,
- [OP_TEST_STATEID] (nfsd4_dec)nfsd4_decode_notsupp,
- [OP_WANT_DELEGATION] (nfsd4_dec)nfsd4_decode_notsupp,
- [OP_DESTROY_CLIENTID] (nfsd4_dec)nfsd4_decode_notsupp,
- [OP_RECLAIM_COMPLETE] (nfsd4_dec)nfsd4_decode_notsupp,
+ [OP_BACKCHANNEL_CTL] = (nfsd4_dec)nfsd4_decode_notsupp,
+ [OP_BIND_CONN_TO_SESSION]= (nfsd4_dec)nfsd4_decode_notsupp,
+ [OP_EXCHANGE_ID] = (nfsd4_dec)nfsd4_decode_exchange_id,
+ [OP_CREATE_SESSION] = (nfsd4_dec)nfsd4_decode_create_session,
+ [OP_DESTROY_SESSION] = (nfsd4_dec)nfsd4_decode_destroy_session,
+ [OP_FREE_STATEID] = (nfsd4_dec)nfsd4_decode_notsupp,
+ [OP_GET_DIR_DELEGATION] = (nfsd4_dec)nfsd4_decode_notsupp,
+ [OP_GETDEVICEINFO] = (nfsd4_dec)nfsd4_decode_notsupp,
+ [OP_GETDEVICELIST] = (nfsd4_dec)nfsd4_decode_notsupp,
+ [OP_LAYOUTCOMMIT] = (nfsd4_dec)nfsd4_decode_notsupp,
+ [OP_LAYOUTGET] = (nfsd4_dec)nfsd4_decode_notsupp,
+ [OP_LAYOUTRETURN] = (nfsd4_dec)nfsd4_decode_notsupp,
+ [OP_SECINFO_NO_NAME] = (nfsd4_dec)nfsd4_decode_notsupp,
+ [OP_SEQUENCE] = (nfsd4_dec)nfsd4_decode_sequence,
+ [OP_SET_SSV] = (nfsd4_dec)nfsd4_decode_notsupp,
+ [OP_TEST_STATEID] = (nfsd4_dec)nfsd4_decode_notsupp,
+ [OP_WANT_DELEGATION] = (nfsd4_dec)nfsd4_decode_notsupp,
+ [OP_DESTROY_CLIENTID] = (nfsd4_dec)nfsd4_decode_notsupp,
+ [OP_RECLAIM_COMPLETE] = (nfsd4_dec)nfsd4_decode_notsupp,
};
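
The table rewrite above is purely syntactic: the old entries relied on GCC's obsolete "[index] value" initializer extension, while the new ones use standard C99 designated initializers. A minimal sketch of the standard form, outside any kernel context (names below are hypothetical):

/* Hypothetical table, not kernel code: C99 designated initializers assign
 * by index, and any index not named is zero-filled. */
static const char *op_names[4] = {
	[0] = "null",
	[2] = "lookup",		/* indexes 1 and 3 stay NULL implicitly */
};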
struct nfsd4_minorversion_ops {
@@ -1489,21 +1457,6 @@
DECODE_TAIL;
}
-/*
- * END OF "GENERIC" DECODE ROUTINES.
- */
-
-/*
- * START OF "GENERIC" ENCODE ROUTINES.
- * These may look a little ugly since they are imported from a "generic"
- * set of XDR encode/decode routines which are intended to be shared by
- * all of our NFSv4 implementations (OpenBSD, MacOS X...).
- *
- * If the pain of reading these is too great, it should be a straightforward
- * task to translate them into Linux-specific versions which are more
- * consistent with the style used in NFSv2/v3...
- */
-#define ENCODE_HEAD __be32 *p
#define WRITE32(n) *p++ = htonl(n)
#define WRITE64(n) do { \
@@ -1515,13 +1468,41 @@
memcpy(p, ptr, nbytes); \
p += XDR_QUADLEN(nbytes); \
}} while (0)
-#define WRITECINFO(c) do { \
- *p++ = htonl(c.atomic); \
- *p++ = htonl(c.before_ctime_sec); \
- *p++ = htonl(c.before_ctime_nsec); \
- *p++ = htonl(c.after_ctime_sec); \
- *p++ = htonl(c.after_ctime_nsec); \
-} while (0)
+
+static void write32(__be32 **p, u32 n)
+{
+ *(*p)++ = n;
+}
+
+static void write64(__be32 **p, u64 n)
+{
+ write32(p, (u32)(n >> 32));
+ write32(p, (u32)n);
+}
+
+static void write_change(__be32 **p, struct kstat *stat, struct inode *inode)
+{
+ if (IS_I_VERSION(inode)) {
+ write64(p, inode->i_version);
+ } else {
+ write32(p, stat->ctime.tv_sec);
+ write32(p, stat->ctime.tv_nsec);
+ }
+}
+
+static void write_cinfo(__be32 **p, struct nfsd4_change_info *c)
+{
+ write32(p, c->atomic);
+ if (c->change_supported) {
+ write64(p, c->before_change);
+ write64(p, c->after_change);
+ } else {
+ write32(p, c->before_ctime_sec);
+ write32(p, c->before_ctime_nsec);
+ write32(p, c->after_ctime_sec);
+ write32(p, c->after_ctime_nsec);
+ }
+}
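
Either branch of write_cinfo() above emits the same number of bytes — one 32-bit word for the atomic flag plus either two 64-bit change values or four 32-bit time words, 20 bytes in total — which is why the RESERVE_SPACE() sizes in the callers below are unchanged. A userspace-style sketch of that accounting (not part of the patch):

#include <assert.h>
#include <stdint.h>

/* Size check only; mirrors the two layouts written by write_cinfo(). */
static void cinfo_size_check(void)
{
	size_t with_change = sizeof(uint32_t) + 2 * sizeof(uint64_t); /* atomic + before/after change */
	size_t with_ctime  = sizeof(uint32_t) + 4 * sizeof(uint32_t); /* atomic + two sec/nsec pairs */

	assert(with_change == 20);
	assert(with_ctime == 20);
}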
#define RESERVE_SPACE(nbytes) do { \
p = resp->p; \
@@ -1874,16 +1855,9 @@
WRITE32(NFS4_FH_PERSISTENT|NFS4_FH_VOL_RENAME);
}
if (bmval0 & FATTR4_WORD0_CHANGE) {
- /*
- * Note: This _must_ be consistent with the scheme for writing
- * change_info, so any changes made here must be reflected there
- * as well. (See xdr4.h:set_change_info() and the WRITECINFO()
- * macro above.)
- */
if ((buflen -= 8) < 0)
goto out_resource;
- WRITE32(stat.ctime.tv_sec);
- WRITE32(stat.ctime.tv_nsec);
+ write_change(&p, &stat, dentry->d_inode);
}
if (bmval0 & FATTR4_WORD0_SIZE) {
if ((buflen -= 8) < 0)
@@ -2348,7 +2322,7 @@
static void
nfsd4_encode_stateid(struct nfsd4_compoundres *resp, stateid_t *sid)
{
- ENCODE_HEAD;
+ __be32 *p;
RESERVE_SPACE(sizeof(stateid_t));
WRITE32(sid->si_generation);
@@ -2359,7 +2333,7 @@
static __be32
nfsd4_encode_access(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_access *access)
{
- ENCODE_HEAD;
+ __be32 *p;
if (!nfserr) {
RESERVE_SPACE(8);
@@ -2386,7 +2360,7 @@
static __be32
nfsd4_encode_commit(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_commit *commit)
{
- ENCODE_HEAD;
+ __be32 *p;
if (!nfserr) {
RESERVE_SPACE(8);
@@ -2399,11 +2373,11 @@
static __be32
nfsd4_encode_create(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_create *create)
{
- ENCODE_HEAD;
+ __be32 *p;
if (!nfserr) {
RESERVE_SPACE(32);
- WRITECINFO(create->cr_cinfo);
+ write_cinfo(&p, &create->cr_cinfo);
WRITE32(2);
WRITE32(create->cr_bmval[0]);
WRITE32(create->cr_bmval[1]);
@@ -2435,7 +2409,7 @@
{
struct svc_fh *fhp = *fhpp;
unsigned int len;
- ENCODE_HEAD;
+ __be32 *p;
if (!nfserr) {
len = fhp->fh_handle.fh_size;
@@ -2454,7 +2428,7 @@
static void
nfsd4_encode_lock_denied(struct nfsd4_compoundres *resp, struct nfsd4_lock_denied *ld)
{
- ENCODE_HEAD;
+ __be32 *p;
RESERVE_SPACE(32 + XDR_LEN(ld->ld_sop ? ld->ld_sop->so_owner.len : 0));
WRITE64(ld->ld_start);
@@ -2510,11 +2484,11 @@
static __be32
nfsd4_encode_link(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_link *link)
{
- ENCODE_HEAD;
+ __be32 *p;
if (!nfserr) {
RESERVE_SPACE(20);
- WRITECINFO(link->li_cinfo);
+ write_cinfo(&p, &link->li_cinfo);
ADJUST_ARGS();
}
return nfserr;
@@ -2524,7 +2498,7 @@
static __be32
nfsd4_encode_open(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_open *open)
{
- ENCODE_HEAD;
+ __be32 *p;
ENCODE_SEQID_OP_HEAD;
if (nfserr)
@@ -2532,7 +2506,7 @@
nfsd4_encode_stateid(resp, &open->op_stateid);
RESERVE_SPACE(40);
- WRITECINFO(open->op_cinfo);
+ write_cinfo(&p, &open->op_cinfo);
WRITE32(open->op_rflags);
WRITE32(2);
WRITE32(open->op_bmval[0]);
@@ -2619,7 +2593,7 @@
int v, pn;
unsigned long maxcount;
long len;
- ENCODE_HEAD;
+ __be32 *p;
if (nfserr)
return nfserr;
@@ -2681,7 +2655,7 @@
{
int maxcount;
char *page;
- ENCODE_HEAD;
+ __be32 *p;
if (nfserr)
return nfserr;
@@ -2730,7 +2704,7 @@
int maxcount;
loff_t offset;
__be32 *page, *savep, *tailbase;
- ENCODE_HEAD;
+ __be32 *p;
if (nfserr)
return nfserr;
@@ -2806,11 +2780,11 @@
static __be32
nfsd4_encode_remove(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_remove *remove)
{
- ENCODE_HEAD;
+ __be32 *p;
if (!nfserr) {
RESERVE_SPACE(20);
- WRITECINFO(remove->rm_cinfo);
+ write_cinfo(&p, &remove->rm_cinfo);
ADJUST_ARGS();
}
return nfserr;
@@ -2819,12 +2793,12 @@
static __be32
nfsd4_encode_rename(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_rename *rename)
{
- ENCODE_HEAD;
+ __be32 *p;
if (!nfserr) {
RESERVE_SPACE(40);
- WRITECINFO(rename->rn_sinfo);
- WRITECINFO(rename->rn_tinfo);
+ write_cinfo(&p, &rename->rn_sinfo);
+ write_cinfo(&p, &rename->rn_tinfo);
ADJUST_ARGS();
}
return nfserr;
@@ -2839,7 +2813,7 @@
u32 nflavs;
struct exp_flavor_info *flavs;
struct exp_flavor_info def_flavs[2];
- ENCODE_HEAD;
+ __be32 *p;
if (nfserr)
goto out;
@@ -2904,7 +2878,7 @@
static __be32
nfsd4_encode_setattr(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_setattr *setattr)
{
- ENCODE_HEAD;
+ __be32 *p;
RESERVE_SPACE(12);
if (nfserr) {
@@ -2924,7 +2898,7 @@
static __be32
nfsd4_encode_setclientid(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_setclientid *scd)
{
- ENCODE_HEAD;
+ __be32 *p;
if (!nfserr) {
RESERVE_SPACE(8 + sizeof(nfs4_verifier));
@@ -2944,7 +2918,7 @@
static __be32
nfsd4_encode_write(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_write *write)
{
- ENCODE_HEAD;
+ __be32 *p;
if (!nfserr) {
RESERVE_SPACE(16);
@@ -2960,7 +2934,7 @@
nfsd4_encode_exchange_id(struct nfsd4_compoundres *resp, int nfserr,
struct nfsd4_exchange_id *exid)
{
- ENCODE_HEAD;
+ __be32 *p;
char *major_id;
char *server_scope;
int major_id_sz;
@@ -3015,7 +2989,7 @@
nfsd4_encode_create_session(struct nfsd4_compoundres *resp, int nfserr,
struct nfsd4_create_session *sess)
{
- ENCODE_HEAD;
+ __be32 *p;
if (nfserr)
return nfserr;
@@ -3071,7 +3045,7 @@
nfsd4_encode_sequence(struct nfsd4_compoundres *resp, int nfserr,
struct nfsd4_sequence *seq)
{
- ENCODE_HEAD;
+ __be32 *p;
if (nfserr)
return nfserr;
@@ -3209,7 +3183,7 @@
dprintk("%s length %u, xb->page_len %u tlen %u pad %u\n", __func__,
length, xb->page_len, tlen, pad);
- if (length <= session->se_fmaxresp_cached)
+ if (length <= session->se_fchannel.maxresp_cached)
return status;
else
return nfserr_rep_too_big_to_cache;
@@ -3219,7 +3193,7 @@
nfsd4_encode_operation(struct nfsd4_compoundres *resp, struct nfsd4_op *op)
{
__be32 *statp;
- ENCODE_HEAD;
+ __be32 *p;
RESERVE_SPACE(8);
WRITE32(op->opnum);
@@ -3253,7 +3227,7 @@
void
nfsd4_encode_replay(struct nfsd4_compoundres *resp, struct nfsd4_op *op)
{
- ENCODE_HEAD;
+ __be32 *p;
struct nfs4_replay *rp = op->replay;
BUG_ON(!rp);
@@ -3268,10 +3242,6 @@
ADJUST_ARGS();
}
-/*
- * END OF "GENERIC" ENCODE ROUTINES.
- */
-
int
nfs4svc_encode_voidres(struct svc_rqst *rqstp, __be32 *p, void *dummy)
{
diff --git a/fs/nfsd/nfscache.c b/fs/nfsd/nfscache.c
index 5bfc2ac..4638635 100644
--- a/fs/nfsd/nfscache.c
+++ b/fs/nfsd/nfscache.c
@@ -29,15 +29,24 @@
*/
#define CACHESIZE 1024
#define HASHSIZE 64
-#define REQHASH(xid) (((((__force __u32)xid) >> 24) ^ ((__force __u32)xid)) & (HASHSIZE-1))
-static struct hlist_head * hash_list;
+static struct hlist_head * cache_hash;
static struct list_head lru_head;
static int cache_disabled = 1;
+/*
+ * Calculate the hash index from an XID.
+ */
+static inline u32 request_hash(u32 xid)
+{
+ u32 h = xid;
+ h ^= (xid >> 24);
+ return h & (HASHSIZE-1);
+}
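
A quick worked example of the bucket calculation, assuming HASHSIZE == 64 as defined above (userspace-style sketch, not part of the patch):

#include <assert.h>
#include <stdint.h>

static uint32_t example_request_hash(uint32_t xid)
{
	uint32_t h = xid;

	h ^= (xid >> 24);	/* fold the high byte into the low bits */
	return h & (64 - 1);	/* HASHSIZE - 1 */
}

static void request_hash_example(void)
{
	/* 0x12345678 ^ 0x12 = 0x1234566a; 0x6a & 0x3f = 0x2a */
	assert(example_request_hash(0x12345678) == 0x2a);
}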
+
static int nfsd_cache_append(struct svc_rqst *rqstp, struct kvec *vec);
-/*
+/*
* locking for the reply cache:
* A cache entry is "single use" if c_state == RC_INPROG
 * Otherwise, when accessing _prev or _next, the lock must be held.
@@ -62,8 +71,8 @@
i--;
}
- hash_list = kcalloc (HASHSIZE, sizeof(struct hlist_head), GFP_KERNEL);
- if (!hash_list)
+ cache_hash = kcalloc (HASHSIZE, sizeof(struct hlist_head), GFP_KERNEL);
+ if (!cache_hash)
goto out_nomem;
cache_disabled = 0;
@@ -88,8 +97,8 @@
cache_disabled = 1;
- kfree (hash_list);
- hash_list = NULL;
+ kfree (cache_hash);
+ cache_hash = NULL;
}
/*
@@ -108,7 +117,7 @@
hash_refile(struct svc_cacherep *rp)
{
hlist_del_init(&rp->c_hash);
- hlist_add_head(&rp->c_hash, hash_list + REQHASH(rp->c_xid));
+ hlist_add_head(&rp->c_hash, cache_hash + request_hash(rp->c_xid));
}
/*
@@ -138,7 +147,7 @@
spin_lock(&cache_lock);
rtn = RC_DOIT;
- rh = &hash_list[REQHASH(xid)];
+ rh = &cache_hash[request_hash(xid)];
hlist_for_each_entry(rp, hn, rh, c_hash) {
if (rp->c_state != RC_UNUSED &&
xid == rp->c_xid && proc == rp->c_proc &&
@@ -165,8 +174,8 @@
}
}
- /* This should not happen */
- if (rp == NULL) {
+ /* All entries on the LRU are in-progress. This should not happen */
+ if (&rp->c_lru == &lru_head) {
static int complaints;
printk(KERN_WARNING "nfsd: all repcache entries locked!\n");
@@ -264,7 +273,7 @@
len = resv->iov_len - ((char*)statp - (char*)resv->iov_base);
len >>= 2;
-
+
/* Don't cache excessive amounts of data and XDR failures */
if (!statp || len > (256 >> 2)) {
rp->c_state = RC_UNUSED;
diff --git a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c
index af16849..1250fb9 100644
--- a/fs/nfsd/nfsctl.c
+++ b/fs/nfsd/nfsctl.c
@@ -207,10 +207,14 @@
static ssize_t write_svc(struct file *file, char *buf, size_t size)
{
struct nfsctl_svc *data;
+ int err;
if (size < sizeof(*data))
return -EINVAL;
data = (struct nfsctl_svc*) buf;
- return nfsd_svc(data->svc_port, data->svc_nthreads);
+ err = nfsd_svc(data->svc_port, data->svc_nthreads);
+ if (err < 0)
+ return err;
+ return 0;
}
/**
@@ -692,11 +696,12 @@
if (newthreads < 0)
return -EINVAL;
rv = nfsd_svc(NFS_PORT, newthreads);
- if (rv)
+ if (rv < 0)
return rv;
- }
- sprintf(buf, "%d\n", nfsd_nrthreads());
- return strlen(buf);
+ } else
+ rv = nfsd_nrthreads();
+
+ return scnprintf(buf, SIMPLE_TRANSACTION_LIMIT, "%d\n", rv);
}
/**
@@ -793,7 +798,7 @@
{
char *mesg = buf;
char *vers, *minorp, sign;
- int len, num;
+ int len, num, remaining;
unsigned minor;
ssize_t tlen = 0;
char *sep;
@@ -840,32 +845,50 @@
}
next:
vers += len + 1;
- tlen += len;
} while ((len = qword_get(&mesg, vers, size)) > 0);
/* If all get turned off, turn them back on, as
* having no versions is BAD
*/
nfsd_reset_versions();
}
+
/* Now write current state into reply buffer */
len = 0;
sep = "";
+ remaining = SIMPLE_TRANSACTION_LIMIT;
for (num=2 ; num <= 4 ; num++)
if (nfsd_vers(num, NFSD_AVAIL)) {
- len += sprintf(buf+len, "%s%c%d", sep,
+ len = snprintf(buf, remaining, "%s%c%d", sep,
nfsd_vers(num, NFSD_TEST)?'+':'-',
num);
sep = " ";
+
+ if (len > remaining)
+ break;
+ remaining -= len;
+ buf += len;
+ tlen += len;
}
if (nfsd_vers(4, NFSD_AVAIL))
- for (minor = 1; minor <= NFSD_SUPPORTED_MINOR_VERSION; minor++)
- len += sprintf(buf+len, " %c4.%u",
+ for (minor = 1; minor <= NFSD_SUPPORTED_MINOR_VERSION;
+ minor++) {
+ len = snprintf(buf, remaining, " %c4.%u",
(nfsd_vers(4, NFSD_TEST) &&
nfsd_minorversion(minor, NFSD_TEST)) ?
'+' : '-',
minor);
- len += sprintf(buf+len, "\n");
- return len;
+
+ if (len > remaining)
+ break;
+ remaining -= len;
+ buf += len;
+ tlen += len;
+ }
+
+ len = snprintf(buf, remaining, "\n");
+ if (len > remaining)
+ return -EINVAL;
+ return tlen + len;
}
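
With the bounded snprintf() loop above, the reply written back to the caller is a space-separated list of version tokens terminated by a newline, e.g. "+2 +3 +4 -4.1\n" when NFSv2/3/4 are enabled and 4.1 is not. A hedged userspace sketch of reading it back, assuming the nfsd filesystem is mounted at /proc/fs/nfsd:

#include <stdio.h>

/* Illustrative only: print the current version list, e.g. "+2 +3 +4 -4.1". */
int main(void)
{
	char buf[128];
	FILE *f = fopen("/proc/fs/nfsd/versions", "r");

	if (!f)
		return 1;
	if (fgets(buf, sizeof(buf), f))
		fputs(buf, stdout);
	fclose(f);
	return 0;
}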
/**
@@ -910,104 +933,143 @@
return rv;
}
+/*
+ * Zero-length write. Return a list of NFSD's current listener
+ * transports.
+ */
+static ssize_t __write_ports_names(char *buf)
+{
+ if (nfsd_serv == NULL)
+ return 0;
+ return svc_xprt_names(nfsd_serv, buf, SIMPLE_TRANSACTION_LIMIT);
+}
+
+/*
+ * A single 'fd' number was written, in which case it must be for
+ * a socket of a supported family/protocol, and we use it as an
+ * nfsd listener.
+ */
+static ssize_t __write_ports_addfd(char *buf)
+{
+ char *mesg = buf;
+ int fd, err;
+
+ err = get_int(&mesg, &fd);
+ if (err != 0 || fd < 0)
+ return -EINVAL;
+
+ err = nfsd_create_serv();
+ if (err != 0)
+ return err;
+
+ err = lockd_up();
+ if (err != 0)
+ goto out;
+
+ err = svc_addsock(nfsd_serv, fd, buf, SIMPLE_TRANSACTION_LIMIT);
+ if (err < 0)
+ lockd_down();
+
+out:
+ /* Decrease the count, but don't shut down the service */
+ nfsd_serv->sv_nrthreads--;
+ return err;
+}
+
+/*
+ * A '-' followed by the 'name' of a socket means we close the socket.
+ */
+static ssize_t __write_ports_delfd(char *buf)
+{
+ char *toclose;
+ int len = 0;
+
+ toclose = kstrdup(buf + 1, GFP_KERNEL);
+ if (toclose == NULL)
+ return -ENOMEM;
+
+ if (nfsd_serv != NULL)
+ len = svc_sock_names(nfsd_serv, buf,
+ SIMPLE_TRANSACTION_LIMIT, toclose);
+ if (len >= 0)
+ lockd_down();
+
+ kfree(toclose);
+ return len;
+}
+
+/*
+ * A transport listener is added by writing its transport name and
+ * a port number.
+ */
+static ssize_t __write_ports_addxprt(char *buf)
+{
+ char transport[16];
+ int port, err;
+
+ if (sscanf(buf, "%15s %4u", transport, &port) != 2)
+ return -EINVAL;
+
+ if (port < 1 || port > USHORT_MAX)
+ return -EINVAL;
+
+ err = nfsd_create_serv();
+ if (err != 0)
+ return err;
+
+ err = svc_create_xprt(nfsd_serv, transport,
+ PF_INET, port, SVC_SOCK_ANONYMOUS);
+ if (err < 0) {
+ /* Give a reasonable perror msg for bad transport string */
+ if (err == -ENOENT)
+ err = -EPROTONOSUPPORT;
+ return err;
+ }
+ return 0;
+}
+
+/*
+ * A transport listener is removed by writing a "-", its transport
+ * name, and its port number.
+ */
+static ssize_t __write_ports_delxprt(char *buf)
+{
+ struct svc_xprt *xprt;
+ char transport[16];
+ int port;
+
+ if (sscanf(&buf[1], "%15s %4u", transport, &port) != 2)
+ return -EINVAL;
+
+ if (port < 1 || port > USHORT_MAX || nfsd_serv == NULL)
+ return -EINVAL;
+
+ xprt = svc_find_xprt(nfsd_serv, transport, AF_UNSPEC, port);
+ if (xprt == NULL)
+ return -ENOTCONN;
+
+ svc_close_xprt(xprt);
+ svc_xprt_put(xprt);
+ return 0;
+}
+
static ssize_t __write_ports(struct file *file, char *buf, size_t size)
{
- if (size == 0) {
- int len = 0;
+ if (size == 0)
+ return __write_ports_names(buf);
- if (nfsd_serv)
- len = svc_xprt_names(nfsd_serv, buf, 0);
- return len;
- }
- /* Either a single 'fd' number is written, in which
- * case it must be for a socket of a supported family/protocol,
- * and we use it as an nfsd socket, or
- * A '-' followed by the 'name' of a socket in which case
- * we close the socket.
- */
- if (isdigit(buf[0])) {
- char *mesg = buf;
- int fd;
- int err;
- err = get_int(&mesg, &fd);
- if (err)
- return -EINVAL;
- if (fd < 0)
- return -EINVAL;
- err = nfsd_create_serv();
- if (!err) {
- err = svc_addsock(nfsd_serv, fd, buf);
- if (err >= 0) {
- err = lockd_up();
- if (err < 0)
- svc_sock_names(buf+strlen(buf)+1, nfsd_serv, buf);
- }
- /* Decrease the count, but don't shutdown the
- * the service
- */
- nfsd_serv->sv_nrthreads--;
- }
- return err < 0 ? err : 0;
- }
- if (buf[0] == '-' && isdigit(buf[1])) {
- char *toclose = kstrdup(buf+1, GFP_KERNEL);
- int len = 0;
- if (!toclose)
- return -ENOMEM;
- if (nfsd_serv)
- len = svc_sock_names(buf, nfsd_serv, toclose);
- if (len >= 0)
- lockd_down();
- kfree(toclose);
- return len;
- }
- /*
- * Add a transport listener by writing it's transport name
- */
- if (isalpha(buf[0])) {
- int err;
- char transport[16];
- int port;
- if (sscanf(buf, "%15s %4d", transport, &port) == 2) {
- if (port < 1 || port > 65535)
- return -EINVAL;
- err = nfsd_create_serv();
- if (!err) {
- err = svc_create_xprt(nfsd_serv,
- transport, PF_INET, port,
- SVC_SOCK_ANONYMOUS);
- if (err == -ENOENT)
- /* Give a reasonable perror msg for
- * bad transport string */
- err = -EPROTONOSUPPORT;
- }
- return err < 0 ? err : 0;
- }
- }
- /*
- * Remove a transport by writing it's transport name and port number
- */
- if (buf[0] == '-' && isalpha(buf[1])) {
- struct svc_xprt *xprt;
- int err = -EINVAL;
- char transport[16];
- int port;
- if (sscanf(&buf[1], "%15s %4d", transport, &port) == 2) {
- if (port < 1 || port > 65535)
- return -EINVAL;
- if (nfsd_serv) {
- xprt = svc_find_xprt(nfsd_serv, transport,
- AF_UNSPEC, port);
- if (xprt) {
- svc_close_xprt(xprt);
- svc_xprt_put(xprt);
- err = 0;
- } else
- err = -ENOTCONN;
- }
- return err < 0 ? err : 0;
- }
- }
+ if (isdigit(buf[0]))
+ return __write_ports_addfd(buf);
+
+ if (buf[0] == '-' && isdigit(buf[1]))
+ return __write_ports_delfd(buf);
+
+ if (isalpha(buf[0]))
+ return __write_ports_addxprt(buf);
+
+ if (buf[0] == '-' && isalpha(buf[1]))
+ return __write_ports_delxprt(buf);
+
return -EINVAL;
}
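
The dispatcher above keeps the existing write syntax for the "portlist" control file: a bare fd number adds a listener socket, "-" plus a socket name closes it, "transport port" creates a new listener, and "-" plus a transport name and port removes it. A hedged userspace sketch of adding a TCP listener on port 2049, assuming the nfsd filesystem is mounted at /proc/fs/nfsd:

#include <stdio.h>
#include <string.h>

/* Illustrative only: roughly what __write_ports_addxprt("tcp 2049") handles. */
int main(void)
{
	const char *req = "tcp 2049";
	FILE *f = fopen("/proc/fs/nfsd/portlist", "w");

	if (!f)
		return 1;
	if (fwrite(req, 1, strlen(req), f) != strlen(req)) {
		fclose(f);
		return 1;
	}
	fclose(f);
	return 0;
}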
@@ -1030,7 +1092,9 @@
* buf: C string containing an unsigned
* integer value representing a bound
* but unconnected socket that is to be
- * used as an NFSD listener
+ * used as an NFSD listener; listen(3)
+ * must be called for a SOCK_STREAM
+ * socket, otherwise it is ignored
* size: non-zero length of C string in @buf
* Output:
* On success: NFS service is started;
@@ -1138,7 +1202,9 @@
nfsd_max_blksize = bsize;
mutex_unlock(&nfsd_mutex);
}
- return sprintf(buf, "%d\n", nfsd_max_blksize);
+
+ return scnprintf(buf, SIMPLE_TRANSACTION_LIMIT, "%d\n",
+ nfsd_max_blksize);
}
#ifdef CONFIG_NFSD_V4
@@ -1162,8 +1228,9 @@
return -EINVAL;
nfs4_reset_lease(lease);
}
- sprintf(buf, "%ld\n", nfs4_lease_time());
- return strlen(buf);
+
+ return scnprintf(buf, SIMPLE_TRANSACTION_LIMIT, "%ld\n",
+ nfs4_lease_time());
}
/**
@@ -1219,8 +1286,9 @@
status = nfs4_reset_recoverydir(recdir);
}
- sprintf(buf, "%s\n", nfs4_recoverydir());
- return strlen(buf);
+
+ return scnprintf(buf, SIMPLE_TRANSACTION_LIMIT, "%s\n",
+ nfs4_recoverydir());
}
/**
diff --git a/fs/nfsd/nfsfh.c b/fs/nfsd/nfsfh.c
index 9f1ca17..8847f3f 100644
--- a/fs/nfsd/nfsfh.c
+++ b/fs/nfsd/nfsfh.c
@@ -27,9 +27,6 @@
#define NFSDDBG_FACILITY NFSDDBG_FH
-static int nfsd_nr_verified;
-static int nfsd_nr_put;
-
/*
* our acceptability function.
* if NOSUBTREECHECK, accept anything
@@ -251,7 +248,6 @@
fhp->fh_dentry = dentry;
fhp->fh_export = exp;
- nfsd_nr_verified++;
return 0;
out:
exp_put(exp);
@@ -552,7 +548,6 @@
return nfserr_opnotsupp;
}
- nfsd_nr_verified++;
return 0;
}
@@ -609,7 +604,6 @@
fhp->fh_pre_saved = 0;
fhp->fh_post_saved = 0;
#endif
- nfsd_nr_put++;
}
if (exp) {
cache_put(&exp->h, &svc_export_cache);
diff --git a/fs/nfsd/nfsproc.c b/fs/nfsd/nfsproc.c
index e298e26..0eb9c82 100644
--- a/fs/nfsd/nfsproc.c
+++ b/fs/nfsd/nfsproc.c
@@ -533,45 +533,179 @@
* NFSv2 Server procedures.
* Only the results of non-idempotent operations are cached.
*/
-#define nfsd_proc_none NULL
-#define nfssvc_release_none NULL
struct nfsd_void { int dummy; };
-#define PROC(name, argt, rest, relt, cache, respsize) \
- { (svc_procfunc) nfsd_proc_##name, \
- (kxdrproc_t) nfssvc_decode_##argt, \
- (kxdrproc_t) nfssvc_encode_##rest, \
- (kxdrproc_t) nfssvc_release_##relt, \
- sizeof(struct nfsd_##argt), \
- sizeof(struct nfsd_##rest), \
- 0, \
- cache, \
- respsize, \
- }
-
#define ST 1 /* status */
#define FH 8 /* filehandle */
#define AT 18 /* attributes */
static struct svc_procedure nfsd_procedures2[18] = {
- PROC(null, void, void, none, RC_NOCACHE, ST),
- PROC(getattr, fhandle, attrstat, fhandle, RC_NOCACHE, ST+AT),
- PROC(setattr, sattrargs, attrstat, fhandle, RC_REPLBUFF, ST+AT),
- PROC(none, void, void, none, RC_NOCACHE, ST),
- PROC(lookup, diropargs, diropres, fhandle, RC_NOCACHE, ST+FH+AT),
- PROC(readlink, readlinkargs, readlinkres, none, RC_NOCACHE, ST+1+NFS_MAXPATHLEN/4),
- PROC(read, readargs, readres, fhandle, RC_NOCACHE, ST+AT+1+NFSSVC_MAXBLKSIZE_V2/4),
- PROC(none, void, void, none, RC_NOCACHE, ST),
- PROC(write, writeargs, attrstat, fhandle, RC_REPLBUFF, ST+AT),
- PROC(create, createargs, diropres, fhandle, RC_REPLBUFF, ST+FH+AT),
- PROC(remove, diropargs, void, none, RC_REPLSTAT, ST),
- PROC(rename, renameargs, void, none, RC_REPLSTAT, ST),
- PROC(link, linkargs, void, none, RC_REPLSTAT, ST),
- PROC(symlink, symlinkargs, void, none, RC_REPLSTAT, ST),
- PROC(mkdir, createargs, diropres, fhandle, RC_REPLBUFF, ST+FH+AT),
- PROC(rmdir, diropargs, void, none, RC_REPLSTAT, ST),
- PROC(readdir, readdirargs, readdirres, none, RC_NOCACHE, 0),
- PROC(statfs, fhandle, statfsres, none, RC_NOCACHE, ST+5),
+ [NFSPROC_NULL] = {
+ .pc_func = (svc_procfunc) nfsd_proc_null,
+ .pc_decode = (kxdrproc_t) nfssvc_decode_void,
+ .pc_encode = (kxdrproc_t) nfssvc_encode_void,
+ .pc_argsize = sizeof(struct nfsd_void),
+ .pc_ressize = sizeof(struct nfsd_void),
+ .pc_cachetype = RC_NOCACHE,
+ .pc_xdrressize = ST,
+ },
+ [NFSPROC_GETATTR] = {
+ .pc_func = (svc_procfunc) nfsd_proc_getattr,
+ .pc_decode = (kxdrproc_t) nfssvc_decode_fhandle,
+ .pc_encode = (kxdrproc_t) nfssvc_encode_attrstat,
+ .pc_release = (kxdrproc_t) nfssvc_release_fhandle,
+ .pc_argsize = sizeof(struct nfsd_fhandle),
+ .pc_ressize = sizeof(struct nfsd_attrstat),
+ .pc_cachetype = RC_NOCACHE,
+ .pc_xdrressize = ST+AT,
+ },
+ [NFSPROC_SETATTR] = {
+ .pc_func = (svc_procfunc) nfsd_proc_setattr,
+ .pc_decode = (kxdrproc_t) nfssvc_decode_sattrargs,
+ .pc_encode = (kxdrproc_t) nfssvc_encode_attrstat,
+ .pc_release = (kxdrproc_t) nfssvc_release_fhandle,
+ .pc_argsize = sizeof(struct nfsd_sattrargs),
+ .pc_ressize = sizeof(struct nfsd_attrstat),
+ .pc_cachetype = RC_REPLBUFF,
+ .pc_xdrressize = ST+AT,
+ },
+ [NFSPROC_ROOT] = {
+ .pc_decode = (kxdrproc_t) nfssvc_decode_void,
+ .pc_encode = (kxdrproc_t) nfssvc_encode_void,
+ .pc_argsize = sizeof(struct nfsd_void),
+ .pc_ressize = sizeof(struct nfsd_void),
+ .pc_cachetype = RC_NOCACHE,
+ .pc_xdrressize = ST,
+ },
+ [NFSPROC_LOOKUP] = {
+ .pc_func = (svc_procfunc) nfsd_proc_lookup,
+ .pc_decode = (kxdrproc_t) nfssvc_decode_diropargs,
+ .pc_encode = (kxdrproc_t) nfssvc_encode_diropres,
+ .pc_release = (kxdrproc_t) nfssvc_release_fhandle,
+ .pc_argsize = sizeof(struct nfsd_diropargs),
+ .pc_ressize = sizeof(struct nfsd_diropres),
+ .pc_cachetype = RC_NOCACHE,
+ .pc_xdrressize = ST+FH+AT,
+ },
+ [NFSPROC_READLINK] = {
+ .pc_func = (svc_procfunc) nfsd_proc_readlink,
+ .pc_decode = (kxdrproc_t) nfssvc_decode_readlinkargs,
+ .pc_encode = (kxdrproc_t) nfssvc_encode_readlinkres,
+ .pc_argsize = sizeof(struct nfsd_readlinkargs),
+ .pc_ressize = sizeof(struct nfsd_readlinkres),
+ .pc_cachetype = RC_NOCACHE,
+ .pc_xdrressize = ST+1+NFS_MAXPATHLEN/4,
+ },
+ [NFSPROC_READ] = {
+ .pc_func = (svc_procfunc) nfsd_proc_read,
+ .pc_decode = (kxdrproc_t) nfssvc_decode_readargs,
+ .pc_encode = (kxdrproc_t) nfssvc_encode_readres,
+ .pc_release = (kxdrproc_t) nfssvc_release_fhandle,
+ .pc_argsize = sizeof(struct nfsd_readargs),
+ .pc_ressize = sizeof(struct nfsd_readres),
+ .pc_cachetype = RC_NOCACHE,
+ .pc_xdrressize = ST+AT+1+NFSSVC_MAXBLKSIZE_V2/4,
+ },
+ [NFSPROC_WRITECACHE] = {
+ .pc_decode = (kxdrproc_t) nfssvc_decode_void,
+ .pc_encode = (kxdrproc_t) nfssvc_encode_void,
+ .pc_argsize = sizeof(struct nfsd_void),
+ .pc_ressize = sizeof(struct nfsd_void),
+ .pc_cachetype = RC_NOCACHE,
+ .pc_xdrressize = ST,
+ },
+ [NFSPROC_WRITE] = {
+ .pc_func = (svc_procfunc) nfsd_proc_write,
+ .pc_decode = (kxdrproc_t) nfssvc_decode_writeargs,
+ .pc_encode = (kxdrproc_t) nfssvc_encode_attrstat,
+ .pc_release = (kxdrproc_t) nfssvc_release_fhandle,
+ .pc_argsize = sizeof(struct nfsd_writeargs),
+ .pc_ressize = sizeof(struct nfsd_attrstat),
+ .pc_cachetype = RC_REPLBUFF,
+ .pc_xdrressize = ST+AT,
+ },
+ [NFSPROC_CREATE] = {
+ .pc_func = (svc_procfunc) nfsd_proc_create,
+ .pc_decode = (kxdrproc_t) nfssvc_decode_createargs,
+ .pc_encode = (kxdrproc_t) nfssvc_encode_diropres,
+ .pc_release = (kxdrproc_t) nfssvc_release_fhandle,
+ .pc_argsize = sizeof(struct nfsd_createargs),
+ .pc_ressize = sizeof(struct nfsd_diropres),
+ .pc_cachetype = RC_REPLBUFF,
+ .pc_xdrressize = ST+FH+AT,
+ },
+ [NFSPROC_REMOVE] = {
+ .pc_func = (svc_procfunc) nfsd_proc_remove,
+ .pc_decode = (kxdrproc_t) nfssvc_decode_diropargs,
+ .pc_encode = (kxdrproc_t) nfssvc_encode_void,
+ .pc_argsize = sizeof(struct nfsd_diropargs),
+ .pc_ressize = sizeof(struct nfsd_void),
+ .pc_cachetype = RC_REPLSTAT,
+ .pc_xdrressize = ST,
+ },
+ [NFSPROC_RENAME] = {
+ .pc_func = (svc_procfunc) nfsd_proc_rename,
+ .pc_decode = (kxdrproc_t) nfssvc_decode_renameargs,
+ .pc_encode = (kxdrproc_t) nfssvc_encode_void,
+ .pc_argsize = sizeof(struct nfsd_renameargs),
+ .pc_ressize = sizeof(struct nfsd_void),
+ .pc_cachetype = RC_REPLSTAT,
+ .pc_xdrressize = ST,
+ },
+ [NFSPROC_LINK] = {
+ .pc_func = (svc_procfunc) nfsd_proc_link,
+ .pc_decode = (kxdrproc_t) nfssvc_decode_linkargs,
+ .pc_encode = (kxdrproc_t) nfssvc_encode_void,
+ .pc_argsize = sizeof(struct nfsd_linkargs),
+ .pc_ressize = sizeof(struct nfsd_void),
+ .pc_cachetype = RC_REPLSTAT,
+ .pc_xdrressize = ST,
+ },
+ [NFSPROC_SYMLINK] = {
+ .pc_func = (svc_procfunc) nfsd_proc_symlink,
+ .pc_decode = (kxdrproc_t) nfssvc_decode_symlinkargs,
+ .pc_encode = (kxdrproc_t) nfssvc_encode_void,
+ .pc_argsize = sizeof(struct nfsd_symlinkargs),
+ .pc_ressize = sizeof(struct nfsd_void),
+ .pc_cachetype = RC_REPLSTAT,
+ .pc_xdrressize = ST,
+ },
+ [NFSPROC_MKDIR] = {
+ .pc_func = (svc_procfunc) nfsd_proc_mkdir,
+ .pc_decode = (kxdrproc_t) nfssvc_decode_createargs,
+ .pc_encode = (kxdrproc_t) nfssvc_encode_diropres,
+ .pc_release = (kxdrproc_t) nfssvc_release_fhandle,
+ .pc_argsize = sizeof(struct nfsd_createargs),
+ .pc_ressize = sizeof(struct nfsd_diropres),
+ .pc_cachetype = RC_REPLBUFF,
+ .pc_xdrressize = ST+FH+AT,
+ },
+ [NFSPROC_RMDIR] = {
+ .pc_func = (svc_procfunc) nfsd_proc_rmdir,
+ .pc_decode = (kxdrproc_t) nfssvc_decode_diropargs,
+ .pc_encode = (kxdrproc_t) nfssvc_encode_void,
+ .pc_argsize = sizeof(struct nfsd_diropargs),
+ .pc_ressize = sizeof(struct nfsd_void),
+ .pc_cachetype = RC_REPLSTAT,
+ .pc_xdrressize = ST,
+ },
+ [NFSPROC_READDIR] = {
+ .pc_func = (svc_procfunc) nfsd_proc_readdir,
+ .pc_decode = (kxdrproc_t) nfssvc_decode_readdirargs,
+ .pc_encode = (kxdrproc_t) nfssvc_encode_readdirres,
+ .pc_argsize = sizeof(struct nfsd_readdirargs),
+ .pc_ressize = sizeof(struct nfsd_readdirres),
+ .pc_cachetype = RC_NOCACHE,
+ },
+ [NFSPROC_STATFS] = {
+ .pc_func = (svc_procfunc) nfsd_proc_statfs,
+ .pc_decode = (kxdrproc_t) nfssvc_decode_fhandle,
+ .pc_encode = (kxdrproc_t) nfssvc_encode_statfsres,
+ .pc_argsize = sizeof(struct nfsd_fhandle),
+ .pc_ressize = sizeof(struct nfsd_statfsres),
+ .pc_cachetype = RC_NOCACHE,
+ .pc_xdrressize = ST+5,
+ },
};
diff --git a/fs/nfsd/nfssvc.c b/fs/nfsd/nfssvc.c
index cbba4a9..d4c9884 100644
--- a/fs/nfsd/nfssvc.c
+++ b/fs/nfsd/nfssvc.c
@@ -390,12 +390,14 @@
mutex_lock(&nfsd_mutex);
dprintk("nfsd: creating service\n");
- error = -EINVAL;
if (nrservs <= 0)
nrservs = 0;
if (nrservs > NFSD_MAXSERVS)
nrservs = NFSD_MAXSERVS;
-
+ error = 0;
+ if (nrservs == 0 && nfsd_serv == NULL)
+ goto out;
+
/* Readahead param cache - will no-op if it already exists */
error = nfsd_racache_init(2*nrservs);
if (error<0)
@@ -413,6 +415,12 @@
goto failure;
error = svc_set_num_threads(nfsd_serv, NULL, nrservs);
+ if (error == 0)
+ /* We are holding a reference to nfsd_serv which
+ * we don't want to count in the return value,
+ * so subtract 1
+ */
+ error = nfsd_serv->sv_nrthreads - 1;
failure:
svc_destroy(nfsd_serv); /* Release server */
out:
diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
index 99f8357..4145083 100644
--- a/fs/nfsd/vfs.c
+++ b/fs/nfsd/vfs.c
@@ -966,6 +966,43 @@
mutex_unlock(&dentry->d_inode->i_mutex);
}
+/*
+ * Gathered writes: If another process is currently writing to the file,
+ * there's a high chance this is another nfsd (triggered by a bulk write
+ * from a client's biod). Rather than syncing the file with each write
+ * request, we sleep for 10 msec.
+ *
+ * I don't know if this roughly approximates C. Juszak's idea of
+ * gathered writes, but it's a nice and simple solution (IMHO), and it
+ * seems to work:-)
+ *
+ * Note: we do this only in the NFSv2 case, since v3 and higher have a
+ * better tool (separate unstable writes and commits) for solving this
+ * problem.
+ */
+static int wait_for_concurrent_writes(struct file *file)
+{
+ struct inode *inode = file->f_path.dentry->d_inode;
+ static ino_t last_ino;
+ static dev_t last_dev;
+ int err = 0;
+
+ if (atomic_read(&inode->i_writecount) > 1
+ || (last_ino == inode->i_ino && last_dev == inode->i_sb->s_dev)) {
+ dprintk("nfsd: write defer %d\n", task_pid_nr(current));
+ msleep(10);
+ dprintk("nfsd: write resume %d\n", task_pid_nr(current));
+ }
+
+ if (inode->i_state & I_DIRTY) {
+ dprintk("nfsd: write sync %d\n", task_pid_nr(current));
+ err = nfsd_sync(file);
+ }
+ last_ino = inode->i_ino;
+ last_dev = inode->i_sb->s_dev;
+ return err;
+}
+
static __be32
nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
loff_t offset, struct kvec *vec, int vlen,
@@ -978,6 +1015,7 @@
__be32 err = 0;
int host_err;
int stable = *stablep;
+ int use_wgather;
#ifdef MSNFS
err = nfserr_perm;
@@ -996,9 +1034,10 @@
* - the sync export option has been set, or
* - the client requested O_SYNC behavior (NFSv3 feature).
* - The file system doesn't support fsync().
- * When gathered writes have been configured for this volume,
+ * When NFSv2 gathered writes have been configured for this volume,
* flushing the data to disk is handled separately below.
*/
+ use_wgather = (rqstp->rq_vers == 2) && EX_WGATHER(exp);
if (!file->f_op->fsync) {/* COMMIT3 cannot work */
stable = 2;
@@ -1007,7 +1046,7 @@
if (!EX_ISSYNC(exp))
stable = 0;
- if (stable && !EX_WGATHER(exp)) {
+ if (stable && !use_wgather) {
spin_lock(&file->f_lock);
file->f_flags |= O_SYNC;
spin_unlock(&file->f_lock);
@@ -1017,52 +1056,20 @@
oldfs = get_fs(); set_fs(KERNEL_DS);
host_err = vfs_writev(file, (struct iovec __user *)vec, vlen, &offset);
set_fs(oldfs);
- if (host_err >= 0) {
- *cnt = host_err;
- nfsdstats.io_write += host_err;
- fsnotify_modify(file->f_path.dentry);
- }
+ if (host_err < 0)
+ goto out_nfserr;
+ *cnt = host_err;
+ nfsdstats.io_write += host_err;
+ fsnotify_modify(file->f_path.dentry);
/* clear setuid/setgid flag after write */
- if (host_err >= 0 && (inode->i_mode & (S_ISUID | S_ISGID)))
+ if (inode->i_mode & (S_ISUID | S_ISGID))
kill_suid(dentry);
- if (host_err >= 0 && stable) {
- static ino_t last_ino;
- static dev_t last_dev;
+ if (stable && use_wgather)
+ host_err = wait_for_concurrent_writes(file);
- /*
- * Gathered writes: If another process is currently
- * writing to the file, there's a high chance
- * this is another nfsd (triggered by a bulk write
- * from a client's biod). Rather than syncing the
- * file with each write request, we sleep for 10 msec.
- *
- * I don't know if this roughly approximates
- * C. Juszak's idea of gathered writes, but it's a
- * nice and simple solution (IMHO), and it seems to
- * work:-)
- */
- if (EX_WGATHER(exp)) {
- if (atomic_read(&inode->i_writecount) > 1
- || (last_ino == inode->i_ino && last_dev == inode->i_sb->s_dev)) {
- dprintk("nfsd: write defer %d\n", task_pid_nr(current));
- msleep(10);
- dprintk("nfsd: write resume %d\n", task_pid_nr(current));
- }
-
- if (inode->i_state & I_DIRTY) {
- dprintk("nfsd: write sync %d\n", task_pid_nr(current));
- host_err=nfsd_sync(file);
- }
-#if 0
- wake_up(&inode->i_wait);
-#endif
- }
- last_ino = inode->i_ino;
- last_dev = inode->i_sb->s_dev;
- }
-
+out_nfserr:
dprintk("nfsd: write complete host_err=%d\n", host_err);
if (host_err >= 0)
err = 0;
diff --git a/fs/nilfs2/inode.c b/fs/nilfs2/inode.c
index 2696d6b..fe9d8f2 100644
--- a/fs/nilfs2/inode.c
+++ b/fs/nilfs2/inode.c
@@ -309,10 +309,6 @@
/* ii->i_file_acl = 0; */
/* ii->i_dir_acl = 0; */
ii->i_dir_start_lookup = 0;
-#ifdef CONFIG_NILFS_FS_POSIX_ACL
- ii->i_acl = NULL;
- ii->i_default_acl = NULL;
-#endif
ii->i_cno = 0;
nilfs_set_inode_flags(inode);
spin_lock(&sbi->s_next_gen_lock);
@@ -434,10 +430,6 @@
raw_inode = nilfs_ifile_map_inode(sbi->s_ifile, ino, bh);
-#ifdef CONFIG_NILFS_FS_POSIX_ACL
- ii->i_acl = NILFS_ACL_NOT_CACHED;
- ii->i_default_acl = NILFS_ACL_NOT_CACHED;
-#endif
if (nilfs_read_inode_common(inode, raw_inode))
goto failed_unmap;
diff --git a/fs/nilfs2/nilfs.h b/fs/nilfs2/nilfs.h
index edf6a59..724c637 100644
--- a/fs/nilfs2/nilfs.h
+++ b/fs/nilfs2/nilfs.h
@@ -58,10 +58,6 @@
*/
struct rw_semaphore xattr_sem;
#endif
-#ifdef CONFIG_NILFS_POSIX_ACL
- struct posix_acl *i_acl;
- struct posix_acl *i_default_acl;
-#endif
struct buffer_head *i_bh; /* i_bh contains a new or dirty
disk inode */
struct inode vfs_inode;
diff --git a/fs/nilfs2/super.c b/fs/nilfs2/super.c
index ab785f8..8e2ec43 100644
--- a/fs/nilfs2/super.c
+++ b/fs/nilfs2/super.c
@@ -189,16 +189,6 @@
{
struct nilfs_inode_info *ii = NILFS_I(inode);
-#ifdef CONFIG_NILFS_POSIX_ACL
- if (ii->i_acl && ii->i_acl != NILFS_ACL_NOT_CACHED) {
- posix_acl_release(ii->i_acl);
- ii->i_acl = NILFS_ACL_NOT_CACHED;
- }
- if (ii->i_default_acl && ii->i_default_acl != NILFS_ACL_NOT_CACHED) {
- posix_acl_release(ii->i_default_acl);
- ii->i_default_acl = NILFS_ACL_NOT_CACHED;
- }
-#endif
/*
* Free resources allocated in nilfs_read_inode(), here.
*/
diff --git a/fs/ocfs2/dlmglue.c b/fs/ocfs2/dlmglue.c
index 6cdeaa7..110bb57 100644
--- a/fs/ocfs2/dlmglue.c
+++ b/fs/ocfs2/dlmglue.c
@@ -92,6 +92,9 @@
enum ocfs2_unblock_action unblock_action;
};
+/* Lockdep class keys */
+struct lock_class_key lockdep_keys[OCFS2_NUM_LOCK_TYPES];
+
static int ocfs2_check_meta_downconvert(struct ocfs2_lock_res *lockres,
int new_level);
static void ocfs2_set_meta_lvb(struct ocfs2_lock_res *lockres);
@@ -317,9 +320,16 @@
u32 dlm_flags);
static inline int ocfs2_may_continue_on_blocked_lock(struct ocfs2_lock_res *lockres,
int wanted);
-static void ocfs2_cluster_unlock(struct ocfs2_super *osb,
- struct ocfs2_lock_res *lockres,
- int level);
+static void __ocfs2_cluster_unlock(struct ocfs2_super *osb,
+ struct ocfs2_lock_res *lockres,
+ int level, unsigned long caller_ip);
+static inline void ocfs2_cluster_unlock(struct ocfs2_super *osb,
+ struct ocfs2_lock_res *lockres,
+ int level)
+{
+ __ocfs2_cluster_unlock(osb, lockres, level, _RET_IP_);
+}
+
static inline void ocfs2_generic_handle_downconvert_action(struct ocfs2_lock_res *lockres);
static inline void ocfs2_generic_handle_convert_action(struct ocfs2_lock_res *lockres);
static inline void ocfs2_generic_handle_attach_action(struct ocfs2_lock_res *lockres);
@@ -489,6 +499,13 @@
ocfs2_add_lockres_tracking(res, osb->osb_dlm_debug);
ocfs2_init_lock_stats(res);
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+ if (type != OCFS2_LOCK_TYPE_OPEN)
+ lockdep_init_map(&res->l_lockdep_map, ocfs2_lock_type_strings[type],
+ &lockdep_keys[type], 0);
+ else
+ res->l_lockdep_map.key = NULL;
+#endif
}
void ocfs2_lock_res_init_once(struct ocfs2_lock_res *res)
@@ -644,14 +661,10 @@
static void ocfs2_orphan_scan_lock_res_init(struct ocfs2_lock_res *res,
struct ocfs2_super *osb)
{
- struct ocfs2_orphan_scan_lvb *lvb;
-
ocfs2_lock_res_init_once(res);
ocfs2_build_lock_name(OCFS2_LOCK_TYPE_ORPHAN_SCAN, 0, 0, res->l_name);
ocfs2_lock_res_init_common(osb, res, OCFS2_LOCK_TYPE_ORPHAN_SCAN,
&ocfs2_orphan_scan_lops, osb);
- lvb = ocfs2_dlm_lvb(&res->l_lksb);
- lvb->lvb_version = OCFS2_ORPHAN_LVB_VERSION;
}
void ocfs2_file_lock_res_init(struct ocfs2_lock_res *lockres,
@@ -1256,11 +1269,13 @@
return ret;
}
-static int ocfs2_cluster_lock(struct ocfs2_super *osb,
- struct ocfs2_lock_res *lockres,
- int level,
- u32 lkm_flags,
- int arg_flags)
+static int __ocfs2_cluster_lock(struct ocfs2_super *osb,
+ struct ocfs2_lock_res *lockres,
+ int level,
+ u32 lkm_flags,
+ int arg_flags,
+ int l_subclass,
+ unsigned long caller_ip)
{
struct ocfs2_mask_waiter mw;
int wait, catch_signals = !(osb->s_mount_opt & OCFS2_MOUNT_NOINTR);
@@ -1403,13 +1418,37 @@
}
ocfs2_update_lock_stats(lockres, level, &mw, ret);
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+ if (!ret && lockres->l_lockdep_map.key != NULL) {
+ if (level == DLM_LOCK_PR)
+ rwsem_acquire_read(&lockres->l_lockdep_map, l_subclass,
+ !!(arg_flags & OCFS2_META_LOCK_NOQUEUE),
+ caller_ip);
+ else
+ rwsem_acquire(&lockres->l_lockdep_map, l_subclass,
+ !!(arg_flags & OCFS2_META_LOCK_NOQUEUE),
+ caller_ip);
+ }
+#endif
mlog_exit(ret);
return ret;
}
-static void ocfs2_cluster_unlock(struct ocfs2_super *osb,
- struct ocfs2_lock_res *lockres,
- int level)
+static inline int ocfs2_cluster_lock(struct ocfs2_super *osb,
+ struct ocfs2_lock_res *lockres,
+ int level,
+ u32 lkm_flags,
+ int arg_flags)
+{
+ return __ocfs2_cluster_lock(osb, lockres, level, lkm_flags, arg_flags,
+ 0, _RET_IP_);
+}
+
+
+static void __ocfs2_cluster_unlock(struct ocfs2_super *osb,
+ struct ocfs2_lock_res *lockres,
+ int level,
+ unsigned long caller_ip)
{
unsigned long flags;
@@ -1418,6 +1457,10 @@
ocfs2_dec_holders(lockres, level);
ocfs2_downconvert_on_unlock(osb, lockres);
spin_unlock_irqrestore(&lockres->l_lock, flags);
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+ if (lockres->l_lockdep_map.key != NULL)
+ rwsem_release(&lockres->l_lockdep_map, 1, caller_ip);
+#endif
mlog_exit_void();
}
@@ -1989,7 +2032,8 @@
{
struct ocfs2_meta_lvb *lvb = ocfs2_dlm_lvb(&lockres->l_lksb);
- if (lvb->lvb_version == OCFS2_LVB_VERSION
+ if (ocfs2_dlm_lvb_valid(&lockres->l_lksb)
+ && lvb->lvb_version == OCFS2_LVB_VERSION
&& be32_to_cpu(lvb->lvb_igeneration) == inode->i_generation)
return 1;
return 0;
@@ -2162,10 +2206,11 @@
* returns < 0 error if the callback will never be called, otherwise
* the result of the lock will be communicated via the callback.
*/
-int ocfs2_inode_lock_full(struct inode *inode,
- struct buffer_head **ret_bh,
- int ex,
- int arg_flags)
+int ocfs2_inode_lock_full_nested(struct inode *inode,
+ struct buffer_head **ret_bh,
+ int ex,
+ int arg_flags,
+ int subclass)
{
int status, level, acquired;
u32 dlm_flags;
@@ -2203,7 +2248,8 @@
if (arg_flags & OCFS2_META_LOCK_NOQUEUE)
dlm_flags |= DLM_LKF_NOQUEUE;
- status = ocfs2_cluster_lock(osb, lockres, level, dlm_flags, arg_flags);
+ status = __ocfs2_cluster_lock(osb, lockres, level, dlm_flags,
+ arg_flags, subclass, _RET_IP_);
if (status < 0) {
if (status != -EAGAIN && status != -EIOCBRETRY)
mlog_errno(status);
@@ -2369,35 +2415,45 @@
mlog_exit_void();
}
-int ocfs2_orphan_scan_lock(struct ocfs2_super *osb, u32 *seqno, int ex)
+int ocfs2_orphan_scan_lock(struct ocfs2_super *osb, u32 *seqno)
{
struct ocfs2_lock_res *lockres;
struct ocfs2_orphan_scan_lvb *lvb;
- int level = ex ? DLM_LOCK_EX : DLM_LOCK_PR;
int status = 0;
+ if (ocfs2_is_hard_readonly(osb))
+ return -EROFS;
+
+ if (ocfs2_mount_local(osb))
+ return 0;
+
lockres = &osb->osb_orphan_scan.os_lockres;
- status = ocfs2_cluster_lock(osb, lockres, level, 0, 0);
+ status = ocfs2_cluster_lock(osb, lockres, DLM_LOCK_EX, 0, 0);
if (status < 0)
return status;
lvb = ocfs2_dlm_lvb(&lockres->l_lksb);
- if (lvb->lvb_version == OCFS2_ORPHAN_LVB_VERSION)
+ if (ocfs2_dlm_lvb_valid(&lockres->l_lksb) &&
+ lvb->lvb_version == OCFS2_ORPHAN_LVB_VERSION)
*seqno = be32_to_cpu(lvb->lvb_os_seqno);
+ else
+ *seqno = osb->osb_orphan_scan.os_seqno + 1;
+
return status;
}
-void ocfs2_orphan_scan_unlock(struct ocfs2_super *osb, u32 seqno, int ex)
+void ocfs2_orphan_scan_unlock(struct ocfs2_super *osb, u32 seqno)
{
struct ocfs2_lock_res *lockres;
struct ocfs2_orphan_scan_lvb *lvb;
- int level = ex ? DLM_LOCK_EX : DLM_LOCK_PR;
- lockres = &osb->osb_orphan_scan.os_lockres;
- lvb = ocfs2_dlm_lvb(&lockres->l_lksb);
- lvb->lvb_version = OCFS2_ORPHAN_LVB_VERSION;
- lvb->lvb_os_seqno = cpu_to_be32(seqno);
- ocfs2_cluster_unlock(osb, lockres, level);
+ if (!ocfs2_is_hard_readonly(osb) && !ocfs2_mount_local(osb)) {
+ lockres = &osb->osb_orphan_scan.os_lockres;
+ lvb = ocfs2_dlm_lvb(&lockres->l_lksb);
+ lvb->lvb_version = OCFS2_ORPHAN_LVB_VERSION;
+ lvb->lvb_os_seqno = cpu_to_be32(seqno);
+ ocfs2_cluster_unlock(osb, lockres, DLM_LOCK_EX);
+ }
}
int ocfs2_super_lock(struct ocfs2_super *osb,
@@ -3627,7 +3683,8 @@
struct ocfs2_global_disk_dqinfo *gdinfo;
int status = 0;
- if (lvb->lvb_version == OCFS2_QINFO_LVB_VERSION) {
+ if (ocfs2_dlm_lvb_valid(&lockres->l_lksb) &&
+ lvb->lvb_version == OCFS2_QINFO_LVB_VERSION) {
info->dqi_bgrace = be32_to_cpu(lvb->lvb_bgrace);
info->dqi_igrace = be32_to_cpu(lvb->lvb_igrace);
oinfo->dqi_syncms = be32_to_cpu(lvb->lvb_syncms);
diff --git a/fs/ocfs2/dlmglue.h b/fs/ocfs2/dlmglue.h
index 31b90d7..7553836 100644
--- a/fs/ocfs2/dlmglue.h
+++ b/fs/ocfs2/dlmglue.h
@@ -78,6 +78,14 @@
/* don't block waiting for the downconvert thread, instead return -EAGAIN */
#define OCFS2_LOCK_NONBLOCK (0x04)
+/* Locking subclasses of inode cluster lock */
+enum {
+ OI_LS_NORMAL = 0,
+ OI_LS_PARENT,
+ OI_LS_RENAME1,
+ OI_LS_RENAME2,
+};
+
int ocfs2_dlm_init(struct ocfs2_super *osb);
void ocfs2_dlm_shutdown(struct ocfs2_super *osb, int hangup_pending);
void ocfs2_lock_res_init_once(struct ocfs2_lock_res *res);
@@ -104,25 +112,31 @@
int ocfs2_inode_lock_atime(struct inode *inode,
struct vfsmount *vfsmnt,
int *level);
-int ocfs2_inode_lock_full(struct inode *inode,
+int ocfs2_inode_lock_full_nested(struct inode *inode,
struct buffer_head **ret_bh,
int ex,
- int arg_flags);
+ int arg_flags,
+ int subclass);
int ocfs2_inode_lock_with_page(struct inode *inode,
struct buffer_head **ret_bh,
int ex,
struct page *page);
+/* Variants without special locking class or flags */
+#define ocfs2_inode_lock_full(i, r, e, f)\
+ ocfs2_inode_lock_full_nested(i, r, e, f, OI_LS_NORMAL)
+#define ocfs2_inode_lock_nested(i, b, e, s)\
+ ocfs2_inode_lock_full_nested(i, b, e, 0, s)
/* 99% of the time we don't want to supply any additional flags --
* those are for very specific cases only. */
-#define ocfs2_inode_lock(i, b, e) ocfs2_inode_lock_full(i, b, e, 0)
+#define ocfs2_inode_lock(i, b, e) ocfs2_inode_lock_full_nested(i, b, e, 0, OI_LS_NORMAL)
void ocfs2_inode_unlock(struct inode *inode,
int ex);
int ocfs2_super_lock(struct ocfs2_super *osb,
int ex);
void ocfs2_super_unlock(struct ocfs2_super *osb,
int ex);
-int ocfs2_orphan_scan_lock(struct ocfs2_super *osb, u32 *seqno, int ex);
-void ocfs2_orphan_scan_unlock(struct ocfs2_super *osb, u32 seqno, int ex);
+int ocfs2_orphan_scan_lock(struct ocfs2_super *osb, u32 *seqno);
+void ocfs2_orphan_scan_unlock(struct ocfs2_super *osb, u32 seqno);
int ocfs2_rename_lock(struct ocfs2_super *osb);
void ocfs2_rename_unlock(struct ocfs2_super *osb);
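A minimal sketch (not part of the patch) of how the new subclasses are intended to be used, mirroring the namei.c hunks later in this merge; the helper function is hypothetical:

static int example_lock_parent_then_child(struct inode *dir,
					   struct inode *inode)
{
	struct buffer_head *dir_bh = NULL, *bh = NULL;
	int status;

	/* The parent directory's cluster lock gets its own lockdep subclass... */
	status = ocfs2_inode_lock_nested(dir, &dir_bh, 1, OI_LS_PARENT);
	if (status < 0)
		return status;

	/* ...so taking the child's lock (default OI_LS_NORMAL class) does not
	 * look to lockdep like a recursive acquisition of the same class. */
	status = ocfs2_inode_lock(inode, &bh, 1);
	if (status < 0) {
		ocfs2_inode_unlock(dir, 1);
		brelse(dir_bh);
	}

	/* On success the caller drops both cluster locks and brelse()s the
	 * buffer heads once it is done with the metadata. */
	return status;
}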
diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c
index 07267e0..62442e4 100644
--- a/fs/ocfs2/file.c
+++ b/fs/ocfs2/file.c
@@ -2026,7 +2026,7 @@
size_t len,
unsigned int flags)
{
- int ret = 0;
+ int ret = 0, lock_level = 0;
struct inode *inode = in->f_path.dentry->d_inode;
mlog_entry("(0x%p, 0x%p, %u, '%.*s')\n", in, pipe,
@@ -2037,12 +2037,12 @@
/*
* See the comment in ocfs2_file_aio_read()
*/
- ret = ocfs2_inode_lock(inode, NULL, 0);
+ ret = ocfs2_inode_lock_atime(inode, in->f_vfsmnt, &lock_level);
if (ret < 0) {
mlog_errno(ret);
goto bail;
}
- ocfs2_inode_unlock(inode, 0);
+ ocfs2_inode_unlock(inode, lock_level);
ret = generic_file_splice_read(in, ppos, pipe, len, flags);
diff --git a/fs/ocfs2/inode.c b/fs/ocfs2/inode.c
index 10e1fa8..4dc8890 100644
--- a/fs/ocfs2/inode.c
+++ b/fs/ocfs2/inode.c
@@ -215,6 +215,8 @@
static int ocfs2_init_locked_inode(struct inode *inode, void *opaque)
{
struct ocfs2_find_inode_args *args = opaque;
+ static struct lock_class_key ocfs2_quota_ip_alloc_sem_key,
+ ocfs2_file_ip_alloc_sem_key;
mlog_entry("inode = %p, opaque = %p\n", inode, opaque);
@@ -223,6 +225,15 @@
if (args->fi_sysfile_type != 0)
lockdep_set_class(&inode->i_mutex,
&ocfs2_sysfile_lock_key[args->fi_sysfile_type]);
+ if (args->fi_sysfile_type == USER_QUOTA_SYSTEM_INODE ||
+ args->fi_sysfile_type == GROUP_QUOTA_SYSTEM_INODE ||
+ args->fi_sysfile_type == LOCAL_USER_QUOTA_SYSTEM_INODE ||
+ args->fi_sysfile_type == LOCAL_GROUP_QUOTA_SYSTEM_INODE)
+ lockdep_set_class(&OCFS2_I(inode)->ip_alloc_sem,
+ &ocfs2_quota_ip_alloc_sem_key);
+ else
+ lockdep_set_class(&OCFS2_I(inode)->ip_alloc_sem,
+ &ocfs2_file_ip_alloc_sem_key);
mlog_exit(0);
return 0;
diff --git a/fs/ocfs2/journal.c b/fs/ocfs2/journal.c
index 4a3b9e6..f033760 100644
--- a/fs/ocfs2/journal.c
+++ b/fs/ocfs2/journal.c
@@ -1880,13 +1880,20 @@
os = &osb->osb_orphan_scan;
- status = ocfs2_orphan_scan_lock(osb, &seqno, DLM_LOCK_EX);
+ if (atomic_read(&os->os_state) == ORPHAN_SCAN_INACTIVE)
+ goto out;
+
+ status = ocfs2_orphan_scan_lock(osb, &seqno);
if (status < 0) {
if (status != -EAGAIN)
mlog_errno(status);
goto out;
}
+ /* Do not queue the tasks if the volume is being unmounted */
+ if (atomic_read(&os->os_state) == ORPHAN_SCAN_INACTIVE)
+ goto unlock;
+
if (os->os_seqno != seqno) {
os->os_seqno = seqno;
goto unlock;
@@ -1903,7 +1910,7 @@
os->os_count++;
os->os_scantime = CURRENT_TIME;
unlock:
- ocfs2_orphan_scan_unlock(osb, seqno, DLM_LOCK_EX);
+ ocfs2_orphan_scan_unlock(osb, seqno);
out:
return;
}
@@ -1920,8 +1927,9 @@
mutex_lock(&os->os_lock);
ocfs2_queue_orphan_scan(osb);
- schedule_delayed_work(&os->os_orphan_scan_work,
- ocfs2_orphan_scan_timeout());
+ if (atomic_read(&os->os_state) == ORPHAN_SCAN_ACTIVE)
+ schedule_delayed_work(&os->os_orphan_scan_work,
+ ocfs2_orphan_scan_timeout());
mutex_unlock(&os->os_lock);
}
@@ -1930,26 +1938,33 @@
struct ocfs2_orphan_scan *os;
os = &osb->osb_orphan_scan;
- mutex_lock(&os->os_lock);
- cancel_delayed_work(&os->os_orphan_scan_work);
- mutex_unlock(&os->os_lock);
+ if (atomic_read(&os->os_state) == ORPHAN_SCAN_ACTIVE) {
+ atomic_set(&os->os_state, ORPHAN_SCAN_INACTIVE);
+ mutex_lock(&os->os_lock);
+ cancel_delayed_work(&os->os_orphan_scan_work);
+ mutex_unlock(&os->os_lock);
+ }
}
-int ocfs2_orphan_scan_init(struct ocfs2_super *osb)
+void ocfs2_orphan_scan_init(struct ocfs2_super *osb)
{
struct ocfs2_orphan_scan *os;
os = &osb->osb_orphan_scan;
os->os_osb = osb;
os->os_count = 0;
+ os->os_seqno = 0;
os->os_scantime = CURRENT_TIME;
mutex_init(&os->os_lock);
+ INIT_DELAYED_WORK(&os->os_orphan_scan_work, ocfs2_orphan_scan_work);
- INIT_DELAYED_WORK(&os->os_orphan_scan_work,
- ocfs2_orphan_scan_work);
- schedule_delayed_work(&os->os_orphan_scan_work,
- ocfs2_orphan_scan_timeout());
- return 0;
+ if (ocfs2_is_hard_readonly(osb) || ocfs2_mount_local(osb))
+ atomic_set(&os->os_state, ORPHAN_SCAN_INACTIVE);
+ else {
+ atomic_set(&os->os_state, ORPHAN_SCAN_ACTIVE);
+ schedule_delayed_work(&os->os_orphan_scan_work,
+ ocfs2_orphan_scan_timeout());
+ }
}
struct ocfs2_orphan_filldir_priv {
diff --git a/fs/ocfs2/journal.h b/fs/ocfs2/journal.h
index 61045ee..5432c7f 100644
--- a/fs/ocfs2/journal.h
+++ b/fs/ocfs2/journal.h
@@ -144,7 +144,7 @@
}
/* Exported only for the journal struct init code in super.c. Do not call. */
-int ocfs2_orphan_scan_init(struct ocfs2_super *osb);
+void ocfs2_orphan_scan_init(struct ocfs2_super *osb);
void ocfs2_orphan_scan_stop(struct ocfs2_super *osb);
void ocfs2_orphan_scan_exit(struct ocfs2_super *osb);
diff --git a/fs/ocfs2/namei.c b/fs/ocfs2/namei.c
index 33464c6b..8601f93 100644
--- a/fs/ocfs2/namei.c
+++ b/fs/ocfs2/namei.c
@@ -118,7 +118,7 @@
mlog(0, "find name %.*s in directory %llu\n", dentry->d_name.len,
dentry->d_name.name, (unsigned long long)OCFS2_I(dir)->ip_blkno);
- status = ocfs2_inode_lock(dir, NULL, 0);
+ status = ocfs2_inode_lock_nested(dir, NULL, 0, OI_LS_PARENT);
if (status < 0) {
if (status != -ENOENT)
mlog_errno(status);
@@ -636,7 +636,7 @@
if (S_ISDIR(inode->i_mode))
return -EPERM;
- err = ocfs2_inode_lock(dir, &parent_fe_bh, 1);
+ err = ocfs2_inode_lock_nested(dir, &parent_fe_bh, 1, OI_LS_PARENT);
if (err < 0) {
if (err != -ENOENT)
mlog_errno(err);
@@ -800,7 +800,8 @@
return -EPERM;
}
- status = ocfs2_inode_lock(dir, &parent_node_bh, 1);
+ status = ocfs2_inode_lock_nested(dir, &parent_node_bh, 1,
+ OI_LS_PARENT);
if (status < 0) {
if (status != -ENOENT)
mlog_errno(status);
@@ -978,7 +979,8 @@
inode1 = tmpinode;
}
/* lock id2 */
- status = ocfs2_inode_lock(inode2, bh2, 1);
+ status = ocfs2_inode_lock_nested(inode2, bh2, 1,
+ OI_LS_RENAME1);
if (status < 0) {
if (status != -ENOENT)
mlog_errno(status);
@@ -987,7 +989,7 @@
}
/* lock id1 */
- status = ocfs2_inode_lock(inode1, bh1, 1);
+ status = ocfs2_inode_lock_nested(inode1, bh1, 1, OI_LS_RENAME2);
if (status < 0) {
/*
* An error return must mean that no cluster locks
@@ -1103,7 +1105,8 @@
* won't have to concurrently downconvert the inode and the
* dentry locks.
*/
- status = ocfs2_inode_lock(old_inode, &old_inode_bh, 1);
+ status = ocfs2_inode_lock_nested(old_inode, &old_inode_bh, 1,
+ OI_LS_PARENT);
if (status < 0) {
if (status != -ENOENT)
mlog_errno(status);
diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h
index 18c1d9e..c9345eb 100644
--- a/fs/ocfs2/ocfs2.h
+++ b/fs/ocfs2/ocfs2.h
@@ -34,6 +34,7 @@
#include <linux/workqueue.h>
#include <linux/kref.h>
#include <linux/mutex.h>
+#include <linux/lockdep.h>
#ifndef CONFIG_OCFS2_COMPAT_JBD
# include <linux/jbd2.h>
#else
@@ -152,6 +153,14 @@
unsigned int l_lock_max_exmode; /* Max wait for EX */
unsigned int l_lock_refresh; /* Disk refreshes */
#endif
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+ struct lockdep_map l_lockdep_map;
+#endif
+};
+
+enum ocfs2_orphan_scan_state {
+ ORPHAN_SCAN_ACTIVE,
+ ORPHAN_SCAN_INACTIVE
};
struct ocfs2_orphan_scan {
@@ -162,6 +171,7 @@
struct timespec os_scantime; /* time this node ran the scan */
u32 os_count; /* tracks node specific scans */
u32 os_seqno; /* tracks cluster wide scans */
+ atomic_t os_state; /* ACTIVE or INACTIVE */
};
struct ocfs2_dlm_debug {
diff --git a/fs/ocfs2/stack_o2cb.c b/fs/ocfs2/stack_o2cb.c
index fcd120f..3f66137 100644
--- a/fs/ocfs2/stack_o2cb.c
+++ b/fs/ocfs2/stack_o2cb.c
@@ -236,6 +236,16 @@
return dlm_status_to_errno(lksb->lksb_o2dlm.status);
}
+/*
+ * o2dlm always has a "valid" LVB. If the dlm loses track of the LVB
+ * contents, it will zero out the LVB. Thus the caller can always trust
+ * the contents.
+ */
+static int o2cb_dlm_lvb_valid(union ocfs2_dlm_lksb *lksb)
+{
+ return 1;
+}
+
static void *o2cb_dlm_lvb(union ocfs2_dlm_lksb *lksb)
{
return (void *)(lksb->lksb_o2dlm.lvb);
@@ -354,6 +364,7 @@
.dlm_lock = o2cb_dlm_lock,
.dlm_unlock = o2cb_dlm_unlock,
.lock_status = o2cb_dlm_lock_status,
+ .lvb_valid = o2cb_dlm_lvb_valid,
.lock_lvb = o2cb_dlm_lvb,
.dump_lksb = o2cb_dump_lksb,
};
diff --git a/fs/ocfs2/stack_user.c b/fs/ocfs2/stack_user.c
index 9b76d41..ff4c798 100644
--- a/fs/ocfs2/stack_user.c
+++ b/fs/ocfs2/stack_user.c
@@ -738,6 +738,13 @@
return lksb->lksb_fsdlm.sb_status;
}
+static int user_dlm_lvb_valid(union ocfs2_dlm_lksb *lksb)
+{
+ int invalid = lksb->lksb_fsdlm.sb_flags & DLM_SBF_VALNOTVALID;
+
+ return !invalid;
+}
+
static void *user_dlm_lvb(union ocfs2_dlm_lksb *lksb)
{
if (!lksb->lksb_fsdlm.sb_lvbptr)
@@ -873,6 +880,7 @@
.dlm_lock = user_dlm_lock,
.dlm_unlock = user_dlm_unlock,
.lock_status = user_dlm_lock_status,
+ .lvb_valid = user_dlm_lvb_valid,
.lock_lvb = user_dlm_lvb,
.plock = user_plock,
.dump_lksb = user_dlm_dump_lksb,
diff --git a/fs/ocfs2/stackglue.c b/fs/ocfs2/stackglue.c
index 68b668b..3f2f1c4 100644
--- a/fs/ocfs2/stackglue.c
+++ b/fs/ocfs2/stackglue.c
@@ -6,7 +6,7 @@
* Code which implements an OCFS2 specific interface to underlying
* cluster stacks.
*
- * Copyright (C) 2007 Oracle. All rights reserved.
+ * Copyright (C) 2007, 2009 Oracle. All rights reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public
@@ -271,11 +271,12 @@
}
EXPORT_SYMBOL_GPL(ocfs2_dlm_lock_status);
-/*
- * Why don't we cast to ocfs2_meta_lvb? The "clean" answer is that we
- * don't cast at the glue level. The real answer is that the header
- * ordering is nigh impossible.
- */
+int ocfs2_dlm_lvb_valid(union ocfs2_dlm_lksb *lksb)
+{
+ return active_stack->sp_ops->lvb_valid(lksb);
+}
+EXPORT_SYMBOL_GPL(ocfs2_dlm_lvb_valid);
+
void *ocfs2_dlm_lvb(union ocfs2_dlm_lksb *lksb)
{
return active_stack->sp_ops->lock_lvb(lksb);
diff --git a/fs/ocfs2/stackglue.h b/fs/ocfs2/stackglue.h
index c571af3..03a44d6 100644
--- a/fs/ocfs2/stackglue.h
+++ b/fs/ocfs2/stackglue.h
@@ -186,6 +186,11 @@
int (*lock_status)(union ocfs2_dlm_lksb *lksb);
/*
+ * Return non-zero if the LVB is valid.
+ */
+ int (*lvb_valid)(union ocfs2_dlm_lksb *lksb);
+
+ /*
* Pull the lvb pointer off of the stack-specific lksb.
*/
void *(*lock_lvb)(union ocfs2_dlm_lksb *lksb);
@@ -252,6 +257,7 @@
struct ocfs2_lock_res *astarg);
int ocfs2_dlm_lock_status(union ocfs2_dlm_lksb *lksb);
+int ocfs2_dlm_lvb_valid(union ocfs2_dlm_lksb *lksb);
void *ocfs2_dlm_lvb(union ocfs2_dlm_lksb *lksb);
void ocfs2_dlm_dump_lksb(union ocfs2_dlm_lksb *lksb);
diff --git a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c
index 8439f6b..73a16d4 100644
--- a/fs/ocfs2/suballoc.c
+++ b/fs/ocfs2/suballoc.c
@@ -923,14 +923,23 @@
int nr)
{
struct ocfs2_group_desc *bg = (struct ocfs2_group_desc *) bg_bh->b_data;
+ int ret;
if (ocfs2_test_bit(nr, (unsigned long *)bg->bg_bitmap))
return 0;
- if (!buffer_jbd(bg_bh) || !bh2jh(bg_bh)->b_committed_data)
+
+ if (!buffer_jbd(bg_bh))
return 1;
+ jbd_lock_bh_state(bg_bh);
bg = (struct ocfs2_group_desc *) bh2jh(bg_bh)->b_committed_data;
- return !ocfs2_test_bit(nr, (unsigned long *)bg->bg_bitmap);
+ if (bg)
+ ret = !ocfs2_test_bit(nr, (unsigned long *)bg->bg_bitmap);
+ else
+ ret = 1;
+ jbd_unlock_bh_state(bg_bh);
+
+ return ret;
}
static int ocfs2_block_group_find_clear_bits(struct ocfs2_super *osb,
@@ -1885,6 +1894,7 @@
unsigned int tmp;
int journal_type = OCFS2_JOURNAL_ACCESS_WRITE;
struct ocfs2_group_desc *undo_bg = NULL;
+ int cluster_bitmap = 0;
mlog_entry_void();
@@ -1905,18 +1915,28 @@
}
if (ocfs2_is_cluster_bitmap(alloc_inode))
- undo_bg = (struct ocfs2_group_desc *) bh2jh(group_bh)->b_committed_data;
+ cluster_bitmap = 1;
+
+ if (cluster_bitmap) {
+ jbd_lock_bh_state(group_bh);
+ undo_bg = (struct ocfs2_group_desc *)
+ bh2jh(group_bh)->b_committed_data;
+ BUG_ON(!undo_bg);
+ }
tmp = num_bits;
while(tmp--) {
ocfs2_clear_bit((bit_off + tmp),
(unsigned long *) bg->bg_bitmap);
- if (ocfs2_is_cluster_bitmap(alloc_inode))
+ if (cluster_bitmap)
ocfs2_set_bit(bit_off + tmp,
(unsigned long *) undo_bg->bg_bitmap);
}
le16_add_cpu(&bg->bg_free_bits_count, num_bits);
+ if (cluster_bitmap)
+ jbd_unlock_bh_state(group_bh);
+
status = ocfs2_journal_dirty(handle, group_bh);
if (status < 0)
mlog_errno(status);
diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
index 0d3ed74..7efb349 100644
--- a/fs/ocfs2/super.c
+++ b/fs/ocfs2/super.c
@@ -205,11 +205,10 @@
#ifdef CONFIG_DEBUG_FS
static int ocfs2_osb_dump(struct ocfs2_super *osb, char *buf, int len)
{
- int out = 0;
- int i;
struct ocfs2_cluster_connection *cconn = osb->cconn;
struct ocfs2_recovery_map *rm = osb->recovery_map;
- struct ocfs2_orphan_scan *os;
+ struct ocfs2_orphan_scan *os = &osb->osb_orphan_scan;
+ int i, out = 0;
out += snprintf(buf + out, len - out,
"%10s => Id: %-s Uuid: %-s Gen: 0x%X Label: %-s\n",
@@ -234,20 +233,24 @@
"%10s => Opts: 0x%lX AtimeQuanta: %u\n", "Mount",
osb->s_mount_opt, osb->s_atime_quantum);
- out += snprintf(buf + out, len - out,
- "%10s => Stack: %s Name: %*s Version: %d.%d\n",
- "Cluster",
- (*osb->osb_cluster_stack == '\0' ?
- "o2cb" : osb->osb_cluster_stack),
- cconn->cc_namelen, cconn->cc_name,
- cconn->cc_version.pv_major, cconn->cc_version.pv_minor);
+ if (cconn) {
+ out += snprintf(buf + out, len - out,
+ "%10s => Stack: %s Name: %*s "
+ "Version: %d.%d\n", "Cluster",
+ (*osb->osb_cluster_stack == '\0' ?
+ "o2cb" : osb->osb_cluster_stack),
+ cconn->cc_namelen, cconn->cc_name,
+ cconn->cc_version.pv_major,
+ cconn->cc_version.pv_minor);
+ }
spin_lock(&osb->dc_task_lock);
out += snprintf(buf + out, len - out,
"%10s => Pid: %d Count: %lu WakeSeq: %lu "
"WorkSeq: %lu\n", "DownCnvt",
- task_pid_nr(osb->dc_task), osb->blocked_lock_count,
- osb->dc_wake_sequence, osb->dc_work_sequence);
+ (osb->dc_task ? task_pid_nr(osb->dc_task) : -1),
+ osb->blocked_lock_count, osb->dc_wake_sequence,
+ osb->dc_work_sequence);
spin_unlock(&osb->dc_task_lock);
spin_lock(&osb->osb_lock);
@@ -267,14 +270,15 @@
out += snprintf(buf + out, len - out,
"%10s => Pid: %d Interval: %lu Needs: %d\n", "Commit",
- task_pid_nr(osb->commit_task), osb->osb_commit_interval,
+ (osb->commit_task ? task_pid_nr(osb->commit_task) : -1),
+ osb->osb_commit_interval,
atomic_read(&osb->needs_checkpoint));
out += snprintf(buf + out, len - out,
- "%10s => State: %d NumTxns: %d TxnId: %lu\n",
+ "%10s => State: %d TxnId: %lu NumTxns: %d\n",
"Journal", osb->journal->j_state,
- atomic_read(&osb->journal->j_num_trans),
- osb->journal->j_trans_id);
+ osb->journal->j_trans_id,
+ atomic_read(&osb->journal->j_num_trans));
out += snprintf(buf + out, len - out,
"%10s => GlobalAllocs: %d LocalAllocs: %d "
@@ -300,9 +304,18 @@
atomic_read(&osb->s_num_inodes_stolen));
spin_unlock(&osb->osb_lock);
+ out += snprintf(buf + out, len - out, "OrphanScan => ");
+ out += snprintf(buf + out, len - out, "Local: %u Global: %u ",
+ os->os_count, os->os_seqno);
+ out += snprintf(buf + out, len - out, " Last Scan: ");
+ if (atomic_read(&os->os_state) == ORPHAN_SCAN_INACTIVE)
+ out += snprintf(buf + out, len - out, "Disabled\n");
+ else
+ out += snprintf(buf + out, len - out, "%lu seconds ago\n",
+ (get_seconds() - os->os_scantime.tv_sec));
+
out += snprintf(buf + out, len - out, "%10s => %3s %10s\n",
"Slots", "Num", "RecoGen");
-
for (i = 0; i < osb->max_slots; ++i) {
out += snprintf(buf + out, len - out,
"%10s %c %3d %10d\n",
@@ -311,13 +324,6 @@
i, osb->slot_recovery_generations[i]);
}
- os = &osb->osb_orphan_scan;
- out += snprintf(buf + out, len - out, "Orphan Scan=> ");
- out += snprintf(buf + out, len - out, "Local: %u Global: %u ",
- os->os_count, os->os_seqno);
- out += snprintf(buf + out, len - out, " Last Scan: %lu seconds ago\n",
- (get_seconds() - os->os_scantime.tv_sec));
-
return out;
}
@@ -1175,6 +1181,9 @@
atomic_set(&osb->vol_state, VOLUME_MOUNTED_QUOTAS);
wake_up(&osb->osb_mount_event);
+ /* Start this when the mount is almost certain to succeed */
+ ocfs2_orphan_scan_init(osb);
+
mlog_exit(status);
return status;
@@ -1810,14 +1819,15 @@
debugfs_remove(osb->osb_ctxt);
+ /* Orphan scan should be stopped as early as possible */
+ ocfs2_orphan_scan_stop(osb);
+
ocfs2_disable_quotas(osb);
ocfs2_shutdown_local_alloc(osb);
ocfs2_truncate_log_shutdown(osb);
- ocfs2_orphan_scan_stop(osb);
-
/* This will disable recovery and flush any recovery work. */
ocfs2_recovery_exit(osb);
@@ -1978,13 +1988,6 @@
goto bail;
}
- status = ocfs2_orphan_scan_init(osb);
- if (status) {
- mlog(ML_ERROR, "Unable to initialize delayed orphan scan\n");
- mlog_errno(status);
- goto bail;
- }
-
init_waitqueue_head(&osb->checkpoint_event);
atomic_set(&osb->needs_checkpoint, 0);
diff --git a/fs/ocfs2/sysfile.c b/fs/ocfs2/sysfile.c
index ab713eb..40e5370 100644
--- a/fs/ocfs2/sysfile.c
+++ b/fs/ocfs2/sysfile.c
@@ -50,6 +50,10 @@
int type,
u32 slot);
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+static struct lock_class_key ocfs2_sysfile_cluster_lock_key[NUM_SYSTEM_INODES];
+#endif
+
static inline int is_global_system_inode(int type)
{
return type >= OCFS2_FIRST_ONLINE_SYSTEM_INODE &&
@@ -118,6 +122,21 @@
inode = NULL;
goto bail;
}
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+ if (type == LOCAL_USER_QUOTA_SYSTEM_INODE ||
+ type == LOCAL_GROUP_QUOTA_SYSTEM_INODE ||
+ type == JOURNAL_SYSTEM_INODE) {
+ /* Ignore inode lock on these inodes as the lock does not
+ * really belong to any process and lockdep cannot handle
+ * that */
+ OCFS2_I(inode)->ip_inode_lockres.l_lockdep_map.key = NULL;
+ } else {
+ lockdep_init_map(&OCFS2_I(inode)->ip_inode_lockres.
+ l_lockdep_map,
+ ocfs2_system_inodes[type].si_name,
+ &ocfs2_sysfile_cluster_lock_key[type], 0);
+ }
+#endif
bail:
return inode;
diff --git a/fs/open.c b/fs/open.c
index 7200e23..dd98e80 100644
--- a/fs/open.c
+++ b/fs/open.c
@@ -378,63 +378,63 @@
#endif
#endif /* BITS_PER_LONG == 32 */
-SYSCALL_DEFINE(fallocate)(int fd, int mode, loff_t offset, loff_t len)
+
+int do_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
{
- struct file *file;
- struct inode *inode;
- long ret = -EINVAL;
+ struct inode *inode = file->f_path.dentry->d_inode;
+ long ret;
if (offset < 0 || len <= 0)
- goto out;
+ return -EINVAL;
/* Return error if mode is not supported */
- ret = -EOPNOTSUPP;
if (mode && !(mode & FALLOC_FL_KEEP_SIZE))
- goto out;
+ return -EOPNOTSUPP;
- ret = -EBADF;
- file = fget(fd);
- if (!file)
- goto out;
if (!(file->f_mode & FMODE_WRITE))
- goto out_fput;
+ return -EBADF;
/*
* Revalidate the write permissions, in case security policy has
* changed since the files were opened.
*/
ret = security_file_permission(file, MAY_WRITE);
if (ret)
- goto out_fput;
+ return ret;
- inode = file->f_path.dentry->d_inode;
-
- ret = -ESPIPE;
if (S_ISFIFO(inode->i_mode))
- goto out_fput;
+ return -ESPIPE;
- ret = -ENODEV;
/*
* Let individual file system decide if it supports preallocation
* for directories or not.
*/
if (!S_ISREG(inode->i_mode) && !S_ISDIR(inode->i_mode))
- goto out_fput;
+ return -ENODEV;
- ret = -EFBIG;
/* Check for wrap through zero too */
if (((offset + len) > inode->i_sb->s_maxbytes) || ((offset + len) < 0))
- goto out_fput;
+ return -EFBIG;
- if (inode->i_op->fallocate)
- ret = inode->i_op->fallocate(inode, mode, offset, len);
- else
- ret = -EOPNOTSUPP;
+ if (!inode->i_op->fallocate)
+ return -EOPNOTSUPP;
-out_fput:
- fput(file);
-out:
- return ret;
+ return inode->i_op->fallocate(inode, mode, offset, len);
}
+
+SYSCALL_DEFINE(fallocate)(int fd, int mode, loff_t offset, loff_t len)
+{
+ struct file *file;
+ int error = -EBADF;
+
+ file = fget(fd);
+ if (file) {
+ error = do_fallocate(file, mode, offset, len);
+ fput(file);
+ }
+
+ return error;
+}
+
#ifdef CONFIG_HAVE_SYSCALL_WRAPPERS
asmlinkage long SyS_fallocate(long fd, long mode, loff_t offset, loff_t len)
{
diff --git a/fs/reiserfs/inode.c b/fs/reiserfs/inode.c
index 6fd0f47..a14d6cd 100644
--- a/fs/reiserfs/inode.c
+++ b/fs/reiserfs/inode.c
@@ -1131,8 +1131,6 @@
REISERFS_I(inode)->i_trans_id = 0;
REISERFS_I(inode)->i_jl = NULL;
mutex_init(&(REISERFS_I(inode)->i_mmap));
- reiserfs_init_acl_access(inode);
- reiserfs_init_acl_default(inode);
reiserfs_init_xattr_rwsem(inode);
if (stat_data_v1(ih)) {
@@ -1834,8 +1832,6 @@
REISERFS_I(dir)->i_attrs & REISERFS_INHERIT_MASK;
sd_attrs_to_i_attrs(REISERFS_I(inode)->i_attrs, inode);
mutex_init(&(REISERFS_I(inode)->i_mmap));
- reiserfs_init_acl_access(inode);
- reiserfs_init_acl_default(inode);
reiserfs_init_xattr_rwsem(inode);
/* key to search for correct place for new stat data */
diff --git a/fs/reiserfs/resize.c b/fs/reiserfs/resize.c
index 238e9d9..18b315d 100644
--- a/fs/reiserfs/resize.c
+++ b/fs/reiserfs/resize.c
@@ -82,7 +82,6 @@
if (reiserfs_allocate_list_bitmaps(s, jbitmap, bmap_nr_new) < 0) {
printk
("reiserfs_resize: unable to allocate memory for journal bitmaps\n");
- unlock_super(s);
return -ENOMEM;
}
/* the new journal bitmaps are zero filled, now we copy in the bitmap
diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c
index 2969773..d3aeb06 100644
--- a/fs/reiserfs/super.c
+++ b/fs/reiserfs/super.c
@@ -529,10 +529,6 @@
INIT_LIST_HEAD(&ei->i_prealloc_list);
inode_init_once(&ei->vfs_inode);
-#ifdef CONFIG_REISERFS_FS_POSIX_ACL
- ei->i_acl_access = NULL;
- ei->i_acl_default = NULL;
-#endif
}
static int init_inodecache(void)
@@ -580,25 +576,6 @@
reiserfs_write_unlock(inode->i_sb);
}
-#ifdef CONFIG_REISERFS_FS_POSIX_ACL
-static void reiserfs_clear_inode(struct inode *inode)
-{
- struct posix_acl *acl;
-
- acl = REISERFS_I(inode)->i_acl_access;
- if (acl && !IS_ERR(acl))
- posix_acl_release(acl);
- REISERFS_I(inode)->i_acl_access = NULL;
-
- acl = REISERFS_I(inode)->i_acl_default;
- if (acl && !IS_ERR(acl))
- posix_acl_release(acl);
- REISERFS_I(inode)->i_acl_default = NULL;
-}
-#else
-#define reiserfs_clear_inode NULL
-#endif
-
#ifdef CONFIG_QUOTA
static ssize_t reiserfs_quota_write(struct super_block *, int, const char *,
size_t, loff_t);
@@ -612,7 +589,6 @@
.write_inode = reiserfs_write_inode,
.dirty_inode = reiserfs_dirty_inode,
.delete_inode = reiserfs_delete_inode,
- .clear_inode = reiserfs_clear_inode,
.put_super = reiserfs_put_super,
.write_super = reiserfs_write_super,
.sync_fs = reiserfs_sync_fs,
diff --git a/fs/reiserfs/xattr_acl.c b/fs/reiserfs/xattr_acl.c
index c303c42..35d6e67 100644
--- a/fs/reiserfs/xattr_acl.c
+++ b/fs/reiserfs/xattr_acl.c
@@ -188,29 +188,6 @@
return ERR_PTR(-EINVAL);
}
-static inline void iset_acl(struct inode *inode, struct posix_acl **i_acl,
- struct posix_acl *acl)
-{
- spin_lock(&inode->i_lock);
- if (*i_acl != ERR_PTR(-ENODATA))
- posix_acl_release(*i_acl);
- *i_acl = posix_acl_dup(acl);
- spin_unlock(&inode->i_lock);
-}
-
-static inline struct posix_acl *iget_acl(struct inode *inode,
- struct posix_acl **i_acl)
-{
- struct posix_acl *acl = ERR_PTR(-ENODATA);
-
- spin_lock(&inode->i_lock);
- if (*i_acl != ERR_PTR(-ENODATA))
- acl = posix_acl_dup(*i_acl);
- spin_unlock(&inode->i_lock);
-
- return acl;
-}
-
/*
* Inode operation get_posix_acl().
*
@@ -220,34 +197,29 @@
struct posix_acl *reiserfs_get_acl(struct inode *inode, int type)
{
char *name, *value;
- struct posix_acl *acl, **p_acl;
+ struct posix_acl *acl;
int size;
int retval;
- struct reiserfs_inode_info *reiserfs_i = REISERFS_I(inode);
+
+ acl = get_cached_acl(inode, type);
+ if (acl != ACL_NOT_CACHED)
+ return acl;
switch (type) {
case ACL_TYPE_ACCESS:
name = POSIX_ACL_XATTR_ACCESS;
- p_acl = &reiserfs_i->i_acl_access;
break;
case ACL_TYPE_DEFAULT:
name = POSIX_ACL_XATTR_DEFAULT;
- p_acl = &reiserfs_i->i_acl_default;
break;
default:
- return ERR_PTR(-EINVAL);
+ BUG();
}
- acl = iget_acl(inode, p_acl);
- if (acl && !IS_ERR(acl))
- return acl;
- else if (PTR_ERR(acl) == -ENODATA)
- return NULL;
-
size = reiserfs_xattr_get(inode, name, NULL, 0);
if (size < 0) {
if (size == -ENODATA || size == -ENOSYS) {
- *p_acl = ERR_PTR(-ENODATA);
+ set_cached_acl(inode, type, NULL);
return NULL;
}
return ERR_PTR(size);
@@ -262,14 +234,13 @@
/* This shouldn't actually happen as it should have
been caught above.. but just in case */
acl = NULL;
- *p_acl = ERR_PTR(-ENODATA);
} else if (retval < 0) {
acl = ERR_PTR(retval);
} else {
acl = posix_acl_from_disk(value, retval);
- if (!IS_ERR(acl))
- iset_acl(inode, p_acl, acl);
}
+ if (!IS_ERR(acl))
+ set_cached_acl(inode, type, acl);
kfree(value);
return acl;
@@ -287,10 +258,8 @@
{
char *name;
void *value = NULL;
- struct posix_acl **p_acl;
size_t size = 0;
int error;
- struct reiserfs_inode_info *reiserfs_i = REISERFS_I(inode);
if (S_ISLNK(inode->i_mode))
return -EOPNOTSUPP;
@@ -298,7 +267,6 @@
switch (type) {
case ACL_TYPE_ACCESS:
name = POSIX_ACL_XATTR_ACCESS;
- p_acl = &reiserfs_i->i_acl_access;
if (acl) {
mode_t mode = inode->i_mode;
error = posix_acl_equiv_mode(acl, &mode);
@@ -313,7 +281,6 @@
break;
case ACL_TYPE_DEFAULT:
name = POSIX_ACL_XATTR_DEFAULT;
- p_acl = &reiserfs_i->i_acl_default;
if (!S_ISDIR(inode->i_mode))
return acl ? -EACCES : 0;
break;
@@ -346,7 +313,7 @@
kfree(value);
if (!error)
- iset_acl(inode, p_acl, acl);
+ set_cached_acl(inode, type, acl);
return error;
}
@@ -379,11 +346,8 @@
}
acl = reiserfs_get_acl(dir, ACL_TYPE_DEFAULT);
- if (IS_ERR(acl)) {
- if (PTR_ERR(acl) == -ENODATA)
- goto apply_umask;
+ if (IS_ERR(acl))
return PTR_ERR(acl);
- }
if (acl) {
struct posix_acl *acl_copy;
diff --git a/fs/super.c b/fs/super.c
index d40d53a..2761d3e 100644
--- a/fs/super.c
+++ b/fs/super.c
@@ -608,6 +608,7 @@
static DEFINE_IDA(unnamed_dev_ida);
static DEFINE_SPINLOCK(unnamed_dev_lock);/* protects the above */
+static int unnamed_dev_start = 0; /* don't bother trying below it */
int set_anon_super(struct super_block *s, void *data)
{
@@ -618,7 +619,9 @@
if (ida_pre_get(&unnamed_dev_ida, GFP_ATOMIC) == 0)
return -ENOMEM;
spin_lock(&unnamed_dev_lock);
- error = ida_get_new(&unnamed_dev_ida, &dev);
+ error = ida_get_new_above(&unnamed_dev_ida, unnamed_dev_start, &dev);
+ if (!error)
+ unnamed_dev_start = dev + 1;
spin_unlock(&unnamed_dev_lock);
if (error == -EAGAIN)
/* We raced and lost with another CPU. */
@@ -629,6 +632,8 @@
if ((dev & MAX_ID_MASK) == (1 << MINORBITS)) {
spin_lock(&unnamed_dev_lock);
ida_remove(&unnamed_dev_ida, dev);
+ if (unnamed_dev_start > dev)
+ unnamed_dev_start = dev;
spin_unlock(&unnamed_dev_lock);
return -EMFILE;
}
@@ -645,6 +650,8 @@
generic_shutdown_super(sb);
spin_lock(&unnamed_dev_lock);
ida_remove(&unnamed_dev_ida, slot);
+ if (slot < unnamed_dev_start)
+ unnamed_dev_start = slot;
spin_unlock(&unnamed_dev_lock);
}
diff --git a/fs/ubifs/xattr.c b/fs/ubifs/xattr.c
index cfd31e2..adafcf5 100644
--- a/fs/ubifs/xattr.c
+++ b/fs/ubifs/xattr.c
@@ -55,9 +55,9 @@
* ACL support is not implemented.
*/
+#include "ubifs.h"
#include <linux/xattr.h>
#include <linux/posix_acl_xattr.h>
-#include "ubifs.h"
/*
* Limit the number of extended attributes per inode so that the total size
diff --git a/fs/udf/balloc.c b/fs/udf/balloc.c
index e48e9a3..1e06853 100644
--- a/fs/udf/balloc.c
+++ b/fs/udf/balloc.c
@@ -238,7 +238,7 @@
mutex_lock(&sbi->s_alloc_mutex);
part_len = sbi->s_partmaps[partition].s_partition_len;
- if (first_block < 0 || first_block >= part_len)
+ if (first_block >= part_len)
goto out;
if (first_block + block_count > part_len)
@@ -297,7 +297,7 @@
mutex_lock(&sbi->s_alloc_mutex);
repeat:
- if (goal < 0 || goal >= sbi->s_partmaps[partition].s_partition_len)
+ if (goal >= sbi->s_partmaps[partition].s_partition_len)
goal = 0;
nr_groups = bitmap->s_nr_groups;
@@ -666,8 +666,7 @@
int8_t etype = -1;
struct udf_inode_info *iinfo;
- if (first_block < 0 ||
- first_block >= sbi->s_partmaps[partition].s_partition_len)
+ if (first_block >= sbi->s_partmaps[partition].s_partition_len)
return 0;
iinfo = UDF_I(table);
@@ -743,7 +742,7 @@
return newblock;
mutex_lock(&sbi->s_alloc_mutex);
- if (goal < 0 || goal >= sbi->s_partmaps[partition].s_partition_len)
+ if (goal >= sbi->s_partmaps[partition].s_partition_len)
goal = 0;
/* We search for the closest matching block to goal. If we find
diff --git a/fs/udf/lowlevel.c b/fs/udf/lowlevel.c
index 703843f..1b88fd5 100644
--- a/fs/udf/lowlevel.c
+++ b/fs/udf/lowlevel.c
@@ -56,7 +56,12 @@
struct block_device *bdev = sb->s_bdev;
unsigned long lblock = 0;
- if (ioctl_by_bdev(bdev, CDROM_LAST_WRITTEN, (unsigned long) &lblock))
+ /*
+ * ioctl failed or returned an obviously bogus value?
+ * Try using the device size...
+ */
+ if (ioctl_by_bdev(bdev, CDROM_LAST_WRITTEN, (unsigned long) &lblock) ||
+ lblock == 0)
lblock = bdev->bd_inode->i_size >> sb->s_blocksize_bits;
if (lblock)
diff --git a/fs/xfs/linux-2.6/xfs_acl.c b/fs/xfs/linux-2.6/xfs_acl.c
index 1e9d124..b23a545 100644
--- a/fs/xfs/linux-2.6/xfs_acl.c
+++ b/fs/xfs/linux-2.6/xfs_acl.c
@@ -25,14 +25,10 @@
#include <linux/posix_acl_xattr.h>
-#define XFS_ACL_NOT_CACHED ((void *)-1)
-
/*
* Locking scheme:
* - all ACL updates are protected by inode->i_mutex, which is taken before
* calling into this file.
- * - access and updates to the ip->i_acl and ip->i_default_acl pointers are
- * protected by inode->i_lock.
*/
STATIC struct posix_acl *
@@ -102,59 +98,35 @@
}
}
-/*
- * Update the cached ACL pointer in the inode.
- *
- * Because we don't hold any locks while reading/writing the attribute
- * from/to disk another thread could have raced and updated the cached
- * ACL value before us. In that case we release the previous cached value
- * and update it with our new value.
- */
-STATIC void
-xfs_update_cached_acl(struct inode *inode, struct posix_acl **p_acl,
- struct posix_acl *acl)
-{
- spin_lock(&inode->i_lock);
- if (*p_acl && *p_acl != XFS_ACL_NOT_CACHED)
- posix_acl_release(*p_acl);
- *p_acl = posix_acl_dup(acl);
- spin_unlock(&inode->i_lock);
-}
-
struct posix_acl *
xfs_get_acl(struct inode *inode, int type)
{
struct xfs_inode *ip = XFS_I(inode);
- struct posix_acl *acl = NULL, **p_acl;
+ struct posix_acl *acl;
struct xfs_acl *xfs_acl;
int len = sizeof(struct xfs_acl);
char *ea_name;
int error;
+ acl = get_cached_acl(inode, type);
+ if (acl != ACL_NOT_CACHED)
+ return acl;
+
switch (type) {
case ACL_TYPE_ACCESS:
ea_name = SGI_ACL_FILE;
- p_acl = &ip->i_acl;
break;
case ACL_TYPE_DEFAULT:
ea_name = SGI_ACL_DEFAULT;
- p_acl = &ip->i_default_acl;
break;
default:
- return ERR_PTR(-EINVAL);
+ BUG();
}
- spin_lock(&inode->i_lock);
- if (*p_acl != XFS_ACL_NOT_CACHED)
- acl = posix_acl_dup(*p_acl);
- spin_unlock(&inode->i_lock);
-
/*
* If we have a cached ACLs value just return it, not need to
* go out to the disk.
*/
- if (acl)
- return acl;
xfs_acl = kzalloc(sizeof(struct xfs_acl), GFP_KERNEL);
if (!xfs_acl)
@@ -165,7 +137,7 @@
/*
* If the attribute doesn't exist make sure we have a negative
* cache entry, for any other error assume it is transient and
- * leave the cache entry as XFS_ACL_NOT_CACHED.
+ * leave the cache entry as ACL_NOT_CACHED.
*/
if (error == -ENOATTR) {
acl = NULL;
@@ -179,7 +151,7 @@
goto out;
out_update_cache:
- xfs_update_cached_acl(inode, p_acl, acl);
+ set_cached_acl(inode, type, acl);
out:
kfree(xfs_acl);
return acl;
@@ -189,7 +161,6 @@
xfs_set_acl(struct inode *inode, int type, struct posix_acl *acl)
{
struct xfs_inode *ip = XFS_I(inode);
- struct posix_acl **p_acl;
char *ea_name;
int error;
@@ -199,13 +170,11 @@
switch (type) {
case ACL_TYPE_ACCESS:
ea_name = SGI_ACL_FILE;
- p_acl = &ip->i_acl;
break;
case ACL_TYPE_DEFAULT:
if (!S_ISDIR(inode->i_mode))
return acl ? -EACCES : 0;
ea_name = SGI_ACL_DEFAULT;
- p_acl = &ip->i_default_acl;
break;
default:
return -EINVAL;
@@ -242,7 +211,7 @@
}
if (!error)
- xfs_update_cached_acl(inode, p_acl, acl);
+ set_cached_acl(inode, type, acl);
return error;
}
@@ -384,30 +353,6 @@
return error;
}
-void
-xfs_inode_init_acls(struct xfs_inode *ip)
-{
- /*
- * No need for locking, inode is not live yet.
- */
- ip->i_acl = XFS_ACL_NOT_CACHED;
- ip->i_default_acl = XFS_ACL_NOT_CACHED;
-}
-
-void
-xfs_inode_clear_acls(struct xfs_inode *ip)
-{
- /*
- * No need for locking here, the inode is not live anymore
- * and just about to be freed.
- */
- if (ip->i_acl != XFS_ACL_NOT_CACHED)
- posix_acl_release(ip->i_acl);
- if (ip->i_default_acl != XFS_ACL_NOT_CACHED)
- posix_acl_release(ip->i_default_acl);
-}
-
-
/*
* System xattr handlers.
*
diff --git a/fs/xfs/xfs_acl.h b/fs/xfs/xfs_acl.h
index 63dc1f2..947b150 100644
--- a/fs/xfs/xfs_acl.h
+++ b/fs/xfs/xfs_acl.h
@@ -46,8 +46,6 @@
extern struct posix_acl *xfs_get_acl(struct inode *inode, int type);
extern int xfs_inherit_acl(struct inode *inode, struct posix_acl *default_acl);
extern int xfs_acl_chmod(struct inode *inode);
-extern void xfs_inode_init_acls(struct xfs_inode *ip);
-extern void xfs_inode_clear_acls(struct xfs_inode *ip);
extern int posix_acl_access_exists(struct inode *inode);
extern int posix_acl_default_exists(struct inode *inode);
@@ -57,8 +55,6 @@
# define xfs_get_acl(inode, type) NULL
# define xfs_inherit_acl(inode, default_acl) 0
# define xfs_acl_chmod(inode) 0
-# define xfs_inode_init_acls(ip)
-# define xfs_inode_clear_acls(ip)
# define posix_acl_access_exists(inode) 0
# define posix_acl_default_exists(inode) 0
#endif /* CONFIG_XFS_POSIX_ACL */
diff --git a/fs/xfs/xfs_iget.c b/fs/xfs/xfs_iget.c
index 76c540f..5fcec6f 100644
--- a/fs/xfs/xfs_iget.c
+++ b/fs/xfs/xfs_iget.c
@@ -83,7 +83,6 @@
memset(&ip->i_d, 0, sizeof(xfs_icdinode_t));
ip->i_size = 0;
ip->i_new_size = 0;
- xfs_inode_init_acls(ip);
/*
* Initialize inode's trace buffers.
@@ -560,7 +559,6 @@
ASSERT(atomic_read(&ip->i_pincount) == 0);
ASSERT(!spin_is_locked(&ip->i_flags_lock));
ASSERT(completion_done(&ip->i_flush));
- xfs_inode_clear_acls(ip);
kmem_zone_free(xfs_inode_zone, ip);
}
diff --git a/fs/xfs/xfs_inode.h b/fs/xfs/xfs_inode.h
index 7701670..1804f86 100644
--- a/fs/xfs/xfs_inode.h
+++ b/fs/xfs/xfs_inode.h
@@ -273,11 +273,6 @@
/* VFS inode */
struct inode i_vnode; /* embedded VFS inode */
-#ifdef CONFIG_XFS_POSIX_ACL
- struct posix_acl *i_acl;
- struct posix_acl *i_default_acl;
-#endif
-
/* Trace buffers per inode. */
#ifdef XFS_INODE_TRACE
struct ktrace *i_trace; /* general inode trace */
diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h
index c34b110..c65e4ce 100644
--- a/include/acpi/acpi_bus.h
+++ b/include/acpi/acpi_bus.h
@@ -114,10 +114,13 @@
acpi_op_notify notify;
};
+#define ACPI_DRIVER_ALL_NOTIFY_EVENTS 0x1 /* system AND device events */
+
struct acpi_driver {
char name[80];
char class[80];
const struct acpi_device_id *ids; /* Supported Hardware IDs */
+ unsigned int flags;
struct acpi_device_ops ops;
struct device_driver drv;
struct module *owner;
@@ -168,7 +171,7 @@
/* Plug and Play */
-typedef char acpi_bus_id[5];
+typedef char acpi_bus_id[8];
typedef unsigned long acpi_bus_address;
typedef char acpi_hardware_id[15];
typedef char acpi_unique_id[9];
@@ -365,10 +368,10 @@
int register_acpi_bus_type(struct acpi_bus_type *);
int unregister_acpi_bus_type(struct acpi_bus_type *);
struct device *acpi_get_physical_device(acpi_handle);
-struct device *acpi_get_physical_pci_device(acpi_handle);
/* helper */
acpi_handle acpi_get_child(acpi_handle, acpi_integer);
+int acpi_is_root_bridge(acpi_handle);
acpi_handle acpi_get_pci_rootbridge_handle(unsigned int, unsigned int);
#define DEVICE_ACPI_HANDLE(dev) ((acpi_handle)((dev)->archdata.acpi_handle))
diff --git a/include/acpi/acpi_drivers.h b/include/acpi/acpi_drivers.h
index 0352c8f..f4906f6 100644
--- a/include/acpi/acpi_drivers.h
+++ b/include/acpi/acpi_drivers.h
@@ -57,8 +57,7 @@
*/
#define ACPI_POWER_HID "LNXPOWER"
-#define ACPI_PROCESSOR_OBJECT_HID "ACPI_CPU"
-#define ACPI_PROCESSOR_HID "ACPI0007"
+#define ACPI_PROCESSOR_OBJECT_HID "LNXCPU"
#define ACPI_SYSTEM_HID "LNXSYSTM"
#define ACPI_THERMAL_HID "LNXTHERM"
#define ACPI_BUTTON_HID_POWERF "LNXPWRBN"
@@ -91,17 +90,15 @@
/* ACPI PCI Interrupt Routing (pci_irq.c) */
-int acpi_pci_irq_add_prt(acpi_handle handle, int segment, int bus);
-void acpi_pci_irq_del_prt(int segment, int bus);
+int acpi_pci_irq_add_prt(acpi_handle handle, struct pci_bus *bus);
+void acpi_pci_irq_del_prt(struct pci_bus *bus);
/* ACPI PCI Device Binding (pci_bind.c) */
struct pci_bus;
-acpi_status acpi_get_pci_id(acpi_handle handle, struct acpi_pci_id *id);
-int acpi_pci_bind(struct acpi_device *device);
-int acpi_pci_bind_root(struct acpi_device *device, struct acpi_pci_id *id,
- struct pci_bus *bus);
+struct pci_dev *acpi_get_pci_dev(acpi_handle);
+int acpi_pci_bind_root(struct acpi_device *device);
/* Arch-defined function to add a bus to the system */
diff --git a/include/acpi/processor.h b/include/acpi/processor.h
index 4927c06..baf1e0a 100644
--- a/include/acpi/processor.h
+++ b/include/acpi/processor.h
@@ -258,6 +258,7 @@
extern struct acpi_processor_errata errata;
void arch_acpi_processor_init_pdc(struct acpi_processor *pr);
+void arch_acpi_processor_cleanup_pdc(struct acpi_processor *pr);
#ifdef ARCH_HAS_POWER_INIT
void acpi_processor_power_init_bm_check(struct acpi_processor_flags *flags,
diff --git a/include/acpi/video.h b/include/acpi/video.h
index af6fe95..cf7be3d 100644
--- a/include/acpi/video.h
+++ b/include/acpi/video.h
@@ -3,10 +3,10 @@
#if (defined CONFIG_ACPI_VIDEO || defined CONFIG_ACPI_VIDEO_MODULE)
extern int acpi_video_register(void);
-extern int acpi_video_exit(void);
+extern void acpi_video_unregister(void);
#else
static inline int acpi_video_register(void) { return 0; }
-static inline void acpi_video_exit(void) { return; }
+static inline void acpi_video_unregister(void) { return; }
#endif
#endif
diff --git a/include/asm-generic/hardirq.h b/include/asm-generic/hardirq.h
index 3d5d2c9..23bb4da 100644
--- a/include/asm-generic/hardirq.h
+++ b/include/asm-generic/hardirq.h
@@ -11,19 +11,6 @@
#include <linux/irq_cpustat.h> /* Standard mappings for irq_cpustat_t above */
-#ifndef HARDIRQ_BITS
-#define HARDIRQ_BITS 8
-#endif
-
-/*
- * The hardirq mask has to be large enough to have
- * space for potentially all IRQ sources in the system
- * nesting on a single CPU:
- */
-#if (1 << HARDIRQ_BITS) < NR_IRQS
-# error HARDIRQ_BITS is too low!
-#endif
-
#ifndef ack_bad_irq
static inline void ack_bad_irq(unsigned int irq)
{
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
index e410f60..e2bd73e 100644
--- a/include/asm-generic/pgtable.h
+++ b/include/asm-generic/pgtable.h
@@ -129,6 +129,10 @@
#define move_pte(pte, prot, old_addr, new_addr) (pte)
#endif
+#ifndef pgprot_noncached
+#define pgprot_noncached(prot) (prot)
+#endif
+
#ifndef pgprot_writecombine
#define pgprot_writecombine pgprot_noncached
#endif
diff --git a/include/asm-generic/uaccess.h b/include/asm-generic/uaccess.h
index 6d8cab2..b218b85 100644
--- a/include/asm-generic/uaccess.h
+++ b/include/asm-generic/uaccess.h
@@ -163,7 +163,7 @@
#define put_user(x, ptr) \
({ \
might_sleep(); \
- __access_ok(ptr, sizeof (*ptr)) ? \
+ access_ok(VERIFY_WRITE, ptr, sizeof(*ptr)) ? \
__put_user(x, ptr) : \
-EFAULT; \
})
@@ -219,7 +219,7 @@
#define get_user(x, ptr) \
({ \
might_sleep(); \
- __access_ok(ptr, sizeof (*ptr)) ? \
+ access_ok(VERIFY_READ, ptr, sizeof(*ptr)) ? \
__get_user(x, ptr) : \
-EFAULT; \
})
@@ -244,7 +244,7 @@
const void __user * from, unsigned long n)
{
might_sleep();
- if (__access_ok(from, n))
+ if (access_ok(VERIFY_READ, from, n))
return __copy_from_user(to, from, n);
else
return n;
@@ -254,7 +254,7 @@
const void *from, unsigned long n)
{
might_sleep();
- if (__access_ok(to, n))
+ if (access_ok(VERIFY_WRITE, to, n))
return __copy_to_user(to, from, n);
else
return n;
@@ -278,7 +278,7 @@
static inline long
strncpy_from_user(char *dst, const char __user *src, long count)
{
- if (!__access_ok(src, 1))
+ if (!access_ok(VERIFY_READ, src, 1))
return -EFAULT;
return __strncpy_from_user(dst, src, count);
}
@@ -291,6 +291,8 @@
#ifndef strnlen_user
static inline long strnlen_user(const char __user *src, long n)
{
+ if (!access_ok(VERIFY_READ, src, 1))
+ return 0;
return strlen((void * __force)src) + 1;
}
#endif
@@ -316,7 +318,7 @@
clear_user(void __user *to, unsigned long n)
{
might_sleep();
- if (!__access_ok(to, n))
+ if (!access_ok(VERIFY_WRITE, to, n))
return n;
return __clear_user(to, n);
diff --git a/include/asm-generic/unistd.h b/include/asm-generic/unistd.h
index 5b34b62..1125e5a 100644
--- a/include/asm-generic/unistd.h
+++ b/include/asm-generic/unistd.h
@@ -618,8 +618,13 @@
__SYSCALL(__NR_move_pages, sys_move_pages)
#endif
+#define __NR_rt_tgsigqueueinfo 240
+__SYSCALL(__NR_rt_tgsigqueueinfo, sys_rt_tgsigqueueinfo)
+#define __NR_perf_counter_open 241
+__SYSCALL(__NR_perf_counter_open, sys_perf_counter_open)
+
#undef __NR_syscalls
-#define __NR_syscalls 240
+#define __NR_syscalls 242
/*
* All syscalls below here should go away really,
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index 55413e5..92b73b6 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -625,7 +625,7 @@
*(.init.ramfs) \
VMLINUX_SYMBOL(__initramfs_end) = .;
#else
-#define INITRAMFS
+#define INIT_RAM_FS
#endif
/**
diff --git a/include/drm/drm_edid.h b/include/drm/drm_edid.h
index c263e4d..7d6c9a2 100644
--- a/include/drm/drm_edid.h
+++ b/include/drm/drm_edid.h
@@ -35,11 +35,11 @@
} __attribute__((packed));
/* 00=16:10, 01=4:3, 10=5:4, 11=16:9 */
-#define EDID_TIMING_ASPECT_SHIFT 0
+#define EDID_TIMING_ASPECT_SHIFT 6
#define EDID_TIMING_ASPECT_MASK (0x3 << EDID_TIMING_ASPECT_SHIFT)
/* need to add 60 */
-#define EDID_TIMING_VFREQ_SHIFT 2
+#define EDID_TIMING_VFREQ_SHIFT 0
#define EDID_TIMING_VFREQ_MASK (0x3f << EDID_TIMING_VFREQ_SHIFT)
struct std_timing {
@@ -47,11 +47,11 @@
u8 vfreq_aspect;
} __attribute__((packed));
-#define DRM_EDID_PT_HSYNC_POSITIVE (1 << 6)
-#define DRM_EDID_PT_VSYNC_POSITIVE (1 << 5)
+#define DRM_EDID_PT_HSYNC_POSITIVE (1 << 1)
+#define DRM_EDID_PT_VSYNC_POSITIVE (1 << 2)
#define DRM_EDID_PT_SEPARATE_SYNC (3 << 3)
-#define DRM_EDID_PT_STEREO (1 << 2)
-#define DRM_EDID_PT_INTERLACED (1 << 1)
+#define DRM_EDID_PT_STEREO (1 << 5)
+#define DRM_EDID_PT_INTERLACED (1 << 7)
/* If detailed data is pixel timing */
struct detailed_pixel_timing {
@@ -93,7 +93,7 @@
} __attribute__((packed));
struct detailed_data_wpindex {
- u8 white_xy_lo; /* Upper 2 bits each */
+ u8 white_yx_lo; /* Lower 2 bits each */
u8 white_x_hi;
u8 white_y_hi;
u8 gamma; /* need to divide by 100 then add 1 */
@@ -135,21 +135,21 @@
} data;
} __attribute__((packed));
-#define DRM_EDID_INPUT_SERRATION_VSYNC (1 << 7)
-#define DRM_EDID_INPUT_SYNC_ON_GREEN (1 << 5)
-#define DRM_EDID_INPUT_COMPOSITE_SYNC (1 << 4)
+#define DRM_EDID_INPUT_SERRATION_VSYNC (1 << 0)
+#define DRM_EDID_INPUT_SYNC_ON_GREEN (1 << 1)
+#define DRM_EDID_INPUT_COMPOSITE_SYNC (1 << 2)
#define DRM_EDID_INPUT_SEPARATE_SYNCS (1 << 3)
-#define DRM_EDID_INPUT_BLANK_TO_BLACK (1 << 2)
-#define DRM_EDID_INPUT_VIDEO_LEVEL (3 << 1)
-#define DRM_EDID_INPUT_DIGITAL (1 << 0) /* bits above must be zero if set */
+#define DRM_EDID_INPUT_BLANK_TO_BLACK (1 << 4)
+#define DRM_EDID_INPUT_VIDEO_LEVEL (3 << 5)
+#define DRM_EDID_INPUT_DIGITAL (1 << 7) /* bits below must be zero if set */
-#define DRM_EDID_FEATURE_DEFAULT_GTF (1 << 7)
-#define DRM_EDID_FEATURE_PREFERRED_TIMING (1 << 6)
-#define DRM_EDID_FEATURE_STANDARD_COLOR (1 << 5)
+#define DRM_EDID_FEATURE_DEFAULT_GTF (1 << 0)
+#define DRM_EDID_FEATURE_PREFERRED_TIMING (1 << 1)
+#define DRM_EDID_FEATURE_STANDARD_COLOR (1 << 2)
#define DRM_EDID_FEATURE_DISPLAY_TYPE (3 << 3) /* 00=mono, 01=rgb, 10=non-rgb, 11=unknown */
-#define DRM_EDID_FEATURE_PM_ACTIVE_OFF (1 << 2)
-#define DRM_EDID_FEATURE_PM_SUSPEND (1 << 1)
-#define DRM_EDID_FEATURE_PM_STANDBY (1 << 0)
+#define DRM_EDID_FEATURE_PM_ACTIVE_OFF (1 << 5)
+#define DRM_EDID_FEATURE_PM_SUSPEND (1 << 6)
+#define DRM_EDID_FEATURE_PM_STANDBY (1 << 7)
struct edid {
u8 header[8];
diff --git a/include/linux/Kbuild b/include/linux/Kbuild
index 03f2207..334a359 100644
--- a/include/linux/Kbuild
+++ b/include/linux/Kbuild
@@ -57,6 +57,7 @@
header-y += dlm_device.h
header-y += dlm_netlink.h
header-y += dm-ioctl.h
+header-y += dm-log-userspace.h
header-y += dn.h
header-y += dqblk_xfs.h
header-y += efs_fs_sb.h
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index 51b4b0a..34321cf 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -113,9 +113,6 @@
extern u32 acpi_irq_handled;
extern u32 acpi_irq_not_handled;
-extern struct acpi_mcfg_allocation *pci_mmcfg_config;
-extern int pci_mmcfg_config_num;
-
extern int sbf_port;
extern unsigned long acpi_realmode_flags;
@@ -293,7 +290,10 @@
OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL)
extern acpi_status acpi_pci_osc_control_set(acpi_handle handle, u32 flags);
+extern void acpi_early_init(void);
+
#else /* CONFIG_ACPI */
+static inline void acpi_early_init(void) { }
static inline int early_acpi_boot_init(void)
{
diff --git a/include/linux/audit.h b/include/linux/audit.h
index 4fa2810..3c7a358 100644
--- a/include/linux/audit.h
+++ b/include/linux/audit.h
@@ -599,6 +599,8 @@
extern void audit_log_d_path(struct audit_buffer *ab,
const char *prefix,
struct path *path);
+extern void audit_log_key(struct audit_buffer *ab,
+ char *key);
extern void audit_log_lost(const char *message);
extern int audit_update_lsm_rules(void);
@@ -621,6 +623,7 @@
#define audit_log_n_untrustedstring(a,n,s) do { ; } while (0)
#define audit_log_untrustedstring(a,s) do { ; } while (0)
#define audit_log_d_path(b, p, d) do { ; } while (0)
+#define audit_log_key(b, k) do { ; } while (0)
#define audit_enabled 0
#endif
#endif
diff --git a/include/linux/connector.h b/include/linux/connector.h
index b9966e6..b68d278 100644
--- a/include/linux/connector.h
+++ b/include/linux/connector.h
@@ -41,8 +41,10 @@
#define CN_IDX_BB 0x5 /* BlackBoard, from the TSP GPL sampling framework */
#define CN_DST_IDX 0x6
#define CN_DST_VAL 0x1
+#define CN_IDX_DM 0x7 /* Device Mapper */
+#define CN_VAL_DM_USERSPACE_LOG 0x1
-#define CN_NETLINK_USERS 7
+#define CN_NETLINK_USERS 8
/*
* Maximum connector's message size.
diff --git a/include/linux/cpu.h b/include/linux/cpu.h
index 2643d84..4d668e0 100644
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
@@ -69,7 +69,6 @@
int cpu_up(unsigned int cpu);
void notify_cpu_starting(unsigned int cpu);
-extern void cpu_hotplug_init(void);
extern void cpu_maps_update_begin(void);
extern void cpu_maps_update_done(void);
@@ -84,10 +83,6 @@
{
}
-static inline void cpu_hotplug_init(void)
-{
-}
-
static inline void cpu_maps_update_begin(void)
{
}
diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h
index 49c2362..0d63106 100644
--- a/include/linux/device-mapper.h
+++ b/include/linux/device-mapper.h
@@ -11,6 +11,7 @@
#include <linux/bio.h>
#include <linux/blkdev.h>
+struct dm_dev;
struct dm_target;
struct dm_table;
struct mapped_device;
@@ -21,6 +22,7 @@
union map_info {
void *ptr;
unsigned long long ll;
+ unsigned flush_request;
};
/*
@@ -80,6 +82,15 @@
typedef int (*dm_merge_fn) (struct dm_target *ti, struct bvec_merge_data *bvm,
struct bio_vec *biovec, int max_size);
+typedef int (*iterate_devices_callout_fn) (struct dm_target *ti,
+ struct dm_dev *dev,
+ sector_t physical_start,
+ void *data);
+
+typedef int (*dm_iterate_devices_fn) (struct dm_target *ti,
+ iterate_devices_callout_fn fn,
+ void *data);
+
/*
* Returns:
* 0: The target can handle the next I/O immediately.
@@ -92,7 +103,8 @@
/*
* Combine device limits.
*/
-void dm_set_device_limits(struct dm_target *ti, struct block_device *bdev);
+int dm_set_device_limits(struct dm_target *ti, struct dm_dev *dev,
+ sector_t start, void *data);
struct dm_dev {
struct block_device *bdev;
@@ -138,23 +150,12 @@
dm_ioctl_fn ioctl;
dm_merge_fn merge;
dm_busy_fn busy;
+ dm_iterate_devices_fn iterate_devices;
/* For internal device-mapper use. */
struct list_head list;
};
-struct io_restrictions {
- unsigned long bounce_pfn;
- unsigned long seg_boundary_mask;
- unsigned max_hw_sectors;
- unsigned max_sectors;
- unsigned max_segment_size;
- unsigned short logical_block_size;
- unsigned short max_hw_segments;
- unsigned short max_phys_segments;
- unsigned char no_cluster; /* inverted so that 0 is default */
-};
-
struct dm_target {
struct dm_table *table;
struct target_type *type;
@@ -163,15 +164,18 @@
sector_t begin;
sector_t len;
- /* FIXME: turn this into a mask, and merge with io_restrictions */
/* Always a power of 2 */
sector_t split_io;
/*
- * These are automatically filled in by
- * dm_table_get_device.
+ * The number of zero-length barrier requests that will be submitted
+ * to the target for the purpose of flushing the cache.
+ *
+ * The request number will be placed in union map_info->flush_request.
+ * It is the responsibility of the target driver to remap these requests
+ * to the real underlying devices.
*/
- struct io_restrictions limits;
+ unsigned num_flush_requests;
/* target specific data */
void *private;
@@ -230,6 +234,7 @@
int dm_suspended(struct mapped_device *md);
int dm_noflush_suspending(struct dm_target *ti);
union map_info *dm_get_mapinfo(struct bio *bio);
+union map_info *dm_get_rq_mapinfo(struct request *rq);
/*
* Geometry functions.
@@ -392,4 +397,12 @@
return (n << SECTOR_SHIFT);
}
+/*-----------------------------------------------------------------
+ * Helper for block layer and dm core operations
+ *---------------------------------------------------------------*/
+void dm_dispatch_request(struct request *rq);
+void dm_requeue_unmapped_request(struct request *rq);
+void dm_kill_unmapped_request(struct request *rq, int error);
+int dm_underlying_device_busy(struct request_queue *q);
+
#endif /* _LINUX_DEVICE_MAPPER_H */
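A rough sketch (not part of the patch) of how a simple single-device target might implement the two new hooks; 'example_ctx' and both functions are hypothetical, and the constructor is assumed to have set ti->num_flush_requests = 1:

struct example_ctx {			/* hypothetical per-target context */
	struct dm_dev *dev;
	sector_t start;
};

static int example_iterate_devices(struct dm_target *ti,
				   iterate_devices_callout_fn fn, void *data)
{
	struct example_ctx *ec = ti->private;

	/* Report the one underlying device; the core passes callbacks such
	 * as dm_set_device_limits() through this hook. */
	return fn(ti, ec->dev, ec->start, data);
}

static int example_map(struct dm_target *ti, struct bio *bio,
		       union map_info *map_context)
{
	struct example_ctx *ec = ti->private;

	/*
	 * With num_flush_requests = 1, a zero-length barrier arrives here
	 * once, with map_context->flush_request == 0; like any other bio it
	 * only needs remapping to the single underlying device.
	 */
	bio->bi_bdev = ec->dev->bdev;
	bio->bi_sector = ec->start + (bio->bi_sector - ti->begin);
	return DM_MAPIO_REMAPPED;
}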
diff --git a/include/linux/dm-ioctl.h b/include/linux/dm-ioctl.h
index 48e44ee..2ab84c8 100644
--- a/include/linux/dm-ioctl.h
+++ b/include/linux/dm-ioctl.h
@@ -123,6 +123,16 @@
__u32 target_count; /* in/out */
__s32 open_count; /* out */
__u32 flags; /* in/out */
+
+ /*
+ * event_nr holds either the event number (input and output) or the
+ * udev cookie value (input only).
+ * The DM_DEV_WAIT ioctl takes an event number as input.
+ * The DM_SUSPEND, DM_DEV_REMOVE and DM_DEV_RENAME ioctls
+ * use the field as a cookie to return in the DM_COOKIE
+ * variable with the uevents they issue.
+ * For output, the ioctls return the event number, not the cookie.
+ */
__u32 event_nr; /* in/out */
__u32 padding;
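A small user-space sketch of the cookie mechanism described above (illustrative only; error handling is trimmed and the cookie value is whatever the caller's udev rules expect to match):

#include <fcntl.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/dm-ioctl.h>

static int dm_remove_with_cookie(const char *name, uint32_t cookie)
{
	struct dm_ioctl io;
	int fd, r;

	fd = open("/dev/mapper/control", O_RDWR);
	if (fd < 0)
		return fd;

	memset(&io, 0, sizeof(io));
	io.version[0] = DM_VERSION_MAJOR;
	io.version[1] = DM_VERSION_MINOR;
	io.version[2] = DM_VERSION_PATCHLEVEL;
	io.data_size = sizeof(io);
	strncpy(io.name, name, sizeof(io.name) - 1);
	io.event_nr = cookie;	/* returned as DM_COOKIE in the remove uevent */

	r = ioctl(fd, DM_DEV_REMOVE, &io);
	close(fd);
	return r;
}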
@@ -256,9 +266,9 @@
#define DM_DEV_SET_GEOMETRY _IOWR(DM_IOCTL, DM_DEV_SET_GEOMETRY_CMD, struct dm_ioctl)
#define DM_VERSION_MAJOR 4
-#define DM_VERSION_MINOR 14
+#define DM_VERSION_MINOR 15
#define DM_VERSION_PATCHLEVEL 0
-#define DM_VERSION_EXTRA "-ioctl (2008-04-23)"
+#define DM_VERSION_EXTRA "-ioctl (2009-04-01)"
/* Status bits */
#define DM_READONLY_FLAG (1 << 0) /* In/Out */
diff --git a/include/linux/dm-log-userspace.h b/include/linux/dm-log-userspace.h
new file mode 100644
index 0000000..642e301
--- /dev/null
+++ b/include/linux/dm-log-userspace.h
@@ -0,0 +1,386 @@
+/*
+ * Copyright (C) 2006-2009 Red Hat, Inc.
+ *
+ * This file is released under the LGPL.
+ */
+
+#ifndef __DM_LOG_USERSPACE_H__
+#define __DM_LOG_USERSPACE_H__
+
+#include <linux/dm-ioctl.h> /* For DM_UUID_LEN */
+
+/*
+ * The device-mapper userspace log module consists of a kernel component and
+ * a user-space component. The kernel component implements the API defined
+ * in dm-dirty-log.h. Its purpose is simply to pass the parameters and
+ * return values of those API functions between kernel and user-space.
+ *
+ * Below are defined the 'request_types' - DM_ULOG_CTR, DM_ULOG_DTR, etc.
+ * These request types represent the different functions in the device-mapper
+ * dirty log API. Each of these is described in more detail below.
+ *
+ * The user-space program must listen for requests from the kernel (representing
+ * the various API functions) and process them.
+ *
+ * User-space begins by setting up the communication link (error checking
+ * removed for clarity):
+ * fd = socket(PF_NETLINK, SOCK_DGRAM, NETLINK_CONNECTOR);
+ * addr.nl_family = AF_NETLINK;
+ * addr.nl_groups = CN_IDX_DM;
+ * addr.nl_pid = 0;
+ * r = bind(fd, (struct sockaddr *) &addr, sizeof(addr));
+ * opt = addr.nl_groups;
+ * setsockopt(fd, SOL_NETLINK, NETLINK_ADD_MEMBERSHIP, &opt, sizeof(opt));
+ *
+ * User-space will then wait to receive requests from the kernel, which it
+ * will process as described below. The requests are received in the form,
+ * ((struct dm_ulog_request) + (additional data)). Depending on the request
+ * type, there may or may not be 'additional data'. In the descriptions below,
+ * you will see 'Payload-to-userspace' and 'Payload-to-kernel'. The
+ * 'Payload-to-userspace' is what the kernel sends in 'additional data' as
+ * necessary parameters to complete the request. The 'Payload-to-kernel' is
+ * the 'additional data' returned to the kernel that contains the necessary
+ * results of the request. The 'data_size' field in the dm_ulog_request
+ * structure denotes the availability and amount of payload data.
+ */
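To make the flow above concrete, here is a rough user-space sketch of one receive/dispatch/reply pass (illustrative only, not part of this header; it assumes the socket was bound as shown above, that the field names follow the dm_ulog_request definition later in this file, and that the reply is simply the received request echoed back with 'error', 'data_size' and any payload filled in):

#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <sys/socket.h>
#include <linux/netlink.h>
#include <linux/connector.h>
#include <linux/dm-log-userspace.h>

static void handle_request(struct dm_ulog_request *rq)
{
	switch (rq->request_type) {
	case DM_ULOG_IS_CLEAN: {
		int64_t clean = 1;	/* this toy log reports everything clean */

		memcpy(rq->data, &clean, sizeof(clean));
		rq->data_size = sizeof(clean);
		rq->error = 0;
		break;
	}
	default:
		rq->error = -ENOSYS;	/* request type not handled here */
		rq->data_size = 0;
	}
}

static void process_one_request(int fd)
{
	char buf[8192];
	struct nlmsghdr *nlh = (struct nlmsghdr *)buf;
	struct cn_msg *cn;
	struct dm_ulog_request *rq;
	ssize_t len;

	len = recv(fd, buf, sizeof(buf), 0);
	if (len <= 0 || !NLMSG_OK(nlh, len))
		return;

	cn = NLMSG_DATA(nlh);
	rq = (struct dm_ulog_request *)cn->data;

	handle_request(rq);

	/* Echo the request back as the reply, reusing the same framing. */
	cn->len = sizeof(*rq) + rq->data_size;
	nlh->nlmsg_len = NLMSG_LENGTH(sizeof(*cn) + cn->len);
	send(fd, nlh, nlh->nlmsg_len, 0);
}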
+
+/*
+ * DM_ULOG_CTR corresponds to (found in dm-dirty-log.h):
+ * int (*ctr)(struct dm_dirty_log *log, struct dm_target *ti,
+ * unsigned argc, char **argv);
+ *
+ * Payload-to-userspace:
+ * A single string containing all the argv arguments separated by ' 's
+ * Payload-to-kernel:
+ * None. ('data_size' in the dm_ulog_request struct should be 0.)
+ *
+ * The UUID contained in the dm_ulog_request structure is the reference that
+ * will be used by all request types to address a specific log. The
+ * constructor must record this association with the instance created.
+ *
+ * When the request has been processed, user-space must return the
+ * dm_ulog_request to the kernel - setting the 'error' field and
+ * 'data_size' appropriately.
+ */
+#define DM_ULOG_CTR 1
+
+/*
+ * DM_ULOG_DTR corresponds to (found in dm-dirty-log.h):
+ * void (*dtr)(struct dm_dirty_log *log);
+ *
+ * Payload-to-userspace:
+ * A single string containing all the argv arguments separated by ' 's
+ * Payload-to-kernel:
+ * None. ('data_size' in the dm_ulog_request struct should be 0.)
+ *
+ * The UUID contained in the dm_ulog_request structure is all that is
+ * necessary to identify the log instance being destroyed. There is no
+ * payload data.
+ *
+ * When the request has been processed, user-space must return the
+ * dm_ulog_request to the kernel - setting the 'error' field and clearing
+ * 'data_size' appropriately.
+ */
+#define DM_ULOG_DTR 2
+
+/*
+ * DM_ULOG_PRESUSPEND corresponds to (found in dm-dirty-log.h):
+ * int (*presuspend)(struct dm_dirty_log *log);
+ *
+ * Payload-to-userspace:
+ * None.
+ * Payload-to-kernel:
+ * None.
+ *
+ * The UUID contained in the dm_ulog_request structure is all that is
+ * necessary to identify the log instance being presuspended. There is no
+ * payload data.
+ *
+ * When the request has been processed, user-space must return the
+ * dm_ulog_request to the kernel - setting the 'error' field and
+ * 'data_size' appropriately.
+ */
+#define DM_ULOG_PRESUSPEND 3
+
+/*
+ * DM_ULOG_POSTSUSPEND corresponds to (found in dm-dirty-log.h):
+ * int (*postsuspend)(struct dm_dirty_log *log);
+ *
+ * Payload-to-userspace:
+ * None.
+ * Payload-to-kernel:
+ * None.
+ *
+ * The UUID contained in the dm_ulog_request structure is all that is
+ * necessary to identify the log instance being postsuspended. There is no
+ * payload data.
+ *
+ * When the request has been processed, user-space must return the
+ * dm_ulog_request to the kernel - setting the 'error' field and
+ * 'data_size' appropriately.
+ */
+#define DM_ULOG_POSTSUSPEND 4
+
+/*
+ * DM_ULOG_RESUME corresponds to (found in dm-dirty-log.h):
+ * int (*resume)(struct dm_dirty_log *log);
+ *
+ * Payload-to-userspace:
+ * None.
+ * Payload-to-kernel:
+ * None.
+ *
+ * The UUID contained in the dm_ulog_request structure is all that is
+ * necessary to identify the log instance being resumed. There is no
+ * payload data.
+ *
+ * When the request has been processed, user-space must return the
+ * dm_ulog_request to the kernel - setting the 'error' field and
+ * 'data_size' appropriately.
+ */
+#define DM_ULOG_RESUME 5
+
+/*
+ * DM_ULOG_GET_REGION_SIZE corresponds to (found in dm-dirty-log.h):
+ * uint32_t (*get_region_size)(struct dm_dirty_log *log);
+ *
+ * Payload-to-userspace:
+ * None.
+ * Payload-to-kernel:
+ * uint64_t - contains the region size
+ *
+ * The region size is something that was determined at constructor time.
+ * It is returned in the payload area and 'data_size' is set to
+ * reflect this.
+ *
+ * When the request has been processed, user-space must return the
+ * dm_ulog_request to the kernel - setting the 'error' field appropriately.
+ */
+#define DM_ULOG_GET_REGION_SIZE 6
+
+/*
+ * DM_ULOG_IS_CLEAN corresponds to (found in dm-dirty-log.h):
+ * int (*is_clean)(struct dm_dirty_log *log, region_t region);
+ *
+ * Payload-to-userspace:
+ * uint64_t - the region to get clean status on
+ * Payload-to-kernel:
+ * int64_t - 1 if clean, 0 otherwise
+ *
+ * Payload is sizeof(uint64_t) and contains the region for which the clean
+ * status is being requested.
+ *
+ * When the request has been processed, user-space must return the
+ * dm_ulog_request to the kernel - filling the payload with 0 (not clean) or
+ * 1 (clean), setting 'data_size' and 'error' appropriately.
+ */
+#define DM_ULOG_IS_CLEAN 7
+
+/*
+ * DM_ULOG_IN_SYNC corresponds to (found in dm-dirty-log.h):
+ * int (*in_sync)(struct dm_dirty_log *log, region_t region,
+ * int can_block);
+ *
+ * Payload-to-userspace:
+ * uint64_t - the region to get sync status on
+ * Payload-to-kernel:
+ * int64_t - 1 if in-sync, 0 otherwise
+ *
+ * Exactly the same as 'is_clean' above, except this time asking "has the
+ * region been recovered?" vs. "is the region not being modified?"
+ */
+#define DM_ULOG_IN_SYNC 8
+
+/*
+ * DM_ULOG_FLUSH corresponds to (found in dm-dirty-log.h):
+ * int (*flush)(struct dm_dirty_log *log);
+ *
+ * Payload-to-userspace:
+ * None.
+ * Payload-to-kernel:
+ * None.
+ *
+ * No incoming or outgoing payload. Simply flush log state to disk.
+ *
+ * When the request has been processed, user-space must return the
+ * dm_ulog_request to the kernel - setting the 'error' field and clearing
+ * 'data_size' appropriately.
+ */
+#define DM_ULOG_FLUSH 9
+
+/*
+ * DM_ULOG_MARK_REGION corresponds to (found in dm-dirty-log.h):
+ * void (*mark_region)(struct dm_dirty_log *log, region_t region);
+ *
+ * Payload-to-userspace:
+ * uint64_t [] - region(s) to mark
+ * Payload-to-kernel:
+ * None.
+ *
+ * Incoming payload contains one or more regions to mark dirty.
+ * The number of regions contained in the payload can be determined from
+ * 'data_size/sizeof(uint64_t)'.
+ *
+ * When the request has been processed, user-space must return the
+ * dm_ulog_request to the kernel - setting the 'error' field and clearing
+ * 'data_size' appropriately.
+ */
+#define DM_ULOG_MARK_REGION 10
+
+/*
+ * DM_ULOG_CLEAR_REGION corresponds to (found in dm-dirty-log.h):
+ * void (*clear_region)(struct dm_dirty_log *log, region_t region);
+ *
+ * Payload-to-userspace:
+ * uint64_t [] - region(s) to clear
+ * Payload-to-kernel:
+ * None.
+ *
+ * Incoming payload contains one or more regions to mark clean.
+ * The number of regions contained in the payload can be determined from
+ * 'data_size/sizeof(uint64_t)'.
+ *
+ * When the request has been processed, user-space must return the
+ * dm_ulog_request to the kernel - setting the 'error' field and clearing
+ * 'data_size' appropriately.
+ */
+#define DM_ULOG_CLEAR_REGION 11
+
+/*
+ * DM_ULOG_GET_RESYNC_WORK corresponds to (found in dm-dirty-log.h):
+ * int (*get_resync_work)(struct dm_dirty_log *log, region_t *region);
+ *
+ * Payload-to-userspace:
+ * None.
+ * Payload-to-kernel:
+ * {
+ * int64_t i; -- 1 if recovery necessary, 0 otherwise
+ * uint64_t r; -- The region to recover if i=1
+ * }
+ * 'data_size' should be set appropriately.
+ *
+ * When the request has been processed, user-space must return the
+ * dm_ulog_request to the kernel - setting the 'error' field appropriately.
+ */
+#define DM_ULOG_GET_RESYNC_WORK 12
+
+/*
+ * DM_ULOG_SET_REGION_SYNC corresponds to (found in dm-dirty-log.h):
+ * void (*set_region_sync)(struct dm_dirty_log *log,
+ * region_t region, int in_sync);
+ *
+ * Payload-to-userspace:
+ * {
+ * uint64_t - region to set sync state on
+ * int64_t - 0 if not-in-sync, 1 if in-sync
+ * }
+ * Payload-to-kernel:
+ * None.
+ *
+ * When the request has been processed, user-space must return the
+ * dm_ulog_request to the kernel - setting the 'error' field and clearing
+ * 'data_size' appropriately.
+ */
+#define DM_ULOG_SET_REGION_SYNC 13
+
+/*
+ * DM_ULOG_GET_SYNC_COUNT corresponds to (found in dm-dirty-log.h):
+ * region_t (*get_sync_count)(struct dm_dirty_log *log);
+ *
+ * Payload-to-userspace:
+ * None.
+ * Payload-to-kernel:
+ * uint64_t - the number of in-sync regions
+ *
+ * No incoming payload. Kernel-bound payload contains the number of
+ * regions that are in-sync (as a uint64_t).
+ *
+ * When the request has been processed, user-space must return the
+ * dm_ulog_request to the kernel - setting the 'error' field and
+ * 'data_size' appropriately.
+ */
+#define DM_ULOG_GET_SYNC_COUNT 14
+
+/*
+ * DM_ULOG_STATUS_INFO corresponds to (found in dm-dirty-log.h):
+ * int (*status)(struct dm_dirty_log *log, STATUSTYPE_INFO,
+ * char *result, unsigned maxlen);
+ *
+ * Payload-to-userspace:
+ * None.
+ * Payload-to-kernel:
+ * Character string containing STATUSTYPE_INFO
+ *
+ * When the request has been processed, user-space must return the
+ * dm_ulog_request to the kernel - setting the 'error' field and
+ * 'data_size' appropriately.
+ */
+#define DM_ULOG_STATUS_INFO 15
+
+/*
+ * DM_ULOG_STATUS_TABLE corresponds to (found in dm-dirty-log.h):
+ * int (*status)(struct dm_dirty_log *log, STATUSTYPE_TABLE,
+ * char *result, unsigned maxlen);
+ *
+ * Payload-to-userspace:
+ * None.
+ * Payload-to-kernel:
+ * Character string containing STATUSTYPE_TABLE
+ *
+ * When the request has been processed, user-space must return the
+ * dm_ulog_request to the kernel - setting the 'error' field and
+ * 'data_size' appropriately.
+ */
+#define DM_ULOG_STATUS_TABLE 16
+
+/*
+ * DM_ULOG_IS_REMOTE_RECOVERING corresponds to (found in dm-dirty-log.h):
+ * int (*is_remote_recovering)(struct dm_dirty_log *log, region_t region);
+ *
+ * Payload-to-userspace:
+ * uint64_t - region to determine recovery status on
+ * Payload-to-kernel:
+ * {
+ * int64_t is_recovering; -- 0 if no, 1 if yes
+ * uint64_t in_sync_hint; -- lowest region still needing resync
+ * }
+ *
+ * When the request has been processed, user-space must return the
+ * dm_ulog_request to the kernel - setting the 'error' field and
+ * 'data_size' appropriately.
+ */
+#define DM_ULOG_IS_REMOTE_RECOVERING 17
+
+/*
+ * (DM_ULOG_REQUEST_MASK & request_type) to get the request type
+ *
+ * We are reserving 8 bits of the 32-bit 'request_type' field for the
+ * various request types above. The remaining 24-bits are currently
+ * set to zero and are reserved for future use and compatibility concerns.
+ *
+ * User-space should always use DM_ULOG_REQUEST_TYPE to acquire the
+ * request type from the 'request_type' field to maintain forward compatibility.
+ */
+#define DM_ULOG_REQUEST_MASK 0xFF
+#define DM_ULOG_REQUEST_TYPE(request_type) \
+ (DM_ULOG_REQUEST_MASK & (request_type))
+
+struct dm_ulog_request {
+ char uuid[DM_UUID_LEN]; /* Ties a request to a specific mirror log */
+ char padding[7]; /* Padding because DM_UUID_LEN = 129 */
+
+ int32_t error; /* Used to report back processing errors */
+
+ uint32_t seq; /* Sequence number for request */
+ uint32_t request_type; /* DM_ULOG_* defined above */
+ uint32_t data_size; /* How much data (not including this struct) */
+
+ char data[0];
+};
+
+#endif /* __DM_LOG_USERSPACE_H__ */
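The comment blocks above describe a request/response protocol; a minimal user-space sketch may make the shape of a handler clearer. This is an illustration only: it assumes the definitions from this header are visible to user space, the transport for receiving and returning requests is omitted, and mark_region_dirty() is a hypothetical bookkeeping helper, not part of this interface.

#include <errno.h>
#include <stdint.h>
#include <string.h>

/* Hypothetical daemon bookkeeping helper (not part of this header). */
extern void mark_region_dirty(const char *uuid, uint64_t region);

/* Sketch of a daemon-side dispatcher for one dm_ulog_request. */
static void handle_request(struct dm_ulog_request *rq, uint64_t region_size)
{
	switch (DM_ULOG_REQUEST_TYPE(rq->request_type)) {
	case DM_ULOG_GET_REGION_SIZE:
		/* Kernel-bound payload: a single uint64_t. */
		memcpy(rq->data, &region_size, sizeof(region_size));
		rq->data_size = sizeof(region_size);
		rq->error = 0;
		break;
	case DM_ULOG_MARK_REGION: {
		/* Incoming payload: data_size/sizeof(uint64_t) regions. */
		uint64_t *region = (uint64_t *)rq->data;
		uint32_t i, count = rq->data_size / sizeof(uint64_t);

		for (i = 0; i < count; i++)
			mark_region_dirty(rq->uuid, region[i]);
		rq->data_size = 0;
		rq->error = 0;
		break;
	}
	default:
		rq->data_size = 0;
		rq->error = -ENOSYS;
		break;
	}
	/* The same dm_ulog_request (error/data_size filled in) goes back. */
}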
diff --git a/include/linux/dma_remapping.h b/include/linux/dma_remapping.h
index 1a455f1..5619f85 100644
--- a/include/linux/dma_remapping.h
+++ b/include/linux/dma_remapping.h
@@ -13,6 +13,10 @@
#define DMA_PTE_WRITE (2)
#define DMA_PTE_SNP (1 << 11)
+#define CONTEXT_TT_MULTI_LEVEL 0
+#define CONTEXT_TT_DEV_IOTLB 1
+#define CONTEXT_TT_PASS_THROUGH 2
+
struct intel_iommu;
struct dmar_domain;
struct root_entry;
@@ -21,11 +25,16 @@
#ifdef CONFIG_DMAR
extern int iommu_calculate_agaw(struct intel_iommu *iommu);
+extern int iommu_calculate_max_sagaw(struct intel_iommu *iommu);
#else
static inline int iommu_calculate_agaw(struct intel_iommu *iommu)
{
return 0;
}
+static inline int iommu_calculate_max_sagaw(struct intel_iommu *iommu)
+{
+ return 0;
+}
#endif
extern int dmar_disabled;
diff --git a/include/linux/dmar.h b/include/linux/dmar.h
index 10ff5c4..4a2b162 100644
--- a/include/linux/dmar.h
+++ b/include/linux/dmar.h
@@ -126,6 +126,8 @@
extern int irq_remapped(int irq);
extern struct intel_iommu *map_dev_to_ir(struct pci_dev *dev);
extern struct intel_iommu *map_ioapic_to_ir(int apic);
+extern int set_ioapic_sid(struct irte *irte, int apic);
+extern int set_msi_sid(struct irte *irte, struct pci_dev *dev);
#else
static inline int alloc_irte(struct intel_iommu *iommu, int irq, u16 count)
{
@@ -156,6 +158,15 @@
{
return NULL;
}
+static inline int set_ioapic_sid(struct irte *irte, int apic)
+{
+ return 0;
+}
+static inline int set_msi_sid(struct irte *irte, struct pci_dev *dev)
+{
+ return 0;
+}
+
#define irq_remapped(irq) (0)
#define enable_intr_remapping(mode) (-1)
#define disable_intr_remapping() (0)
@@ -188,6 +199,15 @@
#define for_each_rmrr_units(rmrr) \
list_for_each_entry(rmrr, &dmar_rmrr_units, list)
+
+struct dmar_atsr_unit {
+ struct list_head list; /* list of ATSR units */
+ struct acpi_dmar_header *hdr; /* ACPI header */
+ struct pci_dev **devices; /* target devices */
+ int devices_cnt; /* target device count */
+ u8 include_all:1; /* include all ports */
+};
+
/* Intel DMAR initialization functions */
extern int intel_iommu_init(void);
#else
diff --git a/include/linux/ext3_fs_i.h b/include/linux/ext3_fs_i.h
index 7894dd0..ca1bfe9 100644
--- a/include/linux/ext3_fs_i.h
+++ b/include/linux/ext3_fs_i.h
@@ -103,10 +103,6 @@
*/
struct rw_semaphore xattr_sem;
#endif
-#ifdef CONFIG_EXT3_FS_POSIX_ACL
- struct posix_acl *i_acl;
- struct posix_acl *i_default_acl;
-#endif
struct list_head i_orphan; /* unlinked but open inodes */
diff --git a/include/linux/falloc.h b/include/linux/falloc.h
index 8e912ab..3c15510 100644
--- a/include/linux/falloc.h
+++ b/include/linux/falloc.h
@@ -3,4 +3,25 @@
#define FALLOC_FL_KEEP_SIZE 0x01 /* default is extend size */
+#ifdef __KERNEL__
+
+/*
+ * Space reservation ioctls and argument structure
+ * are designed to be compatible with the legacy XFS ioctls.
+ */
+struct space_resv {
+ __s16 l_type;
+ __s16 l_whence;
+ __s64 l_start;
+ __s64 l_len; /* len == 0 means until end of file */
+ __s32 l_sysid;
+ __u32 l_pid;
+ __s32 l_pad[4]; /* reserved area */
+};
+
+#define FS_IOC_RESVSP _IOW('X', 40, struct space_resv)
+#define FS_IOC_RESVSP64 _IOW('X', 42, struct space_resv)
+
+#endif /* __KERNEL__ */
+
#endif /* _FALLOC_H_ */
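A rough user-space view of how this reservation structure is exercised (a sketch, not part of this header: the struct and ioctl number are re-declared locally because the definitions above sit under __KERNEL__; real programs have historically used the equivalent xfs_flock64/XFS_IOC_RESVSP64 definitions from the XFS headers):

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/ioctl.h>
#include <unistd.h>

/* Mirrors 'struct space_resv' above. */
struct my_space_resv {
	int16_t  l_type;
	int16_t  l_whence;
	int64_t  l_start;
	int64_t  l_len;		/* len == 0 means until end of file */
	int32_t  l_sysid;
	uint32_t l_pid;
	int32_t  l_pad[4];
};

#define MY_IOC_RESVSP64	_IOW('X', 42, struct my_space_resv)

/* Reserve 1 GiB past the current end of file without growing i_size. */
static int reserve_space(int fd)
{
	struct my_space_resv sr = {
		.l_whence = SEEK_END,
		.l_start  = 0,
		.l_len    = 1024LL * 1024 * 1024,
	};

	return ioctl(fd, MY_IOC_RESVSP64, &sr);
}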
diff --git a/include/linux/firewire.h b/include/linux/firewire.h
index e584b72..9823946 100644
--- a/include/linux/firewire.h
+++ b/include/linux/firewire.h
@@ -3,6 +3,7 @@
#include <linux/completion.h>
#include <linux/device.h>
+#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/list.h>
@@ -355,4 +356,90 @@
int generation, int speed, unsigned long long offset,
void *payload, size_t length);
+static inline int fw_stream_packet_destination_id(int tag, int channel, int sy)
+{
+ return tag << 14 | channel << 8 | sy;
+}
+
+struct fw_descriptor {
+ struct list_head link;
+ size_t length;
+ u32 immediate;
+ u32 key;
+ const u32 *data;
+};
+
+int fw_core_add_descriptor(struct fw_descriptor *desc);
+void fw_core_remove_descriptor(struct fw_descriptor *desc);
+
+/*
+ * The iso packet format allows for an immediate header/payload part
+ * stored in 'header' immediately after the packet info plus an
+ * indirect payload part that is pointed to by the 'payload' field.
+ * Applications can use one or the other or both to implement simple
+ * low-bandwidth streaming (e.g. audio) or more advanced
+ * scatter-gather streaming (e.g. assembling video frames automatically).
+ */
+struct fw_iso_packet {
+ u16 payload_length; /* Length of indirect payload. */
+ u32 interrupt:1; /* Generate interrupt on this packet */
+ u32 skip:1; /* Set to not send packet at all. */
+ u32 tag:2;
+ u32 sy:4;
+ u32 header_length:8; /* Length of immediate header. */
+ u32 header[0];
+};
+
+#define FW_ISO_CONTEXT_TRANSMIT 0
+#define FW_ISO_CONTEXT_RECEIVE 1
+
+#define FW_ISO_CONTEXT_MATCH_TAG0 1
+#define FW_ISO_CONTEXT_MATCH_TAG1 2
+#define FW_ISO_CONTEXT_MATCH_TAG2 4
+#define FW_ISO_CONTEXT_MATCH_TAG3 8
+#define FW_ISO_CONTEXT_MATCH_ALL_TAGS 15
+
+/*
+ * An iso buffer is just a set of pages mapped for DMA in the
+ * specified direction. Since the pages are to be used for DMA, they
+ * are not mapped into the kernel virtual address space. We store the
+ * DMA address in the page private. The helper function
+ * fw_iso_buffer_map() will map the pages into a given vma.
+ */
+struct fw_iso_buffer {
+ enum dma_data_direction direction;
+ struct page **pages;
+ int page_count;
+};
+
+int fw_iso_buffer_init(struct fw_iso_buffer *buffer, struct fw_card *card,
+ int page_count, enum dma_data_direction direction);
+void fw_iso_buffer_destroy(struct fw_iso_buffer *buffer, struct fw_card *card);
+
+struct fw_iso_context;
+typedef void (*fw_iso_callback_t)(struct fw_iso_context *context,
+ u32 cycle, size_t header_length,
+ void *header, void *data);
+struct fw_iso_context {
+ struct fw_card *card;
+ int type;
+ int channel;
+ int speed;
+ size_t header_size;
+ fw_iso_callback_t callback;
+ void *callback_data;
+};
+
+struct fw_iso_context *fw_iso_context_create(struct fw_card *card,
+ int type, int channel, int speed, size_t header_size,
+ fw_iso_callback_t callback, void *callback_data);
+int fw_iso_context_queue(struct fw_iso_context *ctx,
+ struct fw_iso_packet *packet,
+ struct fw_iso_buffer *buffer,
+ unsigned long payload);
+int fw_iso_context_start(struct fw_iso_context *ctx,
+ int cycle, int sync, int tags);
+int fw_iso_context_stop(struct fw_iso_context *ctx);
+void fw_iso_context_destroy(struct fw_iso_context *ctx);
+
#endif /* _LINUX_FIREWIRE_H */
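To connect the isochronous pieces declared above, here is a minimal receive-side sketch (assumptions: a valid fw_card, the usual ERR_PTR convention for fw_iso_context_create(), and arbitrary numbers for the 16-page buffer and 4-byte header size):

#include <linux/firewire.h>
#include <linux/err.h>

static void iso_done(struct fw_iso_context *ctx, u32 cycle,
		     size_t header_length, void *header, void *data)
{
	/* Per-packet completion: inspect 'header', refill the queue, etc. */
}

/* A real driver would keep the buffer in its device state; static here
 * only to keep the sketch short. */
static struct fw_iso_buffer buffer;

static int start_iso_rx(struct fw_card *card, int channel, int speed)
{
	struct fw_iso_context *ctx;
	struct fw_iso_packet packet = {
		.payload_length	= 4096,
		.interrupt	= 1,
		.header_length	= 4,
	};
	int err;

	ctx = fw_iso_context_create(card, FW_ISO_CONTEXT_RECEIVE, channel,
				    speed, 4, iso_done, NULL);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	err = fw_iso_buffer_init(&buffer, card, 16, DMA_FROM_DEVICE);
	if (err)
		goto out_context;

	err = fw_iso_context_queue(ctx, &packet, &buffer, 0);
	if (err)
		goto out_buffer;

	return fw_iso_context_start(ctx, -1, 0, FW_ISO_CONTEXT_MATCH_ALL_TAGS);

out_buffer:
	fw_iso_buffer_destroy(&buffer, card);
out_context:
	fw_iso_context_destroy(ctx);
	return err;
}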
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 74a5793..0872372 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -710,6 +710,9 @@
#define i_size_ordered_init(inode) do { } while (0)
#endif
+struct posix_acl;
+#define ACL_NOT_CACHED ((void *)(-1))
+
struct inode {
struct hlist_node i_hash;
struct list_head i_list;
@@ -773,6 +776,10 @@
#ifdef CONFIG_SECURITY
void *i_security;
#endif
+#ifdef CONFIG_FS_POSIX_ACL
+ struct posix_acl *i_acl;
+ struct posix_acl *i_default_acl;
+#endif
void *i_private; /* fs or device private pointer */
};
@@ -1107,6 +1114,7 @@
extern void __locks_copy_lock(struct file_lock *, const struct file_lock *);
extern void locks_remove_posix(struct file *, fl_owner_t);
extern void locks_remove_flock(struct file *);
+extern void locks_release_private(struct file_lock *);
extern void posix_test_lock(struct file *, struct file_lock *);
extern int posix_lock_file(struct file *, struct file_lock *, struct file_lock *);
extern int posix_lock_file_wait(struct file *, struct file_lock *);
@@ -1905,6 +1913,8 @@
extern int do_truncate(struct dentry *, loff_t start, unsigned int time_attrs,
struct file *filp);
+extern int do_fallocate(struct file *file, int mode, loff_t offset,
+ loff_t len);
extern long do_sys_open(int dfd, const char __user *filename, int flags,
int mode);
extern struct file *filp_open(const char *, int, int);
@@ -1913,6 +1923,10 @@
extern int filp_close(struct file *, fl_owner_t id);
extern char * getname(const char __user *);
+/* fs/ioctl.c */
+
+extern int ioctl_preallocate(struct file *filp, void __user *argp);
+
/* fs/dcache.c */
extern void __init vfs_caches_init_early(void);
extern void __init vfs_caches_init(unsigned long);
diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h
index 7400900..54648e6 100644
--- a/include/linux/hrtimer.h
+++ b/include/linux/hrtimer.h
@@ -21,6 +21,7 @@
#include <linux/list.h>
#include <linux/wait.h>
#include <linux/percpu.h>
+#include <linux/timer.h>
struct hrtimer_clock_base;
@@ -447,6 +448,8 @@
static inline void timer_stats_account_hrtimer(struct hrtimer *timer)
{
+ if (likely(!timer->start_pid))
+ return;
timer_stats_update_stats(timer, timer->start_pid, timer->start_site,
timer->function, timer->start_comm, 0);
}
@@ -456,6 +459,8 @@
static inline void timer_stats_hrtimer_set_start_info(struct hrtimer *timer)
{
+ if (likely(!timer_stats_active))
+ return;
__timer_stats_hrtimer_set_start_info(timer, __builtin_return_address(0));
}
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index a05a5ef..2723513 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -33,7 +33,7 @@
int hugetlb_report_node_meminfo(int, char *);
unsigned long hugetlb_total_pages(void);
int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
- unsigned long address, int write_access);
+ unsigned long address, unsigned int flags);
int hugetlb_reserve_pages(struct inode *inode, long from, long to,
struct vm_area_struct *vma,
int acctflags);
@@ -98,7 +98,7 @@
#define pud_huge(x) 0
#define is_hugepage_only_range(mm, addr, len) 0
#define hugetlb_free_pgd_range(tlb, addr, end, floor, ceiling) ({BUG(); 0; })
-#define hugetlb_fault(mm, vma, addr, write) ({ BUG(); 0; })
+#define hugetlb_fault(mm, vma, addr, flags) ({ BUG(); 0; })
#define hugetlb_change_protection(vma, address, end, newprot)
diff --git a/include/linux/icmpv6.h b/include/linux/icmpv6.h
index 10d701e..b6a8518 100644
--- a/include/linux/icmpv6.h
+++ b/include/linux/icmpv6.h
@@ -175,16 +175,16 @@
extern void icmpv6_send(struct sk_buff *skb,
- int type, int code,
+ u8 type, u8 code,
__u32 info,
struct net_device *dev);
extern int icmpv6_init(void);
-extern int icmpv6_err_convert(int type, int code,
+extern int icmpv6_err_convert(u8 type, u8 code,
int *err);
extern void icmpv6_cleanup(void);
extern void icmpv6_param_prob(struct sk_buff *skb,
- int code, int pos);
+ u8 code, int pos);
struct flowi;
struct in6_addr;
diff --git a/include/linux/ide.h b/include/linux/ide.h
index 95c6e00..cf1f388 100644
--- a/include/linux/ide.h
+++ b/include/linux/ide.h
@@ -1361,7 +1361,6 @@
#ifdef CONFIG_BLK_DEV_IDEDMA
int ide_dma_good_drive(ide_drive_t *);
int __ide_dma_bad_drive(ide_drive_t *);
-int ide_id_dma_bug(ide_drive_t *);
u8 ide_find_dma_mode(ide_drive_t *, u8);
@@ -1402,7 +1401,6 @@
ide_startstop_t ide_dma_timeout_retry(ide_drive_t *, int);
#else
-static inline int ide_id_dma_bug(ide_drive_t *drive) { return 0; }
static inline u8 ide_find_dma_mode(ide_drive_t *drive, u8 speed) { return 0; }
static inline u8 ide_max_dma_mode(ide_drive_t *drive) { return 0; }
static inline void ide_dma_off_quietly(ide_drive_t *drive) { ; }
diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h
index aa8c531..482dc91 100644
--- a/include/linux/intel-iommu.h
+++ b/include/linux/intel-iommu.h
@@ -53,6 +53,7 @@
#define DMAR_PHMLIMIT_REG 0x78 /* pmrr high limit */
#define DMAR_IQH_REG 0x80 /* Invalidation queue head register */
#define DMAR_IQT_REG 0x88 /* Invalidation queue tail register */
+#define DMAR_IQ_SHIFT 4 /* Invalidation queue head/tail shift */
#define DMAR_IQA_REG 0x90 /* Invalidation queue addr register */
#define DMAR_ICS_REG 0x98 /* Invalidation complete status register */
#define DMAR_IRTA_REG 0xb8 /* Interrupt remapping table addr register */
@@ -120,8 +121,10 @@
(ecap_iotlb_offset(e) + ecap_niotlb_iunits(e) * 16)
#define ecap_coherent(e) ((e) & 0x1)
#define ecap_qis(e) ((e) & 0x2)
+#define ecap_pass_through(e) ((e >> 6) & 0x1)
#define ecap_eim_support(e) ((e >> 4) & 0x1)
#define ecap_ir_support(e) ((e >> 3) & 0x1)
+#define ecap_dev_iotlb_support(e) (((e) >> 2) & 0x1)
#define ecap_max_handle_mask(e) ((e >> 20) & 0xf)
#define ecap_sc_support(e) ((e >> 7) & 0x1) /* Snooping Control */
@@ -197,6 +200,8 @@
#define DMA_FSTS_PPF ((u32)2)
#define DMA_FSTS_PFO ((u32)1)
#define DMA_FSTS_IQE (1 << 4)
+#define DMA_FSTS_ICE (1 << 5)
+#define DMA_FSTS_ITE (1 << 6)
#define dma_fsts_fault_record_index(s) (((s) >> 8) & 0xff)
/* FRCD_REG, 32 bits access */
@@ -225,7 +230,8 @@
enum {
QI_FREE,
QI_IN_USE,
- QI_DONE
+ QI_DONE,
+ QI_ABORT
};
#define QI_CC_TYPE 0x1
@@ -254,6 +260,12 @@
#define QI_CC_DID(did) (((u64)did) << 16)
#define QI_CC_GRAN(gran) (((u64)gran) >> (DMA_CCMD_INVL_GRANU_OFFSET-4))
+#define QI_DEV_IOTLB_SID(sid) ((u64)((sid) & 0xffff) << 32)
+#define QI_DEV_IOTLB_QDEP(qdep) (((qdep) & 0x1f) << 16)
+#define QI_DEV_IOTLB_ADDR(addr) ((u64)(addr) & VTD_PAGE_MASK)
+#define QI_DEV_IOTLB_SIZE 1
+#define QI_DEV_IOTLB_MAX_INVS 32
+
struct qi_desc {
u64 low, high;
};
@@ -280,10 +292,10 @@
#endif
struct iommu_flush {
- int (*flush_context)(struct intel_iommu *iommu, u16 did, u16 sid, u8 fm,
- u64 type, int non_present_entry_flush);
- int (*flush_iotlb)(struct intel_iommu *iommu, u16 did, u64 addr,
- unsigned int size_order, u64 type, int non_present_entry_flush);
+ void (*flush_context)(struct intel_iommu *iommu, u16 did, u16 sid,
+ u8 fm, u64 type);
+ void (*flush_iotlb)(struct intel_iommu *iommu, u16 did, u64 addr,
+ unsigned int size_order, u64 type);
};
enum {
@@ -302,6 +314,7 @@
spinlock_t register_lock; /* protect register handling */
int seq_id; /* sequence id of the iommu */
int agaw; /* agaw of this iommu */
+ int msagaw; /* max sagaw of this iommu */
unsigned int irq;
unsigned char name[13]; /* Device Name */
@@ -329,6 +342,7 @@
}
extern struct dmar_drhd_unit * dmar_find_matched_drhd_unit(struct pci_dev *dev);
+extern int dmar_find_matched_atsr_unit(struct pci_dev *dev);
extern int alloc_iommu(struct dmar_drhd_unit *drhd);
extern void free_iommu(struct intel_iommu *iommu);
@@ -337,11 +351,12 @@
extern int dmar_reenable_qi(struct intel_iommu *iommu);
extern void qi_global_iec(struct intel_iommu *iommu);
-extern int qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid,
- u8 fm, u64 type, int non_present_entry_flush);
-extern int qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
- unsigned int size_order, u64 type,
- int non_present_entry_flush);
+extern void qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid,
+ u8 fm, u64 type);
+extern void qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
+ unsigned int size_order, u64 type);
+extern void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 qdep,
+ u64 addr, unsigned mask);
extern int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu);
diff --git a/include/linux/ioport.h b/include/linux/ioport.h
index 32e4b2f..786e7b8 100644
--- a/include/linux/ioport.h
+++ b/include/linux/ioport.h
@@ -49,6 +49,8 @@
#define IORESOURCE_SIZEALIGN 0x00020000 /* size indicates alignment */
#define IORESOURCE_STARTALIGN 0x00040000 /* start field is alignment */
+#define IORESOURCE_MEM_64 0x00100000
+
#define IORESOURCE_EXCLUSIVE 0x08000000 /* Userland may not map this resource */
#define IORESOURCE_DISABLED 0x10000000
#define IORESOURCE_UNSET 0x20000000
diff --git a/include/linux/leds-lp3944.h b/include/linux/leds-lp3944.h
new file mode 100644
index 0000000..afc9f9f
--- /dev/null
+++ b/include/linux/leds-lp3944.h
@@ -0,0 +1,53 @@
+/*
+ * leds-lp3944.h - platform data structure for lp3944 led controller
+ *
+ * Copyright (C) 2009 Antonio Ospite <ospite@studenti.unina.it>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#ifndef __LINUX_LEDS_LP3944_H
+#define __LINUX_LEDS_LP3944_H
+
+#include <linux/leds.h>
+#include <linux/workqueue.h>
+
+#define LP3944_LED0 0
+#define LP3944_LED1 1
+#define LP3944_LED2 2
+#define LP3944_LED3 3
+#define LP3944_LED4 4
+#define LP3944_LED5 5
+#define LP3944_LED6 6
+#define LP3944_LED7 7
+#define LP3944_LEDS_MAX 8
+
+#define LP3944_LED_STATUS_MASK 0x03
+enum lp3944_status {
+ LP3944_LED_STATUS_OFF = 0x0,
+ LP3944_LED_STATUS_ON = 0x1,
+ LP3944_LED_STATUS_DIM0 = 0x2,
+ LP3944_LED_STATUS_DIM1 = 0x3
+};
+
+enum lp3944_type {
+ LP3944_LED_TYPE_NONE,
+ LP3944_LED_TYPE_LED,
+ LP3944_LED_TYPE_LED_INVERTED,
+};
+
+struct lp3944_led {
+ char *name;
+ enum lp3944_type type;
+ enum lp3944_status status;
+};
+
+struct lp3944_platform_data {
+ struct lp3944_led leds[LP3944_LEDS_MAX];
+ u8 leds_size;
+};
+
+#endif /* __LINUX_LEDS_LP3944_H */
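For illustration, board code might hand this driver its LED layout roughly as below (a sketch only; the I2C address, LED names and the registration path are made-up assumptions, not defined by this header):

#include <linux/init.h>
#include <linux/i2c.h>
#include <linux/leds-lp3944.h>

static struct lp3944_platform_data board_lp3944_pdata = {
	.leds = {
		[LP3944_LED0] = {
			.name	= "board:red:status",
			.type	= LP3944_LED_TYPE_LED,
			.status	= LP3944_LED_STATUS_OFF,
		},
		[LP3944_LED1] = {
			.name	= "board:green:power",
			.type	= LP3944_LED_TYPE_LED_INVERTED,
			.status	= LP3944_LED_STATUS_ON,
		},
		/* Unused outputs are left as LP3944_LED_TYPE_NONE (0). */
	},
	.leds_size = LP3944_LEDS_MAX,
};

static struct i2c_board_info board_i2c_devs[] __initdata = {
	{
		I2C_BOARD_INFO("lp3944", 0x60),	/* address is an assumption */
		.platform_data = &board_lp3944_pdata,
	},
};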
diff --git a/include/linux/leds.h b/include/linux/leds.h
index 376fe07..d8bf966 100644
--- a/include/linux/leds.h
+++ b/include/linux/leds.h
@@ -45,7 +45,10 @@
/* Get LED brightness level */
enum led_brightness (*brightness_get)(struct led_classdev *led_cdev);
- /* Activate hardware accelerated blink */
+ /* Activate hardware accelerated blink, delays are in
+ * milliseconds and if none is provided then a sensible default
+ * should be chosen. The call can adjust the timings if it can't
+ * match the values specified exactly. */
int (*blink_set)(struct led_classdev *led_cdev,
unsigned long *delay_on,
unsigned long *delay_off);
@@ -141,9 +144,14 @@
const char *name;
const char *default_trigger;
unsigned gpio;
- u8 active_low : 1;
- u8 retain_state_suspended : 1;
+ unsigned active_low : 1;
+ unsigned retain_state_suspended : 1;
+ unsigned default_state : 2;
+ /* default_state should be one of LEDS_GPIO_DEFSTATE_(ON|OFF|KEEP) */
};
+#define LEDS_GPIO_DEFSTATE_OFF 0
+#define LEDS_GPIO_DEFSTATE_ON 1
+#define LEDS_GPIO_DEFSTATE_KEEP 2
struct gpio_led_platform_data {
int num_leds;
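To show what the new default_state field above buys a board, a short sketch follows (GPIO numbers and names are invented, and it relies on the .leds pointer member that gpio_led_platform_data already carries in leds.h):

#include <linux/kernel.h>
#include <linux/leds.h>

static struct gpio_led board_leds[] = {
	{
		.name		= "board:blue:heartbeat",
		.default_trigger = "heartbeat",
		.gpio		= 12,
		.active_low	= 1,
		.default_state	= LEDS_GPIO_DEFSTATE_OFF,
	},
	{
		.name		= "board:amber:charge",
		.gpio		= 13,
		/* Leave whatever state the bootloader programmed. */
		.default_state	= LEDS_GPIO_DEFSTATE_KEEP,
	},
};

static struct gpio_led_platform_data board_leds_pdata = {
	.num_leds	= ARRAY_SIZE(board_leds),
	.leds		= board_leds,
};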
diff --git a/include/linux/lockd/lockd.h b/include/linux/lockd/lockd.h
index 51855df..c325b18 100644
--- a/include/linux/lockd/lockd.h
+++ b/include/linux/lockd/lockd.h
@@ -195,7 +195,7 @@
extern int nlmsvc_grace_period;
extern unsigned long nlmsvc_timeout;
extern int nsm_use_hostnames;
-extern int nsm_local_state;
+extern u32 nsm_local_state;
/*
* Lockd client functions
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index da5a5a1..b25d1b5 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -258,6 +258,16 @@
#define lockdep_set_subclass(lock, sub) \
lockdep_init_map(&(lock)->dep_map, #lock, \
(lock)->dep_map.key, sub)
+/*
+ * Compare locking classes
+ */
+#define lockdep_match_class(lock, key) lockdep_match_key(&(lock)->dep_map, key)
+
+static inline int lockdep_match_key(struct lockdep_map *lock,
+ struct lock_class_key *key)
+{
+ return lock->key == key;
+}
/*
* Acquire a lock.
@@ -326,6 +336,11 @@
#define lockdep_set_class_and_subclass(lock, key, sub) \
do { (void)(key); } while (0)
#define lockdep_set_subclass(lock, sub) do { } while (0)
+/*
+ * We don't define lockdep_match_class() and lockdep_match_key() for !LOCKDEP
+ * case since the result is not well defined and the caller should rather
+ * #ifdef the call himself.
+ */
# define INIT_LOCKDEP
# define lockdep_reset() do { debug_locks = 1; } while (0)
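As a quick illustration of the new lockdep_match_class() helper above (hypothetical lock and key names; as the comment notes, callers must guard the check with CONFIG_LOCKDEP themselves):

#include <linux/lockdep.h>
#include <linux/spinlock.h>

static struct lock_class_key board_queue_key;	/* key used when the lock was set up */

static void assert_queue_lock_class(spinlock_t *lock)
{
#ifdef CONFIG_LOCKDEP
	/* Complain if 'lock' was classified with some other key. */
	WARN_ON(!lockdep_match_class(lock, &board_queue_key));
#endif
}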
diff --git a/include/linux/max17040_battery.h b/include/linux/max17040_battery.h
new file mode 100644
index 0000000..ad97b06
--- /dev/null
+++ b/include/linux/max17040_battery.h
@@ -0,0 +1,19 @@
+/*
+ * Copyright (C) 2009 Samsung Electronics
+ * Minkyu Kang <mk7.kang@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __MAX17040_BATTERY_H_
+#define __MAX17040_BATTERY_H_
+
+struct max17040_platform_data {
+ int (*battery_online)(void);
+ int (*charger_online)(void);
+ int (*charger_enable)(void);
+};
+
+#endif
diff --git a/include/linux/mm.h b/include/linux/mm.h
index d006e93..ba3a7cb 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -826,7 +826,7 @@
extern int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write);
int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
- unsigned long start, int len, int write, int force,
+ unsigned long start, int nr_pages, int write, int force,
struct page **pages, struct vm_area_struct **vmas);
int get_user_pages_fast(unsigned long start, int nr_pages, int write,
struct page **pages);
diff --git a/include/linux/mnt_namespace.h b/include/linux/mnt_namespace.h
index 3a05929..3beb259 100644
--- a/include/linux/mnt_namespace.h
+++ b/include/linux/mnt_namespace.h
@@ -24,16 +24,10 @@
struct fs_struct;
+extern struct mnt_namespace *create_mnt_ns(struct vfsmount *mnt);
extern struct mnt_namespace *copy_mnt_ns(unsigned long, struct mnt_namespace *,
struct fs_struct *);
-extern void __put_mnt_ns(struct mnt_namespace *ns);
-
-static inline void put_mnt_ns(struct mnt_namespace *ns)
-{
- if (atomic_dec_and_lock(&ns->count, &vfsmount_lock))
- /* releases vfsmount_lock */
- __put_mnt_ns(ns);
-}
+extern void put_mnt_ns(struct mnt_namespace *ns);
static inline void exit_mnt_ns(struct task_struct *p)
{
diff --git a/include/linux/mtd/nand.h b/include/linux/mtd/nand.h
index 7efb9be..4030eba 100644
--- a/include/linux/mtd/nand.h
+++ b/include/linux/mtd/nand.h
@@ -563,6 +563,7 @@
* @options: Option flags, e.g. 16bit buswidth
* @ecclayout: ecc layout info structure
* @part_probe_types: NULL-terminated array of probe types
+ * @set_parts: platform specific function to set partitions
* @priv: hardware controller specific settings
*/
struct platform_nand_chip {
@@ -574,26 +575,41 @@
int chip_delay;
unsigned int options;
const char **part_probe_types;
+ void (*set_parts)(uint64_t size,
+ struct platform_nand_chip *chip);
void *priv;
};
+/* Keep gcc happy */
+struct platform_device;
+
/**
* struct platform_nand_ctrl - controller level device structure
+ * @probe: platform specific function to probe/setup hardware
+ * @remove: platform specific function to remove/teardown hardware
* @hwcontrol: platform specific hardware control structure
* @dev_ready: platform specific function to read ready/busy pin
* @select_chip: platform specific chip select function
* @cmd_ctrl: platform specific function for controlling
* ALE/CLE/nCE. Also used to write command and address
+ * @write_buf: platform specific function for write buffer
+ * @read_buf: platform specific function for read buffer
* @priv: private data to transport driver specific settings
*
* All fields are optional and depend on the hardware driver requirements
*/
struct platform_nand_ctrl {
+ int (*probe)(struct platform_device *pdev);
+ void (*remove)(struct platform_device *pdev);
void (*hwcontrol)(struct mtd_info *mtd, int cmd);
int (*dev_ready)(struct mtd_info *mtd);
void (*select_chip)(struct mtd_info *mtd, int chip);
void (*cmd_ctrl)(struct mtd_info *mtd, int dat,
unsigned int ctrl);
+ void (*write_buf)(struct mtd_info *mtd,
+ const uint8_t *buf, int len);
+ void (*read_buf)(struct mtd_info *mtd,
+ uint8_t *buf, int len);
void *priv;
};
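A small sketch of how a board might use the new probe/remove hooks (everything here is invented for illustration; the controller struct is typically embedded in the board's platform_nand_data):

#include <linux/mtd/nand.h>
#include <linux/platform_device.h>

static int board_nand_probe(struct platform_device *pdev)
{
	/* claim GPIOs, enable controller clocks, etc. */
	return 0;
}

static void board_nand_remove(struct platform_device *pdev)
{
	/* undo board_nand_probe() */
}

static struct platform_nand_ctrl board_nand_ctrl = {
	.probe	= board_nand_probe,
	.remove	= board_nand_remove,
	/* hwcontrol/cmd_ctrl/dev_ready/... remain optional, as documented. */
};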
diff --git a/include/linux/mtd/onenand.h b/include/linux/mtd/onenand.h
index 9aa2a91..8ed8733 100644
--- a/include/linux/mtd/onenand.h
+++ b/include/linux/mtd/onenand.h
@@ -17,6 +17,7 @@
#include <linux/mtd/onenand_regs.h>
#include <linux/mtd/bbm.h>
+#define MAX_DIES 2
#define MAX_BUFFERRAM 2
/* Scan and identify a OneNAND device */
@@ -51,7 +52,12 @@
/**
* struct onenand_chip - OneNAND Private Flash Chip Data
* @base: [BOARDSPECIFIC] address to access OneNAND
+ * @dies: [INTERN][FLEX-ONENAND] number of dies on chip
+ * @boundary: [INTERN][FLEX-ONENAND] Boundary of the dies
+ * @diesize: [INTERN][FLEX-ONENAND] Size of the dies
* @chipsize: [INTERN] the size of one chip for multichip arrays
+ * FIXME For Flex-OneNAND, chipsize holds maximum possible
+ * device size, i.e. when all blocks are considered MLC
* @device_id: [INTERN] device ID
* @density_mask: chip density, used for DDP devices
* @verstion_id: [INTERN] version ID
@@ -68,6 +74,8 @@
* @command: [REPLACEABLE] hardware specific function for writing
* commands to the chip
* @wait: [REPLACEABLE] hardware specific function for wait on ready
+ * @bbt_wait: [REPLACEABLE] hardware specific function for bbt wait on ready
+ * @unlock_all: [REPLACEABLE] hardware specific function for unlock all
* @read_bufferram: [REPLACEABLE] hardware specific function for BufferRAM Area
* @write_bufferram: [REPLACEABLE] hardware specific function for BufferRAM Area
* @read_word: [REPLACEABLE] hardware specific function for read
@@ -92,9 +100,13 @@
*/
struct onenand_chip {
void __iomem *base;
+ unsigned dies;
+ unsigned boundary[MAX_DIES];
+ loff_t diesize[MAX_DIES];
unsigned int chipsize;
unsigned int device_id;
unsigned int version_id;
+ unsigned int technology;
unsigned int density_mask;
unsigned int options;
@@ -108,6 +120,8 @@
int (*command)(struct mtd_info *mtd, int cmd, loff_t address, size_t len);
int (*wait)(struct mtd_info *mtd, int state);
+ int (*bbt_wait)(struct mtd_info *mtd, int state);
+ void (*unlock_all)(struct mtd_info *mtd);
int (*read_bufferram)(struct mtd_info *mtd, int area,
unsigned char *buffer, int offset, size_t count);
int (*write_bufferram)(struct mtd_info *mtd, int area,
@@ -145,6 +159,8 @@
#define ONENAND_SET_BUFFERRAM0(this) (this->bufferram_index = 0)
#define ONENAND_SET_BUFFERRAM1(this) (this->bufferram_index = 1)
+#define FLEXONENAND(this) \
+ (this->device_id & DEVICE_IS_FLEXONENAND)
#define ONENAND_GET_SYS_CFG1(this) \
(this->read_word(this->base + ONENAND_REG_SYS_CFG1))
#define ONENAND_SET_SYS_CFG1(v, this) \
@@ -153,6 +169,9 @@
#define ONENAND_IS_DDP(this) \
(this->device_id & ONENAND_DEVICE_IS_DDP)
+#define ONENAND_IS_MLC(this) \
+ (this->technology & ONENAND_TECHNOLOGY_IS_MLC)
+
#ifdef CONFIG_MTD_ONENAND_2X_PROGRAM
#define ONENAND_IS_2PLANE(this) \
(this->options & ONENAND_HAS_2PLANE)
@@ -169,6 +188,7 @@
#define ONENAND_HAS_CONT_LOCK (0x0001)
#define ONENAND_HAS_UNLOCK_ALL (0x0002)
#define ONENAND_HAS_2PLANE (0x0004)
+#define ONENAND_SKIP_UNLOCK_CHECK (0x0100)
#define ONENAND_PAGEBUF_ALLOC (0x1000)
#define ONENAND_OOBBUF_ALLOC (0x2000)
@@ -176,6 +196,7 @@
* OneNAND Flash Manufacturer ID Codes
*/
#define ONENAND_MFR_SAMSUNG 0xec
+#define ONENAND_MFR_NUMONYX 0x20
/**
* struct onenand_manufacturers - NAND Flash Manufacturer ID Structure
@@ -189,5 +210,8 @@
int onenand_bbt_read_oob(struct mtd_info *mtd, loff_t from,
struct mtd_oob_ops *ops);
+unsigned onenand_block(struct onenand_chip *this, loff_t addr);
+loff_t onenand_addr(struct onenand_chip *this, int block);
+int flexonenand_region(struct mtd_info *mtd, loff_t addr);
#endif /* __LINUX_MTD_ONENAND_H */
diff --git a/include/linux/mtd/onenand_regs.h b/include/linux/mtd/onenand_regs.h
index 0c6bbe2..86a6bbe 100644
--- a/include/linux/mtd/onenand_regs.h
+++ b/include/linux/mtd/onenand_regs.h
@@ -67,6 +67,9 @@
/*
* Device ID Register F001h (R)
*/
+#define DEVICE_IS_FLEXONENAND (1 << 9)
+#define FLEXONENAND_PI_MASK (0x3ff)
+#define FLEXONENAND_PI_UNLOCK_SHIFT (14)
#define ONENAND_DEVICE_DENSITY_MASK (0xf)
#define ONENAND_DEVICE_DENSITY_SHIFT (4)
#define ONENAND_DEVICE_IS_DDP (1 << 3)
@@ -84,6 +87,11 @@
#define ONENAND_VERSION_PROCESS_SHIFT (8)
/*
+ * Technology Register F006h (R)
+ */
+#define ONENAND_TECHNOLOGY_IS_MLC (1 << 0)
+
+/*
* Start Address 1 F100h (R/W) & Start Address 2 F101h (R/W)
*/
#define ONENAND_DDP_SHIFT (15)
@@ -93,7 +101,8 @@
/*
* Start Address 8 F107h (R/W)
*/
-#define ONENAND_FPA_MASK (0x3f)
+/* Note: It's actually 0x3f in case of SLC */
+#define ONENAND_FPA_MASK (0x7f)
#define ONENAND_FPA_SHIFT (2)
#define ONENAND_FSA_MASK (0x03)
@@ -105,7 +114,8 @@
#define ONENAND_BSA_BOOTRAM (0 << 2)
#define ONENAND_BSA_DATARAM0 (2 << 2)
#define ONENAND_BSA_DATARAM1 (3 << 2)
-#define ONENAND_BSC_MASK (0x03)
+/* Note: It's actually 0x03 in case of SLC */
+#define ONENAND_BSC_MASK (0x07)
/*
* Command Register F220h (R/W)
@@ -124,9 +134,13 @@
#define ONENAND_CMD_RESET (0xF0)
#define ONENAND_CMD_OTP_ACCESS (0x65)
#define ONENAND_CMD_READID (0x90)
+#define FLEXONENAND_CMD_PI_UPDATE (0x05)
+#define FLEXONENAND_CMD_PI_ACCESS (0x66)
+#define FLEXONENAND_CMD_RECOVER_LSB (0x05)
/* NOTE: Those are not *REAL* commands */
#define ONENAND_CMD_BUFFERRAM (0x1978)
+#define FLEXONENAND_CMD_READ_PI (0x1985)
/*
* System Configuration 1 Register F221h (R, R/W)
@@ -192,10 +206,12 @@
#define ONENAND_ECC_1BIT_ALL (0x5555)
#define ONENAND_ECC_2BIT (1 << 1)
#define ONENAND_ECC_2BIT_ALL (0xAAAA)
+#define FLEXONENAND_UNCORRECTABLE_ERROR (0x1010)
/*
* One-Time Programmable (OTP)
*/
+#define FLEXONENAND_OTP_LOCK_OFFSET (2048)
#define ONENAND_OTP_LOCK_OFFSET (14)
#endif /* __ONENAND_REG_H */
diff --git a/include/linux/mtd/partitions.h b/include/linux/mtd/partitions.h
index 7535a74..af6dcb9 100644
--- a/include/linux/mtd/partitions.h
+++ b/include/linux/mtd/partitions.h
@@ -40,7 +40,6 @@
uint64_t offset; /* offset within the master MTD space */
uint32_t mask_flags; /* master MTD flags to mask out for this partition */
struct nand_ecclayout *ecclayout; /* out of band layout for this partition (NAND only)*/
- struct mtd_info **mtdp; /* pointer to store the MTD object */
};
#define MTDPART_OFS_NXTBLK (-2)
diff --git a/include/linux/nfs.h b/include/linux/nfs.h
index 214d499..f387919 100644
--- a/include/linux/nfs.h
+++ b/include/linux/nfs.h
@@ -25,8 +25,9 @@
#define NFSMODE_SOCK 0140000
#define NFSMODE_FIFO 0010000
-#define NFS_MNT_PROGRAM 100005
-#define NFS_MNT_PORT 627
+#define NFS_MNT_PROGRAM 100005
+#define NFS_MNT_VERSION 1
+#define NFS_MNT3_VERSION 3
/*
* NFS stats. The good thing with these values is that NFSv3 errors are
diff --git a/include/linux/nfs2.h b/include/linux/nfs2.h
index 0ed9517..fde24b3 100644
--- a/include/linux/nfs2.h
+++ b/include/linux/nfs2.h
@@ -64,11 +64,4 @@
#define NFSPROC_READDIR 16
#define NFSPROC_STATFS 17
-#define NFS_MNT_PROGRAM 100005
-#define NFS_MNT_VERSION 1
-#define MNTPROC_NULL 0
-#define MNTPROC_MNT 1
-#define MNTPROC_UMNT 3
-#define MNTPROC_UMNTALL 4
-
#endif /* _LINUX_NFS2_H */
diff --git a/include/linux/nfs3.h b/include/linux/nfs3.h
index 539f3b5..ac33806 100644
--- a/include/linux/nfs3.h
+++ b/include/linux/nfs3.h
@@ -88,12 +88,7 @@
#define NFS3PROC_PATHCONF 20
#define NFS3PROC_COMMIT 21
-#define NFS_MNT3_PROGRAM 100005
#define NFS_MNT3_VERSION 3
-#define MOUNTPROC3_NULL 0
-#define MOUNTPROC3_MNT 1
-#define MOUNTPROC3_UMNT 3
-#define MOUNTPROC3_UMNTALL 4
#if defined(__KERNEL__)
diff --git a/include/linux/nfs4.h b/include/linux/nfs4.h
index e3f0cbc..bd2eba5 100644
--- a/include/linux/nfs4.h
+++ b/include/linux/nfs4.h
@@ -21,6 +21,7 @@
#define NFS4_FHSIZE 128
#define NFS4_MAXPATHLEN PATH_MAX
#define NFS4_MAXNAMLEN NAME_MAX
+#define NFS4_OPAQUE_LIMIT 1024
#define NFS4_MAX_SESSIONID_LEN 16
#define NFS4_ACCESS_READ 0x0001
@@ -130,6 +131,16 @@
#define NFS4_MAX_UINT64 (~(u64)0)
+/* An NFS4 sessions server must support at least NFS4_MAX_OPS operations.
+ * If a compound requires more operations, adjust NFS4_MAX_OPS accordingly.
+ */
+#define NFS4_MAX_OPS 8
+
+/* Our NFS4 client back channel server only wants the cb_sequence and the
+ * actual operation per compound
+ */
+#define NFS4_MAX_BACK_CHANNEL_OPS 2
+
enum nfs4_acl_whotype {
NFS4_ACL_WHO_NAMED = 0,
NFS4_ACL_WHO_OWNER,
@@ -462,6 +473,13 @@
#define NFSPROC4_NULL 0
#define NFSPROC4_COMPOUND 1
#define NFS4_MINOR_VERSION 0
+
+#if defined(CONFIG_NFS_V4_1)
+#define NFS4_MAX_MINOR_VERSION 1
+#else
+#define NFS4_MAX_MINOR_VERSION 0
+#endif /* CONFIG_NFS_V4_1 */
+
#define NFS4_DEBUG 1
/* Index of predefined Linux client operations */
diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h
index 6ad7594..19fe15d 100644
--- a/include/linux/nfs_fs_sb.h
+++ b/include/linux/nfs_fs_sb.h
@@ -4,11 +4,17 @@
#include <linux/list.h>
#include <linux/backing-dev.h>
#include <linux/wait.h>
+#include <linux/nfs_xdr.h>
+#include <linux/sunrpc/xprt.h>
#include <asm/atomic.h>
+struct nfs4_session;
struct nfs_iostats;
struct nlm_host;
+struct nfs4_sequence_args;
+struct nfs4_sequence_res;
+struct nfs_server;
/*
* The nfs_client identifies our client state to the server.
@@ -18,6 +24,7 @@
int cl_cons_state; /* current construction state (-ve: init error) */
#define NFS_CS_READY 0 /* ready to be used */
#define NFS_CS_INITING 1 /* busy initialising */
+#define NFS_CS_SESSION_INITING 2 /* busy initialising session */
unsigned long cl_res_state; /* NFS resources state */
#define NFS_CS_CALLBACK 1 /* - callback started */
#define NFS_CS_IDMAP 2 /* - idmap started */
@@ -32,6 +39,7 @@
const struct nfs_rpc_ops *rpc_ops; /* NFS protocol vector */
int cl_proto; /* Network transport protocol */
+ u32 cl_minorversion;/* NFSv4 minorversion */
struct rpc_cred *cl_machine_cred;
#ifdef CONFIG_NFS_V4
@@ -63,7 +71,22 @@
*/
char cl_ipaddr[48];
unsigned char cl_id_uniquifier;
-#endif
+ int (* cl_call_sync)(struct nfs_server *server,
+ struct rpc_message *msg,
+ struct nfs4_sequence_args *args,
+ struct nfs4_sequence_res *res,
+ int cache_reply);
+#endif /* CONFIG_NFS_V4 */
+
+#ifdef CONFIG_NFS_V4_1
+ /* clientid returned from EXCHANGE_ID, used by session operations */
+ u64 cl_ex_clid;
+ /* The sequence id to use for the next CREATE_SESSION */
+ u32 cl_seqid;
+ /* The flags used for obtaining the clientid during EXCHANGE_ID */
+ u32 cl_exchange_flags;
+ struct nfs4_session *cl_session; /* shared session */
+#endif /* CONFIG_NFS_V4_1 */
#ifdef CONFIG_NFS_FSCACHE
struct fscache_cookie *fscache; /* client index cache cookie */
@@ -145,4 +168,46 @@
#define NFS_CAP_ACLS (1U << 3)
#define NFS_CAP_ATOMIC_OPEN (1U << 4)
+
+/* maximum number of slots to use */
+#define NFS4_MAX_SLOT_TABLE RPC_MAX_SLOT_TABLE
+
+#if defined(CONFIG_NFS_V4_1)
+
+/* Sessions */
+#define SLOT_TABLE_SZ (NFS4_MAX_SLOT_TABLE/(8*sizeof(long)))
+struct nfs4_slot_table {
+ struct nfs4_slot *slots; /* seqid per slot */
+ unsigned long used_slots[SLOT_TABLE_SZ]; /* used/unused bitmap */
+ spinlock_t slot_tbl_lock;
+ struct rpc_wait_queue slot_tbl_waitq; /* allocators may wait here */
+ int max_slots; /* # slots in table */
+ int highest_used_slotid; /* sent to server on each SEQ.
+ * op for dynamic resizing */
+};
+
+static inline int slot_idx(struct nfs4_slot_table *tbl, struct nfs4_slot *sp)
+{
+ return sp - tbl->slots;
+}
+
+/*
+ * Session related parameters
+ */
+struct nfs4_session {
+ struct nfs4_sessionid sess_id;
+ u32 flags;
+ unsigned long session_state;
+ u32 hash_alg;
+ u32 ssv_len;
+
+ /* The fore and back channel */
+ struct nfs4_channel_attrs fc_attrs;
+ struct nfs4_slot_table fc_slot_table;
+ struct nfs4_channel_attrs bc_attrs;
+ struct nfs4_slot_table bc_slot_table;
+ struct nfs_client *clp;
+};
+
+#endif /* CONFIG_NFS_V4_1 */
#endif
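The used_slots bitmap together with slot_idx() hints at how slots get handed out; a minimal allocation sketch follows (not code from this patch; it assumes the caller holds slot_tbl_lock and parks on slot_tbl_waitq when NULL is returned):

#include <linux/bitops.h>

static struct nfs4_slot *nfs4_alloc_slot_sketch(struct nfs4_slot_table *tbl)
{
	int slotid = find_first_zero_bit(tbl->used_slots, tbl->max_slots);

	if (slotid >= tbl->max_slots)
		return NULL;			/* table full */

	__set_bit(slotid, tbl->used_slots);
	if (slotid > tbl->highest_used_slotid)
		tbl->highest_used_slotid = slotid;

	return &tbl->slots[slotid];
}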
diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h
index b89c34e..62f63fb 100644
--- a/include/linux/nfs_xdr.h
+++ b/include/linux/nfs_xdr.h
@@ -145,6 +145,44 @@
};
struct nfs_seqid;
+
+/* nfs41 sessions channel attributes */
+struct nfs4_channel_attrs {
+ u32 headerpadsz;
+ u32 max_rqst_sz;
+ u32 max_resp_sz;
+ u32 max_resp_sz_cached;
+ u32 max_ops;
+ u32 max_reqs;
+};
+
+/* nfs41 sessions slot seqid */
+struct nfs4_slot {
+ u32 seq_nr;
+};
+
+struct nfs4_sequence_args {
+ struct nfs4_session *sa_session;
+ u8 sa_slotid;
+ u8 sa_cache_this;
+};
+
+struct nfs4_sequence_res {
+ struct nfs4_session *sr_session;
+ u8 sr_slotid; /* slot used to send request */
+ unsigned long sr_renewal_time;
+ int sr_status; /* sequence operation status */
+};
+
+struct nfs4_get_lease_time_args {
+ struct nfs4_sequence_args la_seq_args;
+};
+
+struct nfs4_get_lease_time_res {
+ struct nfs_fsinfo *lr_fsinfo;
+ struct nfs4_sequence_res lr_seq_res;
+};
+
/*
* Arguments to the open call.
*/
@@ -165,6 +203,7 @@
const struct nfs_server *server; /* Needed for ID mapping */
const u32 * bitmask;
__u32 claim;
+ struct nfs4_sequence_args seq_args;
};
struct nfs_openres {
@@ -181,6 +220,7 @@
__u32 do_recall;
__u64 maxsize;
__u32 attrset[NFS4_BITMAP_SIZE];
+ struct nfs4_sequence_res seq_res;
};
/*
@@ -206,6 +246,7 @@
struct nfs_seqid * seqid;
fmode_t fmode;
const u32 * bitmask;
+ struct nfs4_sequence_args seq_args;
};
struct nfs_closeres {
@@ -213,6 +254,7 @@
struct nfs_fattr * fattr;
struct nfs_seqid * seqid;
const struct nfs_server *server;
+ struct nfs4_sequence_res seq_res;
};
/*
* * Arguments to the lock,lockt, and locku call.
@@ -233,12 +275,14 @@
unsigned char block : 1;
unsigned char reclaim : 1;
unsigned char new_lock_owner : 1;
+ struct nfs4_sequence_args seq_args;
};
struct nfs_lock_res {
nfs4_stateid stateid;
struct nfs_seqid * lock_seqid;
struct nfs_seqid * open_seqid;
+ struct nfs4_sequence_res seq_res;
};
struct nfs_locku_args {
@@ -246,32 +290,38 @@
struct file_lock * fl;
struct nfs_seqid * seqid;
nfs4_stateid * stateid;
+ struct nfs4_sequence_args seq_args;
};
struct nfs_locku_res {
nfs4_stateid stateid;
struct nfs_seqid * seqid;
+ struct nfs4_sequence_res seq_res;
};
struct nfs_lockt_args {
struct nfs_fh * fh;
struct file_lock * fl;
struct nfs_lowner lock_owner;
+ struct nfs4_sequence_args seq_args;
};
struct nfs_lockt_res {
struct file_lock * denied; /* LOCK, LOCKT failed */
+ struct nfs4_sequence_res seq_res;
};
struct nfs4_delegreturnargs {
const struct nfs_fh *fhandle;
const nfs4_stateid *stateid;
const u32 * bitmask;
+ struct nfs4_sequence_args seq_args;
};
struct nfs4_delegreturnres {
struct nfs_fattr * fattr;
const struct nfs_server *server;
+ struct nfs4_sequence_res seq_res;
};
/*
@@ -284,12 +334,14 @@
__u32 count;
unsigned int pgbase;
struct page ** pages;
+ struct nfs4_sequence_args seq_args;
};
struct nfs_readres {
struct nfs_fattr * fattr;
__u32 count;
int eof;
+ struct nfs4_sequence_res seq_res;
};
/*
@@ -304,6 +356,7 @@
unsigned int pgbase;
struct page ** pages;
const u32 * bitmask;
+ struct nfs4_sequence_args seq_args;
};
struct nfs_writeverf {
@@ -316,6 +369,7 @@
struct nfs_writeverf * verf;
__u32 count;
const struct nfs_server *server;
+ struct nfs4_sequence_res seq_res;
};
/*
@@ -325,12 +379,14 @@
const struct nfs_fh *fh;
struct qstr name;
const u32 * bitmask;
+ struct nfs4_sequence_args seq_args;
};
struct nfs_removeres {
const struct nfs_server *server;
struct nfs4_change_info cinfo;
struct nfs_fattr dir_attr;
+ struct nfs4_sequence_res seq_res;
};
/*
@@ -383,6 +439,7 @@
struct iattr * iap;
const struct nfs_server * server; /* Needed for name mapping */
const u32 * bitmask;
+ struct nfs4_sequence_args seq_args;
};
struct nfs_setaclargs {
@@ -390,6 +447,11 @@
size_t acl_len;
unsigned int acl_pgbase;
struct page ** acl_pages;
+ struct nfs4_sequence_args seq_args;
+};
+
+struct nfs_setaclres {
+ struct nfs4_sequence_res seq_res;
};
struct nfs_getaclargs {
@@ -397,11 +459,18 @@
size_t acl_len;
unsigned int acl_pgbase;
struct page ** acl_pages;
+ struct nfs4_sequence_args seq_args;
+};
+
+struct nfs_getaclres {
+ size_t acl_len;
+ struct nfs4_sequence_res seq_res;
};
struct nfs_setattrres {
struct nfs_fattr * fattr;
const struct nfs_server * server;
+ struct nfs4_sequence_res seq_res;
};
struct nfs_linkargs {
@@ -583,6 +652,7 @@
const struct nfs_fh * fh;
const u32 * bitmask;
u32 access;
+ struct nfs4_sequence_args seq_args;
};
struct nfs4_accessres {
@@ -590,6 +660,7 @@
struct nfs_fattr * fattr;
u32 supported;
u32 access;
+ struct nfs4_sequence_res seq_res;
};
struct nfs4_create_arg {
@@ -609,6 +680,7 @@
const struct iattr * attrs;
const struct nfs_fh * dir_fh;
const u32 * bitmask;
+ struct nfs4_sequence_args seq_args;
};
struct nfs4_create_res {
@@ -617,21 +689,30 @@
struct nfs_fattr * fattr;
struct nfs4_change_info dir_cinfo;
struct nfs_fattr * dir_fattr;
+ struct nfs4_sequence_res seq_res;
};
struct nfs4_fsinfo_arg {
const struct nfs_fh * fh;
const u32 * bitmask;
+ struct nfs4_sequence_args seq_args;
+};
+
+struct nfs4_fsinfo_res {
+ struct nfs_fsinfo *fsinfo;
+ struct nfs4_sequence_res seq_res;
};
struct nfs4_getattr_arg {
const struct nfs_fh * fh;
const u32 * bitmask;
+ struct nfs4_sequence_args seq_args;
};
struct nfs4_getattr_res {
const struct nfs_server * server;
struct nfs_fattr * fattr;
+ struct nfs4_sequence_res seq_res;
};
struct nfs4_link_arg {
@@ -639,6 +720,7 @@
const struct nfs_fh * dir_fh;
const struct qstr * name;
const u32 * bitmask;
+ struct nfs4_sequence_args seq_args;
};
struct nfs4_link_res {
@@ -646,6 +728,7 @@
struct nfs_fattr * fattr;
struct nfs4_change_info cinfo;
struct nfs_fattr * dir_attr;
+ struct nfs4_sequence_res seq_res;
};
@@ -653,21 +736,30 @@
const struct nfs_fh * dir_fh;
const struct qstr * name;
const u32 * bitmask;
+ struct nfs4_sequence_args seq_args;
};
struct nfs4_lookup_res {
const struct nfs_server * server;
struct nfs_fattr * fattr;
struct nfs_fh * fh;
+ struct nfs4_sequence_res seq_res;
};
struct nfs4_lookup_root_arg {
const u32 * bitmask;
+ struct nfs4_sequence_args seq_args;
};
struct nfs4_pathconf_arg {
const struct nfs_fh * fh;
const u32 * bitmask;
+ struct nfs4_sequence_args seq_args;
+};
+
+struct nfs4_pathconf_res {
+ struct nfs_pathconf *pathconf;
+ struct nfs4_sequence_res seq_res;
};
struct nfs4_readdir_arg {
@@ -678,11 +770,13 @@
struct page ** pages; /* zero-copy data */
unsigned int pgbase; /* zero-copy data */
const u32 * bitmask;
+ struct nfs4_sequence_args seq_args;
};
struct nfs4_readdir_res {
nfs4_verifier verifier;
unsigned int pgbase;
+ struct nfs4_sequence_res seq_res;
};
struct nfs4_readlink {
@@ -690,6 +784,11 @@
unsigned int pgbase;
unsigned int pglen; /* zero-copy data */
struct page ** pages; /* zero-copy data */
+ struct nfs4_sequence_args seq_args;
+};
+
+struct nfs4_readlink_res {
+ struct nfs4_sequence_res seq_res;
};
struct nfs4_rename_arg {
@@ -698,6 +797,7 @@
const struct qstr * old_name;
const struct qstr * new_name;
const u32 * bitmask;
+ struct nfs4_sequence_args seq_args;
};
struct nfs4_rename_res {
@@ -706,6 +806,7 @@
struct nfs_fattr * old_fattr;
struct nfs4_change_info new_cinfo;
struct nfs_fattr * new_fattr;
+ struct nfs4_sequence_res seq_res;
};
#define NFS4_SETCLIENTID_NAMELEN (127)
@@ -724,6 +825,17 @@
struct nfs4_statfs_arg {
const struct nfs_fh * fh;
const u32 * bitmask;
+ struct nfs4_sequence_args seq_args;
+};
+
+struct nfs4_statfs_res {
+ struct nfs_fsstat *fsstat;
+ struct nfs4_sequence_res seq_res;
+};
+
+struct nfs4_server_caps_arg {
+ struct nfs_fh *fhandle;
+ struct nfs4_sequence_args seq_args;
};
struct nfs4_server_caps_res {
@@ -731,6 +843,7 @@
u32 acl_bitmask;
u32 has_links;
u32 has_symlinks;
+ struct nfs4_sequence_res seq_res;
};
struct nfs4_string {
@@ -765,10 +878,68 @@
const struct qstr *name;
struct page *page;
const u32 *bitmask;
+ struct nfs4_sequence_args seq_args;
+};
+
+struct nfs4_fs_locations_res {
+ struct nfs4_fs_locations *fs_locations;
+ struct nfs4_sequence_res seq_res;
};
#endif /* CONFIG_NFS_V4 */
+struct nfstime4 {
+ u64 seconds;
+ u32 nseconds;
+};
+
+#ifdef CONFIG_NFS_V4_1
+struct nfs_impl_id4 {
+ u32 domain_len;
+ char *domain;
+ u32 name_len;
+ char *name;
+ struct nfstime4 date;
+};
+
+#define NFS4_EXCHANGE_ID_LEN (48)
+struct nfs41_exchange_id_args {
+ struct nfs_client *client;
+ nfs4_verifier *verifier;
+ unsigned int id_len;
+ char id[NFS4_EXCHANGE_ID_LEN];
+ u32 flags;
+};
+
+struct server_owner {
+ uint64_t minor_id;
+ uint32_t major_id_sz;
+ char major_id[NFS4_OPAQUE_LIMIT];
+};
+
+struct server_scope {
+ uint32_t server_scope_sz;
+ char server_scope[NFS4_OPAQUE_LIMIT];
+};
+
+struct nfs41_exchange_id_res {
+ struct nfs_client *client;
+ u32 flags;
+};
+
+struct nfs41_create_session_args {
+ struct nfs_client *client;
+ uint32_t flags;
+ uint32_t cb_program;
+ struct nfs4_channel_attrs fc_attrs; /* Fore Channel */
+ struct nfs4_channel_attrs bc_attrs; /* Back Channel */
+};
+
+struct nfs41_create_session_res {
+ struct nfs_client *client;
+};
+#endif /* CONFIG_NFS_V4_1 */
+
struct nfs_page;
#define NFS_PAGEVEC_SIZE (8U)
diff --git a/include/linux/nfsd/cache.h b/include/linux/nfsd/cache.h
index 5bccaab..3a3f589 100644
--- a/include/linux/nfsd/cache.h
+++ b/include/linux/nfsd/cache.h
@@ -14,8 +14,7 @@
#include <linux/uio.h>
/*
- * Representation of a reply cache entry. The first two members *must*
- * be hash_next and hash_prev.
+ * Representation of a reply cache entry.
*/
struct svc_cacherep {
struct hlist_node c_hash;
diff --git a/include/linux/nfsd/nfsfh.h b/include/linux/nfsd/nfsfh.h
index afa1901..8f641c9 100644
--- a/include/linux/nfsd/nfsfh.h
+++ b/include/linux/nfsd/nfsfh.h
@@ -151,9 +151,15 @@
__u64 fh_pre_size; /* size before operation */
struct timespec fh_pre_mtime; /* mtime before oper */
struct timespec fh_pre_ctime; /* ctime before oper */
+ /*
+ * pre-op nfsv4 change attr: note must check IS_I_VERSION(inode)
+ * to find out if it is valid.
+ */
+ u64 fh_pre_change;
/* Post-op attributes saved in fh_unlock */
struct kstat fh_post_attr; /* full attrs after operation */
+ u64 fh_post_change; /* nfsv4 change; see above */
#endif /* CONFIG_NFSD_V3 */
} svc_fh;
@@ -298,6 +304,7 @@
fhp->fh_pre_mtime = inode->i_mtime;
fhp->fh_pre_ctime = inode->i_ctime;
fhp->fh_pre_size = inode->i_size;
+ fhp->fh_pre_change = inode->i_version;
fhp->fh_pre_saved = 1;
}
}
diff --git a/include/linux/nfsd/state.h b/include/linux/nfsd/state.h
index 4d61c87..57ab2ed 100644
--- a/include/linux/nfsd/state.h
+++ b/include/linux/nfsd/state.h
@@ -41,7 +41,6 @@
#include <linux/kref.h>
#include <linux/sunrpc/clnt.h>
-#define NFS4_OPAQUE_LIMIT 1024
typedef struct {
u32 cl_boot;
u32 cl_id;
@@ -61,15 +60,6 @@
#define si_stateownerid si_opaque.so_stateownerid
#define si_fileid si_opaque.so_fileid
-
-struct nfs4_cb_recall {
- u32 cbr_ident;
- int cbr_trunc;
- stateid_t cbr_stateid;
- struct knfsd_fh cbr_fh;
- struct nfs4_delegation *cbr_dp;
-};
-
struct nfs4_delegation {
struct list_head dl_perfile;
struct list_head dl_perclnt;
@@ -81,22 +71,25 @@
struct file *dl_vfs_file;
u32 dl_type;
time_t dl_time;
- struct nfs4_cb_recall dl_recall;
+/* For recall: */
+ u32 dl_ident;
+ stateid_t dl_stateid;
+ struct knfsd_fh dl_fh;
+ int dl_retries;
};
-#define dl_stateid dl_recall.cbr_stateid
-#define dl_fh dl_recall.cbr_fh
-
/* client delegation callback info */
-struct nfs4_callback {
+struct nfs4_cb_conn {
/* SETCLIENTID info */
u32 cb_addr;
unsigned short cb_port;
u32 cb_prog;
- u32 cb_ident;
+ u32 cb_minorversion;
+ u32 cb_ident; /* minorversion 0 only */
/* RPC client info */
atomic_t cb_set; /* successful CB_NULL call */
struct rpc_clnt * cb_client;
+ struct rpc_cred * cb_cred;
};
/* Maximum number of slots per session. 128 is useful for long haul TCP */
@@ -122,6 +115,17 @@
struct nfsd4_cache_entry sl_cache_entry;
};
+struct nfsd4_channel_attrs {
+ u32 headerpadsz;
+ u32 maxreq_sz;
+ u32 maxresp_sz;
+ u32 maxresp_cached;
+ u32 maxops;
+ u32 maxreqs;
+ u32 nr_rdma_attrs;
+ u32 rdma_attrs;
+};
+
struct nfsd4_session {
struct kref se_ref;
struct list_head se_hash; /* hash by sessionid */
@@ -129,11 +133,8 @@
u32 se_flags;
struct nfs4_client *se_client; /* for expire_client */
struct nfs4_sessionid se_sessionid;
- u32 se_fmaxreq_sz;
- u32 se_fmaxresp_sz;
- u32 se_fmaxresp_cached;
- u32 se_fmaxops;
- u32 se_fnumslots;
+ struct nfsd4_channel_attrs se_fchannel;
+ struct nfsd4_channel_attrs se_bchannel;
struct nfsd4_slot se_slots[]; /* forward channel slots */
};
@@ -185,7 +186,7 @@
struct svc_cred cl_cred; /* setclientid principal */
clientid_t cl_clientid; /* generated by server */
nfs4_verifier cl_confirm; /* generated by server */
- struct nfs4_callback cl_callback; /* callback info */
+ struct nfs4_cb_conn cl_cb_conn; /* callback info */
atomic_t cl_count; /* ref count */
u32 cl_firststate; /* recovery dir creation */
diff --git a/include/linux/nfsd/xdr4.h b/include/linux/nfsd/xdr4.h
index f80d601..2bacf75 100644
--- a/include/linux/nfsd/xdr4.h
+++ b/include/linux/nfsd/xdr4.h
@@ -64,10 +64,13 @@
struct nfsd4_change_info {
u32 atomic;
+ bool change_supported;
u32 before_ctime_sec;
u32 before_ctime_nsec;
+ u64 before_change;
u32 after_ctime_sec;
u32 after_ctime_nsec;
+ u64 after_change;
};
struct nfsd4_access {
@@ -363,17 +366,6 @@
int spa_how;
};
-struct nfsd4_channel_attrs {
- u32 headerpadsz;
- u32 maxreq_sz;
- u32 maxresp_sz;
- u32 maxresp_cached;
- u32 maxops;
- u32 maxreqs;
- u32 nr_rdma_attrs;
- u32 rdma_attrs;
-};
-
struct nfsd4_create_session {
clientid_t clientid;
struct nfs4_sessionid sessionid;
@@ -503,10 +495,16 @@
{
BUG_ON(!fhp->fh_pre_saved || !fhp->fh_post_saved);
cinfo->atomic = 1;
- cinfo->before_ctime_sec = fhp->fh_pre_ctime.tv_sec;
- cinfo->before_ctime_nsec = fhp->fh_pre_ctime.tv_nsec;
- cinfo->after_ctime_sec = fhp->fh_post_attr.ctime.tv_sec;
- cinfo->after_ctime_nsec = fhp->fh_post_attr.ctime.tv_nsec;
+ cinfo->change_supported = IS_I_VERSION(fhp->fh_dentry->d_inode);
+ if (cinfo->change_supported) {
+ cinfo->before_change = fhp->fh_pre_change;
+ cinfo->after_change = fhp->fh_post_change;
+ } else {
+ cinfo->before_ctime_sec = fhp->fh_pre_ctime.tv_sec;
+ cinfo->before_ctime_nsec = fhp->fh_pre_ctime.tv_nsec;
+ cinfo->after_ctime_sec = fhp->fh_post_attr.ctime.tv_sec;
+ cinfo->after_ctime_nsec = fhp->fh_post_attr.ctime.tv_nsec;
+ }
}
int nfs4svc_encode_voidres(struct svc_rqst *, __be32 *, void *);
diff --git a/include/linux/pci-acpi.h b/include/linux/pci-acpi.h
index 092e82e..93a7c08 100644
--- a/include/linux/pci-acpi.h
+++ b/include/linux/pci-acpi.h
@@ -15,7 +15,7 @@
{
struct pci_bus *pbus = pdev->bus;
/* Find a PCI root bus */
- while (pbus->parent)
+ while (!pci_is_root_bus(pbus))
pbus = pbus->parent;
return acpi_get_pci_rootbridge_handle(pci_domain_nr(pbus),
pbus->number);
@@ -23,7 +23,7 @@
static inline acpi_handle acpi_pci_get_bridge_handle(struct pci_bus *pbus)
{
- if (pbus->parent)
+ if (!pci_is_root_bus(pbus))
return DEVICE_ACPI_HANDLE(&(pbus->self->dev));
return acpi_get_pci_rootbridge_handle(pci_domain_nr(pbus),
pbus->number);
diff --git a/include/linux/pci.h b/include/linux/pci.h
index 8e366bb..d304ddf 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -196,6 +196,7 @@
struct pcie_link_state;
struct pci_vpd;
struct pci_sriov;
+struct pci_ats;
/*
* The pci_dev structure is used to describe PCI devices.
@@ -293,6 +294,7 @@
struct pci_sriov *sriov; /* SR-IOV capability related */
struct pci_dev *physfn; /* the PF this VF is associated with */
};
+ struct pci_ats *ats; /* Address Translation Service */
#endif
};
@@ -607,8 +609,6 @@
struct pci_dev __deprecated *pci_find_device(unsigned int vendor,
unsigned int device,
struct pci_dev *from);
-struct pci_dev __deprecated *pci_find_slot(unsigned int bus,
- unsigned int devfn);
#endif /* CONFIG_PCI_LEGACY */
enum pci_lost_interrupt_reason {
@@ -647,6 +647,7 @@
int where, u16 val);
int pci_bus_write_config_dword(struct pci_bus *bus, unsigned int devfn,
int where, u32 val);
+struct pci_ops *pci_bus_set_ops(struct pci_bus *bus, struct pci_ops *ops);
static inline int pci_read_config_byte(struct pci_dev *dev, int where, u8 *val)
{
@@ -711,8 +712,8 @@
int pcix_set_mmrbc(struct pci_dev *dev, int mmrbc);
int pcie_get_readrq(struct pci_dev *dev);
int pcie_set_readrq(struct pci_dev *dev, int rq);
+int __pci_reset_function(struct pci_dev *dev);
int pci_reset_function(struct pci_dev *dev);
-int pci_execute_reset_function(struct pci_dev *dev);
void pci_update_resource(struct pci_dev *dev, int resno);
int __must_check pci_assign_resource(struct pci_dev *dev, int i);
int pci_select_bars(struct pci_dev *dev, unsigned long flags);
@@ -732,7 +733,7 @@
pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state);
bool pci_pme_capable(struct pci_dev *dev, pci_power_t state);
void pci_pme_active(struct pci_dev *dev, bool enable);
-int pci_enable_wake(struct pci_dev *dev, pci_power_t state, int enable);
+int pci_enable_wake(struct pci_dev *dev, pci_power_t state, bool enable);
int pci_wake_from_d3(struct pci_dev *dev, bool enable);
pci_power_t pci_target_state(struct pci_dev *dev);
int pci_prepare_to_sleep(struct pci_dev *dev);
@@ -798,7 +799,7 @@
int pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max,
int pass);
-void pci_walk_bus(struct pci_bus *top, void (*cb)(struct pci_dev *, void *),
+void pci_walk_bus(struct pci_bus *top, int (*cb)(struct pci_dev *, void *),
void *userdata);
int pci_cfg_space_size_ext(struct pci_dev *dev);
int pci_cfg_space_size(struct pci_dev *dev);
@@ -888,6 +889,17 @@
extern int pcie_aspm_enabled(void);
#endif
+#ifndef CONFIG_PCIE_ECRC
+static inline void pcie_set_ecrc_checking(struct pci_dev *dev)
+{
+ return;
+}
+static inline void pcie_ecrc_get_policy(char *str) {};
+#else
+extern void pcie_set_ecrc_checking(struct pci_dev *dev);
+extern void pcie_ecrc_get_policy(char *str);
+#endif
+
#define pci_enable_msi(pdev) pci_enable_msi_block(pdev, 1)
#ifdef CONFIG_HT_IRQ
@@ -944,12 +956,6 @@
return NULL;
}
-static inline struct pci_dev *pci_find_slot(unsigned int bus,
- unsigned int devfn)
-{
- return NULL;
-}
-
static inline struct pci_dev *pci_get_device(unsigned int vendor,
unsigned int device,
struct pci_dev *from)
@@ -1105,6 +1111,10 @@
#include <asm/pci.h>
+#ifndef PCIBIOS_MAX_MEM_32
+#define PCIBIOS_MAX_MEM_32 (-1)
+#endif
+
/* these helpers provide future and backwards compatibility
* for accessing popular PCI BAR info */
#define pci_resource_start(dev, bar) ((dev)->resource[(bar)].start)
@@ -1261,5 +1271,10 @@
}
#endif
+#if defined(CONFIG_HOTPLUG_PCI) || defined(CONFIG_HOTPLUG_PCI_MODULE)
+extern void pci_hp_create_module_link(struct pci_slot *pci_slot);
+extern void pci_hp_remove_module_link(struct pci_slot *pci_slot);
+#endif
+
#endif /* __KERNEL__ */
#endif /* LINUX_PCI_H */
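The pci_walk_bus() prototype above now takes an int-returning callback. A hedged sketch of a caller, assuming (as the new return type suggests) that a non-zero return ends the walk early; the callback name and the vendor check are illustrative, not taken from this patch:

#include <linux/pci.h>
#include <linux/pci_ids.h>

/* Count devices under 'bus', stopping at the first Intel device.
 * Assumes a non-zero callback return terminates pci_walk_bus(). */
static int example_count_cb(struct pci_dev *pdev, void *data)
{
	unsigned int *count = data;

	(*count)++;
	return pdev->vendor == PCI_VENDOR_ID_INTEL;
}

static unsigned int example_count_devices(struct pci_bus *bus)
{
	unsigned int count = 0;

	pci_walk_bus(bus, example_count_cb, &count);
	return count;
}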
diff --git a/include/linux/pci_hotplug.h b/include/linux/pci_hotplug.h
index 2099874..4391741 100644
--- a/include/linux/pci_hotplug.h
+++ b/include/linux/pci_hotplug.h
@@ -66,17 +66,10 @@
PCIE_LNK_SPEED_UNKNOWN = 0xFF,
};
-struct hotplug_slot;
-struct hotplug_slot_attribute {
- struct attribute attr;
- ssize_t (*show)(struct hotplug_slot *, char *);
- ssize_t (*store)(struct hotplug_slot *, const char *, size_t);
-};
-#define to_hotplug_attr(n) container_of(n, struct hotplug_slot_attribute, attr);
-
/**
* struct hotplug_slot_ops -the callbacks that the hotplug pci core can use
* @owner: The module owner of this structure
+ * @mod_name: The module name (KBUILD_MODNAME) of this structure
* @enable_slot: Called when the user wants to enable a specific pci slot
* @disable_slot: Called when the user wants to disable a specific pci slot
* @set_attention_status: Called to set the specific slot's attention LED to
@@ -109,6 +102,7 @@
*/
struct hotplug_slot_ops {
struct module *owner;
+ const char *mod_name;
int (*enable_slot) (struct hotplug_slot *slot);
int (*disable_slot) (struct hotplug_slot *slot);
int (*set_attention_status) (struct hotplug_slot *slot, u8 value);
@@ -167,12 +161,21 @@
return pci_slot_name(slot->pci_slot);
}
-extern int pci_hp_register(struct hotplug_slot *, struct pci_bus *, int nr,
- const char *name);
+extern int __pci_hp_register(struct hotplug_slot *slot, struct pci_bus *pbus,
+ int nr, const char *name,
+ struct module *owner, const char *mod_name);
extern int pci_hp_deregister(struct hotplug_slot *slot);
extern int __must_check pci_hp_change_slot_info (struct hotplug_slot *slot,
struct hotplug_slot_info *info);
+static inline int pci_hp_register(struct hotplug_slot *slot,
+ struct pci_bus *pbus,
+ int devnr, const char *name)
+{
+ return __pci_hp_register(slot, pbus, devnr, name,
+ THIS_MODULE, KBUILD_MODNAME);
+}
+
/* PCI Setting Record (Type 0) */
struct hpp_type0 {
u32 revision;
@@ -226,7 +229,6 @@
extern acpi_status acpi_get_hp_params_from_firmware(struct pci_bus *bus,
struct hotplug_params *hpp);
int acpi_get_hp_hw_control_from_firmware(struct pci_dev *dev, u32 flags);
-int acpi_root_bridge(acpi_handle handle);
int acpi_pci_check_ejectable(struct pci_bus *pbus, acpi_handle handle);
int acpi_pci_detect_ejectable(struct pci_bus *pbus);
#endif
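A sketch of how a hotplug controller driver would register a slot with the reworked interface above; all example_* names are invented. The point of the change is that the pci_hp_register() inline hands THIS_MODULE and KBUILD_MODNAME to __pci_hp_register(), so the hotplug core can tell which driver module owns the slot:

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/pci_hotplug.h>

static struct hotplug_slot_ops example_slot_ops = {
	/* owner/mod_name are supplied through the registration call */
};

static int example_register_slot(struct pci_bus *bus, int device_nr)
{
	struct hotplug_slot *slot;
	int ret;

	slot = kzalloc(sizeof(*slot), GFP_KERNEL);
	if (!slot)
		return -ENOMEM;
	slot->info = kzalloc(sizeof(*slot->info), GFP_KERNEL);
	if (!slot->info) {
		kfree(slot);
		return -ENOMEM;
	}
	slot->ops = &example_slot_ops;

	ret = pci_hp_register(slot, bus, device_nr, "example-slot");
	if (ret) {
		kfree(slot->info);
		kfree(slot);
	}
	return ret;
}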
diff --git a/include/linux/pci_regs.h b/include/linux/pci_regs.h
index 616bf8b..fcaee42 100644
--- a/include/linux/pci_regs.h
+++ b/include/linux/pci_regs.h
@@ -295,8 +295,9 @@
#define PCI_MSI_ADDRESS_LO 4 /* Lower 32 bits */
#define PCI_MSI_ADDRESS_HI 8 /* Upper 32 bits (if PCI_MSI_FLAGS_64BIT set) */
#define PCI_MSI_DATA_32 8 /* 16 bits of data for 32-bit devices */
+#define PCI_MSI_MASK_32 12 /* Mask bits register for 32-bit devices */
#define PCI_MSI_DATA_64 12 /* 16 bits of data for 64-bit devices */
-#define PCI_MSI_MASK_BIT 16 /* Mask bits register */
+#define PCI_MSI_MASK_64 16 /* Mask bits register for 64-bit devices */
/* MSI-X registers (these are at offset PCI_MSIX_FLAGS) */
#define PCI_MSIX_FLAGS 2
@@ -304,7 +305,6 @@
#define PCI_MSIX_FLAGS_ENABLE (1 << 15)
#define PCI_MSIX_FLAGS_MASKALL (1 << 14)
#define PCI_MSIX_FLAGS_BIRMASK (7 << 0)
-#define PCI_MSIX_FLAGS_BITMASK (1 << 0)
/* CompactPCI Hotswap Register */
@@ -502,6 +502,7 @@
#define PCI_EXT_CAP_ID_DSN 3
#define PCI_EXT_CAP_ID_PWR 4
#define PCI_EXT_CAP_ID_ARI 14
+#define PCI_EXT_CAP_ID_ATS 15
#define PCI_EXT_CAP_ID_SRIOV 16
/* Advanced Error Reporting */
@@ -620,6 +621,15 @@
#define PCI_ARI_CTRL_ACS 0x0002 /* ACS Function Groups Enable */
#define PCI_ARI_CTRL_FG(x) (((x) >> 4) & 7) /* Function Group */
+/* Address Translation Service */
+#define PCI_ATS_CAP 0x04 /* ATS Capability Register */
+#define PCI_ATS_CAP_QDEP(x) ((x) & 0x1f) /* Invalidate Queue Depth */
+#define PCI_ATS_MAX_QDEP 32 /* Max Invalidate Queue Depth */
+#define PCI_ATS_CTRL 0x06 /* ATS Control Register */
+#define PCI_ATS_CTRL_ENABLE 0x8000 /* ATS Enable */
+#define PCI_ATS_CTRL_STU(x) ((x) & 0x1f) /* Smallest Translation Unit */
+#define PCI_ATS_MIN_STU 12 /* shift of minimum STU block */
+
/* Single Root I/O Virtualization */
#define PCI_SRIOV_CAP 0x04 /* SR-IOV Capabilities */
#define PCI_SRIOV_CAP_VFM 0x01 /* VF Migration Capable */
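A sketch of the new ATS register macros in use: locate the extended capability, then decode the invalidate queue depth from the capability register. The helper name is invented; the zero-means-maximum convention follows the PCI_ATS_MAX_QDEP comment above:

#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/pci_regs.h>

static int example_ats_queue_depth(struct pci_dev *dev)
{
	int pos;
	u16 cap;

	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ATS);
	if (!pos)
		return -ENODEV;		/* device has no ATS capability */

	pci_read_config_word(dev, pos + PCI_ATS_CAP, &cap);

	/* a zero queue-depth field is taken to mean the 32-entry maximum */
	return PCI_ATS_CAP_QDEP(cap) ? PCI_ATS_CAP_QDEP(cap) : PCI_ATS_MAX_QDEP;
}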
diff --git a/include/linux/posix_acl.h b/include/linux/posix_acl.h
index 4bc2412..065a365 100644
--- a/include/linux/posix_acl.h
+++ b/include/linux/posix_acl.h
@@ -83,4 +83,78 @@
extern struct posix_acl *get_posix_acl(struct inode *, int);
extern int set_posix_acl(struct inode *, int, struct posix_acl *);
+#ifdef CONFIG_FS_POSIX_ACL
+static inline struct posix_acl *get_cached_acl(struct inode *inode, int type)
+{
+ struct posix_acl **p, *acl;
+ switch (type) {
+ case ACL_TYPE_ACCESS:
+ p = &inode->i_acl;
+ break;
+ case ACL_TYPE_DEFAULT:
+ p = &inode->i_default_acl;
+ break;
+ default:
+ return ERR_PTR(-EINVAL);
+ }
+ acl = ACCESS_ONCE(*p);
+ if (acl) {
+ spin_lock(&inode->i_lock);
+ acl = *p;
+ if (acl != ACL_NOT_CACHED)
+ acl = posix_acl_dup(acl);
+ spin_unlock(&inode->i_lock);
+ }
+ return acl;
+}
+
+static inline void set_cached_acl(struct inode *inode,
+ int type,
+ struct posix_acl *acl)
+{
+ struct posix_acl *old = NULL;
+ spin_lock(&inode->i_lock);
+ switch (type) {
+ case ACL_TYPE_ACCESS:
+ old = inode->i_acl;
+ inode->i_acl = posix_acl_dup(acl);
+ break;
+ case ACL_TYPE_DEFAULT:
+ old = inode->i_default_acl;
+ inode->i_default_acl = posix_acl_dup(acl);
+ break;
+ }
+ spin_unlock(&inode->i_lock);
+ if (old != ACL_NOT_CACHED)
+ posix_acl_release(old);
+}
+
+static inline void forget_cached_acl(struct inode *inode, int type)
+{
+ struct posix_acl *old = NULL;
+ spin_lock(&inode->i_lock);
+ switch (type) {
+ case ACL_TYPE_ACCESS:
+ old = inode->i_acl;
+ inode->i_acl = ACL_NOT_CACHED;
+ break;
+ case ACL_TYPE_DEFAULT:
+ old = inode->i_default_acl;
+ inode->i_default_acl = ACL_NOT_CACHED;
+ break;
+ }
+ spin_unlock(&inode->i_lock);
+ if (old != ACL_NOT_CACHED)
+ posix_acl_release(old);
+}
+#endif
+
+static inline void cache_no_acl(struct inode *inode)
+{
+#ifdef CONFIG_FS_POSIX_ACL
+ inode->i_acl = NULL;
+ inode->i_default_acl = NULL;
+#endif
+}
+
#endif /* __LINUX_POSIX_ACL_H */
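The cache helpers added above are meant to replace per-filesystem ACL caching. A sketch of the typical lookup pattern; example_read_acl_from_disk() is a hypothetical stand-in for a filesystem's existing xattr-based ACL reader:

#include <linux/fs.h>
#include <linux/posix_acl.h>

static struct posix_acl *example_read_acl_from_disk(struct inode *inode,
						    int type);

static struct posix_acl *example_get_acl(struct inode *inode, int type)
{
	struct posix_acl *acl;

	/* fast path: ACL_NOT_CACHED means nobody has filled the slot yet */
	acl = get_cached_acl(inode, type);
	if (acl != ACL_NOT_CACHED)
		return acl;		/* a hit may legitimately be NULL */

	acl = example_read_acl_from_disk(inode, type);
	if (!IS_ERR(acl))
		set_cached_acl(inode, type, acl);
	return acl;
}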
diff --git a/include/linux/reiserfs_acl.h b/include/linux/reiserfs_acl.h
index 8cc6575..b444885 100644
--- a/include/linux/reiserfs_acl.h
+++ b/include/linux/reiserfs_acl.h
@@ -56,15 +56,6 @@
extern struct xattr_handler reiserfs_posix_acl_default_handler;
extern struct xattr_handler reiserfs_posix_acl_access_handler;
-static inline void reiserfs_init_acl_access(struct inode *inode)
-{
- REISERFS_I(inode)->i_acl_access = NULL;
-}
-
-static inline void reiserfs_init_acl_default(struct inode *inode)
-{
- REISERFS_I(inode)->i_acl_default = NULL;
-}
#else
#define reiserfs_cache_default_acl(inode) 0
@@ -86,12 +77,4 @@
{
return 0;
}
-
-static inline void reiserfs_init_acl_access(struct inode *inode)
-{
-}
-
-static inline void reiserfs_init_acl_default(struct inode *inode)
-{
-}
#endif
diff --git a/include/linux/reiserfs_fs_i.h b/include/linux/reiserfs_fs_i.h
index 76360b3..89f4d3a 100644
--- a/include/linux/reiserfs_fs_i.h
+++ b/include/linux/reiserfs_fs_i.h
@@ -54,10 +54,6 @@
unsigned int i_trans_id;
struct reiserfs_journal_list *i_jl;
struct mutex i_mmap;
-#ifdef CONFIG_REISERFS_FS_POSIX_ACL
- struct posix_acl *i_acl_access;
- struct posix_acl *i_acl_default;
-#endif
#ifdef CONFIG_REISERFS_FS_XATTR
struct rw_semaphore i_xattr_sem;
#endif
diff --git a/include/linux/rfkill.h b/include/linux/rfkill.h
index 16e39c7..e73e242 100644
--- a/include/linux/rfkill.h
+++ b/include/linux/rfkill.h
@@ -160,8 +160,9 @@
* the rfkill structure. Before calling this function the driver needs
* to be ready to service method calls from rfkill.
*
- * If the software blocked state is not set before registration,
- * set_block will be called to initialize it to a default value.
+ * If rfkill_init_sw_state() is not called before registration,
+ * set_block() will be called to initialize the software blocked state
+ * to a default value.
*
* If the hardware blocked state is not set before registration,
* it is assumed to be unblocked.
@@ -234,9 +235,11 @@
* rfkill drivers that get events when the soft-blocked state changes
* (yes, some platforms directly act on input but allow changing again)
* use this function to notify the rfkill core (and through that also
- * userspace) of the current state. It is not necessary to notify on
- * resume; since hibernation can always change the soft-blocked state,
- * the rfkill core will unconditionally restore the previous state.
+ * userspace) of the current state.
+ *
+ * Drivers should also call this function after resume if the state has
+ * been changed by the user. This only makes sense for "persistent"
+ * devices (see rfkill_init_sw_state()).
*
* This function can be called in any context, even from within rfkill
* callbacks.
@@ -247,6 +250,22 @@
bool rfkill_set_sw_state(struct rfkill *rfkill, bool blocked);
/**
+ * rfkill_init_sw_state - Initialize persistent software block state
+ * @rfkill: pointer to the rfkill class to modify.
+ * @state: the current software block state to set
+ *
+ * rfkill drivers that preserve their software block state over power off
+ * use this function to notify the rfkill core (and through that also
+ * userspace) of their initial state. It should only be used before
+ * registration.
+ *
+ * In addition, it marks the device as "persistent", an attribute which
+ * can be read by userspace. Persistent devices are expected to preserve
+ * their own state when suspended.
+ */
+void rfkill_init_sw_state(struct rfkill *rfkill, bool blocked);
+
+/**
* rfkill_set_states - Set the internal rfkill block states
* @rfkill: pointer to the rfkill class to modify.
* @sw: the current software block state to set
@@ -307,6 +326,10 @@
return blocked;
}
+static inline void rfkill_init_sw_state(struct rfkill *rfkill, bool blocked)
+{
+}
+
static inline void rfkill_set_states(struct rfkill *rfkill, bool sw, bool hw)
{
}
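The kerneldoc added above distinguishes rfkill_init_sw_state() (before registration, for hardware that remembers its soft-block setting) from rfkill_set_sw_state() (runtime and resume updates). A hedged sketch of the registration-time pattern it implies; every example_* name and the WLAN type are assumptions for illustration:

#include <linux/device.h>
#include <linux/errno.h>
#include <linux/rfkill.h>

struct example_dev {
	struct device *dev;
	struct rfkill *rfkill;
};

static int example_set_block(void *data, bool blocked)
{
	/* program the radio on or off here */
	return 0;
}

static const struct rfkill_ops example_rfkill_ops = {
	.set_block = example_set_block,
};

/* placeholder for reading the soft-block setting the hardware kept
 * across power off (EEPROM, NVRAM, ...) */
static bool example_read_persistent_block(struct example_dev *ed)
{
	return false;
}

static int example_rfkill_setup(struct example_dev *ed)
{
	ed->rfkill = rfkill_alloc("example-wlan", ed->dev, RFKILL_TYPE_WLAN,
				  &example_rfkill_ops, ed);
	if (!ed->rfkill)
		return -ENOMEM;

	/* seed the state and mark the device "persistent"; must happen
	 * before rfkill_register(), per the comment above */
	rfkill_init_sw_state(ed->rfkill, example_read_persistent_block(ed));

	return rfkill_register(ed->rfkill);
}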
diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index 216d024..bf116d0 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -118,7 +118,14 @@
#define anon_vma_prepare(vma) (0)
#define anon_vma_link(vma) do {} while (0)
-#define page_referenced(page, locked, cnt, flags) TestClearPageReferenced(page)
+static inline int page_referenced(struct page *page, int is_locked,
+ struct mem_cgroup *cnt,
+ unsigned long *vm_flags)
+{
+ *vm_flags = 0;
+ return TestClearPageReferenced(page);
+}
+
#define try_to_unmap(page, refs) SWAP_FAIL
static inline int page_mkclean(struct page *page)
diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h
index 6fd80c4..23d2fb0 100644
--- a/include/linux/serial_core.h
+++ b/include/linux/serial_core.h
@@ -171,6 +171,9 @@
/* Timberdale UART */
#define PORT_TIMBUART 87
+/* Qualcomm MSM SoCs */
+#define PORT_MSM 88
+
#ifdef __KERNEL__
#include <linux/compiler.h>
diff --git a/include/linux/serial_reg.h b/include/linux/serial_reg.h
index 96c0d93..850db2e 100644
--- a/include/linux/serial_reg.h
+++ b/include/linux/serial_reg.h
@@ -323,6 +323,7 @@
#define UART_OMAP_MVER 0x14 /* Module version register */
#define UART_OMAP_SYSC 0x15 /* System configuration register */
#define UART_OMAP_SYSS 0x16 /* System status register */
+#define UART_OMAP_WER 0x17 /* Wake-up enable register */
#endif /* _LINUX_SERIAL_REG_H */
diff --git a/include/linux/shmem_fs.h b/include/linux/shmem_fs.h
index fd83f25..abff6c9 100644
--- a/include/linux/shmem_fs.h
+++ b/include/linux/shmem_fs.h
@@ -19,10 +19,6 @@
swp_entry_t i_direct[SHMEM_NR_DIRECT]; /* first blocks */
struct list_head swaplist; /* chain of maybes on swap */
struct inode vfs_inode;
-#ifdef CONFIG_TMPFS_POSIX_ACL
- struct posix_acl *i_acl;
- struct posix_acl *i_default_acl;
-#endif
};
struct shmem_sb_info {
@@ -45,7 +41,6 @@
#ifdef CONFIG_TMPFS_POSIX_ACL
int shmem_permission(struct inode *, int);
int shmem_acl_init(struct inode *, struct inode *);
-void shmem_acl_destroy_inode(struct inode *);
extern struct xattr_handler shmem_xattr_acl_access_handler;
extern struct xattr_handler shmem_xattr_acl_default_handler;
@@ -57,9 +52,6 @@
{
return 0;
}
-static inline void shmem_acl_destroy_inode(struct inode *inode)
-{
-}
#endif /* CONFIG_TMPFS_POSIX_ACL */
#endif
diff --git a/include/linux/sunrpc/bc_xprt.h b/include/linux/sunrpc/bc_xprt.h
new file mode 100644
index 0000000..6508f0d
--- /dev/null
+++ b/include/linux/sunrpc/bc_xprt.h
@@ -0,0 +1,49 @@
+/******************************************************************************
+
+(c) 2008 NetApp. All Rights Reserved.
+
+NetApp provides this source code under the GPL v2 License.
+The GPL v2 license is available at
+http://opensource.org/licenses/gpl-license.php.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+******************************************************************************/
+
+/*
+ * Functions to create and manage the backchannel
+ */
+
+#ifndef _LINUX_SUNRPC_BC_XPRT_H
+#define _LINUX_SUNRPC_BC_XPRT_H
+
+#include <linux/sunrpc/svcsock.h>
+#include <linux/sunrpc/xprt.h>
+#include <linux/sunrpc/sched.h>
+
+#ifdef CONFIG_NFS_V4_1
+struct rpc_rqst *xprt_alloc_bc_request(struct rpc_xprt *xprt);
+void xprt_free_bc_request(struct rpc_rqst *req);
+int xprt_setup_backchannel(struct rpc_xprt *, unsigned int min_reqs);
+void xprt_destroy_backchannel(struct rpc_xprt *, int max_reqs);
+void bc_release_request(struct rpc_task *);
+int bc_send(struct rpc_rqst *req);
+#else /* CONFIG_NFS_V4_1 */
+static inline int xprt_setup_backchannel(struct rpc_xprt *xprt,
+ unsigned int min_reqs)
+{
+ return 0;
+}
+#endif /* CONFIG_NFS_V4_1 */
+#endif /* _LINUX_SUNRPC_BC_XPRT_H */
+
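A minimal sketch of the server-side hookup this new header advertises, valid only with CONFIG_NFS_V4_1 enabled; the request count of 1 is an illustrative value, not one taken from the patch:

#include <linux/sunrpc/bc_xprt.h>

/* preallocate backchannel rpc_rqsts when a v4.1 session is set up,
 * and release them again when the session is torn down */
static int example_enable_backchannel(struct rpc_xprt *xprt)
{
	return xprt_setup_backchannel(xprt, 1);
}

static void example_disable_backchannel(struct rpc_xprt *xprt)
{
	xprt_destroy_backchannel(xprt, 1);
}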
diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
index c39a210..37881f1a 100644
--- a/include/linux/sunrpc/clnt.h
+++ b/include/linux/sunrpc/clnt.h
@@ -143,6 +143,7 @@
const struct rpc_message *msg, int flags);
struct rpc_task *rpc_call_null(struct rpc_clnt *clnt, struct rpc_cred *cred,
int flags);
+void rpc_restart_call_prepare(struct rpc_task *);
void rpc_restart_call(struct rpc_task *);
void rpc_setbufsize(struct rpc_clnt *, unsigned int, unsigned int);
size_t rpc_max_payload(struct rpc_clnt *);
diff --git a/include/linux/sunrpc/sched.h b/include/linux/sunrpc/sched.h
index 64981a2..4010977 100644
--- a/include/linux/sunrpc/sched.h
+++ b/include/linux/sunrpc/sched.h
@@ -210,6 +210,8 @@
*/
struct rpc_task *rpc_new_task(const struct rpc_task_setup *);
struct rpc_task *rpc_run_task(const struct rpc_task_setup *);
+struct rpc_task *rpc_run_bc_task(struct rpc_rqst *req,
+ const struct rpc_call_ops *ops);
void rpc_put_task(struct rpc_task *);
void rpc_exit_task(struct rpc_task *);
void rpc_release_calldata(const struct rpc_call_ops *, void *);
@@ -237,6 +239,7 @@
int rpc_init_mempool(void);
void rpc_destroy_mempool(void);
extern struct workqueue_struct *rpciod_workqueue;
+void rpc_prepare_task(struct rpc_task *task);
static inline void rpc_exit(struct rpc_task *task, int status)
{
diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h
index 2a30775..ea80096 100644
--- a/include/linux/sunrpc/svc.h
+++ b/include/linux/sunrpc/svc.h
@@ -96,6 +96,15 @@
svc_thread_fn sv_function; /* main function for threads */
unsigned int sv_drc_max_pages; /* Total pages for DRC */
unsigned int sv_drc_pages_used;/* DRC pages used */
+#if defined(CONFIG_NFS_V4_1)
+ struct list_head sv_cb_list; /* queue for callback requests
+ * that arrive over the same
+ * connection */
+ spinlock_t sv_cb_lock; /* protects the svc_cb_list */
+ wait_queue_head_t sv_cb_waitq; /* sleep here if there are no
+ * entries in the svc_cb_list */
+ struct svc_xprt *bc_xprt;
+#endif /* CONFIG_NFS_V4_1 */
};
/*
@@ -411,6 +420,8 @@
int svc_pool_stats_open(struct svc_serv *serv, struct file *file);
void svc_destroy(struct svc_serv *);
int svc_process(struct svc_rqst *);
+int bc_svc_process(struct svc_serv *, struct rpc_rqst *,
+ struct svc_rqst *);
int svc_register(const struct svc_serv *, const int,
const unsigned short, const unsigned short);
diff --git a/include/linux/sunrpc/svc_xprt.h b/include/linux/sunrpc/svc_xprt.h
index 0d9cb6e..2223ae0 100644
--- a/include/linux/sunrpc/svc_xprt.h
+++ b/include/linux/sunrpc/svc_xprt.h
@@ -83,7 +83,7 @@
int svc_print_xprts(char *buf, int maxlen);
struct svc_xprt *svc_find_xprt(struct svc_serv *serv, const char *xcl_name,
const sa_family_t af, const unsigned short port);
-int svc_xprt_names(struct svc_serv *serv, char *buf, int buflen);
+int svc_xprt_names(struct svc_serv *serv, char *buf, const int buflen);
static inline void svc_xprt_get(struct svc_xprt *xprt)
{
@@ -118,7 +118,7 @@
return 0;
}
-static inline size_t svc_addr_len(struct sockaddr *sa)
+static inline size_t svc_addr_len(const struct sockaddr *sa)
{
switch (sa->sa_family) {
case AF_INET:
@@ -126,7 +126,8 @@
case AF_INET6:
return sizeof(struct sockaddr_in6);
}
- return -EAFNOSUPPORT;
+
+ return 0;
}
static inline unsigned short svc_xprt_local_port(const struct svc_xprt *xprt)
diff --git a/include/linux/sunrpc/svcsock.h b/include/linux/sunrpc/svcsock.h
index 483e103..04dba23 100644
--- a/include/linux/sunrpc/svcsock.h
+++ b/include/linux/sunrpc/svcsock.h
@@ -38,10 +38,15 @@
int svc_send(struct svc_rqst *);
void svc_drop(struct svc_rqst *);
void svc_sock_update_bufs(struct svc_serv *serv);
-int svc_sock_names(char *buf, struct svc_serv *serv, char *toclose);
-int svc_addsock(struct svc_serv *serv, int fd, char *name_return);
+int svc_sock_names(struct svc_serv *serv, char *buf,
+ const size_t buflen,
+ const char *toclose);
+int svc_addsock(struct svc_serv *serv, const int fd,
+ char *name_return, const size_t len);
void svc_init_xprt_sock(void);
void svc_cleanup_xprt_sock(void);
+struct svc_xprt *svc_sock_create(struct svc_serv *serv, int prot);
+void svc_sock_destroy(struct svc_xprt *);
/*
* svc_makesock socket characteristics
diff --git a/include/linux/sunrpc/xprt.h b/include/linux/sunrpc/xprt.h
index 08afe43..1175d58 100644
--- a/include/linux/sunrpc/xprt.h
+++ b/include/linux/sunrpc/xprt.h
@@ -67,7 +67,8 @@
struct rpc_task * rq_task; /* RPC task data */
__be32 rq_xid; /* request XID */
int rq_cong; /* has incremented xprt->cong */
- int rq_received; /* receive completed */
+ int rq_reply_bytes_recvd; /* number of reply */
+ /* bytes received */
u32 rq_seqno; /* gss seq no. used on req. */
int rq_enc_pages_num;
struct page **rq_enc_pages; /* scratch pages for use by
@@ -97,6 +98,12 @@
unsigned long rq_xtime; /* when transmitted */
int rq_ntrans;
+
+#if defined(CONFIG_NFS_V4_1)
+ struct list_head rq_bc_list; /* Callback service list */
+ unsigned long rq_bc_pa_state; /* Backchannel prealloc state */
+ struct list_head rq_bc_pa_list; /* Backchannel prealloc list */
+#endif /* CONFIG_NFS_V4_1 */
};
#define rq_svec rq_snd_buf.head
#define rq_slen rq_snd_buf.len
@@ -174,6 +181,15 @@
spinlock_t reserve_lock; /* lock slot table */
u32 xid; /* Next XID value to use */
struct rpc_task * snd_task; /* Task blocked in send */
+#if defined(CONFIG_NFS_V4_1)
+ struct svc_serv *bc_serv; /* The RPC service which will */
+ /* process the callback */
+ unsigned int bc_alloc_count; /* Total number of preallocs */
+ spinlock_t bc_pa_lock; /* Protects the preallocated
+ * items */
+ struct list_head bc_pa_list; /* List of preallocated
+ * backchannel rpc_rqst's */
+#endif /* CONFIG_NFS_V4_1 */
struct list_head recv;
struct {
@@ -192,6 +208,26 @@
const char *address_strings[RPC_DISPLAY_MAX];
};
+#if defined(CONFIG_NFS_V4_1)
+/*
+ * Backchannel flags
+ */
+#define RPC_BC_PA_IN_USE 0x0001 /* Preallocated backchannel */
+ /* buffer in use */
+#endif /* CONFIG_NFS_V4_1 */
+
+#if defined(CONFIG_NFS_V4_1)
+static inline int bc_prealloc(struct rpc_rqst *req)
+{
+ return test_bit(RPC_BC_PA_IN_USE, &req->rq_bc_pa_state);
+}
+#else
+static inline int bc_prealloc(struct rpc_rqst *req)
+{
+ return 0;
+}
+#endif /* CONFIG_NFS_V4_1 */
+
struct xprt_create {
int ident; /* XPRT_TRANSPORT identifier */
struct sockaddr * srcaddr; /* optional local address */
diff --git a/include/linux/swap.h b/include/linux/swap.h
index c88b366..7c15334 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -298,8 +298,8 @@
struct backing_dev_info;
/* linux/mm/thrash.c */
-extern struct mm_struct * swap_token_mm;
-extern void grab_swap_token(void);
+extern struct mm_struct *swap_token_mm;
+extern void grab_swap_token(struct mm_struct *);
extern void __put_swap_token(struct mm_struct *);
static inline int has_swap_token(struct mm_struct *mm)
@@ -419,10 +419,10 @@
}
/* linux/mm/thrash.c */
-#define put_swap_token(x) do { } while(0)
-#define grab_swap_token() do { } while(0)
-#define has_swap_token(x) 0
-#define disable_swap_token() do { } while(0)
+#define put_swap_token(mm) do { } while (0)
+#define grab_swap_token(mm) do { } while (0)
+#define has_swap_token(mm) 0
+#define disable_swap_token() do { } while (0)
static inline void
mem_cgroup_uncharge_swapcache(struct page *page, swp_entry_t ent)
diff --git a/include/linux/timer.h b/include/linux/timer.h
index ccf882e..be62ec2 100644
--- a/include/linux/timer.h
+++ b/include/linux/timer.h
@@ -190,6 +190,8 @@
*/
#ifdef CONFIG_TIMER_STATS
+extern int timer_stats_active;
+
#define TIMER_STATS_FLAG_DEFERRABLE 0x1
extern void init_timer_stats(void);
@@ -203,6 +205,8 @@
static inline void timer_stats_timer_set_start_info(struct timer_list *timer)
{
+ if (likely(!timer_stats_active))
+ return;
__timer_stats_timer_set_start_info(timer, __builtin_return_address(0));
}
diff --git a/include/linux/videodev2.h b/include/linux/videodev2.h
index f24ecee..8a025d5 100644
--- a/include/linux/videodev2.h
+++ b/include/linux/videodev2.h
@@ -348,6 +348,7 @@
#define V4L2_PIX_FMT_SQ905C v4l2_fourcc('9', '0', '5', 'C') /* compressed RGGB bayer */
#define V4L2_PIX_FMT_PJPG v4l2_fourcc('P', 'J', 'P', 'G') /* Pixart 73xx JPEG */
#define V4L2_PIX_FMT_YVYU v4l2_fourcc('Y', 'V', 'Y', 'U') /* 16 YVU 4:2:2 */
+#define V4L2_PIX_FMT_OV511 v4l2_fourcc('O', '5', '1', '1') /* ov511 JPEG */
#define V4L2_PIX_FMT_OV518 v4l2_fourcc('O', '5', '1', '8') /* ov518 JPEG */
/*
@@ -894,9 +895,10 @@
V4L2_COLORFX_BW = 1,
V4L2_COLORFX_SEPIA = 2,
};
+#define V4L2_CID_AUTOBRIGHTNESS (V4L2_CID_BASE+32)
/* last CID + 1 */
-#define V4L2_CID_LASTP1 (V4L2_CID_BASE+32)
+#define V4L2_CID_LASTP1 (V4L2_CID_BASE+33)
/* MPEG-class control IDs defined by V4L2 */
#define V4L2_CID_MPEG_BASE (V4L2_CTRL_CLASS_MPEG | 0x900)
diff --git a/include/media/ir-common.h b/include/media/ir-common.h
index 7b5b91f..9dcb632 100644
--- a/include/media/ir-common.h
+++ b/include/media/ir-common.h
@@ -162,6 +162,8 @@
extern IR_KEYTAB_TYPE ir_codes_kworld_plus_tv_analog[IR_KEYTAB_SIZE];
extern IR_KEYTAB_TYPE ir_codes_kaiomy[IR_KEYTAB_SIZE];
extern IR_KEYTAB_TYPE ir_codes_dm1105_nec[IR_KEYTAB_SIZE];
+extern IR_KEYTAB_TYPE ir_codes_evga_indtube[IR_KEYTAB_SIZE];
+
#endif
/*
diff --git a/include/media/v4l2-common.h b/include/media/v4l2-common.h
index c48c24e..33a1842 100644
--- a/include/media/v4l2-common.h
+++ b/include/media/v4l2-common.h
@@ -153,6 +153,22 @@
struct v4l2_subdev *v4l2_i2c_new_probed_subdev_addr(struct v4l2_device *v4l2_dev,
struct i2c_adapter *adapter,
const char *module_name, const char *client_type, u8 addr);
+
+/* Load an i2c module and return an initialized v4l2_subdev struct.
+ Only call request_module if module_name != NULL.
+ The client_type argument is the name of the chip that's on the adapter. */
+struct v4l2_subdev *v4l2_i2c_new_subdev_cfg(struct v4l2_device *v4l2_dev,
+ struct i2c_adapter *adapter,
+ const char *module_name, const char *client_type,
+ int irq, void *platform_data,
+ u8 addr, const unsigned short *probe_addrs);
+
+struct i2c_board_info;
+
+struct v4l2_subdev *v4l2_i2c_new_subdev_board(struct v4l2_device *v4l2_dev,
+ struct i2c_adapter *adapter, const char *module_name,
+ struct i2c_board_info *info, const unsigned short *probe_addrs);
+
/* Initialize an v4l2_subdev with data from an i2c_client struct */
void v4l2_i2c_subdev_init(struct v4l2_subdev *sd, struct i2c_client *client,
const struct v4l2_subdev_ops *ops);
@@ -193,4 +209,14 @@
u32 output;
};
+/* ------------------------------------------------------------------------- */
+
+/* Miscellaneous helper functions */
+
+void v4l_bound_align_image(unsigned int *w, unsigned int wmin,
+ unsigned int wmax, unsigned int walign,
+ unsigned int *h, unsigned int hmin,
+ unsigned int hmax, unsigned int halign,
+ unsigned int salign);
+
#endif /* V4L2_COMMON_H_ */
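A sketch of the new v4l_bound_align_image() helper in a driver's try_fmt path. The limits are invented example values; the *align arguments are bit shifts, so walign = 2 means "multiple of 4" and halign = 1 means "multiple of 2":

#include <linux/videodev2.h>
#include <media/v4l2-common.h>

static void example_try_size(struct v4l2_pix_format *pix)
{
	/* clamp to 48x32 .. 1280x1024 and round to the required alignment */
	v4l_bound_align_image(&pix->width,  48, 1280, 2,
			      &pix->height, 32, 1024, 1, 0);
}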
diff --git a/include/media/v4l2-i2c-drv.h b/include/media/v4l2-i2c-drv.h
index 10a2882..74bf741 100644
--- a/include/media/v4l2-i2c-drv.h
+++ b/include/media/v4l2-i2c-drv.h
@@ -22,7 +22,7 @@
*/
/* NOTE: the full version of this header is in the v4l-dvb repository
- * and allows v4l i2c drivers to be compiled on older kernels as well.
+ * and allows v4l i2c drivers to be compiled on pre-2.6.26 kernels.
* The version of this header as it appears in the kernel is a stripped
* version (without all the backwards compatibility stuff) and so it
* looks a bit odd.
@@ -30,6 +30,9 @@
* If you look at the full version then you will understand the reason
* for introducing this header since you really don't want to have all
* the tricky backwards compatibility code in each and every i2c driver.
+ *
+ * If the i2c driver will never be compiled for pre-2.6.26 kernels, then
+ * DO NOT USE this header! Just write it as a regular i2c driver.
*/
#ifndef __V4L2_I2C_DRV_H__
diff --git a/include/media/v4l2-subdev.h b/include/media/v4l2-subdev.h
index a503e1c..5dcb367 100644
--- a/include/media/v4l2-subdev.h
+++ b/include/media/v4l2-subdev.h
@@ -79,7 +79,11 @@
not yet implemented) since ops provide proper type-checking.
*/
-/* init: initialize the sensor registors to some sort of reasonable default
+/* s_config: if set, then it is always called by the v4l2_i2c_new_subdev*
+ functions after the v4l2_subdev was registered. It is used to pass
+ platform data to the subdev which can be used during initialization.
+
+ init: initialize the sensor registers to some sort of reasonable default
values. Do not use for new drivers and should be removed in existing
drivers.
@@ -96,6 +100,7 @@
struct v4l2_subdev_core_ops {
int (*g_chip_ident)(struct v4l2_subdev *sd, struct v4l2_dbg_chip_ident *chip);
int (*log_status)(struct v4l2_subdev *sd);
+ int (*s_config)(struct v4l2_subdev *sd, int irq, void *platform_data);
int (*init)(struct v4l2_subdev *sd, u32 val);
int (*load_fw)(struct v4l2_subdev *sd);
int (*reset)(struct v4l2_subdev *sd, u32 val);
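A hedged sketch of a subdev driver implementing the new s_config op documented above; struct example_state and its helpers are invented for the example. Per the comment, the core calls s_config right after the v4l2_i2c_new_subdev_* helpers register the subdev, handing over the irq and platform data supplied by the bridge driver:

#include <linux/kernel.h>
#include <media/v4l2-subdev.h>

struct example_state {
	struct v4l2_subdev sd;
	int irq;
	void *pdata;
};

static inline struct example_state *to_state(struct v4l2_subdev *sd)
{
	return container_of(sd, struct example_state, sd);
}

static int example_s_config(struct v4l2_subdev *sd, int irq,
			    void *platform_data)
{
	struct example_state *state = to_state(sd);

	state->irq = irq;
	state->pdata = platform_data;	/* board-specific configuration */
	return 0;
}

static const struct v4l2_subdev_core_ops example_core_ops = {
	.s_config = example_s_config,
};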
diff --git a/include/mtd/Kbuild b/include/mtd/Kbuild
index 8eb018f..192f8fb 100644
--- a/include/mtd/Kbuild
+++ b/include/mtd/Kbuild
@@ -1,5 +1,4 @@
header-y += inftl-user.h
-header-y += jffs2-user.h
header-y += mtd-abi.h
header-y += mtd-user.h
header-y += nftl-user.h
diff --git a/include/mtd/jffs2-user.h b/include/mtd/jffs2-user.h
deleted file mode 100644
index fa94b0e..0000000
--- a/include/mtd/jffs2-user.h
+++ /dev/null
@@ -1,34 +0,0 @@
-/*
- * JFFS2 definitions for use in user space only
- */
-
-#ifndef __JFFS2_USER_H__
-#define __JFFS2_USER_H__
-
-/* This file is blessed for inclusion by userspace */
-#include <linux/jffs2.h>
-#include <linux/types.h>
-#include <endian.h>
-#include <byteswap.h>
-
-#undef cpu_to_je16
-#undef cpu_to_je32
-#undef cpu_to_jemode
-#undef je16_to_cpu
-#undef je32_to_cpu
-#undef jemode_to_cpu
-
-extern int target_endian;
-
-#define t16(x) ({ __u16 __b = (x); (target_endian==__BYTE_ORDER)?__b:bswap_16(__b); })
-#define t32(x) ({ __u32 __b = (x); (target_endian==__BYTE_ORDER)?__b:bswap_32(__b); })
-
-#define cpu_to_je16(x) ((jint16_t){t16(x)})
-#define cpu_to_je32(x) ((jint32_t){t32(x)})
-#define cpu_to_jemode(x) ((jmode_t){t32(x)})
-
-#define je16_to_cpu(x) (t16((x).v16))
-#define je32_to_cpu(x) (t32((x).v32))
-#define jemode_to_cpu(x) (t32((x).m))
-
-#endif /* __JFFS2_USER_H__ */
diff --git a/include/mtd/mtd-abi.h b/include/mtd/mtd-abi.h
index b6595b3..be51ae2 100644
--- a/include/mtd/mtd-abi.h
+++ b/include/mtd/mtd-abi.h
@@ -12,12 +12,24 @@
__u32 length;
};
+struct erase_info_user64 {
+ __u64 start;
+ __u64 length;
+};
+
struct mtd_oob_buf {
__u32 start;
__u32 length;
unsigned char __user *ptr;
};
+struct mtd_oob_buf64 {
+ __u64 start;
+ __u32 pad;
+ __u32 length;
+ __u64 usr_ptr;
+};
+
#define MTD_ABSENT 0
#define MTD_RAM 1
#define MTD_ROM 2
@@ -95,6 +107,9 @@
#define ECCGETLAYOUT _IOR('M', 17, struct nand_ecclayout)
#define ECCGETSTATS _IOR('M', 18, struct mtd_ecc_stats)
#define MTDFILEMODE _IO('M', 19)
+#define MEMERASE64 _IOW('M', 20, struct erase_info_user64)
+#define MEMWRITEOOB64 _IOWR('M', 21, struct mtd_oob_buf64)
+#define MEMREADOOB64 _IOWR('M', 22, struct mtd_oob_buf64)
/*
* Obsolete legacy interface. Keep it in order not to break userspace
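The MEMERASE64 ioctl and struct erase_info_user64 added above let userspace address offsets beyond the 4GiB reach of the 32-bit erase_info_user interface. A minimal userspace sketch; the device node, offset, and block size are example values:

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <mtd/mtd-user.h>

int main(void)
{
	/* erase one 128KiB block located past the 4GiB mark */
	struct erase_info_user64 ei = {
		.start  = 0x100000000ULL,
		.length = 128 * 1024,
	};
	int fd = open("/dev/mtd0", O_RDWR);

	if (fd < 0) {
		perror("open /dev/mtd0");
		return 1;
	}
	if (ioctl(fd, MEMERASE64, &ei) < 0) {
		perror("MEMERASE64");
		close(fd);
		return 1;
	}
	close(fd);
	return 0;
}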
diff --git a/include/net/iucv/af_iucv.h b/include/net/iucv/af_iucv.h
index 21ee49f..f82a1e8 100644
--- a/include/net/iucv/af_iucv.h
+++ b/include/net/iucv/af_iucv.h
@@ -94,8 +94,6 @@
poll_table *wait);
void iucv_sock_link(struct iucv_sock_list *l, struct sock *s);
void iucv_sock_unlink(struct iucv_sock_list *l, struct sock *s);
-int iucv_sock_wait_state(struct sock *sk, int state, int state2,
- unsigned long timeo);
int iucv_sock_wait_cnt(struct sock *sk, unsigned long timeo);
void iucv_accept_enqueue(struct sock *parent, struct sock *sk);
void iucv_accept_unlink(struct sock *sk);
diff --git a/include/net/protocol.h b/include/net/protocol.h
index ffa5b8b..1089d5a 100644
--- a/include/net/protocol.h
+++ b/include/net/protocol.h
@@ -53,7 +53,7 @@
void (*err_handler)(struct sk_buff *skb,
struct inet6_skb_parm *opt,
- int type, int code, int offset,
+ u8 type, u8 code, int offset,
__be32 info);
int (*gso_send_check)(struct sk_buff *skb);
diff --git a/include/net/rawv6.h b/include/net/rawv6.h
index 8a22599..f6b9b83 100644
--- a/include/net/rawv6.h
+++ b/include/net/rawv6.h
@@ -6,7 +6,7 @@
#include <net/protocol.h>
void raw6_icmp_error(struct sk_buff *, int nexthdr,
- int type, int code, int inner_offset, __be32);
+ u8 type, u8 code, int inner_offset, __be32);
int raw6_local_deliver(struct sk_buff *, int);
extern int rawv6_rcv(struct sock *sk,
diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
index 9f80a76..d16a304 100644
--- a/include/net/sctp/sctp.h
+++ b/include/net/sctp/sctp.h
@@ -448,6 +448,7 @@
{
struct sctp_ulpevent *event = sctp_skb2event(skb);
+ skb_orphan(skb);
skb->sk = sk;
skb->destructor = sctp_sock_rfree;
atomic_add(event->rmem_len, &sk->sk_rmem_alloc);
diff --git a/include/net/sock.h b/include/net/sock.h
index 07133c5..352f06bb 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -1252,6 +1252,7 @@
static inline void skb_set_owner_w(struct sk_buff *skb, struct sock *sk)
{
+ skb_orphan(skb);
skb->sk = sk;
skb->destructor = sock_wfree;
/*
@@ -1264,6 +1265,7 @@
static inline void skb_set_owner_r(struct sk_buff *skb, struct sock *sk)
{
+ skb_orphan(skb);
skb->sk = sk;
skb->destructor = sock_rfree;
atomic_add(skb->truesize, &sk->sk_rmem_alloc);
diff --git a/include/net/xfrm.h b/include/net/xfrm.h
index 736bca4..9e3a3f4 100644
--- a/include/net/xfrm.h
+++ b/include/net/xfrm.h
@@ -1274,7 +1274,7 @@
struct xfrm6_tunnel {
int (*handler)(struct sk_buff *skb);
int (*err_handler)(struct sk_buff *skb, struct inet6_skb_parm *opt,
- int type, int code, int offset, __be32 info);
+ u8 type, u8 code, int offset, __be32 info);
struct xfrm6_tunnel *next;
int priority;
};
diff --git a/include/scsi/fc_encode.h b/include/scsi/fc_encode.h
index 6300f55..a0ff61c 100644
--- a/include/scsi/fc_encode.h
+++ b/include/scsi/fc_encode.h
@@ -107,7 +107,6 @@
break;
default:
- FC_DBG("Invalid op code %x \n", op);
return -EINVAL;
}
*r_ctl = FC_RCTL_DD_UNSOL_CTL;
@@ -298,7 +297,6 @@
break;
default:
- FC_DBG("Invalid op code %x \n", op);
return -EINVAL;
}
diff --git a/include/scsi/libfc.h b/include/scsi/libfc.h
index ebdd9f4..b92584a 100644
--- a/include/scsi/libfc.h
+++ b/include/scsi/libfc.h
@@ -34,17 +34,72 @@
#include <scsi/fc_frame.h>
-#define LIBFC_DEBUG
+#define FC_LIBFC_LOGGING 0x01 /* General logging, not categorized */
+#define FC_LPORT_LOGGING 0x02 /* lport layer logging */
+#define FC_DISC_LOGGING 0x04 /* discovery layer logging */
+#define FC_RPORT_LOGGING 0x08 /* rport layer logging */
+#define FC_FCP_LOGGING 0x10 /* I/O path logging */
+#define FC_EM_LOGGING 0x20 /* Exchange Manager logging */
+#define FC_EXCH_LOGGING 0x40 /* Exchange/Sequence logging */
+#define FC_SCSI_LOGGING 0x80 /* SCSI logging (mostly error handling) */
-#ifdef LIBFC_DEBUG
-/* Log messages */
-#define FC_DBG(fmt, args...) \
- do { \
- printk(KERN_INFO "%s " fmt, __func__, ##args); \
- } while (0)
-#else
-#define FC_DBG(fmt, args...)
-#endif
+extern unsigned int fc_debug_logging;
+
+#define FC_CHECK_LOGGING(LEVEL, CMD) \
+do { \
+ if (unlikely(fc_debug_logging & LEVEL)) \
+ do { \
+ CMD; \
+ } while (0); \
+} while (0);
+
+#define FC_LIBFC_DBG(fmt, args...) \
+ FC_CHECK_LOGGING(FC_LIBFC_LOGGING, \
+ printk(KERN_INFO "libfc: " fmt, ##args);)
+
+#define FC_LPORT_DBG(lport, fmt, args...) \
+ FC_CHECK_LOGGING(FC_LPORT_LOGGING, \
+ printk(KERN_INFO "lport: %6x: " fmt, \
+ fc_host_port_id(lport->host), ##args);)
+
+#define FC_DISC_DBG(disc, fmt, args...) \
+ FC_CHECK_LOGGING(FC_DISC_LOGGING, \
+ printk(KERN_INFO "disc: %6x: " fmt, \
+ fc_host_port_id(disc->lport->host), \
+ ##args);)
+
+#define FC_RPORT_DBG(rport, fmt, args...) \
+do { \
+ struct fc_rport_libfc_priv *rdata = rport->dd_data; \
+ struct fc_lport *lport = rdata->local_port; \
+ FC_CHECK_LOGGING(FC_RPORT_LOGGING, \
+ printk(KERN_INFO "rport: %6x: %6x: " fmt, \
+ fc_host_port_id(lport->host), \
+ rport->port_id, ##args);) \
+} while (0);
+
+#define FC_FCP_DBG(pkt, fmt, args...) \
+ FC_CHECK_LOGGING(FC_FCP_LOGGING, \
+ printk(KERN_INFO "fcp: %6x: %6x: " fmt, \
+ fc_host_port_id(pkt->lp->host), \
+ pkt->rport->port_id, ##args);)
+
+#define FC_EM_DBG(em, fmt, args...) \
+ FC_CHECK_LOGGING(FC_EM_LOGGING, \
+ printk(KERN_INFO "em: %6x: " fmt, \
+ fc_host_port_id(em->lp->host), \
+ ##args);)
+
+#define FC_EXCH_DBG(exch, fmt, args...) \
+ FC_CHECK_LOGGING(FC_EXCH_LOGGING, \
+ printk(KERN_INFO "exch: %6x: %4x: " fmt, \
+ fc_host_port_id(exch->lp->host), \
+ exch->xid, ##args);)
+
+#define FC_SCSI_DBG(lport, fmt, args...) \
+ FC_CHECK_LOGGING(FC_SCSI_LOGGING, \
+ printk(KERN_INFO "scsi: %6x: " fmt, \
+ fc_host_port_id(lport->host), ##args);)
/*
* libfc error codes
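A sketch of the categorized logging macros above in use; messages only appear when the matching bit is set in fc_debug_logging, and how that variable gets set (for example via a module parameter) is assumed rather than shown in this hunk:

#include <scsi/libfc.h>

static void example_lport_ready(struct fc_lport *lport)
{
	/* emitted only when FC_LPORT_LOGGING (0x02) is enabled; the
	 * macro prefixes the message with the local port ID */
	FC_LPORT_DBG(lport, "local port is ready\n");
}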
diff --git a/include/scsi/libiscsi.h b/include/scsi/libiscsi.h
index 196525c..61afeb5 100644
--- a/include/scsi/libiscsi.h
+++ b/include/scsi/libiscsi.h
@@ -125,6 +125,10 @@
struct scsi_cmnd *sc; /* associated SCSI cmd*/
struct iscsi_conn *conn; /* used connection */
+ /* data processing tracking */
+ unsigned long last_xfer;
+ unsigned long last_timeout;
+ bool have_checked_conn;
/* state set/tested under session->lock */
int state;
atomic_t refcount;
diff --git a/include/scsi/scsi_driver.h b/include/scsi/scsi_driver.h
index 1f5ca7f..9fd6702 100644
--- a/include/scsi/scsi_driver.h
+++ b/include/scsi/scsi_driver.h
@@ -32,5 +32,6 @@
int scsi_setup_fs_cmnd(struct scsi_device *sdev, struct request *req);
int scsi_prep_state_check(struct scsi_device *sdev, struct request *req);
int scsi_prep_return(struct request_queue *q, struct request *req, int ret);
+int scsi_prep_fn(struct request_queue *, struct request *);
#endif /* _SCSI_SCSI_DRIVER_H */
diff --git a/init/main.c b/init/main.c
index 09131ec..2c5ade7 100644
--- a/init/main.c
+++ b/init/main.c
@@ -24,6 +24,7 @@
#include <linux/smp_lock.h>
#include <linux/initrd.h>
#include <linux/bootmem.h>
+#include <linux/acpi.h>
#include <linux/tty.h>
#include <linux/gfp.h>
#include <linux/percpu.h>
@@ -88,11 +89,6 @@
extern void prio_tree_init(void);
extern void radix_tree_init(void);
extern void free_initmem(void);
-#ifdef CONFIG_ACPI
-extern void acpi_early_init(void);
-#else
-static inline void acpi_early_init(void) { }
-#endif
#ifndef CONFIG_DEBUG_RODATA
static inline void mark_rodata_ro(void) { }
#endif
@@ -678,7 +674,6 @@
#endif
page_cgroup_init();
enable_debug_pagealloc();
- cpu_hotplug_init();
kmemtrace_init();
kmemleak_init();
debug_objects_mem_init();
diff --git a/kernel/Makefile b/kernel/Makefile
index 0a32cb2..2093a69 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -69,7 +69,7 @@
obj-$(CONFIG_RESOURCE_COUNTERS) += res_counter.o
obj-$(CONFIG_STOP_MACHINE) += stop_machine.o
obj-$(CONFIG_KPROBES_SANITY_TEST) += test_kprobes.o
-obj-$(CONFIG_AUDIT) += audit.o auditfilter.o
+obj-$(CONFIG_AUDIT) += audit.o auditfilter.o audit_watch.o
obj-$(CONFIG_AUDITSYSCALL) += auditsc.o
obj-$(CONFIG_GCOV_KERNEL) += gcov/
obj-$(CONFIG_AUDIT_TREE) += audit_tree.o
@@ -96,6 +96,7 @@
obj-$(CONFIG_FUNCTION_TRACER) += trace/
obj-$(CONFIG_TRACING) += trace/
obj-$(CONFIG_X86_DS) += trace/
+obj-$(CONFIG_RING_BUFFER) += trace/
obj-$(CONFIG_SMP) += sched_cpupri.o
obj-$(CONFIG_SLOW_WORK) += slow-work.o
obj-$(CONFIG_PERF_COUNTERS) += perf_counter.o
diff --git a/kernel/audit.c b/kernel/audit.c
index 9442c35..defc2e6 100644
--- a/kernel/audit.c
+++ b/kernel/audit.c
@@ -115,9 +115,6 @@
/* The netlink socket. */
static struct sock *audit_sock;
-/* Inotify handle. */
-struct inotify_handle *audit_ih;
-
/* Hash for inode-based rules */
struct list_head audit_inode_hash[AUDIT_INODE_BUCKETS];
@@ -136,7 +133,7 @@
static DECLARE_WAIT_QUEUE_HEAD(audit_backlog_wait);
/* Serialize requests from userspace. */
-static DEFINE_MUTEX(audit_cmd_mutex);
+DEFINE_MUTEX(audit_cmd_mutex);
/* AUDIT_BUFSIZ is the size of the temporary buffer used for formatting
* audit records. Since printk uses a 1024 byte buffer, this buffer
@@ -375,6 +372,25 @@
kfree_skb(skb);
}
+/*
+ * For one reason or another this nlh isn't getting delivered to the userspace
+ * audit daemon, just send it to printk.
+ */
+static void audit_printk_skb(struct sk_buff *skb)
+{
+ struct nlmsghdr *nlh = nlmsg_hdr(skb);
+ char *data = NLMSG_DATA(nlh);
+
+ if (nlh->nlmsg_type != AUDIT_EOE) {
+ if (printk_ratelimit())
+ printk(KERN_NOTICE "type=%d %s\n", nlh->nlmsg_type, data);
+ else
+ audit_log_lost("printk limit exceeded\n");
+ }
+
+ audit_hold_skb(skb);
+}
+
static void kauditd_send_skb(struct sk_buff *skb)
{
int err;
@@ -427,14 +443,8 @@
if (skb) {
if (audit_pid)
kauditd_send_skb(skb);
- else {
- if (printk_ratelimit())
- printk(KERN_NOTICE "%s\n", skb->data + NLMSG_SPACE(0));
- else
- audit_log_lost("printk limit exceeded\n");
-
- audit_hold_skb(skb);
- }
+ else
+ audit_printk_skb(skb);
} else {
DECLARE_WAITQUEUE(wait, current);
set_current_state(TASK_INTERRUPTIBLE);
@@ -495,42 +505,25 @@
return 0;
}
-#ifdef CONFIG_AUDIT_TREE
-static int prune_tree_thread(void *unused)
-{
- mutex_lock(&audit_cmd_mutex);
- audit_prune_trees();
- mutex_unlock(&audit_cmd_mutex);
- return 0;
-}
-
-void audit_schedule_prune(void)
-{
- kthread_run(prune_tree_thread, NULL, "audit_prune_tree");
-}
-#endif
-
struct sk_buff *audit_make_reply(int pid, int seq, int type, int done,
int multi, void *payload, int size)
{
struct sk_buff *skb;
struct nlmsghdr *nlh;
- int len = NLMSG_SPACE(size);
void *data;
int flags = multi ? NLM_F_MULTI : 0;
int t = done ? NLMSG_DONE : type;
- skb = alloc_skb(len, GFP_KERNEL);
+ skb = nlmsg_new(size, GFP_KERNEL);
if (!skb)
return NULL;
- nlh = NLMSG_PUT(skb, pid, seq, t, size);
- nlh->nlmsg_flags = flags;
- data = NLMSG_DATA(nlh);
+ nlh = NLMSG_NEW(skb, pid, seq, t, size, flags);
+ data = NLMSG_DATA(nlh);
memcpy(data, payload, size);
return skb;
-nlmsg_failure: /* Used by NLMSG_PUT */
+nlmsg_failure: /* Used by NLMSG_NEW */
if (skb)
kfree_skb(skb);
return NULL;
@@ -926,28 +919,29 @@
}
/*
- * Get message from skb (based on rtnetlink_rcv_skb). Each message is
- * processed by audit_receive_msg. Malformed skbs with wrong length are
- * discarded silently.
+ * Get message from skb. Each message is processed by audit_receive_msg.
+ * Malformed skbs with wrong length are discarded silently.
*/
static void audit_receive_skb(struct sk_buff *skb)
{
- int err;
- struct nlmsghdr *nlh;
- u32 rlen;
+ struct nlmsghdr *nlh;
+ /*
+ * len MUST be signed for NLMSG_NEXT to be able to dec it below 0
+ * if the nlmsg_len was not aligned
+ */
+ int len;
+ int err;
- while (skb->len >= NLMSG_SPACE(0)) {
- nlh = nlmsg_hdr(skb);
- if (nlh->nlmsg_len < sizeof(*nlh) || skb->len < nlh->nlmsg_len)
- return;
- rlen = NLMSG_ALIGN(nlh->nlmsg_len);
- if (rlen > skb->len)
- rlen = skb->len;
- if ((err = audit_receive_msg(skb, nlh))) {
+ nlh = nlmsg_hdr(skb);
+ len = skb->len;
+
+ while (NLMSG_OK(nlh, len)) {
+ err = audit_receive_msg(skb, nlh);
+ /* if err or if this message says it wants a response */
+ if (err || (nlh->nlmsg_flags & NLM_F_ACK))
netlink_ack(skb, nlh, err);
- } else if (nlh->nlmsg_flags & NLM_F_ACK)
- netlink_ack(skb, nlh, 0);
- skb_pull(skb, rlen);
+
+ nlh = NLMSG_NEXT(nlh, len);
}
}
@@ -959,13 +953,6 @@
mutex_unlock(&audit_cmd_mutex);
}
-#ifdef CONFIG_AUDITSYSCALL
-static const struct inotify_operations audit_inotify_ops = {
- .handle_event = audit_handle_ievent,
- .destroy_watch = audit_free_parent,
-};
-#endif
-
/* Initialize audit support at boot time. */
static int __init audit_init(void)
{
@@ -991,12 +978,6 @@
audit_log(NULL, GFP_KERNEL, AUDIT_KERNEL, "initialized");
-#ifdef CONFIG_AUDITSYSCALL
- audit_ih = inotify_init(&audit_inotify_ops);
- if (IS_ERR(audit_ih))
- audit_panic("cannot initialize inotify handle");
-#endif
-
for (i = 0; i < AUDIT_INODE_BUCKETS; i++)
INIT_LIST_HEAD(&audit_inode_hash[i]);
@@ -1070,18 +1051,20 @@
goto err;
}
- ab->skb = alloc_skb(AUDIT_BUFSIZ, gfp_mask);
- if (!ab->skb)
- goto err;
-
ab->ctx = ctx;
ab->gfp_mask = gfp_mask;
- nlh = (struct nlmsghdr *)skb_put(ab->skb, NLMSG_SPACE(0));
- nlh->nlmsg_type = type;
- nlh->nlmsg_flags = 0;
- nlh->nlmsg_pid = 0;
- nlh->nlmsg_seq = 0;
+
+ ab->skb = nlmsg_new(AUDIT_BUFSIZ, gfp_mask);
+ if (!ab->skb)
+ goto nlmsg_failure;
+
+ nlh = NLMSG_NEW(ab->skb, 0, 0, type, 0, 0);
+
return ab;
+
+nlmsg_failure: /* Used by NLMSG_NEW */
+ kfree_skb(ab->skb);
+ ab->skb = NULL;
err:
audit_buffer_free(ab);
return NULL;
@@ -1452,6 +1435,15 @@
kfree(pathname);
}
+void audit_log_key(struct audit_buffer *ab, char *key)
+{
+ audit_log_format(ab, " key=");
+ if (key)
+ audit_log_untrustedstring(ab, key);
+ else
+ audit_log_format(ab, "(null)");
+}
+
/**
* audit_log_end - end one audit record
* @ab: the audit_buffer
@@ -1475,15 +1467,7 @@
skb_queue_tail(&audit_skb_queue, ab->skb);
wake_up_interruptible(&kauditd_wait);
} else {
- if (nlh->nlmsg_type != AUDIT_EOE) {
- if (printk_ratelimit()) {
- printk(KERN_NOTICE "type=%d %s\n",
- nlh->nlmsg_type,
- ab->skb->data + NLMSG_SPACE(0));
- } else
- audit_log_lost("printk limit exceeded\n");
- }
- audit_hold_skb(ab->skb);
+ audit_printk_skb(ab->skb);
}
ab->skb = NULL;
}
diff --git a/kernel/audit.h b/kernel/audit.h
index 16f18ca..208687b 100644
--- a/kernel/audit.h
+++ b/kernel/audit.h
@@ -53,18 +53,7 @@
};
/* Rule lists */
-struct audit_parent;
-
-struct audit_watch {
- atomic_t count; /* reference count */
- char *path; /* insertion path */
- dev_t dev; /* associated superblock device */
- unsigned long ino; /* associated inode number */
- struct audit_parent *parent; /* associated parent */
- struct list_head wlist; /* entry in parent->watches list */
- struct list_head rules; /* associated rules */
-};
-
+struct audit_watch;
struct audit_tree;
struct audit_chunk;
@@ -108,19 +97,28 @@
int audit_send_list(void *);
-struct inotify_watch;
-/* Inotify handle */
-extern struct inotify_handle *audit_ih;
-
-extern void audit_free_parent(struct inotify_watch *);
-extern void audit_handle_ievent(struct inotify_watch *, u32, u32, u32,
- const char *, struct inode *);
extern int selinux_audit_rule_update(void);
extern struct mutex audit_filter_mutex;
extern void audit_free_rule_rcu(struct rcu_head *);
extern struct list_head audit_filter_list[];
+/* audit watch functions */
+extern unsigned long audit_watch_inode(struct audit_watch *watch);
+extern dev_t audit_watch_dev(struct audit_watch *watch);
+extern void audit_put_watch(struct audit_watch *watch);
+extern void audit_get_watch(struct audit_watch *watch);
+extern int audit_to_watch(struct audit_krule *krule, char *path, int len, u32 op);
+extern int audit_add_watch(struct audit_krule *krule);
+extern void audit_remove_watch(struct audit_watch *watch);
+extern void audit_remove_watch_rule(struct audit_krule *krule, struct list_head *list);
+extern void audit_inotify_unregister(struct list_head *in_list);
+extern char *audit_watch_path(struct audit_watch *watch);
+extern struct list_head *audit_watch_rules(struct audit_watch *watch);
+
+extern struct audit_entry *audit_dupe_rule(struct audit_krule *old,
+ struct audit_watch *watch);
+
#ifdef CONFIG_AUDIT_TREE
extern struct audit_chunk *audit_tree_lookup(const struct inode *);
extern void audit_put_chunk(struct audit_chunk *);
@@ -130,10 +128,9 @@
extern int audit_remove_tree_rule(struct audit_krule *);
extern void audit_trim_trees(void);
extern int audit_tag_tree(char *old, char *new);
-extern void audit_schedule_prune(void);
-extern void audit_prune_trees(void);
extern const char *audit_tree_path(struct audit_tree *);
extern void audit_put_tree(struct audit_tree *);
+extern void audit_kill_trees(struct list_head *);
#else
#define audit_remove_tree_rule(rule) BUG()
#define audit_add_tree_rule(rule) -EINVAL
@@ -142,6 +139,7 @@
#define audit_put_tree(tree) (void)0
#define audit_tag_tree(old, new) -EINVAL
#define audit_tree_path(rule) "" /* never called */
+#define audit_kill_trees(list) BUG()
#endif
extern char *audit_unpack_string(void **, size_t *, size_t);
@@ -160,7 +158,10 @@
return 0;
}
extern void audit_filter_inodes(struct task_struct *, struct audit_context *);
+extern struct list_head *audit_killed_trees(void);
#else
#define audit_signal_info(s,t) AUDIT_DISABLED
#define audit_filter_inodes(t,c) AUDIT_DISABLED
#endif
+
+extern struct mutex audit_cmd_mutex;
diff --git a/kernel/audit_tree.c b/kernel/audit_tree.c
index 1f6396d7..2451dc6 100644
--- a/kernel/audit_tree.c
+++ b/kernel/audit_tree.c
@@ -2,6 +2,7 @@
#include <linux/inotify.h>
#include <linux/namei.h>
#include <linux/mount.h>
+#include <linux/kthread.h>
struct audit_tree;
struct audit_chunk;
@@ -441,13 +442,11 @@
if (rule->tree) {
/* not a half-baked one */
ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_CONFIG_CHANGE);
- audit_log_format(ab, "op=remove rule dir=");
+ audit_log_format(ab, "op=");
+ audit_log_string(ab, "remove rule");
+ audit_log_format(ab, " dir=");
audit_log_untrustedstring(ab, rule->tree->pathname);
- if (rule->filterkey) {
- audit_log_format(ab, " key=");
- audit_log_untrustedstring(ab, rule->filterkey);
- } else
- audit_log_format(ab, " key=(null)");
+ audit_log_key(ab, rule->filterkey);
audit_log_format(ab, " list=%d res=1", rule->listnr);
audit_log_end(ab);
rule->tree = NULL;
@@ -519,6 +518,8 @@
}
}
+static void audit_schedule_prune(void);
+
/* called with audit_filter_mutex */
int audit_remove_tree_rule(struct audit_krule *rule)
{
@@ -824,10 +825,11 @@
/*
* That gets run when evict_chunk() ends up needing to kill audit_tree.
- * Runs from a separate thread, with audit_cmd_mutex held.
+ * Runs from a separate thread.
*/
-void audit_prune_trees(void)
+static int prune_tree_thread(void *unused)
{
+ mutex_lock(&audit_cmd_mutex);
mutex_lock(&audit_filter_mutex);
while (!list_empty(&prune_list)) {
@@ -844,6 +846,40 @@
}
mutex_unlock(&audit_filter_mutex);
+ mutex_unlock(&audit_cmd_mutex);
+ return 0;
+}
+
+static void audit_schedule_prune(void)
+{
+ kthread_run(prune_tree_thread, NULL, "audit_prune_tree");
+}
+
+/*
+ * ... and that one is done if evict_chunk() decides to delay until the end
+ * of syscall. Runs synchronously.
+ */
+void audit_kill_trees(struct list_head *list)
+{
+ mutex_lock(&audit_cmd_mutex);
+ mutex_lock(&audit_filter_mutex);
+
+ while (!list_empty(list)) {
+ struct audit_tree *victim;
+
+ victim = list_entry(list->next, struct audit_tree, list);
+ kill_rules(victim);
+ list_del_init(&victim->list);
+
+ mutex_unlock(&audit_filter_mutex);
+
+ prune_one(victim);
+
+ mutex_lock(&audit_filter_mutex);
+ }
+
+ mutex_unlock(&audit_filter_mutex);
+ mutex_unlock(&audit_cmd_mutex);
}
/*
@@ -854,6 +890,8 @@
static void evict_chunk(struct audit_chunk *chunk)
{
struct audit_tree *owner;
+ struct list_head *postponed = audit_killed_trees();
+ int need_prune = 0;
int n;
if (chunk->dead)
@@ -869,15 +907,21 @@
owner->root = NULL;
list_del_init(&owner->same_root);
spin_unlock(&hash_lock);
- kill_rules(owner);
- list_move(&owner->list, &prune_list);
- audit_schedule_prune();
+ if (!postponed) {
+ kill_rules(owner);
+ list_move(&owner->list, &prune_list);
+ need_prune = 1;
+ } else {
+ list_move(&owner->list, postponed);
+ }
spin_lock(&hash_lock);
}
list_del_rcu(&chunk->hash);
for (n = 0; n < chunk->count; n++)
list_del_init(&chunk->owners[n].list);
spin_unlock(&hash_lock);
+ if (need_prune)
+ audit_schedule_prune();
mutex_unlock(&audit_filter_mutex);
}
diff --git a/kernel/audit_watch.c b/kernel/audit_watch.c
new file mode 100644
index 0000000..0e96dbc
--- /dev/null
+++ b/kernel/audit_watch.c
@@ -0,0 +1,543 @@
+/* audit_watch.c -- watching inodes
+ *
+ * Copyright 2003-2009 Red Hat, Inc.
+ * Copyright 2005 Hewlett-Packard Development Company, L.P.
+ * Copyright 2005 IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/kernel.h>
+#include <linux/audit.h>
+#include <linux/kthread.h>
+#include <linux/mutex.h>
+#include <linux/fs.h>
+#include <linux/namei.h>
+#include <linux/netlink.h>
+#include <linux/sched.h>
+#include <linux/inotify.h>
+#include <linux/security.h>
+#include "audit.h"
+
+/*
+ * Reference counting:
+ *
+ * audit_parent: lifetime is from audit_init_parent() to receipt of an IN_IGNORED
+ * event. Each audit_watch holds a reference to its associated parent.
+ *
+ * audit_watch: if added to lists, lifetime is from audit_init_watch() to
+ * audit_remove_watch(). Additionally, an audit_watch may exist
+ * temporarily to assist in searching existing filter data. Each
+ * audit_krule holds a reference to its associated watch.
+ */
+
+struct audit_watch {
+ atomic_t count; /* reference count */
+ char *path; /* insertion path */
+ dev_t dev; /* associated superblock device */
+ unsigned long ino; /* associated inode number */
+ struct audit_parent *parent; /* associated parent */
+ struct list_head wlist; /* entry in parent->watches list */
+ struct list_head rules; /* associated rules */
+};
+
+struct audit_parent {
+ struct list_head ilist; /* entry in inotify registration list */
+ struct list_head watches; /* associated watches */
+ struct inotify_watch wdata; /* inotify watch data */
+ unsigned flags; /* status flags */
+};
+
+/* Inotify handle. */
+struct inotify_handle *audit_ih;
+
+/*
+ * audit_parent status flags:
+ *
+ * AUDIT_PARENT_INVALID - set anytime rules/watches are auto-removed due to
+ * a filesystem event to ensure we're adding audit watches to a valid parent.
+ * Technically not needed for IN_DELETE_SELF or IN_UNMOUNT events, as we cannot
+ * receive them while we have nameidata, but must be used for IN_MOVE_SELF which
+ * we can receive while holding nameidata.
+ */
+#define AUDIT_PARENT_INVALID 0x001
+
+/* Inotify events we care about. */
+#define AUDIT_IN_WATCH IN_MOVE|IN_CREATE|IN_DELETE|IN_DELETE_SELF|IN_MOVE_SELF
+
+static void audit_free_parent(struct inotify_watch *i_watch)
+{
+ struct audit_parent *parent;
+
+ parent = container_of(i_watch, struct audit_parent, wdata);
+ WARN_ON(!list_empty(&parent->watches));
+ kfree(parent);
+}
+
+void audit_get_watch(struct audit_watch *watch)
+{
+ atomic_inc(&watch->count);
+}
+
+void audit_put_watch(struct audit_watch *watch)
+{
+ if (atomic_dec_and_test(&watch->count)) {
+ WARN_ON(watch->parent);
+ WARN_ON(!list_empty(&watch->rules));
+ kfree(watch->path);
+ kfree(watch);
+ }
+}
+
+void audit_remove_watch(struct audit_watch *watch)
+{
+ list_del(&watch->wlist);
+ put_inotify_watch(&watch->parent->wdata);
+ watch->parent = NULL;
+ audit_put_watch(watch); /* match initial get */
+}
+
+char *audit_watch_path(struct audit_watch *watch)
+{
+ return watch->path;
+}
+
+struct list_head *audit_watch_rules(struct audit_watch *watch)
+{
+ return &watch->rules;
+}
+
+unsigned long audit_watch_inode(struct audit_watch *watch)
+{
+ return watch->ino;
+}
+
+dev_t audit_watch_dev(struct audit_watch *watch)
+{
+ return watch->dev;
+}
+
+/* Initialize a parent watch entry. */
+static struct audit_parent *audit_init_parent(struct nameidata *ndp)
+{
+ struct audit_parent *parent;
+ s32 wd;
+
+ parent = kzalloc(sizeof(*parent), GFP_KERNEL);
+ if (unlikely(!parent))
+ return ERR_PTR(-ENOMEM);
+
+ INIT_LIST_HEAD(&parent->watches);
+ parent->flags = 0;
+
+ inotify_init_watch(&parent->wdata);
+ /* grab a ref so inotify watch hangs around until we take audit_filter_mutex */
+ get_inotify_watch(&parent->wdata);
+ wd = inotify_add_watch(audit_ih, &parent->wdata,
+ ndp->path.dentry->d_inode, AUDIT_IN_WATCH);
+ if (wd < 0) {
+ audit_free_parent(&parent->wdata);
+ return ERR_PTR(wd);
+ }
+
+ return parent;
+}
+
+/* Initialize a watch entry. */
+static struct audit_watch *audit_init_watch(char *path)
+{
+ struct audit_watch *watch;
+
+ watch = kzalloc(sizeof(*watch), GFP_KERNEL);
+ if (unlikely(!watch))
+ return ERR_PTR(-ENOMEM);
+
+ INIT_LIST_HEAD(&watch->rules);
+ atomic_set(&watch->count, 1);
+ watch->path = path;
+ watch->dev = (dev_t)-1;
+ watch->ino = (unsigned long)-1;
+
+ return watch;
+}
+
+/* Translate a watch string to kernel representation. */
+int audit_to_watch(struct audit_krule *krule, char *path, int len, u32 op)
+{
+ struct audit_watch *watch;
+
+ if (!audit_ih)
+ return -EOPNOTSUPP;
+
+ if (path[0] != '/' || path[len-1] == '/' ||
+ krule->listnr != AUDIT_FILTER_EXIT ||
+ op != Audit_equal ||
+ krule->inode_f || krule->watch || krule->tree)
+ return -EINVAL;
+
+ watch = audit_init_watch(path);
+ if (IS_ERR(watch))
+ return PTR_ERR(watch);
+
+ audit_get_watch(watch);
+ krule->watch = watch;
+
+ return 0;
+}
+
+/* Duplicate the given audit watch. The new watch's rules list is initialized
+ * to an empty list and wlist is undefined. */
+static struct audit_watch *audit_dupe_watch(struct audit_watch *old)
+{
+ char *path;
+ struct audit_watch *new;
+
+ path = kstrdup(old->path, GFP_KERNEL);
+ if (unlikely(!path))
+ return ERR_PTR(-ENOMEM);
+
+ new = audit_init_watch(path);
+ if (IS_ERR(new)) {
+ kfree(path);
+ goto out;
+ }
+
+ new->dev = old->dev;
+ new->ino = old->ino;
+ get_inotify_watch(&old->parent->wdata);
+ new->parent = old->parent;
+
+out:
+ return new;
+}
+
+static void audit_watch_log_rule_change(struct audit_krule *r, struct audit_watch *w, char *op)
+{
+ if (audit_enabled) {
+ struct audit_buffer *ab;
+ ab = audit_log_start(NULL, GFP_NOFS, AUDIT_CONFIG_CHANGE);
+ audit_log_format(ab, "auid=%u ses=%u op=",
+ audit_get_loginuid(current),
+ audit_get_sessionid(current));
+ audit_log_string(ab, op);
+ audit_log_format(ab, " path=");
+ audit_log_untrustedstring(ab, w->path);
+ audit_log_key(ab, r->filterkey);
+ audit_log_format(ab, " list=%d res=1", r->listnr);
+ audit_log_end(ab);
+ }
+}
+
+/* Update inode info in audit rules based on filesystem event. */
+static void audit_update_watch(struct audit_parent *parent,
+ const char *dname, dev_t dev,
+ unsigned long ino, unsigned invalidating)
+{
+ struct audit_watch *owatch, *nwatch, *nextw;
+ struct audit_krule *r, *nextr;
+ struct audit_entry *oentry, *nentry;
+
+ mutex_lock(&audit_filter_mutex);
+ list_for_each_entry_safe(owatch, nextw, &parent->watches, wlist) {
+ if (audit_compare_dname_path(dname, owatch->path, NULL))
+ continue;
+
+ /* If the update involves invalidating rules, do the inode-based
+ * filtering now, so we don't omit records. */
+ if (invalidating && current->audit_context)
+ audit_filter_inodes(current, current->audit_context);
+
+ nwatch = audit_dupe_watch(owatch);
+ if (IS_ERR(nwatch)) {
+ mutex_unlock(&audit_filter_mutex);
+ audit_panic("error updating watch, skipping");
+ return;
+ }
+ nwatch->dev = dev;
+ nwatch->ino = ino;
+
+ list_for_each_entry_safe(r, nextr, &owatch->rules, rlist) {
+
+ oentry = container_of(r, struct audit_entry, rule);
+ list_del(&oentry->rule.rlist);
+ list_del_rcu(&oentry->list);
+
+ nentry = audit_dupe_rule(&oentry->rule, nwatch);
+ if (IS_ERR(nentry)) {
+ list_del(&oentry->rule.list);
+ audit_panic("error updating watch, removing");
+ } else {
+ int h = audit_hash_ino((u32)ino);
+ list_add(&nentry->rule.rlist, &nwatch->rules);
+ list_add_rcu(&nentry->list, &audit_inode_hash[h]);
+ list_replace(&oentry->rule.list,
+ &nentry->rule.list);
+ }
+
+ audit_watch_log_rule_change(r, owatch, "updated rules");
+
+ call_rcu(&oentry->rcu, audit_free_rule_rcu);
+ }
+
+ audit_remove_watch(owatch);
+ goto add_watch_to_parent; /* event applies to a single watch */
+ }
+ mutex_unlock(&audit_filter_mutex);
+ return;
+
+add_watch_to_parent:
+ list_add(&nwatch->wlist, &parent->watches);
+ mutex_unlock(&audit_filter_mutex);
+ return;
+}
+
+/* Remove all watches & rules associated with a parent that is going away. */
+static void audit_remove_parent_watches(struct audit_parent *parent)
+{
+ struct audit_watch *w, *nextw;
+ struct audit_krule *r, *nextr;
+ struct audit_entry *e;
+
+ mutex_lock(&audit_filter_mutex);
+ parent->flags |= AUDIT_PARENT_INVALID;
+ list_for_each_entry_safe(w, nextw, &parent->watches, wlist) {
+ list_for_each_entry_safe(r, nextr, &w->rules, rlist) {
+ e = container_of(r, struct audit_entry, rule);
+ audit_watch_log_rule_change(r, w, "remove rule");
+ list_del(&r->rlist);
+ list_del(&r->list);
+ list_del_rcu(&e->list);
+ call_rcu(&e->rcu, audit_free_rule_rcu);
+ }
+ audit_remove_watch(w);
+ }
+ mutex_unlock(&audit_filter_mutex);
+}
+
+/* Unregister inotify watches for parents on in_list.
+ * Generates an IN_IGNORED event. */
+void audit_inotify_unregister(struct list_head *in_list)
+{
+ struct audit_parent *p, *n;
+
+ list_for_each_entry_safe(p, n, in_list, ilist) {
+ list_del(&p->ilist);
+ inotify_rm_watch(audit_ih, &p->wdata);
+ /* the unpin matching the pin in audit_do_del_rule() */
+ unpin_inotify_watch(&p->wdata);
+ }
+}
+
+/* Get path information necessary for adding watches. */
+static int audit_get_nd(char *path, struct nameidata **ndp, struct nameidata **ndw)
+{
+ struct nameidata *ndparent, *ndwatch;
+ int err;
+
+ ndparent = kmalloc(sizeof(*ndparent), GFP_KERNEL);
+ if (unlikely(!ndparent))
+ return -ENOMEM;
+
+ ndwatch = kmalloc(sizeof(*ndwatch), GFP_KERNEL);
+ if (unlikely(!ndwatch)) {
+ kfree(ndparent);
+ return -ENOMEM;
+ }
+
+ err = path_lookup(path, LOOKUP_PARENT, ndparent);
+ if (err) {
+ kfree(ndparent);
+ kfree(ndwatch);
+ return err;
+ }
+
+ err = path_lookup(path, 0, ndwatch);
+ if (err) {
+ kfree(ndwatch);
+ ndwatch = NULL;
+ }
+
+ *ndp = ndparent;
+ *ndw = ndwatch;
+
+ return 0;
+}
+
+/* Release resources used for watch path information. */
+static void audit_put_nd(struct nameidata *ndp, struct nameidata *ndw)
+{
+ if (ndp) {
+ path_put(&ndp->path);
+ kfree(ndp);
+ }
+ if (ndw) {
+ path_put(&ndw->path);
+ kfree(ndw);
+ }
+}
+
+/* Associate the given rule with an existing parent inotify_watch.
+ * Caller must hold audit_filter_mutex. */
+static void audit_add_to_parent(struct audit_krule *krule,
+ struct audit_parent *parent)
+{
+ struct audit_watch *w, *watch = krule->watch;
+ int watch_found = 0;
+
+ list_for_each_entry(w, &parent->watches, wlist) {
+ if (strcmp(watch->path, w->path))
+ continue;
+
+ watch_found = 1;
+
+ /* put krule's and initial refs to temporary watch */
+ audit_put_watch(watch);
+ audit_put_watch(watch);
+
+ audit_get_watch(w);
+ krule->watch = watch = w;
+ break;
+ }
+
+ if (!watch_found) {
+ get_inotify_watch(&parent->wdata);
+ watch->parent = parent;
+
+ list_add(&watch->wlist, &parent->watches);
+ }
+ list_add(&krule->rlist, &watch->rules);
+}
+
+/* Find a matching watch entry, or add this one.
+ * Caller must hold audit_filter_mutex. */
+int audit_add_watch(struct audit_krule *krule)
+{
+ struct audit_watch *watch = krule->watch;
+ struct inotify_watch *i_watch;
+ struct audit_parent *parent;
+ struct nameidata *ndp = NULL, *ndw = NULL;
+ int ret = 0;
+
+ mutex_unlock(&audit_filter_mutex);
+
+ /* Avoid calling path_lookup under audit_filter_mutex. */
+ ret = audit_get_nd(watch->path, &ndp, &ndw);
+ if (ret) {
+ /* caller expects mutex locked */
+ mutex_lock(&audit_filter_mutex);
+ goto error;
+ }
+
+ /* update watch filter fields */
+ if (ndw) {
+ watch->dev = ndw->path.dentry->d_inode->i_sb->s_dev;
+ watch->ino = ndw->path.dentry->d_inode->i_ino;
+ }
+
+ /* The audit_filter_mutex must not be held during inotify calls because
+ * we hold it during inotify event callback processing. If an existing
+ * inotify watch is found, inotify_find_watch() grabs a reference before
+ * returning.
+ */
+ if (inotify_find_watch(audit_ih, ndp->path.dentry->d_inode,
+ &i_watch) < 0) {
+ parent = audit_init_parent(ndp);
+ if (IS_ERR(parent)) {
+ /* caller expects mutex locked */
+ mutex_lock(&audit_filter_mutex);
+ ret = PTR_ERR(parent);
+ goto error;
+ }
+ } else
+ parent = container_of(i_watch, struct audit_parent, wdata);
+
+ mutex_lock(&audit_filter_mutex);
+
+ /* parent was moved before we took audit_filter_mutex */
+ if (parent->flags & AUDIT_PARENT_INVALID)
+ ret = -ENOENT;
+ else
+ audit_add_to_parent(krule, parent);
+
+ /* match get in audit_init_parent or inotify_find_watch */
+ put_inotify_watch(&parent->wdata);
+
+error:
+ audit_put_nd(ndp, ndw); /* NULL args OK */
+ return ret;
+
+}
+
+void audit_remove_watch_rule(struct audit_krule *krule, struct list_head *list)
+{
+ struct audit_watch *watch = krule->watch;
+ struct audit_parent *parent = watch->parent;
+
+ list_del(&krule->rlist);
+
+ if (list_empty(&watch->rules)) {
+ audit_remove_watch(watch);
+
+ if (list_empty(&parent->watches)) {
+ /* Put parent on the inotify un-registration
+ * list. Grab a reference before releasing
+ * audit_filter_mutex, to be released in
+ * audit_inotify_unregister().
+ * If filesystem is going away, just leave
+ * the sucker alone, eviction will take
+ * care of it. */
+ if (pin_inotify_watch(&parent->wdata))
+ list_add(&parent->ilist, list);
+ }
+ }
+}
+
+/* Update watch data in audit rules based on inotify events. */
+static void audit_handle_ievent(struct inotify_watch *i_watch, u32 wd, u32 mask,
+ u32 cookie, const char *dname, struct inode *inode)
+{
+ struct audit_parent *parent;
+
+ parent = container_of(i_watch, struct audit_parent, wdata);
+
+ if (mask & (IN_CREATE|IN_MOVED_TO) && inode)
+ audit_update_watch(parent, dname, inode->i_sb->s_dev,
+ inode->i_ino, 0);
+ else if (mask & (IN_DELETE|IN_MOVED_FROM))
+ audit_update_watch(parent, dname, (dev_t)-1, (unsigned long)-1, 1);
+ /* inotify automatically removes the watch and sends IN_IGNORED */
+ else if (mask & (IN_DELETE_SELF|IN_UNMOUNT))
+ audit_remove_parent_watches(parent);
+ /* inotify does not remove the watch, so remove it manually */
+ else if (mask & IN_MOVE_SELF) {
+ audit_remove_parent_watches(parent);
+ inotify_remove_watch_locked(audit_ih, i_watch);
+ } else if (mask & IN_IGNORED)
+ put_inotify_watch(i_watch);
+}
+
+static const struct inotify_operations audit_inotify_ops = {
+ .handle_event = audit_handle_ievent,
+ .destroy_watch = audit_free_parent,
+};
+
+static int __init audit_watch_init(void)
+{
+ audit_ih = inotify_init(&audit_inotify_ops);
+ if (IS_ERR(audit_ih))
+ audit_panic("cannot initialize inotify handle");
+ return 0;
+}
+subsys_initcall(audit_watch_init);
diff --git a/kernel/auditfilter.c b/kernel/auditfilter.c
index 713098e..a706040 100644
--- a/kernel/auditfilter.c
+++ b/kernel/auditfilter.c
@@ -27,7 +27,6 @@
#include <linux/namei.h>
#include <linux/netlink.h>
#include <linux/sched.h>
-#include <linux/inotify.h>
#include <linux/security.h>
#include "audit.h"
@@ -44,36 +43,6 @@
* be written directly provided audit_filter_mutex is held.
*/
-/*
- * Reference counting:
- *
- * audit_parent: lifetime is from audit_init_parent() to receipt of an IN_IGNORED
- * event. Each audit_watch holds a reference to its associated parent.
- *
- * audit_watch: if added to lists, lifetime is from audit_init_watch() to
- * audit_remove_watch(). Additionally, an audit_watch may exist
- * temporarily to assist in searching existing filter data. Each
- * audit_krule holds a reference to its associated watch.
- */
-
-struct audit_parent {
- struct list_head ilist; /* entry in inotify registration list */
- struct list_head watches; /* associated watches */
- struct inotify_watch wdata; /* inotify watch data */
- unsigned flags; /* status flags */
-};
-
-/*
- * audit_parent status flags:
- *
- * AUDIT_PARENT_INVALID - set anytime rules/watches are auto-removed due to
- * a filesystem event to ensure we're adding audit watches to a valid parent.
- * Technically not needed for IN_DELETE_SELF or IN_UNMOUNT events, as we cannot
- * receive them while we have nameidata, but must be used for IN_MOVE_SELF which
- * we can receive while holding nameidata.
- */
-#define AUDIT_PARENT_INVALID 0x001
-
/* Audit filter lists, defined in <linux/audit.h> */
struct list_head audit_filter_list[AUDIT_NR_FILTERS] = {
LIST_HEAD_INIT(audit_filter_list[0]),
@@ -97,41 +66,6 @@
DEFINE_MUTEX(audit_filter_mutex);
-/* Inotify events we care about. */
-#define AUDIT_IN_WATCH IN_MOVE|IN_CREATE|IN_DELETE|IN_DELETE_SELF|IN_MOVE_SELF
-
-void audit_free_parent(struct inotify_watch *i_watch)
-{
- struct audit_parent *parent;
-
- parent = container_of(i_watch, struct audit_parent, wdata);
- WARN_ON(!list_empty(&parent->watches));
- kfree(parent);
-}
-
-static inline void audit_get_watch(struct audit_watch *watch)
-{
- atomic_inc(&watch->count);
-}
-
-static void audit_put_watch(struct audit_watch *watch)
-{
- if (atomic_dec_and_test(&watch->count)) {
- WARN_ON(watch->parent);
- WARN_ON(!list_empty(&watch->rules));
- kfree(watch->path);
- kfree(watch);
- }
-}
-
-static void audit_remove_watch(struct audit_watch *watch)
-{
- list_del(&watch->wlist);
- put_inotify_watch(&watch->parent->wdata);
- watch->parent = NULL;
- audit_put_watch(watch); /* match initial get */
-}
-
static inline void audit_free_rule(struct audit_entry *e)
{
int i;
@@ -156,50 +90,6 @@
audit_free_rule(e);
}
-/* Initialize a parent watch entry. */
-static struct audit_parent *audit_init_parent(struct nameidata *ndp)
-{
- struct audit_parent *parent;
- s32 wd;
-
- parent = kzalloc(sizeof(*parent), GFP_KERNEL);
- if (unlikely(!parent))
- return ERR_PTR(-ENOMEM);
-
- INIT_LIST_HEAD(&parent->watches);
- parent->flags = 0;
-
- inotify_init_watch(&parent->wdata);
- /* grab a ref so inotify watch hangs around until we take audit_filter_mutex */
- get_inotify_watch(&parent->wdata);
- wd = inotify_add_watch(audit_ih, &parent->wdata,
- ndp->path.dentry->d_inode, AUDIT_IN_WATCH);
- if (wd < 0) {
- audit_free_parent(&parent->wdata);
- return ERR_PTR(wd);
- }
-
- return parent;
-}
-
-/* Initialize a watch entry. */
-static struct audit_watch *audit_init_watch(char *path)
-{
- struct audit_watch *watch;
-
- watch = kzalloc(sizeof(*watch), GFP_KERNEL);
- if (unlikely(!watch))
- return ERR_PTR(-ENOMEM);
-
- INIT_LIST_HEAD(&watch->rules);
- atomic_set(&watch->count, 1);
- watch->path = path;
- watch->dev = (dev_t)-1;
- watch->ino = (unsigned long)-1;
-
- return watch;
-}
-
/* Initialize an audit filterlist entry. */
static inline struct audit_entry *audit_init_entry(u32 field_count)
{
@@ -260,31 +150,6 @@
return 0;
}
-/* Translate a watch string to kernel respresentation. */
-static int audit_to_watch(struct audit_krule *krule, char *path, int len,
- u32 op)
-{
- struct audit_watch *watch;
-
- if (!audit_ih)
- return -EOPNOTSUPP;
-
- if (path[0] != '/' || path[len-1] == '/' ||
- krule->listnr != AUDIT_FILTER_EXIT ||
- op != Audit_equal ||
- krule->inode_f || krule->watch || krule->tree)
- return -EINVAL;
-
- watch = audit_init_watch(path);
- if (IS_ERR(watch))
- return PTR_ERR(watch);
-
- audit_get_watch(watch);
- krule->watch = watch;
-
- return 0;
-}
-
static __u32 *classes[AUDIT_SYSCALL_CLASSES];
int __init audit_register_class(int class, unsigned *list)
@@ -766,7 +631,8 @@
break;
case AUDIT_WATCH:
data->buflen += data->values[i] =
- audit_pack_string(&bufp, krule->watch->path);
+ audit_pack_string(&bufp,
+ audit_watch_path(krule->watch));
break;
case AUDIT_DIR:
data->buflen += data->values[i] =
@@ -818,7 +684,8 @@
return 1;
break;
case AUDIT_WATCH:
- if (strcmp(a->watch->path, b->watch->path))
+ if (strcmp(audit_watch_path(a->watch),
+ audit_watch_path(b->watch)))
return 1;
break;
case AUDIT_DIR:
@@ -844,32 +711,6 @@
return 0;
}
-/* Duplicate the given audit watch. The new watch's rules list is initialized
- * to an empty list and wlist is undefined. */
-static struct audit_watch *audit_dupe_watch(struct audit_watch *old)
-{
- char *path;
- struct audit_watch *new;
-
- path = kstrdup(old->path, GFP_KERNEL);
- if (unlikely(!path))
- return ERR_PTR(-ENOMEM);
-
- new = audit_init_watch(path);
- if (IS_ERR(new)) {
- kfree(path);
- goto out;
- }
-
- new->dev = old->dev;
- new->ino = old->ino;
- get_inotify_watch(&old->parent->wdata);
- new->parent = old->parent;
-
-out:
- return new;
-}
-
/* Duplicate LSM field information. The lsm_rule is opaque, so must be
* re-initialized. */
static inline int audit_dupe_lsm_field(struct audit_field *df,
@@ -904,8 +745,8 @@
* rule with the new rule in the filterlist, then free the old rule.
* The rlist element is undefined; list manipulations are handled apart from
* the initial copy. */
-static struct audit_entry *audit_dupe_rule(struct audit_krule *old,
- struct audit_watch *watch)
+struct audit_entry *audit_dupe_rule(struct audit_krule *old,
+ struct audit_watch *watch)
{
u32 fcount = old->field_count;
struct audit_entry *entry;
@@ -977,137 +818,6 @@
return entry;
}
-/* Update inode info in audit rules based on filesystem event. */
-static void audit_update_watch(struct audit_parent *parent,
- const char *dname, dev_t dev,
- unsigned long ino, unsigned invalidating)
-{
- struct audit_watch *owatch, *nwatch, *nextw;
- struct audit_krule *r, *nextr;
- struct audit_entry *oentry, *nentry;
-
- mutex_lock(&audit_filter_mutex);
- list_for_each_entry_safe(owatch, nextw, &parent->watches, wlist) {
- if (audit_compare_dname_path(dname, owatch->path, NULL))
- continue;
-
- /* If the update involves invalidating rules, do the inode-based
- * filtering now, so we don't omit records. */
- if (invalidating && current->audit_context)
- audit_filter_inodes(current, current->audit_context);
-
- nwatch = audit_dupe_watch(owatch);
- if (IS_ERR(nwatch)) {
- mutex_unlock(&audit_filter_mutex);
- audit_panic("error updating watch, skipping");
- return;
- }
- nwatch->dev = dev;
- nwatch->ino = ino;
-
- list_for_each_entry_safe(r, nextr, &owatch->rules, rlist) {
-
- oentry = container_of(r, struct audit_entry, rule);
- list_del(&oentry->rule.rlist);
- list_del_rcu(&oentry->list);
-
- nentry = audit_dupe_rule(&oentry->rule, nwatch);
- if (IS_ERR(nentry)) {
- list_del(&oentry->rule.list);
- audit_panic("error updating watch, removing");
- } else {
- int h = audit_hash_ino((u32)ino);
- list_add(&nentry->rule.rlist, &nwatch->rules);
- list_add_rcu(&nentry->list, &audit_inode_hash[h]);
- list_replace(&oentry->rule.list,
- &nentry->rule.list);
- }
-
- call_rcu(&oentry->rcu, audit_free_rule_rcu);
- }
-
- if (audit_enabled) {
- struct audit_buffer *ab;
- ab = audit_log_start(NULL, GFP_NOFS,
- AUDIT_CONFIG_CHANGE);
- audit_log_format(ab, "auid=%u ses=%u",
- audit_get_loginuid(current),
- audit_get_sessionid(current));
- audit_log_format(ab,
- " op=updated rules specifying path=");
- audit_log_untrustedstring(ab, owatch->path);
- audit_log_format(ab, " with dev=%u ino=%lu\n",
- dev, ino);
- audit_log_format(ab, " list=%d res=1", r->listnr);
- audit_log_end(ab);
- }
- audit_remove_watch(owatch);
- goto add_watch_to_parent; /* event applies to a single watch */
- }
- mutex_unlock(&audit_filter_mutex);
- return;
-
-add_watch_to_parent:
- list_add(&nwatch->wlist, &parent->watches);
- mutex_unlock(&audit_filter_mutex);
- return;
-}
-
-/* Remove all watches & rules associated with a parent that is going away. */
-static void audit_remove_parent_watches(struct audit_parent *parent)
-{
- struct audit_watch *w, *nextw;
- struct audit_krule *r, *nextr;
- struct audit_entry *e;
-
- mutex_lock(&audit_filter_mutex);
- parent->flags |= AUDIT_PARENT_INVALID;
- list_for_each_entry_safe(w, nextw, &parent->watches, wlist) {
- list_for_each_entry_safe(r, nextr, &w->rules, rlist) {
- e = container_of(r, struct audit_entry, rule);
- if (audit_enabled) {
- struct audit_buffer *ab;
- ab = audit_log_start(NULL, GFP_NOFS,
- AUDIT_CONFIG_CHANGE);
- audit_log_format(ab, "auid=%u ses=%u",
- audit_get_loginuid(current),
- audit_get_sessionid(current));
- audit_log_format(ab, " op=remove rule path=");
- audit_log_untrustedstring(ab, w->path);
- if (r->filterkey) {
- audit_log_format(ab, " key=");
- audit_log_untrustedstring(ab,
- r->filterkey);
- } else
- audit_log_format(ab, " key=(null)");
- audit_log_format(ab, " list=%d res=1",
- r->listnr);
- audit_log_end(ab);
- }
- list_del(&r->rlist);
- list_del(&r->list);
- list_del_rcu(&e->list);
- call_rcu(&e->rcu, audit_free_rule_rcu);
- }
- audit_remove_watch(w);
- }
- mutex_unlock(&audit_filter_mutex);
-}
-
-/* Unregister inotify watches for parents on in_list.
- * Generates an IN_IGNORED event. */
-static void audit_inotify_unregister(struct list_head *in_list)
-{
- struct audit_parent *p, *n;
-
- list_for_each_entry_safe(p, n, in_list, ilist) {
- list_del(&p->ilist);
- inotify_rm_watch(audit_ih, &p->wdata);
- /* the unpin matching the pin in audit_do_del_rule() */
- unpin_inotify_watch(&p->wdata);
- }
-}
-
/* Find an existing audit rule.
* Caller must hold audit_filter_mutex to prevent stale rule data. */
static struct audit_entry *audit_find_rule(struct audit_entry *entry,
@@ -1145,134 +855,6 @@
return found;
}
-/* Get path information necessary for adding watches. */
-static int audit_get_nd(char *path, struct nameidata **ndp,
- struct nameidata **ndw)
-{
- struct nameidata *ndparent, *ndwatch;
- int err;
-
- ndparent = kmalloc(sizeof(*ndparent), GFP_KERNEL);
- if (unlikely(!ndparent))
- return -ENOMEM;
-
- ndwatch = kmalloc(sizeof(*ndwatch), GFP_KERNEL);
- if (unlikely(!ndwatch)) {
- kfree(ndparent);
- return -ENOMEM;
- }
-
- err = path_lookup(path, LOOKUP_PARENT, ndparent);
- if (err) {
- kfree(ndparent);
- kfree(ndwatch);
- return err;
- }
-
- err = path_lookup(path, 0, ndwatch);
- if (err) {
- kfree(ndwatch);
- ndwatch = NULL;
- }
-
- *ndp = ndparent;
- *ndw = ndwatch;
-
- return 0;
-}
-
-/* Release resources used for watch path information. */
-static void audit_put_nd(struct nameidata *ndp, struct nameidata *ndw)
-{
- if (ndp) {
- path_put(&ndp->path);
- kfree(ndp);
- }
- if (ndw) {
- path_put(&ndw->path);
- kfree(ndw);
- }
-}
-
-/* Associate the given rule with an existing parent inotify_watch.
- * Caller must hold audit_filter_mutex. */
-static void audit_add_to_parent(struct audit_krule *krule,
- struct audit_parent *parent)
-{
- struct audit_watch *w, *watch = krule->watch;
- int watch_found = 0;
-
- list_for_each_entry(w, &parent->watches, wlist) {
- if (strcmp(watch->path, w->path))
- continue;
-
- watch_found = 1;
-
- /* put krule's and initial refs to temporary watch */
- audit_put_watch(watch);
- audit_put_watch(watch);
-
- audit_get_watch(w);
- krule->watch = watch = w;
- break;
- }
-
- if (!watch_found) {
- get_inotify_watch(&parent->wdata);
- watch->parent = parent;
-
- list_add(&watch->wlist, &parent->watches);
- }
- list_add(&krule->rlist, &watch->rules);
-}
-
-/* Find a matching watch entry, or add this one.
- * Caller must hold audit_filter_mutex. */
-static int audit_add_watch(struct audit_krule *krule, struct nameidata *ndp,
- struct nameidata *ndw)
-{
- struct audit_watch *watch = krule->watch;
- struct inotify_watch *i_watch;
- struct audit_parent *parent;
- int ret = 0;
-
- /* update watch filter fields */
- if (ndw) {
- watch->dev = ndw->path.dentry->d_inode->i_sb->s_dev;
- watch->ino = ndw->path.dentry->d_inode->i_ino;
- }
-
- /* The audit_filter_mutex must not be held during inotify calls because
- * we hold it during inotify event callback processing. If an existing
- * inotify watch is found, inotify_find_watch() grabs a reference before
- * returning.
- */
- mutex_unlock(&audit_filter_mutex);
-
- if (inotify_find_watch(audit_ih, ndp->path.dentry->d_inode,
- &i_watch) < 0) {
- parent = audit_init_parent(ndp);
- if (IS_ERR(parent)) {
- /* caller expects mutex locked */
- mutex_lock(&audit_filter_mutex);
- return PTR_ERR(parent);
- }
- } else
- parent = container_of(i_watch, struct audit_parent, wdata);
-
- mutex_lock(&audit_filter_mutex);
-
- /* parent was moved before we took audit_filter_mutex */
- if (parent->flags & AUDIT_PARENT_INVALID)
- ret = -ENOENT;
- else
- audit_add_to_parent(krule, parent);
-
- /* match get in audit_init_parent or inotify_find_watch */
- put_inotify_watch(&parent->wdata);
- return ret;
-}
-
static u64 prio_low = ~0ULL/2;
static u64 prio_high = ~0ULL/2 - 1;
@@ -1282,7 +864,6 @@
struct audit_entry *e;
struct audit_watch *watch = entry->rule.watch;
struct audit_tree *tree = entry->rule.tree;
- struct nameidata *ndp = NULL, *ndw = NULL;
struct list_head *list;
int h, err;
#ifdef CONFIG_AUDITSYSCALL
@@ -1296,8 +877,8 @@
mutex_lock(&audit_filter_mutex);
e = audit_find_rule(entry, &list);
- mutex_unlock(&audit_filter_mutex);
if (e) {
+ mutex_unlock(&audit_filter_mutex);
err = -EEXIST;
/* normally audit_add_tree_rule() will free it on failure */
if (tree)
@@ -1305,22 +886,16 @@
goto error;
}
- /* Avoid calling path_lookup under audit_filter_mutex. */
- if (watch) {
- err = audit_get_nd(watch->path, &ndp, &ndw);
- if (err)
- goto error;
- }
-
- mutex_lock(&audit_filter_mutex);
if (watch) {
/* audit_filter_mutex is dropped and re-taken during this call */
- err = audit_add_watch(&entry->rule, ndp, ndw);
+ err = audit_add_watch(&entry->rule);
if (err) {
mutex_unlock(&audit_filter_mutex);
goto error;
}
- h = audit_hash_ino((u32)watch->ino);
+ /* entry->rule.watch may have changed during audit_add_watch() */
+ watch = entry->rule.watch;
+ h = audit_hash_ino((u32)audit_watch_inode(watch));
list = &audit_inode_hash[h];
}
if (tree) {
@@ -1358,11 +933,9 @@
#endif
mutex_unlock(&audit_filter_mutex);
- audit_put_nd(ndp, ndw); /* NULL args OK */
return 0;
error:
- audit_put_nd(ndp, ndw); /* NULL args OK */
if (watch)
audit_put_watch(watch); /* tmp watch, matches initial get */
return err;
@@ -1372,7 +945,7 @@
static inline int audit_del_rule(struct audit_entry *entry)
{
struct audit_entry *e;
- struct audit_watch *watch, *tmp_watch = entry->rule.watch;
+ struct audit_watch *watch = entry->rule.watch;
struct audit_tree *tree = entry->rule.tree;
struct list_head *list;
LIST_HEAD(inotify_list);
@@ -1394,29 +967,8 @@
goto out;
}
- watch = e->rule.watch;
- if (watch) {
- struct audit_parent *parent = watch->parent;
-
- list_del(&e->rule.rlist);
-
- if (list_empty(&watch->rules)) {
- audit_remove_watch(watch);
-
- if (list_empty(&parent->watches)) {
- /* Put parent on the inotify un-registration
- * list. Grab a reference before releasing
- * audit_filter_mutex, to be released in
- * audit_inotify_unregister().
- * If filesystem is going away, just leave
- * the sucker alone, eviction will take
- * care of it.
- */
- if (pin_inotify_watch(&parent->wdata))
- list_add(&parent->ilist, &inotify_list);
- }
- }
- }
+ if (e->rule.watch)
+ audit_remove_watch_rule(&e->rule, &inotify_list);
if (e->rule.tree)
audit_remove_tree_rule(&e->rule);
@@ -1438,8 +990,8 @@
audit_inotify_unregister(&inotify_list);
out:
- if (tmp_watch)
- audit_put_watch(tmp_watch); /* match initial get */
+ if (watch)
+ audit_put_watch(watch); /* match initial get */
if (tree)
audit_put_tree(tree); /* that's the temporary one */
@@ -1527,11 +1079,9 @@
security_release_secctx(ctx, len);
}
}
- audit_log_format(ab, " op=%s rule key=", action);
- if (rule->filterkey)
- audit_log_untrustedstring(ab, rule->filterkey);
- else
- audit_log_format(ab, "(null)");
+ audit_log_format(ab, " op=");
+ audit_log_string(ab, action);
+ audit_log_key(ab, rule->filterkey);
audit_log_format(ab, " list=%d res=%d", rule->listnr, res);
audit_log_end(ab);
}
@@ -1595,7 +1145,7 @@
return PTR_ERR(entry);
err = audit_add_rule(entry);
- audit_log_rule_change(loginuid, sessionid, sid, "add",
+ audit_log_rule_change(loginuid, sessionid, sid, "add rule",
&entry->rule, !err);
if (err)
@@ -1611,7 +1161,7 @@
return PTR_ERR(entry);
err = audit_del_rule(entry);
- audit_log_rule_change(loginuid, sessionid, sid, "remove",
+ audit_log_rule_change(loginuid, sessionid, sid, "remove rule",
&entry->rule, !err);
audit_free_rule(entry);
@@ -1793,7 +1343,7 @@
list_del(&r->list);
} else {
if (watch) {
- list_add(&nentry->rule.rlist, &watch->rules);
+ list_add(&nentry->rule.rlist, audit_watch_rules(watch));
list_del(&r->rlist);
} else if (tree)
list_replace_init(&r->rlist, &nentry->rule.rlist);
@@ -1829,27 +1379,3 @@
return err;
}
-
-/* Update watch data in audit rules based on inotify events. */
-void audit_handle_ievent(struct inotify_watch *i_watch, u32 wd, u32 mask,
- u32 cookie, const char *dname, struct inode *inode)
-{
- struct audit_parent *parent;
-
- parent = container_of(i_watch, struct audit_parent, wdata);
-
- if (mask & (IN_CREATE|IN_MOVED_TO) && inode)
- audit_update_watch(parent, dname, inode->i_sb->s_dev,
- inode->i_ino, 0);
- else if (mask & (IN_DELETE|IN_MOVED_FROM))
- audit_update_watch(parent, dname, (dev_t)-1, (unsigned long)-1, 1);
- /* inotify automatically removes the watch and sends IN_IGNORED */
- else if (mask & (IN_DELETE_SELF|IN_UNMOUNT))
- audit_remove_parent_watches(parent);
- /* inotify does not remove the watch, so remove it manually */
- else if(mask & IN_MOVE_SELF) {
- audit_remove_parent_watches(parent);
- inotify_remove_watch_locked(audit_ih, i_watch);
- } else if (mask & IN_IGNORED)
- put_inotify_watch(i_watch);
-}
diff --git a/kernel/auditsc.c b/kernel/auditsc.c
index 7d6ac7c..68d3c6a 100644
--- a/kernel/auditsc.c
+++ b/kernel/auditsc.c
@@ -199,6 +199,7 @@
struct audit_tree_refs *trees, *first_trees;
int tree_count;
+ struct list_head killed_trees;
int type;
union {
@@ -548,9 +549,9 @@
}
break;
case AUDIT_WATCH:
- if (name && rule->watch->ino != (unsigned long)-1)
- result = (name->dev == rule->watch->dev &&
- name->ino == rule->watch->ino);
+ if (name && audit_watch_inode(rule->watch) != (unsigned long)-1)
+ result = (name->dev == audit_watch_dev(rule->watch) &&
+ name->ino == audit_watch_inode(rule->watch));
break;
case AUDIT_DIR:
if (ctx)
@@ -853,6 +854,7 @@
if (!(context = kmalloc(sizeof(*context), GFP_KERNEL)))
return NULL;
audit_zero_context(context, state);
+ INIT_LIST_HEAD(&context->killed_trees);
return context;
}
@@ -1024,8 +1026,8 @@
{
char arg_num_len_buf[12];
const char __user *tmp_p = p;
- /* how many digits are in arg_num? 3 is the length of " a=" */
- size_t arg_num_len = snprintf(arg_num_len_buf, 12, "%d", arg_num) + 3;
+ /* how many digits are in arg_num? 5 is the length of ' a=""' */
+ size_t arg_num_len = snprintf(arg_num_len_buf, 12, "%d", arg_num) + 5;
size_t len, len_left, to_send;
size_t max_execve_audit_len = MAX_EXECVE_AUDIT_LEN;
unsigned int i, has_cntl = 0, too_long = 0;
@@ -1137,7 +1139,7 @@
if (has_cntl)
audit_log_n_hex(*ab, buf, to_send);
else
- audit_log_format(*ab, "\"%s\"", buf);
+ audit_log_string(*ab, buf);
p += to_send;
len_left -= to_send;
@@ -1372,11 +1374,7 @@
audit_log_task_info(ab, tsk);
- if (context->filterkey) {
- audit_log_format(ab, " key=");
- audit_log_untrustedstring(ab, context->filterkey);
- } else
- audit_log_format(ab, " key=(null)");
+ audit_log_key(ab, context->filterkey);
audit_log_end(ab);
for (aux = context->aux; aux; aux = aux->next) {
@@ -1549,6 +1547,8 @@
/* that can happen only if we are called from do_exit() */
if (context->in_syscall && context->current_state == AUDIT_RECORD_CONTEXT)
audit_log_exit(context, tsk);
+ if (!list_empty(&context->killed_trees))
+ audit_kill_trees(&context->killed_trees);
audit_free_context(context);
}
@@ -1692,6 +1692,9 @@
context->in_syscall = 0;
context->prio = context->state == AUDIT_RECORD_CONTEXT ? ~0ULL : 0;
+ if (!list_empty(&context->killed_trees))
+ audit_kill_trees(&context->killed_trees);
+
if (context->previous) {
struct audit_context *new_context = context->previous;
context->previous = NULL;
@@ -2525,3 +2528,11 @@
audit_log_format(ab, " sig=%ld", signr);
audit_log_end(ab);
}
+
+struct list_head *audit_killed_trees(void)
+{
+ struct audit_context *ctx = current->audit_context;
+ if (likely(!ctx || !ctx->in_syscall))
+ return NULL;
+ return &ctx->killed_trees;
+}
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 395b697..8ce1004 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -34,14 +34,11 @@
* an ongoing cpu hotplug operation.
*/
int refcount;
-} cpu_hotplug;
-
-void __init cpu_hotplug_init(void)
-{
- cpu_hotplug.active_writer = NULL;
- mutex_init(&cpu_hotplug.lock);
- cpu_hotplug.refcount = 0;
-}
+} cpu_hotplug = {
+ .active_writer = NULL,
+ .lock = __MUTEX_INITIALIZER(cpu_hotplug.lock),
+ .refcount = 0,
+};
#ifdef CONFIG_HOTPLUG_CPU
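
The kernel/cpu.c hunk above replaces the runtime cpu_hotplug_init() call with a static initializer, so cpu_hotplug.lock is valid before any initcall runs. A minimal sketch of the same pattern, with illustrative names rather than anything from this patch:

#include <linux/mutex.h>
#include <linux/sched.h>

struct example_state {
	struct task_struct *active_writer;
	struct mutex lock;
	int refcount;
};

/* Statically initialized: usable from early boot, no init function needed. */
static struct example_state example_state = {
	.active_writer	= NULL,
	.lock		= __MUTEX_INITIALIZER(example_state.lock),
	.refcount	= 0,
};
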
diff --git a/kernel/futex.c b/kernel/futex.c
index 80b5ce7..794c862 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -284,6 +284,25 @@
drop_futex_key_refs(key);
}
+/*
+ * fault_in_user_writeable - fault in user address and verify RW access
+ * @uaddr: pointer to faulting user space address
+ *
+ * Slow path to fixup the fault we just took in the atomic write
+ * access to @uaddr.
+ *
+ * We have no generic implementation of a non destructive write to the
+ * user address. We know that we faulted in the atomic pagefault
+ * disabled section so we can as well avoid the #PF overhead by
+ * calling get_user_pages() right away.
+ */
+static int fault_in_user_writeable(u32 __user *uaddr)
+{
+ int ret = get_user_pages(current, current->mm, (unsigned long)uaddr,
+ 1, 1, 0, NULL, NULL);
+ return ret < 0 ? ret : 0;
+}
+
/**
* futex_top_waiter() - Return the highest priority waiter on a futex
* @hb: the hash bucket the futex_q's reside in
@@ -896,7 +915,6 @@
retry_private:
op_ret = futex_atomic_op_inuser(op, uaddr2);
if (unlikely(op_ret < 0)) {
- u32 dummy;
double_unlock_hb(hb1, hb2);
@@ -914,7 +932,7 @@
goto out_put_keys;
}
- ret = get_user(dummy, uaddr2);
+ ret = fault_in_user_writeable(uaddr2);
if (ret)
goto out_put_keys;
@@ -1204,7 +1222,7 @@
double_unlock_hb(hb1, hb2);
put_futex_key(fshared, &key2);
put_futex_key(fshared, &key1);
- ret = get_user(curval2, uaddr2);
+ ret = fault_in_user_writeable(uaddr2);
if (!ret)
goto retry;
goto out;
@@ -1482,7 +1500,7 @@
handle_fault:
spin_unlock(q->lock_ptr);
- ret = get_user(uval, uaddr);
+ ret = fault_in_user_writeable(uaddr);
spin_lock(q->lock_ptr);
@@ -1807,7 +1825,6 @@
{
struct hrtimer_sleeper timeout, *to = NULL;
struct futex_hash_bucket *hb;
- u32 uval;
struct futex_q q;
int res, ret;
@@ -1909,16 +1926,9 @@
return ret != -EINTR ? ret : -ERESTARTNOINTR;
uaddr_faulted:
- /*
- * We have to r/w *(int __user *)uaddr, and we have to modify it
- * atomically. Therefore, if we continue to fault after get_user()
- * below, we need to handle the fault ourselves, while still holding
- * the mmap_sem. This can occur if the uaddr is under contention as
- * we have to drop the mmap_sem in order to call get_user().
- */
queue_unlock(&q, hb);
- ret = get_user(uval, uaddr);
+ ret = fault_in_user_writeable(uaddr);
if (ret)
goto out_put_key;
@@ -2013,17 +2023,10 @@
return ret;
pi_faulted:
- /*
- * We have to r/w *(int __user *)uaddr, and we have to modify it
- * atomically. Therefore, if we continue to fault after get_user()
- * below, we need to handle the fault ourselves, while still holding
- * the mmap_sem. This can occur if the uaddr is under contention as
- * we have to drop the mmap_sem in order to call get_user().
- */
spin_unlock(&hb->lock);
put_futex_key(fshared, &key);
- ret = get_user(uval, uaddr);
+ ret = fault_in_user_writeable(uaddr);
if (!ret)
goto retry;
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index fba42ed..98e0232 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -335,7 +335,10 @@
.data = &sysctl_timer_migration,
.maxlen = sizeof(unsigned int),
.mode = 0644,
- .proc_handler = &proc_dointvec,
+ .proc_handler = &proc_dointvec_minmax,
+ .strategy = &sysctl_intvec,
+ .extra1 = &zero,
+ .extra2 = &one,
},
#endif
{
diff --git a/kernel/time/timer_stats.c b/kernel/time/timer_stats.c
index c994530..4cde8b9 100644
--- a/kernel/time/timer_stats.c
+++ b/kernel/time/timer_stats.c
@@ -96,7 +96,7 @@
/*
* Collection status, active/inactive:
*/
-static int __read_mostly active;
+int __read_mostly timer_stats_active;
/*
* Beginning/end timestamps of measurement:
@@ -242,7 +242,7 @@
struct entry *entry, input;
unsigned long flags;
- if (likely(!active))
+ if (likely(!timer_stats_active))
return;
lock = &per_cpu(lookup_lock, raw_smp_processor_id());
@@ -254,7 +254,7 @@
input.timer_flag = timer_flag;
spin_lock_irqsave(lock, flags);
- if (!active)
+ if (!timer_stats_active)
goto out_unlock;
entry = tstat_lookup(&input, comm);
@@ -290,7 +290,7 @@
/*
* If still active then calculate up to now:
*/
- if (active)
+ if (timer_stats_active)
time_stop = ktime_get();
time = ktime_sub(time_stop, time_start);
@@ -368,18 +368,18 @@
mutex_lock(&show_mutex);
switch (ctl[0]) {
case '0':
- if (active) {
- active = 0;
+ if (timer_stats_active) {
+ timer_stats_active = 0;
time_stop = ktime_get();
sync_access();
}
break;
case '1':
- if (!active) {
+ if (!timer_stats_active) {
reset_entries();
time_start = ktime_get();
smp_mb();
- active = 1;
+ timer_stats_active = 1;
}
break;
default:
diff --git a/kernel/timer.c b/kernel/timer.c
index 54d3912..0b36b9e 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -380,6 +380,8 @@
{
unsigned int flag = 0;
+ if (likely(!timer->start_site))
+ return;
if (unlikely(tbase_get_deferrable(timer->base)))
flag |= TIMER_STATS_FLAG_DEFERRABLE;
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 3718d55..f3716bf 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -291,7 +291,9 @@
pg = (struct ftrace_profile_page *)((unsigned long)rec & PAGE_MASK);
again:
- rec++;
+ if (idx != 0)
+ rec++;
+
if ((void *)rec >= (void *)&pg->records[pg->index]) {
pg = pg->next;
if (!pg)
@@ -1417,10 +1419,20 @@
{
struct ftrace_iterator *iter = m->private;
void *p = NULL;
+ loff_t l;
+
+ if (!(iter->flags & FTRACE_ITER_HASH))
+ *pos = 0;
iter->flags |= FTRACE_ITER_HASH;
- return t_hash_next(m, p, pos);
+ iter->hidx = 0;
+ for (l = 0; l <= *pos; ) {
+ p = t_hash_next(m, p, &l);
+ if (!p)
+ break;
+ }
+ return p;
}
static int t_hash_show(struct seq_file *m, void *v)
@@ -1467,8 +1479,6 @@
iter->pg = iter->pg->next;
iter->idx = 0;
goto retry;
- } else {
- iter->idx = -1;
}
} else {
rec = &iter->pg->records[iter->idx++];
@@ -1497,6 +1507,7 @@
{
struct ftrace_iterator *iter = m->private;
void *p = NULL;
+ loff_t l;
mutex_lock(&ftrace_lock);
/*
@@ -1508,23 +1519,21 @@
if (*pos > 0)
return t_hash_start(m, pos);
iter->flags |= FTRACE_ITER_PRINTALL;
- (*pos)++;
return iter;
}
if (iter->flags & FTRACE_ITER_HASH)
return t_hash_start(m, pos);
- if (*pos > 0) {
- if (iter->idx < 0)
- return p;
- (*pos)--;
- iter->idx--;
+ iter->pg = ftrace_pages_start;
+ iter->idx = 0;
+ for (l = 0; l <= *pos; ) {
+ p = t_next(m, p, &l);
+ if (!p)
+ break;
}
- p = t_next(m, p, pos);
-
- if (!p)
+ if (!p && iter->flags & FTRACE_ITER_FILTER)
return t_hash_start(m, pos);
return p;
@@ -2500,32 +2509,31 @@
unsigned long ftrace_graph_funcs[FTRACE_GRAPH_MAX_FUNCS] __read_mostly;
static void *
-g_next(struct seq_file *m, void *v, loff_t *pos)
+__g_next(struct seq_file *m, loff_t *pos)
{
unsigned long *array = m->private;
- int index = *pos;
- (*pos)++;
-
- if (index >= ftrace_graph_count)
+ if (*pos >= ftrace_graph_count)
return NULL;
+ return &array[*pos];
+}
- return &array[index];
+static void *
+g_next(struct seq_file *m, void *v, loff_t *pos)
+{
+ (*pos)++;
+ return __g_next(m, pos);
}
static void *g_start(struct seq_file *m, loff_t *pos)
{
- void *p = NULL;
-
mutex_lock(&graph_lock);
/* Nothing, tell g_show to print all functions are enabled */
if (!ftrace_graph_count && !*pos)
return (void *)1;
- p = g_next(m, p, pos);
-
- return p;
+ return __g_next(m, pos);
}
static void g_stop(struct seq_file *m, void *p)
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 04dac26..bf27bb7 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -1563,6 +1563,8 @@
return NULL;
}
+#ifdef CONFIG_TRACING
+
#define TRACE_RECURSIVE_DEPTH 16
static int trace_recursive_lock(void)
@@ -1593,6 +1595,13 @@
current->trace_recursion--;
}
+#else
+
+#define trace_recursive_lock() (0)
+#define trace_recursive_unlock() do { } while (0)
+
+#endif
+
static DEFINE_PER_CPU(int, rb_need_resched);
/**
@@ -3104,6 +3113,7 @@
}
EXPORT_SYMBOL_GPL(ring_buffer_read_page);
+#ifdef CONFIG_TRACING
static ssize_t
rb_simple_read(struct file *filp, char __user *ubuf,
size_t cnt, loff_t *ppos)
@@ -3171,6 +3181,7 @@
}
fs_initcall(rb_init_debugfs);
+#endif
#ifdef CONFIG_HOTPLUG_CPU
static int rb_cpu_notify(struct notifier_block *self,
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 076fa6f..3aa0a0d 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -284,13 +284,12 @@
static int __init set_buf_size(char *str)
{
unsigned long buf_size;
- int ret;
if (!str)
return 0;
- ret = strict_strtoul(str, 0, &buf_size);
+ buf_size = memparse(str, &str);
/* nr_entries can not be zero */
- if (ret < 0 || buf_size == 0)
+ if (buf_size == 0)
return 0;
trace_buf_size = buf_size;
return 1;
@@ -2053,25 +2052,23 @@
static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
- struct tracer *t = m->private;
+ struct tracer *t = v;
(*pos)++;
if (t)
t = t->next;
- m->private = t;
-
return t;
}
static void *t_start(struct seq_file *m, loff_t *pos)
{
- struct tracer *t = m->private;
+ struct tracer *t;
loff_t l = 0;
mutex_lock(&trace_types_lock);
- for (; t && l < *pos; t = t_next(m, t, &l))
+ for (t = trace_types; t && l < *pos; t = t_next(m, t, &l))
;
return t;
@@ -2107,18 +2104,10 @@
static int show_traces_open(struct inode *inode, struct file *file)
{
- int ret;
-
if (tracing_disabled)
return -ENODEV;
- ret = seq_open(file, &show_traces_seq_ops);
- if (!ret) {
- struct seq_file *m = file->private_data;
- m->private = trace_types;
- }
-
- return ret;
+ return seq_open(file, &show_traces_seq_ops);
}
static ssize_t
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 6e735d4..3548ae5 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -597,6 +597,7 @@
extern struct pid *ftrace_pid_trace;
+#ifdef CONFIG_FUNCTION_TRACER
static inline int ftrace_trace_task(struct task_struct *task)
{
if (!ftrace_pid_trace)
@@ -604,6 +605,12 @@
return test_tsk_trace_trace(task);
}
+#else
+static inline int ftrace_trace_task(struct task_struct *task)
+{
+ return 1;
+}
+#endif
/*
* trace_iterator_flags is an enumeration that defines bit
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index aa08be6..53c8fd3 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -300,10 +300,18 @@
static void *t_start(struct seq_file *m, loff_t *pos)
{
+ struct ftrace_event_call *call = NULL;
+ loff_t l;
+
mutex_lock(&event_mutex);
- if (*pos == 0)
- m->private = ftrace_events.next;
- return t_next(m, NULL, pos);
+
+ m->private = ftrace_events.next;
+ for (l = 0; l <= *pos; ) {
+ call = t_next(m, NULL, &l);
+ if (!call)
+ break;
+ }
+ return call;
}
static void *
@@ -332,10 +340,18 @@
static void *s_start(struct seq_file *m, loff_t *pos)
{
+ struct ftrace_event_call *call = NULL;
+ loff_t l;
+
mutex_lock(&event_mutex);
- if (*pos == 0)
- m->private = ftrace_events.next;
- return s_next(m, NULL, pos);
+
+ m->private = ftrace_events.next;
+ for (l = 0; l <= *pos; ) {
+ call = s_next(m, NULL, &l);
+ if (!call)
+ break;
+ }
+ return call;
}
static int t_show(struct seq_file *m, void *v)
diff --git a/kernel/trace/trace_functions.c b/kernel/trace/trace_functions.c
index 90f1347..7402144 100644
--- a/kernel/trace/trace_functions.c
+++ b/kernel/trace/trace_functions.c
@@ -302,8 +302,7 @@
if (count == -1)
seq_printf(m, ":unlimited\n");
else
- seq_printf(m, ":count=%ld", count);
- seq_putc(m, '\n');
+ seq_printf(m, ":count=%ld\n", count);
return 0;
}
diff --git a/kernel/trace/trace_printk.c b/kernel/trace/trace_printk.c
index 9bece96..7b62781 100644
--- a/kernel/trace/trace_printk.c
+++ b/kernel/trace/trace_printk.c
@@ -155,25 +155,19 @@
EXPORT_SYMBOL_GPL(__ftrace_vprintk);
static void *
-t_next(struct seq_file *m, void *v, loff_t *pos)
+t_start(struct seq_file *m, loff_t *pos)
{
- const char **fmt = m->private;
- const char **next = fmt;
-
- (*pos)++;
+ const char **fmt = __start___trace_bprintk_fmt + *pos;
if ((unsigned long)fmt >= (unsigned long)__stop___trace_bprintk_fmt)
return NULL;
-
- next = fmt;
- m->private = ++next;
-
return fmt;
}
-static void *t_start(struct seq_file *m, loff_t *pos)
+static void *t_next(struct seq_file *m, void *v, loff_t *pos)
{
- return t_next(m, NULL, pos);
+ (*pos)++;
+ return t_start(m, pos);
}
static int t_show(struct seq_file *m, void *v)
@@ -224,15 +218,7 @@
static int
ftrace_formats_open(struct inode *inode, struct file *file)
{
- int ret;
-
- ret = seq_open(file, &show_format_seq_ops);
- if (!ret) {
- struct seq_file *m = file->private_data;
-
- m->private = __start___trace_bprintk_fmt;
- }
- return ret;
+ return seq_open(file, &show_format_seq_ops);
}
static const struct file_operations ftrace_formats_fops = {
diff --git a/kernel/trace/trace_stat.c b/kernel/trace/trace_stat.c
index c006437..e66f5e4 100644
--- a/kernel/trace/trace_stat.c
+++ b/kernel/trace/trace_stat.c
@@ -199,17 +199,13 @@
mutex_lock(&session->stat_mutex);
/* If we are in the beginning of the file, print the headers */
- if (!*pos && session->ts->stat_headers) {
- (*pos)++;
+ if (!*pos && session->ts->stat_headers)
return SEQ_START_TOKEN;
- }
node = rb_first(&session->stat_root);
for (i = 0; node && i < *pos; i++)
node = rb_next(node);
- (*pos)++;
-
return node;
}
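
The ftrace, trace, trace_events, trace_printk and trace_stat hunks above all fix the same seq_file contract: ->start() must position the iterator according to *pos, since it can be re-entered after a partial read, and only ->next() advances *pos. A generic sketch of that contract, offered as an illustration rather than any particular tracer's code:

#include <linux/kernel.h>
#include <linux/seq_file.h>

static const char *ex_items[] = { "alpha", "beta", "gamma" };

static void *ex_start(struct seq_file *m, loff_t *pos)
{
	/* Honor *pos; never assume start() is only called with 0. */
	if (*pos >= (loff_t)ARRAY_SIZE(ex_items))
		return NULL;
	return (void *)&ex_items[*pos];
}

static void *ex_next(struct seq_file *m, void *v, loff_t *pos)
{
	(*pos)++;		/* next() owns the position increment */
	return ex_start(m, pos);
}

static void ex_stop(struct seq_file *m, void *v)
{
}

static int ex_show(struct seq_file *m, void *v)
{
	seq_printf(m, "%s\n", *(const char **)v);
	return 0;
}

static const struct seq_operations ex_seq_ops = {
	.start	= ex_start,
	.next	= ex_next,
	.stop	= ex_stop,
	.show	= ex_show,
};
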
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 23067ab..4c32b1a 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -340,8 +340,6 @@
bool "Kernel memory leak detector"
depends on DEBUG_KERNEL && EXPERIMENTAL && (X86 || ARM) && \
!MEMORY_HOTPLUG
- select DEBUG_SLAB if SLAB
- select SLUB_DEBUG if SLUB
select DEBUG_FS if SYSFS
select STACKTRACE if STACKTRACE_SUPPORT
select KALLSYMS
@@ -355,6 +353,9 @@
allocations. See Documentation/kmemleak.txt for more
details.
+ Enabling DEBUG_SLAB or SLUB_DEBUG may increase the chances
+ of finding leaks due to the slab objects poisoning.
+
In order to access the kmemleak file, debugfs needs to be
mounted (usually at /sys/kernel/debug).
diff --git a/lib/checksum.c b/lib/checksum.c
index 12e5a1c..b2e2fd4 100644
--- a/lib/checksum.c
+++ b/lib/checksum.c
@@ -55,7 +55,11 @@
goto out;
odd = 1 & (unsigned long) buff;
if (odd) {
+#ifdef __LITTLE_ENDIAN
result = *buff;
+#else
+ result += (*buff << 8);
+#endif
len--;
buff++;
}
@@ -71,7 +75,7 @@
if (count) {
unsigned long carry = 0;
do {
- unsigned long w = *(unsigned long *) buff;
+ unsigned long w = *(unsigned int *) buff;
count--;
buff += 4;
result += carry;
@@ -87,7 +91,11 @@
}
}
if (len & 1)
+#ifdef __LITTLE_ENDIAN
+ result += *buff;
+#else
result += (*buff << 8);
+#endif
result = from32to16(result);
if (odd)
result = ((result >> 8) & 0xff) | ((result & 0xff) << 8);
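
The lib/checksum.c hunks make the generic do_csum() endian-correct: the inner loop now loads 32-bit words even where unsigned long is 64-bit, and a lone odd byte is added to the low byte of the 16-bit word on little-endian but to the high byte on big-endian. A standalone sketch of the odd-byte rule, illustrative only and not the kernel routine:

#include <stdint.h>
#include <stddef.h>

static uint16_t fold32(uint32_t sum)
{
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)sum;
}

/* Byte-wise ones'-complement sum, showing where a lone odd byte belongs. */
static uint16_t toy_csum(const uint8_t *buf, size_t len, int big_endian)
{
	uint32_t sum = 0;
	size_t i;

	for (i = 0; i + 1 < len; i += 2)
		sum += big_endian ? (uint32_t)((buf[i] << 8) | buf[i + 1])
				  : (uint32_t)(buf[i] | (buf[i + 1] << 8));

	if (len & 1)	/* trailing odd byte */
		sum += big_endian ? (uint32_t)buf[len - 1] << 8 : buf[len - 1];

	return fold32(sum);
}
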
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index a56e6f3..d0351e3 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1985,7 +1985,7 @@
}
static int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
- unsigned long address, pte_t *ptep, int write_access)
+ unsigned long address, pte_t *ptep, unsigned int flags)
{
struct hstate *h = hstate_vma(vma);
int ret = VM_FAULT_SIGBUS;
@@ -2053,7 +2053,7 @@
* any allocations necessary to record that reservation occur outside
* the spinlock.
*/
- if (write_access && !(vma->vm_flags & VM_SHARED))
+ if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED))
if (vma_needs_reservation(h, vma, address) < 0) {
ret = VM_FAULT_OOM;
goto backout_unlocked;
@@ -2072,7 +2072,7 @@
&& (vma->vm_flags & VM_SHARED)));
set_huge_pte_at(mm, address, ptep, new_pte);
- if (write_access && !(vma->vm_flags & VM_SHARED)) {
+ if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
/* Optimization, do the COW without a second fault */
ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page);
}
@@ -2091,7 +2091,7 @@
}
int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
- unsigned long address, int write_access)
+ unsigned long address, unsigned int flags)
{
pte_t *ptep;
pte_t entry;
@@ -2112,7 +2112,7 @@
mutex_lock(&hugetlb_instantiation_mutex);
entry = huge_ptep_get(ptep);
if (huge_pte_none(entry)) {
- ret = hugetlb_no_page(mm, vma, address, ptep, write_access);
+ ret = hugetlb_no_page(mm, vma, address, ptep, flags);
goto out_mutex;
}
@@ -2126,7 +2126,7 @@
* page now as it is used to determine if a reservation has been
* consumed.
*/
- if (write_access && !pte_write(entry)) {
+ if ((flags & FAULT_FLAG_WRITE) && !pte_write(entry)) {
if (vma_needs_reservation(h, vma, address) < 0) {
ret = VM_FAULT_OOM;
goto out_mutex;
@@ -2143,7 +2143,7 @@
goto out_page_table_lock;
- if (write_access) {
+ if (flags & FAULT_FLAG_WRITE) {
if (!pte_write(entry)) {
ret = hugetlb_cow(mm, vma, address, ptep, entry,
pagecache_page);
@@ -2152,7 +2152,8 @@
entry = pte_mkdirty(entry);
}
entry = pte_mkyoung(entry);
- if (huge_ptep_set_access_flags(vma, address, ptep, entry, write_access))
+ if (huge_ptep_set_access_flags(vma, address, ptep, entry,
+ flags & FAULT_FLAG_WRITE))
update_mmu_cache(vma, address, entry);
out_page_table_lock:
diff --git a/mm/kmemleak.c b/mm/kmemleak.c
index ec759b6..c96f2c8 100644
--- a/mm/kmemleak.c
+++ b/mm/kmemleak.c
@@ -61,6 +61,8 @@
* structure.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/list.h>
@@ -311,7 +313,7 @@
static void print_referenced(struct kmemleak_object *object)
{
- pr_info("kmemleak: referenced object 0x%08lx (size %zu)\n",
+ pr_info("referenced object 0x%08lx (size %zu)\n",
object->pointer, object->size);
}
@@ -320,7 +322,7 @@
{
int i;
- print_helper(seq, "kmemleak: unreferenced object 0x%08lx (size %zu):\n",
+ print_helper(seq, "unreferenced object 0x%08lx (size %zu):\n",
object->pointer, object->size);
print_helper(seq, " comm \"%s\", pid %d, jiffies %lu\n",
object->comm, object->pid, object->jiffies);
@@ -344,7 +346,7 @@
trace.nr_entries = object->trace_len;
trace.entries = object->trace;
- pr_notice("kmemleak: Object 0x%08lx (size %zu):\n",
+ pr_notice("Object 0x%08lx (size %zu):\n",
object->tree_node.start, object->size);
pr_notice(" comm \"%s\", pid %d, jiffies %lu\n",
object->comm, object->pid, object->jiffies);
@@ -372,7 +374,7 @@
object = prio_tree_entry(node, struct kmemleak_object,
tree_node);
if (!alias && object->pointer != ptr) {
- kmemleak_warn("kmemleak: Found object by alias");
+ kmemleak_warn("Found object by alias");
object = NULL;
}
} else
@@ -467,8 +469,7 @@
object = kmem_cache_alloc(object_cache, gfp & GFP_KMEMLEAK_MASK);
if (!object) {
- kmemleak_stop("kmemleak: Cannot allocate a kmemleak_object "
- "structure\n");
+ kmemleak_stop("Cannot allocate a kmemleak_object structure\n");
return;
}
@@ -527,8 +528,8 @@
if (node != &object->tree_node) {
unsigned long flags;
- kmemleak_stop("kmemleak: Cannot insert 0x%lx into the object "
- "search tree (already existing)\n", ptr);
+ kmemleak_stop("Cannot insert 0x%lx into the object search tree "
+ "(already existing)\n", ptr);
object = lookup_object(ptr, 1);
spin_lock_irqsave(&object->lock, flags);
dump_object_info(object);
@@ -553,7 +554,7 @@
write_lock_irqsave(&kmemleak_lock, flags);
object = lookup_object(ptr, 0);
if (!object) {
- kmemleak_warn("kmemleak: Freeing unknown object at 0x%08lx\n",
+ kmemleak_warn("Freeing unknown object at 0x%08lx\n",
ptr);
write_unlock_irqrestore(&kmemleak_lock, flags);
return;
@@ -588,8 +589,7 @@
object = find_and_get_object(ptr, 0);
if (!object) {
- kmemleak_warn("kmemleak: Graying unknown object at 0x%08lx\n",
- ptr);
+ kmemleak_warn("Graying unknown object at 0x%08lx\n", ptr);
return;
}
@@ -610,8 +610,7 @@
object = find_and_get_object(ptr, 0);
if (!object) {
- kmemleak_warn("kmemleak: Blacking unknown object at 0x%08lx\n",
- ptr);
+ kmemleak_warn("Blacking unknown object at 0x%08lx\n", ptr);
return;
}
@@ -634,21 +633,20 @@
object = find_and_get_object(ptr, 0);
if (!object) {
- kmemleak_warn("kmemleak: Adding scan area to unknown "
- "object at 0x%08lx\n", ptr);
+ kmemleak_warn("Adding scan area to unknown object at 0x%08lx\n",
+ ptr);
return;
}
area = kmem_cache_alloc(scan_area_cache, gfp & GFP_KMEMLEAK_MASK);
if (!area) {
- kmemleak_warn("kmemleak: Cannot allocate a scan area\n");
+ kmemleak_warn("Cannot allocate a scan area\n");
goto out;
}
spin_lock_irqsave(&object->lock, flags);
if (offset + length > object->size) {
- kmemleak_warn("kmemleak: Scan area larger than object "
- "0x%08lx\n", ptr);
+ kmemleak_warn("Scan area larger than object 0x%08lx\n", ptr);
dump_object_info(object);
kmem_cache_free(scan_area_cache, area);
goto out_unlock;
@@ -677,8 +675,7 @@
object = find_and_get_object(ptr, 0);
if (!object) {
- kmemleak_warn("kmemleak: Not scanning unknown object at "
- "0x%08lx\n", ptr);
+ kmemleak_warn("Not scanning unknown object at 0x%08lx\n", ptr);
return;
}
@@ -699,7 +696,7 @@
struct early_log *log;
if (crt_early_log >= ARRAY_SIZE(early_log)) {
- kmemleak_stop("kmemleak: Early log buffer exceeded\n");
+ kmemleak_stop("Early log buffer exceeded\n");
return;
}
@@ -966,7 +963,7 @@
* 1 reference to any object at this point.
*/
if (atomic_read(&object->use_count) > 1) {
- pr_debug("kmemleak: object->use_count = %d\n",
+ pr_debug("object->use_count = %d\n",
atomic_read(&object->use_count));
dump_object_info(object);
}
@@ -1062,7 +1059,7 @@
{
static int first_run = 1;
- pr_info("kmemleak: Automatic memory scanning thread started\n");
+ pr_info("Automatic memory scanning thread started\n");
/*
* Wait before the first scan to allow the system to fully initialize.
@@ -1108,7 +1105,7 @@
timeout = schedule_timeout_interruptible(timeout);
}
- pr_info("kmemleak: Automatic memory scanning thread ended\n");
+ pr_info("Automatic memory scanning thread ended\n");
return 0;
}
@@ -1123,7 +1120,7 @@
return;
scan_thread = kthread_run(kmemleak_scan_thread, NULL, "kmemleak");
if (IS_ERR(scan_thread)) {
- pr_warning("kmemleak: Failed to create the scan thread\n");
+ pr_warning("Failed to create the scan thread\n");
scan_thread = NULL;
}
}
@@ -1367,7 +1364,7 @@
cleanup_thread = kthread_run(kmemleak_cleanup_thread, NULL,
"kmemleak-clean");
if (IS_ERR(cleanup_thread))
- pr_warning("kmemleak: Failed to create the clean-up thread\n");
+ pr_warning("Failed to create the clean-up thread\n");
}
/*
@@ -1488,8 +1485,7 @@
dentry = debugfs_create_file("kmemleak", S_IRUGO, NULL, NULL,
&kmemleak_fops);
if (!dentry)
- pr_warning("kmemleak: Failed to create the debugfs kmemleak "
- "file\n");
+ pr_warning("Failed to create the debugfs kmemleak file\n");
mutex_lock(&kmemleak_mutex);
start_scan_thread();
mutex_unlock(&kmemleak_mutex);
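
The mm/kmemleak.c hunks drop the repeated "kmemleak: " literals because pr_fmt(), when defined before the printk headers are included, is prepended to every pr_*() format string automatically. A minimal sketch of the mechanism in a generic module, not kmemleak itself:

/* Define pr_fmt() before including any header that uses pr_*(). */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>

static int __init prfmt_example_init(void)
{
	pr_info("loaded\n");	/* logged as "<modname>: loaded" */
	return 0;
}
module_init(prfmt_example_init);
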
diff --git a/mm/memory.c b/mm/memory.c
index 98bcb90..6521619 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1207,8 +1207,8 @@
int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
- unsigned long start, int len, int flags,
- struct page **pages, struct vm_area_struct **vmas)
+ unsigned long start, int nr_pages, int flags,
+ struct page **pages, struct vm_area_struct **vmas)
{
int i;
unsigned int vm_flags = 0;
@@ -1217,7 +1217,7 @@
int ignore = !!(flags & GUP_FLAGS_IGNORE_VMA_PERMISSIONS);
int ignore_sigkill = !!(flags & GUP_FLAGS_IGNORE_SIGKILL);
- if (len <= 0)
+ if (nr_pages <= 0)
return 0;
/*
* Require read or write permissions.
@@ -1269,7 +1269,7 @@
vmas[i] = gate_vma;
i++;
start += PAGE_SIZE;
- len--;
+ nr_pages--;
continue;
}
@@ -1280,7 +1280,7 @@
if (is_vm_hugetlb_page(vma)) {
i = follow_hugetlb_page(mm, vma, pages, vmas,
- &start, &len, i, write);
+ &start, &nr_pages, i, write);
continue;
}
@@ -1311,8 +1311,10 @@
while (!(page = follow_page(vma, start, foll_flags))) {
int ret;
- /* FOLL_WRITE matches FAULT_FLAG_WRITE! */
- ret = handle_mm_fault(mm, vma, start, foll_flags & FOLL_WRITE);
+ ret = handle_mm_fault(mm, vma, start,
+ (foll_flags & FOLL_WRITE) ?
+ FAULT_FLAG_WRITE : 0);
+
if (ret & VM_FAULT_ERROR) {
if (ret & VM_FAULT_OOM)
return i ? i : -ENOMEM;
@@ -1355,9 +1357,9 @@
vmas[i] = vma;
i++;
start += PAGE_SIZE;
- len--;
- } while (len && start < vma->vm_end);
- } while (len);
+ nr_pages--;
+ } while (nr_pages && start < vma->vm_end);
+ } while (nr_pages);
return i;
}
@@ -1366,7 +1368,7 @@
* @tsk: task_struct of target task
* @mm: mm_struct of target mm
* @start: starting user address
- * @len: number of pages from start to pin
+ * @nr_pages: number of pages from start to pin
* @write: whether pages will be written to by the caller
* @force: whether to force write access even if user mapping is
* readonly. This will result in the page being COWed even
@@ -1378,7 +1380,7 @@
* Or NULL if the caller does not require them.
*
* Returns number of pages pinned. This may be fewer than the number
- * requested. If len is 0 or negative, returns 0. If no pages
+ * requested. If nr_pages is 0 or negative, returns 0. If no pages
* were pinned, returns -errno. Each page returned must be released
* with a put_page() call when it is finished with. vmas will only
* remain valid while mmap_sem is held.
@@ -1412,7 +1414,7 @@
* See also get_user_pages_fast, for performance critical applications.
*/
int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
- unsigned long start, int len, int write, int force,
+ unsigned long start, int nr_pages, int write, int force,
struct page **pages, struct vm_area_struct **vmas)
{
int flags = 0;
@@ -1422,9 +1424,7 @@
if (force)
flags |= GUP_FLAGS_FORCE;
- return __get_user_pages(tsk, mm,
- start, len, flags,
- pages, vmas);
+ return __get_user_pages(tsk, mm, start, nr_pages, flags, pages, vmas);
}
EXPORT_SYMBOL(get_user_pages);
@@ -2517,7 +2517,7 @@
delayacct_set_flag(DELAYACCT_PF_SWAPIN);
page = lookup_swap_cache(entry);
if (!page) {
- grab_swap_token(); /* Contend for token _before_ read-in */
+ grab_swap_token(mm); /* Contend for token _before_ read-in */
page = swapin_readahead(entry,
GFP_HIGHUSER_MOVABLE, vma, address);
if (!page) {
diff --git a/mm/nommu.c b/mm/nommu.c
index 2fd2ad5..bf0cc76 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -173,8 +173,8 @@
}
int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
- unsigned long start, int len, int flags,
- struct page **pages, struct vm_area_struct **vmas)
+ unsigned long start, int nr_pages, int flags,
+ struct page **pages, struct vm_area_struct **vmas)
{
struct vm_area_struct *vma;
unsigned long vm_flags;
@@ -189,7 +189,7 @@
vm_flags = write ? (VM_WRITE | VM_MAYWRITE) : (VM_READ | VM_MAYREAD);
vm_flags &= force ? (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
- for (i = 0; i < len; i++) {
+ for (i = 0; i < nr_pages; i++) {
vma = find_vma(mm, start);
if (!vma)
goto finish_or_fault;
@@ -224,7 +224,7 @@
* - don't permit access to VMAs that don't support it, such as I/O mappings
*/
int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
- unsigned long start, int len, int write, int force,
+ unsigned long start, int nr_pages, int write, int force,
struct page **pages, struct vm_area_struct **vmas)
{
int flags = 0;
@@ -234,9 +234,7 @@
if (force)
flags |= GUP_FLAGS_FORCE;
- return __get_user_pages(tsk, mm,
- start, len, flags,
- pages, vmas);
+ return __get_user_pages(tsk, mm, start, nr_pages, flags, pages, vmas);
}
EXPORT_SYMBOL(get_user_pages);
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 30d5093..5d714f8 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1153,10 +1153,10 @@
* properly detect and handle allocation failures.
*
* We most definitely don't want callers attempting to
- * allocate greater than single-page units with
+ * allocate greater than order-1 page units with
* __GFP_NOFAIL.
*/
- WARN_ON_ONCE(order > 0);
+ WARN_ON_ONCE(order > 1);
}
spin_lock_irqsave(&zone->lock, flags);
page = __rmqueue(zone, order, migratetype);
@@ -3026,7 +3026,7 @@
if (dzone == zone)
break;
kfree(zone_pcp(dzone, cpu));
- zone_pcp(dzone, cpu) = NULL;
+ zone_pcp(dzone, cpu) = &boot_pageset[cpu];
}
return -ENOMEM;
}
@@ -3041,7 +3041,7 @@
/* Free per_cpu_pageset if it is slab allocated */
if (pset != &boot_pageset[cpu])
kfree(pset);
- zone_pcp(zone, cpu) = NULL;
+ zone_pcp(zone, cpu) = &boot_pageset[cpu];
}
}
@@ -4659,7 +4659,7 @@
ret = proc_dointvec_minmax(table, write, file, buffer, length, ppos);
if (!write || (ret == -EINVAL))
return ret;
- for_each_zone(zone) {
+ for_each_populated_zone(zone) {
for_each_online_cpu(cpu) {
unsigned long high;
high = zone->present_pages / percpu_pagelist_fraction;
diff --git a/mm/shmem.c b/mm/shmem.c
index e89d7ec..d713239 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -1558,6 +1558,7 @@
spin_lock_init(&info->lock);
info->flags = flags & VM_NORESERVE;
INIT_LIST_HEAD(&info->swaplist);
+ cache_no_acl(inode);
switch (mode & S_IFMT) {
default:
@@ -2388,7 +2389,6 @@
/* only struct inode is valid if it's an inline symlink */
mpol_free_shared_policy(&SHMEM_I(inode)->policy);
}
- shmem_acl_destroy_inode(inode);
kmem_cache_free(shmem_inode_cachep, SHMEM_I(inode));
}
@@ -2397,10 +2397,6 @@
struct shmem_inode_info *p = (struct shmem_inode_info *) foo;
inode_init_once(&p->vfs_inode);
-#ifdef CONFIG_TMPFS_POSIX_ACL
- p->i_acl = NULL;
- p->i_default_acl = NULL;
-#endif
}
static int init_inodecache(void)
diff --git a/mm/shmem_acl.c b/mm/shmem_acl.c
index 8e5aadd..606a8e7 100644
--- a/mm/shmem_acl.c
+++ b/mm/shmem_acl.c
@@ -22,11 +22,11 @@
spin_lock(&inode->i_lock);
switch(type) {
case ACL_TYPE_ACCESS:
- acl = posix_acl_dup(SHMEM_I(inode)->i_acl);
+ acl = posix_acl_dup(inode->i_acl);
break;
case ACL_TYPE_DEFAULT:
- acl = posix_acl_dup(SHMEM_I(inode)->i_default_acl);
+ acl = posix_acl_dup(inode->i_default_acl);
break;
}
spin_unlock(&inode->i_lock);
@@ -45,13 +45,13 @@
spin_lock(&inode->i_lock);
switch(type) {
case ACL_TYPE_ACCESS:
- free = SHMEM_I(inode)->i_acl;
- SHMEM_I(inode)->i_acl = posix_acl_dup(acl);
+ free = inode->i_acl;
+ inode->i_acl = posix_acl_dup(acl);
break;
case ACL_TYPE_DEFAULT:
- free = SHMEM_I(inode)->i_default_acl;
- SHMEM_I(inode)->i_default_acl = posix_acl_dup(acl);
+ free = inode->i_default_acl;
+ inode->i_default_acl = posix_acl_dup(acl);
break;
}
spin_unlock(&inode->i_lock);
@@ -155,23 +155,6 @@
}
/**
- * shmem_acl_destroy_inode - destroy acls hanging off the in-memory inode
- *
- * This is done before destroying the actual inode.
- */
-
-void
-shmem_acl_destroy_inode(struct inode *inode)
-{
- if (SHMEM_I(inode)->i_acl)
- posix_acl_release(SHMEM_I(inode)->i_acl);
- SHMEM_I(inode)->i_acl = NULL;
- if (SHMEM_I(inode)->i_default_acl)
- posix_acl_release(SHMEM_I(inode)->i_default_acl);
- SHMEM_I(inode)->i_default_acl = NULL;
-}
-
-/**
* shmem_check_acl - check_acl() callback for generic_permission()
*/
static int
diff --git a/mm/slub.c b/mm/slub.c
index ce62b77..819f056 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1085,11 +1085,17 @@
{
struct page *page;
struct kmem_cache_order_objects oo = s->oo;
+ gfp_t alloc_gfp;
flags |= s->allocflags;
- page = alloc_slab_page(flags | __GFP_NOWARN | __GFP_NORETRY, node,
- oo);
+ /*
+ * Let the initial higher-order allocation fail under memory pressure
+ * so we fall back to the minimum order allocation.
+ */
+ alloc_gfp = (flags | __GFP_NOWARN | __GFP_NORETRY) & ~__GFP_NOFAIL;
+
+ page = alloc_slab_page(alloc_gfp, node, oo);
if (unlikely(!page)) {
oo = s->min;
/*
diff --git a/mm/thrash.c b/mm/thrash.c
index c4c5205..2372d4e 100644
--- a/mm/thrash.c
+++ b/mm/thrash.c
@@ -26,47 +26,45 @@
struct mm_struct *swap_token_mm;
static unsigned int global_faults;
-void grab_swap_token(void)
+void grab_swap_token(struct mm_struct *mm)
{
int current_interval;
global_faults++;
- current_interval = global_faults - current->mm->faultstamp;
+ current_interval = global_faults - mm->faultstamp;
if (!spin_trylock(&swap_token_lock))
return;
/* First come first served */
if (swap_token_mm == NULL) {
- current->mm->token_priority = current->mm->token_priority + 2;
- swap_token_mm = current->mm;
+ mm->token_priority = mm->token_priority + 2;
+ swap_token_mm = mm;
goto out;
}
- if (current->mm != swap_token_mm) {
- if (current_interval < current->mm->last_interval)
- current->mm->token_priority++;
+ if (mm != swap_token_mm) {
+ if (current_interval < mm->last_interval)
+ mm->token_priority++;
else {
- if (likely(current->mm->token_priority > 0))
- current->mm->token_priority--;
+ if (likely(mm->token_priority > 0))
+ mm->token_priority--;
}
/* Check if we deserve the token */
- if (current->mm->token_priority >
- swap_token_mm->token_priority) {
- current->mm->token_priority += 2;
- swap_token_mm = current->mm;
+ if (mm->token_priority > swap_token_mm->token_priority) {
+ mm->token_priority += 2;
+ swap_token_mm = mm;
}
} else {
/* Token holder came in again! */
- current->mm->token_priority += 2;
+ mm->token_priority += 2;
}
out:
- current->mm->faultstamp = global_faults;
- current->mm->last_interval = current_interval;
+ mm->faultstamp = global_faults;
+ mm->last_interval = current_interval;
spin_unlock(&swap_token_lock);
-return;
}
/* Called on process exit. */
diff --git a/mm/vmscan.c b/mm/vmscan.c
index e8fa2d9..5415526 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -932,7 +932,7 @@
continue;
if (__isolate_lru_page(cursor_page, mode, file) == 0) {
list_move(&cursor_page->lru, dst);
- mem_cgroup_del_lru(page);
+ mem_cgroup_del_lru(cursor_page);
nr_taken++;
scan++;
}
diff --git a/net/ax25/ax25_in.c b/net/ax25/ax25_in.c
index 5f1d210..de56d39 100644
--- a/net/ax25/ax25_in.c
+++ b/net/ax25/ax25_in.c
@@ -437,8 +437,7 @@
int ax25_kiss_rcv(struct sk_buff *skb, struct net_device *dev,
struct packet_type *ptype, struct net_device *orig_dev)
{
- skb->sk = NULL; /* Initially we don't know who it's for */
- skb->destructor = NULL; /* Who initializes this, dammit?! */
+ skb_orphan(skb);
if (!net_eq(dev_net(dev), &init_net)) {
kfree_skb(skb);
diff --git a/net/core/dev.c b/net/core/dev.c
index baf2dc1..60b5728 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2310,8 +2310,6 @@
if (!skb)
goto out;
- skb_orphan(skb);
-
type = skb->protocol;
list_for_each_entry_rcu(ptype,
&ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) {
diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
index 05ea744..3e70faa 100644
--- a/net/dccp/ipv6.c
+++ b/net/dccp/ipv6.c
@@ -85,7 +85,7 @@
}
static void dccp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
- int type, int code, int offset, __be32 info)
+ u8 type, u8 code, int offset, __be32 info)
{
struct ipv6hdr *hdr = (struct ipv6hdr *)skb->data;
const struct dccp_hdr *dh = (struct dccp_hdr *)(skb->data + offset);
diff --git a/net/ieee802154/af_ieee802154.c b/net/ieee802154/af_ieee802154.c
index 882a927..3bb6bdb 100644
--- a/net/ieee802154/af_ieee802154.c
+++ b/net/ieee802154/af_ieee802154.c
@@ -39,14 +39,6 @@
#include "af802154.h"
-#define DBG_DUMP(data, len) { \
- int i; \
- pr_debug("function: %s: data: len %d:\n", __func__, len); \
- for (i = 0; i < len; i++) {\
- pr_debug("%02x: %02x\n", i, (data)[i]); \
- } \
-}
-
/*
* Utility function for families
*/
@@ -302,10 +294,12 @@
static int ieee802154_rcv(struct sk_buff *skb, struct net_device *dev,
struct packet_type *pt, struct net_device *orig_dev)
{
- DBG_DUMP(skb->data, skb->len);
if (!netif_running(dev))
return -ENODEV;
pr_debug("got frame, type %d, dev %p\n", dev->type, dev);
+#ifdef DEBUG
+ print_hex_dump_bytes("ieee802154_rcv ", DUMP_PREFIX_NONE, skb->data, skb->len);
+#endif
if (!net_eq(dev_net(dev), &init_net))
goto drop;
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index cd76b3c..278f46f 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -1085,8 +1085,35 @@
now = jiffies;
if (!rt_caching(dev_net(rt->u.dst.dev))) {
- rt_drop(rt);
- return 0;
+ /*
+ * If we're not caching, just tell the caller we
+ * were successful and don't touch the route. The
+ * caller holds the sole reference to the cache entry, and
+ * it will be released when the caller is done with it.
+ * If we drop it here, the callers have no way to resolve routes
+ * when we're not caching. Instead, just point *rp at rt, so
+ * the caller gets a single use out of the route
+ * Note that we do rt_free on this new route entry, so that
+ * once its refcount hits zero, we are still able to reap it
+ * (Thanks Alexey)
+ * Note also that rt_free uses call_rcu. We don't actually
+ * need rcu protection here, this is just our path to get
+ * on the route gc list.
+ */
+
+ if (rt->rt_type == RTN_UNICAST || rt->fl.iif == 0) {
+ int err = arp_bind_neighbour(&rt->u.dst);
+ if (err) {
+ if (net_ratelimit())
+ printk(KERN_WARNING
+ "Neighbour table failure & not caching routes.\n");
+ rt_drop(rt);
+ return err;
+ }
+ }
+
+ rt_free(rt);
+ goto skip_hashing;
}
rthp = &rt_hash_table[hash].chain;
@@ -1203,7 +1230,8 @@
#if RT_CACHE_DEBUG >= 2
if (rt->u.dst.rt_next) {
struct rtable *trt;
- printk(KERN_DEBUG "rt_cache @%02x: %pI4", hash, &rt->rt_dst);
+ printk(KERN_DEBUG "rt_cache @%02x: %pI4",
+ hash, &rt->rt_dst);
for (trt = rt->u.dst.rt_next; trt; trt = trt->u.dst.rt_next)
printk(" . %pI4", &trt->rt_dst);
printk("\n");
@@ -1217,6 +1245,8 @@
rcu_assign_pointer(rt_hash_table[hash].chain, rt);
spin_unlock_bh(rt_hash_lock_addr(hash));
+
+skip_hashing:
if (rp)
*rp = rt;
else
diff --git a/net/ipv6/ah6.c b/net/ipv6/ah6.c
index 52449f7..86f42a2 100644
--- a/net/ipv6/ah6.c
+++ b/net/ipv6/ah6.c
@@ -405,7 +405,7 @@
}
static void ah6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
- int type, int code, int offset, __be32 info)
+ u8 type, u8 code, int offset, __be32 info)
{
struct net *net = dev_net(skb->dev);
struct ipv6hdr *iph = (struct ipv6hdr*)skb->data;
diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c
index c2f2501..678bb95 100644
--- a/net/ipv6/esp6.c
+++ b/net/ipv6/esp6.c
@@ -354,7 +354,7 @@
}
static void esp6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
- int type, int code, int offset, __be32 info)
+ u8 type, u8 code, int offset, __be32 info)
{
struct net *net = dev_net(skb->dev);
struct ipv6hdr *iph = (struct ipv6hdr*)skb->data;
diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
index 36dff88..eab62a7 100644
--- a/net/ipv6/icmp.c
+++ b/net/ipv6/icmp.c
@@ -117,7 +117,7 @@
/*
* Slightly more convenient version of icmpv6_send.
*/
-void icmpv6_param_prob(struct sk_buff *skb, int code, int pos)
+void icmpv6_param_prob(struct sk_buff *skb, u8 code, int pos)
{
icmpv6_send(skb, ICMPV6_PARAMPROB, code, pos, skb->dev);
kfree_skb(skb);
@@ -161,7 +161,7 @@
/*
* Check the ICMP output rate limit
*/
-static inline int icmpv6_xrlim_allow(struct sock *sk, int type,
+static inline int icmpv6_xrlim_allow(struct sock *sk, u8 type,
struct flowi *fl)
{
struct dst_entry *dst;
@@ -305,7 +305,7 @@
/*
* Send an ICMP message in response to a packet in error
*/
-void icmpv6_send(struct sk_buff *skb, int type, int code, __u32 info,
+void icmpv6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info,
struct net_device *dev)
{
struct net *net = dev_net(skb->dev);
@@ -590,7 +590,7 @@
icmpv6_xmit_unlock(sk);
}
-static void icmpv6_notify(struct sk_buff *skb, int type, int code, __be32 info)
+static void icmpv6_notify(struct sk_buff *skb, u8 type, u8 code, __be32 info)
{
struct inet6_protocol *ipprot;
int inner_offset;
@@ -643,7 +643,7 @@
struct in6_addr *saddr, *daddr;
struct ipv6hdr *orig_hdr;
struct icmp6hdr *hdr;
- int type;
+ u8 type;
if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
struct sec_path *sp = skb_sec_path(skb);
@@ -914,7 +914,7 @@
},
};
-int icmpv6_err_convert(int type, int code, int *err)
+int icmpv6_err_convert(u8 type, u8 code, int *err)
{
int fatal = 0;
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index 404d16a..51f410e 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -394,13 +394,13 @@
static int
ip6_tnl_err(struct sk_buff *skb, __u8 ipproto, struct inet6_skb_parm *opt,
- int *type, int *code, int *msg, __u32 *info, int offset)
+ u8 *type, u8 *code, int *msg, __u32 *info, int offset)
{
struct ipv6hdr *ipv6h = (struct ipv6hdr *) skb->data;
struct ip6_tnl *t;
int rel_msg = 0;
- int rel_type = ICMPV6_DEST_UNREACH;
- int rel_code = ICMPV6_ADDR_UNREACH;
+ u8 rel_type = ICMPV6_DEST_UNREACH;
+ u8 rel_code = ICMPV6_ADDR_UNREACH;
__u32 rel_info = 0;
__u16 len;
int err = -ENOENT;
@@ -488,11 +488,11 @@
static int
ip4ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
- int type, int code, int offset, __be32 info)
+ u8 type, u8 code, int offset, __be32 info)
{
int rel_msg = 0;
- int rel_type = type;
- int rel_code = code;
+ u8 rel_type = type;
+ u8 rel_code = code;
__u32 rel_info = ntohl(info);
int err;
struct sk_buff *skb2;
@@ -586,11 +586,11 @@
static int
ip6ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
- int type, int code, int offset, __be32 info)
+ u8 type, u8 code, int offset, __be32 info)
{
int rel_msg = 0;
- int rel_type = type;
- int rel_code = code;
+ u8 rel_type = type;
+ u8 rel_code = code;
__u32 rel_info = ntohl(info);
int err;
diff --git a/net/ipv6/ipcomp6.c b/net/ipv6/ipcomp6.c
index 3a0b3be..79c172f 100644
--- a/net/ipv6/ipcomp6.c
+++ b/net/ipv6/ipcomp6.c
@@ -51,7 +51,7 @@
#include <linux/mutex.h>
static void ipcomp6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
- int type, int code, int offset, __be32 info)
+ u8 type, u8 code, int offset, __be32 info)
{
__be32 spi;
struct ipv6hdr *iph = (struct ipv6hdr*)skb->data;
diff --git a/net/ipv6/mip6.c b/net/ipv6/mip6.c
index f995e19..f797e8c 100644
--- a/net/ipv6/mip6.c
+++ b/net/ipv6/mip6.c
@@ -54,7 +54,7 @@
return data + padlen;
}
-static inline void mip6_param_prob(struct sk_buff *skb, int code, int pos)
+static inline void mip6_param_prob(struct sk_buff *skb, u8 code, int pos)
{
icmpv6_send(skb, ICMPV6_PARAMPROB, code, pos, skb->dev);
}
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index 8b0b6f9..d6c3c1c 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -310,7 +310,7 @@
static void rawv6_err(struct sock *sk, struct sk_buff *skb,
struct inet6_skb_parm *opt,
- int type, int code, int offset, __be32 info)
+ u8 type, u8 code, int offset, __be32 info)
{
struct inet_sock *inet = inet_sk(sk);
struct ipv6_pinfo *np = inet6_sk(sk);
@@ -343,7 +343,7 @@
}
void raw6_icmp_error(struct sk_buff *skb, int nexthdr,
- int type, int code, int inner_offset, __be32 info)
+ u8 type, u8 code, int inner_offset, __be32 info)
{
struct sock *sk;
int hash;
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 658293e..1473ee0 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -1865,7 +1865,7 @@
* Drop the packet on the floor
*/
-static int ip6_pkt_drop(struct sk_buff *skb, int code, int ipstats_mib_noroutes)
+static int ip6_pkt_drop(struct sk_buff *skb, u8 code, int ipstats_mib_noroutes)
{
int type;
struct dst_entry *dst = skb_dst(skb);
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 53b6a41..58810c6 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -317,7 +317,7 @@
}
static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
- int type, int code, int offset, __be32 info)
+ u8 type, u8 code, int offset, __be32 info)
{
struct ipv6hdr *hdr = (struct ipv6hdr*)skb->data;
const struct tcphdr *th = (struct tcphdr *)(skb->data+offset);
diff --git a/net/ipv6/tunnel6.c b/net/ipv6/tunnel6.c
index 669f280..633ad78 100644
--- a/net/ipv6/tunnel6.c
+++ b/net/ipv6/tunnel6.c
@@ -124,7 +124,7 @@
}
static void tunnel6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
- int type, int code, int offset, __be32 info)
+ u8 type, u8 code, int offset, __be32 info)
{
struct xfrm6_tunnel *handler;
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 023beda..33b59bd 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -312,7 +312,7 @@
}
void __udp6_lib_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
- int type, int code, int offset, __be32 info,
+ u8 type, u8 code, int offset, __be32 info,
struct udp_table *udptable)
{
struct ipv6_pinfo *np;
@@ -346,8 +346,8 @@
}
static __inline__ void udpv6_err(struct sk_buff *skb,
- struct inet6_skb_parm *opt, int type,
- int code, int offset, __be32 info )
+ struct inet6_skb_parm *opt, u8 type,
+ u8 code, int offset, __be32 info )
{
__udp6_lib_err(skb, opt, type, code, offset, info, &udp_table);
}
diff --git a/net/ipv6/udp_impl.h b/net/ipv6/udp_impl.h
index 2377920..6bb3034 100644
--- a/net/ipv6/udp_impl.h
+++ b/net/ipv6/udp_impl.h
@@ -9,7 +9,7 @@
extern int __udp6_lib_rcv(struct sk_buff *, struct udp_table *, int );
extern void __udp6_lib_err(struct sk_buff *, struct inet6_skb_parm *,
- int , int , int , __be32 , struct udp_table *);
+ u8 , u8 , int , __be32 , struct udp_table *);
extern int udp_v6_get_port(struct sock *sk, unsigned short snum);
diff --git a/net/ipv6/udplite.c b/net/ipv6/udplite.c
index ba162a8..4818c48 100644
--- a/net/ipv6/udplite.c
+++ b/net/ipv6/udplite.c
@@ -20,7 +20,7 @@
static void udplitev6_err(struct sk_buff *skb,
struct inet6_skb_parm *opt,
- int type, int code, int offset, __be32 info)
+ u8 type, u8 code, int offset, __be32 info)
{
__udp6_lib_err(skb, opt, type, code, offset, info, &udplite_table);
}
diff --git a/net/ipv6/xfrm6_tunnel.c b/net/ipv6/xfrm6_tunnel.c
index 80193db..81a95c0 100644
--- a/net/ipv6/xfrm6_tunnel.c
+++ b/net/ipv6/xfrm6_tunnel.c
@@ -262,7 +262,7 @@
}
static int xfrm6_tunnel_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
- int type, int code, int offset, __be32 info)
+ u8 type, u8 code, int offset, __be32 info)
{
/* xfrm6_tunnel native err handling */
switch (type) {
diff --git a/net/irda/af_irda.c b/net/irda/af_irda.c
index 5922feb..cb762c8 100644
--- a/net/irda/af_irda.c
+++ b/net/irda/af_irda.c
@@ -913,9 +913,6 @@
/* Clean up the original one to keep it in listen state */
irttp_listen(self->tsap);
- /* Wow ! What is that ? Jean II */
- skb->sk = NULL;
- skb->destructor = NULL;
kfree_skb(skb);
sk->sk_ack_backlog--;
diff --git a/net/irda/ircomm/ircomm_lmp.c b/net/irda/ircomm/ircomm_lmp.c
index 67c99d2..7ba9661 100644
--- a/net/irda/ircomm/ircomm_lmp.c
+++ b/net/irda/ircomm/ircomm_lmp.c
@@ -196,6 +196,7 @@
/* Don't forget to refcount it - see ircomm_tty_do_softint() */
skb_get(skb);
+ skb_orphan(skb);
skb->destructor = ircomm_lmp_flow_control;
if ((self->pkt_count++ > 7) && (self->flow_status == FLOW_START)) {
diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
index 656cbd1..6be5f92 100644
--- a/net/iucv/af_iucv.c
+++ b/net/iucv/af_iucv.c
@@ -54,6 +54,38 @@
#define CB_TRGCLS(skb) ((skb)->cb + CB_TAG_LEN) /* iucv msg target class */
#define CB_TRGCLS_LEN (TRGCLS_SIZE)
+#define __iucv_sock_wait(sk, condition, timeo, ret) \
+do { \
+ DEFINE_WAIT(__wait); \
+ long __timeo = timeo; \
+ ret = 0; \
+ while (!(condition)) { \
+ prepare_to_wait(sk->sk_sleep, &__wait, TASK_INTERRUPTIBLE); \
+ if (!__timeo) { \
+ ret = -EAGAIN; \
+ break; \
+ } \
+ if (signal_pending(current)) { \
+ ret = sock_intr_errno(__timeo); \
+ break; \
+ } \
+ release_sock(sk); \
+ __timeo = schedule_timeout(__timeo); \
+ lock_sock(sk); \
+ ret = sock_error(sk); \
+ if (ret) \
+ break; \
+ } \
+ finish_wait(sk->sk_sleep, &__wait); \
+} while (0)
+
+#define iucv_sock_wait(sk, condition, timeo) \
+({ \
+ int __ret = 0; \
+ if (!(condition)) \
+ __iucv_sock_wait(sk, condition, timeo, __ret); \
+ __ret; \
+})
static void iucv_sock_kill(struct sock *sk);
static void iucv_sock_close(struct sock *sk);
@@ -238,6 +270,48 @@
return msg->length;
}
+/**
+ * iucv_sock_in_state() - check for specific states
+ * @sk: sock structure
+ * @state: first iucv sk state
+ * @state2: second iucv sk state
+ *
+ * Returns true if the socket is either in the first or second state.
+ */
+static int iucv_sock_in_state(struct sock *sk, int state, int state2)
+{
+ return (sk->sk_state == state || sk->sk_state == state2);
+}
+
+/**
+ * iucv_below_msglim() - function to check if messages can be sent
+ * @sk: sock structure
+ *
+ * Returns true if the send queue length is lower than the message limit.
+ * Always returns true if the socket is not connected (no iucv path for
+ * checking the message limit).
+ */
+static inline int iucv_below_msglim(struct sock *sk)
+{
+ struct iucv_sock *iucv = iucv_sk(sk);
+
+ if (sk->sk_state != IUCV_CONNECTED)
+ return 1;
+ return (skb_queue_len(&iucv->send_skb_q) < iucv->path->msglim);
+}
+
+/**
+ * iucv_sock_wake_msglim() - Wake up thread waiting on msg limit
+ */
+static void iucv_sock_wake_msglim(struct sock *sk)
+{
+ read_lock(&sk->sk_callback_lock);
+ if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
+ wake_up_interruptible_all(sk->sk_sleep);
+ sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
+ read_unlock(&sk->sk_callback_lock);
+}
+
/* Timers */
static void iucv_sock_timeout(unsigned long arg)
{
@@ -329,7 +403,9 @@
timeo = sk->sk_lingertime;
else
timeo = IUCV_DISCONN_TIMEOUT;
- err = iucv_sock_wait_state(sk, IUCV_CLOSED, 0, timeo);
+ err = iucv_sock_wait(sk,
+ iucv_sock_in_state(sk, IUCV_CLOSED, 0),
+ timeo);
}
case IUCV_CLOSING: /* fall through */
@@ -510,39 +586,6 @@
return NULL;
}
-int iucv_sock_wait_state(struct sock *sk, int state, int state2,
- unsigned long timeo)
-{
- DECLARE_WAITQUEUE(wait, current);
- int err = 0;
-
- add_wait_queue(sk->sk_sleep, &wait);
- while (sk->sk_state != state && sk->sk_state != state2) {
- set_current_state(TASK_INTERRUPTIBLE);
-
- if (!timeo) {
- err = -EAGAIN;
- break;
- }
-
- if (signal_pending(current)) {
- err = sock_intr_errno(timeo);
- break;
- }
-
- release_sock(sk);
- timeo = schedule_timeout(timeo);
- lock_sock(sk);
-
- err = sock_error(sk);
- if (err)
- break;
- }
- set_current_state(TASK_RUNNING);
- remove_wait_queue(sk->sk_sleep, &wait);
- return err;
-}
-
/* Bind an unbound socket */
static int iucv_sock_bind(struct socket *sock, struct sockaddr *addr,
int addr_len)
@@ -687,8 +730,9 @@
}
if (sk->sk_state != IUCV_CONNECTED) {
- err = iucv_sock_wait_state(sk, IUCV_CONNECTED, IUCV_DISCONN,
- sock_sndtimeo(sk, flags & O_NONBLOCK));
+ err = iucv_sock_wait(sk, iucv_sock_in_state(sk, IUCV_CONNECTED,
+ IUCV_DISCONN),
+ sock_sndtimeo(sk, flags & O_NONBLOCK));
}
if (sk->sk_state == IUCV_DISCONN) {
@@ -842,9 +886,11 @@
struct iucv_message txmsg;
struct cmsghdr *cmsg;
int cmsg_done;
+ long timeo;
char user_id[9];
char appl_id[9];
int err;
+ int noblock = msg->msg_flags & MSG_DONTWAIT;
err = sock_error(sk);
if (err)
@@ -864,108 +910,119 @@
goto out;
}
- if (sk->sk_state == IUCV_CONNECTED) {
- /* initialize defaults */
- cmsg_done = 0; /* check for duplicate headers */
- txmsg.class = 0;
+ /* Return if the socket is not in connected state */
+ if (sk->sk_state != IUCV_CONNECTED) {
+ err = -ENOTCONN;
+ goto out;
+ }
- /* iterate over control messages */
- for (cmsg = CMSG_FIRSTHDR(msg); cmsg;
- cmsg = CMSG_NXTHDR(msg, cmsg)) {
+ /* initialize defaults */
+ cmsg_done = 0; /* check for duplicate headers */
+ txmsg.class = 0;
- if (!CMSG_OK(msg, cmsg)) {
- err = -EINVAL;
- goto out;
- }
+ /* iterate over control messages */
+ for (cmsg = CMSG_FIRSTHDR(msg); cmsg;
+ cmsg = CMSG_NXTHDR(msg, cmsg)) {
- if (cmsg->cmsg_level != SOL_IUCV)
- continue;
-
- if (cmsg->cmsg_type & cmsg_done) {
- err = -EINVAL;
- goto out;
- }
- cmsg_done |= cmsg->cmsg_type;
-
- switch (cmsg->cmsg_type) {
- case SCM_IUCV_TRGCLS:
- if (cmsg->cmsg_len != CMSG_LEN(TRGCLS_SIZE)) {
- err = -EINVAL;
- goto out;
- }
-
- /* set iucv message target class */
- memcpy(&txmsg.class,
- (void *) CMSG_DATA(cmsg), TRGCLS_SIZE);
-
- break;
-
- default:
- err = -EINVAL;
- goto out;
- break;
- }
- }
-
- /* allocate one skb for each iucv message:
- * this is fine for SOCK_SEQPACKET (unless we want to support
- * segmented records using the MSG_EOR flag), but
- * for SOCK_STREAM we might want to improve it in future */
- if (!(skb = sock_alloc_send_skb(sk, len,
- msg->msg_flags & MSG_DONTWAIT,
- &err)))
+ if (!CMSG_OK(msg, cmsg)) {
+ err = -EINVAL;
goto out;
-
- if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) {
- err = -EFAULT;
- goto fail;
}
- /* increment and save iucv message tag for msg_completion cbk */
- txmsg.tag = iucv->send_tag++;
- memcpy(CB_TAG(skb), &txmsg.tag, CB_TAG_LEN);
- skb_queue_tail(&iucv->send_skb_q, skb);
+ if (cmsg->cmsg_level != SOL_IUCV)
+ continue;
- if (((iucv->path->flags & IUCV_IPRMDATA) & iucv->flags)
- && skb->len <= 7) {
- err = iucv_send_iprm(iucv->path, &txmsg, skb);
+ if (cmsg->cmsg_type & cmsg_done) {
+ err = -EINVAL;
+ goto out;
+ }
+ cmsg_done |= cmsg->cmsg_type;
- /* on success: there is no message_complete callback
- * for an IPRMDATA msg; remove skb from send queue */
- if (err == 0) {
- skb_unlink(skb, &iucv->send_skb_q);
- kfree_skb(skb);
+ switch (cmsg->cmsg_type) {
+ case SCM_IUCV_TRGCLS:
+ if (cmsg->cmsg_len != CMSG_LEN(TRGCLS_SIZE)) {
+ err = -EINVAL;
+ goto out;
}
- /* this error should never happen since the
- * IUCV_IPRMDATA path flag is set... sever path */
- if (err == 0x15) {
- iucv_path_sever(iucv->path, NULL);
- skb_unlink(skb, &iucv->send_skb_q);
- err = -EPIPE;
- goto fail;
- }
- } else
- err = iucv_message_send(iucv->path, &txmsg, 0, 0,
- (void *) skb->data, skb->len);
- if (err) {
- if (err == 3) {
- user_id[8] = 0;
- memcpy(user_id, iucv->dst_user_id, 8);
- appl_id[8] = 0;
- memcpy(appl_id, iucv->dst_name, 8);
- pr_err("Application %s on z/VM guest %s"
- " exceeds message limit\n",
- user_id, appl_id);
- }
+ /* set iucv message target class */
+ memcpy(&txmsg.class,
+ (void *) CMSG_DATA(cmsg), TRGCLS_SIZE);
+
+ break;
+
+ default:
+ err = -EINVAL;
+ goto out;
+ break;
+ }
+ }
+
+ /* allocate one skb for each iucv message:
+ * this is fine for SOCK_SEQPACKET (unless we want to support
+ * segmented records using the MSG_EOR flag), but
+ * for SOCK_STREAM we might want to improve it in future */
+ skb = sock_alloc_send_skb(sk, len, noblock, &err);
+ if (!skb)
+ goto out;
+ if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) {
+ err = -EFAULT;
+ goto fail;
+ }
+
+ /* wait until the number of outstanding messages is below the iucv path's message limit */
+ timeo = sock_sndtimeo(sk, noblock);
+ err = iucv_sock_wait(sk, iucv_below_msglim(sk), timeo);
+ if (err)
+ goto fail;
+
+ /* return -ECONNRESET if the socket is no longer connected */
+ if (sk->sk_state != IUCV_CONNECTED) {
+ err = -ECONNRESET;
+ goto fail;
+ }
+
+ /* increment and save iucv message tag for msg_completion cbk */
+ txmsg.tag = iucv->send_tag++;
+ memcpy(CB_TAG(skb), &txmsg.tag, CB_TAG_LEN);
+ skb_queue_tail(&iucv->send_skb_q, skb);
+
+ if (((iucv->path->flags & IUCV_IPRMDATA) & iucv->flags)
+ && skb->len <= 7) {
+ err = iucv_send_iprm(iucv->path, &txmsg, skb);
+
+ /* on success: there is no message_complete callback
+ * for an IPRMDATA msg; remove skb from send queue */
+ if (err == 0) {
+ skb_unlink(skb, &iucv->send_skb_q);
+ kfree_skb(skb);
+ }
+
+ /* this error should never happen since the
+ * IUCV_IPRMDATA path flag is set... sever path */
+ if (err == 0x15) {
+ iucv_path_sever(iucv->path, NULL);
skb_unlink(skb, &iucv->send_skb_q);
err = -EPIPE;
goto fail;
}
-
- } else {
- err = -ENOTCONN;
- goto out;
+ } else
+ err = iucv_message_send(iucv->path, &txmsg, 0, 0,
+ (void *) skb->data, skb->len);
+ if (err) {
+ if (err == 3) {
+ user_id[8] = 0;
+ memcpy(user_id, iucv->dst_user_id, 8);
+ appl_id[8] = 0;
+ memcpy(appl_id, iucv->dst_name, 8);
+ pr_err("Application %s on z/VM guest %s"
+ " exceeds message limit\n",
+ appl_id, user_id);
+ err = -EAGAIN;
+ } else
+ err = -EPIPE;
+ skb_unlink(skb, &iucv->send_skb_q);
+ goto fail;
}
release_sock(sk);
@@ -1581,7 +1638,11 @@
spin_unlock_irqrestore(&list->lock, flags);
- kfree_skb(this);
+ if (this) {
+ kfree_skb(this);
+ /* wake up any process waiting to send */
+ iucv_sock_wake_msglim(sk);
+ }
}
BUG_ON(!this);
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index 5f72b94..7508f11 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -335,7 +335,8 @@
h = __nf_conntrack_find(net, tuple);
if (h) {
ct = nf_ct_tuplehash_to_ctrack(h);
- if (unlikely(!atomic_inc_not_zero(&ct->ct_general.use)))
+ if (unlikely(nf_ct_is_dying(ct) ||
+ !atomic_inc_not_zero(&ct->ct_general.use)))
h = NULL;
else {
if (unlikely(!nf_ct_tuple_equal(tuple, &h->tuple))) {
@@ -425,7 +426,6 @@
/* Remove from unconfirmed list */
hlist_nulls_del_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode);
- __nf_conntrack_hash_insert(ct, hash, repl_hash);
/* Timer relative to confirmation time, not original
setting time, otherwise we'd get timer wrap in
weird delay cases. */
@@ -433,8 +433,16 @@
add_timer(&ct->timeout);
atomic_inc(&ct->ct_general.use);
set_bit(IPS_CONFIRMED_BIT, &ct->status);
+
+ /* Since the lookup is lockless, hash insertion must be done after
+ * starting the timer and setting the CONFIRMED bit. The RCU barriers
+ * guarantee that no other CPU can find the conntrack before the above
+ * stores are visible.
+ */
+ __nf_conntrack_hash_insert(ct, hash, repl_hash);
NF_CT_STAT_INC(net, insert);
spin_unlock_bh(&nf_conntrack_lock);
+
help = nfct_help(ct);
if (help && help->helper)
nf_conntrack_event_cache(IPCT_HELPER, ct);
@@ -503,7 +511,8 @@
cnt++;
}
- if (ct && unlikely(!atomic_inc_not_zero(&ct->ct_general.use)))
+ if (ct && unlikely(nf_ct_is_dying(ct) ||
+ !atomic_inc_not_zero(&ct->ct_general.use)))
ct = NULL;
if (ct || cnt >= NF_CT_EVICTION_RANGE)
break;
@@ -1267,13 +1276,19 @@
return ret;
}
+/*
+ * We need to use special "null" values, not used in hash table
+ */
+#define UNCONFIRMED_NULLS_VAL ((1<<30)+0)
+#define DYING_NULLS_VAL ((1<<30)+1)
+
static int nf_conntrack_init_net(struct net *net)
{
int ret;
atomic_set(&net->ct.count, 0);
- INIT_HLIST_NULLS_HEAD(&net->ct.unconfirmed, 0);
- INIT_HLIST_NULLS_HEAD(&net->ct.dying, 0);
+ INIT_HLIST_NULLS_HEAD(&net->ct.unconfirmed, UNCONFIRMED_NULLS_VAL);
+ INIT_HLIST_NULLS_HEAD(&net->ct.dying, DYING_NULLS_VAL);
net->ct.stat = alloc_percpu(struct ip_conntrack_stat);
if (!net->ct.stat) {
ret = -ENOMEM;
diff --git a/net/netfilter/nf_log.c b/net/netfilter/nf_log.c
index 2fefe14..4e62030 100644
--- a/net/netfilter/nf_log.c
+++ b/net/netfilter/nf_log.c
@@ -47,7 +47,6 @@
mutex_lock(&nf_log_mutex);
if (pf == NFPROTO_UNSPEC) {
- int i;
for (i = NFPROTO_UNSPEC; i < NFPROTO_NUMPROTO; i++)
list_add_tail(&(logger->list[i]), &(nf_loggers_l[i]));
} else {
@@ -216,7 +215,7 @@
#endif /* PROC_FS */
#ifdef CONFIG_SYSCTL
-struct ctl_path nf_log_sysctl_path[] = {
+static struct ctl_path nf_log_sysctl_path[] = {
{ .procname = "net", .ctl_name = CTL_NET, },
{ .procname = "netfilter", .ctl_name = NET_NETFILTER, },
{ .procname = "nf_log", .ctl_name = CTL_UNNUMBERED, },
@@ -228,19 +227,26 @@
static struct ctl_table_header *nf_log_dir_header;
static int nf_log_proc_dostring(ctl_table *table, int write, struct file *filp,
- void *buffer, size_t *lenp, loff_t *ppos)
+ void __user *buffer, size_t *lenp, loff_t *ppos)
{
const struct nf_logger *logger;
+ char buf[NFLOGGER_NAME_LEN];
+ size_t size = *lenp;
int r = 0;
int tindex = (unsigned long)table->extra1;
if (write) {
- if (!strcmp(buffer, "NONE")) {
+ if (size > sizeof(buf))
+ size = sizeof(buf);
+ if (copy_from_user(buf, buffer, size))
+ return -EFAULT;
+
+ if (!strcmp(buf, "NONE")) {
nf_log_unbind_pf(tindex);
return 0;
}
mutex_lock(&nf_log_mutex);
- logger = __find_logger(tindex, buffer);
+ logger = __find_logger(tindex, buf);
if (logger == NULL) {
mutex_unlock(&nf_log_mutex);
return -ENOENT;
diff --git a/net/netfilter/xt_NFQUEUE.c b/net/netfilter/xt_NFQUEUE.c
index 498b451..f28f6a5 100644
--- a/net/netfilter/xt_NFQUEUE.c
+++ b/net/netfilter/xt_NFQUEUE.c
@@ -40,12 +40,12 @@
static u32 hash_v4(const struct sk_buff *skb)
{
const struct iphdr *iph = ip_hdr(skb);
- u32 ipaddr;
+ __be32 ipaddr;
/* packets in either direction go into same queue */
ipaddr = iph->saddr ^ iph->daddr;
- return jhash_2words(ipaddr, iph->protocol, jhash_initval);
+ return jhash_2words((__force u32)ipaddr, iph->protocol, jhash_initval);
}
static unsigned int
@@ -63,14 +63,14 @@
static u32 hash_v6(const struct sk_buff *skb)
{
const struct ipv6hdr *ip6h = ipv6_hdr(skb);
- u32 addr[4];
+ __be32 addr[4];
addr[0] = ip6h->saddr.s6_addr32[0] ^ ip6h->daddr.s6_addr32[0];
addr[1] = ip6h->saddr.s6_addr32[1] ^ ip6h->daddr.s6_addr32[1];
addr[2] = ip6h->saddr.s6_addr32[2] ^ ip6h->daddr.s6_addr32[2];
addr[3] = ip6h->saddr.s6_addr32[3] ^ ip6h->daddr.s6_addr32[3];
- return jhash2(addr, ARRAY_SIZE(addr), jhash_initval);
+ return jhash2((__force u32 *)addr, ARRAY_SIZE(addr), jhash_initval);
}
static unsigned int
diff --git a/net/netfilter/xt_cluster.c b/net/netfilter/xt_cluster.c
index 69a639f..225ee3e 100644
--- a/net/netfilter/xt_cluster.c
+++ b/net/netfilter/xt_cluster.c
@@ -15,14 +15,14 @@
#include <net/netfilter/nf_conntrack.h>
#include <linux/netfilter/xt_cluster.h>
-static inline u_int32_t nf_ct_orig_ipv4_src(const struct nf_conn *ct)
+static inline u32 nf_ct_orig_ipv4_src(const struct nf_conn *ct)
{
- return ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u3.ip;
+ return (__force u32)ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u3.ip;
}
-static inline const void *nf_ct_orig_ipv6_src(const struct nf_conn *ct)
+static inline const u32 *nf_ct_orig_ipv6_src(const struct nf_conn *ct)
{
- return ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u3.ip6;
+ return (__force u32 *)ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u3.ip6;
}
static inline u_int32_t
diff --git a/net/netfilter/xt_quota.c b/net/netfilter/xt_quota.c
index 01dd07b..98fc190 100644
--- a/net/netfilter/xt_quota.c
+++ b/net/netfilter/xt_quota.c
@@ -54,6 +54,7 @@
if (q->master == NULL)
return -ENOMEM;
+ q->master->quota = q->quota;
return true;
}
diff --git a/net/netfilter/xt_rateest.c b/net/netfilter/xt_rateest.c
index 220a1d5..4fc6a91 100644
--- a/net/netfilter/xt_rateest.c
+++ b/net/netfilter/xt_rateest.c
@@ -66,7 +66,7 @@
if (info->flags & XT_RATEEST_MATCH_BPS)
ret &= bps1 == bps2;
if (info->flags & XT_RATEEST_MATCH_PPS)
- ret &= pps2 == pps2;
+ ret &= pps1 == pps2;
break;
}
diff --git a/net/rfkill/core.c b/net/rfkill/core.c
index 4e68ab4..79693fe 100644
--- a/net/rfkill/core.c
+++ b/net/rfkill/core.c
@@ -56,7 +56,6 @@
u32 idx;
bool registered;
- bool suspended;
bool persistent;
const struct rfkill_ops *ops;
@@ -224,7 +223,7 @@
static void rfkill_event(struct rfkill *rfkill)
{
- if (!rfkill->registered || rfkill->suspended)
+ if (!rfkill->registered)
return;
kobject_uevent(&rfkill->dev.kobj, KOBJ_CHANGE);
@@ -270,6 +269,9 @@
unsigned long flags;
int err;
+ if (unlikely(rfkill->dev.power.power_state.event & PM_EVENT_SLEEP))
+ return;
+
/*
* Some platforms (...!) generate input events which affect the
* _hard_ kill state -- whenever something tries to change the
@@ -292,9 +294,6 @@
rfkill->state |= RFKILL_BLOCK_SW_SETCALL;
spin_unlock_irqrestore(&rfkill->lock, flags);
- if (unlikely(rfkill->dev.power.power_state.event & PM_EVENT_SLEEP))
- return;
-
err = rfkill->ops->set_block(rfkill->data, blocked);
spin_lock_irqsave(&rfkill->lock, flags);
@@ -508,19 +507,32 @@
blocked = blocked || hwblock;
spin_unlock_irqrestore(&rfkill->lock, flags);
- if (!rfkill->registered) {
- rfkill->persistent = true;
- } else {
- if (prev != blocked && !hwblock)
- schedule_work(&rfkill->uevent_work);
+ if (!rfkill->registered)
+ return blocked;
- rfkill_led_trigger_event(rfkill);
- }
+ if (prev != blocked && !hwblock)
+ schedule_work(&rfkill->uevent_work);
+
+ rfkill_led_trigger_event(rfkill);
return blocked;
}
EXPORT_SYMBOL(rfkill_set_sw_state);
+void rfkill_init_sw_state(struct rfkill *rfkill, bool blocked)
+{
+ unsigned long flags;
+
+ BUG_ON(!rfkill);
+ BUG_ON(rfkill->registered);
+
+ spin_lock_irqsave(&rfkill->lock, flags);
+ __rfkill_set_sw_state(rfkill, blocked);
+ rfkill->persistent = true;
+ spin_unlock_irqrestore(&rfkill->lock, flags);
+}
+EXPORT_SYMBOL(rfkill_init_sw_state);
+
void rfkill_set_states(struct rfkill *rfkill, bool sw, bool hw)
{
unsigned long flags;
@@ -598,6 +610,15 @@
return sprintf(buf, "%d\n", rfkill->idx);
}
+static ssize_t rfkill_persistent_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct rfkill *rfkill = to_rfkill(dev);
+
+ return sprintf(buf, "%d\n", rfkill->persistent);
+}
+
static u8 user_state_from_blocked(unsigned long state)
{
if (state & RFKILL_BLOCK_HW)
@@ -656,6 +677,7 @@
__ATTR(name, S_IRUGO, rfkill_name_show, NULL),
__ATTR(type, S_IRUGO, rfkill_type_show, NULL),
__ATTR(index, S_IRUGO, rfkill_idx_show, NULL),
+ __ATTR(persistent, S_IRUGO, rfkill_persistent_show, NULL),
__ATTR(state, S_IRUGO|S_IWUSR, rfkill_state_show, rfkill_state_store),
__ATTR(claim, S_IRUGO|S_IWUSR, rfkill_claim_show, rfkill_claim_store),
__ATTR_NULL
@@ -718,8 +740,6 @@
rfkill_pause_polling(rfkill);
- rfkill->suspended = true;
-
return 0;
}
@@ -728,10 +748,10 @@
struct rfkill *rfkill = to_rfkill(dev);
bool cur;
- cur = !!(rfkill->state & RFKILL_BLOCK_SW);
- rfkill_set_block(rfkill, cur);
-
- rfkill->suspended = false;
+ if (!rfkill->persistent) {
+ cur = !!(rfkill->state & RFKILL_BLOCK_SW);
+ rfkill_set_block(rfkill, cur);
+ }
rfkill_resume_polling(rfkill);
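
For reference, the rfkill_init_sw_state() helper added above is meant to be called by drivers before rfkill_register(), to seed the software-block state from persistent hardware or firmware settings; it also marks the instance persistent so the core no longer re-applies a stale soft-block state on resume. A minimal, hypothetical driver sketch follows (the example_wlan names, ops and EEPROM helper are illustrative assumptions, not part of this patch):

static int example_wlan_setup_rfkill(struct device *dev, struct example_wlan *priv)
{
	int err;

	priv->rfkill = rfkill_alloc("example-wlan", dev, RFKILL_TYPE_WLAN,
				    &example_rfkill_ops, priv);
	if (!priv->rfkill)
		return -ENOMEM;

	/* Seed the soft-block state (e.g. read from EEPROM) before
	 * registration; this also marks the rfkill instance persistent. */
	rfkill_init_sw_state(priv->rfkill, example_read_soft_blocked(priv));

	err = rfkill_register(priv->rfkill);
	if (err)
		rfkill_destroy(priv->rfkill);
	return err;
}
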
diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
index a63de3f..6a4b190 100644
--- a/net/sctp/ipv6.c
+++ b/net/sctp/ipv6.c
@@ -133,7 +133,7 @@
/* ICMP error handler. */
SCTP_STATIC void sctp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
- int type, int code, int offset, __be32 info)
+ u8 type, u8 code, int offset, __be32 info)
{
struct inet6_dev *idev;
struct sock *sk;
diff --git a/net/sunrpc/Makefile b/net/sunrpc/Makefile
index 5369aa3..db73fd2 100644
--- a/net/sunrpc/Makefile
+++ b/net/sunrpc/Makefile
@@ -13,5 +13,6 @@
rpcb_clnt.o timer.o xdr.o \
sunrpc_syms.o cache.o rpc_pipe.o \
svc_xprt.o
+sunrpc-$(CONFIG_NFS_V4_1) += backchannel_rqst.o bc_svc.o
sunrpc-$(CONFIG_PROC_FS) += stats.o
sunrpc-$(CONFIG_SYSCTL) += sysctl.o
diff --git a/net/sunrpc/backchannel_rqst.c b/net/sunrpc/backchannel_rqst.c
new file mode 100644
index 0000000..553621f
--- /dev/null
+++ b/net/sunrpc/backchannel_rqst.c
@@ -0,0 +1,281 @@
+/******************************************************************************
+
+(c) 2007 Network Appliance, Inc. All Rights Reserved.
+(c) 2009 NetApp. All Rights Reserved.
+
+NetApp provides this source code under the GPL v2 License.
+The GPL v2 license is available at
+http://opensource.org/licenses/gpl-license.php.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+******************************************************************************/
+
+#include <linux/tcp.h>
+#include <linux/sunrpc/xprt.h>
+
+#ifdef RPC_DEBUG
+#define RPCDBG_FACILITY RPCDBG_TRANS
+#endif
+
+#if defined(CONFIG_NFS_V4_1)
+
+/*
+ * Helper routines that track the number of preallocation elements
+ * on the transport.
+ */
+static inline int xprt_need_to_requeue(struct rpc_xprt *xprt)
+{
+ return xprt->bc_alloc_count > 0;
+}
+
+static inline void xprt_inc_alloc_count(struct rpc_xprt *xprt, unsigned int n)
+{
+ xprt->bc_alloc_count += n;
+}
+
+static inline int xprt_dec_alloc_count(struct rpc_xprt *xprt, unsigned int n)
+{
+ return xprt->bc_alloc_count -= n;
+}
+
+/*
+ * Free the preallocated rpc_rqst structure and the memory
+ * buffers hanging off of it.
+ */
+static void xprt_free_allocation(struct rpc_rqst *req)
+{
+ struct xdr_buf *xbufp;
+
+ dprintk("RPC: free allocations for req= %p\n", req);
+ BUG_ON(test_bit(RPC_BC_PA_IN_USE, &req->rq_bc_pa_state));
+ xbufp = &req->rq_private_buf;
+ free_page((unsigned long)xbufp->head[0].iov_base);
+ xbufp = &req->rq_snd_buf;
+ free_page((unsigned long)xbufp->head[0].iov_base);
+ list_del(&req->rq_bc_pa_list);
+ kfree(req);
+}
+
+/*
+ * Preallocate up to min_reqs structures and related buffers for use
+ * by the backchannel. This function can be called multiple times
+ * when creating new sessions that use the same rpc_xprt. The
+ * preallocated buffers are added to the pool of resources used by
+ * the rpc_xprt. Any one of these resources may be used by an
+ * incoming callback request. It's up to the higher levels in the
+ * stack to enforce that the maximum number of session slots is not
+ * being exceeded.
+ *
+ * Some callback arguments can be large. For example, a pNFS server
+ * using multiple deviceids. The list can be unbounded, but the client
+ * has the ability to tell the server the maximum size of the callback
+ * requests. Each deviceID is 16 bytes, so allocate one page
+ * for the arguments to have enough room to receive a number of these
+ * deviceIDs. The NFS client indicates to the pNFS server that its
+ * callback requests can be up to 4096 bytes in size.
+ */
+int xprt_setup_backchannel(struct rpc_xprt *xprt, unsigned int min_reqs)
+{
+ struct page *page_rcv = NULL, *page_snd = NULL;
+ struct xdr_buf *xbufp = NULL;
+ struct rpc_rqst *req, *tmp;
+ struct list_head tmp_list;
+ int i;
+
+ dprintk("RPC: setup backchannel transport\n");
+
+ /*
+ * We use a temporary list to keep track of the preallocated
+ * buffers. Once we're done building the list we splice it
+ * into the backchannel preallocation list off of the rpc_xprt
+ * struct. This helps minimize the amount of time the list
+ * lock is held on the rpc_xprt struct. It also makes cleanup
+ * easier in case of memory allocation errors.
+ */
+ INIT_LIST_HEAD(&tmp_list);
+ for (i = 0; i < min_reqs; i++) {
+ /* Pre-allocate one backchannel rpc_rqst */
+ req = kzalloc(sizeof(struct rpc_rqst), GFP_KERNEL);
+ if (req == NULL) {
+ printk(KERN_ERR "Failed to create bc rpc_rqst\n");
+ goto out_free;
+ }
+
+ /* Add the allocated buffer to the tmp list */
+ dprintk("RPC: adding req= %p\n", req);
+ list_add(&req->rq_bc_pa_list, &tmp_list);
+
+ req->rq_xprt = xprt;
+ INIT_LIST_HEAD(&req->rq_list);
+ INIT_LIST_HEAD(&req->rq_bc_list);
+
+ /* Preallocate one XDR receive buffer */
+ page_rcv = alloc_page(GFP_KERNEL);
+ if (page_rcv == NULL) {
+ printk(KERN_ERR "Failed to create bc receive xbuf\n");
+ goto out_free;
+ }
+ xbufp = &req->rq_rcv_buf;
+ xbufp->head[0].iov_base = page_address(page_rcv);
+ xbufp->head[0].iov_len = PAGE_SIZE;
+ xbufp->tail[0].iov_base = NULL;
+ xbufp->tail[0].iov_len = 0;
+ xbufp->page_len = 0;
+ xbufp->len = PAGE_SIZE;
+ xbufp->buflen = PAGE_SIZE;
+
+ /* Preallocate one XDR send buffer */
+ page_snd = alloc_page(GFP_KERNEL);
+ if (page_snd == NULL) {
+ printk(KERN_ERR "Failed to create bc snd xbuf\n");
+ goto out_free;
+ }
+
+ xbufp = &req->rq_snd_buf;
+ xbufp->head[0].iov_base = page_address(page_snd);
+ xbufp->head[0].iov_len = 0;
+ xbufp->tail[0].iov_base = NULL;
+ xbufp->tail[0].iov_len = 0;
+ xbufp->page_len = 0;
+ xbufp->len = 0;
+ xbufp->buflen = PAGE_SIZE;
+ }
+
+ /*
+ * Add the temporary list to the backchannel preallocation list
+ */
+ spin_lock_bh(&xprt->bc_pa_lock);
+ list_splice(&tmp_list, &xprt->bc_pa_list);
+ xprt_inc_alloc_count(xprt, min_reqs);
+ spin_unlock_bh(&xprt->bc_pa_lock);
+
+ dprintk("RPC: setup backchannel transport done\n");
+ return 0;
+
+out_free:
+ /*
+ * Memory allocation failed, free the temporary list
+ */
+ list_for_each_entry_safe(req, tmp, &tmp_list, rq_bc_pa_list)
+ xprt_free_allocation(req);
+
+ dprintk("RPC: setup backchannel transport failed\n");
+ return -1;
+}
+EXPORT_SYMBOL(xprt_setup_backchannel);
+
+/*
+ * Destroys the backchannel preallocated structures.
+ * Since these structures may have been allocated by multiple calls
+ * to xprt_setup_backchannel, we only destroy up to the maximum number
+ * of reqs specified by the caller.
+ * @xprt: the transport holding the preallocated structures
+ * @max_reqs: the maximum number of preallocated structures to destroy
+ */
+void xprt_destroy_backchannel(struct rpc_xprt *xprt, unsigned int max_reqs)
+{
+ struct rpc_rqst *req = NULL, *tmp = NULL;
+
+ dprintk("RPC: destroy backchannel transport\n");
+
+ BUG_ON(max_reqs == 0);
+ spin_lock_bh(&xprt->bc_pa_lock);
+ xprt_dec_alloc_count(xprt, max_reqs);
+ list_for_each_entry_safe(req, tmp, &xprt->bc_pa_list, rq_bc_pa_list) {
+ dprintk("RPC: req=%p\n", req);
+ xprt_free_allocation(req);
+ if (--max_reqs == 0)
+ break;
+ }
+ spin_unlock_bh(&xprt->bc_pa_lock);
+
+ dprintk("RPC: backchannel list empty= %s\n",
+ list_empty(&xprt->bc_pa_list) ? "true" : "false");
+}
+EXPORT_SYMBOL(xprt_destroy_backchannel);
+
+/*
+ * One or more rpc_rqst structures have been preallocated during the
+ * backchannel setup. Buffer space for the send and private XDR buffers
+ * has been preallocated as well. Use xprt_alloc_bc_request to obtain one
+ * of these preallocated requests and xprt_free_bc_request to return it.
+ *
+ * We know that we're called in soft interrupt context, so take the plain
+ * spin_lock since there is no need for the bottom-half spin_lock_bh variant.
+ *
+ * Return an available rpc_rqst, otherwise NULL if none are available.
+ */
+struct rpc_rqst *xprt_alloc_bc_request(struct rpc_xprt *xprt)
+{
+ struct rpc_rqst *req;
+
+ dprintk("RPC: allocate a backchannel request\n");
+ spin_lock(&xprt->bc_pa_lock);
+ if (!list_empty(&xprt->bc_pa_list)) {
+ req = list_first_entry(&xprt->bc_pa_list, struct rpc_rqst,
+ rq_bc_pa_list);
+ list_del(&req->rq_bc_pa_list);
+ } else {
+ req = NULL;
+ }
+ spin_unlock(&xprt->bc_pa_lock);
+
+ if (req != NULL) {
+ set_bit(RPC_BC_PA_IN_USE, &req->rq_bc_pa_state);
+ req->rq_reply_bytes_recvd = 0;
+ req->rq_bytes_sent = 0;
+ memcpy(&req->rq_private_buf, &req->rq_rcv_buf,
+ sizeof(req->rq_private_buf));
+ }
+ dprintk("RPC: backchannel req=%p\n", req);
+ return req;
+}
+
+/*
+ * Return the preallocated rpc_rqst structure and XDR buffers
+ * associated with this rpc_task.
+ */
+void xprt_free_bc_request(struct rpc_rqst *req)
+{
+ struct rpc_xprt *xprt = req->rq_xprt;
+
+ dprintk("RPC: free backchannel req=%p\n", req);
+
+ smp_mb__before_clear_bit();
+ BUG_ON(!test_bit(RPC_BC_PA_IN_USE, &req->rq_bc_pa_state));
+ clear_bit(RPC_BC_PA_IN_USE, &req->rq_bc_pa_state);
+ smp_mb__after_clear_bit();
+
+ if (!xprt_need_to_requeue(xprt)) {
+ /*
+ * The last remaining session was destroyed while this
+ * entry was in use. Free the entry and don't attempt
+ * to add back to the list because there is no need to
+ * have any more preallocated entries.
+ */
+ dprintk("RPC: Last session removed req=%p\n", req);
+ xprt_free_allocation(req);
+ return;
+ }
+
+ /*
+ * Return it to the list of preallocations so that it
+ * may be reused by a new callback request.
+ */
+ spin_lock_bh(&xprt->bc_pa_lock);
+ list_add(&req->rq_bc_pa_list, &xprt->bc_pa_list);
+ spin_unlock_bh(&xprt->bc_pa_lock);
+}
+
+#endif /* CONFIG_NFS_V4_1 */
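
As a rough sketch of how these helpers are expected to pair up over a session's lifetime (the example_* wrappers below are hypothetical; only the xprt_*_backchannel and xprt_*_bc_request calls come from this patch): xprt_setup_backchannel() preallocates one rpc_rqst plus send/receive pages per callback slot when a session is created, xprt_alloc_bc_request() hands one out as a callback arrives, xprt_free_bc_request() returns it, and xprt_destroy_backchannel() tears the pool down with the last session.

/* Sketch only: session bookkeeping and error handling are elided. */
static int example_session_create(struct rpc_xprt *xprt, unsigned int slots)
{
	/* one preallocated rpc_rqst (plus XDR pages) per callback slot */
	return xprt_setup_backchannel(xprt, slots);
}

static void example_session_destroy(struct rpc_xprt *xprt, unsigned int slots)
{
	xprt_destroy_backchannel(xprt, slots);
}

/* called from the transport receive path when a callback request arrives */
static struct rpc_rqst *example_grab_callback_slot(struct rpc_xprt *xprt)
{
	/* NULL means the server sent more callbacks than negotiated slots */
	struct rpc_rqst *req = xprt_alloc_bc_request(xprt);

	return req;	/* later released with xprt_free_bc_request(req) */
}
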
diff --git a/net/sunrpc/bc_svc.c b/net/sunrpc/bc_svc.c
new file mode 100644
index 0000000..13f214f
--- /dev/null
+++ b/net/sunrpc/bc_svc.c
@@ -0,0 +1,81 @@
+/******************************************************************************
+
+(c) 2007 Network Appliance, Inc. All Rights Reserved.
+(c) 2009 NetApp. All Rights Reserved.
+
+NetApp provides this source code under the GPL v2 License.
+The GPL v2 license is available at
+http://opensource.org/licenses/gpl-license.php.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+******************************************************************************/
+
+/*
+ * The NFSv4.1 callback service helper routines.
+ * They implement the transport level processing required to send the
+ * reply over an existing open connection previously established by the client.
+ */
+
+#if defined(CONFIG_NFS_V4_1)
+
+#include <linux/module.h>
+
+#include <linux/sunrpc/xprt.h>
+#include <linux/sunrpc/sched.h>
+#include <linux/sunrpc/bc_xprt.h>
+
+#define RPCDBG_FACILITY RPCDBG_SVCDSP
+
+void bc_release_request(struct rpc_task *task)
+{
+ struct rpc_rqst *req = task->tk_rqstp;
+
+ dprintk("RPC: bc_release_request: task= %p\n", task);
+
+ /*
+ * Release this request only if it's a backchannel
+ * preallocated request
+ */
+ if (!bc_prealloc(req))
+ return;
+ xprt_free_bc_request(req);
+}
+
+/* Empty callback ops */
+static const struct rpc_call_ops nfs41_callback_ops = {
+};
+
+
+/*
+ * Send the callback reply
+ */
+int bc_send(struct rpc_rqst *req)
+{
+ struct rpc_task *task;
+ int ret;
+
+ dprintk("RPC: bc_send req= %p\n", req);
+ task = rpc_run_bc_task(req, &nfs41_callback_ops);
+ if (IS_ERR(task))
+ ret = PTR_ERR(task);
+ else {
+ BUG_ON(atomic_read(&task->tk_count) != 1);
+ ret = task->tk_status;
+ rpc_put_task(task);
+ }
+ dprintk("RPC: bc_send ret= %d\n", ret);
+ return ret;
+}
+
+#endif /* CONFIG_NFS_V4_1 */
diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c
index 20029a7..ff0c230 100644
--- a/net/sunrpc/cache.c
+++ b/net/sunrpc/cache.c
@@ -488,7 +488,7 @@
{
int delay = 5;
if (cache_clean() == -1)
- delay = 30*HZ;
+ delay = round_jiffies_relative(30*HZ);
if (list_empty(&cache_list))
delay = 0;
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
index 5abab09..5bc2f45 100644
--- a/net/sunrpc/clnt.c
+++ b/net/sunrpc/clnt.c
@@ -36,7 +36,9 @@
#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/rpc_pipe_fs.h>
#include <linux/sunrpc/metrics.h>
+#include <linux/sunrpc/bc_xprt.h>
+#include "sunrpc.h"
#ifdef RPC_DEBUG
# define RPCDBG_FACILITY RPCDBG_CALL
@@ -63,6 +65,9 @@
static void call_bind(struct rpc_task *task);
static void call_bind_status(struct rpc_task *task);
static void call_transmit(struct rpc_task *task);
+#if defined(CONFIG_NFS_V4_1)
+static void call_bc_transmit(struct rpc_task *task);
+#endif /* CONFIG_NFS_V4_1 */
static void call_status(struct rpc_task *task);
static void call_transmit_status(struct rpc_task *task);
static void call_refresh(struct rpc_task *task);
@@ -613,6 +618,50 @@
}
EXPORT_SYMBOL_GPL(rpc_call_async);
+#if defined(CONFIG_NFS_V4_1)
+/**
+ * rpc_run_bc_task - Allocate a new RPC task for backchannel use, then run
+ * rpc_execute against it
+ * @req: the backchannel rpc_rqst to send
+ * @tk_ops: RPC call ops
+ */
+struct rpc_task *rpc_run_bc_task(struct rpc_rqst *req,
+ const struct rpc_call_ops *tk_ops)
+{
+ struct rpc_task *task;
+ struct xdr_buf *xbufp = &req->rq_snd_buf;
+ struct rpc_task_setup task_setup_data = {
+ .callback_ops = tk_ops,
+ };
+
+ dprintk("RPC: rpc_run_bc_task req= %p\n", req);
+ /*
+ * Create an rpc_task to send the data
+ */
+ task = rpc_new_task(&task_setup_data);
+ if (!task) {
+ xprt_free_bc_request(req);
+ goto out;
+ }
+ task->tk_rqstp = req;
+
+ /*
+ * Set up the xdr_buf length.
+ * This also indicates that the buffer is XDR encoded already.
+ */
+ xbufp->len = xbufp->head[0].iov_len + xbufp->page_len +
+ xbufp->tail[0].iov_len;
+
+ task->tk_action = call_bc_transmit;
+ atomic_inc(&task->tk_count);
+ BUG_ON(atomic_read(&task->tk_count) != 2);
+ rpc_execute(task);
+
+out:
+ dprintk("RPC: rpc_run_bc_task: task= %p\n", task);
+ return task;
+}
+#endif /* CONFIG_NFS_V4_1 */
+
void
rpc_call_start(struct rpc_task *task)
{
@@ -695,6 +744,19 @@
EXPORT_SYMBOL_GPL(rpc_force_rebind);
/*
+ * Restart an (async) RPC call from the call_prepare state.
+ * Usually called from within the exit handler.
+ */
+void
+rpc_restart_call_prepare(struct rpc_task *task)
+{
+ if (RPC_ASSASSINATED(task))
+ return;
+ task->tk_action = rpc_prepare_task;
+}
+EXPORT_SYMBOL_GPL(rpc_restart_call_prepare);
+
+/*
* Restart an (async) RPC call. Usually called from within the
* exit handler.
*/
@@ -1085,7 +1147,7 @@
* in order to allow access to the socket to other RPC requests.
*/
call_transmit_status(task);
- if (task->tk_msg.rpc_proc->p_decode != NULL)
+ if (rpc_reply_expected(task))
return;
task->tk_action = rpc_exit_task;
rpc_wake_up_queued_task(&task->tk_xprt->pending, task);
@@ -1120,6 +1182,72 @@
}
}
+#if defined(CONFIG_NFS_V4_1)
+/*
+ * 5b. Send the backchannel RPC reply. On error, drop the reply. In
+ * addition, disconnect on connectivity errors.
+ */
+static void
+call_bc_transmit(struct rpc_task *task)
+{
+ struct rpc_rqst *req = task->tk_rqstp;
+
+ BUG_ON(task->tk_status != 0);
+ task->tk_status = xprt_prepare_transmit(task);
+ if (task->tk_status == -EAGAIN) {
+ /*
+ * Could not reserve the transport. Try again after the
+ * transport is released.
+ */
+ task->tk_status = 0;
+ task->tk_action = call_bc_transmit;
+ return;
+ }
+
+ task->tk_action = rpc_exit_task;
+ if (task->tk_status < 0) {
+ printk(KERN_NOTICE "RPC: Could not send backchannel reply "
+ "error: %d\n", task->tk_status);
+ return;
+ }
+
+ xprt_transmit(task);
+ xprt_end_transmit(task);
+ dprint_status(task);
+ switch (task->tk_status) {
+ case 0:
+ /* Success */
+ break;
+ case -EHOSTDOWN:
+ case -EHOSTUNREACH:
+ case -ENETUNREACH:
+ case -ETIMEDOUT:
+ /*
+ * Problem reaching the server. Disconnect and let the
+ * forechannel reestablish the connection. The server will
+ * have to retransmit the backchannel request and we'll
+ * reprocess it. Since these ops are idempotent, there's no
+ * need to cache our reply at this time.
+ */
+ printk(KERN_NOTICE "RPC: Could not send backchannel reply "
+ "error: %d\n", task->tk_status);
+ xprt_conditional_disconnect(task->tk_xprt,
+ req->rq_connect_cookie);
+ break;
+ default:
+ /*
+ * We were unable to reply and will have to drop the
+ * request. The server should reconnect and retransmit.
+ */
+ BUG_ON(task->tk_status == -EAGAIN);
+ printk(KERN_NOTICE "RPC: Could not send backchannel reply "
+ "error: %d\n", task->tk_status);
+ break;
+ }
+ rpc_wake_up_queued_task(&req->rq_xprt->pending, task);
+}
+#endif /* CONFIG_NFS_V4_1 */
+
/*
* 6. Sort out the RPC call status
*/
@@ -1130,8 +1258,8 @@
struct rpc_rqst *req = task->tk_rqstp;
int status;
- if (req->rq_received > 0 && !req->rq_bytes_sent)
- task->tk_status = req->rq_received;
+ if (req->rq_reply_bytes_recvd > 0 && !req->rq_bytes_sent)
+ task->tk_status = req->rq_reply_bytes_recvd;
dprint_status(task);
@@ -1248,7 +1376,7 @@
/*
* Ensure that we see all writes made by xprt_complete_rqst()
- * before it changed req->rq_received.
+ * before it changed req->rq_reply_bytes_recvd.
*/
smp_rmb();
req->rq_rcv_buf.len = req->rq_private_buf.len;
@@ -1289,7 +1417,7 @@
task->tk_status = 0;
/* Note: rpc_verify_header() may have freed the RPC slot */
if (task->tk_rqstp == req) {
- req->rq_received = req->rq_rcv_buf.len = 0;
+ req->rq_reply_bytes_recvd = req->rq_rcv_buf.len = 0;
if (task->tk_client->cl_discrtry)
xprt_conditional_disconnect(task->tk_xprt,
req->rq_connect_cookie);
@@ -1377,13 +1505,14 @@
}
if ((len -= 3) < 0)
goto out_overflow;
- p += 1; /* skip XID */
+ p += 1; /* skip XID */
if ((n = ntohl(*p++)) != RPC_REPLY) {
dprintk("RPC: %5u %s: not an RPC reply: %x\n",
- task->tk_pid, __func__, n);
+ task->tk_pid, __func__, n);
goto out_garbage;
}
+
if ((n = ntohl(*p++)) != RPC_MSG_ACCEPTED) {
if (--len < 0)
goto out_overflow;
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
index ff50a05..1102ce1 100644
--- a/net/sunrpc/sched.c
+++ b/net/sunrpc/sched.c
@@ -569,7 +569,7 @@
/*
* Helper to call task->tk_ops->rpc_call_prepare
*/
-static void rpc_prepare_task(struct rpc_task *task)
+void rpc_prepare_task(struct rpc_task *task)
{
task->tk_ops->rpc_call_prepare(task, task->tk_calldata);
}
diff --git a/net/sunrpc/stats.c b/net/sunrpc/stats.c
index 1ef6e46..1b4e679 100644
--- a/net/sunrpc/stats.c
+++ b/net/sunrpc/stats.c
@@ -141,12 +141,14 @@
void rpc_count_iostats(struct rpc_task *task)
{
struct rpc_rqst *req = task->tk_rqstp;
- struct rpc_iostats *stats = task->tk_client->cl_metrics;
+ struct rpc_iostats *stats;
struct rpc_iostats *op_metrics;
long rtt, execute, queue;
- if (!stats || !req)
+ if (!task->tk_client || !task->tk_client->cl_metrics || !req)
return;
+
+ stats = task->tk_client->cl_metrics;
op_metrics = &stats[task->tk_msg.rpc_proc->p_statidx];
op_metrics->om_ops++;
@@ -154,7 +156,7 @@
op_metrics->om_timeouts += task->tk_timeouts;
op_metrics->om_bytes_sent += task->tk_bytes_sent;
- op_metrics->om_bytes_recv += req->rq_received;
+ op_metrics->om_bytes_recv += req->rq_reply_bytes_recvd;
queue = (long)req->rq_xtime - task->tk_start;
if (queue < 0)
diff --git a/net/sunrpc/sunrpc.h b/net/sunrpc/sunrpc.h
new file mode 100644
index 0000000..5d9dd74
--- /dev/null
+++ b/net/sunrpc/sunrpc.h
@@ -0,0 +1,37 @@
+/******************************************************************************
+
+(c) 2008 NetApp. All Rights Reserved.
+
+NetApp provides this source code under the GPL v2 License.
+The GPL v2 license is available at
+http://opensource.org/licenses/gpl-license.php.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+******************************************************************************/
+
+/*
+ * Functions and macros used internally by RPC
+ */
+
+#ifndef _NET_SUNRPC_SUNRPC_H
+#define _NET_SUNRPC_SUNRPC_H
+
+static inline int rpc_reply_expected(struct rpc_task *task)
+{
+ return (task->tk_msg.rpc_proc != NULL) &&
+ (task->tk_msg.rpc_proc->p_decode != NULL);
+}
+
+#endif /* _NET_SUNRPC_SUNRPC_H */
+
diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
index 5ed8931..952f206 100644
--- a/net/sunrpc/svc.c
+++ b/net/sunrpc/svc.c
@@ -25,6 +25,7 @@
#include <linux/sunrpc/stats.h>
#include <linux/sunrpc/svcsock.h>
#include <linux/sunrpc/clnt.h>
+#include <linux/sunrpc/bc_xprt.h>
#define RPCDBG_FACILITY RPCDBG_SVCDSP
@@ -486,6 +487,10 @@
if (svc_serv_is_pooled(serv))
svc_pool_map_put();
+#if defined(CONFIG_NFS_V4_1)
+ svc_sock_destroy(serv->bc_xprt);
+#endif /* CONFIG_NFS_V4_1 */
+
svc_unregister(serv);
kfree(serv->sv_pools);
kfree(serv);
@@ -970,20 +975,18 @@
}
/*
- * Process the RPC request.
+ * Common routine for processing the RPC request.
*/
-int
-svc_process(struct svc_rqst *rqstp)
+static int
+svc_process_common(struct svc_rqst *rqstp, struct kvec *argv, struct kvec *resv)
{
struct svc_program *progp;
struct svc_version *versp = NULL; /* compiler food */
struct svc_procedure *procp = NULL;
- struct kvec * argv = &rqstp->rq_arg.head[0];
- struct kvec * resv = &rqstp->rq_res.head[0];
struct svc_serv *serv = rqstp->rq_server;
kxdrproc_t xdr;
__be32 *statp;
- u32 dir, prog, vers, proc;
+ u32 prog, vers, proc;
__be32 auth_stat, rpc_stat;
int auth_res;
__be32 *reply_statp;
@@ -993,19 +996,6 @@
if (argv->iov_len < 6*4)
goto err_short_len;
- /* setup response xdr_buf.
- * Initially it has just one page
- */
- rqstp->rq_resused = 1;
- resv->iov_base = page_address(rqstp->rq_respages[0]);
- resv->iov_len = 0;
- rqstp->rq_res.pages = rqstp->rq_respages + 1;
- rqstp->rq_res.len = 0;
- rqstp->rq_res.page_base = 0;
- rqstp->rq_res.page_len = 0;
- rqstp->rq_res.buflen = PAGE_SIZE;
- rqstp->rq_res.tail[0].iov_base = NULL;
- rqstp->rq_res.tail[0].iov_len = 0;
/* Will be turned off only in gss privacy case: */
rqstp->rq_splice_ok = 1;
/* Will be turned off only when NFSv4 Sessions are used */
@@ -1014,17 +1004,13 @@
/* Setup reply header */
rqstp->rq_xprt->xpt_ops->xpo_prep_reply_hdr(rqstp);
- rqstp->rq_xid = svc_getu32(argv);
svc_putu32(resv, rqstp->rq_xid);
- dir = svc_getnl(argv);
vers = svc_getnl(argv);
/* First words of reply: */
svc_putnl(resv, 1); /* REPLY */
- if (dir != 0) /* direction != CALL */
- goto err_bad_dir;
if (vers != 2) /* RPC version number */
goto err_bad_rpc;
@@ -1147,7 +1133,7 @@
sendit:
if (svc_authorise(rqstp))
goto dropit;
- return svc_send(rqstp);
+ return 1; /* Caller can now send it */
dropit:
svc_authorise(rqstp); /* doesn't hurt to call this twice */
@@ -1161,12 +1147,6 @@
goto dropit; /* drop request */
-err_bad_dir:
- svc_printk(rqstp, "bad direction %d, dropping request\n", dir);
-
- serv->sv_stats->rpcbadfmt++;
- goto dropit; /* drop request */
-
err_bad_rpc:
serv->sv_stats->rpcbadfmt++;
svc_putnl(resv, 1); /* REJECT */
@@ -1220,6 +1200,100 @@
EXPORT_SYMBOL_GPL(svc_process);
/*
+ * Process the RPC request.
+ */
+int
+svc_process(struct svc_rqst *rqstp)
+{
+ struct kvec *argv = &rqstp->rq_arg.head[0];
+ struct kvec *resv = &rqstp->rq_res.head[0];
+ struct svc_serv *serv = rqstp->rq_server;
+ u32 dir;
+ int error;
+
+ /*
+ * Setup response xdr_buf.
+ * Initially it has just one page
+ */
+ rqstp->rq_resused = 1;
+ resv->iov_base = page_address(rqstp->rq_respages[0]);
+ resv->iov_len = 0;
+ rqstp->rq_res.pages = rqstp->rq_respages + 1;
+ rqstp->rq_res.len = 0;
+ rqstp->rq_res.page_base = 0;
+ rqstp->rq_res.page_len = 0;
+ rqstp->rq_res.buflen = PAGE_SIZE;
+ rqstp->rq_res.tail[0].iov_base = NULL;
+ rqstp->rq_res.tail[0].iov_len = 0;
+
+ rqstp->rq_xid = svc_getu32(argv);
+
+ dir = svc_getnl(argv);
+ if (dir != 0) {
+ /* direction != CALL */
+ svc_printk(rqstp, "bad direction %d, dropping request\n", dir);
+ serv->sv_stats->rpcbadfmt++;
+ svc_drop(rqstp);
+ return 0;
+ }
+
+ error = svc_process_common(rqstp, argv, resv);
+ if (error <= 0)
+ return error;
+
+ return svc_send(rqstp);
+}
+
+#if defined(CONFIG_NFS_V4_1)
+/*
+ * Process a backchannel RPC request that arrived over an existing
+ * outbound connection
+ */
+int
+bc_svc_process(struct svc_serv *serv, struct rpc_rqst *req,
+ struct svc_rqst *rqstp)
+{
+ struct kvec *argv = &rqstp->rq_arg.head[0];
+ struct kvec *resv = &rqstp->rq_res.head[0];
+ int error;
+
+ /* Build the svc_rqst used by the common processing routine */
+ rqstp->rq_xprt = serv->bc_xprt;
+ rqstp->rq_xid = req->rq_xid;
+ rqstp->rq_prot = req->rq_xprt->prot;
+ rqstp->rq_server = serv;
+
+ rqstp->rq_addrlen = sizeof(req->rq_xprt->addr);
+ memcpy(&rqstp->rq_addr, &req->rq_xprt->addr, rqstp->rq_addrlen);
+ memcpy(&rqstp->rq_arg, &req->rq_rcv_buf, sizeof(rqstp->rq_arg));
+ memcpy(&rqstp->rq_res, &req->rq_snd_buf, sizeof(rqstp->rq_res));
+
+ /* reset result send buffer "put" position */
+ resv->iov_len = 0;
+
+ if (rqstp->rq_prot != IPPROTO_TCP) {
+ printk(KERN_ERR "No support for Non-TCP transports!\n");
+ BUG();
+ }
+
+ /*
+ * Skip the next two words because they've already been
+ * processed in the transport
+ */
+ svc_getu32(argv); /* XID */
+ svc_getnl(argv); /* CALLDIR */
+
+ error = svc_process_common(rqstp, argv, resv);
+ if (error <= 0)
+ return error;
+
+ memcpy(&req->rq_snd_buf, &rqstp->rq_res, sizeof(req->rq_snd_buf));
+ return bc_send(req);
+}
+EXPORT_SYMBOL(bc_svc_process);
+#endif /* CONFIG_NFS_V4_1 */
+
+/*
* Return (transport-specific) limit on the rpc payload.
*/
u32 svc_max_payload(const struct svc_rqst *rqstp)
diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
index c200d92..6f33d33 100644
--- a/net/sunrpc/svc_xprt.c
+++ b/net/sunrpc/svc_xprt.c
@@ -11,6 +11,7 @@
#include <net/sock.h>
#include <linux/sunrpc/stats.h>
#include <linux/sunrpc/svc_xprt.h>
+#include <linux/sunrpc/svcsock.h>
#define RPCDBG_FACILITY RPCDBG_SVCXPRT
@@ -1097,36 +1098,58 @@
}
EXPORT_SYMBOL_GPL(svc_find_xprt);
-/*
- * Format a buffer with a list of the active transports. A zero for
- * the buflen parameter disables target buffer overflow checking.
+static int svc_one_xprt_name(const struct svc_xprt *xprt,
+ char *pos, int remaining)
+{
+ int len;
+
+ len = snprintf(pos, remaining, "%s %u\n",
+ xprt->xpt_class->xcl_name,
+ svc_xprt_local_port(xprt));
+ if (len >= remaining)
+ return -ENAMETOOLONG;
+ return len;
+}
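
The helper above leans on the C99 snprintf() contract: the return value is
the length the formatted string would have had, so a result greater than or
equal to the remaining space means the output was truncated. A small
stand-alone illustration of the same idiom (hypothetical names, not part of
the patch):

	#include <errno.h>
	#include <stdio.h>

	/* Format "<name> <port>\n" into pos; report truncation as an error. */
	static int format_xprt_name(char *pos, int remaining,
				    const char *name, unsigned int port)
	{
		int len = snprintf(pos, remaining, "%s %u\n", name, port);

		if (len >= remaining)
			return -ENAMETOOLONG;	/* caller sees a negative errno */
		return len;
	}
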
+
+/**
+ * svc_xprt_names - format a buffer with a list of transport names
+ * @serv: pointer to an RPC service
+ * @buf: pointer to a buffer to be filled in
+ * @buflen: length of buffer to be filled in
+ *
+ * Fills in @buf with a string containing a list of transport names,
+ * each name terminated with '\n'.
+ *
+ * Returns positive length of the filled-in string on success; otherwise
+ * a negative errno value on error.
*/
-int svc_xprt_names(struct svc_serv *serv, char *buf, int buflen)
+int svc_xprt_names(struct svc_serv *serv, char *buf, const int buflen)
{
struct svc_xprt *xprt;
- char xprt_str[64];
- int totlen = 0;
- int len;
+ int len, totlen;
+ char *pos;
/* Sanity check args */
if (!serv)
return 0;
spin_lock_bh(&serv->sv_lock);
+
+ pos = buf;
+ totlen = 0;
list_for_each_entry(xprt, &serv->sv_permsocks, xpt_list) {
- len = snprintf(xprt_str, sizeof(xprt_str),
- "%s %d\n", xprt->xpt_class->xcl_name,
- svc_xprt_local_port(xprt));
- /* If the string was truncated, replace with error string */
- if (len >= sizeof(xprt_str))
- strcpy(xprt_str, "name-too-long\n");
- /* Don't overflow buffer */
- len = strlen(xprt_str);
- if (buflen && (len + totlen >= buflen))
+ len = svc_one_xprt_name(xprt, pos, buflen - totlen);
+ if (len < 0) {
+ *buf = '\0';
+ totlen = len;
+ }
+ if (len <= 0)
break;
- strcpy(buf+totlen, xprt_str);
+
+ pos += len;
totlen += len;
}
+
spin_unlock_bh(&serv->sv_lock);
return totlen;
}
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
index 9d50423..23128ee 100644
--- a/net/sunrpc/svcsock.c
+++ b/net/sunrpc/svcsock.c
@@ -240,42 +240,76 @@
/*
* Report socket names for nfsdfs
*/
-static int one_sock_name(char *buf, struct svc_sock *svsk)
+static int svc_one_sock_name(struct svc_sock *svsk, char *buf, int remaining)
{
+ const struct sock *sk = svsk->sk_sk;
+ const char *proto_name = sk->sk_protocol == IPPROTO_UDP ?
+ "udp" : "tcp";
int len;
- switch(svsk->sk_sk->sk_family) {
- case AF_INET:
- len = sprintf(buf, "ipv4 %s %pI4 %d\n",
- svsk->sk_sk->sk_protocol == IPPROTO_UDP ?
- "udp" : "tcp",
- &inet_sk(svsk->sk_sk)->rcv_saddr,
- inet_sk(svsk->sk_sk)->num);
+ switch (sk->sk_family) {
+ case PF_INET:
+ len = snprintf(buf, remaining, "ipv4 %s %pI4 %d\n",
+ proto_name,
+ &inet_sk(sk)->rcv_saddr,
+ inet_sk(sk)->num);
+ break;
+ case PF_INET6:
+ len = snprintf(buf, remaining, "ipv6 %s %pI6 %d\n",
+ proto_name,
+ &inet6_sk(sk)->rcv_saddr,
+ inet_sk(sk)->num);
break;
default:
- len = sprintf(buf, "*unknown-%d*\n",
- svsk->sk_sk->sk_family);
+ len = snprintf(buf, remaining, "*unknown-%d*\n",
+ sk->sk_family);
+ }
+
+ if (len >= remaining) {
+ *buf = '\0';
+ return -ENAMETOOLONG;
}
return len;
}
-int
-svc_sock_names(char *buf, struct svc_serv *serv, char *toclose)
+/**
+ * svc_sock_names - construct a list of listener names in a string
+ * @serv: pointer to RPC service
+ * @buf: pointer to a buffer to fill in with socket names
+ * @buflen: size of the buffer to be filled
+ * @toclose: pointer to '\0'-terminated C string containing the name
+ * of a listener to be closed
+ *
+ * Fills in @buf with a '\n'-separated list of names of listener
+ * sockets. If @toclose is not NULL, the socket named by @toclose
+ * is closed, and is not included in the output list.
+ *
+ * Returns positive length of the socket name string, or a negative
+ * errno value on error.
+ */
+int svc_sock_names(struct svc_serv *serv, char *buf, const size_t buflen,
+ const char *toclose)
{
struct svc_sock *svsk, *closesk = NULL;
int len = 0;
if (!serv)
return 0;
+
spin_lock_bh(&serv->sv_lock);
list_for_each_entry(svsk, &serv->sv_permsocks, sk_xprt.xpt_list) {
- int onelen = one_sock_name(buf+len, svsk);
- if (toclose && strcmp(toclose, buf+len) == 0)
+ int onelen = svc_one_sock_name(svsk, buf + len, buflen - len);
+ if (onelen < 0) {
+ len = onelen;
+ break;
+ }
+ if (toclose && strcmp(toclose, buf + len) == 0)
closesk = svsk;
else
len += onelen;
}
spin_unlock_bh(&serv->sv_lock);
+
if (closesk)
/* Should unregister with portmap, but you cannot
* unregister just one protocol...
@@ -346,6 +380,7 @@
sock->sk->sk_sndbuf = snd * 2;
sock->sk->sk_rcvbuf = rcv * 2;
sock->sk->sk_userlocks |= SOCK_SNDBUF_LOCK|SOCK_RCVBUF_LOCK;
+ sock->sk->sk_write_space(sock->sk);
release_sock(sock->sk);
#endif
}
@@ -387,6 +422,15 @@
}
}
+static void svc_tcp_write_space(struct sock *sk)
+{
+ struct socket *sock = sk->sk_socket;
+
+ if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk) && sock)
+ clear_bit(SOCK_NOSPACE, &sock->flags);
+ svc_write_space(sk);
+}
+
/*
* Copy the UDP datagram's destination address to the rqstp structure.
* The 'destination' address in this case is the address to which the
@@ -427,13 +471,14 @@
long all[SVC_PKTINFO_SPACE / sizeof(long)];
} buffer;
struct cmsghdr *cmh = &buffer.hdr;
- int err, len;
struct msghdr msg = {
.msg_name = svc_addr(rqstp),
.msg_control = cmh,
.msg_controllen = sizeof(buffer),
.msg_flags = MSG_DONTWAIT,
};
+ size_t len;
+ int err;
if (test_and_clear_bit(XPT_CHNGBUF, &svsk->sk_xprt.xpt_flags))
/* udp sockets need large rcvbuf as all pending
@@ -465,8 +510,8 @@
return -EAGAIN;
}
len = svc_addr_len(svc_addr(rqstp));
- if (len < 0)
- return len;
+ if (len == 0)
+ return -EAFNOSUPPORT;
rqstp->rq_addrlen = len;
if (skb->tstamp.tv64 == 0) {
skb->tstamp = ktime_get_real();
@@ -980,25 +1025,16 @@
static int svc_tcp_has_wspace(struct svc_xprt *xprt)
{
struct svc_sock *svsk = container_of(xprt, struct svc_sock, sk_xprt);
- struct svc_serv *serv = svsk->sk_xprt.xpt_server;
+ struct svc_serv *serv = svsk->sk_xprt.xpt_server;
int required;
- int wspace;
- /*
- * Set the SOCK_NOSPACE flag before checking the available
- * sock space.
- */
+ if (test_bit(XPT_LISTENER, &xprt->xpt_flags))
+ return 1;
+ required = atomic_read(&xprt->xpt_reserved) + serv->sv_max_mesg;
+ if (sk_stream_wspace(svsk->sk_sk) >= required)
+ return 1;
set_bit(SOCK_NOSPACE, &svsk->sk_sock->flags);
- required = atomic_read(&svsk->sk_xprt.xpt_reserved) + serv->sv_max_mesg;
- wspace = sk_stream_wspace(svsk->sk_sk);
-
- if (wspace < sk_stream_min_wspace(svsk->sk_sk))
- return 0;
- if (required * 2 > wspace)
- return 0;
-
- clear_bit(SOCK_NOSPACE, &svsk->sk_sock->flags);
- return 1;
+ return 0;
}
static struct svc_xprt *svc_tcp_create(struct svc_serv *serv,
@@ -1054,7 +1090,7 @@
dprintk("setting up TCP socket for reading\n");
sk->sk_state_change = svc_tcp_state_change;
sk->sk_data_ready = svc_tcp_data_ready;
- sk->sk_write_space = svc_write_space;
+ sk->sk_write_space = svc_tcp_write_space;
svsk->sk_reclen = 0;
svsk->sk_tcplen = 0;
@@ -1148,9 +1184,19 @@
return svsk;
}
-int svc_addsock(struct svc_serv *serv,
- int fd,
- char *name_return)
+/**
+ * svc_addsock - add a listener socket to an RPC service
+ * @serv: pointer to RPC service to which to add a new listener
+ * @fd: file descriptor of the new listener
+ * @name_return: pointer to buffer to fill in with name of listener
+ * @len: size of the buffer
+ *
+ * Fills in socket name and returns positive length of name if successful.
+ * Name is terminated with '\n'. On error, returns a negative errno
+ * value.
+ */
+int svc_addsock(struct svc_serv *serv, const int fd, char *name_return,
+ const size_t len)
{
int err = 0;
struct socket *so = sockfd_lookup(fd, &err);
@@ -1190,7 +1236,7 @@
sockfd_put(so);
return err;
}
- return one_sock_name(name_return, svsk);
+ return svc_one_sock_name(svsk, name_return, len);
}
EXPORT_SYMBOL_GPL(svc_addsock);
@@ -1327,3 +1373,42 @@
sock_release(svsk->sk_sock);
kfree(svsk);
}
+
+/*
+ * Create a svc_xprt.
+ *
+ * For internal use only (e.g. nfsv4.1 backchannel).
+ * Callers should typically use the xpo_create() method.
+ */
+struct svc_xprt *svc_sock_create(struct svc_serv *serv, int prot)
+{
+ struct svc_sock *svsk;
+ struct svc_xprt *xprt = NULL;
+
+ dprintk("svc: %s\n", __func__);
+ svsk = kzalloc(sizeof(*svsk), GFP_KERNEL);
+ if (!svsk)
+ goto out;
+
+ xprt = &svsk->sk_xprt;
+ if (prot == IPPROTO_TCP)
+ svc_xprt_init(&svc_tcp_class, xprt, serv);
+ else if (prot == IPPROTO_UDP)
+ svc_xprt_init(&svc_udp_class, xprt, serv);
+ else
+ BUG();
+out:
+ dprintk("svc: %s return %p\n", __func__, xprt);
+ return xprt;
+}
+EXPORT_SYMBOL_GPL(svc_sock_create);
+
+/*
+ * Destroy a svc_sock.
+ */
+void svc_sock_destroy(struct svc_xprt *xprt)
+{
+ if (xprt)
+ kfree(container_of(xprt, struct svc_sock, sk_xprt));
+}
+EXPORT_SYMBOL_GPL(svc_sock_destroy);
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
index 06ca058..f412a85 100644
--- a/net/sunrpc/xprt.c
+++ b/net/sunrpc/xprt.c
@@ -12,8 +12,9 @@
* - Next, the caller puts together the RPC message, stuffs it into
* the request struct, and calls xprt_transmit().
* - xprt_transmit sends the message and installs the caller on the
- * transport's wait list. At the same time, it installs a timer that
- * is run after the packet's timeout has expired.
+ * transport's wait list. At the same time, if a reply is expected,
+ * it installs a timer that is run after the packet's timeout has
+ * expired.
* - When a packet arrives, the data_ready handler walks the list of
* pending requests for that transport. If a matching XID is found, the
* caller is woken up, and the timer removed.
@@ -46,6 +47,8 @@
#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/metrics.h>
+#include "sunrpc.h"
+
/*
* Local variables
*/
@@ -192,8 +195,8 @@
*/
int xprt_reserve_xprt(struct rpc_task *task)
{
- struct rpc_xprt *xprt = task->tk_xprt;
struct rpc_rqst *req = task->tk_rqstp;
+ struct rpc_xprt *xprt = req->rq_xprt;
if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) {
if (task == xprt->snd_task)
@@ -803,9 +806,10 @@
list_del_init(&req->rq_list);
req->rq_private_buf.len = copied;
- /* Ensure all writes are done before we update req->rq_received */
+ /* Ensure all writes are done before we update req->rq_reply_bytes_recvd */
smp_wmb();
- req->rq_received = copied;
+ req->rq_reply_bytes_recvd = copied;
rpc_wake_up_queued_task(&xprt->pending, task);
}
EXPORT_SYMBOL_GPL(xprt_complete_rqst);
@@ -820,7 +824,7 @@
dprintk("RPC: %5u xprt_timer\n", task->tk_pid);
spin_lock_bh(&xprt->transport_lock);
- if (!req->rq_received) {
+ if (!req->rq_reply_bytes_recvd) {
if (xprt->ops->timer)
xprt->ops->timer(task);
} else
@@ -842,8 +846,8 @@
dprintk("RPC: %5u xprt_prepare_transmit\n", task->tk_pid);
spin_lock_bh(&xprt->transport_lock);
- if (req->rq_received && !req->rq_bytes_sent) {
- err = req->rq_received;
+ if (req->rq_reply_bytes_recvd && !req->rq_bytes_sent) {
+ err = req->rq_reply_bytes_recvd;
goto out_unlock;
}
if (!xprt->ops->reserve_xprt(task))
@@ -855,7 +859,7 @@
void xprt_end_transmit(struct rpc_task *task)
{
- xprt_release_write(task->tk_xprt, task);
+ xprt_release_write(task->tk_rqstp->rq_xprt, task);
}
/**
@@ -872,8 +876,11 @@
dprintk("RPC: %5u xprt_transmit(%u)\n", task->tk_pid, req->rq_slen);
- if (!req->rq_received) {
- if (list_empty(&req->rq_list)) {
+ if (!req->rq_reply_bytes_recvd) {
+ if (list_empty(&req->rq_list) && rpc_reply_expected(task)) {
+ /*
+ * Add to the list only if we're expecting a reply
+ */
spin_lock_bh(&xprt->transport_lock);
/* Update the softirq receive buffer */
memcpy(&req->rq_private_buf, &req->rq_rcv_buf,
@@ -908,8 +915,13 @@
/* Don't race with disconnect */
if (!xprt_connected(xprt))
task->tk_status = -ENOTCONN;
- else if (!req->rq_received)
+ else if (!req->rq_reply_bytes_recvd && rpc_reply_expected(task)) {
+ /*
+ * Sleep on the pending queue since
+ * we're expecting a reply.
+ */
rpc_sleep_on(&xprt->pending, task, xprt_timer);
+ }
spin_unlock_bh(&xprt->transport_lock);
}
@@ -982,11 +994,17 @@
*/
void xprt_release(struct rpc_task *task)
{
- struct rpc_xprt *xprt = task->tk_xprt;
+ struct rpc_xprt *xprt;
struct rpc_rqst *req;
+ int is_bc_request;
if (!(req = task->tk_rqstp))
return;
+
+ /* Preallocated backchannel request? */
+ is_bc_request = bc_prealloc(req);
+
+ xprt = req->rq_xprt;
rpc_count_iostats(task);
spin_lock_bh(&xprt->transport_lock);
xprt->ops->release_xprt(xprt, task);
@@ -999,10 +1017,19 @@
mod_timer(&xprt->timer,
xprt->last_used + xprt->idle_timeout);
spin_unlock_bh(&xprt->transport_lock);
- xprt->ops->buf_free(req->rq_buffer);
+ if (!bc_prealloc(req))
+ xprt->ops->buf_free(req->rq_buffer);
task->tk_rqstp = NULL;
if (req->rq_release_snd_buf)
req->rq_release_snd_buf(req);
+
+ /*
+ * Early exit if this is a backchannel preallocated request.
+ * There is no need to have it added to the RPC slot list.
+ */
+ if (is_bc_request)
+ return;
+
memset(req, 0, sizeof(*req)); /* mark unused */
dprintk("RPC: %5u release request %p\n", task->tk_pid, req);
@@ -1049,6 +1076,11 @@
INIT_LIST_HEAD(&xprt->free);
INIT_LIST_HEAD(&xprt->recv);
+#if defined(CONFIG_NFS_V4_1)
+ spin_lock_init(&xprt->bc_pa_lock);
+ INIT_LIST_HEAD(&xprt->bc_pa_list);
+#endif /* CONFIG_NFS_V4_1 */
+
INIT_WORK(&xprt->task_cleanup, xprt_autoclose);
setup_timer(&xprt->timer, xprt_init_autodisconnect,
(unsigned long)xprt);
diff --git a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
index 42a6f9f..9e88438 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
@@ -397,14 +397,14 @@
if (!ch)
return 0;
- /* Allocate temporary reply and chunk maps */
- rpl_map = svc_rdma_get_req_map();
- chl_map = svc_rdma_get_req_map();
-
svc_rdma_rcl_chunk_counts(ch, &ch_count, &byte_count);
if (ch_count > RPCSVC_MAXPAGES)
return -EINVAL;
+ /* Allocate temporary reply and chunk maps */
+ rpl_map = svc_rdma_get_req_map();
+ chl_map = svc_rdma_get_req_map();
+
if (!xprt->sc_frmr_pg_list_len)
sge_count = map_read_chunks(xprt, rqstp, hdr_ctxt, rmsgp,
rpl_map, chl_map, ch_count,
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index 6c2d615..83c73c4 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -34,6 +34,9 @@
#include <linux/sunrpc/sched.h>
#include <linux/sunrpc/xprtsock.h>
#include <linux/file.h>
+#ifdef CONFIG_NFS_V4_1
+#include <linux/sunrpc/bc_xprt.h>
+#endif
#include <net/sock.h>
#include <net/checksum.h>
@@ -270,6 +273,13 @@
#define TCP_RCV_COPY_FRAGHDR (1UL << 1)
#define TCP_RCV_COPY_XID (1UL << 2)
#define TCP_RCV_COPY_DATA (1UL << 3)
+#define TCP_RCV_READ_CALLDIR (1UL << 4)
+#define TCP_RCV_COPY_CALLDIR (1UL << 5)
+
+/*
+ * TCP RPC flags
+ */
+#define TCP_RPC_REPLY (1UL << 6)
static inline struct sockaddr *xs_addr(struct rpc_xprt *xprt)
{
@@ -956,7 +966,7 @@
transport->tcp_offset = 0;
/* Sanity check of the record length */
- if (unlikely(transport->tcp_reclen < 4)) {
+ if (unlikely(transport->tcp_reclen < 8)) {
dprintk("RPC: invalid TCP record fragment length\n");
xprt_force_disconnect(xprt);
return;
@@ -991,33 +1001,77 @@
if (used != len)
return;
transport->tcp_flags &= ~TCP_RCV_COPY_XID;
- transport->tcp_flags |= TCP_RCV_COPY_DATA;
+ transport->tcp_flags |= TCP_RCV_READ_CALLDIR;
transport->tcp_copied = 4;
- dprintk("RPC: reading reply for XID %08x\n",
+ dprintk("RPC: reading %s XID %08x\n",
+ (transport->tcp_flags & TCP_RPC_REPLY) ? "reply for"
+ : "request with",
ntohl(transport->tcp_xid));
xs_tcp_check_fraghdr(transport);
}
-static inline void xs_tcp_read_request(struct rpc_xprt *xprt, struct xdr_skb_reader *desc)
+static inline void xs_tcp_read_calldir(struct sock_xprt *transport,
+ struct xdr_skb_reader *desc)
{
- struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
- struct rpc_rqst *req;
+ size_t len, used;
+ u32 offset;
+ __be32 calldir;
+
+ /*
+ * We want transport->tcp_offset to be 8 at the end of this routine
+ * (4 bytes for the xid and 4 bytes for the call/reply flag).
+ * When this function is called for the first time,
+ * transport->tcp_offset is 4 (after having already read the xid).
+ */
+ offset = transport->tcp_offset - sizeof(transport->tcp_xid);
+ len = sizeof(calldir) - offset;
+ dprintk("RPC: reading CALL/REPLY flag (%Zu bytes)\n", len);
+ used = xdr_skb_read_bits(desc, &calldir, len);
+ transport->tcp_offset += used;
+ if (used != len)
+ return;
+ transport->tcp_flags &= ~TCP_RCV_READ_CALLDIR;
+ transport->tcp_flags |= TCP_RCV_COPY_CALLDIR;
+ transport->tcp_flags |= TCP_RCV_COPY_DATA;
+ /*
+ * We don't yet have the XDR buffer, so we will write the calldir
+ * out after we get the buffer from the 'struct rpc_rqst'
+ */
+ if (ntohl(calldir) == RPC_REPLY)
+ transport->tcp_flags |= TCP_RPC_REPLY;
+ else
+ transport->tcp_flags &= ~TCP_RPC_REPLY;
+ dprintk("RPC: reading %s CALL/REPLY flag %08x\n",
+ (transport->tcp_flags & TCP_RPC_REPLY) ?
+ "reply for" : "request with", calldir);
+ xs_tcp_check_fraghdr(transport);
+}
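
To make the offset arithmetic above concrete: on the first pass, offset =
tcp_offset - sizeof(tcp_xid) = 4 - 4 = 0 and len = sizeof(calldir) - offset =
4, so once all four bytes are read tcp_offset becomes 4 + 4 = 8, exactly as
the comment requires; on a short read (used < len) the routine returns early
and resumes with a non-zero offset on the next pass over the socket data.
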
+
+static inline void xs_tcp_read_common(struct rpc_xprt *xprt,
+ struct xdr_skb_reader *desc,
+ struct rpc_rqst *req)
+{
+ struct sock_xprt *transport =
+ container_of(xprt, struct sock_xprt, xprt);
struct xdr_buf *rcvbuf;
size_t len;
ssize_t r;
- /* Find and lock the request corresponding to this xid */
- spin_lock(&xprt->transport_lock);
- req = xprt_lookup_rqst(xprt, transport->tcp_xid);
- if (!req) {
- transport->tcp_flags &= ~TCP_RCV_COPY_DATA;
- dprintk("RPC: XID %08x request not found!\n",
- ntohl(transport->tcp_xid));
- spin_unlock(&xprt->transport_lock);
- return;
+ rcvbuf = &req->rq_private_buf;
+
+ if (transport->tcp_flags & TCP_RCV_COPY_CALLDIR) {
+ /*
+ * Save the RPC direction in the XDR buffer
+ */
+ __be32 calldir = transport->tcp_flags & TCP_RPC_REPLY ?
+ htonl(RPC_REPLY) : 0;
+
+ memcpy(rcvbuf->head[0].iov_base + transport->tcp_copied,
+ &calldir, sizeof(calldir));
+ transport->tcp_copied += sizeof(calldir);
+ transport->tcp_flags &= ~TCP_RCV_COPY_CALLDIR;
}
- rcvbuf = &req->rq_private_buf;
len = desc->count;
if (len > transport->tcp_reclen - transport->tcp_offset) {
struct xdr_skb_reader my_desc;
@@ -1054,7 +1108,7 @@
"tcp_offset = %u, tcp_reclen = %u\n",
xprt, transport->tcp_copied,
transport->tcp_offset, transport->tcp_reclen);
- goto out;
+ return;
}
dprintk("RPC: XID %08x read %Zd bytes\n",
@@ -1070,11 +1124,125 @@
transport->tcp_flags &= ~TCP_RCV_COPY_DATA;
}
-out:
+ return;
+}
+
+/*
+ * Finds the request corresponding to the RPC xid and invokes the common
+ * tcp read code to read the data.
+ */
+static inline int xs_tcp_read_reply(struct rpc_xprt *xprt,
+ struct xdr_skb_reader *desc)
+{
+ struct sock_xprt *transport =
+ container_of(xprt, struct sock_xprt, xprt);
+ struct rpc_rqst *req;
+
+ dprintk("RPC: read reply XID %08x\n", ntohl(transport->tcp_xid));
+
+ /* Find and lock the request corresponding to this xid */
+ spin_lock(&xprt->transport_lock);
+ req = xprt_lookup_rqst(xprt, transport->tcp_xid);
+ if (!req) {
+ dprintk("RPC: XID %08x request not found!\n",
+ ntohl(transport->tcp_xid));
+ spin_unlock(&xprt->transport_lock);
+ return -1;
+ }
+
+ xs_tcp_read_common(xprt, desc, req);
+
if (!(transport->tcp_flags & TCP_RCV_COPY_DATA))
xprt_complete_rqst(req->rq_task, transport->tcp_copied);
+
spin_unlock(&xprt->transport_lock);
- xs_tcp_check_fraghdr(transport);
+ return 0;
+}
+
+#if defined(CONFIG_NFS_V4_1)
+/*
+ * Obtains an rpc_rqst previously allocated and invokes the common
+ * tcp read code to read the data. The result is placed in the callback
+ * queue.
+ * If we're unable to obtain the rpc_rqst we schedule the closing of the
+ * connection and return -1.
+ */
+static inline int xs_tcp_read_callback(struct rpc_xprt *xprt,
+ struct xdr_skb_reader *desc)
+{
+ struct sock_xprt *transport =
+ container_of(xprt, struct sock_xprt, xprt);
+ struct rpc_rqst *req;
+
+ req = xprt_alloc_bc_request(xprt);
+ if (req == NULL) {
+ printk(KERN_WARNING "Callback slot table overflowed\n");
+ xprt_force_disconnect(xprt);
+ return -1;
+ }
+
+ req->rq_xid = transport->tcp_xid;
+ dprintk("RPC: read callback XID %08x\n", ntohl(req->rq_xid));
+ xs_tcp_read_common(xprt, desc, req);
+
+ if (!(transport->tcp_flags & TCP_RCV_COPY_DATA)) {
+ struct svc_serv *bc_serv = xprt->bc_serv;
+
+ /*
+ * Add callback request to callback list. The callback
+ * service sleeps on the sv_cb_waitq waiting for new
+ * requests. Wake it up after enqueuing the
+ * request.
+ */
+ dprintk("RPC: add callback request to list\n");
+ spin_lock(&bc_serv->sv_cb_lock);
+ list_add(&req->rq_bc_list, &bc_serv->sv_cb_list);
+ spin_unlock(&bc_serv->sv_cb_lock);
+ wake_up(&bc_serv->sv_cb_waitq);
+ }
+
+ req->rq_private_buf.len = transport->tcp_copied;
+
+ return 0;
+}
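
For context, a hedged sketch of the consumer side of the list populated
above (the NFSv4.1 callback service), not taken from this patch and assuming
the sv_cb_* and rq_bc_list fields introduced by this series: wait for work
on sv_cb_waitq, then dequeue one request under sv_cb_lock.

	static struct rpc_rqst *bc_dequeue_sketch(struct svc_serv *bc_serv)
	{
		struct rpc_rqst *req = NULL;

		wait_event_interruptible(bc_serv->sv_cb_waitq,
					 !list_empty(&bc_serv->sv_cb_list));
		spin_lock(&bc_serv->sv_cb_lock);
		if (!list_empty(&bc_serv->sv_cb_list)) {
			req = list_first_entry(&bc_serv->sv_cb_list,
					       struct rpc_rqst, rq_bc_list);
			list_del(&req->rq_bc_list);
		}
		spin_unlock(&bc_serv->sv_cb_lock);
		return req;
	}
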
+
+static inline int _xs_tcp_read_data(struct rpc_xprt *xprt,
+ struct xdr_skb_reader *desc)
+{
+ struct sock_xprt *transport =
+ container_of(xprt, struct sock_xprt, xprt);
+
+ return (transport->tcp_flags & TCP_RPC_REPLY) ?
+ xs_tcp_read_reply(xprt, desc) :
+ xs_tcp_read_callback(xprt, desc);
+}
+#else
+static inline int _xs_tcp_read_data(struct rpc_xprt *xprt,
+ struct xdr_skb_reader *desc)
+{
+ return xs_tcp_read_reply(xprt, desc);
+}
+#endif /* CONFIG_NFS_V4_1 */
+
+/*
+ * Read data off the transport. This can be either an RPC_CALL or an
+ * RPC_REPLY. Relay the processing to helper functions.
+ */
+static void xs_tcp_read_data(struct rpc_xprt *xprt,
+ struct xdr_skb_reader *desc)
+{
+ struct sock_xprt *transport =
+ container_of(xprt, struct sock_xprt, xprt);
+
+ if (_xs_tcp_read_data(xprt, desc) == 0)
+ xs_tcp_check_fraghdr(transport);
+ else {
+ /*
+ * The transport_lock protects the request handling.
+ * There's no need to hold it to update the tcp_flags.
+ */
+ transport->tcp_flags &= ~TCP_RCV_COPY_DATA;
+ }
}
static inline void xs_tcp_read_discard(struct sock_xprt *transport, struct xdr_skb_reader *desc)
@@ -1114,9 +1282,14 @@
xs_tcp_read_xid(transport, &desc);
continue;
}
+ /* Read in the call/reply flag */
+ if (transport->tcp_flags & TCP_RCV_READ_CALLDIR) {
+ xs_tcp_read_calldir(transport, &desc);
+ continue;
+ }
/* Read in the request data */
if (transport->tcp_flags & TCP_RCV_COPY_DATA) {
- xs_tcp_read_request(xprt, &desc);
+ xs_tcp_read_data(xprt, &desc);
continue;
}
/* Skip over any trailing bytes on short reads */
@@ -1792,6 +1965,7 @@
*/
set_bit(XPRT_CONNECTION_CLOSE, &xprt->state);
xprt_force_disconnect(xprt);
+ break;
case -ECONNREFUSED:
case -ECONNRESET:
case -ENETUNREACH:
@@ -2010,6 +2184,9 @@
.buf_free = rpc_free,
.send_request = xs_tcp_send_request,
.set_retrans_timeout = xprt_set_retrans_timeout_def,
+#if defined(CONFIG_NFS_V4_1)
+ .release_request = bc_release_request,
+#endif /* CONFIG_NFS_V4_1 */
.close = xs_tcp_close,
.destroy = xs_destroy,
.print_stats = xs_tcp_print_stats,
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index 2416856..241bddd 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -1687,13 +1687,52 @@
if (err)
goto out_rtnl;
- if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP &&
- dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP_VLAN) {
- err = -EINVAL;
+ err = get_vlan(info->attrs[NL80211_ATTR_STA_VLAN], drv, &params.vlan);
+ if (err)
goto out;
+
+ /* validate settings */
+ err = 0;
+
+ switch (dev->ieee80211_ptr->iftype) {
+ case NL80211_IFTYPE_AP:
+ case NL80211_IFTYPE_AP_VLAN:
+ /* disallow mesh-specific things */
+ if (params.plink_action)
+ err = -EINVAL;
+ break;
+ case NL80211_IFTYPE_STATION:
+ /* disallow everything but AUTHORIZED flag */
+ if (params.plink_action)
+ err = -EINVAL;
+ if (params.vlan)
+ err = -EINVAL;
+ if (params.supported_rates)
+ err = -EINVAL;
+ if (params.ht_capa)
+ err = -EINVAL;
+ if (params.listen_interval >= 0)
+ err = -EINVAL;
+ if (params.sta_flags_mask & ~BIT(NL80211_STA_FLAG_AUTHORIZED))
+ err = -EINVAL;
+ break;
+ case NL80211_IFTYPE_MESH_POINT:
+ /* disallow things mesh doesn't support */
+ if (params.vlan)
+ err = -EINVAL;
+ if (params.ht_capa)
+ err = -EINVAL;
+ if (params.listen_interval >= 0)
+ err = -EINVAL;
+ if (params.supported_rates)
+ err = -EINVAL;
+ if (params.sta_flags_mask)
+ err = -EINVAL;
+ break;
+ default:
+ err = -EINVAL;
}
- err = get_vlan(info->attrs[NL80211_ATTR_STA_VLAN], drv, &params.vlan);
if (err)
goto out;
@@ -1728,9 +1767,6 @@
if (!info->attrs[NL80211_ATTR_MAC])
return -EINVAL;
- if (!info->attrs[NL80211_ATTR_STA_AID])
- return -EINVAL;
-
if (!info->attrs[NL80211_ATTR_STA_LISTEN_INTERVAL])
return -EINVAL;
@@ -1745,9 +1781,11 @@
params.listen_interval =
nla_get_u16(info->attrs[NL80211_ATTR_STA_LISTEN_INTERVAL]);
- params.aid = nla_get_u16(info->attrs[NL80211_ATTR_STA_AID]);
- if (!params.aid || params.aid > IEEE80211_MAX_AID)
- return -EINVAL;
+ if (info->attrs[NL80211_ATTR_STA_AID]) {
+ params.aid = nla_get_u16(info->attrs[NL80211_ATTR_STA_AID]);
+ if (!params.aid || params.aid > IEEE80211_MAX_AID)
+ return -EINVAL;
+ }
if (info->attrs[NL80211_ATTR_HT_CAPABILITY])
params.ht_capa =
@@ -1762,13 +1800,39 @@
if (err)
goto out_rtnl;
- if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP &&
- dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP_VLAN) {
- err = -EINVAL;
+ err = get_vlan(info->attrs[NL80211_ATTR_STA_VLAN], drv, &params.vlan);
+ if (err)
goto out;
+
+ /* validate settings */
+ err = 0;
+
+ switch (dev->ieee80211_ptr->iftype) {
+ case NL80211_IFTYPE_AP:
+ case NL80211_IFTYPE_AP_VLAN:
+ /* all ok but must have AID */
+ if (!params.aid)
+ err = -EINVAL;
+ break;
+ case NL80211_IFTYPE_MESH_POINT:
+ /* disallow things mesh doesn't support */
+ if (params.vlan)
+ err = -EINVAL;
+ if (params.aid)
+ err = -EINVAL;
+ if (params.ht_capa)
+ err = -EINVAL;
+ if (params.listen_interval >= 0)
+ err = -EINVAL;
+ if (params.supported_rates)
+ err = -EINVAL;
+ if (params.sta_flags_mask)
+ err = -EINVAL;
+ break;
+ default:
+ err = -EINVAL;
}
- err = get_vlan(info->attrs[NL80211_ATTR_STA_VLAN], drv, &params.vlan);
if (err)
goto out;
@@ -1812,7 +1876,8 @@
goto out_rtnl;
if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP &&
- dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP_VLAN) {
+ dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP_VLAN &&
+ dev->ieee80211_ptr->iftype != NL80211_IFTYPE_MESH_POINT) {
err = -EINVAL;
goto out;
}
diff --git a/scripts/dtc/.gitignore b/scripts/dtc/.gitignore
new file mode 100644
index 0000000..095acb4
--- /dev/null
+++ b/scripts/dtc/.gitignore
@@ -0,0 +1,5 @@
+dtc
+dtc-lexer.lex.c
+dtc-parser.tab.c
+dtc-parser.tab.h
+
diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
index 84da3ba..ac2150e 100644
--- a/sound/core/pcm_native.c
+++ b/sound/core/pcm_native.c
@@ -320,7 +320,7 @@
snd_mask_max(&params->masks[SNDRV_PCM_HW_PARAM_CHANNELS])) {
changed = substream->ops->ioctl(substream,
SNDRV_PCM_IOCTL1_FIFO_SIZE, params);
- if (params < 0)
+ if (changed < 0)
return changed;
}
}
diff --git a/sound/core/seq/seq_midi_event.c b/sound/core/seq/seq_midi_event.c
index 8284f17..b5d6ea4 100644
--- a/sound/core/seq/seq_midi_event.c
+++ b/sound/core/seq/seq_midi_event.c
@@ -504,10 +504,10 @@
if (dev->nostat && count < 12)
return -ENOMEM;
cmd = MIDI_CMD_CONTROL|(ev->data.control.channel & 0x0f);
- bytes[0] = ev->data.control.param & 0x007f;
- bytes[1] = (ev->data.control.param & 0x3f80) >> 7;
- bytes[2] = ev->data.control.value & 0x007f;
- bytes[3] = (ev->data.control.value & 0x3f80) >> 7;
+ bytes[0] = (ev->data.control.param & 0x3f80) >> 7;
+ bytes[1] = ev->data.control.param & 0x007f;
+ bytes[2] = (ev->data.control.value & 0x3f80) >> 7;
+ bytes[3] = ev->data.control.value & 0x007f;
if (cmd != dev->lastcmd && !dev->nostat) {
if (count < 9)
return -ENOMEM;
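
The hunk above fixes the order in which the 14-bit controller parameter and
value are split into 7-bit data bytes: the most-significant seven bits now
come first in each pair. A minimal sketch of that split (hypothetical helper,
not part of the driver):

	/* Split a 14-bit value into MSB-first 7-bit MIDI data bytes. */
	static void split_14bit(unsigned int v, unsigned char out[2])
	{
		out[0] = (v >> 7) & 0x7f;	/* most significant 7 bits */
		out[1] = v & 0x7f;		/* least significant 7 bits */
	}
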
diff --git a/sound/pci/ctxfi/ctatc.c b/sound/pci/ctxfi/ctatc.c
index b0adc80..a49c766 100644
--- a/sound/pci/ctxfi/ctatc.c
+++ b/sound/pci/ctxfi/ctatc.c
@@ -46,8 +46,6 @@
SND_PCI_QUIRK(PCI_VENDOR_ID_CREATIVE, 0x0031, "SB073x", CTSB073X),
SND_PCI_QUIRK_MASK(PCI_VENDOR_ID_CREATIVE, 0xf000, 0x6000,
"UAA", CTUAA),
- SND_PCI_QUIRK_VENDOR(PCI_VENDOR_ID_CREATIVE,
- "Unknown", CT20K1_UNKNOWN),
{ } /* terminator */
};
@@ -67,13 +65,16 @@
};
static const char *ct_subsys_name[NUM_CTCARDS] = {
+ /* 20k1 models */
[CTSB055X] = "SB055x",
[CTSB073X] = "SB073x",
- [CTSB0760] = "SB076x",
[CTUAA] = "UAA",
[CT20K1_UNKNOWN] = "Unknown",
+ /* 20k2 models */
+ [CTSB0760] = "SB076x",
[CTHENDRIX] = "Hendrix",
[CTSB0880] = "SB0880",
+ [CT20K2_UNKNOWN] = "Unknown",
};
static struct {
@@ -260,13 +261,8 @@
int device = apcm->substream->pcm->device;
unsigned int pitch;
- if (NULL != apcm->src) {
- /* Prepared pcm playback */
- return 0;
- }
-
/* first release old resources */
- atc->pcm_release_resources(atc, apcm);
+ atc_pcm_release_resources(atc, apcm);
/* Get SRC resource */
desc.multi = apcm->substream->runtime->channels;
@@ -660,10 +656,7 @@
unsigned int pitch;
int mix_base = 0, imp_base = 0;
- if (NULL != apcm->src) {
- /* Prepared pcm capture */
- return 0;
- }
+ atc_pcm_release_resources(atc, apcm);
/* Get needed resources. */
err = atc_pcm_capture_get_resources(atc, apcm);
@@ -866,7 +859,7 @@
struct dao *dao = container_of(atc->daios[SPDIFOO], struct dao, daio);
unsigned int rate = apcm->substream->runtime->rate;
unsigned int status;
- int err;
+ int err = 0;
unsigned char iec958_con_fs;
switch (rate) {
@@ -907,8 +900,7 @@
int err;
int i;
- if (NULL != apcm->src)
- return 0;
+ atc_pcm_release_resources(atc, apcm);
/* Configure SPDIFOO and PLL to passthrough mode;
* determine pll_rate. */
@@ -1115,32 +1107,20 @@
return err;
}
-static int ct_atc_destroy(struct ct_atc *atc)
+static int atc_release_resources(struct ct_atc *atc)
{
- struct daio_mgr *daio_mgr;
- struct dao *dao;
- struct dai *dai;
- struct daio *daio;
- struct sum_mgr *sum_mgr;
- struct src_mgr *src_mgr;
- struct srcimp_mgr *srcimp_mgr;
- struct srcimp *srcimp;
- struct ct_mixer *mixer;
- int i = 0;
+ int i;
+ struct daio_mgr *daio_mgr = NULL;
+ struct dao *dao = NULL;
+ struct dai *dai = NULL;
+ struct daio *daio = NULL;
+ struct sum_mgr *sum_mgr = NULL;
+ struct src_mgr *src_mgr = NULL;
+ struct srcimp_mgr *srcimp_mgr = NULL;
+ struct srcimp *srcimp = NULL;
+ struct ct_mixer *mixer = NULL;
- if (NULL == atc)
- return 0;
-
- if (atc->timer) {
- ct_timer_free(atc->timer);
- atc->timer = NULL;
- }
-
- /* Stop hardware and disable all interrupts */
- if (NULL != atc->hw)
- ((struct hw *)atc->hw)->card_stop(atc->hw);
-
- /* Destroy internal mixer objects */
+ /* disconnect internal mixer objects */
if (NULL != atc->mixer) {
mixer = atc->mixer;
mixer->set_input_left(mixer, MIX_LINE_IN, NULL);
@@ -1149,7 +1129,6 @@
mixer->set_input_right(mixer, MIX_MIC_IN, NULL);
mixer->set_input_left(mixer, MIX_SPDIF_IN, NULL);
mixer->set_input_right(mixer, MIX_SPDIF_IN, NULL);
- ct_mixer_destroy(atc->mixer);
}
if (NULL != atc->daios) {
@@ -1167,6 +1146,7 @@
daio_mgr->put_daio(daio_mgr, daio);
}
kfree(atc->daios);
+ atc->daios = NULL;
}
if (NULL != atc->pcm) {
@@ -1175,6 +1155,7 @@
sum_mgr->put_sum(sum_mgr, atc->pcm[i]);
kfree(atc->pcm);
+ atc->pcm = NULL;
}
if (NULL != atc->srcs) {
@@ -1183,6 +1164,7 @@
src_mgr->put_src(src_mgr, atc->srcs[i]);
kfree(atc->srcs);
+ atc->srcs = NULL;
}
if (NULL != atc->srcimps) {
@@ -1193,8 +1175,30 @@
srcimp_mgr->put_srcimp(srcimp_mgr, atc->srcimps[i]);
}
kfree(atc->srcimps);
+ atc->srcimps = NULL;
}
+ return 0;
+}
+
+static int ct_atc_destroy(struct ct_atc *atc)
+{
+ int i = 0;
+
+ if (NULL == atc)
+ return 0;
+
+ if (atc->timer) {
+ ct_timer_free(atc->timer);
+ atc->timer = NULL;
+ }
+
+ atc_release_resources(atc);
+
+ /* Destroy internal mixer objects */
+ if (NULL != atc->mixer)
+ ct_mixer_destroy(atc->mixer);
+
for (i = 0; i < NUM_RSCTYP; i++) {
if ((NULL != rsc_mgr_funcs[i].destroy) &&
(NULL != atc->rsc_mgrs[i]))
@@ -1240,9 +1244,21 @@
return -ENOENT;
}
p = snd_pci_quirk_lookup(atc->pci, list);
- if (!p)
- return -ENOENT;
- atc->model = p->value;
+ if (p) {
+ if (p->value < 0) {
+ printk(KERN_ERR "ctxfi: "
+ "Device %04x:%04x is black-listed\n",
+ atc->pci->subsystem_vendor,
+ atc->pci->subsystem_device);
+ return -ENOENT;
+ }
+ atc->model = p->value;
+ } else {
+ if (atc->chip_type == ATC20K1)
+ atc->model = CT20K1_UNKNOWN;
+ else
+ atc->model = CT20K2_UNKNOWN;
+ }
atc->model_name = ct_subsys_name[atc->model];
snd_printd("ctxfi: chip %s model %s (%04x:%04x) is found\n",
atc->chip_name, atc->model_name,
@@ -1310,7 +1326,7 @@
return 0;
}
-static int __devinit atc_get_resources(struct ct_atc *atc)
+static int atc_get_resources(struct ct_atc *atc)
{
struct daio_desc da_desc = {0};
struct daio_mgr *daio_mgr;
@@ -1407,16 +1423,10 @@
atc->n_pcm++;
}
- err = ct_mixer_create(atc, (struct ct_mixer **)&atc->mixer);
- if (err) {
- printk(KERN_ERR "ctxfi: Failed to create mixer obj!!!\n");
- return err;
- }
-
return 0;
}
-static void __devinit
+static void
atc_connect_dai(struct src_mgr *src_mgr, struct dai *dai,
struct src **srcs, struct srcimp **srcimps)
{
@@ -1455,7 +1465,7 @@
src_mgr->commit_write(src_mgr); /* Synchronously enable SRCs */
}
-static void __devinit atc_connect_resources(struct ct_atc *atc)
+static void atc_connect_resources(struct ct_atc *atc)
{
struct dai *dai;
struct dao *dao;
@@ -1501,6 +1511,84 @@
}
}
+#ifdef CONFIG_PM
+static int atc_suspend(struct ct_atc *atc, pm_message_t state)
+{
+ int i;
+ struct hw *hw = atc->hw;
+
+ snd_power_change_state(atc->card, SNDRV_CTL_POWER_D3hot);
+
+ for (i = FRONT; i < NUM_PCMS; i++) {
+ if (!atc->pcms[i])
+ continue;
+
+ snd_pcm_suspend_all(atc->pcms[i]);
+ }
+
+ atc_release_resources(atc);
+
+ hw->suspend(hw, state);
+
+ return 0;
+}
+
+static int atc_hw_resume(struct ct_atc *atc)
+{
+ struct hw *hw = atc->hw;
+ struct card_conf info = {0};
+
+ /* Re-initialize card hardware. */
+ info.rsr = atc->rsr;
+ info.msr = atc->msr;
+ info.vm_pgt_phys = atc_get_ptp_phys(atc, 0);
+ return hw->resume(hw, &info);
+}
+
+static int atc_resources_resume(struct ct_atc *atc)
+{
+ struct ct_mixer *mixer;
+ int err = 0;
+
+ /* Get resources */
+ err = atc_get_resources(atc);
+ if (err < 0) {
+ atc_release_resources(atc);
+ return err;
+ }
+
+ /* Build topology */
+ atc_connect_resources(atc);
+
+ mixer = atc->mixer;
+ mixer->resume(mixer);
+
+ return 0;
+}
+
+static int atc_resume(struct ct_atc *atc)
+{
+ int err = 0;
+
+ /* Do hardware resume. */
+ err = atc_hw_resume(atc);
+ if (err < 0) {
+ printk(KERN_ERR "ctxfi: pci_enable_device failed, "
+ "disabling device\n");
+ snd_card_disconnect(atc->card);
+ return err;
+ }
+
+ err = atc_resources_resume(atc);
+ if (err < 0)
+ return err;
+
+ snd_power_change_state(atc->card, SNDRV_CTL_POWER_D0);
+
+ return 0;
+}
+#endif
+
static struct ct_atc atc_preset __devinitdata = {
.map_audio_buffer = ct_map_audio_buffer,
.unmap_audio_buffer = ct_unmap_audio_buffer,
@@ -1529,6 +1617,10 @@
.spdif_out_set_status = atc_spdif_out_set_status,
.spdif_out_passthru = atc_spdif_out_passthru,
.have_digit_io_switch = atc_have_digit_io_switch,
+#ifdef CONFIG_PM
+ .suspend = atc_suspend,
+ .resume = atc_resume,
+#endif
};
/**
@@ -1587,6 +1679,12 @@
if (err < 0)
goto error1;
+ err = ct_mixer_create(atc, (struct ct_mixer **)&atc->mixer);
+ if (err) {
+ printk(KERN_ERR "ctxfi: Failed to create mixer obj!!!\n");
+ goto error1;
+ }
+
/* Get resources */
err = atc_get_resources(atc);
if (err < 0)
diff --git a/sound/pci/ctxfi/ctatc.h b/sound/pci/ctxfi/ctatc.h
index 9fe620e..9fd8a57 100644
--- a/sound/pci/ctxfi/ctatc.h
+++ b/sound/pci/ctxfi/ctatc.h
@@ -136,6 +136,13 @@
unsigned char n_pcm;
struct ct_timer *timer;
+
+#ifdef CONFIG_PM
+ int (*suspend)(struct ct_atc *atc, pm_message_t state);
+ int (*resume)(struct ct_atc *atc);
+#define NUM_PCMS (NUM_CTALSADEVS - 1)
+ struct snd_pcm *pcms[NUM_PCMS];
+#endif
};
diff --git a/sound/pci/ctxfi/cthardware.h b/sound/pci/ctxfi/cthardware.h
index 4a8e04f..af55405 100644
--- a/sound/pci/ctxfi/cthardware.h
+++ b/sound/pci/ctxfi/cthardware.h
@@ -30,13 +30,16 @@
enum CTCARDS {
/* 20k1 models */
CTSB055X,
+ CT20K1_MODEL_FIRST = CTSB055X,
CTSB073X,
CTUAA,
CT20K1_UNKNOWN,
/* 20k2 models */
CTSB0760,
+ CT20K2_MODEL_FIRST = CTSB0760,
CTHENDRIX,
CTSB0880,
+ CT20K2_UNKNOWN,
NUM_CTCARDS /* This should always be the last */
};
@@ -61,6 +64,10 @@
int (*card_init)(struct hw *hw, struct card_conf *info);
int (*card_stop)(struct hw *hw);
int (*pll_init)(struct hw *hw, unsigned int rsr);
+#ifdef CONFIG_PM
+ int (*suspend)(struct hw *hw, pm_message_t state);
+ int (*resume)(struct hw *hw, struct card_conf *info);
+#endif
int (*is_adc_source_selected)(struct hw *hw, enum ADCSRC source);
int (*select_adc_source)(struct hw *hw, enum ADCSRC source);
int (*have_digit_io_switch)(struct hw *hw);
diff --git a/sound/pci/ctxfi/cthw20k1.c b/sound/pci/ctxfi/cthw20k1.c
index cb69d9d..ad3e1d1 100644
--- a/sound/pci/ctxfi/cthw20k1.c
+++ b/sound/pci/ctxfi/cthw20k1.c
@@ -1911,9 +1911,17 @@
goto error1;
}
- err = pci_request_regions(pci, "XFi");
- if (err < 0)
- goto error1;
+ if (!hw->io_base) {
+ err = pci_request_regions(pci, "XFi");
+ if (err < 0)
+ goto error1;
+
+ if (hw->model == CTUAA)
+ hw->io_base = pci_resource_start(pci, 5);
+ else
+ hw->io_base = pci_resource_start(pci, 0);
+
+ }
/* Switch to X-Fi mode from UAA mode if needed */
if (hw->model == CTUAA) {
@@ -1921,18 +1929,17 @@
if (err)
goto error2;
- hw->io_base = pci_resource_start(pci, 5);
- } else {
- hw->io_base = pci_resource_start(pci, 0);
}
- err = request_irq(pci->irq, ct_20k1_interrupt, IRQF_SHARED,
- "ctxfi", hw);
- if (err < 0) {
- printk(KERN_ERR "XFi: Cannot get irq %d\n", pci->irq);
- goto error2;
+ if (hw->irq < 0) {
+ err = request_irq(pci->irq, ct_20k1_interrupt, IRQF_SHARED,
+ "ctxfi", hw);
+ if (err < 0) {
+ printk(KERN_ERR "XFi: Cannot get irq %d\n", pci->irq);
+ goto error2;
+ }
+ hw->irq = pci->irq;
}
- hw->irq = pci->irq;
pci_set_master(pci);
@@ -1948,6 +1955,15 @@
static int hw_card_stop(struct hw *hw)
{
+ unsigned int data;
+
+ /* disable transport bus master and queueing of request */
+ hw_write_20kx(hw, TRNCTL, 0x00);
+
+ /* disable pll */
+ data = hw_read_20kx(hw, PLLCTL);
+ hw_write_20kx(hw, PLLCTL, (data & (~(0x0F<<12))));
+
/* TODO: Disable interrupt and so on... */
if (hw->irq >= 0)
synchronize_irq(hw->irq);
@@ -1987,11 +2003,9 @@
struct trn_conf trn_info = {0};
/* Get PCI io port base address and do Hendrix switch if needed. */
- if (!hw->io_base) {
- err = hw_card_start(hw);
- if (err)
- return err;
- }
+ err = hw_card_start(hw);
+ if (err)
+ return err;
/* PLL init */
err = hw_pll_init(hw, info->rsr);
@@ -2064,6 +2078,37 @@
return 0;
}
+#ifdef CONFIG_PM
+static int hw_suspend(struct hw *hw, pm_message_t state)
+{
+ struct pci_dev *pci = hw->pci;
+
+ hw_card_stop(hw);
+
+ if (hw->model == CTUAA) {
+ /* Switch to UAA config space. */
+ pci_write_config_dword(pci, UAA_CFG_SPACE_FLAG, 0x0);
+ }
+
+ pci_disable_device(pci);
+ pci_save_state(pci);
+ pci_set_power_state(pci, pci_choose_state(pci, state));
+
+ return 0;
+}
+
+static int hw_resume(struct hw *hw, struct card_conf *info)
+{
+ struct pci_dev *pci = hw->pci;
+
+ pci_set_power_state(pci, PCI_D0);
+ pci_restore_state(pci);
+
+ /* Re-initialize card hardware. */
+ return hw_card_init(hw, info);
+}
+#endif
+
static u32 hw_read_20kx(struct hw *hw, u32 reg)
{
u32 value;
@@ -2128,6 +2173,10 @@
.is_adc_source_selected = hw_is_adc_input_selected,
.select_adc_source = hw_adc_input_select,
.have_digit_io_switch = hw_have_digit_io_switch,
+#ifdef CONFIG_PM
+ .suspend = hw_suspend,
+ .resume = hw_resume,
+#endif
.src_rsc_get_ctrl_blk = src_get_rsc_ctrl_blk,
.src_rsc_put_ctrl_blk = src_put_rsc_ctrl_blk,
diff --git a/sound/pci/ctxfi/cthw20k2.c b/sound/pci/ctxfi/cthw20k2.c
index 4493a51..dec46d0 100644
--- a/sound/pci/ctxfi/cthw20k2.c
+++ b/sound/pci/ctxfi/cthw20k2.c
@@ -1860,16 +1860,18 @@
goto error1;
}
- err = pci_request_regions(pci, "XFi");
- if (err < 0)
- goto error1;
+ if (!hw->io_base) {
+ err = pci_request_regions(pci, "XFi");
+ if (err < 0)
+ goto error1;
- hw->io_base = pci_resource_start(hw->pci, 2);
- hw->mem_base = (unsigned long)ioremap(hw->io_base,
+ hw->io_base = pci_resource_start(hw->pci, 2);
+ hw->mem_base = (unsigned long)ioremap(hw->io_base,
pci_resource_len(hw->pci, 2));
- if (NULL == (void *)hw->mem_base) {
- err = -ENOENT;
- goto error2;
+ if (NULL == (void *)hw->mem_base) {
+ err = -ENOENT;
+ goto error2;
+ }
}
/* Switch to 20k2 mode from UAA mode. */
@@ -1901,6 +1903,15 @@
static int hw_card_stop(struct hw *hw)
{
+ unsigned int data;
+
+ /* disable transport bus master and queueing of request */
+ hw_write_20kx(hw, TRANSPORT_CTL, 0x00);
+
+ /* disable pll */
+ data = hw_read_20kx(hw, PLL_ENB);
+ hw_write_20kx(hw, PLL_ENB, (data & (~0x07)));
+
/* TODO: Disable interrupt and so on... */
return 0;
}
@@ -1939,11 +1950,9 @@
/* Get PCI io port/memory base address and
* do 20kx core switch if needed. */
- if (!hw->io_base) {
- err = hw_card_start(hw);
- if (err)
- return err;
- }
+ err = hw_card_start(hw);
+ if (err)
+ return err;
/* PLL init */
err = hw_pll_init(hw, info->rsr);
@@ -2006,6 +2015,32 @@
return 0;
}
+#ifdef CONFIG_PM
+static int hw_suspend(struct hw *hw, pm_message_t state)
+{
+ struct pci_dev *pci = hw->pci;
+
+ hw_card_stop(hw);
+
+ pci_disable_device(pci);
+ pci_save_state(pci);
+ pci_set_power_state(pci, pci_choose_state(pci, state));
+
+ return 0;
+}
+
+static int hw_resume(struct hw *hw, struct card_conf *info)
+{
+ struct pci_dev *pci = hw->pci;
+
+ pci_set_power_state(pci, PCI_D0);
+ pci_restore_state(pci);
+
+ /* Re-initialize card hardware. */
+ return hw_card_init(hw, info);
+}
+#endif
+
static u32 hw_read_20kx(struct hw *hw, u32 reg)
{
return readl((void *)(hw->mem_base + reg));
@@ -2025,6 +2060,10 @@
.is_adc_source_selected = hw_is_adc_input_selected,
.select_adc_source = hw_adc_input_select,
.have_digit_io_switch = hw_have_digit_io_switch,
+#ifdef CONFIG_PM
+ .suspend = hw_suspend,
+ .resume = hw_resume,
+#endif
.src_rsc_get_ctrl_blk = src_get_rsc_ctrl_blk,
.src_rsc_put_ctrl_blk = src_put_rsc_ctrl_blk,
diff --git a/sound/pci/ctxfi/ctmixer.c b/sound/pci/ctxfi/ctmixer.c
index 666722d..f26d7cd 100644
--- a/sound/pci/ctxfi/ctmixer.c
+++ b/sound/pci/ctxfi/ctmixer.c
@@ -462,6 +462,43 @@
return;
}
+static void do_switch(struct ct_atc *atc, enum CTALSA_MIXER_CTL type, int state)
+{
+ struct ct_mixer *mixer = atc->mixer;
+
+ /* Do changes in mixer. */
+ if ((SWH_CAPTURE_START <= type) && (SWH_CAPTURE_END >= type)) {
+ if (state) {
+ ct_mixer_recording_select(mixer,
+ get_amixer_index(type));
+ } else {
+ ct_mixer_recording_unselect(mixer,
+ get_amixer_index(type));
+ }
+ }
+ /* Do changes out of mixer. */
+ if (state && (MIXER_LINEIN_C_S == type || MIXER_MIC_C_S == type))
+ do_line_mic_switch(atc, type);
+ else if (MIXER_WAVEF_P_S == type)
+ atc->line_front_unmute(atc, state);
+ else if (MIXER_WAVES_P_S == type)
+ atc->line_surround_unmute(atc, state);
+ else if (MIXER_WAVEC_P_S == type)
+ atc->line_clfe_unmute(atc, state);
+ else if (MIXER_WAVER_P_S == type)
+ atc->line_rear_unmute(atc, state);
+ else if (MIXER_LINEIN_P_S == type)
+ atc->line_in_unmute(atc, state);
+ else if (MIXER_SPDIFO_P_S == type)
+ atc->spdif_out_unmute(atc, state);
+ else if (MIXER_SPDIFI_P_S == type)
+ atc->spdif_in_unmute(atc, state);
+ else if (MIXER_DIGITAL_IO_S == type)
+ do_digit_io_switch(atc, state);
+
+ return;
+}
+
static int ct_alsa_mix_switch_info(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_info *uinfo)
{
@@ -498,35 +535,7 @@
return 0;
set_switch_state(mixer, type, state);
- /* Do changes in mixer. */
- if ((SWH_CAPTURE_START <= type) && (SWH_CAPTURE_END >= type)) {
- if (state) {
- ct_mixer_recording_select(mixer,
- get_amixer_index(type));
- } else {
- ct_mixer_recording_unselect(mixer,
- get_amixer_index(type));
- }
- }
- /* Do changes out of mixer. */
- if (state && (MIXER_LINEIN_C_S == type || MIXER_MIC_C_S == type))
- do_line_mic_switch(atc, type);
- else if (MIXER_WAVEF_P_S == type)
- atc->line_front_unmute(atc, state);
- else if (MIXER_WAVES_P_S == type)
- atc->line_surround_unmute(atc, state);
- else if (MIXER_WAVEC_P_S == type)
- atc->line_clfe_unmute(atc, state);
- else if (MIXER_WAVER_P_S == type)
- atc->line_rear_unmute(atc, state);
- else if (MIXER_LINEIN_P_S == type)
- atc->line_in_unmute(atc, state);
- else if (MIXER_SPDIFO_P_S == type)
- atc->spdif_out_unmute(atc, state);
- else if (MIXER_SPDIFI_P_S == type)
- atc->spdif_in_unmute(atc, state);
- else if (MIXER_DIGITAL_IO_S == type)
- do_digit_io_switch(atc, state);
+ do_switch(atc, type, state);
return 1;
}
@@ -1039,6 +1048,28 @@
return 0;
}
+#ifdef CONFIG_PM
+static int mixer_resume(struct ct_mixer *mixer)
+{
+ int i, state;
+ struct amixer *amixer;
+
+ /* resume topology and volume gain. */
+ for (i = 0; i < NUM_CT_AMIXERS*CHN_NUM; i++) {
+ amixer = mixer->amixers[i];
+ amixer->ops->commit_write(amixer);
+ }
+
+ /* resume switch state. */
+ for (i = SWH_MIXER_START; i <= SWH_MIXER_END; i++) {
+ state = get_switch_state(mixer, i);
+ do_switch(mixer->atc, i, state);
+ }
+
+ return 0;
+}
+#endif
+
int ct_mixer_destroy(struct ct_mixer *mixer)
{
struct sum_mgr *sum_mgr = (struct sum_mgr *)mixer->atc->rsc_mgrs[SUM];
@@ -1087,6 +1118,9 @@
mixer->get_output_ports = mixer_get_output_ports;
mixer->set_input_left = mixer_set_input_left;
mixer->set_input_right = mixer_set_input_right;
+#ifdef CONFIG_PM
+ mixer->resume = mixer_resume;
+#endif
/* Allocate chip resources for mixer obj */
err = ct_mixer_get_resources(mixer);
diff --git a/sound/pci/ctxfi/ctmixer.h b/sound/pci/ctxfi/ctmixer.h
index e2d96eb..b009e98 100644
--- a/sound/pci/ctxfi/ctmixer.h
+++ b/sound/pci/ctxfi/ctmixer.h
@@ -56,6 +56,9 @@
enum MIXER_PORT_T type, struct rsc *rsc);
int (*set_input_right)(struct ct_mixer *mixer,
enum MIXER_PORT_T type, struct rsc *rsc);
+#ifdef CONFIG_PM
+ int (*resume)(struct ct_mixer *mixer);
+#endif
};
int ct_alsa_mix_create(struct ct_atc *atc,
diff --git a/sound/pci/ctxfi/ctpcm.c b/sound/pci/ctxfi/ctpcm.c
index 9e5c0c4..60ea231 100644
--- a/sound/pci/ctxfi/ctpcm.c
+++ b/sound/pci/ctxfi/ctpcm.c
@@ -422,5 +422,9 @@
snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV_SG,
snd_dma_pci_data(atc->pci), 128*1024, 128*1024);
+#ifdef CONFIG_PM
+ atc->pcms[device] = pcm;
+#endif
+
return 0;
}
diff --git a/sound/pci/ctxfi/xfi.c b/sound/pci/ctxfi/xfi.c
index 2d3dd89..7654174 100644
--- a/sound/pci/ctxfi/xfi.c
+++ b/sound/pci/ctxfi/xfi.c
@@ -121,11 +121,33 @@
pci_set_drvdata(pci, NULL);
}
+#ifdef CONFIG_PM
+static int ct_card_suspend(struct pci_dev *pci, pm_message_t state)
+{
+ struct snd_card *card = pci_get_drvdata(pci);
+ struct ct_atc *atc = card->private_data;
+
+ return atc->suspend(atc, state);
+}
+
+static int ct_card_resume(struct pci_dev *pci)
+{
+ struct snd_card *card = pci_get_drvdata(pci);
+ struct ct_atc *atc = card->private_data;
+
+ return atc->resume(atc);
+}
+#endif
+
static struct pci_driver ct_driver = {
.name = "SB-XFi",
.id_table = ct_pci_dev_ids,
.probe = ct_card_probe,
.remove = __devexit_p(ct_card_remove),
+#ifdef CONFIG_PM
+ .suspend = ct_card_suspend,
+ .resume = ct_card_resume,
+#endif
};
static int __init ct_card_init(void)
diff --git a/sound/pci/hda/Kconfig b/sound/pci/hda/Kconfig
index c710150..04438f1 100644
--- a/sound/pci/hda/Kconfig
+++ b/sound/pci/hda/Kconfig
@@ -2,7 +2,6 @@
tristate "Intel HD Audio"
select SND_PCM
select SND_VMASTER
- select SND_JACK if INPUT=y || INPUT=SND
help
Say Y here to include support for Intel "High Definition
Audio" (Azalia) and its compatible devices.
@@ -39,6 +38,14 @@
Say Y here to build a digital beep interface for HD-audio
driver. This interface is used to generate digital beeps.
+config SND_HDA_INPUT_JACK
+ bool "Support jack plugging notification via input layer"
+ depends on INPUT=y || INPUT=SND_HDA_INTEL
+ select SND_JACK
+ help
+ Say Y here to enable jack-plugging notification via the
+ input layer.
+
config SND_HDA_CODEC_REALTEK
bool "Build Realtek HD-audio codec support"
default y
diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
index 4fcbe21..ac868c5 100644
--- a/sound/pci/hda/patch_conexant.c
+++ b/sound/pci/hda/patch_conexant.c
@@ -349,7 +349,7 @@
&spec->cur_mux[adc_idx]);
}
-#ifdef CONFIG_SND_JACK
+#ifdef CONFIG_SND_HDA_INPUT_JACK
static void conexant_free_jack_priv(struct snd_jack *jack)
{
struct conexant_jack *jacks = jack->private_data;
@@ -463,7 +463,7 @@
static void conexant_free(struct hda_codec *codec)
{
-#ifdef CONFIG_SND_JACK
+#ifdef CONFIG_SND_HDA_INPUT_JACK
struct conexant_spec *spec = codec->spec;
if (spec->jacks.list) {
struct conexant_jack *jacks = spec->jacks.list;
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index bf4b78a..3345331 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -250,13 +250,6 @@
ALC883_MODEL_LAST,
};
-/* styles of capture selection */
-enum {
- CAPT_MUX = 0, /* only mux based */
- CAPT_MIX, /* only mixer based */
- CAPT_1MUX_MIX, /* first mux and other mixers */
-};
-
/* for GPIO Poll */
#define GPIO_MASK 0x03
@@ -306,7 +299,6 @@
hda_nid_t *adc_nids;
hda_nid_t *capsrc_nids;
hda_nid_t dig_in_nid; /* digital-in NID; optional */
- int capture_style; /* capture style (CAPT_*) */
/* capture source */
unsigned int num_mux_defs;
@@ -420,12 +412,13 @@
unsigned int mux_idx;
hda_nid_t nid = spec->capsrc_nids ?
spec->capsrc_nids[adc_idx] : spec->adc_nids[adc_idx];
+ unsigned int type;
mux_idx = adc_idx >= spec->num_mux_defs ? 0 : adc_idx;
imux = &spec->input_mux[mux_idx];
- if (spec->capture_style &&
- !(spec->capture_style == CAPT_1MUX_MIX && !adc_idx)) {
+ type = (get_wcaps(codec, nid) & AC_WCAP_TYPE) >> AC_WCAP_TYPE_SHIFT;
+ if (type == AC_WID_AUD_MIX) {
/* Matrix-mixer style (e.g. ALC882) */
unsigned int *cur_val = &spec->cur_mux[adc_idx];
unsigned int i, idx;
@@ -7557,7 +7550,6 @@
spec->stream_digital_playback = &alc882_pcm_digital_playback;
spec->stream_digital_capture = &alc882_pcm_digital_capture;
- spec->capture_style = CAPT_MIX; /* matrix-style capture */
if (!spec->adc_nids && spec->input_mux) {
/* check whether NID 0x07 is valid */
unsigned int wcap = get_wcaps(codec, 0x07);
@@ -9781,7 +9773,6 @@
}
if (!spec->capsrc_nids)
spec->capsrc_nids = alc883_capsrc_nids;
- spec->capture_style = CAPT_MIX; /* matrix-style capture */
spec->init_amp = ALC_INIT_DEFAULT; /* always initialize */
break;
case 0x10ec0889:
@@ -9791,8 +9782,6 @@
}
if (!spec->capsrc_nids)
spec->capsrc_nids = alc889_capsrc_nids;
- spec->capture_style = CAPT_1MUX_MIX; /* 1mux/Nmix-style
- capture */
break;
default:
if (!spec->num_adc_nids) {
@@ -9801,7 +9790,6 @@
}
if (!spec->capsrc_nids)
spec->capsrc_nids = alc883_capsrc_nids;
- spec->capture_style = CAPT_MIX; /* matrix-style capture */
break;
}
@@ -10913,9 +10901,27 @@
return 0;
}
-/* identical with ALC880 */
-#define alc262_auto_create_analog_input_ctls \
- alc880_auto_create_analog_input_ctls
+static int alc262_auto_create_analog_input_ctls(struct alc_spec *spec,
+ const struct auto_pin_cfg *cfg)
+{
+ int err;
+
+ err = alc880_auto_create_analog_input_ctls(spec, cfg);
+ if (err < 0)
+ return err;
+ /* digital-mic input pin is excluded in alc880_auto_create..()
+ * because it's under 0x18
+ */
+ if (cfg->input_pins[AUTO_PIN_MIC] == 0x12 ||
+ cfg->input_pins[AUTO_PIN_FRONT_MIC] == 0x12) {
+ struct hda_input_mux *imux = &spec->private_imux[0];
+ imux->items[imux->num_items].label = "Int Mic";
+ imux->items[imux->num_items].index = 0x09;
+ imux->num_items++;
+ }
+ return 0;
+}
+
/*
* generic initialization of ADC, input mixers and output mixers
@@ -11332,6 +11338,7 @@
SND_PCI_QUIRK(0x104d, 0x8203, "Sony UX-90", ALC262_HIPPO),
SND_PCI_QUIRK(0x104d, 0x820f, "Sony ASSAMD", ALC262_SONY_ASSAMD),
SND_PCI_QUIRK(0x104d, 0x9016, "Sony VAIO", ALC262_AUTO), /* dig-only */
+ SND_PCI_QUIRK(0x104d, 0x9025, "Sony VAIO Z21MN", ALC262_TOSHIBA_S06),
SND_PCI_QUIRK_MASK(0x104d, 0xff00, 0x9000, "Sony VAIO",
ALC262_SONY_ASSAMD),
SND_PCI_QUIRK(0x1179, 0x0001, "Toshiba dynabook SS RX1",
@@ -11539,6 +11546,7 @@
.capsrc_nids = alc262_dmic_capsrc_nids,
.dac_nids = alc262_dac_nids,
.adc_nids = alc262_dmic_adc_nids, /* ADC0 */
+ .num_adc_nids = 1, /* single ADC */
.dig_out_nid = ALC262_DIGOUT_NID,
.num_channel_mode = ARRAY_SIZE(alc262_modes),
.channel_mode = alc262_modes,
@@ -11640,21 +11648,36 @@
spec->stream_digital_playback = &alc262_pcm_digital_playback;
spec->stream_digital_capture = &alc262_pcm_digital_capture;
- spec->capture_style = CAPT_MIX;
if (!spec->adc_nids && spec->input_mux) {
- /* check whether NID 0x07 is valid */
- unsigned int wcap = get_wcaps(codec, 0x07);
-
- /* get type */
- wcap = (wcap & AC_WCAP_TYPE) >> AC_WCAP_TYPE_SHIFT;
- if (wcap != AC_WID_AUD_IN) {
- spec->adc_nids = alc262_adc_nids_alt;
- spec->num_adc_nids = ARRAY_SIZE(alc262_adc_nids_alt);
- spec->capsrc_nids = alc262_capsrc_nids_alt;
+ int i;
+ /* check whether the digital-mic has to be supported */
+ for (i = 0; i < spec->input_mux->num_items; i++) {
+ if (spec->input_mux->items[i].index >= 9)
+ break;
+ }
+ if (i < spec->input_mux->num_items) {
+ /* use only ADC0 */
+ spec->adc_nids = alc262_dmic_adc_nids;
+ spec->num_adc_nids = 1;
+ spec->capsrc_nids = alc262_dmic_capsrc_nids;
} else {
- spec->adc_nids = alc262_adc_nids;
- spec->num_adc_nids = ARRAY_SIZE(alc262_adc_nids);
- spec->capsrc_nids = alc262_capsrc_nids;
+ /* all analog inputs */
+ /* check whether NID 0x07 is valid */
+ unsigned int wcap = get_wcaps(codec, 0x07);
+
+ /* get type */
+ wcap = (wcap & AC_WCAP_TYPE) >> AC_WCAP_TYPE_SHIFT;
+ if (wcap != AC_WID_AUD_IN) {
+ spec->adc_nids = alc262_adc_nids_alt;
+ spec->num_adc_nids =
+ ARRAY_SIZE(alc262_adc_nids_alt);
+ spec->capsrc_nids = alc262_capsrc_nids_alt;
+ } else {
+ spec->adc_nids = alc262_adc_nids;
+ spec->num_adc_nids =
+ ARRAY_SIZE(alc262_adc_nids);
+ spec->capsrc_nids = alc262_capsrc_nids;
+ }
}
}
if (!spec->cap_mixer && !spec->no_analog)
@@ -13244,26 +13267,8 @@
return 0;
}
-static int alc269_auto_create_analog_input_ctls(struct alc_spec *spec,
- const struct auto_pin_cfg *cfg)
-{
- int err;
-
- err = alc880_auto_create_analog_input_ctls(spec, cfg);
- if (err < 0)
- return err;
- /* digital-mic input pin is excluded in alc880_auto_create..()
- * because it's under 0x18
- */
- if (cfg->input_pins[AUTO_PIN_MIC] == 0x12 ||
- cfg->input_pins[AUTO_PIN_FRONT_MIC] == 0x12) {
- struct hda_input_mux *imux = &spec->private_imux[0];
- imux->items[imux->num_items].label = "Int Mic";
- imux->items[imux->num_items].index = 0x05;
- imux->num_items++;
- }
- return 0;
-}
+#define alc269_auto_create_analog_input_ctls \
+ alc262_auto_create_analog_input_ctls
#ifdef CONFIG_SND_HDA_POWER_SAVE
#define alc269_loopbacks alc880_loopbacks
@@ -15554,7 +15559,6 @@
spec->adc_nids = alc861vd_adc_nids;
spec->num_adc_nids = ARRAY_SIZE(alc861vd_adc_nids);
spec->capsrc_nids = alc861vd_capsrc_nids;
- spec->capture_style = CAPT_MIX;
set_capture_mixer(spec);
set_beep_amp(spec, 0x0b, 0x05, HDA_INPUT);
@@ -17474,7 +17478,6 @@
spec->adc_nids = alc662_adc_nids;
spec->num_adc_nids = ARRAY_SIZE(alc662_adc_nids);
spec->capsrc_nids = alc662_capsrc_nids;
- spec->capture_style = CAPT_MIX;
if (!spec->cap_mixer)
set_capture_mixer(spec);
diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c
index 93e47c9..14f3c3e 100644
--- a/sound/pci/hda/patch_sigmatel.c
+++ b/sound/pci/hda/patch_sigmatel.c
@@ -639,7 +639,7 @@
static unsigned int stac92xx_vref_set(struct hda_codec *codec,
hda_nid_t nid, unsigned int new_vref)
{
- unsigned int error;
+ int error;
unsigned int pincfg;
pincfg = snd_hda_codec_read(codec, nid, 0,
AC_VERB_GET_PIN_WIDGET_CONTROL, 0);
@@ -2703,7 +2703,7 @@
{
struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
unsigned int new_vref = 0;
- unsigned int error;
+ int error;
hda_nid_t nid = kcontrol->private_value;
if (ucontrol->value.enumerated.item[0] == 0)
@@ -4035,7 +4035,7 @@
AC_VERB_SET_GPIO_DATA, gpiostate); /* sync */
}
-#ifdef CONFIG_SND_JACK
+#ifdef CONFIG_SND_HDA_INPUT_JACK
static void stac92xx_free_jack_priv(struct snd_jack *jack)
{
struct sigmatel_jack *jacks = jack->private_data;
@@ -4047,7 +4047,7 @@
static int stac92xx_add_jack(struct hda_codec *codec,
hda_nid_t nid, int type)
{
-#ifdef CONFIG_SND_JACK
+#ifdef CONFIG_SND_HDA_INPUT_JACK
struct sigmatel_spec *spec = codec->spec;
struct sigmatel_jack *jack;
int def_conf = snd_hda_codec_get_pincfg(codec, nid);
@@ -4336,7 +4336,7 @@
static void stac92xx_free_jacks(struct hda_codec *codec)
{
-#ifdef CONFIG_SND_JACK
+#ifdef CONFIG_SND_HDA_INPUT_JACK
/* free jack instances manually when clearing/reconfiguring */
struct sigmatel_spec *spec = codec->spec;
if (!codec->bus->shutdown && spec->jacks.list) {
diff --git a/sound/pci/via82xx.c b/sound/pci/via82xx.c
index 1ef58c5..949fcaf 100644
--- a/sound/pci/via82xx.c
+++ b/sound/pci/via82xx.c
@@ -85,6 +85,7 @@
static int ac97_clock = 48000;
static char *ac97_quirk;
static int dxs_support;
+static int nodelay;
module_param(index, int, 0444);
MODULE_PARM_DESC(index, "Index value for VIA 82xx bridge.");
@@ -102,6 +103,8 @@
MODULE_PARM_DESC(ac97_quirk, "AC'97 workaround for strange hardware.");
module_param(dxs_support, int, 0444);
MODULE_PARM_DESC(dxs_support, "Support for DXS channels (0 = auto, 1 = enable, 2 = disable, 3 = 48k only, 4 = no VRA, 5 = enable any sample rate)");
+module_param(nodelay, int, 0444);
+MODULE_PARM_DESC(nodelay, "Disable 500ms init delay");
/* just for backward compatibility */
static int enable;
@@ -549,7 +552,8 @@
int err;
err = snd_via82xx_codec_ready(chip, ac97->num);
/* here we need to wait for a fairly long time.. */
- msleep(500);
+ if (!nodelay)
+ msleep(500);
}
static void snd_via82xx_codec_write(struct snd_ac97 *ac97,
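The via82xx change simply gates the long codec settle time behind a module parameter, so users who know their hardware does not need it can load the driver with nodelay=1 and skip the 500 ms sleep. A self-contained sketch of the same parameter-gated delay pattern, with generic names that are not the via82xx source:

	#include <linux/module.h>
	#include <linux/delay.h>

	/* Illustrative module-parameter gate. */
	static int nodelay;
	module_param(nodelay, int, 0444);
	MODULE_PARM_DESC(nodelay, "Skip the long codec init delay");

	static void example_codec_settle(void)
	{
		if (!nodelay)
			msleep(500);    /* conservative settle time for slow codecs */
	}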
diff --git a/sound/soc/blackfin/bf5xx-i2s.c b/sound/soc/blackfin/bf5xx-i2s.c
index 9648244..af06904 100644
--- a/sound/soc/blackfin/bf5xx-i2s.c
+++ b/sound/soc/blackfin/bf5xx-i2s.c
@@ -50,6 +50,7 @@
u16 tcr2;
u16 rcr2;
int counter;
+ int configured;
};
static struct bf5xx_i2s_port bf5xx_i2s;
@@ -168,7 +169,7 @@
break;
}
- if (bf5xx_i2s.counter == 1) {
+ if (!bf5xx_i2s.configured) {
/*
* TX and RX are not independent; they are enabled at the
* same time, even if only one side is running. So, we
@@ -177,6 +178,7 @@
*
* CPU DAI:slave mode.
*/
+ bf5xx_i2s.configured = 1;
ret = sport_config_rx(sport_handle, bf5xx_i2s.rcr1,
bf5xx_i2s.rcr2, 0, 0);
if (ret) {
@@ -200,6 +202,9 @@
{
pr_debug("%s enter\n", __func__);
bf5xx_i2s.counter--;
+ /* No active stream, SPORT is allowed to be configured again. */
+ if (!bf5xx_i2s.counter)
+ bf5xx_i2s.configured = 0;
}
static int bf5xx_i2s_probe(struct platform_device *pdev,
@@ -244,8 +249,7 @@
return 0;
}
-static int bf5xx_i2s_resume(struct platform_device *pdev,
- struct snd_soc_dai *dai)
+static int bf5xx_i2s_resume(struct snd_soc_dai *dai)
{
int ret;
struct sport_device *sport =
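The bf5xx-i2s hunks replace the fragile "counter == 1" test with an explicit configured flag: the shared SPORT is programmed once when the first stream needs it, and the flag is cleared only after the last stream shuts down, so TX and RX (which share a single enable) never reprogram the port underneath each other. A short sketch of that open-count/configured pattern, with names invented for illustration:

	/* Illustrative only -- names do not come from the driver. */
	struct shared_port {
		int open_count;     /* streams currently using the port   */
		int configured;     /* shared hardware already programmed */
	};

	static void port_startup(struct shared_port *p)
	{
		p->open_count++;
	}

	static int port_hw_params(struct shared_port *p)
	{
		if (!p->configured) {
			p->configured = 1;
			/* program the shared hardware exactly once here */
		}
		return 0;
	}

	static void port_shutdown(struct shared_port *p)
	{
		if (!--p->open_count)
			p->configured = 0;  /* last user gone: allow reconfiguration */
	}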
diff --git a/sound/soc/pxa/corgi.c b/sound/soc/pxa/corgi.c
index d5be2b3..fefe1a5 100644
--- a/sound/soc/pxa/corgi.c
+++ b/sound/soc/pxa/corgi.c
@@ -320,38 +320,6 @@
.codec_dev = &soc_codec_dev_wm8731,
};
-/*
- * FIXME: This is a temporary bodge to avoid cross-tree merge issues.
- * New drivers should register the wm8731 I2C device in the machine
- * setup code (under arch/arm for ARM systems).
- */
-static int wm8731_i2c_register(void)
-{
- struct i2c_board_info info;
- struct i2c_adapter *adapter;
- struct i2c_client *client;
-
- memset(&info, 0, sizeof(struct i2c_board_info));
- info.addr = 0x1b;
- strlcpy(info.type, "wm8731", I2C_NAME_SIZE);
-
- adapter = i2c_get_adapter(0);
- if (!adapter) {
- printk(KERN_ERR "can't get i2c adapter 0\n");
- return -ENODEV;
- }
-
- client = i2c_new_device(adapter, &info);
- i2c_put_adapter(adapter);
- if (!client) {
- printk(KERN_ERR "can't add i2c device at 0x%x\n",
- (unsigned int)info.addr);
- return -ENODEV;
- }
-
- return 0;
-}
-
static struct platform_device *corgi_snd_device;
static int __init corgi_init(void)
@@ -362,10 +330,6 @@
machine_is_husky()))
return -ENODEV;
- ret = wm8731_i2c_register();
- if (ret != 0)
- return ret;
-
corgi_snd_device = platform_device_alloc("soc-audio", -1);
if (!corgi_snd_device)
return -ENOMEM;
diff --git a/sound/soc/pxa/poodle.c b/sound/soc/pxa/poodle.c
index a51058f..c5f36e0 100644
--- a/sound/soc/pxa/poodle.c
+++ b/sound/soc/pxa/poodle.c
@@ -280,38 +280,6 @@
.num_links = 1,
};
-/*
- * FIXME: This is a temporary bodge to avoid cross-tree merge issues.
- * New drivers should register the wm8731 I2C device in the machine
- * setup code (under arch/arm for ARM systems).
- */
-static int wm8731_i2c_register(void)
-{
- struct i2c_board_info info;
- struct i2c_adapter *adapter;
- struct i2c_client *client;
-
- memset(&info, 0, sizeof(struct i2c_board_info));
- info.addr = 0x1b;
- strlcpy(info.type, "wm8731", I2C_NAME_SIZE);
-
- adapter = i2c_get_adapter(0);
- if (!adapter) {
- printk(KERN_ERR "can't get i2c adapter 0\n");
- return -ENODEV;
- }
-
- client = i2c_new_device(adapter, &info);
- i2c_put_adapter(adapter);
- if (!client) {
- printk(KERN_ERR "can't add i2c device at 0x%x\n",
- (unsigned int)info.addr);
- return -ENODEV;
- }
-
- return 0;
-}
-
/* poodle audio subsystem */
static struct snd_soc_device poodle_snd_devdata = {
.card = &snd_soc_poodle,
@@ -327,10 +295,6 @@
if (!machine_is_poodle())
return -ENODEV;
- ret = wm8731_i2c_register();
- if (ret != 0)
- return ret;
-
locomo_gpio_set_dir(&poodle_locomo_device.dev,
POODLE_LOCOMO_GPIO_AMP_ON, 0);
/* should we mute HP at startup - burning power? */
diff --git a/tools/perf/perf.h b/tools/perf/perf.h
index bccb529..ceb68aa 100644
--- a/tools/perf/perf.h
+++ b/tools/perf/perf.h
@@ -13,6 +13,12 @@
#define cpu_relax() asm volatile ("" ::: "memory");
#endif
+#ifdef __s390__
+#include "../../arch/s390/include/asm/unistd.h"
+#define rmb() asm volatile("bcr 15,0" ::: "memory")
+#define cpu_relax() asm volatile("" ::: "memory");
+#endif
+
#include <time.h>
#include <unistd.h>
#include <sys/types.h>
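The perf.h addition mirrors the other architecture blocks in this header: on s390 the "bcr 15,0" instruction serializes the CPU, so it can stand in for rmb() when the tool reads the event ring buffer that the kernel writes. A hedged sketch of the consumer-side read these macros support; the structure and field follow the perf mmap page of this kernel series, and the helper name is illustrative:

	/* Illustrative ring-buffer head read, assuming the rmb() defined above. */
	static unsigned long example_mmap_read_head(struct perf_counter_mmap_page *pc)
	{
		unsigned long head = pc->data_head;

		rmb();  /* order the head load before reading the records it covers */
		return head;
	}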