Merge branch 'release' of git://git.kernel.org/pub/scm/linux/kernel/git/aegl/linux-2.6
* 'release' of git://git.kernel.org/pub/scm/linux/kernel/git/aegl/linux-2.6:
[IA64] acpi_get_sysname() should be __init
[IA64] Cleanup acpi header to reuse the generic _PDC defines
[IA64] Fix using uninitialized data in _PDC setup
[IA64] start_secondary() and smp_callin() should be __cpuinit
diff --git a/Documentation/CodingStyle b/Documentation/CodingStyle
index afc2867..b49b92e 100644
--- a/Documentation/CodingStyle
+++ b/Documentation/CodingStyle
@@ -495,29 +495,40 @@
remember: "indent" is not a fix for bad programming.
- Chapter 10: Configuration-files
+ Chapter 10: Kconfig configuration files
-For configuration options (arch/xxx/Kconfig, and all the Kconfig files),
-somewhat different indentation is used.
+For all of the Kconfig* configuration files throughout the source tree,
+the indentation is somewhat different. Lines under a "config" definition
+are indented with one tab, while help text is indented an additional two
+spaces. Example:
-Help text is indented with 2 spaces.
-
-if CONFIG_EXPERIMENTAL
- tristate CONFIG_BOOM
- default n
+config AUDIT
+ bool "Auditing support"
+ depends on NET
help
- Apply nitroglycerine inside the keyboard (DANGEROUS)
- bool CONFIG_CHEER
- depends on CONFIG_BOOM
- default y
- help
- Output nice messages when you explode
-endif
+ Enable auditing infrastructure that can be used with another
+ kernel subsystem, such as SELinux (which requires this for
+ logging of avc messages output). Does not do system-call
+ auditing without CONFIG_AUDITSYSCALL.
-Generally, CONFIG_EXPERIMENTAL should surround all options not considered
-stable. All options that are known to trash data (experimental write-
-support for file-systems, for instance) should be denoted (DANGEROUS), other
-experimental options should be denoted (EXPERIMENTAL).
+Features that might still be considered unstable should be defined as
+dependent on "EXPERIMENTAL":
+
+config SLUB
+ depends on EXPERIMENTAL && !ARCH_USES_SLAB_PAGE_STRUCT
+ bool "SLUB (Unqueued Allocator)"
+ ...
+
+while seriously dangerous features (such as write support for certain
+filesystems) should advertise this prominently in their prompt string:
+
+config ADFS_FS_RW
+ bool "ADFS write support (DANGEROUS)"
+ depends on ADFS_FS
+ ...
+
+For full documentation on the configuration files, see the file
+Documentation/kbuild/kconfig-language.txt.
Chapter 11: Data structures
diff --git a/Documentation/HOWTO b/Documentation/HOWTO
index 48123db..ced9207 100644
--- a/Documentation/HOWTO
+++ b/Documentation/HOWTO
@@ -396,26 +396,6 @@
-Managing bug reports
---------------------
-
-One of the best ways to put into practice your hacking skills is by fixing
-bugs reported by other people. Not only you will help to make the kernel
-more stable, you'll learn to fix real world problems and you will improve
-your skills, and other developers will be aware of your presence. Fixing
-bugs is one of the best ways to get merits among other developers, because
-not many people like wasting time fixing other people's bugs.
-
-To work in the already reported bug reports, go to http://bugzilla.kernel.org.
-If you want to be advised of the future bug reports, you can subscribe to the
-bugme-new mailing list (only new bug reports are mailed here) or to the
-bugme-janitor mailing list (every change in the bugzilla is mailed here)
-
- http://lists.osdl.org/mailman/listinfo/bugme-new
- http://lists.osdl.org/mailman/listinfo/bugme-janitors
-
-
-
Mailing lists
-------------
diff --git a/Documentation/block/capability.txt b/Documentation/block/capability.txt
new file mode 100644
index 0000000..2f17294
--- /dev/null
+++ b/Documentation/block/capability.txt
@@ -0,0 +1,15 @@
+Generic Block Device Capability
+===============================================================================
+This file documents the sysfs file block/<disk>/capability
+
+capability is a hex word indicating which capabilities a specific disk
+supports. For more information on bits not listed here, see
+include/linux/genhd.h
+
+Capability Value
+-------------------------------------------------------------------------------
+GENHD_FL_MEDIA_CHANGE_NOTIFY 4
+ When this bit is set, the disk supports Asynchronous Notification
+ of media change events. These events will be broadcast to user
+ space via kernel uevent.
+
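
As a rough illustration, user space can read this file and test the bit; the
sketch below is plain C, assumes a disk named "sda", and is only an example
(the path and the check are not part of any kernel API):

    #include <stdio.h>

    int main(void)
    {
            FILE *f = fopen("/sys/block/sda/capability", "r");
            unsigned int cap = 0;

            if (!f || fscanf(f, "%x", &cap) != 1)
                    return 1;
            fclose(f);

            /* bit value 4 == GENHD_FL_MEDIA_CHANGE_NOTIFY */
            printf("media change notification %ssupported\n",
                   (cap & 4) ? "" : "not ");
            return 0;
    }
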
diff --git a/Documentation/dontdiff b/Documentation/dontdiff
index 64e9f6c..595a5ea 100644
--- a/Documentation/dontdiff
+++ b/Documentation/dontdiff
@@ -10,10 +10,12 @@
*.grp
*.gz
*.html
+*.i
*.jpeg
*.ko
*.log
*.lst
+*.moc
*.mod.c
*.o
*.orig
@@ -25,6 +27,9 @@
*.s
*.sgml
*.so
+*.symtypes
+*.tab.c
+*.tab.h
*.tex
*.ver
*.xml
@@ -32,9 +37,13 @@
*_vga16.c
*cscope*
*~
+*.9
+*.9.gz
.*
.cscope
53c700_d.h
+53c7xx_d.h
+53c7xx_u.h
53c8xx_d.h*
BitKeeper
COPYING
@@ -70,9 +79,11 @@
classlist.h*
comp*.log
compile.h*
+conf
config
config-*
config_data.h*
+config_data.gz*
conmakehash
consolemap_deftbl.c*
crc32table.h*
@@ -81,18 +92,23 @@
devlist.h*
docproc
dummy_sym.c*
+elf2ecoff
elfconfig.h*
filelist
fixdep
fore200e_mkfirm
fore200e_pca_fw.c*
+gconf
gen-devlist
gen-kdb_cmds.c*
gen_crc32table
gen_init_cpio
genksyms
gentbl
+*_gray256.c
ikconfig.h*
+initramfs_data.cpio
+initramfs_data.cpio.gz
initramfs_list
kallsyms
kconfig
@@ -100,19 +116,30 @@
keywords.c*
ksym.c*
ksym.h*
+kxgettext
+lkc_defs.h
lex.c*
+lex.*.c
+lk201-map.c
logo_*.c
logo_*_clut224.c
logo_*_mono.c
lxdialog
mach-types
mach-types.h
+machtypes.h
make_times_h
map
maui_boot.h
+mconf
+miboot*
mk_elfconfig
+mkboot
+mkbugboot
mkdep
+mkprep
mktables
+mktree
modpost
modversions.h*
offset.h
@@ -120,18 +147,28 @@
oui.c*
parse.c*
parse.h*
+patches*
+pca200e.bin
+pca200e_ecd.bin2
+piggy.gz
+piggyback
pnmtologo
ppc_defs.h*
promcon_tbl.c*
pss_boot.h
+qconf
raid6altivec*.c
raid6int*.c
raid6tables.c
+relocs
+series
setup
sim710_d.h*
+sImage
sm_tbl*
split-include
tags
+tftpboot.img
times.h*
tkparse
trix_boot.h
@@ -139,8 +176,11 @@
version.h*
vmlinux
vmlinux-*
+vmlinux.aout
vmlinux.lds
vsyscall.lds
wanxlfw.inc
uImage
-zImage
+unifdef
+zImage*
+zconf.hash.c
diff --git a/Documentation/filesystems/directory-locking b/Documentation/filesystems/directory-locking
index d7099a9..ff7b611 100644
--- a/Documentation/filesystems/directory-locking
+++ b/Documentation/filesystems/directory-locking
@@ -1,5 +1,6 @@
Locking scheme used for directory operations is based on two
-kinds of locks - per-inode (->i_sem) and per-filesystem (->s_vfs_rename_sem).
+kinds of locks - per-inode (->i_mutex) and per-filesystem
+(->s_vfs_rename_mutex).
For our purposes all operations fall in 5 classes:
@@ -63,7 +64,7 @@
attempt to acquire some lock and already holds at least one lock. Let's
consider the set of contended locks. First of all, filesystem lock is
not contended, since any process blocked on it is not holding any locks.
-Thus all processes are blocked on ->i_sem.
+Thus all processes are blocked on ->i_mutex.
Non-directory objects are not contended due to (3). Thus link
creation can't be a part of deadlock - it can't be blocked on source
diff --git a/Documentation/filesystems/porting b/Documentation/filesystems/porting
index 5531694..dac45c9 100644
--- a/Documentation/filesystems/porting
+++ b/Documentation/filesystems/porting
@@ -107,7 +107,7 @@
---
[informational]
-->link() callers hold ->i_sem on the object we are linking to. Some of your
+->link() callers hold ->i_mutex on the object we are linking to. Some of your
problems might be over...
---
@@ -130,9 +130,9 @@
---
[mandatory]
-->setattr() is called without BKL now. Caller _always_ holds ->i_sem, so
-watch for ->i_sem-grabbing code that might be used by your ->setattr().
-Callers of notify_change() need ->i_sem now.
+->setattr() is called without BKL now. Caller _always_ holds ->i_mutex, so
+watch for ->i_mutex-grabbing code that might be used by your ->setattr().
+Callers of notify_change() need ->i_mutex now.
---
[recommended]
diff --git a/Documentation/i386/boot.txt b/Documentation/i386/boot.txt
index 66fa67f..35985b3 100644
--- a/Documentation/i386/boot.txt
+++ b/Documentation/i386/boot.txt
@@ -2,7 +2,7 @@
----------------------------
H. Peter Anvin <hpa@zytor.com>
- Last update 2007-05-16
+ Last update 2007-05-23
On the i386 platform, the Linux kernel uses a rather complicated boot
convention. This has evolved partially due to historical aspects, as
@@ -202,6 +202,8 @@
nonstandard address should fill in the fields marked (reloc); other
boot loaders can ignore those fields.
+The byte order of all fields is little-endian (this is x86, after all.)
+
Field name: setup_sects
Type: read
Offset/size: 0x1f1/1
@@ -280,14 +282,16 @@
Offset/size: 0x206/2
Protocol: 2.00+
- Contains the boot protocol version, e.g. 0x0204 for version 2.04.
+ Contains the boot protocol version, in (major << 8)+minor format,
+ e.g. 0x0204 for version 2.04, and 0x0a11 for a hypothetical version
+ 10.17.
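
As a trivial sketch of that encoding (the helper name here is made up, not
part of the protocol):

    /* split the protocol version word, e.g. 0x0204 -> major 2, minor 4 */
    static void split_version(unsigned short version,
                              unsigned int *major, unsigned int *minor)
    {
            *major = version >> 8;
            *minor = version & 0xff;
    }
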
Field name: realmode_swtch
Type: modify (optional)
Offset/size: 0x208/4
Protocol: 2.00+
- Boot loader hook (see separate chapter.)
+ Boot loader hook (see ADVANCED BOOT LOADER HOOKS below.)
Field name: start_sys
Type: read
@@ -304,10 +308,17 @@
If set to a nonzero value, contains a pointer to a NUL-terminated
human-readable kernel version number string, less 0x200. This can
be used to display the kernel version to the user. This value
- should be less than (0x200*setup_sects). For example, if this value
- is set to 0x1c00, the kernel version number string can be found at
- offset 0x1e00 in the kernel file. This is a valid value if and only
- if the "setup_sects" field contains the value 14 or higher.
+ should be less than (0x200*setup_sects).
+
+ For example, if this value is set to 0x1c00, the kernel version
+ number string can be found at offset 0x1e00 in the kernel file.
+ This is a valid value if and only if the "setup_sects" field
+ contains the value 15 or higher, as:
+
+ 0x1c00 < 15*0x200 (= 0x1e00) but
+ 0x1c00 >= 14*0x200 (= 0x1c00)
+
+ 0x1c00 >> 9 = 14, so the minimum value for setup_sects is 15.
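
The same arithmetic, written out as C (the helper name is only illustrative;
the argument is the kernel_version field value discussed above):

    /* smallest setup_sects for which kernel_version < 0x200*setup_sects,
     * e.g. 0x1c00 -> (0x1c00 >> 9) + 1 = 15
     */
    static unsigned int min_setup_sects(unsigned int kernel_version)
    {
            return (kernel_version >> 9) + 1;
    }
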
Field name: type_of_loader
Type: write (obligatory)
@@ -377,7 +388,7 @@
This field can be modified for two purposes:
- 1. as a boot loader hook (see separate chapter.)
+ 1. as a boot loader hook (see ADVANCED BOOT LOADER HOOKS below.)
2. if a bootloader which does not install a hook loads a
relocatable kernel at a nonstandard address it will have to modify
@@ -715,7 +726,7 @@
a demand-loaded module!
-**** ADVANCED BOOT TIME HOOKS
+**** ADVANCED BOOT LOADER HOOKS
If the boot loader runs in a particularly hostile environment (such as
LOADLIN, which runs under DOS) it may be impossible to follow the
@@ -740,4 +751,5 @@
set them up to BOOT_DS (0x18) yourself.
After completing your hook, you should jump to the address
- that was in this field before your boot loader overwrote it.
+ that was in this field before your boot loader overwrote it
+ (relocated, if appropriate.)
diff --git a/Documentation/initrd.txt b/Documentation/initrd.txt
index 15f1b35..d3dc505 100644
--- a/Documentation/initrd.txt
+++ b/Documentation/initrd.txt
@@ -27,16 +27,20 @@
1) the boot loader loads the kernel and the initial RAM disk
2) the kernel converts initrd into a "normal" RAM disk and
frees the memory used by initrd
- 3) initrd is mounted read-write as root
- 4) /linuxrc is executed (this can be any valid executable, including
+ 3) if the root device is not /dev/ram0, the old (deprecated)
+ change_root procedure is followed. See the "Obsolete root change
+ mechanism" section below.
+ 4) the root device is mounted. If it is /dev/ram0, the initrd image is
+ then mounted as root
+ 5) /sbin/init is executed (this can be any valid executable, including
shell scripts; it is run with uid 0 and can do basically everything
- init can do)
- 5) linuxrc mounts the "real" root file system
- 6) linuxrc places the root file system at the root directory using the
+ init can do).
+ 6) init mounts the "real" root file system
+ 7) init places the root file system at the root directory using the
pivot_root system call
- 7) the usual boot sequence (e.g. invocation of /sbin/init) is performed
- on the root file system
- 8) the initrd file system is removed
+ 8) init execs the /sbin/init on the new root filesystem, performing
+ the usual boot sequence
+ 9) the initrd file system is removed
Note that changing the root directory does not involve unmounting it.
It is therefore possible to leave processes running on initrd during that
@@ -70,7 +74,7 @@
root=/dev/ram0
initrd is mounted as root, and the normal boot procedure is followed,
- with the RAM disk still mounted as root.
+ with the RAM disk mounted as root.
Compressed cpio images
----------------------
@@ -137,11 +141,11 @@
# mkdir /mnt/dev
# mknod /mnt/dev/console c 5 1
5) copy all the files that are needed to properly use the initrd
- environment. Don't forget the most important file, /linuxrc
- Note that /linuxrc's permissions must include "x" (execute).
+ environment. Don't forget the most important file, /sbin/init
+ Note that /sbin/init's permissions must include "x" (execute).
6) correct operation of the initrd environment can frequently be tested
even without rebooting with the command
- # chroot /mnt /linuxrc
+ # chroot /mnt /sbin/init
This is of course limited to initrds that do not interfere with the
general system state (e.g. by reconfiguring network interfaces,
overwriting mounted devices, trying to start already running demons,
@@ -154,7 +158,7 @@
# gzip -9 initrd
For experimenting with initrd, you may want to take a rescue floppy and
-only add a symbolic link from /linuxrc to /bin/sh. Alternatively, you
+only add a symbolic link from /sbin/init to /bin/sh. Alternatively, you
can try the experimental newlib environment [2] to create a small
initrd.
@@ -163,15 +167,14 @@
with an older mechanism, the following boot command line parameters
have to be given:
- root=/dev/ram0 init=/linuxrc rw
+ root=/dev/ram0 rw
(rw is only necessary if writing to the initrd file system.)
With LOADLIN, you simply execute
LOADLIN <kernel> initrd=<disk_image>
-e.g. LOADLIN C:\LINUX\BZIMAGE initrd=C:\LINUX\INITRD.GZ root=/dev/ram0
- init=/linuxrc rw
+e.g. LOADLIN C:\LINUX\BZIMAGE initrd=C:\LINUX\INITRD.GZ root=/dev/ram0 rw
With LILO, you add the option INITRD=<path> to either the global section
or to the section of the respective kernel in /etc/lilo.conf, and pass
@@ -179,7 +182,7 @@
image = /bzImage
initrd = /boot/initrd.gz
- append = "root=/dev/ram0 init=/linuxrc rw"
+ append = "root=/dev/ram0 rw"
and run /sbin/lilo
@@ -191,7 +194,7 @@
Changing the root device
------------------------
-When finished with its duties, linuxrc typically changes the root device
+When finished with its duties, init typically changes the root device
and proceeds with starting the Linux system on the "real" root device.
The procedure involves the following steps:
@@ -217,7 +220,7 @@
# mkdir initrd
# pivot_root . initrd
-Now, the linuxrc process may still access the old root via its
+Now, the init process may still access the old root via its
executable, shared libraries, standard input/output/error, and its
current root directory. All these references are dropped by the
following command:
@@ -249,10 +252,6 @@
It is also possible to use initrd with an NFS-mounted root, see the
pivot_root(8) man page for details.
-Note: if linuxrc or any program exec'ed from it terminates for some
-reason, the old change_root mechanism is invoked (see section "Obsolete
-root change mechanism").
-
Usage scenarios
---------------
@@ -264,15 +263,15 @@
1) system boots from floppy or other media with a minimal kernel
(e.g. support for RAM disks, initrd, a.out, and the Ext2 FS) and
loads initrd
- 2) /linuxrc determines what is needed to (1) mount the "real" root FS
+ 2) /sbin/init determines what is needed to (1) mount the "real" root FS
(i.e. device type, device drivers, file system) and (2) the
distribution media (e.g. CD-ROM, network, tape, ...). This can be
done by asking the user, by auto-probing, or by using a hybrid
approach.
- 3) /linuxrc loads the necessary kernel modules
- 4) /linuxrc creates and populates the root file system (this doesn't
+ 3) /sbin/init loads the necessary kernel modules
+ 4) /sbin/init creates and populates the root file system (this doesn't
have to be a very usable system yet)
- 5) /linuxrc invokes pivot_root to change the root file system and
+ 5) /sbin/init invokes pivot_root to change the root file system and
execs - via chroot - a program that continues the installation
6) the boot loader is installed
7) the boot loader is configured to load an initrd with the set of
@@ -291,7 +290,7 @@
such cases, it is desirable to generate only a small set of kernels
(ideally only one) and to keep the system-specific part of configuration
information as small as possible. In this case, a common initrd could be
-generated with all the necessary modules. Then, only /linuxrc or a file
+generated with all the necessary modules. Then, only /sbin/init or a file
read by it would have to be different.
A third scenario are more convenient recovery disks, because information
@@ -337,6 +336,25 @@
the new, supported mechanism is called "pivot_root".
+Mixed change_root and pivot_root mechanism
+------------------------------------------
+
+If you do not want to use root=/dev/ram0 to trigger the pivot_root mechanism,
+you may create both /linuxrc and /sbin/init in your initrd image.
+
+/linuxrc would contain only the following:
+
+#! /bin/sh
+mount -n -t proc proc /proc
+echo 0x0100 >/proc/sys/kernel/real-root-dev
+umount -n /proc
+
+Once linuxrc exits, the kernel would mount your initrd again as root,
+this time executing /sbin/init. Again, it would be the duty of this init
+to build the right environment (maybe using the root= device passed on
+the cmdline) before the final execution of the real /sbin/init.
+
+
Resources
---------
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 09220a1..aae2282 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -396,6 +396,26 @@
clocksource is not available, it defaults to PIT.
Format: { pit | tsc | cyclone | pmtmr }
+ clocksource= [GENERIC_TIME] Override the default clocksource
+ Format: <string>
+ Override the default clocksource and use the clocksource
+ with the name specified.
+ Some clocksource names to choose from, depending on
+ the platform:
+ [all] jiffies (this is the base, fallback clocksource)
+ [ACPI] acpi_pm
+ [ARM] imx_timer1,OSTS,netx_timer,mpu_timer2,
+ pxa_timer,timer3,32k_counter,timer0_1
+ [AVR32] avr32
+ [IA-32] pit,hpet,tsc,vmi-timer;
+ scx200_hrt on Geode; cyclone on IBM x440
+ [MIPS] MIPS
+ [PARISC] cr16
+ [S390] tod
+ [SH] SuperH
+ [SPARC64] tick
+ [X86-64] hpet,tsc
+
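
As a usage sketch, a boot entry on a board where the ACPI PM timer is
available (see the list above) could simply append:

    clocksource=acpi_pm

The name must match a clocksource actually registered on that system.
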
code_bytes [IA32] How many bytes of object code to print in an
oops report.
Range: 0 - 8192
@@ -1807,10 +1827,6 @@
time Show timing data prefixed to each printk message line
- clocksource= [GENERIC_TIME] Override the default clocksource
- Override the default clocksource and use the clocksource
- with the name specified.
-
tipar.timeout= [HW,PPT]
Set communications timeout in tenths of a second
(default 15).
diff --git a/Documentation/memory-barriers.txt b/Documentation/memory-barriers.txt
index 58408dd..650657c 100644
--- a/Documentation/memory-barriers.txt
+++ b/Documentation/memory-barriers.txt
@@ -24,7 +24,7 @@
(*) Explicit kernel barriers.
- Compiler barrier.
- - The CPU memory barriers.
+ - CPU memory barriers.
- MMIO write barrier.
(*) Implicit kernel memory barriers.
@@ -265,7 +265,7 @@
ordering over the memory operations on either side of the barrier.
Such enforcement is important because the CPUs and other devices in a system
-can use a variety of tricks to improve performance - including reordering,
+can use a variety of tricks to improve performance, including reordering,
deferral and combination of memory operations; speculative loads; speculative
branch prediction and various types of caching. Memory barriers are used to
override or suppress these tricks, allowing the code to sanely control the
@@ -457,7 +457,7 @@
(Q == &A) implies (D == 1)
(Q == &B) implies (D == 4)
-But! CPU 2's perception of P may be updated _before_ its perception of B, thus
+But! CPU 2's perception of P may be updated _before_ its perception of B, thus
leading to the following situation:
(Q == &B) and (D == 2) ????
@@ -573,7 +573,7 @@
the "weaker" type.
[!] Note that the stores before the write barrier would normally be expected to
-match the loads after the read barrier or data dependency barrier, and vice
+match the loads after the read barrier or the data dependency barrier, and vice
versa:
CPU 1 CPU 2
@@ -588,7 +588,7 @@
EXAMPLES OF MEMORY BARRIER SEQUENCES
------------------------------------
-Firstly, write barriers act as a partial orderings on store operations.
+Firstly, write barriers act as partial orderings on store operations.
Consider the following sequence of events:
CPU 1
@@ -608,15 +608,15 @@
+-------+ : :
| | +------+
| |------>| C=3 | } /\
- | | : +------+ }----- \ -----> Events perceptible
- | | : | A=1 | } \/ to rest of system
+ | | : +------+ }----- \ -----> Events perceptible to
+ | | : | A=1 | } \/ the rest of the system
| | : +------+ }
| CPU 1 | : | B=2 | }
| | +------+ }
| | wwwwwwwwwwwwwwww } <--- At this point the write barrier
| | +------+ } requires all stores prior to the
| | : | E=5 | } barrier to be committed before
- | | : +------+ } further stores may be take place.
+ | | : +------+ } further stores may take place
| |------>| D=4 | }
| | +------+
+-------+ : :
@@ -626,7 +626,7 @@
V
-Secondly, data dependency barriers act as a partial orderings on data-dependent
+Secondly, data dependency barriers act as partial orderings on data-dependent
loads. Consider the following sequence of events:
CPU 1 CPU 2
@@ -975,7 +975,7 @@
barrier();
-This a general barrier - lesser varieties of compiler barrier do not exist.
+This is a general barrier - lesser varieties of compiler barrier do not exist.
The compiler barrier has no direct effect on the CPU, which may then reorder
things however it wishes.
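
As a minimal sketch (kernel context assumed, with barrier() coming from
<linux/compiler.h>; the variable names are made up):

    int a, b;

    void writer(void)
    {
            a = 1;
            barrier();      /* the compiler may not reorder these two
                             * stores, but the CPU still may */
            b = 2;
    }
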
@@ -997,7 +997,7 @@
All CPU memory barriers unconditionally imply compiler barriers.
SMP memory barriers are reduced to compiler barriers on uniprocessor compiled
-systems because it is assumed that a CPU will be appear to be self-consistent,
+systems because it is assumed that a CPU will appear to be self-consistent,
and will order overlapping accesses correctly with respect to itself.
[!] Note that SMP memory barriers _must_ be used to control the ordering of
@@ -1146,9 +1146,9 @@
Therefore, from (1), (2) and (4) an UNLOCK followed by an unconditional LOCK is
equivalent to a full barrier, but a LOCK followed by an UNLOCK is not.
-[!] Note: one of the consequence of LOCKs and UNLOCKs being only one-way
- barriers is that the effects instructions outside of a critical section may
- seep into the inside of the critical section.
+[!] Note: one of the consequences of LOCKs and UNLOCKs being only one-way
+ barriers is that the effects of instructions outside of a critical section
+ may seep into the inside of the critical section.
A LOCK followed by an UNLOCK may not be assumed to be full memory barrier
because it is possible for an access preceding the LOCK to happen after the
@@ -1239,7 +1239,7 @@
UNLOCK M UNLOCK Q
*D = d; *H = h;
-Then there is no guarantee as to what order CPU #3 will see the accesses to *A
+Then there is no guarantee as to what order CPU 3 will see the accesses to *A
through *H occur in, other than the constraints imposed by the separate locks
on the separate CPUs. It might, for example, see:
@@ -1269,12 +1269,12 @@
UNLOCK M [2]
*H = h;
-CPU #3 might see:
+CPU 3 might see:
*E, LOCK M [1], *C, *B, *A, UNLOCK M [1],
LOCK M [2], *H, *F, *G, UNLOCK M [2], *D
-But assuming CPU #1 gets the lock first, it won't see any of:
+But assuming CPU 1 gets the lock first, CPU 3 won't see any of:
*B, *C, *D, *F, *G or *H preceding LOCK M [1]
*A, *B or *C following UNLOCK M [1]
@@ -1327,12 +1327,12 @@
mmiowb();
spin_unlock(Q);
-this will ensure that the two stores issued on CPU #1 appear at the PCI bridge
-before either of the stores issued on CPU #2.
+this will ensure that the two stores issued on CPU 1 appear at the PCI bridge
+before either of the stores issued on CPU 2.
-Furthermore, following a store by a load to the same device obviates the need
-for an mmiowb(), because the load forces the store to complete before the load
+Furthermore, following a store by a load from the same device obviates the need
+for the mmiowb(), because the load forces the store to complete before the load
is performed:
CPU 1 CPU 2
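
Written as ordinary driver code, the spin_lock/mmiowb pattern discussed above
looks roughly like this (kernel context assumed; the register offsets and the
function are invented purely for illustration):

    static DEFINE_SPINLOCK(dev_lock);

    void send_command(void __iomem *regs, u32 cmd, u32 arg)
    {
            spin_lock(&dev_lock);
            writel(arg, regs + 0x04);
            writel(cmd, regs + 0x00);
            mmiowb();               /* order the MMIO writes before the
                                     * unlock becomes visible */
            spin_unlock(&dev_lock);
    }
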
@@ -1363,7 +1363,7 @@
(*) Atomic operations.
- (*) Accessing devices (I/O).
+ (*) Accessing devices.
(*) Interrupts.
@@ -1399,7 +1399,7 @@
(1) read the next pointer from this waiter's record to know as to where the
next waiter record is;
- (4) read the pointer to the waiter's task structure;
+ (2) read the pointer to the waiter's task structure;
(3) clear the task pointer to tell the waiter it has been given the semaphore;
@@ -1407,7 +1407,7 @@
(5) release the reference held on the waiter's task struct.
-In otherwords, it has to perform this sequence of events:
+In other words, it has to perform this sequence of events:
LOAD waiter->list.next;
LOAD waiter->task;
@@ -1502,7 +1502,7 @@
such the implicit memory barrier effects are necessary.
-The following operation are potential problems as they do _not_ imply memory
+The following operations are potential problems as they do _not_ imply memory
barriers, but might be used for implementing such things as UNLOCK-class
operations:
@@ -1517,7 +1517,7 @@
The following also do _not_ imply memory barriers, and so may require explicit
memory barriers under some circumstances (smp_mb__before_atomic_dec() for
-instance)):
+instance):
atomic_add();
atomic_sub();
@@ -1641,8 +1641,8 @@
indeed have special I/O space access cycles and instructions, but many
CPUs don't have such a concept.
- The PCI bus, amongst others, defines an I/O space concept - which on such
- CPUs as i386 and x86_64 cpus readily maps to the CPU's concept of I/O
+ The PCI bus, amongst others, defines an I/O space concept which - on such
+ CPUs as i386 and x86_64 - readily maps to the CPU's concept of I/O
space. However, it may also be mapped as a virtual I/O space in the CPU's
memory map, particularly on those CPUs that don't support alternate I/O
spaces.
@@ -1664,7 +1664,7 @@
i386 architecture machines, for example, this is controlled by way of the
MTRR registers.
- Ordinarily, these will be guaranteed to be fully ordered and uncombined,,
+ Ordinarily, these will be guaranteed to be fully ordered and uncombined,
provided they're not accessing a prefetchable device.
However, intermediary hardware (such as a PCI bridge) may indulge in
@@ -1689,7 +1689,7 @@
(*) ioreadX(), iowriteX()
- These will perform as appropriate for the type of access they're actually
+ These will perform appropriately for the type of access they're actually
doing, be it inX()/outX() or readX()/writeX().
@@ -1705,7 +1705,7 @@
This means that it must be considered that the CPU will execute its instruction
stream in any order it feels like - or even in parallel - provided that if an
-instruction in the stream depends on the an earlier instruction, then that
+instruction in the stream depends on an earlier instruction, then that
earlier instruction must be sufficiently complete[*] before the later
instruction may proceed; in other words: provided that the appearance of
causality is maintained.
@@ -1795,8 +1795,8 @@
become apparent in the same order on those other CPUs.
-Consider dealing with a system that has pair of CPUs (1 & 2), each of which has
-a pair of parallel data caches (CPU 1 has A/B, and CPU 2 has C/D):
+Consider dealing with a system that has a pair of CPUs (1 & 2), each of which
+has a pair of parallel data caches (CPU 1 has A/B, and CPU 2 has C/D):
:
: +--------+
@@ -1835,7 +1835,7 @@
(*) the coherency queue is not flushed by normal loads to lines already
present in the cache, even though the contents of the queue may
- potentially effect those loads.
+ potentially affect those loads.
Imagine, then, that two writes are made on the first CPU, with a write barrier
between them to guarantee that they will appear to reach that CPU's caches in
@@ -1845,7 +1845,7 @@
=============== =============== =======================================
u == 0, v == 1 and p == &u, q == &u
v = 2;
- smp_wmb(); Make sure change to v visible before
+ smp_wmb(); Make sure change to v is visible before
change to p
<A:modify v=2> v is now in cache A exclusively
p = &v;
@@ -1853,7 +1853,7 @@
The write memory barrier forces the other CPUs in the system to perceive that
the local CPU's caches have apparently been updated in the correct order. But
-now imagine that the second CPU that wants to read those values:
+now imagine that the second CPU wants to read those values:
CPU 1 CPU 2 COMMENT
=============== =============== =======================================
@@ -1861,7 +1861,7 @@
q = p;
x = *q;
-The above pair of reads may then fail to happen in expected order, as the
+The above pair of reads may then fail to happen in the expected order, as the
cacheline holding p may get updated in one of the second CPU's caches whilst
the update to the cacheline holding v is delayed in the other of the second
CPU's caches by some other cache event:
@@ -1916,7 +1916,7 @@
Other CPUs may also have split caches, but must coordinate between the various
cachelets for normal memory accesses. The semantics of the Alpha removes the
-need for coordination in absence of memory barriers.
+need for coordination in the absence of memory barriers.
CACHE COHERENCY VS DMA
@@ -1931,10 +1931,10 @@
In addition, the data DMA'd to RAM by a device may be overwritten by dirty
cache lines being written back to RAM from a CPU's cache after the device has
-installed its own data, or cache lines simply present in a CPUs cache may
-simply obscure the fact that RAM has been updated, until at such time as the
-cacheline is discarded from the CPU's cache and reloaded. To deal with this,
-the appropriate part of the kernel must invalidate the overlapping bits of the
+installed its own data, or cache lines present in the CPU's cache may simply
+obscure the fact that RAM has been updated, until at such time as the cacheline
+is discarded from the CPU's cache and reloaded. To deal with this, the
+appropriate part of the kernel must invalidate the overlapping bits of the
cache on each CPU.
See Documentation/cachetlb.txt for more information on cache management.
@@ -1944,7 +1944,7 @@
-----------------------
Memory mapped I/O usually takes place through memory locations that are part of
-a window in the CPU's memory space that have different properties assigned than
+a window in the CPU's memory space that has different properties assigned than
the usual RAM directed window.
Amongst these properties is usually the fact that such accesses bypass the
@@ -1960,7 +1960,7 @@
=========================
A programmer might take it for granted that the CPU will perform memory
-operations in exactly the order specified, so that if a CPU is, for example,
+operations in exactly the order specified, so that if the CPU is, for example,
given the following piece of code to execute:
a = *A;
@@ -1969,7 +1969,7 @@
d = *D;
*E = e;
-They would then expect that the CPU will complete the memory operation for each
+they would then expect that the CPU will complete the memory operation for each
instruction before moving on to the next one, leading to a definite sequence of
operations as seen by external observers in the system:
@@ -1986,8 +1986,8 @@
(*) loads may be done speculatively, and the result discarded should it prove
to have been unnecessary;
- (*) loads may be done speculatively, leading to the result having being
- fetched at the wrong time in the expected sequence of events;
+ (*) loads may be done speculatively, leading to the result having been fetched
+ at the wrong time in the expected sequence of events;
(*) the order of the memory accesses may be rearranged to promote better use
of the CPU buses and caches;
@@ -2069,12 +2069,12 @@
The DEC Alpha CPU is one of the most relaxed CPUs there is. Not only that,
some versions of the Alpha CPU have a split data cache, permitting them to have
-two semantically related cache lines updating at separate times. This is where
+two semantically-related cache lines updated at separate times. This is where
the data dependency barrier really becomes necessary as this synchronises both
caches with the memory coherence system, thus making it seem like pointer
changes vs new data occur in the right order.
-The Alpha defines the Linux's kernel's memory barrier model.
+The Alpha defines the Linux kernel's memory barrier model.
See the subsection on "Cache Coherency" above.
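
The pointer-publication pattern this protects looks roughly as follows; the
names are illustrative, and smp_read_barrier_depends() is the barrier that
only does real work on Alpha:

    struct foo { int value; };
    struct foo *global_ptr;

    void publisher(struct foo *p)
    {
            p->value = 42;
            smp_wmb();                      /* commit data before pointer */
            global_ptr = p;
    }

    int consumer(void)
    {
            struct foo *q = global_ptr;

            smp_read_barrier_depends();     /* no-op except on Alpha */
            return q->value;
    }
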
diff --git a/Documentation/spi/spi-summary b/Documentation/spi/spi-summary
index 795fbb4..76ea6c8 100644
--- a/Documentation/spi/spi-summary
+++ b/Documentation/spi/spi-summary
@@ -1,26 +1,30 @@
Overview of Linux kernel SPI support
====================================
-02-Dec-2005
+21-May-2007
What is SPI?
------------
The "Serial Peripheral Interface" (SPI) is a synchronous four wire serial
link used to connect microcontrollers to sensors, memory, and peripherals.
+It's a simple "de facto" standard, not complicated enough to acquire a
+standardization body. SPI uses a master/slave configuration.
The three signal wires hold a clock (SCK, often on the order of 10 MHz),
and parallel data lines with "Master Out, Slave In" (MOSI) or "Master In,
Slave Out" (MISO) signals. (Other names are also used.) There are four
clocking modes through which data is exchanged; mode-0 and mode-3 are most
commonly used. Each clock cycle shifts data out and data in; the clock
-doesn't cycle except when there is data to shift.
+doesn't cycle except when there is a data bit to shift. Not all data bits
+are used though; not every protocol uses those full duplex capabilities.
-SPI masters may use a "chip select" line to activate a given SPI slave
+SPI masters use a fourth "chip select" line to activate a given SPI slave
device, so those three signal wires may be connected to several chips
-in parallel. All SPI slaves support chipselects. Some devices have
+in parallel. All SPI slaves support chipselects; they are usually active
+low signals, labeled nCSx for slave 'x' (e.g. nCS0). Some devices have
other signals, often including an interrupt to the master.
-Unlike serial busses like USB or SMBUS, even low level protocols for
+Unlike serial busses like USB or SMBus, even low level protocols for
SPI slave functions are usually not interoperable between vendors
(except for commodities like SPI memory chips).
@@ -33,6 +37,11 @@
- Some devices may use eight bit words. Others may use different word
lengths, such as streams of 12-bit or 20-bit digital samples.
+ - Words are usually sent with their most significant bit (MSB) first,
+ but sometimes the least significant bit (LSB) goes first instead.
+
+ - Sometimes SPI is used to daisy-chain devices, like shift registers.
+
In the same way, SPI slaves will only rarely support any kind of automatic
discovery/enumeration protocol. The tree of slave devices accessible from
a given SPI master will normally be set up manually, with configuration
@@ -44,6 +53,14 @@
Serial Protocol"), PSP ("Programmable Serial Protocol"), and other
related protocols.
+Some chips eliminate a signal line by combining MOSI and MISO, and
+limiting themselves to half-duplex at the hardware level. In fact
+some SPI chips have this signal mode as a strapping option. These
+can be accessed using the same programming interface as SPI, but of
+course they won't handle full duplex transfers. You may find such
+chips described as using "three wire" signaling: SCK, data, nCSx.
+(That data line is sometimes called MOMI or SISO.)
+
Microcontrollers often support both master and slave sides of the SPI
protocol. This document (and Linux) currently only supports the master
side of SPI interactions.
@@ -74,6 +91,32 @@
cards without needing a special purpose MMC/SD/SDIO controller.
+I'm confused. What are these four SPI "clock modes"?
+-----------------------------------------------------
+It's easy to be confused here, and the vendor documentation you'll
+find isn't necessarily helpful. The four modes combine two mode bits:
+
+ - CPOL indicates the initial clock polarity. CPOL=0 means the
+ clock starts low, so the first (leading) edge is rising, and
+ the second (trailing) edge is falling. CPOL=1 means the clock
+ starts high, so the first (leading) edge is falling.
+
+ - CPHA indicates the clock phase used to sample data; CPHA=0 says
+ sample on the leading edge, CPHA=1 means the trailing edge.
+
+ Since the signal needs to stabilize before it's sampled, CPHA=0
+ implies that its data is written half a clock before the first
+ clock edge. The chipselect may have made it available.
+
+Chip specs won't always say "uses SPI mode X" in as many words,
+but their timing diagrams will make the CPOL and CPHA modes clear.
+
+In the SPI mode number, CPOL is the high order bit and CPHA is the
+low order bit. So when a chip's timing diagram shows the clock
+starting low (CPOL=0) and data stabilized for sampling during the
+trailing clock edge (CPHA=1), that's SPI mode 1.
+
+
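
In driver terms, a protocol driver picks one of these modes by setting
spi->mode before calling spi_setup(); a minimal sketch, with the function
name invented here:

    #include <linux/spi/spi.h>

    static int mychip_configure(struct spi_device *spi)
    {
            /* clock idles low (CPOL=0), sample on the trailing edge
             * (CPHA=1): that's SPI mode 1 */
            spi->mode = SPI_MODE_1;
            spi->bits_per_word = 8;
            return spi_setup(spi);
    }
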
How do these driver programming interfaces work?
------------------------------------------------
The <linux/spi/spi.h> header file includes kerneldoc, as does the
diff --git a/MAINTAINERS b/MAINTAINERS
index 22ab401..953291d 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -332,6 +332,9 @@
W: http://www.linux-usb.org/SpeedTouch/
S: Maintained
+ALCHEMY AU1XX0 MMC DRIVER
+S: Orphan
+
ALI1563 I2C DRIVER
P: Rudolf Marek
M: r.marek@assembler.cz
@@ -418,6 +421,12 @@
M: spyro@f2s.com
S: Maintained
+ARM PRIMECELL MMCI PL180/1 DRIVER
+P: Russell King
+M: rmk@arm.linux.org.uk
+L: linux-arm-kernel@lists.arm.linux.org.uk (subscribers-only)
+S: Maintained
+
ARM/ADI ROADRUNNER MACHINE SUPPORT
P: Lennert Buytenhek
M: kernel@wantstofly.org
@@ -649,6 +658,9 @@
W: http://linux-atm.sourceforge.net
S: Maintained
+ATMEL AT91 MCI DRIVER
+S: Orphan
+
ATMEL MACB ETHERNET DRIVER
P: Haavard Skinnemoen
M: hskinnemoen@atmel.com
@@ -2380,6 +2392,13 @@
W: http://popies.net/meye/
S: Maintained
+MOTOROLA IMX MMC/SD HOST CONTROLLER INTERFACE DRIVER
+P: Pavel Pisa
+M: ppisa@pikron.com
+L: linux-arm-kernel@lists.arm.linux.org.uk (subscribers-only)
+W: http://mmc.drzeus.cx/wiki/Controllers/Freescale/SDHC
+S: Maintained
+
MOUSE AND MISC DEVICES [GENERAL]
P: Alessandro Rubini
M: rubini@ipvvis.unipv.it
@@ -2900,6 +2919,9 @@
L: linux-arm-kernel@lists.arm.linux.org.uk (subscribers-only)
S: Maintained
+PXA MMCI DRIVER
+S: Orphan
+
QLOGIC QLA2XXX FC-SCSI DRIVER
P: Andrew Vasquez
M: linux-driver@qlogic.com
@@ -3416,6 +3438,13 @@
M: oakad@yahoo.com
S: Maintained
+TI OMAP MMC INTERFACE DRIVER
+P: Carlos Aguiar, Anderson Briglia and Syed Khasim
+M: linux-omap-open-source@linux.omap.com
+W: http://linux.omap.com
+W: http://www.muru.com/linux/omap/
+S: Maintained
+
TI OMAP RANDOM NUMBER GENERATOR SUPPORT
P: Deepak Saxena
M: dsaxena@plexity.net
diff --git a/arch/i386/Kconfig b/arch/i386/Kconfig
index c2d54b8..8770a5d 100644
--- a/arch/i386/Kconfig
+++ b/arch/i386/Kconfig
@@ -891,7 +891,7 @@
Don't change this unless you know what you are doing.
config HOTPLUG_CPU
- bool "Support for hot-pluggable CPUs (EXPERIMENTAL)"
+ bool "Support for suspend on SMP and hot-pluggable CPUs (EXPERIMENTAL)"
depends on SMP && HOTPLUG && EXPERIMENTAL && !X86_VOYAGER
---help---
Say Y here to experiment with turning CPUs off and on, and to
diff --git a/arch/i386/mach-generic/bigsmp.c b/arch/i386/mach-generic/bigsmp.c
index e932d34..58a477b 100644
--- a/arch/i386/mach-generic/bigsmp.c
+++ b/arch/i386/mach-generic/bigsmp.c
@@ -21,7 +21,7 @@
static int dmi_bigsmp; /* can be set by dmi scanners */
-static __init int hp_ht_bigsmp(struct dmi_system_id *d)
+static int hp_ht_bigsmp(struct dmi_system_id *d)
{
#ifdef CONFIG_X86_GENERICARCH
printk(KERN_NOTICE "%s detected: force use of apic=bigsmp\n", d->ident);
@@ -31,7 +31,7 @@
}
-static struct dmi_system_id __initdata bigsmp_dmi_table[] = {
+static struct dmi_system_id bigsmp_dmi_table[] = {
{ hp_ht_bigsmp, "HP ProLiant DL760 G2", {
DMI_MATCH(DMI_BIOS_VENDOR, "HP"),
DMI_MATCH(DMI_BIOS_VERSION, "P44-"),
@@ -45,7 +45,7 @@
};
-static int __init probe_bigsmp(void)
+static int probe_bigsmp(void)
{
if (def_to_bigsmp)
dmi_bigsmp = 1;
diff --git a/arch/mips/sgi-ip32/Makefile b/arch/mips/sgi-ip32/Makefile
index 7e14167..60f0227 100644
--- a/arch/mips/sgi-ip32/Makefile
+++ b/arch/mips/sgi-ip32/Makefile
@@ -3,5 +3,5 @@
# under Linux.
#
-obj-y += ip32-berr.o ip32-irq.o ip32-setup.o ip32-reset.o \
+obj-y += ip32-berr.o ip32-irq.o ip32-platform.o ip32-setup.o ip32-reset.o \
crime.o ip32-memory.o
diff --git a/arch/mips/sgi-ip32/ip32-platform.c b/arch/mips/sgi-ip32/ip32-platform.c
new file mode 100644
index 0000000..120b159
--- /dev/null
+++ b/arch/mips/sgi-ip32/ip32-platform.c
@@ -0,0 +1,20 @@
+#include <linux/init.h>
+#include <linux/platform_device.h>
+
+static __init int meth_devinit(void)
+{
+ struct platform_device *pd;
+ int ret;
+
+ pd = platform_device_alloc("meth", -1);
+ if (!pd)
+ return -ENOMEM;
+
+ ret = platform_device_add(pd);
+ if (ret)
+ platform_device_put(pd);
+
+ return ret;
+}
+
+device_initcall(meth_devinit);
diff --git a/arch/um/os-Linux/start_up.c b/arch/um/os-Linux/start_up.c
index 79471f8..3fc13fa 100644
--- a/arch/um/os-Linux/start_up.c
+++ b/arch/um/os-Linux/start_up.c
@@ -144,9 +144,7 @@
int exit_with = WEXITSTATUS(status);
if (exit_with == 2)
non_fatal("check_ptrace : child exited with status 2. "
- "Serious trouble happening! Try updating "
- "your host skas patch!\nDisabling SYSEMU "
- "support.");
+ "\nDisabling SYSEMU support.\n");
non_fatal("check_ptrace : child exited with exitcode %d, while "
"expecting %d; status 0x%x\n", exit_with,
exitcode, status);
@@ -209,6 +207,7 @@
static void __init check_sysemu(void)
{
void *stack;
+ unsigned long regs[MAX_REG_NR];
int pid, n, status, count=0;
non_fatal("Checking syscall emulation patch for ptrace...");
@@ -225,11 +224,20 @@
fatal("check_sysemu : expected SIGTRAP, got status = %d",
status);
- n = ptrace(PTRACE_POKEUSR, pid, PT_SYSCALL_RET_OFFSET,
- os_getpid());
- if(n < 0)
- fatal_perror("check_sysemu : failed to modify system call "
- "return");
+ if(ptrace(PTRACE_GETREGS, pid, 0, regs) < 0)
+ fatal_perror("check_sysemu : PTRACE_GETREGS failed");
+ if(PT_SYSCALL_NR(regs) != __NR_getpid){
+ non_fatal("check_sysemu got system call number %d, "
+ "expected %d...", PT_SYSCALL_NR(regs), __NR_getpid);
+ goto fail;
+ }
+
+ n = ptrace(PTRACE_POKEUSR, pid, PT_SYSCALL_RET_OFFSET, os_getpid());
+ if(n < 0){
+ non_fatal("check_sysemu : failed to modify system call "
+ "return");
+ goto fail;
+ }
if (stop_ptraced_child(pid, stack, 0, 0) < 0)
goto fail_stopped;
diff --git a/arch/x86_64/Kconfig b/arch/x86_64/Kconfig
index 145bb82..5ce9443 100644
--- a/arch/x86_64/Kconfig
+++ b/arch/x86_64/Kconfig
@@ -428,12 +428,15 @@
memory in the static kernel configuration.
config HOTPLUG_CPU
- bool "Support for hot-pluggable CPUs (EXPERIMENTAL)"
+ bool "Support for suspend on SMP and hot-pluggable CPUs (EXPERIMENTAL)"
depends on SMP && HOTPLUG && EXPERIMENTAL
help
Say Y here to experiment with turning CPUs off and on. CPUs
can be controlled through /sys/devices/system/cpu/cpu#.
- Say N if you want to disable CPU hotplug.
+ This is also required for suspend/hibernation on SMP systems.
+
+ Say N if you want to disable CPU hotplug and don't need to
+ suspend.
config ARCH_ENABLE_MEMORY_HOTPLUG
def_bool y
diff --git a/arch/x86_64/kernel/k8.c b/arch/x86_64/kernel/k8.c
index bc11b32..7377ccb 100644
--- a/arch/x86_64/kernel/k8.c
+++ b/arch/x86_64/kernel/k8.c
@@ -39,10 +39,10 @@
{
int i;
struct pci_dev *dev;
+
if (num_k8_northbridges)
return 0;
- num_k8_northbridges = 0;
dev = NULL;
while ((dev = next_k8_northbridge(dev)) != NULL)
num_k8_northbridges++;
@@ -52,6 +52,11 @@
if (!k8_northbridges)
return -ENOMEM;
+ if (!num_k8_northbridges) {
+ k8_northbridges[0] = NULL;
+ return 0;
+ }
+
flush_words = kmalloc(num_k8_northbridges * sizeof(u32), GFP_KERNEL);
if (!flush_words) {
kfree(k8_northbridges);
diff --git a/block/genhd.c b/block/genhd.c
index 93a2cf6..863a8c0 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -423,7 +423,10 @@
{
return sprintf(page, "%llu\n", (unsigned long long)get_capacity(disk));
}
-
+static ssize_t disk_capability_read(struct gendisk *disk, char *page)
+{
+ return sprintf(page, "%x\n", disk->flags);
+}
static ssize_t disk_stats_read(struct gendisk * disk, char *page)
{
preempt_disable();
@@ -466,6 +469,10 @@
.attr = {.name = "size", .mode = S_IRUGO },
.show = disk_size_read
};
+static struct disk_attribute disk_attr_capability = {
+ .attr = {.name = "capability", .mode = S_IRUGO },
+ .show = disk_capability_read
+};
static struct disk_attribute disk_attr_stat = {
.attr = {.name = "stat", .mode = S_IRUGO },
.show = disk_stats_read
@@ -506,6 +513,7 @@
&disk_attr_removable.attr,
&disk_attr_size.attr,
&disk_attr_stat.attr,
+ &disk_attr_capability.attr,
#ifdef CONFIG_FAIL_MAKE_REQUEST
&disk_attr_fail.attr,
#endif
@@ -688,6 +696,27 @@
.show = diskstats_show
};
+static void media_change_notify_thread(struct work_struct *work)
+{
+ struct gendisk *gd = container_of(work, struct gendisk, async_notify);
+ char event[] = "MEDIA_CHANGE=1";
+ char *envp[] = { event, NULL };
+
+ /*
+ * set environment vars to indicate which event this is for
+ * so that user space will know to go check the media status.
+ */
+ kobject_uevent_env(&gd->kobj, KOBJ_CHANGE, envp);
+ put_device(gd->driverfs_dev);
+}
+
+void genhd_media_change_notify(struct gendisk *disk)
+{
+ get_device(disk->driverfs_dev);
+ schedule_work(&disk->async_notify);
+}
+EXPORT_SYMBOL_GPL(genhd_media_change_notify);
+
struct gendisk *alloc_disk(int minors)
{
return alloc_disk_node(minors, -1);
@@ -717,6 +746,8 @@
kobj_set_kset_s(disk,block_subsys);
kobject_init(&disk->kobj);
rand_initialize_disk(disk);
+ INIT_WORK(&disk->async_notify,
+ media_change_notify_thread);
}
return disk;
}
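
For context, a block driver that detects a media change would call the new
helper on its gendisk, for example from an interrupt handler or polling
routine (the function below is invented for illustration):

    /* "disk" is the driver's struct gendisk */
    static void mydrv_media_event(struct gendisk *disk)
    {
            /* queues a KOBJ_CHANGE uevent carrying MEDIA_CHANGE=1 */
            genhd_media_change_notify(disk);
    }
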
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index 3587cb4..fe08804 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -670,7 +670,7 @@
if (drive == current_reqD)
drive = current_drive;
del_timer(&fd_timeout);
- if (drive < 0 || drive > N_DRIVE) {
+ if (drive < 0 || drive >= N_DRIVE) {
fd_timeout.expires = jiffies + 20UL * HZ;
drive = 0;
} else
diff --git a/drivers/char/cyclades.c b/drivers/char/cyclades.c
index c72ee97..ca376b9 100644
--- a/drivers/char/cyclades.c
+++ b/drivers/char/cyclades.c
@@ -1061,6 +1061,7 @@
if (data & info->ignore_status_mask) {
info->icount.rx++;
+ spin_unlock(&cinfo->card_lock);
return;
}
if (tty_buffer_request_room(tty, 1)) {
diff --git a/drivers/crypto/geode-aes.c b/drivers/crypto/geode-aes.c
index 6d3840e..6a86958 100644
--- a/drivers/crypto/geode-aes.c
+++ b/drivers/crypto/geode-aes.c
@@ -102,10 +102,15 @@
u32 flags = 0;
unsigned long iflags;
- if (op->len == 0 || op->src == op->dst)
+ if (op->len == 0)
return 0;
- if (op->flags & AES_FLAGS_COHERENT)
+ /* If the source and destination are the same, then
+ * we need to turn on the coherent flags; otherwise
+ * we don't need to worry.
+ */
+
+ if (op->src == op->dst)
flags |= (AES_CTRL_DCA | AES_CTRL_SCA);
if (op->dir == AES_DIR_ENCRYPT)
@@ -120,7 +125,7 @@
_writefield(AES_WRITEIV0_REG, op->iv);
}
- if (op->flags & AES_FLAGS_USRKEY) {
+ if (!(op->flags & AES_FLAGS_HIDDENKEY)) {
flags |= AES_CTRL_WRKEY;
_writefield(AES_WRITEKEY0_REG, op->key);
}
@@ -289,6 +294,7 @@
.setkey = geode_setkey,
.encrypt = geode_cbc_encrypt,
.decrypt = geode_cbc_decrypt,
+ .ivsize = AES_IV_LENGTH,
}
}
};
diff --git a/drivers/crypto/geode-aes.h b/drivers/crypto/geode-aes.h
index 8003a36..f479686 100644
--- a/drivers/crypto/geode-aes.h
+++ b/drivers/crypto/geode-aes.h
@@ -20,8 +20,7 @@
#define AES_DIR_DECRYPT 0
#define AES_DIR_ENCRYPT 1
-#define AES_FLAGS_USRKEY (1 << 0)
-#define AES_FLAGS_COHERENT (1 << 1)
+#define AES_FLAGS_HIDDENKEY (1 << 0)
struct geode_aes_op {
diff --git a/drivers/hwmon/applesmc.c b/drivers/hwmon/applesmc.c
index 0c160675..366f4a1 100644
--- a/drivers/hwmon/applesmc.c
+++ b/drivers/hwmon/applesmc.c
@@ -491,6 +491,12 @@
/* Sysfs Files */
+static ssize_t applesmc_name_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "applesmc\n");
+}
+
static ssize_t applesmc_position_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -913,6 +919,8 @@
.brightness_set = applesmc_brightness_set,
};
+static DEVICE_ATTR(name, 0444, applesmc_name_show, NULL);
+
static DEVICE_ATTR(position, 0444, applesmc_position_show, NULL);
static DEVICE_ATTR(calibrate, 0644,
applesmc_calibrate_show, applesmc_calibrate_store);
@@ -1197,6 +1205,8 @@
goto out_driver;
}
+ ret = sysfs_create_file(&pdev->dev.kobj, &dev_attr_name.attr);
+
/* Create key enumeration sysfs files */
ret = sysfs_create_group(&pdev->dev.kobj, &key_enumeration_group);
if (ret)
diff --git a/drivers/ide/ide-dma.c b/drivers/ide/ide-dma.c
index b77b7d1..ead141e 100644
--- a/drivers/ide/ide-dma.c
+++ b/drivers/ide/ide-dma.c
@@ -119,15 +119,17 @@
{ "HITACHI CDR-8335" , "ALL" },
{ "HITACHI CDR-8435" , "ALL" },
{ "Toshiba CD-ROM XM-6202B" , "ALL" },
+ { "TOSHIBA CD-ROM XM-1702BC", "ALL" },
{ "CD-532E-A" , "ALL" },
{ "E-IDE CD-ROM CR-840", "ALL" },
{ "CD-ROM Drive/F5A", "ALL" },
{ "WPI CDD-820", "ALL" },
{ "SAMSUNG CD-ROM SC-148C", "ALL" },
{ "SAMSUNG CD-ROM SC", "ALL" },
- { "SanDisk SDP3B-64" , "ALL" },
{ "ATAPI CD-ROM DRIVE 40X MAXIMUM", "ALL" },
{ "_NEC DV5800A", "ALL" },
+ { "SAMSUNG CD-ROM SN-124", "N001" },
+ { "Seagate STT20000A", "ALL" },
{ NULL , NULL }
};
diff --git a/drivers/ide/ide-proc.c b/drivers/ide/ide-proc.c
index d50bd99..ea94c9a 100644
--- a/drivers/ide/ide-proc.c
+++ b/drivers/ide/ide-proc.c
@@ -67,6 +67,8 @@
case ide_4drives: name = "4drives"; break;
case ide_pmac: name = "mac-io"; break;
case ide_au1xxx: name = "au1xxx"; break;
+ case ide_etrax100: name = "etrax100"; break;
+ case ide_acorn: name = "acorn"; break;
default: name = "(unknown)"; break;
}
len = sprintf(page, "%s\n", name);
diff --git a/drivers/ide/pci/atiixp.c b/drivers/ide/pci/atiixp.c
index 0e52ad7..8ab33fa 100644
--- a/drivers/ide/pci/atiixp.c
+++ b/drivers/ide/pci/atiixp.c
@@ -317,6 +317,7 @@
{ PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP300_IDE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
{ PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP400_IDE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
{ PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP600_IDE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1},
+ { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP700_IDE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1},
{ 0, },
};
MODULE_DEVICE_TABLE(pci, atiixp_pci_tbl);
diff --git a/drivers/ide/pci/serverworks.c b/drivers/ide/pci/serverworks.c
index 6234f80..47bcd91 100644
--- a/drivers/ide/pci/serverworks.c
+++ b/drivers/ide/pci/serverworks.c
@@ -158,6 +158,12 @@
pci_read_config_word(dev, 0x4A, &csb5_pio);
pci_read_config_byte(dev, 0x54, &ultra_enable);
+ /* If we are in RAID mode (eg AMI MegaIDE) then we can't, it
+ turns out, trust the firmware configuration */
+
+ if ((dev->class >> 8) != PCI_CLASS_STORAGE_IDE)
+ goto oem_setup_failed;
+
/* Per Specified Design by OEM, and ASIC Architect */
if ((dev->device == PCI_DEVICE_ID_SERVERWORKS_CSB6IDE) ||
(dev->device == PCI_DEVICE_ID_SERVERWORKS_CSB6IDE2)) {
@@ -173,7 +179,7 @@
((dma_stat&(1<<(5+unit)))==(1<<(5+unit)))) {
u8 dmaspeed = dma_timing;
- dma_timing &= ~0xFF;
+ dma_timing &= ~0xFFU;
if ((dmaspeed & 0x20) == 0x20)
dmaspeed = XFER_MW_DMA_2;
else if ((dmaspeed & 0x21) == 0x21)
@@ -187,7 +193,7 @@
} else if (pio_timing) {
u8 piospeed = pio_timing;
- pio_timing &= ~0xFF;
+ pio_timing &= ~0xFFU;
if ((piospeed & 0x20) == 0x20)
piospeed = XFER_PIO_4;
else if ((piospeed & 0x22) == 0x22)
@@ -208,8 +214,8 @@
oem_setup_failed:
- pio_timing &= ~0xFF;
- dma_timing &= ~0xFF;
+ pio_timing &= ~0xFFU;
+ dma_timing &= ~0xFFU;
ultra_timing &= ~(0x0F << (4*unit));
ultra_enable &= ~(0x01 << drive->dn);
csb5_pio &= ~(0x0F << (4*drive->dn));
diff --git a/drivers/isdn/hardware/eicon/capifunc.c b/drivers/isdn/hardware/eicon/capifunc.c
index ff284ae..82edc1c 100644
--- a/drivers/isdn/hardware/eicon/capifunc.c
+++ b/drivers/isdn/hardware/eicon/capifunc.c
@@ -189,21 +189,21 @@
{
appl->xbuffer_used[ref] = true;
DBG_PRV1(("%d:xbuf_used(%d)", appl->Id, ref + 1))
- return (void *) ref;
+ return (void *)(long)ref;
}
void *TransmitBufferGet(APPL * appl, void *p)
{
- if (appl->xbuffer_internal[(dword) p])
- return appl->xbuffer_internal[(dword) p];
+ if (appl->xbuffer_internal[(dword)(long)p])
+ return appl->xbuffer_internal[(dword)(long)p];
- return appl->xbuffer_ptr[(dword) p];
+ return appl->xbuffer_ptr[(dword)(long)p];
}
void TransmitBufferFree(APPL * appl, void *p)
{
- appl->xbuffer_used[(dword) p] = false;
- DBG_PRV1(("%d:xbuf_free(%d)", appl->Id, ((dword) p) + 1))
+ appl->xbuffer_used[(dword)(long)p] = false;
+ DBG_PRV1(("%d:xbuf_free(%d)", appl->Id, ((dword)(long)p) + 1))
}
void *ReceiveBufferGet(APPL * appl, int Num)
@@ -301,7 +301,7 @@
/* if DATA_B3_IND, copy data too */
if (command == _DATA_B3_I) {
dword data = GET_DWORD(&msg.info.data_b3_ind.Data);
- memcpy(write + length, (void *) data, dlength);
+ memcpy(write + length, (void *)(long)data, dlength);
}
#ifndef DIVA_NO_DEBUGLIB
@@ -318,7 +318,7 @@
if (myDriverDebugHandle.dbgMask & DL_BLK) {
xlog("\x00\x02", &msg, 0x81, length);
for (i = 0; i < dlength; i += 256) {
- DBG_BLK((((char *) GET_DWORD(&msg.info.data_b3_ind.Data)) + i,
+ DBG_BLK((((char *)(long)GET_DWORD(&msg.info.data_b3_ind.Data)) + i,
((dlength - i) < 256) ? (dlength - i) : 256))
if (!(myDriverDebugHandle.dbgMask & DL_PRV0))
break; /* not more if not explicitely requested */
diff --git a/drivers/isdn/hardware/eicon/message.c b/drivers/isdn/hardware/eicon/message.c
index 784232a..ccd35d0 100644
--- a/drivers/isdn/hardware/eicon/message.c
+++ b/drivers/isdn/hardware/eicon/message.c
@@ -1,4 +1,3 @@
-
/*
*
Copyright (c) Eicon Networks, 2002.
@@ -533,7 +532,7 @@
if (m->header.command == _DATA_B3_R)
{
- m->info.data_b3_req.Data = (dword)(TransmitBufferSet (appl, m->info.data_b3_req.Data));
+ m->info.data_b3_req.Data = (dword)(long)(TransmitBufferSet (appl, m->info.data_b3_req.Data));
}
@@ -1032,7 +1031,7 @@
{
TransmitBufferFree (plci->appl,
- (byte *)(((CAPI_MSG *)(&((byte *)(plci->msg_in_queue))[i]))->info.data_b3_req.Data));
+ (byte *)(long)(((CAPI_MSG *)(&((byte *)(plci->msg_in_queue))[i]))->info.data_b3_req.Data));
}
@@ -3118,7 +3117,7 @@
&& (((byte *)(parms[0].info)) < ((byte *)(plci->msg_in_queue)) + sizeof(plci->msg_in_queue)))
{
- data->P = (byte *)(*((dword *)(parms[0].info)));
+ data->P = (byte *)(long)(*((dword *)(parms[0].info)));
}
else
@@ -3151,7 +3150,7 @@
&& (((byte *)(parms[0].info)) < ((byte *)(plci->msg_in_queue)) + sizeof(plci->msg_in_queue)))
{
- TransmitBufferFree (appl, (byte *)(*((dword *)(parms[0].info))));
+ TransmitBufferFree (appl, (byte *)(long)(*((dword *)(parms[0].info))));
}
}
@@ -4057,7 +4056,7 @@
{
if (m->header.command == _DATA_B3_R)
- TransmitBufferFree (appl, (byte *)(m->info.data_b3_req.Data));
+ TransmitBufferFree (appl, (byte *)(long)(m->info.data_b3_req.Data));
dbug(1,dprintf("Error 0x%04x from msg(0x%04x)", i, m->header.command));
break;
@@ -7134,7 +7133,7 @@
case N_UDATA:
if (!(udata_forwarding_table[plci->NL.RBuffer->P[0] >> 5] & (1L << (plci->NL.RBuffer->P[0] & 0x1f))))
{
- plci->RData[0].P = plci->internal_ind_buffer + (-((int)(plci->internal_ind_buffer)) & 3);
+ plci->RData[0].P = plci->internal_ind_buffer + (-((int)(long)(plci->internal_ind_buffer)) & 3);
plci->RData[0].PLength = INTERNAL_IND_BUFFER_SIZE;
plci->NL.R = plci->RData;
plci->NL.RNum = 1;
diff --git a/drivers/isdn/hisax/config.c b/drivers/isdn/hisax/config.c
index da4196f..8d53a7f 100644
--- a/drivers/isdn/hisax/config.c
+++ b/drivers/isdn/hisax/config.c
@@ -1551,7 +1551,7 @@
if (retval == 0) { // yuck
cards[i].typ = 0;
nrcards--;
- return retval;
+ return -EINVAL;
}
cs = cards[i].cs;
hisax_d_if->cs = cs;
diff --git a/drivers/isdn/hisax/hfc_usb.c b/drivers/isdn/hisax/hfc_usb.c
index 1f18f19..b1a26e0 100644
--- a/drivers/isdn/hisax/hfc_usb.c
+++ b/drivers/isdn/hisax/hfc_usb.c
@@ -485,7 +485,6 @@
{
int k;
- spin_lock_init(&urb->lock);
urb->dev = dev;
urb->pipe = pipe;
urb->complete = complete;
@@ -578,16 +577,14 @@
"HFC-S USB: Stopping iso chain for fifo %i.%i",
fifo->fifonum, i);
#endif
- usb_unlink_urb(fifo->iso[i].purb);
+ usb_kill_urb(fifo->iso[i].purb);
usb_free_urb(fifo->iso[i].purb);
fifo->iso[i].purb = NULL;
}
}
- if (fifo->urb) {
- usb_unlink_urb(fifo->urb);
- usb_free_urb(fifo->urb);
- fifo->urb = NULL;
- }
+ usb_kill_urb(fifo->urb);
+ usb_free_urb(fifo->urb);
+ fifo->urb = NULL;
fifo->active = 0;
}
@@ -1305,7 +1302,11 @@
}
/* default Prot: EURO ISDN, should be a module_param */
hfc->protocol = 2;
- hisax_register(&hfc->d_if, p_b_if, "hfc_usb", hfc->protocol);
+ i = hisax_register(&hfc->d_if, p_b_if, "hfc_usb", hfc->protocol);
+ if (i) {
+ printk(KERN_INFO "HFC-S USB: hisax_register -> %d\n", i);
+ return i;
+ }
#ifdef CONFIG_HISAX_DEBUG
hfc_debug = debug;
@@ -1626,11 +1627,9 @@
#endif
/* init the chip and register the driver */
if (usb_init(context)) {
- if (context->ctrl_urb) {
- usb_unlink_urb(context->ctrl_urb);
- usb_free_urb(context->ctrl_urb);
- context->ctrl_urb = NULL;
- }
+ usb_kill_urb(context->ctrl_urb);
+ usb_free_urb(context->ctrl_urb);
+ context->ctrl_urb = NULL;
kfree(context);
return (-EIO);
}
@@ -1682,21 +1681,15 @@
i);
#endif
}
- if (context->fifos[i].urb) {
- usb_unlink_urb(context->fifos[i].urb);
- usb_free_urb(context->fifos[i].urb);
- context->fifos[i].urb = NULL;
- }
+ usb_kill_urb(context->fifos[i].urb);
+ usb_free_urb(context->fifos[i].urb);
+ context->fifos[i].urb = NULL;
}
context->fifos[i].active = 0;
}
- /* wait for all URBS to terminate */
- mdelay(10);
- if (context->ctrl_urb) {
- usb_unlink_urb(context->ctrl_urb);
- usb_free_urb(context->ctrl_urb);
- context->ctrl_urb = NULL;
- }
+ usb_kill_urb(context->ctrl_urb);
+ usb_free_urb(context->ctrl_urb);
+ context->ctrl_urb = NULL;
hisax_unregister(&context->d_if);
kfree(context); /* free our structure again */
} /* hfc_usb_disconnect */
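
The URB teardown above gets simpler because usb_kill_urb() replaces usb_unlink_urb(): it cancels the URB and blocks until it has completed, and both it and usb_free_urb() accept a NULL pointer, which is why the explicit NULL checks and the mdelay(10) grace period can be dropped. A sketch of the resulting idiom (the helper name is illustrative, not from the driver):

	static void stop_and_free_urb(struct urb **purb)
	{
		usb_kill_urb(*purb);	/* synchronous cancel; may sleep; NULL-safe */
		usb_free_urb(*purb);	/* also NULL-safe */
		*purb = NULL;
	}
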
diff --git a/drivers/isdn/hisax/hisax_fcpcipnp.c b/drivers/isdn/hisax/hisax_fcpcipnp.c
index 9e088fc..7993e01 100644
--- a/drivers/isdn/hisax/hisax_fcpcipnp.c
+++ b/drivers/isdn/hisax/hisax_fcpcipnp.c
@@ -859,7 +859,11 @@
for (i = 0; i < 2; i++)
b_if[i] = &adapter->bcs[i].b_if;
- hisax_register(&adapter->isac.hisax_d_if, b_if, "fcpcipnp", protocol);
+ if (hisax_register(&adapter->isac.hisax_d_if, b_if, "fcpcipnp",
+ protocol) != 0) {
+ kfree(adapter);
+ adapter = NULL;
+ }
return adapter;
}
diff --git a/drivers/isdn/hisax/st5481_init.c b/drivers/isdn/hisax/st5481_init.c
index bb3a28a..1375123 100644
--- a/drivers/isdn/hisax/st5481_init.c
+++ b/drivers/isdn/hisax/st5481_init.c
@@ -107,12 +107,17 @@
for (i = 0; i < 2; i++)
b_if[i] = &adapter->bcs[i].b_if;
- hisax_register(&adapter->hisax_d_if, b_if, "st5481_usb", protocol);
+ if (hisax_register(&adapter->hisax_d_if, b_if, "st5481_usb",
+ protocol) != 0)
+ goto err_b1;
+
st5481_start(adapter);
usb_set_intfdata(intf, adapter);
return 0;
+ err_b1:
+ st5481_release_b(&adapter->bcs[1]);
err_b:
st5481_release_b(&adapter->bcs[0]);
err_d:
diff --git a/drivers/isdn/hisax/st5481_usb.c b/drivers/isdn/hisax/st5481_usb.c
index ff15951..4ada66b 100644
--- a/drivers/isdn/hisax/st5481_usb.c
+++ b/drivers/isdn/hisax/st5481_usb.c
@@ -407,7 +407,6 @@
{
int k;
- spin_lock_init(&urb->lock);
urb->dev=dev;
urb->pipe=pipe;
urb->interval = 1;
diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
index 5a4a74c..9620d45 100644
--- a/drivers/md/bitmap.c
+++ b/drivers/md/bitmap.c
@@ -255,19 +255,25 @@
}
-static int write_sb_page(mddev_t *mddev, long offset, struct page *page, int wait)
+static int write_sb_page(struct bitmap *bitmap, struct page *page, int wait)
{
mdk_rdev_t *rdev;
struct list_head *tmp;
+ mddev_t *mddev = bitmap->mddev;
ITERATE_RDEV(mddev, rdev, tmp)
if (test_bit(In_sync, &rdev->flags)
- && !test_bit(Faulty, &rdev->flags))
+ && !test_bit(Faulty, &rdev->flags)) {
+ int size = PAGE_SIZE;
+ if (page->index == bitmap->file_pages-1)
+ size = roundup(bitmap->last_page_size,
+ bdev_hardsect_size(rdev->bdev));
md_super_write(mddev, rdev,
- (rdev->sb_offset<<1) + offset
+ (rdev->sb_offset<<1) + bitmap->offset
+ page->index * (PAGE_SIZE/512),
- PAGE_SIZE,
+ size,
page);
+ }
if (wait)
md_super_wait(mddev);
@@ -282,7 +288,7 @@
struct buffer_head *bh;
if (bitmap->file == NULL)
- return write_sb_page(bitmap->mddev, bitmap->offset, page, wait);
+ return write_sb_page(bitmap, page, wait);
bh = page_buffers(page);
@@ -923,6 +929,7 @@
}
bitmap->filemap[bitmap->file_pages++] = page;
+ bitmap->last_page_size = count;
}
paddr = kmap_atomic(page, KM_USER0);
if (bitmap->flags & BITMAP_HOSTENDIAN)
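
The write_sb_page() change above stops writing a full PAGE_SIZE for the last bitmap page: only that page can be partially used, so its write length becomes bitmap->last_page_size rounded up to the device's hardware sector size. A quick illustration of the roundup() macro doing that (values are made up):

	/* roundup(x, y): smallest multiple of y that is >= x */
	roundup(300, 512);	/* 512  */
	roundup(512, 512);	/* 512  */
	roundup(4001, 512);	/* 4096 */
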
diff --git a/drivers/md/linear.c b/drivers/md/linear.c
index d5ecd2d..1927410 100644
--- a/drivers/md/linear.c
+++ b/drivers/md/linear.c
@@ -139,8 +139,6 @@
if (!conf)
return NULL;
- mddev->private = conf;
-
cnt = 0;
conf->array_size = 0;
@@ -232,7 +230,7 @@
* First calculate the device offsets.
*/
conf->disks[0].offset = 0;
- for (i=1; i<mddev->raid_disks; i++)
+ for (i = 1; i < raid_disks; i++)
conf->disks[i].offset =
conf->disks[i-1].offset +
conf->disks[i-1].size;
@@ -244,7 +242,7 @@
curr_offset < conf->array_size;
curr_offset += conf->hash_spacing) {
- while (i < mddev->raid_disks-1 &&
+ while (i < raid_disks-1 &&
curr_offset >= conf->disks[i+1].offset)
i++;
@@ -299,9 +297,11 @@
*/
linear_conf_t *newconf;
- if (rdev->raid_disk != mddev->raid_disks)
+ if (rdev->saved_raid_disk != mddev->raid_disks)
return -EINVAL;
+ rdev->raid_disk = rdev->saved_raid_disk;
+
newconf = linear_conf(mddev,mddev->raid_disks+1);
if (!newconf)
diff --git a/drivers/md/md.c b/drivers/md/md.c
index c10ce91..1c54f3c 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -1298,8 +1298,9 @@
ITERATE_RDEV(mddev,rdev2,tmp)
if (rdev2->desc_nr+1 > max_dev)
max_dev = rdev2->desc_nr+1;
-
- sb->max_dev = cpu_to_le32(max_dev);
+
+ if (max_dev > le32_to_cpu(sb->max_dev))
+ sb->max_dev = cpu_to_le32(max_dev);
for (i=0; i<max_dev;i++)
sb->dev_roles[i] = cpu_to_le16(0xfffe);
@@ -1365,10 +1366,14 @@
}
/* make sure rdev->size exceeds mddev->size */
if (rdev->size && (mddev->size == 0 || rdev->size < mddev->size)) {
- if (mddev->pers)
- /* Cannot change size, so fail */
- return -ENOSPC;
- else
+ if (mddev->pers) {
+ /* Cannot change size, so fail
+ * If mddev->level <= 0, then we don't care
+ * about aligning sizes (e.g. linear)
+ */
+ if (mddev->level > 0)
+ return -ENOSPC;
+ } else
mddev->size = rdev->size;
}
@@ -2142,6 +2147,9 @@
rdev->desc_nr = i++;
rdev->raid_disk = rdev->desc_nr;
set_bit(In_sync, &rdev->flags);
+ } else if (rdev->raid_disk >= mddev->raid_disks) {
+ rdev->raid_disk = -1;
+ clear_bit(In_sync, &rdev->flags);
}
}
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index dfe3214..2c404f7 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -415,7 +415,7 @@
raid0_conf_t *conf = mddev_to_conf(mddev);
struct strip_zone *zone;
mdk_rdev_t *tmp_dev;
- unsigned long chunk;
+ sector_t chunk;
sector_t block, rsect;
const int rw = bio_data_dir(bio);
@@ -470,7 +470,6 @@
sector_div(x, zone->nb_dev);
chunk = x;
- BUG_ON(x != (sector_t)chunk);
x = block >> chunksize_bits;
tmp_dev = zone->dev[sector_div(x, zone->nb_dev)];
diff --git a/drivers/message/i2o/driver.c b/drivers/message/i2o/driver.c
index d3235f2..e0d474b 100644
--- a/drivers/message/i2o/driver.c
+++ b/drivers/message/i2o/driver.c
@@ -123,8 +123,12 @@
}
rc = driver_register(&drv->driver);
- if (rc)
- destroy_workqueue(drv->event_queue);
+ if (rc) {
+ if (drv->event) {
+ destroy_workqueue(drv->event_queue);
+ drv->event_queue = NULL;
+ }
+ }
return rc;
};
@@ -256,7 +260,7 @@
int i;
struct i2o_driver *drv;
- for (i = 0; i < I2O_MAX_DRIVERS; i++) {
+ for (i = 0; i < i2o_max_drivers; i++) {
drv = i2o_drivers[i];
if (drv)
@@ -276,7 +280,7 @@
int i;
struct i2o_driver *drv;
- for (i = 0; i < I2O_MAX_DRIVERS; i++) {
+ for (i = 0; i < i2o_max_drivers; i++) {
drv = i2o_drivers[i];
if (drv)
@@ -295,7 +299,7 @@
int i;
struct i2o_driver *drv;
- for (i = 0; i < I2O_MAX_DRIVERS; i++) {
+ for (i = 0; i < i2o_max_drivers; i++) {
drv = i2o_drivers[i];
if (drv)
@@ -314,7 +318,7 @@
int i;
struct i2o_driver *drv;
- for (i = 0; i < I2O_MAX_DRIVERS; i++) {
+ for (i = 0; i < i2o_max_drivers; i++) {
drv = i2o_drivers[i];
if (drv)
@@ -335,17 +339,15 @@
spin_lock_init(&i2o_drivers_lock);
- if ((i2o_max_drivers < 2) || (i2o_max_drivers > 64) ||
- ((i2o_max_drivers ^ (i2o_max_drivers - 1)) !=
- (2 * i2o_max_drivers - 1))) {
- osm_warn("max_drivers set to %d, but must be >=2 and <= 64 and "
- "a power of 2\n", i2o_max_drivers);
+ if ((i2o_max_drivers < 2) || (i2o_max_drivers > 64)) {
+ osm_warn("max_drivers set to %d, but must be >=2 and <= 64\n",
+ i2o_max_drivers);
i2o_max_drivers = I2O_MAX_DRIVERS;
}
osm_info("max drivers = %d\n", i2o_max_drivers);
i2o_drivers =
- kzalloc(i2o_max_drivers * sizeof(*i2o_drivers), GFP_KERNEL);
+ kcalloc(i2o_max_drivers, sizeof(*i2o_drivers), GFP_KERNEL);
if (!i2o_drivers)
return -ENOMEM;
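
For reference, the sanity check dropped above was a power-of-two test: when x is a power of two, x - 1 flips every lower bit, so x ^ (x - 1) == 2*x - 1, and the equality fails for every other value. The same condition is normally written as:

	if (x & (x - 1))
		;	/* x is not a power of two (assuming x != 0) */

The allocation is also switched from kzalloc(n * size, ...) to kcalloc(n, size, ...), the array form that additionally checks the n * size multiplication for overflow.
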
diff --git a/drivers/misc/phantom.c b/drivers/misc/phantom.c
index 35b139b..5108b7c 100644
--- a/drivers/misc/phantom.c
+++ b/drivers/misc/phantom.c
@@ -47,6 +47,7 @@
struct cdev cdev;
struct mutex open_lock;
+ spinlock_t ioctl_lock;
};
static unsigned char phantom_devices[PHANTOM_MAX_MINORS];
@@ -59,8 +60,11 @@
atomic_set(&dev->counter, 0);
iowrite32(PHN_CTL_IRQ, dev->iaddr + PHN_CONTROL);
iowrite32(0x43, dev->caddr + PHN_IRQCTL);
- } else if ((dev->status & PHB_RUNNING) && !(newstat & PHB_RUNNING))
+ ioread32(dev->caddr + PHN_IRQCTL); /* PCI posting */
+ } else if ((dev->status & PHB_RUNNING) && !(newstat & PHB_RUNNING)) {
iowrite32(0, dev->caddr + PHN_IRQCTL);
+ ioread32(dev->caddr + PHN_IRQCTL); /* PCI posting */
+ }
dev->status = newstat;
@@ -71,8 +75,8 @@
* File ops
*/
-static int phantom_ioctl(struct inode *inode, struct file *file, u_int cmd,
- u_long arg)
+static long phantom_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
{
struct phantom_device *dev = file->private_data;
struct phm_regs rs;
@@ -92,24 +96,32 @@
if (r.reg > 7)
return -EINVAL;
+ spin_lock(&dev->ioctl_lock);
if (r.reg == PHN_CONTROL && (r.value & PHN_CTL_IRQ) &&
- phantom_status(dev, dev->status | PHB_RUNNING))
+ phantom_status(dev, dev->status | PHB_RUNNING)){
+ spin_unlock(&dev->ioctl_lock);
return -ENODEV;
+ }
pr_debug("phantom: writing %x to %u\n", r.value, r.reg);
iowrite32(r.value, dev->iaddr + r.reg);
+ ioread32(dev->iaddr); /* PCI posting */
if (r.reg == PHN_CONTROL && !(r.value & PHN_CTL_IRQ))
phantom_status(dev, dev->status & ~PHB_RUNNING);
+ spin_unlock(&dev->ioctl_lock);
break;
case PHN_SET_REGS:
if (copy_from_user(&rs, argp, sizeof(rs)))
return -EFAULT;
pr_debug("phantom: SRS %u regs %x\n", rs.count, rs.mask);
+ spin_lock(&dev->ioctl_lock);
for (i = 0; i < min(rs.count, 8U); i++)
if ((1 << i) & rs.mask)
iowrite32(rs.values[i], dev->oaddr + i);
+ ioread32(dev->iaddr); /* PCI posting */
+ spin_unlock(&dev->ioctl_lock);
break;
case PHN_GET_REG:
if (copy_from_user(&r, argp, sizeof(r)))
@@ -128,9 +140,11 @@
return -EFAULT;
pr_debug("phantom: GRS %u regs %x\n", rs.count, rs.mask);
+ spin_lock(&dev->ioctl_lock);
for (i = 0; i < min(rs.count, 8U); i++)
if ((1 << i) & rs.mask)
rs.values[i] = ioread32(dev->iaddr + i);
+ spin_unlock(&dev->ioctl_lock);
if (copy_to_user(argp, &rs, sizeof(rs)))
return -EFAULT;
@@ -199,7 +213,7 @@
static struct file_operations phantom_file_ops = {
.open = phantom_open,
.release = phantom_release,
- .ioctl = phantom_ioctl,
+ .unlocked_ioctl = phantom_ioctl,
.poll = phantom_poll,
};
@@ -212,6 +226,7 @@
iowrite32(0, dev->iaddr);
iowrite32(0xc0, dev->iaddr);
+ ioread32(dev->iaddr); /* PCI posting */
atomic_inc(&dev->counter);
wake_up_interruptible(&dev->wait);
@@ -282,11 +297,13 @@
}
mutex_init(&pht->open_lock);
+ spin_lock_init(&pht->ioctl_lock);
init_waitqueue_head(&pht->wait);
cdev_init(&pht->cdev, &phantom_file_ops);
pht->cdev.owner = THIS_MODULE;
iowrite32(0, pht->caddr + PHN_IRQCTL);
+ ioread32(pht->caddr + PHN_IRQCTL); /* PCI posting */
retval = request_irq(pdev->irq, phantom_isr,
IRQF_SHARED | IRQF_DISABLED, "phantom", pht);
if (retval) {
@@ -337,6 +354,7 @@
cdev_del(&pht->cdev);
iowrite32(0, pht->caddr + PHN_IRQCTL);
+ ioread32(pht->caddr + PHN_IRQCTL); /* PCI posting */
free_irq(pdev->irq, pht);
pci_iounmap(pdev, pht->oaddr);
@@ -358,6 +376,7 @@
struct phantom_device *dev = pci_get_drvdata(pdev);
iowrite32(0, dev->caddr + PHN_IRQCTL);
+ ioread32(dev->caddr + PHN_IRQCTL); /* PCI posting */
return 0;
}
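
The ioread32() calls tagged /* PCI posting */ above follow the usual flush idiom: writes to memory-mapped PCI space may be posted (buffered) by host bridges, and reading back from the same region forces them out to the device before the driver depends on their effect. A minimal sketch reusing the driver's PHN_IRQCTL offset (the helper name is illustrative):

	static void phantom_mask_irqs(void __iomem *caddr)
	{
		iowrite32(0, caddr + PHN_IRQCTL);	/* disable interrupts */
		ioread32(caddr + PHN_IRQCTL);		/* flush the posted write */
	}
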
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index a7562f7..540ff4b 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -135,23 +135,6 @@
struct mmc_data data;
};
-static int mmc_blk_prep_rq(struct mmc_queue *mq, struct request *req)
-{
- struct mmc_blk_data *md = mq->data;
- int stat = BLKPREP_OK;
-
- /*
- * If we have no device, we haven't finished initialising.
- */
- if (!md || !mq->card) {
- printk(KERN_ERR "%s: killing request - no device/host\n",
- req->rq_disk->disk_name);
- stat = BLKPREP_KILL;
- }
-
- return stat;
-}
-
static u32 mmc_sd_num_wr_blocks(struct mmc_card *card)
{
int err;
@@ -460,7 +443,6 @@
if (ret)
goto err_putdisk;
- md->queue.prep_fn = mmc_blk_prep_rq;
md->queue.issue_fn = mmc_blk_issue_rq;
md->queue.data = md;
diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c
index 2e77963..dd97bc7 100644
--- a/drivers/mmc/card/queue.c
+++ b/drivers/mmc/card/queue.c
@@ -20,40 +20,21 @@
#define MMC_QUEUE_SUSPENDED (1 << 0)
/*
- * Prepare a MMC request. Essentially, this means passing the
- * preparation off to the media driver. The media driver will
- * create a mmc_io_request in req->special.
+ * Prepare a MMC request. This just filters out odd stuff.
*/
static int mmc_prep_request(struct request_queue *q, struct request *req)
{
- struct mmc_queue *mq = q->queuedata;
- int ret = BLKPREP_KILL;
-
- if (blk_special_request(req)) {
- /*
- * Special commands already have the command
- * blocks already setup in req->special.
- */
- BUG_ON(!req->special);
-
- ret = BLKPREP_OK;
- } else if (blk_fs_request(req) || blk_pc_request(req)) {
- /*
- * Block I/O requests need translating according
- * to the protocol.
- */
- ret = mq->prep_fn(mq, req);
- } else {
- /*
- * Everything else is invalid.
- */
+ /*
+ * We only like normal block requests.
+ */
+ if (!blk_fs_request(req) && !blk_pc_request(req)) {
blk_dump_rq_flags(req, "MMC bad request");
+ return BLKPREP_KILL;
}
- if (ret == BLKPREP_OK)
- req->cmd_flags |= REQ_DONTPREP;
+ req->cmd_flags |= REQ_DONTPREP;
- return ret;
+ return BLKPREP_OK;
}
static int mmc_queue_thread(void *d)
diff --git a/drivers/mmc/card/queue.h b/drivers/mmc/card/queue.h
index c9f139e..1590b3f 100644
--- a/drivers/mmc/card/queue.h
+++ b/drivers/mmc/card/queue.h
@@ -10,20 +10,12 @@
struct semaphore thread_sem;
unsigned int flags;
struct request *req;
- int (*prep_fn)(struct mmc_queue *, struct request *);
int (*issue_fn)(struct mmc_queue *, struct request *);
void *data;
struct request_queue *queue;
struct scatterlist *sg;
};
-struct mmc_io_request {
- struct request *rq;
- int num;
- struct mmc_command selcmd; /* mmc_queue private */
- struct mmc_command cmd[4]; /* max 4 commands */
-};
-
extern int mmc_init_queue(struct mmc_queue *, struct mmc_card *, spinlock_t *);
extern void mmc_cleanup_queue(struct mmc_queue *);
extern void mmc_queue_suspend(struct mmc_queue *);
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index c5baa19..30fd479 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -2218,7 +2218,7 @@
config VIA_VELOCITY
tristate "VIA Velocity support"
- depends on NET_PCI && PCI
+ depends on PCI
select CRC32
select CRC_CCITT
select MII
diff --git a/drivers/net/chelsio/suni1x10gexp_regs.h b/drivers/net/chelsio/suni1x10gexp_regs.h
index 269d097..d0f87d8 100644
--- a/drivers/net/chelsio/suni1x10gexp_regs.h
+++ b/drivers/net/chelsio/suni1x10gexp_regs.h
@@ -105,7 +105,7 @@
#define mSUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_LOW(filterId) (0x204A + mSUNI1x10GEXP_MAC_FILTER_OFFSET(filterId))
#define mSUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_MID(filterId) (0x204B + mSUNI1x10GEXP_MAC_FILTER_OFFSET(filterId))
#define mSUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_HIGH(filterId)(0x204C + mSUNI1x10GEXP_MAC_FILTER_OFFSET(filterId))
-#define mSUNI1x10GEXP_REG_RXXG_EXACT_MATCH_VID(filterId) (0x2062 + mSUNI1x10GEXP_MAC_VID_FILTER_OFFSET(filterId)
+#define mSUNI1x10GEXP_REG_RXXG_EXACT_MATCH_VID(filterId) (0x2062 + mSUNI1x10GEXP_MAC_VID_FILTER_OFFSET(filterId))
#define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_0_LOW 0x204A
#define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_0_MID 0x204B
#define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_0_HIGH 0x204C
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
index 7a01802..4154fd0 100644
--- a/drivers/net/forcedeth.c
+++ b/drivers/net/forcedeth.c
@@ -195,7 +195,7 @@
#define NVREG_IRQ_TX_FORCED 0x0100
#define NVREG_IRQ_RECOVER_ERROR 0x8000
#define NVREG_IRQMASK_THROUGHPUT 0x00df
-#define NVREG_IRQMASK_CPU 0x0040
+#define NVREG_IRQMASK_CPU 0x0060
#define NVREG_IRQ_TX_ALL (NVREG_IRQ_TX_ERR|NVREG_IRQ_TX_OK|NVREG_IRQ_TX_FORCED)
#define NVREG_IRQ_RX_ALL (NVREG_IRQ_RX_ERROR|NVREG_IRQ_RX|NVREG_IRQ_RX_NOBUF|NVREG_IRQ_RX_FORCED)
#define NVREG_IRQ_OTHER (NVREG_IRQ_TIMER|NVREG_IRQ_LINK|NVREG_IRQ_RECOVER_ERROR)
diff --git a/drivers/net/meth.c b/drivers/net/meth.c
index 0343ea1..92b403b 100644
--- a/drivers/net/meth.c
+++ b/drivers/net/meth.c
@@ -8,15 +8,16 @@
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
-#include <linux/module.h>
-#include <linux/init.h>
-
-#include <linux/kernel.h> /* printk() */
#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
#include <linux/slab.h>
-#include <linux/errno.h> /* error codes */
-#include <linux/types.h> /* size_t */
-#include <linux/interrupt.h> /* mark_bh */
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/interrupt.h>
#include <linux/in.h>
#include <linux/in6.h>
@@ -33,7 +34,6 @@
#include <asm/io.h>
#include <asm/scatterlist.h>
-#include <linux/dma-mapping.h>
#include "meth.h"
@@ -51,8 +51,6 @@
static const char *meth_str="SGI O2 Fast Ethernet";
-MODULE_AUTHOR("Ilya Volynets <ilya@theIlya.com>");
-MODULE_DESCRIPTION("SGI O2 Builtin Fast Ethernet driver");
#define HAVE_TX_TIMEOUT
/* The maximum time waited (in jiffies) before assuming a Tx failed. (400ms) */
@@ -784,15 +782,15 @@
/*
* The init function.
*/
-static struct net_device *meth_init(void)
+static int __init meth_probe(struct platform_device *pdev)
{
struct net_device *dev;
struct meth_private *priv;
- int ret;
+ int err;
dev = alloc_etherdev(sizeof(struct meth_private));
if (!dev)
- return ERR_PTR(-ENOMEM);
+ return -ENOMEM;
dev->open = meth_open;
dev->stop = meth_release;
@@ -808,11 +806,12 @@
priv = netdev_priv(dev);
spin_lock_init(&priv->meth_lock);
+ SET_NETDEV_DEV(dev, &pdev->dev);
- ret = register_netdev(dev);
- if (ret) {
+ err = register_netdev(dev);
+ if (err) {
free_netdev(dev);
- return ERR_PTR(ret);
+ return err;
}
printk(KERN_INFO "%s: SGI MACE Ethernet rev. %d\n",
@@ -820,21 +819,44 @@
return 0;
}
-static struct net_device *meth_dev;
+static int __exit meth_remove(struct platform_device *pdev)
+{
+ struct net_device *dev = platform_get_drvdata(pdev);
+
+ unregister_netdev(dev);
+ free_netdev(dev);
+ platform_set_drvdata(pdev, NULL);
+
+ return 0;
+}
+
+static struct platform_driver meth_driver = {
+ .probe = meth_probe,
+ .remove = __devexit_p(meth_remove),
+ .driver = {
+ .name = "meth",
+ }
+};
static int __init meth_init_module(void)
{
- meth_dev = meth_init();
- if (IS_ERR(meth_dev))
- return PTR_ERR(meth_dev);
- return 0;
+ int err;
+
+ err = platform_driver_register(&meth_driver);
+ if (err)
+ printk(KERN_ERR "Driver registration failed\n");
+
+ return err;
}
static void __exit meth_exit_module(void)
{
- unregister_netdev(meth_dev);
- free_netdev(meth_dev);
+ platform_driver_unregister(&meth_driver);
}
module_init(meth_init_module);
module_exit(meth_exit_module);
+
+MODULE_AUTHOR("Ilya Volynets <ilya@theIlya.com>");
+MODULE_DESCRIPTION("SGI O2 Builtin Fast Ethernet driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/phy/fixed.c b/drivers/net/phy/fixed.c
index 68c99b4c..bb96691 100644
--- a/drivers/net/phy/fixed.c
+++ b/drivers/net/phy/fixed.c
@@ -89,6 +89,7 @@
/*-----------------------------------------------------------------------------
* This is used for updating internal mii regs from the status
*-----------------------------------------------------------------------------*/
+#if defined(CONFIG_FIXED_MII_100_FDX) || defined(CONFIG_FIXED_MII_10_FDX)
static int fixed_mdio_update_regs(struct fixed_info *fixed)
{
u16 *regs = fixed->regs;
@@ -165,6 +166,7 @@
/*nothing here - no way/need to reset it*/
return 0;
}
+#endif
static int fixed_config_aneg(struct phy_device *phydev)
{
@@ -194,6 +196,7 @@
* number is used to create multiple fixed PHYs, so that several devices can
* utilize them simultaneously.
*-----------------------------------------------------------------------------*/
+#if defined(CONFIG_FIXED_MII_100_FDX) || defined(CONFIG_FIXED_MII_10_FDX)
static int fixed_mdio_register_device(int number, int speed, int duplex)
{
struct mii_bus *new_bus;
@@ -301,6 +304,7 @@
return err;
}
+#endif
MODULE_DESCRIPTION("Fixed PHY device & driver for PAL");
diff --git a/drivers/net/spider_net.c b/drivers/net/spider_net.c
index c3964c3..ef84d7c 100644
--- a/drivers/net/spider_net.c
+++ b/drivers/net/spider_net.c
@@ -1014,12 +1014,12 @@
*/
}
- /* pass skb up to stack */
- netif_receive_skb(skb);
-
/* update netdevice statistics */
card->netdev_stats.rx_packets++;
card->netdev_stats.rx_bytes += skb->len;
+
+ /* pass skb up to stack */
+ netif_receive_skb(skb);
}
#ifdef DEBUG
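
The reorder above matters because an skb handed to the stack may be freed before netif_receive_skb() returns control to the driver, so anything still needed from it (here skb->len for the byte counter) must be read first. In short:

	card->netdev_stats.rx_packets++;
	card->netdev_stats.rx_bytes += skb->len;	/* read len before handing off */
	netif_receive_skb(skb);				/* do not touch skb afterwards */
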
diff --git a/drivers/net/ucc_geth.c b/drivers/net/ucc_geth.c
index c2ccbd0..18b731b 100644
--- a/drivers/net/ucc_geth.c
+++ b/drivers/net/ucc_geth.c
@@ -23,11 +23,8 @@
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
-#include <linux/ethtool.h>
-#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/fsl_devices.h>
-#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/workqueue.h>
diff --git a/drivers/net/ucc_geth_mii.c b/drivers/net/ucc_geth_mii.c
index f96966d..7bcb82f 100644
--- a/drivers/net/ucc_geth_mii.c
+++ b/drivers/net/ucc_geth_mii.c
@@ -260,8 +260,6 @@
{},
};
-MODULE_DEVICE_TABLE(of, uec_mdio_match);
-
static struct of_platform_driver uec_mdio_driver = {
.name = DRV_NAME,
.probe = uec_mdio_probe,
diff --git a/drivers/net/usb/asix.c b/drivers/net/usb/asix.c
index d5ef97b..6d95cac 100644
--- a/drivers/net/usb/asix.c
+++ b/drivers/net/usb/asix.c
@@ -1458,6 +1458,10 @@
// IO-DATA ETG-US2
USB_DEVICE (0x04bb, 0x0930),
.driver_info = (unsigned long) &ax88178_info,
+}, {
+ // Belkin F5D5055
+ USB_DEVICE(0x050d, 0x5055),
+ .driver_info = (unsigned long) &ax88178_info,
},
{ }, // END
};
diff --git a/drivers/net/wireless/libertas/decl.h b/drivers/net/wireless/libertas/decl.h
index 606bdd0..dfe2764 100644
--- a/drivers/net/wireless/libertas/decl.h
+++ b/drivers/net/wireless/libertas/decl.h
@@ -46,7 +46,7 @@
u8 libertas_data_rate_to_index(u32 rate);
void libertas_get_fwversion(wlan_adapter * adapter, char *fwversion, int maxlen);
-int libertas_upload_rx_packet(wlan_private * priv, struct sk_buff *skb);
+void libertas_upload_rx_packet(wlan_private * priv, struct sk_buff *skb);
/** The proc fs interface */
int libertas_process_rx_command(wlan_private * priv);
diff --git a/drivers/net/wireless/libertas/fw.c b/drivers/net/wireless/libertas/fw.c
index 441123c..5c63c9b 100644
--- a/drivers/net/wireless/libertas/fw.c
+++ b/drivers/net/wireless/libertas/fw.c
@@ -333,18 +333,22 @@
unsigned long flags;
ptempnode = adapter->cur_cmd;
+ if (ptempnode == NULL) {
+ lbs_pr_debug(1, "PTempnode Empty\n");
+ return;
+ }
+
cmd = (struct cmd_ds_command *)ptempnode->bufvirtualaddr;
+ if (!cmd) {
+ lbs_pr_debug(1, "cmd is NULL\n");
+ return;
+ }
lbs_pr_info("command_timer_fn fired (%x)\n", cmd->command);
if (!adapter->fw_ready)
return;
- if (ptempnode == NULL) {
- lbs_pr_debug(1, "PTempnode Empty\n");
- return;
- }
-
spin_lock_irqsave(&adapter->driver_lock, flags);
adapter->cur_cmd = NULL;
spin_unlock_irqrestore(&adapter->driver_lock, flags);
diff --git a/drivers/net/wireless/libertas/rx.c b/drivers/net/wireless/libertas/rx.c
index d17924f..96619a32 100644
--- a/drivers/net/wireless/libertas/rx.c
+++ b/drivers/net/wireless/libertas/rx.c
@@ -136,7 +136,7 @@
LEAVE();
}
-int libertas_upload_rx_packet(wlan_private * priv, struct sk_buff *skb)
+void libertas_upload_rx_packet(wlan_private * priv, struct sk_buff *skb)
{
lbs_pr_debug(1, "skb->data=%p\n", skb->data);
@@ -148,8 +148,6 @@
skb->ip_summed = CHECKSUM_UNNECESSARY;
netif_rx(skb);
-
- return 0;
}
/**
@@ -269,15 +267,11 @@
wlan_compute_rssi(priv, p_rx_pd);
lbs_pr_debug(1, "RX Data: size of actual packet = %d\n", skb->len);
- if (libertas_upload_rx_packet(priv, skb)) {
- lbs_pr_debug(1, "RX error: libertas_upload_rx_packet"
- " returns failure\n");
- ret = -1;
- goto done;
- }
priv->stats.rx_bytes += skb->len;
priv->stats.rx_packets++;
+ libertas_upload_rx_packet(priv, skb);
+
ret = 0;
done:
LEAVE();
@@ -438,22 +432,14 @@
wlan_compute_rssi(priv, prxpd);
lbs_pr_debug(1, "RX Data: size of actual packet = %d\n", skb->len);
-
- if (libertas_upload_rx_packet(priv, skb)) {
- lbs_pr_debug(1, "RX error: libertas_upload_rx_packet "
- "returns failure\n");
- ret = -1;
- goto done;
- }
-
priv->stats.rx_bytes += skb->len;
priv->stats.rx_packets++;
+ libertas_upload_rx_packet(priv, skb);
+
ret = 0;
done:
LEAVE();
- skb->protocol = __constant_htons(0x0019); /* ETH_P_80211_RAW */
-
return (ret);
}
diff --git a/drivers/spi/mpc52xx_psc_spi.c b/drivers/spi/mpc52xx_psc_spi.c
index 052359f..11f36be 100644
--- a/drivers/spi/mpc52xx_psc_spi.c
+++ b/drivers/spi/mpc52xx_psc_spi.c
@@ -329,8 +329,8 @@
int ret = 0;
#if defined(CONFIG_PPC_MERGE)
- cdm = mpc52xx_find_and_map("mpc52xx-cdm");
- gpio = mpc52xx_find_and_map("mpc52xx-gpio");
+ cdm = mpc52xx_find_and_map("mpc5200-cdm");
+ gpio = mpc52xx_find_and_map("mpc5200-gpio");
#else
cdm = ioremap(MPC52xx_PA(MPC52xx_CDM_OFFSET), MPC52xx_CDM_SIZE);
gpio = ioremap(MPC52xx_PA(MPC52xx_GPIO_OFFSET), MPC52xx_GPIO_SIZE);
@@ -445,9 +445,6 @@
struct spi_master *master;
int ret;
- if (pdata == NULL)
- return -ENODEV;
-
master = spi_alloc_master(dev, sizeof *mps);
if (master == NULL)
return -ENOMEM;
@@ -594,17 +591,17 @@
}
regaddr64 = of_translate_address(op->node, regaddr_p);
+ /* get PSC id (1..6, used by port_config) */
if (op->dev.platform_data == NULL) {
- struct device_node *np;
- int i = 0;
+ const u32 *psc_nump;
- for_each_node_by_type(np, "spi") {
- if (of_find_device_by_node(np) == op) {
- id = i;
- break;
- }
- i++;
+ psc_nump = of_get_property(op->node, "cell-index", NULL);
+ if (!psc_nump || *psc_nump > 5) {
+ printk(KERN_ERR "mpc52xx_psc_spi: Device node %s has invalid "
+ "cell-index property\n", op->node->full_name);
+ return -EINVAL;
}
+ id = *psc_nump + 1;
}
return mpc52xx_psc_spi_do_probe(&op->dev, (u32)regaddr64, (u32)size64,
@@ -617,7 +614,7 @@
}
static struct of_device_id mpc52xx_psc_spi_of_match[] = {
- { .type = "spi", .compatible = "mpc52xx-psc-spi", },
+ { .type = "spi", .compatible = "mpc5200-psc-spi", },
{},
};
diff --git a/drivers/spi/omap_uwire.c b/drivers/spi/omap_uwire.c
index 96f62b2..95183e1 100644
--- a/drivers/spi/omap_uwire.c
+++ b/drivers/spi/omap_uwire.c
@@ -358,11 +358,11 @@
switch (spi->mode & (SPI_CPOL | SPI_CPHA)) {
case SPI_MODE_0:
case SPI_MODE_3:
- flags |= UWIRE_WRITE_RISING_EDGE | UWIRE_READ_FALLING_EDGE;
+ flags |= UWIRE_WRITE_FALLING_EDGE | UWIRE_READ_RISING_EDGE;
break;
case SPI_MODE_1:
case SPI_MODE_2:
- flags |= UWIRE_WRITE_FALLING_EDGE | UWIRE_READ_RISING_EDGE;
+ flags |= UWIRE_WRITE_RISING_EDGE | UWIRE_READ_FALLING_EDGE;
break;
}
diff --git a/drivers/spi/spidev.c b/drivers/spi/spidev.c
index 225d6b2..d04242a 100644
--- a/drivers/spi/spidev.c
+++ b/drivers/spi/spidev.c
@@ -168,6 +168,12 @@
n--, k_tmp++, u_tmp++) {
k_tmp->len = u_tmp->len;
+ total += k_tmp->len;
+ if (total > bufsiz) {
+ status = -EMSGSIZE;
+ goto done;
+ }
+
if (u_tmp->rx_buf) {
k_tmp->rx_buf = buf;
if (!access_ok(VERIFY_WRITE, u_tmp->rx_buf, u_tmp->len))
@@ -179,12 +185,6 @@
u_tmp->len))
goto done;
}
-
- total += k_tmp->len;
- if (total > bufsiz) {
- status = -EMSGSIZE;
- goto done;
- }
buf += k_tmp->len;
k_tmp->cs_change = !!u_tmp->cs_change;
@@ -364,6 +364,7 @@
break;
}
if (__copy_from_user(ioc, (void __user *)arg, tmp)) {
+ kfree(ioc);
retval = -EFAULT;
break;
}
diff --git a/drivers/usb/host/ehci-fsl.c b/drivers/usb/host/ehci-fsl.c
index a524805..c7a7c59 100644
--- a/drivers/usb/host/ehci-fsl.c
+++ b/drivers/usb/host/ehci-fsl.c
@@ -193,6 +193,19 @@
out_be32(non_ehci + FSL_SOC_USB_CTRL, 0x00000004);
out_be32(non_ehci + FSL_SOC_USB_SNOOP1, 0x0000001b);
+#if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
+ /*
+ * Turn on cache snooping hardware, since some PowerPC platforms
+	 * wholly rely on hardware to deal with cache coherency
+ */
+
+ /* Setup Snooping for all the 4GB space */
+ /* SNOOP1 starts from 0x0, size 2G */
+ out_be32(non_ehci + FSL_SOC_USB_SNOOP1, 0x0 | SNOOP_SIZE_2GB);
+ /* SNOOP2 starts from 0x80000000, size 2G */
+ out_be32(non_ehci + FSL_SOC_USB_SNOOP2, 0x80000000 | SNOOP_SIZE_2GB);
+#endif
+
if (pdata->operating_mode == FSL_USB2_DR_HOST)
mpc83xx_setup_phy(ehci, pdata->phy_mode, 0);
diff --git a/drivers/usb/host/ehci-fsl.h b/drivers/usb/host/ehci-fsl.h
index f28736a..b5e59db 100644
--- a/drivers/usb/host/ehci-fsl.h
+++ b/drivers/usb/host/ehci-fsl.h
@@ -34,4 +34,5 @@
#define FSL_SOC_USB_PRICTRL 0x40c /* NOTE: big-endian */
#define FSL_SOC_USB_SICTRL 0x410 /* NOTE: big-endian */
#define FSL_SOC_USB_CTRL 0x500 /* NOTE: big-endian */
+#define SNOOP_SIZE_2GB 0x1e
#endif /* _EHCI_FSL_H */
diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig
index 4d7485f..6e1f1ea 100644
--- a/drivers/video/Kconfig
+++ b/drivers/video/Kconfig
@@ -704,6 +704,91 @@
This is the frame buffer device driver for the CGsix (GX, TurboGX)
frame buffer.
+config FB_FFB
+ bool "Creator/Creator3D/Elite3D support"
+ depends on FB_SBUS && SPARC64
+ select FB_CFB_COPYAREA
+ select FB_CFB_IMAGEBLIT
+ help
+ This is the frame buffer device driver for the Creator, Creator3D,
+ and Elite3D graphics boards.
+
+config FB_TCX
+ bool "TCX (SS4/SS5 only) support"
+ depends on FB_SBUS
+ select FB_CFB_FILLRECT
+ select FB_CFB_COPYAREA
+ select FB_CFB_IMAGEBLIT
+ help
+ This is the frame buffer device driver for the TCX 24/8bit frame
+ buffer.
+
+config FB_CG14
+ bool "CGfourteen (SX) support"
+ depends on FB_SBUS
+ select FB_CFB_FILLRECT
+ select FB_CFB_COPYAREA
+ select FB_CFB_IMAGEBLIT
+ help
+ This is the frame buffer device driver for the CGfourteen frame
+ buffer on Desktop SPARCsystems with the SX graphics option.
+
+config FB_P9100
+ bool "P9100 (Sparcbook 3 only) support"
+ depends on FB_SBUS
+ select FB_CFB_FILLRECT
+ select FB_CFB_COPYAREA
+ select FB_CFB_IMAGEBLIT
+ help
+ This is the frame buffer device driver for the P9100 card
+ supported on Sparcbook 3 machines.
+
+config FB_LEO
+ bool "Leo (ZX) support"
+ depends on FB_SBUS
+ select FB_CFB_FILLRECT
+ select FB_CFB_COPYAREA
+ select FB_CFB_IMAGEBLIT
+ help
+ This is the frame buffer device driver for the SBUS-based Sun ZX
+ (leo) frame buffer cards.
+
+config FB_IGA
+ bool "IGA 168x display support"
+ depends on FB && SPARC32
+ select FB_CFB_FILLRECT
+ select FB_CFB_COPYAREA
+ select FB_CFB_IMAGEBLIT
+ help
+ This is the framebuffer device for the INTERGRAPHICS 1680 and
+ successor frame buffer cards.
+
+config FB_XVR500
+ bool "Sun XVR-500 3DLABS Wildcat support"
+ depends on FB && PCI && SPARC64
+ select FB_CFB_FILLRECT
+ select FB_CFB_COPYAREA
+ select FB_CFB_IMAGEBLIT
+ help
+ This is the framebuffer device for the Sun XVR-500 and similar
+ graphics cards based upon the 3DLABS Wildcat chipset. The driver
+	  only works on sparc64 systems where the system firmware has
+ mostly initialized the card already. It is treated as a
+ completely dumb framebuffer device.
+
+config FB_XVR2500
+ bool "Sun XVR-2500 3DLABS Wildcat support"
+ depends on FB && PCI && SPARC64
+ select FB_CFB_FILLRECT
+ select FB_CFB_COPYAREA
+ select FB_CFB_IMAGEBLIT
+ help
+ This is the framebuffer device for the Sun XVR-2500 and similar
+ graphics cards based upon the 3DLABS Wildcat chipset. The driver
+	  only works on sparc64 systems where the system firmware has
+ mostly initialized the card already. It is treated as a
+ completely dumb framebuffer device.
+
config FB_PVR2
tristate "NEC PowerVR 2 display support"
depends on FB && SH_DREAMCAST
@@ -1195,7 +1280,7 @@
config FB_ATY_CT
bool "Mach64 CT/VT/GT/LT (incl. 3D RAGE) support"
depends on PCI && FB_ATY
- default y if SPARC64 && FB_PCI
+ default y if SPARC64 && PCI
help
Say Y here to support use of ATI's 64-bit Rage boards (or other
boards based on the Mach64 CT, VT, GT, and LT chipsets) as a
@@ -1484,95 +1569,6 @@
source "drivers/video/geode/Kconfig"
-config FB_FFB
- bool "Creator/Creator3D/Elite3D support"
- depends on FB_SBUS && SPARC64
- select FB_CFB_COPYAREA
- select FB_CFB_IMAGEBLIT
- help
- This is the frame buffer device driver for the Creator, Creator3D,
- and Elite3D graphics boards.
-
-config FB_TCX
- bool "TCX (SS4/SS5 only) support"
- depends on FB_SBUS
- select FB_CFB_FILLRECT
- select FB_CFB_COPYAREA
- select FB_CFB_IMAGEBLIT
- help
- This is the frame buffer device driver for the TCX 24/8bit frame
- buffer.
-
-config FB_CG14
- bool "CGfourteen (SX) support"
- depends on FB_SBUS
- select FB_CFB_FILLRECT
- select FB_CFB_COPYAREA
- select FB_CFB_IMAGEBLIT
- help
- This is the frame buffer device driver for the CGfourteen frame
- buffer on Desktop SPARCsystems with the SX graphics option.
-
-config FB_P9100
- bool "P9100 (Sparcbook 3 only) support"
- depends on FB_SBUS
- select FB_CFB_FILLRECT
- select FB_CFB_COPYAREA
- select FB_CFB_IMAGEBLIT
- help
- This is the frame buffer device driver for the P9100 card
- supported on Sparcbook 3 machines.
-
-config FB_LEO
- bool "Leo (ZX) support"
- depends on FB_SBUS
- select FB_CFB_FILLRECT
- select FB_CFB_COPYAREA
- select FB_CFB_IMAGEBLIT
- help
- This is the frame buffer device driver for the SBUS-based Sun ZX
- (leo) frame buffer cards.
-
-config FB_XVR500
- bool "Sun XVR-500 3DLABS Wildcat support"
- depends on (FB = y) && PCI && SPARC64
- select FB_CFB_FILLRECT
- select FB_CFB_COPYAREA
- select FB_CFB_IMAGEBLIT
- help
- This is the framebuffer device for the Sun XVR-500 and similar
- graphics cards based upon the 3DLABS Wildcat chipset. The driver
- only works on sparc64 systems where the system firwmare has
- mostly initialized the card already. It is treated as a
- completely dumb framebuffer device.
-
-config FB_XVR2500
- bool "Sun XVR-2500 3DLABS Wildcat support"
- depends on (FB = y) && PCI && SPARC64
- select FB_CFB_FILLRECT
- select FB_CFB_COPYAREA
- select FB_CFB_IMAGEBLIT
- help
- This is the framebuffer device for the Sun XVR-2500 and similar
- graphics cards based upon the 3DLABS Wildcat chipset. The driver
- only works on sparc64 systems where the system firwmare has
- mostly initialized the card already. It is treated as a
- completely dumb framebuffer device.
-
-config FB_PCI
- bool "PCI framebuffers"
- depends on (FB = y) && PCI && SPARC
-
-config FB_IGA
- bool "IGA 168x display support"
- depends on SPARC32 && FB_PCI
- select FB_CFB_FILLRECT
- select FB_CFB_COPYAREA
- select FB_CFB_IMAGEBLIT
- help
- This is the framebuffer device for the INTERGRAPHICS 1680 and
- successor frame buffer cards.
-
config FB_HIT
tristate "HD64461 Frame Buffer support"
depends on FB && HD64461
@@ -1796,9 +1792,10 @@
config FB_PS3
bool "PS3 GPU framebuffer driver"
depends on (FB = y) && PS3_PS3AV
- select FB_CFB_FILLRECT
- select FB_CFB_COPYAREA
- select FB_CFB_IMAGEBLIT
+ select FB_SYS_FILLRECT
+ select FB_SYS_COPYAREA
+ select FB_SYS_IMAGEBLIT
+ select FB_SYS_FOPS
---help---
Include support for the virtual frame buffer in the PS3 platform.
diff --git a/drivers/video/imxfb.c b/drivers/video/imxfb.c
index 267c1ff..a125898 100644
--- a/drivers/video/imxfb.c
+++ b/drivers/video/imxfb.c
@@ -394,26 +394,18 @@
/* initialize GPIOs */
imx_gpio_mode(PD6_PF_LSCLK);
- imx_gpio_mode(PD10_PF_SPL_SPR);
imx_gpio_mode(PD11_PF_CONTRAST);
imx_gpio_mode(PD14_PF_FLM_VSYNC);
imx_gpio_mode(PD13_PF_LP_HSYNC);
- imx_gpio_mode(PD7_PF_REV);
- imx_gpio_mode(PD8_PF_CLS);
-
-#ifndef CONFIG_MACH_PIMX1
- /* on PiMX1 used as buffers enable signal
- */
- imx_gpio_mode(PD9_PF_PS);
-#endif
-
-#ifndef CONFIG_MACH_MX1FS2
- /* on mx1fs2 this pin is used to (de)activate the display, so we need
- * it as a normal gpio
- */
imx_gpio_mode(PD12_PF_ACD_OE);
-#endif
+ /* These are only needed for Sharp HR TFT displays */
+ if (fbi->pcr & PCR_SHARP) {
+ imx_gpio_mode(PD7_PF_REV);
+ imx_gpio_mode(PD8_PF_CLS);
+ imx_gpio_mode(PD9_PF_PS);
+ imx_gpio_mode(PD10_PF_SPL_SPR);
+ }
}
#ifdef CONFIG_PM
@@ -476,7 +468,6 @@
info->fbops = &imxfb_ops;
info->flags = FBINFO_FLAG_DEFAULT;
- info->pseudo_palette = (fbi + 1);
fbi->rgb[RGB_16] = &def_rgb_16;
fbi->rgb[RGB_8] = &def_rgb_8;
@@ -499,6 +490,7 @@
info->var.sync = inf->sync;
info->var.grayscale = inf->cmap_greyscale;
fbi->cmap_inverse = inf->cmap_inverse;
+ fbi->cmap_static = inf->cmap_static;
fbi->pcr = inf->pcr;
fbi->lscr1 = inf->lscr1;
fbi->dmacr = inf->dmacr;
diff --git a/drivers/video/pm2fb.c b/drivers/video/pm2fb.c
index ab5e668..0a04483 100644
--- a/drivers/video/pm2fb.c
+++ b/drivers/video/pm2fb.c
@@ -183,15 +183,17 @@
index = PM2VR_RD_INDEXED_DATA;
break;
}
- mb();
+ wmb();
pm2_WR(p, index, v);
+ wmb();
}
static inline void pm2v_RDAC_WR(struct pm2fb_par* p, s32 idx, u32 v)
{
pm2_WR(p, PM2VR_RD_INDEX_LOW, idx & 0xff);
- mb();
+ wmb();
pm2_WR(p, PM2VR_RD_INDEXED_DATA, v);
+ wmb();
}
#ifdef CONFIG_FB_PM2_FIFO_DISCONNECT
@@ -466,11 +468,9 @@
WAIT_FIFO(par, 8);
pm2_WR(par, PM2VR_RD_INDEX_HIGH, PM2VI_RD_MCLK_CONTROL >> 8);
pm2v_RDAC_WR(par, PM2VI_RD_MCLK_CONTROL, 0);
- wmb();
pm2v_RDAC_WR(par, PM2VI_RD_MCLK_PRESCALE, m);
pm2v_RDAC_WR(par, PM2VI_RD_MCLK_FEEDBACK, n);
pm2v_RDAC_WR(par, PM2VI_RD_MCLK_POSTSCALE, p);
- wmb();
pm2v_RDAC_WR(par, PM2VI_RD_MCLK_CONTROL, 1);
rmb();
for (i = 256;
@@ -483,12 +483,9 @@
pm2_mnp(clk, &m, &n, &p);
WAIT_FIFO(par, 10);
pm2_RDAC_WR(par, PM2I_RD_MEMORY_CLOCK_3, 6);
- wmb();
pm2_RDAC_WR(par, PM2I_RD_MEMORY_CLOCK_1, m);
pm2_RDAC_WR(par, PM2I_RD_MEMORY_CLOCK_2, n);
- wmb();
pm2_RDAC_WR(par, PM2I_RD_MEMORY_CLOCK_3, 8|p);
- wmb();
pm2_RDAC_RD(par, PM2I_RD_MEMORY_CLOCK_STATUS);
rmb();
for (i = 256;
@@ -509,12 +506,9 @@
pm2_mnp(clk, &m, &n, &p);
WAIT_FIFO(par, 8);
pm2_RDAC_WR(par, PM2I_RD_PIXEL_CLOCK_A3, 0);
- wmb();
pm2_RDAC_WR(par, PM2I_RD_PIXEL_CLOCK_A1, m);
pm2_RDAC_WR(par, PM2I_RD_PIXEL_CLOCK_A2, n);
- wmb();
pm2_RDAC_WR(par, PM2I_RD_PIXEL_CLOCK_A3, 8|p);
- wmb();
pm2_RDAC_RD(par, PM2I_RD_PIXEL_CLOCK_STATUS);
rmb();
for (i = 256;
@@ -1066,10 +1060,9 @@
if (!w || !h)
return;
- WAIT_FIFO(par, 6);
+ WAIT_FIFO(par, 5);
pm2_WR(par, PM2R_CONFIG, PM2F_CONFIG_FB_WRITE_ENABLE |
PM2F_CONFIG_FB_READ_SOURCE_ENABLE);
- pm2_WR(par, PM2R_FB_PIXEL_OFFSET, 0);
if (copy)
pm2_WR(par, PM2R_FB_SOURCE_DELTA,
((ysrc-y) & 0xfff) << 16 | ((xsrc-x) & 0xfff));
diff --git a/drivers/video/pm3fb.c b/drivers/video/pm3fb.c
index c77a1a1..616a0c0 100644
--- a/drivers/video/pm3fb.c
+++ b/drivers/video/pm3fb.c
@@ -52,11 +52,6 @@
static char *mode_option __devinitdata;
/*
- * If your driver supports multiple boards, you should make the
- * below data types arrays, or allocate them dynamically (using kmalloc()).
- */
-
-/*
* This structure defines the hardware state of the graphics card. Normally
* you place this in a header file in linux/include/video. This file usually
* also includes register information. That allows other driver subsystems
@@ -67,7 +62,7 @@
unsigned char __iomem *v_regs;/* virtual address of p_regs */
u32 video; /* video flags before blanking */
u32 base; /* screen base (xoffset+yoffset) in 128 bits unit */
- u32 palette[16];
+ u32 palette[16];
};
/*
@@ -104,36 +99,28 @@
while (PM3_READ_REG(par, PM3InFIFOSpace) < n);
}
-static inline void PM3_SLOW_WRITE_REG(struct pm3_par *par, s32 off, u32 v)
-{
- if (par->v_regs) {
- mb();
- PM3_WAIT(par, 1);
- wmb();
- PM3_WRITE_REG(par, off, v);
- }
-}
-
-static inline void PM3_SET_INDEX(struct pm3_par *par, unsigned index)
-{
- PM3_SLOW_WRITE_REG(par, PM3RD_IndexHigh, (index >> 8) & 0xff);
- PM3_SLOW_WRITE_REG(par, PM3RD_IndexLow, index & 0xff);
-}
-
static inline void PM3_WRITE_DAC_REG(struct pm3_par *par, unsigned r, u8 v)
{
- PM3_SET_INDEX(par, r);
+ PM3_WAIT(par, 3);
+ PM3_WRITE_REG(par, PM3RD_IndexHigh, (r >> 8) & 0xff);
+ PM3_WRITE_REG(par, PM3RD_IndexLow, r & 0xff);
wmb();
PM3_WRITE_REG(par, PM3RD_IndexedData, v);
+ wmb();
}
static inline void pm3fb_set_color(struct pm3_par *par, unsigned char regno,
unsigned char r, unsigned char g, unsigned char b)
{
- PM3_SLOW_WRITE_REG(par, PM3RD_PaletteWriteAddress, regno);
- PM3_SLOW_WRITE_REG(par, PM3RD_PaletteData, r);
- PM3_SLOW_WRITE_REG(par, PM3RD_PaletteData, g);
- PM3_SLOW_WRITE_REG(par, PM3RD_PaletteData, b);
+ PM3_WAIT(par, 4);
+ PM3_WRITE_REG(par, PM3RD_PaletteWriteAddress, regno);
+ wmb();
+ PM3_WRITE_REG(par, PM3RD_PaletteData, r);
+ wmb();
+ PM3_WRITE_REG(par, PM3RD_PaletteData, g);
+ wmb();
+ PM3_WRITE_REG(par, PM3RD_PaletteData, b);
+ wmb();
}
static void pm3fb_clear_colormap(struct pm3_par *par,
@@ -141,7 +128,7 @@
{
int i;
- for (i = 0; i < 256 ; i++) /* fill color map with white */
+ for (i = 0; i < 256 ; i++)
pm3fb_set_color(par, i, r, g, b);
}
@@ -175,19 +162,26 @@
}
}
-static inline int pm3fb_shift_bpp(unsigned long depth, int v)
+static inline int pm3fb_depth(const struct fb_var_screeninfo *var)
{
- switch (depth) {
+ if ( var->bits_per_pixel == 16 )
+ return var->red.length + var->green.length
+ + var->blue.length;
+
+ return var->bits_per_pixel;
+}
+
+static inline int pm3fb_shift_bpp(unsigned bpp, int v)
+{
+ switch (bpp) {
case 8:
return (v >> 4);
- case 12:
- case 15:
case 16:
return (v >> 3);
case 32:
return (v >> 2);
}
- DPRINTK("Unsupported depth %ld\n", depth);
+ DPRINTK("Unsupported depth %u\n", bpp);
return 0;
}
@@ -206,56 +200,50 @@
const u32 vbend = vsend + info->var.upper_margin;
const u32 vtotal = info->var.yres + vbend;
const u32 width = (info->var.xres_virtual + 7) & ~7;
+ const unsigned bpp = info->var.bits_per_pixel;
- PM3_SLOW_WRITE_REG(par, PM3MemBypassWriteMask, 0xffffffff);
- PM3_SLOW_WRITE_REG(par, PM3Aperture0, 0x00000000);
- PM3_SLOW_WRITE_REG(par, PM3Aperture1, 0x00000000);
- PM3_SLOW_WRITE_REG(par, PM3FIFODis, 0x00000007);
+ PM3_WAIT(par, 20);
+ PM3_WRITE_REG(par, PM3MemBypassWriteMask, 0xffffffff);
+ PM3_WRITE_REG(par, PM3Aperture0, 0x00000000);
+ PM3_WRITE_REG(par, PM3Aperture1, 0x00000000);
+ PM3_WRITE_REG(par, PM3FIFODis, 0x00000007);
- PM3_SLOW_WRITE_REG(par, PM3HTotal,
- pm3fb_shift_bpp(info->var.bits_per_pixel,
- htotal - 1));
- PM3_SLOW_WRITE_REG(par, PM3HsEnd,
- pm3fb_shift_bpp(info->var.bits_per_pixel,
- hsend));
- PM3_SLOW_WRITE_REG(par, PM3HsStart,
- pm3fb_shift_bpp(info->var.bits_per_pixel,
- hsstart));
- PM3_SLOW_WRITE_REG(par, PM3HbEnd,
- pm3fb_shift_bpp(info->var.bits_per_pixel,
- hbend));
- PM3_SLOW_WRITE_REG(par, PM3HgEnd,
- pm3fb_shift_bpp(info->var.bits_per_pixel,
- hbend));
- PM3_SLOW_WRITE_REG(par, PM3ScreenStride,
- pm3fb_shift_bpp(info->var.bits_per_pixel,
- width));
- PM3_SLOW_WRITE_REG(par, PM3VTotal, vtotal - 1);
- PM3_SLOW_WRITE_REG(par, PM3VsEnd, vsend - 1);
- PM3_SLOW_WRITE_REG(par, PM3VsStart, vsstart - 1);
- PM3_SLOW_WRITE_REG(par, PM3VbEnd, vbend);
+ PM3_WRITE_REG(par, PM3HTotal,
+ pm3fb_shift_bpp(bpp, htotal - 1));
+ PM3_WRITE_REG(par, PM3HsEnd,
+ pm3fb_shift_bpp(bpp, hsend));
+ PM3_WRITE_REG(par, PM3HsStart,
+ pm3fb_shift_bpp(bpp, hsstart));
+ PM3_WRITE_REG(par, PM3HbEnd,
+ pm3fb_shift_bpp(bpp, hbend));
+ PM3_WRITE_REG(par, PM3HgEnd,
+ pm3fb_shift_bpp(bpp, hbend));
+ PM3_WRITE_REG(par, PM3ScreenStride,
+ pm3fb_shift_bpp(bpp, width));
+ PM3_WRITE_REG(par, PM3VTotal, vtotal - 1);
+ PM3_WRITE_REG(par, PM3VsEnd, vsend - 1);
+ PM3_WRITE_REG(par, PM3VsStart, vsstart - 1);
+ PM3_WRITE_REG(par, PM3VbEnd, vbend);
- switch (info->var.bits_per_pixel) {
+ switch (bpp) {
case 8:
- PM3_SLOW_WRITE_REG(par, PM3ByAperture1Mode,
+ PM3_WRITE_REG(par, PM3ByAperture1Mode,
PM3ByApertureMode_PIXELSIZE_8BIT);
- PM3_SLOW_WRITE_REG(par, PM3ByAperture2Mode,
+ PM3_WRITE_REG(par, PM3ByAperture2Mode,
PM3ByApertureMode_PIXELSIZE_8BIT);
break;
- case 12:
- case 15:
case 16:
#ifndef __BIG_ENDIAN
- PM3_SLOW_WRITE_REG(par, PM3ByAperture1Mode,
+ PM3_WRITE_REG(par, PM3ByAperture1Mode,
PM3ByApertureMode_PIXELSIZE_16BIT);
- PM3_SLOW_WRITE_REG(par, PM3ByAperture2Mode,
+ PM3_WRITE_REG(par, PM3ByAperture2Mode,
PM3ByApertureMode_PIXELSIZE_16BIT);
#else
- PM3_SLOW_WRITE_REG(par, PM3ByAperture1Mode,
+ PM3_WRITE_REG(par, PM3ByAperture1Mode,
PM3ByApertureMode_PIXELSIZE_16BIT |
PM3ByApertureMode_BYTESWAP_BADC);
- PM3_SLOW_WRITE_REG(par, PM3ByAperture2Mode,
+ PM3_WRITE_REG(par, PM3ByAperture2Mode,
PM3ByApertureMode_PIXELSIZE_16BIT |
PM3ByApertureMode_BYTESWAP_BADC);
#endif /* ! __BIG_ENDIAN */
@@ -263,23 +251,22 @@
case 32:
#ifndef __BIG_ENDIAN
- PM3_SLOW_WRITE_REG(par, PM3ByAperture1Mode,
+ PM3_WRITE_REG(par, PM3ByAperture1Mode,
PM3ByApertureMode_PIXELSIZE_32BIT);
- PM3_SLOW_WRITE_REG(par, PM3ByAperture2Mode,
+ PM3_WRITE_REG(par, PM3ByAperture2Mode,
PM3ByApertureMode_PIXELSIZE_32BIT);
#else
- PM3_SLOW_WRITE_REG(par, PM3ByAperture1Mode,
+ PM3_WRITE_REG(par, PM3ByAperture1Mode,
PM3ByApertureMode_PIXELSIZE_32BIT |
PM3ByApertureMode_BYTESWAP_DCBA);
- PM3_SLOW_WRITE_REG(par, PM3ByAperture2Mode,
+ PM3_WRITE_REG(par, PM3ByAperture2Mode,
PM3ByApertureMode_PIXELSIZE_32BIT |
PM3ByApertureMode_BYTESWAP_DCBA);
#endif /* ! __BIG_ENDIAN */
break;
default:
- DPRINTK("Unsupported depth %d\n",
- info->var.bits_per_pixel);
+ DPRINTK("Unsupported depth %d\n", bpp);
break;
}
@@ -296,14 +283,15 @@
PM3VideoControl_VSYNC_MASK);
video |= PM3VideoControl_HSYNC_ACTIVE_HIGH |
PM3VideoControl_VSYNC_ACTIVE_HIGH;
- PM3_SLOW_WRITE_REG(par, PM3VideoControl, video);
+ PM3_WRITE_REG(par, PM3VideoControl, video);
}
- PM3_SLOW_WRITE_REG(par, PM3VClkCtl,
+ PM3_WRITE_REG(par, PM3VClkCtl,
(PM3_READ_REG(par, PM3VClkCtl) & 0xFFFFFFFC));
- PM3_SLOW_WRITE_REG(par, PM3ScreenBase, par->base);
- PM3_SLOW_WRITE_REG(par, PM3ChipConfig,
+ PM3_WRITE_REG(par, PM3ScreenBase, par->base);
+ PM3_WRITE_REG(par, PM3ChipConfig,
(PM3_READ_REG(par, PM3ChipConfig) & 0xFFFFFFFD));
+ wmb();
{
unsigned char uninitialized_var(m); /* ClkPreScale */
unsigned char uninitialized_var(n); /* ClkFeedBackScale */
@@ -337,7 +325,7 @@
PM3_WRITE_DAC_REG(par, PM3RD_DACControl, 0x00);
- switch (info->var.bits_per_pixel) {
+ switch (pm3fb_depth(&info->var)) {
case 8:
PM3_WRITE_DAC_REG(par, PM3RD_PixelSize,
PM3RD_PixelSize_8_BIT_PIXELS);
@@ -393,57 +381,44 @@
* hardware independent functions
*/
int pm3fb_init(void);
-int pm3fb_setup(char*);
static int pm3fb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
{
u32 lpitch;
+ unsigned bpp = var->red.length + var->green.length
+ + var->blue.length + var->transp.length;
- var->transp.offset = 0;
- var->transp.length = 0;
- switch(var->bits_per_pixel) {
- case 8:
- var->red.length = var->green.length = var->blue.length = 8;
- var->red.offset = var->green.offset = var->blue.offset = 0;
- break;
- case 12:
- var->red.offset = 8;
- var->red.length = 4;
- var->green.offset = 4;
- var->green.length = 4;
- var->blue.offset = 0;
- var->blue.length = 4;
- var->transp.offset = 12;
- var->transp.length = 4;
- case 15:
- var->red.offset = 10;
- var->red.length = 5;
- var->green.offset = 5;
- var->green.length = 5;
- var->blue.offset = 0;
- var->blue.length = 5;
- var->transp.offset = 15;
- var->transp.length = 1;
- break;
- case 16:
- var->red.offset = 11;
- var->red.length = 5;
- var->green.offset = 5;
- var->green.length = 6;
- var->blue.offset = 0;
- var->blue.length = 5;
- break;
- case 32:
- var->transp.offset = 24;
- var->transp.length = 8;
- var->red.offset = 16;
- var->green.offset = 8;
- var->blue.offset = 0;
- var->red.length = var->green.length = var->blue.length = 8;
- break;
- default:
- DPRINTK("depth not supported: %u\n", var->bits_per_pixel);
- return -EINVAL;
+ if ( bpp != var->bits_per_pixel ) {
+ /* set predefined mode for bits_per_pixel settings */
+
+ switch(var->bits_per_pixel) {
+ case 8:
+ var->red.length = var->green.length = var->blue.length = 8;
+ var->red.offset = var->green.offset = var->blue.offset = 0;
+ var->transp.offset = 0;
+ var->transp.length = 0;
+ break;
+ case 16:
+ var->red.length = var->blue.length = 5;
+ var->green.length = 6;
+ var->transp.length = 0;
+ break;
+ case 32:
+ var->red.length = var->green.length = var->blue.length = 8;
+ var->transp.length = 8;
+ break;
+ default:
+ DPRINTK("depth not supported: %u\n", var->bits_per_pixel);
+ return -EINVAL;
+ }
+ }
+	/* BGRA ordering is assumed */
+ if (var->bits_per_pixel > 8 )
+ {
+ var->blue.offset = 0;
+ var->green.offset = var->blue.length;
+ var->red.offset = var->green.offset + var->green.length;
+ var->transp.offset = var->red.offset + var->red.length;
}
var->height = var->width = -1;
@@ -502,10 +477,9 @@
{
struct pm3_par *par = info->par;
const u32 xres = (info->var.xres + 31) & ~31;
- const int depth = (info->var.bits_per_pixel + 7) & ~7;
+ const unsigned bpp = info->var.bits_per_pixel;
- par->base = pm3fb_shift_bpp(info->var.bits_per_pixel,
- (info->var.yoffset * xres)
+ par->base = pm3fb_shift_bpp(bpp,(info->var.yoffset * xres)
+ info->var.xoffset);
par->video = 0;
@@ -530,12 +504,10 @@
par->video |= PM3VideoControl_DISABLE;
DPRINTK("PM3Video disabled\n");
}
- switch (depth) {
+ switch (bpp) {
case 8:
par->video |= PM3VideoControl_PIXELSIZE_8BIT;
break;
- case 12:
- case 15:
case 16:
par->video |= PM3VideoControl_PIXELSIZE_16BIT;
break;
@@ -548,9 +520,9 @@
}
info->fix.visual =
- (depth == 8) ? FB_VISUAL_PSEUDOCOLOR : FB_VISUAL_TRUECOLOR;
+ (bpp == 8) ? FB_VISUAL_PSEUDOCOLOR : FB_VISUAL_TRUECOLOR;
info->fix.line_length = ((info->var.xres_virtual + 7) & ~7)
- * depth / 8;
+ * bpp / 8;
/* pm3fb_clear_memory(info, 0);*/
pm3fb_clear_colormap(par, 0, 0, 0);
@@ -580,8 +552,8 @@
* var->{color}.length contains length of bitfield
* {hardwarespecific} contains width of DAC
* pseudo_palette[X] is programmed to (X << red.offset) |
- * (X << green.offset) |
- * (X << blue.offset)
+ * (X << green.offset) |
+ * (X << blue.offset)
* RAMDAC[X] is programmed to (red, green, blue)
* color depth = SUM(var->{color}.length)
*
@@ -621,7 +593,6 @@
case 8:
break;
case 16:
- case 24:
case 32:
((u32*)(info->pseudo_palette))[regno] = v;
break;
@@ -643,7 +614,8 @@
par->base = pm3fb_shift_bpp(var->bits_per_pixel,
(var->yoffset * xres)
+ var->xoffset);
- PM3_SLOW_WRITE_REG(par, PM3ScreenBase, par->base);
+ PM3_WAIT(par, 1);
+ PM3_WRITE_REG(par, PM3ScreenBase, par->base);
return 0;
}
@@ -665,31 +637,31 @@
switch (blank_mode) {
case FB_BLANK_UNBLANK:
- video = video | PM3VideoControl_ENABLE;
+ video |= PM3VideoControl_ENABLE;
break;
- case FB_BLANK_NORMAL: /* FIXME */
- video = video & ~(PM3VideoControl_ENABLE);
+ case FB_BLANK_NORMAL:
+ video &= ~(PM3VideoControl_ENABLE);
break;
case FB_BLANK_HSYNC_SUSPEND:
- video = video & ~(PM3VideoControl_HSYNC_MASK |
- PM3VideoControl_BLANK_ACTIVE_LOW);
+ video &= ~(PM3VideoControl_HSYNC_MASK |
+ PM3VideoControl_BLANK_ACTIVE_LOW);
break;
case FB_BLANK_VSYNC_SUSPEND:
- video = video & ~(PM3VideoControl_VSYNC_MASK |
- PM3VideoControl_BLANK_ACTIVE_LOW);
+ video &= ~(PM3VideoControl_VSYNC_MASK |
+ PM3VideoControl_BLANK_ACTIVE_LOW);
break;
case FB_BLANK_POWERDOWN:
- video = video & ~(PM3VideoControl_HSYNC_MASK |
- PM3VideoControl_VSYNC_MASK |
- PM3VideoControl_BLANK_ACTIVE_LOW);
+ video &= ~(PM3VideoControl_HSYNC_MASK |
+ PM3VideoControl_VSYNC_MASK |
+ PM3VideoControl_BLANK_ACTIVE_LOW);
break;
default:
DPRINTK("Unsupported blanking %d\n", blank_mode);
return 1;
}
- PM3_SLOW_WRITE_REG(par,PM3VideoControl, video);
-
+ PM3_WAIT(par, 1);
+ PM3_WRITE_REG(par,PM3VideoControl, video);
return 0;
}
@@ -703,9 +675,9 @@
.fb_set_par = pm3fb_set_par,
.fb_setcolreg = pm3fb_setcolreg,
.fb_pan_display = pm3fb_pan_display,
- .fb_fillrect = cfb_fillrect, /* Needed !!! */
- .fb_copyarea = cfb_copyarea, /* Needed !!! */
- .fb_imageblit = cfb_imageblit, /* Needed !!! */
+ .fb_fillrect = cfb_fillrect,
+ .fb_copyarea = cfb_copyarea,
+ .fb_imageblit = cfb_imageblit,
.fb_blank = pm3fb_blank,
};
@@ -722,7 +694,7 @@
unsigned long memsize = 0, tempBypass, i, temp1, temp2;
unsigned char __iomem *screen_mem;
- pm3fb_fix.smem_len = 64 * 1024 * 1024; /* request full aperture size */
+ pm3fb_fix.smem_len = 64 * 1024l * 1024; /* request full aperture size */
/* Linear frame buffer - request region and map it. */
if (!request_mem_region(pm3fb_fix.smem_start, pm3fb_fix.smem_len,
"pm3fb smem")) {
@@ -744,7 +716,8 @@
DPRINTK("PM3MemBypassWriteMask was: 0x%08lx\n", tempBypass);
- PM3_SLOW_WRITE_REG(par, PM3MemBypassWriteMask, 0xFFFFFFFF);
+ PM3_WAIT(par, 1);
+ PM3_WRITE_REG(par, PM3MemBypassWriteMask, 0xFFFFFFFF);
/* pm3 split up memory, replicates, and do a lot of nasty stuff IMHO ;-) */
for (i = 0; i < 32; i++) {
@@ -765,10 +738,9 @@
if (memsize + 1 == i) {
for (i = 0; i < 32; i++) {
/* Clear first 32MB ; 0 is 0, no need to byteswap */
- writel(0x0000000,
- (screen_mem + (i * 1048576)));
- mb();
+ writel(0x0000000, (screen_mem + (i * 1048576)));
}
+ wmb();
for (i = 32; i < 64; i++) {
fb_writel(i * 0x00345678,
@@ -787,7 +759,8 @@
}
DPRINTK("Second detect pass got %ld MB\n", memsize + 1);
- PM3_SLOW_WRITE_REG(par, PM3MemBypassWriteMask, tempBypass);
+ PM3_WAIT(par, 1);
+ PM3_WRITE_REG(par, PM3MemBypassWriteMask, tempBypass);
iounmap(screen_mem);
release_mem_region(pm3fb_fix.smem_start, pm3fb_fix.smem_len);
@@ -890,7 +863,6 @@
goto err_exit_both;
}
- /* This has to been done !!! */
if (fb_alloc_cmap(&info->cmap, 256, 0) < 0) {
retval = -ENOMEM;
goto err_exit_both;
@@ -907,7 +879,7 @@
}
printk(KERN_INFO "fb%d: %s frame buffer device\n", info->node,
info->fix.id);
- pci_set_drvdata(dev, info); /* or dev_set_drvdata(device, info) */
+ pci_set_drvdata(dev, info);
return 0;
err_exit_all:
@@ -949,8 +921,7 @@
static struct pci_device_id pm3fb_id_table[] = {
{ PCI_VENDOR_ID_3DLABS, 0x0a,
- PCI_ANY_ID, PCI_ANY_ID, PCI_BASE_CLASS_DISPLAY << 16,
- 0xff0000, 0 },
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
{ 0, }
};
@@ -964,6 +935,22 @@
MODULE_DEVICE_TABLE(pci, pm3fb_id_table);
+#ifndef MODULE
+ /*
+ * Setup
+ */
+
+/*
+ * Only necessary if your driver takes special options,
+ * otherwise we fall back on the generic fb_setup().
+ */
+static int __init pm3fb_setup(char *options)
+{
+	/* Parse user specified options (`video=pm3fb:') */
+ return 0;
+}
+#endif /* MODULE */
+
int __init pm3fb_init(void)
{
/*
@@ -985,22 +972,6 @@
pci_unregister_driver(&pm3fb_driver);
}
-#ifndef MODULE
- /*
- * Setup
- */
-
-/*
- * Only necessary if your driver takes special options,
- * otherwise we fall back on the generic fb_setup().
- */
-int __init pm3fb_setup(char *options)
-{
- /* Parse user speficied options (`video=pm3fb:') */
- return 0;
-}
-#endif /* MODULE */
-
module_init(pm3fb_init);
module_exit(pm3fb_exit);
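
Dropping PM3_SLOW_WRITE_REG() above changes register access from one FIFO check per write to a single PM3_WAIT() covering a whole burst: reserve enough input-FIFO slots up front, then issue the writes back to back with only the memory barriers that are actually needed. Sketch, reusing the driver's own helpers:

	PM3_WAIT(par, 3);	/* room for three register writes */
	PM3_WRITE_REG(par, PM3RD_IndexHigh, (r >> 8) & 0xff);
	PM3_WRITE_REG(par, PM3RD_IndexLow, r & 0xff);
	wmb();
	PM3_WRITE_REG(par, PM3RD_IndexedData, v);
	wmb();
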
diff --git a/drivers/video/ps3fb.c b/drivers/video/ps3fb.c
index 9756a72..9cf92ba 100644
--- a/drivers/video/ps3fb.c
+++ b/drivers/video/ps3fb.c
@@ -951,12 +951,14 @@
static struct fb_ops ps3fb_ops = {
.fb_open = ps3fb_open,
.fb_release = ps3fb_release,
+ .fb_read = fb_sys_read,
+ .fb_write = fb_sys_write,
.fb_check_var = ps3fb_check_var,
.fb_set_par = ps3fb_set_par,
.fb_setcolreg = ps3fb_setcolreg,
- .fb_fillrect = cfb_fillrect,
- .fb_copyarea = cfb_copyarea,
- .fb_imageblit = cfb_imageblit,
+ .fb_fillrect = sys_fillrect,
+ .fb_copyarea = sys_copyarea,
+ .fb_imageblit = sys_imageblit,
.fb_mmap = ps3fb_mmap,
.fb_blank = ps3fb_blank,
.fb_ioctl = ps3fb_ioctl,
diff --git a/drivers/video/w100fb.c b/drivers/video/w100fb.c
index 5fc86ea..003c49a 100644
--- a/drivers/video/w100fb.c
+++ b/drivers/video/w100fb.c
@@ -660,7 +660,7 @@
err = -ENODEV;
goto out;
}
- printk(" at 0x%08lx.\n", mem->start+W100_CFG_BASE);
+ printk(" at 0x%08lx.\n", (unsigned long) mem->start+W100_CFG_BASE);
/* Remap the framebuffer */
remapped_fbuf = ioremap_nocache(mem->start+MEM_WINDOW_BASE, MEM_WINDOW_SIZE);
@@ -753,10 +753,14 @@
goto out;
}
- device_create_file(&pdev->dev, &dev_attr_fastpllclk);
- device_create_file(&pdev->dev, &dev_attr_reg_read);
- device_create_file(&pdev->dev, &dev_attr_reg_write);
- device_create_file(&pdev->dev, &dev_attr_flip);
+ err = device_create_file(&pdev->dev, &dev_attr_fastpllclk);
+ err |= device_create_file(&pdev->dev, &dev_attr_reg_read);
+ err |= device_create_file(&pdev->dev, &dev_attr_reg_write);
+ err |= device_create_file(&pdev->dev, &dev_attr_flip);
+
+ if (err != 0)
+ printk(KERN_WARNING "fb%d: failed to register attributes (%d)\n",
+ info->node, err);
printk(KERN_INFO "fb%d: %s frame buffer device\n", info->node, info->fix.id);
return 0;
diff --git a/fs/compat.c b/fs/compat.c
index 1de2331..4db6216 100644
--- a/fs/compat.c
+++ b/fs/compat.c
@@ -1544,9 +1544,10 @@
compat_ulong_t __user *outp, compat_ulong_t __user *exp, s64 *timeout)
{
fd_set_bits fds;
- char *bits;
+ void *bits;
int size, max_fds, ret = -EINVAL;
struct fdtable *fdt;
+ long stack_fds[SELECT_STACK_ALLOC/sizeof(long)];
if (n < 0)
goto out_nofds;
@@ -1564,11 +1565,14 @@
* since we used fdset we need to allocate memory in units of
* long-words.
*/
- ret = -ENOMEM;
size = FDS_BYTES(n);
- bits = kmalloc(6 * size, GFP_KERNEL);
- if (!bits)
- goto out_nofds;
+ bits = stack_fds;
+ if (size > sizeof(stack_fds) / 6) {
+ bits = kmalloc(6 * size, GFP_KERNEL);
+ ret = -ENOMEM;
+ if (!bits)
+ goto out_nofds;
+ }
fds.in = (unsigned long *) bits;
fds.out = (unsigned long *) (bits + size);
fds.ex = (unsigned long *) (bits + 2*size);
@@ -1600,7 +1604,8 @@
compat_set_fd_set(n, exp, fds.res_ex))
ret = -EFAULT;
out:
- kfree(bits);
+ if (bits != stack_fds)
+ kfree(bits);
out_nofds:
return ret;
}
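
The compat select() change above replaces an unconditional kmalloc() with a small on-stack buffer that is only upgraded to a heap allocation when the fd sets do not fit, and freed only in that case. A minimal user-space sketch of the same stack-or-heap pattern (the names and the 256-byte threshold are illustrative, not taken from the patch):

#include <stdlib.h>
#include <string.h>

#define STACK_ALLOC 256			/* stands in for SELECT_STACK_ALLOC */

static int process_bits(size_t size)
{
	long stack_buf[STACK_ALLOC / sizeof(long)];
	void *buf = stack_buf;
	int ret = 0;

	if (size > sizeof(stack_buf)) {		/* too big for the stack copy */
		buf = malloc(size);
		if (!buf)
			return -1;		/* -ENOMEM in the kernel version */
	}

	memset(buf, 0, size);			/* ... real work on the buffer ... */

	if (buf != stack_buf)			/* free only what came from the heap */
		free(buf);
	return ret;
}
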
diff --git a/fs/ecryptfs/file.c b/fs/ecryptfs/file.c
index 9881b5c..59288d8 100644
--- a/fs/ecryptfs/file.c
+++ b/fs/ecryptfs/file.c
@@ -33,63 +33,6 @@
#include "ecryptfs_kernel.h"
/**
- * ecryptfs_llseek
- * @file: File we are seeking in
- * @offset: The offset to seek to
- * @origin: 2 - offset from i_size; 1 - offset from f_pos
- *
- * Returns the position we have seeked to, or negative on error
- */
-static loff_t ecryptfs_llseek(struct file *file, loff_t offset, int origin)
-{
- loff_t rv;
- loff_t new_end_pos;
- int rc;
- int expanding_file = 0;
- struct inode *inode = file->f_mapping->host;
-
- /* If our offset is past the end of our file, we're going to
- * need to grow it so we have a valid length of 0's */
- new_end_pos = offset;
- switch (origin) {
- case 2:
- new_end_pos += i_size_read(inode);
- expanding_file = 1;
- break;
- case 1:
- new_end_pos += file->f_pos;
- if (new_end_pos > i_size_read(inode)) {
- ecryptfs_printk(KERN_DEBUG, "new_end_pos(=[0x%.16x]) "
- "> i_size_read(inode)(=[0x%.16x])\n",
- new_end_pos, i_size_read(inode));
- expanding_file = 1;
- }
- break;
- default:
- if (new_end_pos > i_size_read(inode)) {
- ecryptfs_printk(KERN_DEBUG, "new_end_pos(=[0x%.16x]) "
- "> i_size_read(inode)(=[0x%.16x])\n",
- new_end_pos, i_size_read(inode));
- expanding_file = 1;
- }
- }
- ecryptfs_printk(KERN_DEBUG, "new_end_pos = [0x%.16x]\n", new_end_pos);
- if (expanding_file) {
- rc = ecryptfs_truncate(file->f_path.dentry, new_end_pos);
- if (rc) {
- rv = rc;
- ecryptfs_printk(KERN_ERR, "Error on attempt to "
- "truncate to (higher) offset [0x%.16x];"
- " rc = [%d]\n", new_end_pos, rc);
- goto out;
- }
- }
- rv = generic_file_llseek(file, offset, origin);
-out:
- return rv;
-}
-
-/**
* ecryptfs_read_update_atime
*
* generic_file_read updates the atime of upper layer inode. But, it
@@ -425,7 +368,7 @@
};
const struct file_operations ecryptfs_main_fops = {
- .llseek = ecryptfs_llseek,
+ .llseek = generic_file_llseek,
.read = do_sync_read,
.aio_read = ecryptfs_read_update_atime,
.write = do_sync_write,
diff --git a/fs/ecryptfs/mmap.c b/fs/ecryptfs/mmap.c
index 88ea669..55cec98 100644
--- a/fs/ecryptfs/mmap.c
+++ b/fs/ecryptfs/mmap.c
@@ -376,8 +376,15 @@
return 0;
}
-static int ecryptfs_prepare_write(struct file *file, struct page *page,
- unsigned from, unsigned to)
+/**
+ * eCryptfs does not currently support holes. When writing after a
+ * seek past the end of the file, eCryptfs fills in 0's through to the
+ * current location. The code to fill in the 0's to all the
+ * intermediate pages calls ecryptfs_prepare_write_no_truncate().
+ */
+static int
+ecryptfs_prepare_write_no_truncate(struct file *file, struct page *page,
+ unsigned from, unsigned to)
{
int rc = 0;
@@ -390,6 +397,31 @@
return rc;
}
+static int ecryptfs_prepare_write(struct file *file, struct page *page,
+ unsigned from, unsigned to)
+{
+ loff_t pos;
+ int rc = 0;
+
+ if (from == 0 && to == PAGE_CACHE_SIZE)
+ goto out; /* If we are writing a full page, it will be
+ up to date. */
+ if (!PageUptodate(page))
+ rc = ecryptfs_do_readpage(file, page, page->index);
+ pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
+ if (pos > i_size_read(page->mapping->host)) {
+ rc = ecryptfs_truncate(file->f_path.dentry, pos);
+ if (rc) {
+ printk(KERN_ERR "Error on attempt to "
+ "truncate to (higher) offset [%lld];"
+ " rc = [%d]\n", pos, rc);
+ goto out;
+ }
+ }
+out:
+ return rc;
+}
+
int ecryptfs_writepage_and_release_lower_page(struct page *lower_page,
struct inode *lower_inode,
struct writeback_control *wbc)
@@ -744,10 +776,10 @@
rc = PTR_ERR(tmp_page);
goto out;
}
- rc = ecryptfs_prepare_write(file, tmp_page, start, start + num_zeros);
- if (rc) {
+ if ((rc = ecryptfs_prepare_write_no_truncate(file, tmp_page, start,
+ (start + num_zeros)))) {
ecryptfs_printk(KERN_ERR, "Error preparing to write zero's "
- "to remainder of page at index [0x%.16x]\n",
+ "to page at index [0x%.16x]\n",
index);
page_cache_release(tmp_page);
goto out;
diff --git a/fs/exec.c b/fs/exec.c
index 0b68588..f20561f 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -134,6 +134,9 @@
if (error)
goto out;
+ error = -EACCES;
+ if (nd.mnt->mnt_flags & MNT_NOEXEC)
+ goto exit;
error = -EINVAL;
if (!S_ISREG(nd.dentry->d_inode->i_mode))
goto exit;
diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
index 8890eba..bd5a772 100644
--- a/fs/fuse/dir.c
+++ b/fs/fuse/dir.c
@@ -485,7 +485,7 @@
static int fuse_create(struct inode *dir, struct dentry *entry, int mode,
struct nameidata *nd)
{
- if (nd && (nd->flags & LOOKUP_CREATE)) {
+ if (nd && (nd->flags & LOOKUP_OPEN)) {
int err = fuse_create_open(dir, entry, mode, nd);
if (err != -ENOSYS)
return err;
diff --git a/fs/fuse/file.c b/fs/fuse/file.c
index d0ed60b..adf7995 100644
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -610,7 +610,9 @@
ssize_t res;
/* Don't allow parallel writes to the same file */
mutex_lock(&inode->i_mutex);
- res = fuse_direct_io(file, buf, count, ppos, 1);
+ res = generic_write_checks(file, ppos, &count, 0);
+ if (!res)
+ res = fuse_direct_io(file, buf, count, ppos, 1);
mutex_unlock(&inode->i_mutex);
return res;
}
diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c
index 78f7a1d..9804c0c 100644
--- a/fs/fuse/inode.c
+++ b/fs/fuse/inode.c
@@ -454,6 +454,7 @@
.destroy_inode = fuse_destroy_inode,
.read_inode = fuse_read_inode,
.clear_inode = fuse_clear_inode,
+ .drop_inode = generic_delete_inode,
.remount_fs = fuse_remount_fs,
.put_super = fuse_put_super,
.umount_begin = fuse_umount_begin,
diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c
index 345aa5c..0c542ec 100644
--- a/fs/nfs/direct.c
+++ b/fs/nfs/direct.c
@@ -122,19 +122,25 @@
return -EINVAL;
}
-static void nfs_direct_dirty_pages(struct page **pages, int npages)
+static void nfs_direct_dirty_pages(struct page **pages, unsigned int pgbase, size_t count)
{
- int i;
+ unsigned int npages;
+ unsigned int i;
+
+ if (count == 0)
+ return;
+ pages += (pgbase >> PAGE_SHIFT);
+ npages = (count + (pgbase & ~PAGE_MASK) + PAGE_SIZE - 1) >> PAGE_SHIFT;
for (i = 0; i < npages; i++) {
struct page *page = pages[i];
if (!PageCompound(page))
- set_page_dirty_lock(page);
+ set_page_dirty(page);
}
}
-static void nfs_direct_release_pages(struct page **pages, int npages)
+static void nfs_direct_release_pages(struct page **pages, unsigned int npages)
{
- int i;
+ unsigned int i;
for (i = 0; i < npages; i++)
page_cache_release(pages[i]);
}
@@ -224,17 +230,18 @@
if (nfs_readpage_result(task, data) != 0)
return;
- nfs_direct_dirty_pages(data->pagevec, data->npages);
- nfs_direct_release_pages(data->pagevec, data->npages);
-
spin_lock(&dreq->lock);
-
- if (likely(task->tk_status >= 0))
- dreq->count += data->res.count;
- else
+ if (unlikely(task->tk_status < 0)) {
dreq->error = task->tk_status;
-
- spin_unlock(&dreq->lock);
+ spin_unlock(&dreq->lock);
+ } else {
+ dreq->count += data->res.count;
+ spin_unlock(&dreq->lock);
+ nfs_direct_dirty_pages(data->pagevec,
+ data->args.pgbase,
+ data->res.count);
+ }
+ nfs_direct_release_pages(data->pagevec, data->npages);
if (put_dreq(dreq))
nfs_direct_complete(dreq);
@@ -279,9 +286,12 @@
result = get_user_pages(current, current->mm, user_addr,
data->npages, 1, 0, data->pagevec, NULL);
up_read(&current->mm->mmap_sem);
- if (unlikely(result < data->npages)) {
- if (result > 0)
- nfs_direct_release_pages(data->pagevec, result);
+ if (result < 0) {
+ nfs_readdata_release(data);
+ break;
+ }
+ if ((unsigned)result < data->npages) {
+ nfs_direct_release_pages(data->pagevec, result);
nfs_readdata_release(data);
break;
}
@@ -610,9 +620,12 @@
result = get_user_pages(current, current->mm, user_addr,
data->npages, 0, 0, data->pagevec, NULL);
up_read(&current->mm->mmap_sem);
- if (unlikely(result < data->npages)) {
- if (result > 0)
- nfs_direct_release_pages(data->pagevec, result);
+ if (result < 0) {
+ nfs_writedata_release(data);
+ break;
+ }
+ if ((unsigned)result < data->npages) {
+ nfs_direct_release_pages(data->pagevec, result);
nfs_writedata_release(data);
break;
}
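
The rewritten nfs_direct_dirty_pages() above dirties only the pages that actually received data, derived from the request's page base and the byte count the server returned, rather than blindly dirtying all of data->npages. A worked example of the arithmetic, assuming a 4096-byte page (PAGE_SHIFT == 12):

/*
 * pgbase = 5000, count = 3000:
 *
 *   pages += pgbase >> PAGE_SHIFT;                    skips 1 whole leading page
 *   npages = (count + (pgbase & ~PAGE_MASK) + PAGE_SIZE - 1) >> PAGE_SHIFT
 *          = (3000 + 904 + 4095) >> 12
 *          = 7999 >> 12
 *          = 1                                        only one page is dirtied
 *
 * With count == 0 the function now returns immediately instead of
 * touching any pages at all.
 */
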
diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c
index cbdd1c6..c5bb51a 100644
--- a/fs/nfs/pagelist.c
+++ b/fs/nfs/pagelist.c
@@ -355,6 +355,26 @@
nfs_pageio_doio(desc);
}
+/**
+ * nfs_pageio_cond_complete - Conditional I/O completion
+ * @desc: pointer to io descriptor
+ * @index: page index
+ *
+ * It is important to ensure that processes don't try to take locks
+ * on non-contiguous ranges of pages as that might deadlock. This
+ * function should be called before attempting to wait on a locked
+ * nfs_page. It will complete the I/O if the page index 'index'
+ * is not contiguous with the existing list of pages in 'desc'.
+ */
+void nfs_pageio_cond_complete(struct nfs_pageio_descriptor *desc, pgoff_t index)
+{
+ if (!list_empty(&desc->pg_list)) {
+ struct nfs_page *prev = nfs_list_entry(desc->pg_list.prev);
+ if (index != prev->wb_index + 1)
+ nfs_pageio_doio(desc);
+ }
+}
+
#define NFS_SCAN_MAXENTRIES 16
/**
* nfs_scan_list - Scan a list for matching requests
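
A caller walking pages in index order is expected to call nfs_pageio_cond_complete() with the index it is about to lock, so that any already-queued, non-contiguous I/O is submitted first and cannot deadlock against the request lock it then waits on; that is how the fs/nfs/write.c hunk below uses it. A minimal sketch of such a caller (the surrounding loop is illustrative; only nfs_pageio_cond_complete() and nfs_pageio_complete() come from this code):

static void flush_pages_in_order(struct nfs_pageio_descriptor *desc,
				 struct page **pages, unsigned int npages)
{
	unsigned int i;

	for (i = 0; i < npages; i++) {
		/* submit queued I/O if pages[i] would not extend it */
		nfs_pageio_cond_complete(desc, pages[i]->index);
		/* ... lock and queue pages[i], as nfs_page_async_flush() does ... */
	}
	nfs_pageio_complete(desc);
}
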
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index b084c03..af344a1 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -273,8 +273,6 @@
* request as dirty (in which case we don't care).
*/
spin_unlock(req_lock);
- /* Prevent deadlock! */
- nfs_pageio_complete(pgio);
ret = nfs_wait_on_request(req);
nfs_release_request(req);
if (ret != 0)
@@ -321,6 +319,8 @@
pgio = &mypgio;
}
+ nfs_pageio_cond_complete(pgio, page->index);
+
err = nfs_page_async_flush(pgio, page);
if (err <= 0)
goto out;
@@ -329,6 +329,8 @@
if (!offset)
goto out;
+ nfs_pageio_cond_complete(pgio, page->index);
+
ctx = nfs_find_open_context(inode, NULL, FMODE_WRITE);
if (ctx == NULL) {
err = -EBADF;
diff --git a/fs/reiserfs/dir.c b/fs/reiserfs/dir.c
index 9c23fee..ffbfc2c 100644
--- a/fs/reiserfs/dir.c
+++ b/fs/reiserfs/dir.c
@@ -10,7 +10,7 @@
#include <linux/buffer_head.h>
#include <asm/uaccess.h>
-extern struct reiserfs_key MIN_KEY;
+extern const struct reiserfs_key MIN_KEY;
static int reiserfs_readdir(struct file *, void *, filldir_t);
static int reiserfs_dir_fsync(struct file *filp, struct dentry *dentry,
diff --git a/fs/signalfd.c b/fs/signalfd.c
index 7cfeab4..f1da892 100644
--- a/fs/signalfd.c
+++ b/fs/signalfd.c
@@ -11,6 +11,8 @@
* Now using anonymous inode source.
* Thanks to Oleg Nesterov for useful code review and suggestions.
* More comments and suggestions from Arnd Bergmann.
+ * Sat May 19, 2007: Davi E. M. Arnaut <davi@haxent.com.br>
+ * Retrieve multiple signals with one read() call
*/
#include <linux/file.h>
@@ -206,6 +208,59 @@
return err ? -EFAULT: sizeof(*uinfo);
}
+static ssize_t signalfd_dequeue(struct signalfd_ctx *ctx, siginfo_t *info,
+ int nonblock)
+{
+ ssize_t ret;
+ struct signalfd_lockctx lk;
+ DECLARE_WAITQUEUE(wait, current);
+
+ if (!signalfd_lock(ctx, &lk))
+ return 0;
+
+ ret = dequeue_signal(lk.tsk, &ctx->sigmask, info);
+ switch (ret) {
+ case 0:
+ if (!nonblock)
+ break;
+ ret = -EAGAIN;
+ default:
+ signalfd_unlock(&lk);
+ return ret;
+ }
+
+ add_wait_queue(&ctx->wqh, &wait);
+ for (;;) {
+ set_current_state(TASK_INTERRUPTIBLE);
+ ret = dequeue_signal(lk.tsk, &ctx->sigmask, info);
+ signalfd_unlock(&lk);
+ if (ret != 0)
+ break;
+ if (signal_pending(current)) {
+ ret = -ERESTARTSYS;
+ break;
+ }
+ schedule();
+ ret = signalfd_lock(ctx, &lk);
+ if (unlikely(!ret)) {
+ /*
+ * Let the caller read zero byte, ala socket
+ * recv() when the peer disconnect. This test
+ * must be done before doing a dequeue_signal(),
+ * because if the sighand has been orphaned,
+ * the dequeue_signal() call is going to crash
+ * because ->sighand will be long gone.
+ */
+ break;
+ }
+ }
+
+ remove_wait_queue(&ctx->wqh, &wait);
+ __set_current_state(TASK_RUNNING);
+
+ return ret;
+}
+
/*
* Returns either the size of a "struct signalfd_siginfo", or zero if the
* sighand we are attached to, has been orphaned. The "count" parameter
@@ -215,55 +270,30 @@
loff_t *ppos)
{
struct signalfd_ctx *ctx = file->private_data;
- ssize_t res = 0;
- int locked, signo;
+ struct signalfd_siginfo __user *siginfo;
+ int nonblock = file->f_flags & O_NONBLOCK;
+ ssize_t ret, total = 0;
siginfo_t info;
- struct signalfd_lockctx lk;
- DECLARE_WAITQUEUE(wait, current);
- if (count < sizeof(struct signalfd_siginfo))
+ count /= sizeof(struct signalfd_siginfo);
+ if (!count)
return -EINVAL;
- locked = signalfd_lock(ctx, &lk);
- if (!locked)
- return 0;
- res = -EAGAIN;
- signo = dequeue_signal(lk.tsk, &ctx->sigmask, &info);
- if (signo == 0 && !(file->f_flags & O_NONBLOCK)) {
- add_wait_queue(&ctx->wqh, &wait);
- for (;;) {
- set_current_state(TASK_INTERRUPTIBLE);
- signo = dequeue_signal(lk.tsk, &ctx->sigmask, &info);
- if (signo != 0)
- break;
- if (signal_pending(current)) {
- res = -ERESTARTSYS;
- break;
- }
- signalfd_unlock(&lk);
- schedule();
- locked = signalfd_lock(ctx, &lk);
- if (unlikely(!locked)) {
- /*
- * Let the caller read zero byte, ala socket
- * recv() when the peer disconnect. This test
- * must be done before doing a dequeue_signal(),
- * because if the sighand has been orphaned,
- * the dequeue_signal() call is going to crash.
- */
- res = 0;
- break;
- }
- }
- remove_wait_queue(&ctx->wqh, &wait);
- __set_current_state(TASK_RUNNING);
- }
- if (likely(locked))
- signalfd_unlock(&lk);
- if (likely(signo))
- res = signalfd_copyinfo((struct signalfd_siginfo __user *) buf,
- &info);
- return res;
+ siginfo = (struct signalfd_siginfo __user *) buf;
+
+ do {
+ ret = signalfd_dequeue(ctx, &info, nonblock);
+ if (unlikely(ret <= 0))
+ break;
+ ret = signalfd_copyinfo(siginfo, &info);
+ if (ret < 0)
+ break;
+ siginfo++;
+ total += ret;
+ nonblock = 1;
+ } while (--count);
+
+ return total ? total : ret;
}
static const struct file_operations signalfd_fops = {
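
With the rewrite above, a single read() on a signalfd descriptor can return several struct signalfd_siginfo records: the first dequeue may block, subsequent ones are forced non-blocking. An illustrative user-space consumer, assuming 'sfd' was already obtained from the signalfd system call and that struct signalfd_siginfo comes from <linux/signalfd.h>:

#include <linux/signalfd.h>
#include <unistd.h>

static void drain_signals(int sfd)
{
	struct signalfd_siginfo info[8];
	ssize_t n, i;

	n = read(sfd, info, sizeof(info));	/* may now return more than one record */
	if (n <= 0)
		return;
	for (i = 0; i < n / (ssize_t) sizeof(info[0]); i++) {
		/* handle info[i].ssi_signo, info[i].ssi_pid, ... */
	}
}
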
diff --git a/include/asm-generic/bug.h b/include/asm-generic/bug.h
index 14fae1f..7f30cce 100644
--- a/include/asm-generic/bug.h
+++ b/include/asm-generic/bug.h
@@ -35,7 +35,7 @@
#define WARN_ON(condition) ({ \
typeof(condition) __ret_warn_on = (condition); \
if (unlikely(__ret_warn_on)) { \
- printk("BUG: at %s:%d %s()\n", __FILE__, \
+ printk("WARNING: at %s:%d %s()\n", __FILE__, \
__LINE__, __FUNCTION__); \
dump_stack(); \
} \
diff --git a/include/asm-i386/atomic.h b/include/asm-i386/atomic.h
index 0baa2f8..437aac8 100644
--- a/include/asm-i386/atomic.h
+++ b/include/asm-i386/atomic.h
@@ -182,7 +182,7 @@
int __i;
#ifdef CONFIG_M386
unsigned long flags;
- if(unlikely(boot_cpu_data.x86==3))
+ if(unlikely(boot_cpu_data.x86 <= 3))
goto no_xadd;
#endif
/* Modern 486+ processor */
diff --git a/include/asm-i386/local.h b/include/asm-i386/local.h
index e13d3e9..6e85975 100644
--- a/include/asm-i386/local.h
+++ b/include/asm-i386/local.h
@@ -135,7 +135,7 @@
long __i;
#ifdef CONFIG_M386
unsigned long flags;
- if(unlikely(boot_cpu_data.x86==3))
+ if(unlikely(boot_cpu_data.x86 <= 3))
goto no_xadd;
#endif
/* Modern 486+ processor */
diff --git a/include/linux/capability.h b/include/linux/capability.h
index 6548b35..bbf8df7 100644
--- a/include/linux/capability.h
+++ b/include/linux/capability.h
@@ -16,6 +16,8 @@
#include <linux/types.h>
#include <linux/compiler.h>
+struct task_struct;
+
/* User-level do most of the mapping between kernel and user
capabilities based on the version tag given by the kernel. The
kernel might be somewhat backwards compatible, but don't bet on
diff --git a/include/linux/freezer.h b/include/linux/freezer.h
index 5e75e26..4631086 100644
--- a/include/linux/freezer.h
+++ b/include/linux/freezer.h
@@ -37,25 +37,25 @@
/*
* Wake up a frozen process
+ *
+ * task_lock() is taken to prevent the race with refrigerator() which may
+ * occur if the freezing of tasks fails. Namely, without the lock, if the
+ * freezing of tasks failed, thaw_tasks() might have run before a task in
+ * refrigerator() could call frozen_process(), in which case the task would be
+ * frozen and no one would thaw it.
*/
static inline int thaw_process(struct task_struct *p)
{
+ task_lock(p);
if (frozen(p)) {
p->flags &= ~PF_FROZEN;
+ task_unlock(p);
wake_up_process(p);
return 1;
}
- return 0;
-}
-
-/*
- * freezing is complete, mark process as frozen
- */
-static inline void frozen_process(struct task_struct *p)
-{
- p->flags |= PF_FROZEN;
- wmb();
clear_tsk_thread_flag(p, TIF_FREEZE);
+ task_unlock(p);
+ return 0;
}
extern void refrigerator(void);
@@ -71,14 +71,55 @@
return 0;
}
-extern void thaw_some_processes(int all);
+/*
+ * The PF_FREEZER_SKIP flag should be set by a vfork parent right before it
+ * calls wait_for_completion(&vfork) and reset right after it returns from this
+ * function. Next, the parent should call try_to_freeze() to freeze itself
+ * appropriately in case the child has exited before the freezing of tasks is
+ * complete. However, we don't want kernel threads to be frozen in unexpected
+ * places, so we allow them to block freeze_processes() instead or to set
+ * PF_NOFREEZE if needed and PF_FREEZER_SKIP is only set for userland vfork
+ * parents. Fortunately, in the ____call_usermodehelper() case the parent won't
+ * really block freeze_processes(), since ____call_usermodehelper() (the child)
+ * does a little before exec/exit and it can't be frozen before waking up the
+ * parent.
+ */
+
+/*
+ * If the current task is a user space one, tell the freezer not to count it as
+ * freezable.
+ */
+static inline void freezer_do_not_count(void)
+{
+ if (current->mm)
+ current->flags |= PF_FREEZER_SKIP;
+}
+
+/*
+ * If the current task is a user space one, tell the freezer to count it as
+ * freezable again and try to freeze it.
+ */
+static inline void freezer_count(void)
+{
+ if (current->mm) {
+ current->flags &= ~PF_FREEZER_SKIP;
+ try_to_freeze();
+ }
+}
+
+/*
+ * Check if the task should be counted as freezeable by the freezer
+ */
+static inline int freezer_should_skip(struct task_struct *p)
+{
+ return !!(p->flags & PF_FREEZER_SKIP);
+}
#else
static inline int frozen(struct task_struct *p) { return 0; }
static inline int freezing(struct task_struct *p) { return 0; }
static inline void freeze(struct task_struct *p) { BUG(); }
static inline int thaw_process(struct task_struct *p) { return 1; }
-static inline void frozen_process(struct task_struct *p) { BUG(); }
static inline void refrigerator(void) {}
static inline int freeze_processes(void) { BUG(); return 0; }
@@ -86,5 +127,7 @@
static inline int try_to_freeze(void) { return 0; }
-
+static inline void freezer_do_not_count(void) {}
+static inline void freezer_count(void) {}
+static inline int freezer_should_skip(struct task_struct *p) { return 0; }
#endif
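
The vfork-parent sequence described in the comment above is wired up in the kernel/fork.c hunk later in this patch; in outline, the parent brackets its wait on the vfork completion like this:

	if (clone_flags & CLONE_VFORK) {
		freezer_do_not_count();		/* don't let the wait block freeze_processes() */
		wait_for_completion(&vfork);	/* child execs or exits */
		freezer_count();		/* count as freezable again and try_to_freeze() */
	}
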
diff --git a/include/linux/genhd.h b/include/linux/genhd.h
index 4c03ee3..9756fc1 100644
--- a/include/linux/genhd.h
+++ b/include/linux/genhd.h
@@ -66,6 +66,7 @@
#include <linux/smp.h>
#include <linux/string.h>
#include <linux/fs.h>
+#include <linux/workqueue.h>
struct partition {
unsigned char boot_ind; /* 0x80 - active */
@@ -94,6 +95,7 @@
#define GENHD_FL_REMOVABLE 1
#define GENHD_FL_DRIVERFS 2
+#define GENHD_FL_MEDIA_CHANGE_NOTIFY 4
#define GENHD_FL_CD 8
#define GENHD_FL_UP 16
#define GENHD_FL_SUPPRESS_PARTITION_INFO 32
@@ -138,6 +140,7 @@
#else
struct disk_stats dkstats;
#endif
+ struct work_struct async_notify;
};
/* Structure for sysfs attributes on block devices */
@@ -419,7 +422,7 @@
extern struct gendisk *alloc_disk(int minors);
extern struct kobject *get_disk(struct gendisk *disk);
extern void put_disk(struct gendisk *disk);
-
+extern void genhd_media_change_notify(struct gendisk *disk);
extern void blk_register_region(dev_t dev, unsigned long range,
struct module *module,
struct kobject *(*probe)(dev_t, int *, void *),
diff --git a/include/linux/nfs_page.h b/include/linux/nfs_page.h
index 41afab6..bd193af 100644
--- a/include/linux/nfs_page.h
+++ b/include/linux/nfs_page.h
@@ -81,6 +81,7 @@
extern int nfs_pageio_add_request(struct nfs_pageio_descriptor *,
struct nfs_page *);
extern void nfs_pageio_complete(struct nfs_pageio_descriptor *desc);
+extern void nfs_pageio_cond_complete(struct nfs_pageio_descriptor *, pgoff_t);
extern int nfs_wait_on_request(struct nfs_page *);
extern void nfs_unlock_request(struct nfs_page *req);
extern int nfs_set_page_writeback_locked(struct nfs_page *req);
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index 62b3e00..4712e26 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -371,6 +371,7 @@
#define PCI_DEVICE_ID_ATI_IXP600_SMBUS 0x4385
#define PCI_DEVICE_ID_ATI_IXP600_IDE 0x438c
#define PCI_DEVICE_ID_ATI_IXP700_SATA 0x4390
+#define PCI_DEVICE_ID_ATI_IXP700_IDE 0x439c
#define PCI_VENDOR_ID_VLSI 0x1004
#define PCI_DEVICE_ID_VLSI_82C592 0x0005
diff --git a/include/linux/raid/bitmap.h b/include/linux/raid/bitmap.h
index 6db9a4c..dd5a05d 100644
--- a/include/linux/raid/bitmap.h
+++ b/include/linux/raid/bitmap.h
@@ -232,6 +232,7 @@
struct page **filemap; /* list of cache pages for the file */
unsigned long *filemap_attr; /* attributes associated w/ filemap pages */
unsigned long file_pages; /* number of pages in the file */
+ int last_page_size; /* bytes in the last page */
unsigned long flags;
diff --git a/include/linux/sched.h b/include/linux/sched.h
index a81897e..d58e74b 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1182,6 +1182,7 @@
#define PF_SPREAD_SLAB 0x02000000 /* Spread some slab caches over cpuset */
#define PF_MEMPOLICY 0x10000000 /* Non-default NUMA mempolicy */
#define PF_MUTEX_TESTER 0x20000000 /* Thread belongs to the rt mutex tester */
+#define PF_FREEZER_SKIP 0x40000000 /* Freezer should not count it as freezeable */
/*
* Only the _current_ task can read/write to tsk->flags, but other
@@ -1615,11 +1616,13 @@
return 0;
}
-/* Reevaluate whether the task has signals pending delivery.
- This is required every time the blocked sigset_t changes.
- callers must hold sighand->siglock. */
-
-extern FASTCALL(void recalc_sigpending_tsk(struct task_struct *t));
+/*
+ * Reevaluate whether the task has signals pending delivery.
+ * Wake the task if so.
+ * This is required every time the blocked sigset_t changes.
+ * callers must hold sighand->siglock.
+ */
+extern void recalc_sigpending_and_wake(struct task_struct *t);
extern void recalc_sigpending(void);
extern void signal_wake_up(struct task_struct *t, int resume_stopped);
diff --git a/kernel/exit.c b/kernel/exit.c
index c6d14b8..5b888c2 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -762,11 +762,8 @@
read_lock(&tasklist_lock);
spin_lock_irq(&tsk->sighand->siglock);
for (t = next_thread(tsk); t != tsk; t = next_thread(t))
- if (!signal_pending(t) && !(t->flags & PF_EXITING)) {
- recalc_sigpending_tsk(t);
- if (signal_pending(t))
- signal_wake_up(t, 0);
- }
+ if (!signal_pending(t) && !(t->flags & PF_EXITING))
+ recalc_sigpending_and_wake(t);
spin_unlock_irq(&tsk->sighand->siglock);
read_unlock(&tasklist_lock);
}
diff --git a/kernel/fork.c b/kernel/fork.c
index 87069cf..73ad5cd 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -45,6 +45,7 @@
#include <linux/acct.h>
#include <linux/tsacct_kern.h>
#include <linux/cn_proc.h>
+#include <linux/freezer.h>
#include <linux/delayacct.h>
#include <linux/taskstats_kern.h>
#include <linux/random.h>
@@ -1405,7 +1406,9 @@
}
if (clone_flags & CLONE_VFORK) {
+ freezer_do_not_count();
wait_for_completion(&vfork);
+ freezer_count();
if (unlikely (current->ptrace & PT_TRACE_VFORK_DONE)) {
current->ptrace_message = nr;
ptrace_notify ((PTRACE_EVENT_VFORK_DONE << 8) | SIGTRAP);
diff --git a/kernel/irq/spurious.c b/kernel/irq/spurious.c
index b0d81aa..bd9e272 100644
--- a/kernel/irq/spurious.c
+++ b/kernel/irq/spurious.c
@@ -135,6 +135,39 @@
}
}
+static inline int try_misrouted_irq(unsigned int irq, struct irq_desc *desc, irqreturn_t action_ret)
+{
+ struct irqaction *action;
+
+ if (!irqfixup)
+ return 0;
+
+ /* We didn't actually handle the IRQ - see if it was misrouted? */
+ if (action_ret == IRQ_NONE)
+ return 1;
+
+ /*
+ * But for 'irqfixup == 2' we also do it for handled interrupts if
+ * they are marked as IRQF_IRQPOLL (or for irq zero, which is the
+ * traditional PC timer interrupt.. Legacy)
+ */
+ if (irqfixup < 2)
+ return 0;
+
+ if (!irq)
+ return 1;
+
+ /*
+ * Since we don't get the descriptor lock, "action" can
+ * change under us. We don't really care, but we don't
+ * want to follow a NULL pointer. So tell the compiler to
+ * just load it once by using a barrier.
+ */
+ action = desc->action;
+ barrier();
+ return action && (action->flags & IRQF_IRQPOLL);
+}
+
void note_interrupt(unsigned int irq, struct irq_desc *desc,
irqreturn_t action_ret)
{
@@ -144,15 +177,10 @@
report_bad_irq(irq, desc, action_ret);
}
- if (unlikely(irqfixup)) {
- /* Don't punish working computers */
- if ((irqfixup == 2 && ((irq == 0) ||
- (desc->action->flags & IRQF_IRQPOLL))) ||
- action_ret == IRQ_NONE) {
- int ok = misrouted_irq(irq);
- if (action_ret == IRQ_NONE)
- desc->irqs_unhandled -= ok;
- }
+ if (unlikely(try_misrouted_irq(irq, desc, action_ret))) {
+ int ok = misrouted_irq(irq);
+ if (action_ret == IRQ_NONE)
+ desc->irqs_unhandled -= ok;
}
desc->irq_count++;
diff --git a/kernel/kthread.c b/kernel/kthread.c
index df8a8e8..bbd51b8 100644
--- a/kernel/kthread.c
+++ b/kernel/kthread.c
@@ -70,7 +70,7 @@
data = create->data;
/* OK, tell user we're spawned, wait for stop or wakeup */
- __set_current_state(TASK_INTERRUPTIBLE);
+ __set_current_state(TASK_UNINTERRUPTIBLE);
complete(&create->started);
schedule();
@@ -162,7 +162,10 @@
*/
void kthread_bind(struct task_struct *k, unsigned int cpu)
{
- BUG_ON(k->state != TASK_INTERRUPTIBLE);
+ if (k->state != TASK_UNINTERRUPTIBLE) {
+ WARN_ON(1);
+ return;
+ }
/* Must have done schedule() in kthread() before we set_task_cpu */
wait_task_inactive(k);
set_task_cpu(k, cpu);
diff --git a/kernel/power/process.c b/kernel/power/process.c
index 0884193..e0233d8 100644
--- a/kernel/power/process.c
+++ b/kernel/power/process.c
@@ -31,16 +31,36 @@
return 1;
}
+/*
+ * freezing is complete, mark current process as frozen
+ */
+static inline void frozen_process(void)
+{
+ if (!unlikely(current->flags & PF_NOFREEZE)) {
+ current->flags |= PF_FROZEN;
+ wmb();
+ }
+ clear_tsk_thread_flag(current, TIF_FREEZE);
+}
+
/* Refrigerator is place where frozen processes are stored :-). */
void refrigerator(void)
{
/* Hmm, should we be allowed to suspend when there are realtime
processes around? */
long save;
+
+ task_lock(current);
+ if (freezing(current)) {
+ frozen_process();
+ task_unlock(current);
+ } else {
+ task_unlock(current);
+ return;
+ }
save = current->state;
pr_debug("%s entered refrigerator\n", current->comm);
- frozen_process(current);
spin_lock_irq(¤t->sighand->siglock);
recalc_sigpending(); /* We sent fake signal, clean it up */
spin_unlock_irq(¤t->sighand->siglock);
@@ -81,7 +101,7 @@
pr_debug(" clean up: %s\n", p->comm);
do_not_freeze(p);
spin_lock_irqsave(&p->sighand->siglock, flags);
- recalc_sigpending_tsk(p);
+ recalc_sigpending_and_wake(p);
spin_unlock_irqrestore(&p->sighand->siglock, flags);
}
}
@@ -112,22 +132,12 @@
cancel_freezing(p);
continue;
}
- if (is_user_space(p)) {
- if (!freeze_user_space)
- continue;
+ if (freeze_user_space && !is_user_space(p))
+ continue;
- /* Freeze the task unless there is a vfork
- * completion pending
- */
- if (!p->vfork_done)
- freeze_process(p);
- } else {
- if (freeze_user_space)
- continue;
-
- freeze_process(p);
- }
- todo++;
+ freeze_process(p);
+ if (!freezer_should_skip(p))
+ todo++;
} while_each_thread(g, p);
read_unlock(&tasklist_lock);
yield(); /* Yield is okay here */
@@ -149,13 +159,16 @@
TIMEOUT / HZ, todo);
read_lock(&tasklist_lock);
do_each_thread(g, p) {
- if (is_user_space(p) == !freeze_user_space)
+ if (freeze_user_space && !is_user_space(p))
continue;
- if (freezeable(p) && !frozen(p))
+ task_lock(p);
+ if (freezeable(p) && !frozen(p) &&
+ !freezer_should_skip(p))
printk(KERN_ERR " %s\n", p->comm);
cancel_freezing(p);
+ task_unlock(p);
} while_each_thread(g, p);
read_unlock(&tasklist_lock);
}
@@ -200,9 +213,7 @@
if (is_user_space(p) == !thaw_user_space)
continue;
- if (!thaw_process(p))
- printk(KERN_WARNING " Strange, %s not stopped\n",
- p->comm );
+ thaw_process(p);
} while_each_thread(g, p);
read_unlock(&tasklist_lock);
}
diff --git a/kernel/power/swap.c b/kernel/power/swap.c
index b8b235c..8b1a1b8 100644
--- a/kernel/power/swap.c
+++ b/kernel/power/swap.c
@@ -584,7 +584,7 @@
resume_bdev = open_by_devnum(swsusp_resume_device, FMODE_READ);
if (!IS_ERR(resume_bdev)) {
set_blocksize(resume_bdev, PAGE_SIZE);
- memset(swsusp_header, 0, sizeof(PAGE_SIZE));
+ memset(swsusp_header, 0, PAGE_SIZE);
error = bio_read_page(swsusp_resume_block,
swsusp_header, NULL);
if (error)
diff --git a/kernel/sched.c b/kernel/sched.c
index 799d23b..13cdab3 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -4775,9 +4775,7 @@
BUG_ON(!in_softirq());
if (need_resched() && system_state == SYSTEM_RUNNING) {
- raw_local_irq_disable();
- _local_bh_enable();
- raw_local_irq_enable();
+ local_bh_enable();
__cond_resched();
local_bh_disable();
return 1;
diff --git a/kernel/signal.c b/kernel/signal.c
index 364fc95..acdfc05 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -96,15 +96,27 @@
#define PENDING(p,b) has_pending_signals(&(p)->signal, (b))
-fastcall void recalc_sigpending_tsk(struct task_struct *t)
+static int recalc_sigpending_tsk(struct task_struct *t)
{
if (t->signal->group_stop_count > 0 ||
(freezing(t)) ||
PENDING(&t->pending, &t->blocked) ||
- PENDING(&t->signal->shared_pending, &t->blocked))
+ PENDING(&t->signal->shared_pending, &t->blocked)) {
set_tsk_thread_flag(t, TIF_SIGPENDING);
- else
- clear_tsk_thread_flag(t, TIF_SIGPENDING);
+ return 1;
+ }
+ clear_tsk_thread_flag(t, TIF_SIGPENDING);
+ return 0;
+}
+
+/*
+ * After recalculating TIF_SIGPENDING, we need to make sure the task wakes up.
+ * This is superfluous when called on current, the wakeup is a harmless no-op.
+ */
+void recalc_sigpending_and_wake(struct task_struct *t)
+{
+ if (recalc_sigpending_tsk(t))
+ signal_wake_up(t, 0);
}
void recalc_sigpending(void)
@@ -744,7 +756,7 @@
action->sa.sa_handler = SIG_DFL;
if (blocked) {
sigdelset(&t->blocked, sig);
- recalc_sigpending_tsk(t);
+ recalc_sigpending_and_wake(t);
}
}
ret = specific_send_sig_info(sig, info, t);
@@ -2273,7 +2285,7 @@
rm_from_queue_full(&mask, &t->signal->shared_pending);
do {
rm_from_queue_full(&mask, &t->pending);
- recalc_sigpending_tsk(t);
+ recalc_sigpending_and_wake(t);
t = next_thread(t);
} while (t != current);
}
diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
index eadfce2..8001d37 100644
--- a/kernel/time/tick-broadcast.c
+++ b/kernel/time/tick-broadcast.c
@@ -243,11 +243,18 @@
{
int cpu = get_cpu();
- if (cpu == *oncpu)
- tick_do_broadcast_on_off(&reason);
- else
- smp_call_function_single(*oncpu, tick_do_broadcast_on_off,
- &reason, 1, 1);
+ if (!cpu_isset(*oncpu, cpu_online_map)) {
+ printk(KERN_ERR "tick-braodcast: ignoring broadcast for "
+ "offline CPU #%d\n", *oncpu);
+ } else {
+
+ if (cpu == *oncpu)
+ tick_do_broadcast_on_off(&reason);
+ else
+ smp_call_function_single(*oncpu,
+ tick_do_broadcast_on_off,
+ &reason, 1, 1);
+ }
put_cpu();
}
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index 3483e6c..3e7ebc4 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -167,9 +167,15 @@
goto end;
cpu = smp_processor_id();
- if (unlikely(local_softirq_pending()))
- printk(KERN_ERR "NOHZ: local_softirq_pending %02x\n",
- local_softirq_pending());
+ if (unlikely(local_softirq_pending())) {
+ static int ratelimit;
+
+ if (ratelimit < 10) {
+ printk(KERN_ERR "NOHZ: local_softirq_pending %02x\n",
+ local_softirq_pending());
+ ratelimit++;
+ }
+ }
now = ktime_get();
/*
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index fb56fed..3bebf73 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -47,7 +47,6 @@
struct workqueue_struct *wq;
struct task_struct *thread;
- int should_stop;
int run_depth; /* Detect run_workqueue() recursion depth */
} ____cacheline_aligned;
@@ -71,7 +70,13 @@
static int singlethread_cpu __read_mostly;
static cpumask_t cpu_singlethread_map __read_mostly;
-/* optimization, we could use cpu_possible_map */
+/*
+ * _cpu_down() first removes CPU from cpu_online_map, then CPU_DEAD
+ * flushes cwq->worklist. This means that flush_workqueue/wait_on_work
+ * which comes in between can't use for_each_online_cpu(). We could
+ * use cpu_possible_map, the cpumask below is more a documentation
+ * than optimization.
+ */
static cpumask_t cpu_populated_map __read_mostly;
/* If it's single threaded, it isn't in the list of workqueues. */
@@ -272,24 +277,6 @@
spin_unlock_irq(&cwq->lock);
}
-/*
- * NOTE: the caller must not touch *cwq if this func returns true
- */
-static int cwq_should_stop(struct cpu_workqueue_struct *cwq)
-{
- int should_stop = cwq->should_stop;
-
- if (unlikely(should_stop)) {
- spin_lock_irq(&cwq->lock);
- should_stop = cwq->should_stop && list_empty(&cwq->worklist);
- if (should_stop)
- cwq->thread = NULL;
- spin_unlock_irq(&cwq->lock);
- }
-
- return should_stop;
-}
-
static int worker_thread(void *__cwq)
{
struct cpu_workqueue_struct *cwq = __cwq;
@@ -302,14 +289,15 @@
for (;;) {
prepare_to_wait(&cwq->more_work, &wait, TASK_INTERRUPTIBLE);
- if (!freezing(current) && !cwq->should_stop
- && list_empty(&cwq->worklist))
+ if (!freezing(current) &&
+ !kthread_should_stop() &&
+ list_empty(&cwq->worklist))
schedule();
finish_wait(&cwq->more_work, &wait);
try_to_freeze();
- if (cwq_should_stop(cwq))
+ if (kthread_should_stop())
break;
run_workqueue(cwq);
@@ -340,18 +328,21 @@
insert_work(cwq, &barr->work, tail);
}
-static void flush_cpu_workqueue(struct cpu_workqueue_struct *cwq)
+static int flush_cpu_workqueue(struct cpu_workqueue_struct *cwq)
{
+ int active;
+
if (cwq->thread == current) {
/*
* Probably keventd trying to flush its own queue. So simply run
* it by hand rather than deadlocking.
*/
run_workqueue(cwq);
+ active = 1;
} else {
struct wq_barrier barr;
- int active = 0;
+ active = 0;
spin_lock_irq(&cwq->lock);
if (!list_empty(&cwq->worklist) || cwq->current_work != NULL) {
insert_wq_barrier(cwq, &barr, 1);
@@ -362,6 +353,8 @@
if (active)
wait_for_completion(&barr.done);
}
+
+ return active;
}
/**
@@ -674,7 +667,6 @@
return PTR_ERR(p);
cwq->thread = p;
- cwq->should_stop = 0;
return 0;
}
@@ -740,29 +732,27 @@
static void cleanup_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
{
- struct wq_barrier barr;
- int alive = 0;
+ /*
+ * Our caller is either destroy_workqueue() or CPU_DEAD,
+ * workqueue_mutex protects cwq->thread
+ */
+ if (cwq->thread == NULL)
+ return;
- spin_lock_irq(&cwq->lock);
- if (cwq->thread != NULL) {
- insert_wq_barrier(cwq, &barr, 1);
- cwq->should_stop = 1;
- alive = 1;
- }
- spin_unlock_irq(&cwq->lock);
+ /*
+ * If the caller is CPU_DEAD the single flush_cpu_workqueue()
+ * is not enough, a concurrent flush_workqueue() can insert a
+ * barrier after us.
+ * When ->worklist becomes empty it is safe to exit because no
+ * more work_structs can be queued on this cwq: flush_workqueue
+ * checks list_empty(), and a "normal" queue_work() can't use
+ * a dead CPU.
+ */
+ while (flush_cpu_workqueue(cwq))
+ ;
- if (alive) {
- wait_for_completion(&barr.done);
-
- while (unlikely(cwq->thread != NULL))
- cpu_relax();
- /*
- * Wait until cwq->thread unlocks cwq->lock,
- * it won't touch *cwq after that.
- */
- smp_rmb();
- spin_unlock_wait(&cwq->lock);
- }
+ kthread_stop(cwq->thread);
+ cwq->thread = NULL;
}
/**
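
The workqueue changes above drop the private should_stop flag and move the worker threads onto the stock kthread_stop()/kthread_should_stop() protocol, with cleanup_workqueue_thread() flushing until the work list stays empty and then calling kthread_stop(). For reference, the canonical shape of a worker using that protocol (a generic sketch, not the workqueue code itself):

#include <linux/kthread.h>
#include <linux/sched.h>

static int example_thread(void *data)
{
	while (!kthread_should_stop()) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (!kthread_should_stop())
			schedule();
		__set_current_state(TASK_RUNNING);
		/* ... perform one unit of work ... */
	}
	return 0;	/* kthread_stop() returns this value to the stopper */
}
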
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index fbc5c62..1ba77ca 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -378,14 +378,13 @@
config RCU_TORTURE_TEST
tristate "torture tests for RCU"
depends on DEBUG_KERNEL
+ depends on m
default n
help
This option provides a kernel module that runs torture tests
on the RCU infrastructure. The kernel module may be built
after the fact on the running kernel to be tested, if desired.
- Say Y here if you want RCU torture tests to start automatically
- at boot time (you probably don't).
Say M if you want the RCU torture tests to build as a module.
Say N if you are unsure.
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 8b000d6..d897062 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -136,6 +136,11 @@
#endif /* CONFIG_MEMORY_HOTPLUG_RESERVE */
#endif /* CONFIG_ARCH_POPULATES_NODE_MAP */
+#if MAX_NUMNODES > 1
+int nr_node_ids __read_mostly = MAX_NUMNODES;
+EXPORT_SYMBOL(nr_node_ids);
+#endif
+
#ifdef CONFIG_DEBUG_VM
static int page_outside_zone_boundaries(struct zone *zone, struct page *page)
{
@@ -669,26 +674,6 @@
return i;
}
-#if MAX_NUMNODES > 1
-int nr_node_ids __read_mostly = MAX_NUMNODES;
-EXPORT_SYMBOL(nr_node_ids);
-
-/*
- * Figure out the number of possible node ids.
- */
-static void __init setup_nr_node_ids(void)
-{
- unsigned int node;
- unsigned int highest = 0;
-
- for_each_node_mask(node, node_possible_map)
- highest = node;
- nr_node_ids = highest + 1;
-}
-#else
-static void __init setup_nr_node_ids(void) {}
-#endif
-
#ifdef CONFIG_NUMA
/*
* Called from the vmstat counter updater to drain pagesets of this
@@ -2733,6 +2718,26 @@
}
#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
+
+#if MAX_NUMNODES > 1
+/*
+ * Figure out the number of possible node ids.
+ */
+static void __init setup_nr_node_ids(void)
+{
+ unsigned int node;
+ unsigned int highest = 0;
+
+ for_each_node_mask(node, node_possible_map)
+ highest = node;
+ nr_node_ids = highest + 1;
+}
+#else
+static inline void setup_nr_node_ids(void)
+{
+}
+#endif
+
/**
* add_active_range - Register a range of PFNs backed by physical memory
* @nid: The node ID the range resides on
diff --git a/mm/slub.c b/mm/slub.c
index 98801d4..3e5aefc 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -939,7 +939,7 @@
* Debugging or ctor may create a need to move the free
* pointer. Fail if this happens.
*/
- if (s->size >= 65535 * sizeof(void *)) {
+ if (s->objsize >= 65535 * sizeof(void *)) {
BUG_ON(s->flags & (SLAB_RED_ZONE | SLAB_POISON |
SLAB_STORE_USER | SLAB_DESTROY_BY_RCU));
BUG_ON(s->ctor);
@@ -1917,7 +1917,6 @@
*/
s->inuse = size;
-#ifdef CONFIG_SLUB_DEBUG
if (((flags & (SLAB_DESTROY_BY_RCU | SLAB_POISON)) ||
s->ctor)) {
/*
@@ -1932,6 +1931,7 @@
size += sizeof(void *);
}
+#ifdef CONFIG_SLUB_DEBUG
if (flags & SLAB_STORE_USER)
/*
* Need to store information about allocs and frees after