Merge master.kernel.org:/pub/scm/linux/kernel/git/perex/alsa
* master.kernel.org:/pub/scm/linux/kernel/git/perex/alsa:
[ALSA] Don't reject O_RDWR at opening PCM OSS with read/write-only device
[ALSA] snd-emu10k1: Implement support for Audigy 2 ZS [SB0353]
[ALSA] add MAINTAINERS entry for snd-aoa
[ALSA] aoa: platform function gpio: ignore errors from functions that don't exist
[ALSA] make snd-powermac load even when it can't bind the device
[ALSA] aoa: fix toonie codec
[ALSA] aoa: feature gpio layer: fix IRQ access
[ALSA] Conversions from kmalloc+memset to k(z|c)alloc
[ALSA] snd-emu10k1: Fixes ALSA bug#2190
diff --git a/Documentation/cpu-freq/user-guide.txt b/Documentation/cpu-freq/user-guide.txt
index 7fedc00..555c8cf 100644
--- a/Documentation/cpu-freq/user-guide.txt
+++ b/Documentation/cpu-freq/user-guide.txt
@@ -153,10 +153,13 @@
that some governors won't load - they only
work on some specific architectures or
processors.
-scaling_min_freq and
+scaling_min_freq and
scaling_max_freq show the current "policy limits" (in
kHz). By echoing new values into these
files, you can change these limits.
+ NOTE: when setting a policy you need to
+ first set scaling_max_freq, then
+ scaling_min_freq.
If you have selected the "userspace" governor which allows you to
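As an illustrative aside (not part of the patch above), here is a minimal userspace sketch of the ordering documented in the NOTE: write scaling_max_freq before scaling_min_freq. The cpu0 sysfs path follows the layout this user-guide describes; the 1.0/2.0 GHz values are assumptions chosen only for the example.

#include <stdio.h>

/* Illustrative sketch only: apply new cpufreq policy limits in the order
 * documented above (scaling_max_freq first, then scaling_min_freq).
 * The cpu0 path and the example frequencies are assumptions.
 */
static int write_khz(const char *path, unsigned long khz)
{
	FILE *f = fopen(path, "w");

	if (!f)
		return -1;
	fprintf(f, "%lu\n", khz);
	return fclose(f);
}

int main(void)
{
	const char *dir = "/sys/devices/system/cpu/cpu0/cpufreq";
	char path[128];
	int ret;

	/* Upper limit first... */
	snprintf(path, sizeof(path), "%s/scaling_max_freq", dir);
	ret = write_khz(path, 2000000);		/* 2.0 GHz, in kHz */

	/* ...then the lower limit. */
	snprintf(path, sizeof(path), "%s/scaling_min_freq", dir);
	ret |= write_khz(path, 1000000);	/* 1.0 GHz, in kHz */

	return ret ? 1 : 0;
}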
diff --git a/Documentation/infiniband/ipoib.txt b/Documentation/infiniband/ipoib.txt
index 1870355..864ff32 100644
--- a/Documentation/infiniband/ipoib.txt
+++ b/Documentation/infiniband/ipoib.txt
@@ -51,8 +51,6 @@
References
- IETF IP over InfiniBand (ipoib) Working Group
- http://ietf.org/html.charters/ipoib-charter.html
Transmission of IP over InfiniBand (IPoIB) (RFC 4391)
http://ietf.org/rfc/rfc4391.txt
IP over InfiniBand (IPoIB) Architecture (RFC 4392)
diff --git a/Documentation/sysctl/kernel.txt b/Documentation/sysctl/kernel.txt
index b0c7ab9..7345c33 100644
--- a/Documentation/sysctl/kernel.txt
+++ b/Documentation/sysctl/kernel.txt
@@ -211,9 +211,8 @@
0: try to continue operation
-1: delay a few seconds (to give klogd time to record the oops output) and
- then panic. If the `panic' sysctl is also non-zero then the machine will
- be rebooted.
+1: panic immediately. If the `panic' sysctl is also non-zero then the
+ machine will be rebooted.
==============================================================
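As an illustrative aside (not part of the patch above), a minimal sketch of flipping this knob from userspace through the usual /proc/sys/kernel/ path; choosing the value 1 ("panic immediately") here is just for the example.

#include <stdio.h>

/* Illustrative sketch only: select the "panic immediately on oops"
 * behaviour described above.  Writing 0 would mean "try to continue
 * operation" instead.
 */
int main(void)
{
	FILE *f = fopen("/proc/sys/kernel/panic_on_oops", "w");

	if (!f) {
		perror("panic_on_oops");
		return 1;
	}
	fprintf(f, "1\n");
	return fclose(f) ? 1 : 0;
}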
diff --git a/MAINTAINERS b/MAINTAINERS
index 8c267aa..e3e1515 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -214,6 +214,12 @@
T: git kernel.org:/pub/scm/linux/kernel/git/lenb/linux-acpi-2.6.git
S: Maintained
+ACPI PCI HOTPLUG DRIVER
+P: Kristen Carlson Accardi
+M: kristen.c.accardi@intel.com
+L: pcihpd-discuss@lists.sourceforge.net
+S: Maintained
+
AD1816 SOUND DRIVER
P: Thorsten Knabe
M: Thorsten Knabe <linux@thorsten-knabe.de>
@@ -2642,6 +2648,14 @@
L: spi-devel-general@lists.sourceforge.net
S: Maintained
+STABLE BRANCH:
+P: Greg Kroah-Hartman
+M: greg@kroah.com
+P: Chris Wright
+M: chrisw@sous-sol.org
+L: stable@kernel.org
+S: Maintained
+
TPM DEVICE DRIVER
P: Kylene Hall
M: kjhall@us.ibm.com
diff --git a/arch/i386/kernel/cpu/cpufreq/Kconfig b/arch/i386/kernel/cpu/cpufreq/Kconfig
index e44a4c6..ccc1edf 100644
--- a/arch/i386/kernel/cpu/cpufreq/Kconfig
+++ b/arch/i386/kernel/cpu/cpufreq/Kconfig
@@ -96,6 +96,7 @@
config X86_GX_SUSPMOD
tristate "Cyrix MediaGX/NatSemi Geode Suspend Modulation"
+ depends on PCI
help
This add the CPUFreq driver for NatSemi Geode processors which
support suspend modulation.
@@ -202,7 +203,7 @@
config X86_LONGHAUL
tristate "VIA Cyrix III Longhaul"
select CPU_FREQ_TABLE
- depends on BROKEN
+ depends on ACPI_PROCESSOR
help
This adds the CPUFreq driver for VIA Samuel/CyrixIII,
VIA Cyrix Samuel/C3, VIA Cyrix Ezra and VIA Cyrix Ezra-T
diff --git a/arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c b/arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c
index 567b39b..efb41e8 100644
--- a/arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c
+++ b/arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c
@@ -384,8 +384,7 @@
}
/* Do initialization in ACPI core */
- acpi_processor_preregister_performance(acpi_perf_data);
- return 0;
+ return acpi_processor_preregister_performance(acpi_perf_data);
}
static int
diff --git a/arch/i386/kernel/cpu/cpufreq/longhaul.c b/arch/i386/kernel/cpu/cpufreq/longhaul.c
index 146f607..4f2c3ae 100644
--- a/arch/i386/kernel/cpu/cpufreq/longhaul.c
+++ b/arch/i386/kernel/cpu/cpufreq/longhaul.c
@@ -29,11 +29,13 @@
#include <linux/cpufreq.h>
#include <linux/slab.h>
#include <linux/string.h>
-#include <linux/pci.h>
#include <asm/msr.h>
#include <asm/timex.h>
#include <asm/io.h>
+#include <asm/acpi.h>
+#include <linux/acpi.h>
+#include <acpi/processor.h>
#include "longhaul.h"
@@ -56,6 +58,8 @@
static unsigned int minmult, maxmult;
static int can_scale_voltage;
static int vrmrev;
+static struct acpi_processor *pr = NULL;
+static struct acpi_processor_cx *cx = NULL;
/* Module parameters */
static int dont_scale_voltage;
@@ -118,84 +122,65 @@
return eblcr_table[invalue];
}
+/* For processors with the BCR2 MSR */
-static void do_powersaver(union msr_longhaul *longhaul,
- unsigned int clock_ratio_index)
+static void do_longhaul1(int cx_address, unsigned int clock_ratio_index)
{
- struct pci_dev *dev;
- unsigned long flags;
- unsigned int tmp_mask;
- int version;
- int i;
- u16 pci_cmd;
- u16 cmd_state[64];
+ union msr_bcr2 bcr2;
+ u32 t;
- switch (cpu_model) {
- case CPU_EZRA_T:
- version = 3;
- break;
- case CPU_NEHEMIAH:
- version = 0xf;
- break;
- default:
- return;
- }
+ rdmsrl(MSR_VIA_BCR2, bcr2.val);
+ /* Enable software clock multiplier */
+ bcr2.bits.ESOFTBF = 1;
+ bcr2.bits.CLOCKMUL = clock_ratio_index;
- rdmsrl(MSR_VIA_LONGHAUL, longhaul->val);
- longhaul->bits.SoftBusRatio = clock_ratio_index & 0xf;
- longhaul->bits.SoftBusRatio4 = (clock_ratio_index & 0x10) >> 4;
- longhaul->bits.EnableSoftBusRatio = 1;
- longhaul->bits.RevisionKey = 0;
-
- preempt_disable();
- local_irq_save(flags);
-
- /*
- * get current pci bus master state for all devices
- * and clear bus master bit
- */
- dev = NULL;
- i = 0;
- do {
- dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev);
- if (dev != NULL) {
- pci_read_config_word(dev, PCI_COMMAND, &pci_cmd);
- cmd_state[i++] = pci_cmd;
- pci_cmd &= ~PCI_COMMAND_MASTER;
- pci_write_config_word(dev, PCI_COMMAND, pci_cmd);
- }
- } while (dev != NULL);
-
- tmp_mask=inb(0x21); /* works on C3. save mask. */
- outb(0xFE,0x21); /* TMR0 only */
- outb(0xFF,0x80); /* delay */
-
+ /* Sync to timer tick */
safe_halt();
- wrmsrl(MSR_VIA_LONGHAUL, longhaul->val);
- halt();
+ ACPI_FLUSH_CPU_CACHE();
+ /* Change frequency on next halt or sleep */
+ wrmsrl(MSR_VIA_BCR2, bcr2.val);
+ /* Invoke C3 */
+ inb(cx_address);
+ /* Dummy op - must do something useless after P_LVL3 read */
+ t = inl(acpi_fadt.xpm_tmr_blk.address);
+ /* Disable software clock multiplier */
local_irq_disable();
+ rdmsrl(MSR_VIA_BCR2, bcr2.val);
+ bcr2.bits.ESOFTBF = 0;
+ wrmsrl(MSR_VIA_BCR2, bcr2.val);
+}
- outb(tmp_mask,0x21); /* restore mask */
+/* For processors with the Longhaul MSR */
- /* restore pci bus master state for all devices */
- dev = NULL;
- i = 0;
- do {
- dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev);
- if (dev != NULL) {
- pci_cmd = cmd_state[i++];
- pci_write_config_byte(dev, PCI_COMMAND, pci_cmd);
- }
- } while (dev != NULL);
- local_irq_restore(flags);
- preempt_enable();
+static void do_powersaver(int cx_address, unsigned int clock_ratio_index)
+{
+ union msr_longhaul longhaul;
+ u32 t;
- /* disable bus ratio bit */
- rdmsrl(MSR_VIA_LONGHAUL, longhaul->val);
- longhaul->bits.EnableSoftBusRatio = 0;
- longhaul->bits.RevisionKey = version;
- wrmsrl(MSR_VIA_LONGHAUL, longhaul->val);
+ rdmsrl(MSR_VIA_LONGHAUL, longhaul.val);
+ longhaul.bits.RevisionKey = longhaul.bits.RevisionID;
+ longhaul.bits.SoftBusRatio = clock_ratio_index & 0xf;
+ longhaul.bits.SoftBusRatio4 = (clock_ratio_index & 0x10) >> 4;
+ longhaul.bits.EnableSoftBusRatio = 1;
+
+ /* Sync to timer tick */
+ safe_halt();
+ ACPI_FLUSH_CPU_CACHE();
+ /* Change frequency on next halt or sleep */
+ wrmsrl(MSR_VIA_LONGHAUL, longhaul.val);
+ /* Invoke C3 */
+ inb(cx_address);
+ /* Dummy op - must do something useless after P_LVL3 read */
+ t = inl(acpi_fadt.xpm_tmr_blk.address);
+
+ /* Disable bus ratio bit */
+ local_irq_disable();
+ longhaul.bits.RevisionKey = longhaul.bits.RevisionID;
+ longhaul.bits.EnableSoftBusRatio = 0;
+ longhaul.bits.EnableSoftBSEL = 0;
+ longhaul.bits.EnableSoftVID = 0;
+ wrmsrl(MSR_VIA_LONGHAUL, longhaul.val);
}
/**
@@ -209,9 +194,9 @@
{
int speed, mult;
struct cpufreq_freqs freqs;
- union msr_longhaul longhaul;
- union msr_bcr2 bcr2;
static unsigned int old_ratio=-1;
+ unsigned long flags;
+ unsigned int pic1_mask, pic2_mask;
if (old_ratio == clock_ratio_index)
return;
@@ -234,6 +219,20 @@
dprintk ("Setting to FSB:%dMHz Mult:%d.%dx (%s)\n",
fsb, mult/10, mult%10, print_speed(speed/1000));
+ preempt_disable();
+ local_irq_save(flags);
+
+ pic2_mask = inb(0xA1);
+ pic1_mask = inb(0x21); /* works on C3. save mask. */
+ outb(0xFF,0xA1); /* Overkill */
+ outb(0xFE,0x21); /* TMR0 only */
+
+ /* Disable bus master arbitration */
+ if (pr->flags.bm_check) {
+ acpi_set_register(ACPI_BITREG_ARB_DISABLE, 1,
+ ACPI_MTX_DO_NOT_LOCK);
+ }
+
switch (longhaul_version) {
/*
@@ -245,20 +244,7 @@
*/
case TYPE_LONGHAUL_V1:
case TYPE_LONGHAUL_V2:
- rdmsrl (MSR_VIA_BCR2, bcr2.val);
- /* Enable software clock multiplier */
- bcr2.bits.ESOFTBF = 1;
- bcr2.bits.CLOCKMUL = clock_ratio_index;
- local_irq_disable();
- wrmsrl (MSR_VIA_BCR2, bcr2.val);
- safe_halt();
-
- /* Disable software clock multiplier */
- rdmsrl (MSR_VIA_BCR2, bcr2.val);
- bcr2.bits.ESOFTBF = 0;
- local_irq_disable();
- wrmsrl (MSR_VIA_BCR2, bcr2.val);
- local_irq_enable();
+ do_longhaul1(cx->address, clock_ratio_index);
break;
/*
@@ -273,10 +259,22 @@
* to work in practice.
*/
case TYPE_POWERSAVER:
- do_powersaver(&longhaul, clock_ratio_index);
+ do_powersaver(cx->address, clock_ratio_index);
break;
}
+ /* Enable bus master arbitration */
+ if (pr->flags.bm_check) {
+ acpi_set_register(ACPI_BITREG_ARB_DISABLE, 0,
+ ACPI_MTX_DO_NOT_LOCK);
+ }
+
+ outb(pic2_mask,0xA1); /* restore mask */
+ outb(pic1_mask,0x21);
+
+ local_irq_restore(flags);
+ preempt_enable();
+
cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
}
@@ -324,9 +322,11 @@
static int __init longhaul_get_ranges(void)
{
unsigned long invalue;
- unsigned int multipliers[32]= {
- 50,30,40,100,55,35,45,95,90,70,80,60,120,75,85,65,
- -1,110,120,-1,135,115,125,105,130,150,160,140,-1,155,-1,145 };
+ unsigned int ezra_t_multipliers[32]= {
+ 90, 30, 40, 100, 55, 35, 45, 95,
+ 50, 70, 80, 60, 120, 75, 85, 65,
+ -1, 110, 120, -1, 135, 115, 125, 105,
+ 130, 150, 160, 140, -1, 155, -1, 145 };
unsigned int j, k = 0;
union msr_longhaul longhaul;
unsigned long lo, hi;
@@ -355,13 +355,13 @@
invalue = longhaul.bits.MaxMHzBR;
if (longhaul.bits.MaxMHzBR4)
invalue += 16;
- maxmult=multipliers[invalue];
+ maxmult=ezra_t_multipliers[invalue];
invalue = longhaul.bits.MinMHzBR;
if (longhaul.bits.MinMHzBR4 == 1)
minmult = 30;
else
- minmult = multipliers[invalue];
+ minmult = ezra_t_multipliers[invalue];
fsb = eblcr_fsb_table_v2[longhaul.bits.MaxMHzFSB];
break;
}
@@ -527,6 +527,18 @@
return calc_speed(longhaul_get_cpu_mult());
}
+static acpi_status longhaul_walk_callback(acpi_handle obj_handle,
+ u32 nesting_level,
+ void *context, void **return_value)
+{
+ struct acpi_device *d;
+
+ if ( acpi_bus_get_device(obj_handle, &d) ) {
+ return 0;
+ }
+ *return_value = (void *)acpi_driver_data(d);
+ return 1;
+}
static int __init longhaul_cpu_init(struct cpufreq_policy *policy)
{
@@ -534,6 +546,15 @@
char *cpuname=NULL;
int ret;
+ /* Check ACPI support for C3 state */
+ acpi_walk_namespace(ACPI_TYPE_PROCESSOR, ACPI_ROOT_OBJECT, ACPI_UINT32_MAX,
+ &longhaul_walk_callback, NULL, (void *)&pr);
+ if (pr == NULL) goto err_acpi;
+
+ cx = &pr->power.states[ACPI_STATE_C3];
+ if (cx->address == 0 || cx->latency > 1000) goto err_acpi;
+
+ /* Now check what we have on this motherboard */
switch (c->x86_model) {
case 6:
cpu_model = CPU_SAMUEL;
@@ -634,6 +655,10 @@
cpufreq_frequency_table_get_attr(longhaul_table, policy->cpu);
return 0;
+
+err_acpi:
+ printk(KERN_ERR PFX "No ACPI support for CPU frequency changes.\n");
+ return -ENODEV;
}
static int __devexit longhaul_cpu_exit(struct cpufreq_policy *policy)
@@ -666,6 +691,18 @@
if (c->x86_vendor != X86_VENDOR_CENTAUR || c->x86 != 6)
return -ENODEV;
+#ifdef CONFIG_SMP
+ if (num_online_cpus() > 1) {
+		printk(KERN_ERR PFX "More than 1 CPU detected, longhaul disabled.\n");
+		return -ENODEV;
+ }
+#endif
+#ifdef CONFIG_X86_IO_APIC
+ if (cpu_has_apic) {
+ printk(KERN_ERR PFX "APIC detected. Longhaul is currently broken in this configuration.\n");
+ return -ENODEV;
+ }
+#endif
switch (c->x86_model) {
case 6 ... 9:
return cpufreq_register_driver(&longhaul_driver);
@@ -699,6 +736,6 @@
MODULE_DESCRIPTION ("Longhaul driver for VIA Cyrix processors.");
MODULE_LICENSE ("GPL");
-module_init(longhaul_init);
+late_initcall(longhaul_init);
module_exit(longhaul_exit);
diff --git a/arch/ia64/hp/sim/simscsi.c b/arch/ia64/hp/sim/simscsi.c
index a3fe975..8a4f0d0 100644
--- a/arch/ia64/hp/sim/simscsi.c
+++ b/arch/ia64/hp/sim/simscsi.c
@@ -151,7 +151,7 @@
simscsi_sg_readwrite (struct scsi_cmnd *sc, int mode, unsigned long offset)
{
int list_len = sc->use_sg;
- struct scatterlist *sl = (struct scatterlist *)sc->buffer;
+ struct scatterlist *sl = (struct scatterlist *)sc->request_buffer;
struct disk_stat stat;
struct disk_req req;
@@ -244,7 +244,7 @@
if (scatterlen == 0)
memcpy(sc->request_buffer, buf, len);
- else for (slp = (struct scatterlist *)sc->buffer; scatterlen-- > 0 && len > 0; slp++) {
+ else for (slp = (struct scatterlist *)sc->request_buffer; scatterlen-- > 0 && len > 0; slp++) {
unsigned thislen = min(len, slp->length);
memcpy(page_address(slp->page) + slp->offset, buf, thislen);
diff --git a/arch/ia64/kernel/efi.c b/arch/ia64/kernel/efi.c
index e4bfa9d..bb8770a 100644
--- a/arch/ia64/kernel/efi.c
+++ b/arch/ia64/kernel/efi.c
@@ -632,7 +632,7 @@
if (phys_addr - md->start < (md->num_pages << EFI_PAGE_SHIFT))
return md;
}
- return 0;
+ return NULL;
}
static efi_memory_desc_t *
@@ -652,7 +652,7 @@
if (phys_addr - md->phys_addr < (md->num_pages << EFI_PAGE_SHIFT))
return md;
}
- return 0;
+ return NULL;
}
u32
@@ -923,7 +923,7 @@
void
efi_memmap_init(unsigned long *s, unsigned long *e)
{
- struct kern_memdesc *k, *prev = 0;
+ struct kern_memdesc *k, *prev = NULL;
u64 contig_low=0, contig_high=0;
u64 as, ae, lim;
void *efi_map_start, *efi_map_end, *p, *q;
diff --git a/arch/ia64/kernel/head.S b/arch/ia64/kernel/head.S
index 561b8f1..29236f0 100644
--- a/arch/ia64/kernel/head.S
+++ b/arch/ia64/kernel/head.S
@@ -853,7 +853,6 @@
*/
GLOBAL_ENTRY(ia64_switch_mode_phys)
{
- alloc r2=ar.pfs,0,0,0,0
rsm psr.i | psr.ic // disable interrupts and interrupt collection
mov r15=ip
}
@@ -902,7 +901,6 @@
*/
GLOBAL_ENTRY(ia64_switch_mode_virt)
{
- alloc r2=ar.pfs,0,0,0,0
rsm psr.i | psr.ic // disable interrupts and interrupt collection
mov r15=ip
}
diff --git a/arch/ia64/kernel/ia64_ksyms.c b/arch/ia64/kernel/ia64_ksyms.c
index b7cf651..3ead20f 100644
--- a/arch/ia64/kernel/ia64_ksyms.c
+++ b/arch/ia64/kernel/ia64_ksyms.c
@@ -62,7 +62,7 @@
EXPORT_SYMBOL(__moddi3);
EXPORT_SYMBOL(__umoddi3);
-#if defined(CONFIG_MD_RAID5) || defined(CONFIG_MD_RAID5_MODULE)
+#if defined(CONFIG_MD_RAID456) || defined(CONFIG_MD_RAID456_MODULE)
extern void xor_ia64_2(void);
extern void xor_ia64_3(void);
extern void xor_ia64_4(void);
diff --git a/arch/ia64/kernel/pal.S b/arch/ia64/kernel/pal.S
index 5018c7f..ebaf1e6 100644
--- a/arch/ia64/kernel/pal.S
+++ b/arch/ia64/kernel/pal.S
@@ -217,12 +217,7 @@
.body
;;
ld8 loc2 = [loc2] // loc2 <- entry point
- mov out0 = in0 // first argument
- mov out1 = in1 // copy arg2
- mov out2 = in2 // copy arg3
- mov out3 = in3 // copy arg3
- ;;
- mov loc3 = psr // save psr
+ mov loc3 = psr // save psr
;;
mov loc4=ar.rsc // save RSE configuration
dep.z loc2=loc2,0,61 // convert pal entry point to physical
@@ -236,18 +231,23 @@
;;
andcm r16=loc3,r16 // removes bits to clear from psr
br.call.sptk.many rp=ia64_switch_mode_phys
-.ret6:
+
+ mov out0 = in0 // first argument
+ mov out1 = in1 // copy arg2
+ mov out2 = in2 // copy arg3
+ mov out3 = in3 // copy arg3
mov loc5 = r19
mov loc6 = r20
+
br.call.sptk.many rp=b7 // now make the call
-.ret7:
+
mov ar.rsc=0 // put RSE in enforced lazy, LE mode
mov r16=loc3 // r16= original psr
mov r19=loc5
mov r20=loc6
br.call.sptk.many rp=ia64_switch_mode_virt // return to virtual mode
-.ret8: mov psr.l = loc3 // restore init PSR
+ mov psr.l = loc3 // restore init PSR
mov ar.pfs = loc1
mov rp = loc0
;;
diff --git a/arch/ia64/kernel/palinfo.c b/arch/ia64/kernel/palinfo.c
index ab5b5241..0b546e2 100644
--- a/arch/ia64/kernel/palinfo.c
+++ b/arch/ia64/kernel/palinfo.c
@@ -566,29 +566,23 @@
pal_version_u_t min_ver, cur_ver;
char *p = page;
- /* The PAL_VERSION call is advertised as being able to support
- * both physical and virtual mode calls. This seems to be a documentation
- * bug rather than firmware bug. In fact, it does only support physical mode.
- * So now the code reflects this fact and the pal_version() has been updated
- * accordingly.
- */
- if (ia64_pal_version(&min_ver, &cur_ver) != 0) return 0;
+ if (ia64_pal_version(&min_ver, &cur_ver) != 0)
+ return 0;
p += sprintf(p,
"PAL_vendor : 0x%02x (min=0x%02x)\n"
- "PAL_A : %x.%x.%x (min=%x.%x.%x)\n"
- "PAL_B : %x.%x.%x (min=%x.%x.%x)\n",
- cur_ver.pal_version_s.pv_pal_vendor, min_ver.pal_version_s.pv_pal_vendor,
-
- cur_ver.pal_version_s.pv_pal_a_model>>4,
- cur_ver.pal_version_s.pv_pal_a_model&0xf, cur_ver.pal_version_s.pv_pal_a_rev,
- min_ver.pal_version_s.pv_pal_a_model>>4,
- min_ver.pal_version_s.pv_pal_a_model&0xf, min_ver.pal_version_s.pv_pal_a_rev,
-
- cur_ver.pal_version_s.pv_pal_b_model>>4,
- cur_ver.pal_version_s.pv_pal_b_model&0xf, cur_ver.pal_version_s.pv_pal_b_rev,
- min_ver.pal_version_s.pv_pal_b_model>>4,
- min_ver.pal_version_s.pv_pal_b_model&0xf, min_ver.pal_version_s.pv_pal_b_rev);
+ "PAL_A : %02x.%02x (min=%02x.%02x)\n"
+ "PAL_B : %02x.%02x (min=%02x.%02x)\n",
+ cur_ver.pal_version_s.pv_pal_vendor,
+ min_ver.pal_version_s.pv_pal_vendor,
+ cur_ver.pal_version_s.pv_pal_a_model,
+ cur_ver.pal_version_s.pv_pal_a_rev,
+ min_ver.pal_version_s.pv_pal_a_model,
+ min_ver.pal_version_s.pv_pal_a_rev,
+ cur_ver.pal_version_s.pv_pal_b_model,
+ cur_ver.pal_version_s.pv_pal_b_rev,
+ min_ver.pal_version_s.pv_pal_b_model,
+ min_ver.pal_version_s.pv_pal_b_rev);
return p - page;
}
diff --git a/arch/ia64/kernel/uncached.c b/arch/ia64/kernel/uncached.c
index 5f03b9e..4c73a67 100644
--- a/arch/ia64/kernel/uncached.c
+++ b/arch/ia64/kernel/uncached.c
@@ -32,32 +32,38 @@
extern void __init efi_memmap_walk_uc(efi_freemem_callback_t, void *);
-#define MAX_UNCACHED_GRANULES 5
-static int allocated_granules;
+struct uncached_pool {
+ struct gen_pool *pool;
+ struct mutex add_chunk_mutex; /* serialize adding a converted chunk */
+ int nchunks_added; /* #of converted chunks added to pool */
+ atomic_t status; /* smp called function's return status*/
+};
-struct gen_pool *uncached_pool[MAX_NUMNODES];
+#define MAX_CONVERTED_CHUNKS_PER_NODE 2
+
+struct uncached_pool uncached_pools[MAX_NUMNODES];
static void uncached_ipi_visibility(void *data)
{
int status;
+ struct uncached_pool *uc_pool = (struct uncached_pool *)data;
status = ia64_pal_prefetch_visibility(PAL_VISIBILITY_PHYSICAL);
if ((status != PAL_VISIBILITY_OK) &&
(status != PAL_VISIBILITY_OK_REMOTE_NEEDED))
- printk(KERN_DEBUG "pal_prefetch_visibility() returns %i on "
- "CPU %i\n", status, raw_smp_processor_id());
+ atomic_inc(&uc_pool->status);
}
static void uncached_ipi_mc_drain(void *data)
{
int status;
+ struct uncached_pool *uc_pool = (struct uncached_pool *)data;
status = ia64_pal_mc_drain();
- if (status)
- printk(KERN_WARNING "ia64_pal_mc_drain() failed with %i on "
- "CPU %i\n", status, raw_smp_processor_id());
+ if (status != PAL_STATUS_SUCCESS)
+ atomic_inc(&uc_pool->status);
}
@@ -70,21 +76,34 @@
* This is accomplished by first allocating a granule of cached memory pages
* and then converting them to uncached memory pages.
*/
-static int uncached_add_chunk(struct gen_pool *pool, int nid)
+static int uncached_add_chunk(struct uncached_pool *uc_pool, int nid)
{
struct page *page;
- int status, i;
+ int status, i, nchunks_added = uc_pool->nchunks_added;
unsigned long c_addr, uc_addr;
- if (allocated_granules >= MAX_UNCACHED_GRANULES)
+ if (mutex_lock_interruptible(&uc_pool->add_chunk_mutex) != 0)
+ return -1; /* interrupted by a signal */
+
+ if (uc_pool->nchunks_added > nchunks_added) {
+ /* someone added a new chunk while we were waiting */
+ mutex_unlock(&uc_pool->add_chunk_mutex);
+ return 0;
+ }
+
+ if (uc_pool->nchunks_added >= MAX_CONVERTED_CHUNKS_PER_NODE) {
+ mutex_unlock(&uc_pool->add_chunk_mutex);
return -1;
+ }
/* attempt to allocate a granule's worth of cached memory pages */
page = alloc_pages_node(nid, GFP_KERNEL | __GFP_ZERO,
IA64_GRANULE_SHIFT-PAGE_SHIFT);
- if (!page)
+ if (!page) {
+ mutex_unlock(&uc_pool->add_chunk_mutex);
return -1;
+ }
/* convert the memory pages from cached to uncached */
@@ -102,11 +121,14 @@
flush_tlb_kernel_range(uc_addr, uc_adddr + IA64_GRANULE_SIZE);
status = ia64_pal_prefetch_visibility(PAL_VISIBILITY_PHYSICAL);
- if (!status) {
- status = smp_call_function(uncached_ipi_visibility, NULL, 0, 1);
- if (status)
+ if (status == PAL_VISIBILITY_OK_REMOTE_NEEDED) {
+ atomic_set(&uc_pool->status, 0);
+ status = smp_call_function(uncached_ipi_visibility, uc_pool,
+ 0, 1);
+ if (status || atomic_read(&uc_pool->status))
goto failed;
- }
+ } else if (status != PAL_VISIBILITY_OK)
+ goto failed;
preempt_disable();
@@ -120,20 +142,24 @@
preempt_enable();
- ia64_pal_mc_drain();
- status = smp_call_function(uncached_ipi_mc_drain, NULL, 0, 1);
- if (status)
+ status = ia64_pal_mc_drain();
+ if (status != PAL_STATUS_SUCCESS)
+ goto failed;
+ atomic_set(&uc_pool->status, 0);
+ status = smp_call_function(uncached_ipi_mc_drain, uc_pool, 0, 1);
+ if (status || atomic_read(&uc_pool->status))
goto failed;
/*
* The chunk of memory pages has been converted to uncached so now we
* can add it to the pool.
*/
- status = gen_pool_add(pool, uc_addr, IA64_GRANULE_SIZE, nid);
+ status = gen_pool_add(uc_pool->pool, uc_addr, IA64_GRANULE_SIZE, nid);
if (status)
goto failed;
- allocated_granules++;
+ uc_pool->nchunks_added++;
+ mutex_unlock(&uc_pool->add_chunk_mutex);
return 0;
/* failed to convert or add the chunk so give it back to the kernel */
@@ -142,6 +168,7 @@
ClearPageUncached(&page[i]);
free_pages(c_addr, IA64_GRANULE_SHIFT-PAGE_SHIFT);
+ mutex_unlock(&uc_pool->add_chunk_mutex);
return -1;
}
@@ -158,7 +185,7 @@
unsigned long uncached_alloc_page(int starting_nid)
{
unsigned long uc_addr;
- struct gen_pool *pool;
+ struct uncached_pool *uc_pool;
int nid;
if (unlikely(starting_nid >= MAX_NUMNODES))
@@ -171,14 +198,14 @@
do {
if (!node_online(nid))
continue;
- pool = uncached_pool[nid];
- if (pool == NULL)
+ uc_pool = &uncached_pools[nid];
+ if (uc_pool->pool == NULL)
continue;
do {
- uc_addr = gen_pool_alloc(pool, PAGE_SIZE);
+ uc_addr = gen_pool_alloc(uc_pool->pool, PAGE_SIZE);
if (uc_addr != 0)
return uc_addr;
- } while (uncached_add_chunk(pool, nid) == 0);
+ } while (uncached_add_chunk(uc_pool, nid) == 0);
} while ((nid = (nid + 1) % MAX_NUMNODES) != starting_nid);
@@ -197,7 +224,7 @@
void uncached_free_page(unsigned long uc_addr)
{
int nid = paddr_to_nid(uc_addr - __IA64_UNCACHED_OFFSET);
- struct gen_pool *pool = uncached_pool[nid];
+ struct gen_pool *pool = uncached_pools[nid].pool;
if (unlikely(pool == NULL))
return;
@@ -224,7 +251,7 @@
unsigned long uc_end, void *arg)
{
int nid = paddr_to_nid(uc_start - __IA64_UNCACHED_OFFSET);
- struct gen_pool *pool = uncached_pool[nid];
+ struct gen_pool *pool = uncached_pools[nid].pool;
size_t size = uc_end - uc_start;
touch_softlockup_watchdog();
@@ -242,7 +269,8 @@
int nid;
for_each_online_node(nid) {
- uncached_pool[nid] = gen_pool_create(PAGE_SHIFT, nid);
+ uncached_pools[nid].pool = gen_pool_create(PAGE_SHIFT, nid);
+ mutex_init(&uncached_pools[nid].add_chunk_mutex);
}
efi_memmap_walk_uc(uncached_build_memmap, NULL);
diff --git a/arch/ia64/lib/Makefile b/arch/ia64/lib/Makefile
index d8536a2..38fa6e4 100644
--- a/arch/ia64/lib/Makefile
+++ b/arch/ia64/lib/Makefile
@@ -14,7 +14,7 @@
lib-$(CONFIG_ITANIUM) += copy_page.o copy_user.o memcpy.o
lib-$(CONFIG_MCKINLEY) += copy_page_mck.o memcpy_mck.o
lib-$(CONFIG_PERFMON) += carta_random.o
-lib-$(CONFIG_MD_RAID5) += xor.o
+lib-$(CONFIG_MD_RAID456) += xor.o
AFLAGS___divdi3.o =
AFLAGS___udivdi3.o = -DUNSIGNED
diff --git a/arch/ia64/mm/contig.c b/arch/ia64/mm/contig.c
index 2a88cdd..e004143 100644
--- a/arch/ia64/mm/contig.c
+++ b/arch/ia64/mm/contig.c
@@ -27,6 +27,7 @@
#ifdef CONFIG_VIRTUAL_MEM_MAP
static unsigned long num_dma_physpages;
+static unsigned long max_gap;
#endif
/**
@@ -45,9 +46,15 @@
printk("Free swap: %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));
i = max_mapnr;
- while (i-- > 0) {
- if (!pfn_valid(i))
+ for (i = 0; i < max_mapnr; i++) {
+ if (!pfn_valid(i)) {
+#ifdef CONFIG_VIRTUAL_MEM_MAP
+ if (max_gap < LARGE_GAP)
+ continue;
+ i = vmemmap_find_next_valid_pfn(0, i) - 1;
+#endif
continue;
+ }
total++;
if (PageReserved(mem_map+i))
reserved++;
@@ -234,7 +241,6 @@
unsigned long zones_size[MAX_NR_ZONES];
#ifdef CONFIG_VIRTUAL_MEM_MAP
unsigned long zholes_size[MAX_NR_ZONES];
- unsigned long max_gap;
#endif
/* initialize mem_map[] */
@@ -266,7 +272,6 @@
}
}
- max_gap = 0;
efi_memmap_walk(find_largest_hole, (u64 *)&max_gap);
if (max_gap < LARGE_GAP) {
vmem_map = (struct page *) 0;
@@ -277,7 +282,8 @@
/* allocate virtual_mem_map */
- map_size = PAGE_ALIGN(max_low_pfn * sizeof(struct page));
+ map_size = PAGE_ALIGN(ALIGN(max_low_pfn, MAX_ORDER_NR_PAGES) *
+ sizeof(struct page));
vmalloc_end -= map_size;
vmem_map = (struct page *) vmalloc_end;
efi_memmap_walk(create_mem_map_page_table, NULL);
diff --git a/arch/ia64/mm/discontig.c b/arch/ia64/mm/discontig.c
index 99bd9e3..d260bff 100644
--- a/arch/ia64/mm/discontig.c
+++ b/arch/ia64/mm/discontig.c
@@ -534,68 +534,6 @@
}
#endif /* CONFIG_SMP */
-#ifdef CONFIG_VIRTUAL_MEM_MAP
-static inline int find_next_valid_pfn_for_pgdat(pg_data_t *pgdat, int i)
-{
- unsigned long end_address, hole_next_pfn;
- unsigned long stop_address;
-
- end_address = (unsigned long) &vmem_map[pgdat->node_start_pfn + i];
- end_address = PAGE_ALIGN(end_address);
-
- stop_address = (unsigned long) &vmem_map[
- pgdat->node_start_pfn + pgdat->node_spanned_pages];
-
- do {
- pgd_t *pgd;
- pud_t *pud;
- pmd_t *pmd;
- pte_t *pte;
-
- pgd = pgd_offset_k(end_address);
- if (pgd_none(*pgd)) {
- end_address += PGDIR_SIZE;
- continue;
- }
-
- pud = pud_offset(pgd, end_address);
- if (pud_none(*pud)) {
- end_address += PUD_SIZE;
- continue;
- }
-
- pmd = pmd_offset(pud, end_address);
- if (pmd_none(*pmd)) {
- end_address += PMD_SIZE;
- continue;
- }
-
- pte = pte_offset_kernel(pmd, end_address);
-retry_pte:
- if (pte_none(*pte)) {
- end_address += PAGE_SIZE;
- pte++;
- if ((end_address < stop_address) &&
- (end_address != ALIGN(end_address, 1UL << PMD_SHIFT)))
- goto retry_pte;
- continue;
- }
- /* Found next valid vmem_map page */
- break;
- } while (end_address < stop_address);
-
- end_address = min(end_address, stop_address);
- end_address = end_address - (unsigned long) vmem_map + sizeof(struct page) - 1;
- hole_next_pfn = end_address / sizeof(struct page);
- return hole_next_pfn - pgdat->node_start_pfn;
-}
-#else
-static inline int find_next_valid_pfn_for_pgdat(pg_data_t *pgdat, int i)
-{
- return i + 1;
-}
-#endif
-
/**
* show_mem - give short summary of memory stats
*
@@ -625,7 +563,8 @@
if (pfn_valid(pgdat->node_start_pfn + i))
page = pfn_to_page(pgdat->node_start_pfn + i);
else {
- i = find_next_valid_pfn_for_pgdat(pgdat, i) - 1;
+ i = vmemmap_find_next_valid_pfn(pgdat->node_id,
+ i) - 1;
continue;
}
if (PageReserved(page))
@@ -751,7 +690,8 @@
efi_memmap_walk(filter_rsvd_memory, count_node_pages);
#ifdef CONFIG_VIRTUAL_MEM_MAP
- vmalloc_end -= PAGE_ALIGN(max_low_pfn * sizeof(struct page));
+ vmalloc_end -= PAGE_ALIGN(ALIGN(max_low_pfn, MAX_ORDER_NR_PAGES) *
+ sizeof(struct page));
vmem_map = (struct page *) vmalloc_end;
efi_memmap_walk(create_mem_map_page_table, NULL);
printk("Virtual mem_map starts at 0x%p\n", vmem_map);
diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
index 2f50c06..30617cc 100644
--- a/arch/ia64/mm/init.c
+++ b/arch/ia64/mm/init.c
@@ -415,6 +415,61 @@
}
#ifdef CONFIG_VIRTUAL_MEM_MAP
+int vmemmap_find_next_valid_pfn(int node, int i)
+{
+ unsigned long end_address, hole_next_pfn;
+ unsigned long stop_address;
+ pg_data_t *pgdat = NODE_DATA(node);
+
+ end_address = (unsigned long) &vmem_map[pgdat->node_start_pfn + i];
+ end_address = PAGE_ALIGN(end_address);
+
+ stop_address = (unsigned long) &vmem_map[
+ pgdat->node_start_pfn + pgdat->node_spanned_pages];
+
+ do {
+ pgd_t *pgd;
+ pud_t *pud;
+ pmd_t *pmd;
+ pte_t *pte;
+
+ pgd = pgd_offset_k(end_address);
+ if (pgd_none(*pgd)) {
+ end_address += PGDIR_SIZE;
+ continue;
+ }
+
+ pud = pud_offset(pgd, end_address);
+ if (pud_none(*pud)) {
+ end_address += PUD_SIZE;
+ continue;
+ }
+
+ pmd = pmd_offset(pud, end_address);
+ if (pmd_none(*pmd)) {
+ end_address += PMD_SIZE;
+ continue;
+ }
+
+ pte = pte_offset_kernel(pmd, end_address);
+retry_pte:
+ if (pte_none(*pte)) {
+ end_address += PAGE_SIZE;
+ pte++;
+ if ((end_address < stop_address) &&
+ (end_address != ALIGN(end_address, 1UL << PMD_SHIFT)))
+ goto retry_pte;
+ continue;
+ }
+ /* Found next valid vmem_map page */
+ break;
+ } while (end_address < stop_address);
+
+ end_address = min(end_address, stop_address);
+ end_address = end_address - (unsigned long) vmem_map + sizeof(struct page) - 1;
+ hole_next_pfn = end_address / sizeof(struct page);
+ return hole_next_pfn - pgdat->node_start_pfn;
+}
int __init
create_mem_map_page_table (u64 start, u64 end, void *arg)
diff --git a/arch/ia64/mm/ioremap.c b/arch/ia64/mm/ioremap.c
index 07bd02b..4280c07 100644
--- a/arch/ia64/mm/ioremap.c
+++ b/arch/ia64/mm/ioremap.c
@@ -32,7 +32,7 @@
*/
attr = kern_mem_attribute(offset, size);
if (attr & EFI_MEMORY_WB)
- return phys_to_virt(offset);
+ return (void __iomem *) phys_to_virt(offset);
else if (attr & EFI_MEMORY_UC)
return __ioremap(offset, size);
@@ -43,7 +43,7 @@
gran_base = GRANULEROUNDDOWN(offset);
gran_size = GRANULEROUNDUP(offset + size) - gran_base;
if (efi_mem_attribute(gran_base, gran_size) & EFI_MEMORY_WB)
- return phys_to_virt(offset);
+ return (void __iomem *) phys_to_virt(offset);
return __ioremap(offset, size);
}
@@ -53,7 +53,7 @@
ioremap_nocache (unsigned long offset, unsigned long size)
{
if (kern_mem_attribute(offset, size) & EFI_MEMORY_WB)
- return 0;
+ return NULL;
return __ioremap(offset, size);
}
diff --git a/arch/ia64/sn/kernel/xpc_main.c b/arch/ia64/sn/kernel/xpc_main.c
index 99b123a..5e8e59e 100644
--- a/arch/ia64/sn/kernel/xpc_main.c
+++ b/arch/ia64/sn/kernel/xpc_main.c
@@ -480,7 +480,7 @@
partid_t partid = (u64) __partid;
struct xpc_partition *part = &xpc_partitions[partid];
unsigned long irq_flags;
- struct sched_param param = { sched_priority: MAX_RT_PRIO - 1 };
+ struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };
int ret;
diff --git a/arch/ia64/sn/pci/tioce_provider.c b/arch/ia64/sn/pci/tioce_provider.c
index 17cd342..af7171a 100644
--- a/arch/ia64/sn/pci/tioce_provider.c
+++ b/arch/ia64/sn/pci/tioce_provider.c
@@ -74,7 +74,7 @@
else
mmr_war_offset = 0x158;
- readq_relaxed((void *)(mmr_base + mmr_war_offset));
+ readq_relaxed((void __iomem *)(mmr_base + mmr_war_offset));
}
}
@@ -92,8 +92,8 @@
if (mmr_offset < 0x45000) {
if (mmr_offset == 0x100)
- readq_relaxed((void *)(mmr_base + 0x38));
- readq_relaxed((void *)(mmr_base + 0xb050));
+ readq_relaxed((void __iomem *)(mmr_base + 0x38));
+ readq_relaxed((void __iomem *)(mmr_base + 0xb050));
}
}
diff --git a/arch/sh/kernel/cpu/sh4/sq.c b/arch/sh/kernel/cpu/sh4/sq.c
index 781dbb1..b09805f 100644
--- a/arch/sh/kernel/cpu/sh4/sq.c
+++ b/arch/sh/kernel/cpu/sh4/sq.c
@@ -421,18 +421,22 @@
static int __init sq_api_init(void)
{
+ int ret;
printk(KERN_NOTICE "sq: Registering store queue API.\n");
-#ifdef CONFIG_PROC_FS
create_proc_read_entry("sq_mapping", 0, 0, sq_mapping_read_proc, 0);
-#endif
- return misc_register(&sq_dev);
+ ret = misc_register(&sq_dev);
+ if (ret)
+ remove_proc_entry("sq_mapping", NULL);
+
+ return ret;
}
static void __exit sq_api_exit(void)
{
misc_deregister(&sq_dev);
+ remove_proc_entry("sq_mapping", NULL);
}
module_init(sq_api_init);
diff --git a/arch/x86_64/kernel/smp.c b/arch/x86_64/kernel/smp.c
index 5a1c0a3..06af6ca 100644
--- a/arch/x86_64/kernel/smp.c
+++ b/arch/x86_64/kernel/smp.c
@@ -203,7 +203,7 @@
{
int i;
for_each_cpu_mask(i, cpu_possible_map) {
- spin_lock_init(&per_cpu(flush_state.tlbstate_lock, i));
+ spin_lock_init(&per_cpu(flush_state, i).tlbstate_lock);
}
return 0;
}
diff --git a/drivers/acpi/acpi_memhotplug.c b/drivers/acpi/acpi_memhotplug.c
index 81e970a..b0d4b14 100644
--- a/drivers/acpi/acpi_memhotplug.c
+++ b/drivers/acpi/acpi_memhotplug.c
@@ -129,11 +129,15 @@
struct acpi_memory_info *info, *n;
+ if (!list_empty(&mem_device->res_list))
+ return 0;
+
status = acpi_walk_resources(mem_device->device->handle, METHOD_NAME__CRS,
acpi_memory_get_resource, mem_device);
if (ACPI_FAILURE(status)) {
list_for_each_entry_safe(info, n, &mem_device->res_list, list)
kfree(info);
+ INIT_LIST_HEAD(&mem_device->res_list);
return -EINVAL;
}
@@ -230,17 +234,10 @@
* (i.e. memory-hot-remove function)
*/
list_for_each_entry(info, &mem_device->res_list, list) {
- u64 start_pfn, end_pfn;
-
- start_pfn = info->start_addr >> PAGE_SHIFT;
- end_pfn = (info->start_addr + info->length - 1) >> PAGE_SHIFT;
-
- if (pfn_valid(start_pfn) || pfn_valid(end_pfn)) {
- /* already enabled. try next area */
+ if (info->enabled) { /* just sanity check...*/
num_enabled++;
continue;
}
-
result = add_memory(node, info->start_addr, info->length);
if (result)
continue;
diff --git a/drivers/acpi/dock.c b/drivers/acpi/dock.c
index 1c0a39d..578b99b 100644
--- a/drivers/acpi/dock.c
+++ b/drivers/acpi/dock.c
@@ -58,8 +58,8 @@
};
#define DOCK_DOCKING 0x00000001
-#define DOCK_EVENT KOBJ_DOCK
-#define UNDOCK_EVENT KOBJ_UNDOCK
+#define DOCK_EVENT 3
+#define UNDOCK_EVENT 2
static struct dock_station *dock_station;
@@ -322,11 +322,10 @@
static void dock_event(struct dock_station *ds, u32 event, int num)
{
- struct acpi_device *device;
-
- device = dock_create_acpi_device(ds->handle);
- if (device)
- kobject_uevent(&device->kobj, num);
+ /*
+	 * we don't generate dock events until someone tells us
+	 * they would like to have them.
+ */
}
/**
diff --git a/drivers/char/hvsi.c b/drivers/char/hvsi.c
index 41db806..017f755 100644
--- a/drivers/char/hvsi.c
+++ b/drivers/char/hvsi.c
@@ -311,7 +311,8 @@
/* CD went away; no more connection */
pr_debug("hvsi%i: CD dropped\n", hp->index);
hp->mctrl &= TIOCM_CD;
- if (!(hp->tty->flags & CLOCAL))
+ /* If userland hasn't done an open(2) yet, hp->tty is NULL. */
+ if (hp->tty && !(hp->tty->flags & CLOCAL))
*to_hangup = hp->tty;
}
break;
@@ -986,10 +987,7 @@
start_j = 0;
#endif /* DEBUG */
wake_up_all(&hp->emptyq);
- if (test_bit(TTY_DO_WRITE_WAKEUP, &hp->tty->flags)
- && hp->tty->ldisc.write_wakeup)
- hp->tty->ldisc.write_wakeup(hp->tty);
- wake_up_interruptible(&hp->tty->write_wait);
+ tty_wakeup(hp->tty);
}
out:
diff --git a/drivers/char/hw_random/omap-rng.c b/drivers/char/hw_random/omap-rng.c
index 819516b..a01d796 100644
--- a/drivers/char/hw_random/omap-rng.c
+++ b/drivers/char/hw_random/omap-rng.c
@@ -25,12 +25,12 @@
#include <linux/module.h>
#include <linux/init.h>
#include <linux/random.h>
+#include <linux/clk.h>
#include <linux/err.h>
-#include <linux/device.h>
+#include <linux/platform_device.h>
#include <linux/hw_random.h>
#include <asm/io.h>
-#include <asm/hardware/clock.h>
#define RNG_OUT_REG 0x00 /* Output register */
#define RNG_STAT_REG 0x04 /* Status register
@@ -52,7 +52,7 @@
static void __iomem *rng_base;
static struct clk *rng_ick;
-static struct device *rng_dev;
+static struct platform_device *rng_dev;
static u32 omap_rng_read_reg(int reg)
{
@@ -83,9 +83,8 @@
.data_read = omap_rng_data_read,
};
-static int __init omap_rng_probe(struct device *dev)
+static int __init omap_rng_probe(struct platform_device *pdev)
{
- struct platform_device *pdev = to_platform_device(dev);
struct resource *res, *mem;
int ret;
@@ -95,16 +94,14 @@
*/
BUG_ON(rng_dev);
- if (cpu_is_omap24xx()) {
+ if (cpu_is_omap24xx()) {
rng_ick = clk_get(NULL, "rng_ick");
if (IS_ERR(rng_ick)) {
- dev_err(dev, "Could not get rng_ick\n");
+ dev_err(&pdev->dev, "Could not get rng_ick\n");
ret = PTR_ERR(rng_ick);
return ret;
- }
- else {
- clk_use(rng_ick);
- }
+ } else
+ clk_enable(rng_ick);
}
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -117,7 +114,7 @@
if (mem == NULL)
return -EBUSY;
- dev_set_drvdata(dev, mem);
+ dev_set_drvdata(&pdev->dev, mem);
rng_base = (u32 __iomem *)io_p2v(res->start);
ret = hwrng_register(&omap_rng_ops);
@@ -127,25 +124,25 @@
return ret;
}
- dev_info(dev, "OMAP Random Number Generator ver. %02x\n",
+ dev_info(&pdev->dev, "OMAP Random Number Generator ver. %02x\n",
omap_rng_read_reg(RNG_REV_REG));
omap_rng_write_reg(RNG_MASK_REG, 0x1);
- rng_dev = dev;
+ rng_dev = pdev;
return 0;
}
-static int __exit omap_rng_remove(struct device *dev)
+static int __exit omap_rng_remove(struct platform_device *pdev)
{
- struct resource *mem = dev_get_drvdata(dev);
+ struct resource *mem = dev_get_drvdata(&pdev->dev);
hwrng_unregister(&omap_rng_ops);
omap_rng_write_reg(RNG_MASK_REG, 0x0);
if (cpu_is_omap24xx()) {
- clk_unuse(rng_ick);
+ clk_disable(rng_ick);
clk_put(rng_ick);
}
@@ -157,18 +154,16 @@
#ifdef CONFIG_PM
-static int omap_rng_suspend(struct device *dev, pm_message_t message, u32 level)
+static int omap_rng_suspend(struct platform_device *pdev, pm_message_t message)
{
omap_rng_write_reg(RNG_MASK_REG, 0x0);
-
return 0;
}
-static int omap_rng_resume(struct device *dev, pm_message_t message, u32 level)
+static int omap_rng_resume(struct platform_device *pdev)
{
omap_rng_write_reg(RNG_MASK_REG, 0x1);
-
- return 1;
+ return 0;
}
#else
@@ -179,9 +174,11 @@
#endif
-static struct device_driver omap_rng_driver = {
- .name = "omap_rng",
- .bus = &platform_bus_type,
+static struct platform_driver omap_rng_driver = {
+ .driver = {
+ .name = "omap_rng",
+ .owner = THIS_MODULE,
+ },
.probe = omap_rng_probe,
.remove = __exit_p(omap_rng_remove),
.suspend = omap_rng_suspend,
@@ -193,12 +190,12 @@
if (!cpu_is_omap16xx() && !cpu_is_omap24xx())
return -ENODEV;
- return driver_register(&omap_rng_driver);
+ return platform_driver_register(&omap_rng_driver);
}
static void __exit omap_rng_exit(void)
{
- driver_unregister(&omap_rng_driver);
+ platform_driver_unregister(&omap_rng_driver);
}
module_init(omap_rng_init);
diff --git a/drivers/char/snsc.c b/drivers/char/snsc.c
index afc6eda..07e0b75 100644
--- a/drivers/char/snsc.c
+++ b/drivers/char/snsc.c
@@ -374,7 +374,12 @@
struct sysctl_data_s *scd;
void *salbuf;
dev_t first_dev, dev;
- nasid_t event_nasid = ia64_sn_get_console_nasid();
+ nasid_t event_nasid;
+
+ if (!ia64_platform_is("sn2"))
+ return -ENODEV;
+
+ event_nasid = ia64_sn_get_console_nasid();
if (alloc_chrdev_region(&first_dev, 0, num_cnodes,
SYSCTL_BASENAME) < 0) {
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index bc1088d..b3df613 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -284,39 +284,69 @@
* SYSFS INTERFACE *
*********************************************************************/
+static struct cpufreq_governor *__find_governor(const char *str_governor)
+{
+ struct cpufreq_governor *t;
+
+ list_for_each_entry(t, &cpufreq_governor_list, governor_list)
+ if (!strnicmp(str_governor,t->name,CPUFREQ_NAME_LEN))
+ return t;
+
+ return NULL;
+}
+
/**
* cpufreq_parse_governor - parse a governor string
*/
static int cpufreq_parse_governor (char *str_governor, unsigned int *policy,
struct cpufreq_governor **governor)
{
+ int err = -EINVAL;
+
if (!cpufreq_driver)
- return -EINVAL;
+ goto out;
+
if (cpufreq_driver->setpolicy) {
if (!strnicmp(str_governor, "performance", CPUFREQ_NAME_LEN)) {
*policy = CPUFREQ_POLICY_PERFORMANCE;
- return 0;
+ err = 0;
} else if (!strnicmp(str_governor, "powersave", CPUFREQ_NAME_LEN)) {
*policy = CPUFREQ_POLICY_POWERSAVE;
- return 0;
+ err = 0;
}
- return -EINVAL;
- } else {
+ } else if (cpufreq_driver->target) {
struct cpufreq_governor *t;
+
mutex_lock(&cpufreq_governor_mutex);
- if (!cpufreq_driver || !cpufreq_driver->target)
- goto out;
- list_for_each_entry(t, &cpufreq_governor_list, governor_list) {
- if (!strnicmp(str_governor,t->name,CPUFREQ_NAME_LEN)) {
- *governor = t;
+
+ t = __find_governor(str_governor);
+
+ if (t == NULL) {
+ char *name = kasprintf(GFP_KERNEL, "cpufreq_%s", str_governor);
+
+ if (name) {
+ int ret;
+
mutex_unlock(&cpufreq_governor_mutex);
- return 0;
+ ret = request_module(name);
+ mutex_lock(&cpufreq_governor_mutex);
+
+ if (ret == 0)
+ t = __find_governor(str_governor);
}
+
+ kfree(name);
}
-out:
+
+ if (t != NULL) {
+ *governor = t;
+ err = 0;
+ }
+
mutex_unlock(&cpufreq_governor_mutex);
}
- return -EINVAL;
+ out:
+ return err;
}
@@ -1265,23 +1295,21 @@
int cpufreq_register_governor(struct cpufreq_governor *governor)
{
- struct cpufreq_governor *t;
+ int err;
if (!governor)
return -EINVAL;
mutex_lock(&cpufreq_governor_mutex);
- list_for_each_entry(t, &cpufreq_governor_list, governor_list) {
- if (!strnicmp(governor->name,t->name,CPUFREQ_NAME_LEN)) {
- mutex_unlock(&cpufreq_governor_mutex);
- return -EBUSY;
- }
+ err = -EBUSY;
+ if (__find_governor(governor->name) == NULL) {
+ err = 0;
+ list_add(&governor->governor_list, &cpufreq_governor_list);
}
- list_add(&governor->governor_list, &cpufreq_governor_list);
mutex_unlock(&cpufreq_governor_mutex);
- return 0;
+ return err;
}
EXPORT_SYMBOL_GPL(cpufreq_register_governor);
@@ -1343,6 +1371,11 @@
memcpy(&policy->cpuinfo, &data->cpuinfo, sizeof(struct cpufreq_cpuinfo));
+ if (policy->min > data->min && policy->min > policy->max) {
+ ret = -EINVAL;
+ goto error_out;
+ }
+
/* verify the cpu speed can be set within this limit */
ret = cpufreq_driver->verify(policy);
if (ret)
diff --git a/drivers/edac/edac_mc.h b/drivers/edac/edac_mc.h
index bf6ab8a..a1cfd4e 100644
--- a/drivers/edac/edac_mc.h
+++ b/drivers/edac/edac_mc.h
@@ -29,6 +29,7 @@
#include <linux/rcupdate.h>
#include <linux/completion.h>
#include <linux/kobject.h>
+#include <linux/platform_device.h>
#define EDAC_MC_LABEL_LEN 31
#define MC_PROC_NAME_MAX_LEN 7
diff --git a/drivers/i2c/busses/scx200_acb.c b/drivers/i2c/busses/scx200_acb.c
index ced309f..eae9e81 100644
--- a/drivers/i2c/busses/scx200_acb.c
+++ b/drivers/i2c/busses/scx200_acb.c
@@ -232,7 +232,7 @@
unsigned long timeout;
timeout = jiffies + POLL_TIMEOUT;
- while (time_before(jiffies, timeout)) {
+ while (1) {
status = inb(ACBST);
/* Reset the status register to avoid the hang */
@@ -242,7 +242,10 @@
scx200_acb_machine(iface, status);
return;
}
- yield();
+ if (time_after(jiffies, timeout))
+ break;
+ cpu_relax();
+ cond_resched();
}
dev_err(&iface->adapter.dev, "timeout in state %s\n",
diff --git a/drivers/ieee1394/sbp2.c b/drivers/ieee1394/sbp2.c
index aaa74f2..b08755e 100644
--- a/drivers/ieee1394/sbp2.c
+++ b/drivers/ieee1394/sbp2.c
@@ -2515,6 +2515,9 @@
sdev->skip_ms_page_8 = 1;
if (scsi_id->workarounds & SBP2_WORKAROUND_FIX_CAPACITY)
sdev->fix_capacity = 1;
+ if (scsi_id->ne->guid_vendor_id == 0x0010b9 && /* Maxtor's OUI */
+ (sdev->type == TYPE_DISK || sdev->type == TYPE_RBC))
+ sdev->allow_restart = 1;
return 0;
}
diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
index f85c97f..0de335b 100644
--- a/drivers/infiniband/core/cm.c
+++ b/drivers/infiniband/core/cm.c
@@ -975,8 +975,10 @@
cm_id_priv->timewait_info = cm_create_timewait_info(cm_id_priv->
id.local_id);
- if (IS_ERR(cm_id_priv->timewait_info))
+ if (IS_ERR(cm_id_priv->timewait_info)) {
+ ret = PTR_ERR(cm_id_priv->timewait_info);
goto out;
+ }
ret = cm_init_av_by_path(param->primary_path, &cm_id_priv->av);
if (ret)
diff --git a/drivers/infiniband/core/uverbs.h b/drivers/infiniband/core/uverbs.h
index bb9bee5..102a59c 100644
--- a/drivers/infiniband/core/uverbs.h
+++ b/drivers/infiniband/core/uverbs.h
@@ -42,6 +42,7 @@
#include <linux/kref.h>
#include <linux/idr.h>
#include <linux/mutex.h>
+#include <linux/completion.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_user_verbs.h>
@@ -69,6 +70,7 @@
struct ib_uverbs_device {
struct kref ref;
+ struct completion comp;
int devnum;
struct cdev *dev;
struct class_device *class_dev;
diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
index e725ccc..4e16314 100644
--- a/drivers/infiniband/core/uverbs_main.c
+++ b/drivers/infiniband/core/uverbs_main.c
@@ -122,7 +122,7 @@
struct ib_uverbs_device *dev =
container_of(ref, struct ib_uverbs_device, ref);
- kfree(dev);
+ complete(&dev->comp);
}
void ib_uverbs_release_ucq(struct ib_uverbs_file *file,
@@ -740,6 +740,7 @@
return;
kref_init(&uverbs_dev->ref);
+ init_completion(&uverbs_dev->comp);
spin_lock(&map_lock);
uverbs_dev->devnum = find_first_zero_bit(dev_map, IB_UVERBS_MAX_DEVICES);
@@ -793,6 +794,8 @@
err:
kref_put(&uverbs_dev->ref, ib_uverbs_release_dev);
+ wait_for_completion(&uverbs_dev->comp);
+ kfree(uverbs_dev);
return;
}
@@ -812,7 +815,10 @@
spin_unlock(&map_lock);
clear_bit(uverbs_dev->devnum, dev_map);
+
kref_put(&uverbs_dev->ref, ib_uverbs_release_dev);
+ wait_for_completion(&uverbs_dev->comp);
+ kfree(uverbs_dev);
}
static int uverbs_event_get_sb(struct file_system_type *fs_type, int flags,
diff --git a/drivers/infiniband/hw/mthca/mthca_allocator.c b/drivers/infiniband/hw/mthca/mthca_allocator.c
index 9ba3211..25157f5 100644
--- a/drivers/infiniband/hw/mthca/mthca_allocator.c
+++ b/drivers/infiniband/hw/mthca/mthca_allocator.c
@@ -108,14 +108,15 @@
* serialize access to the array.
*/
+#define MTHCA_ARRAY_MASK (PAGE_SIZE / sizeof (void *) - 1)
+
void *mthca_array_get(struct mthca_array *array, int index)
{
int p = (index * sizeof (void *)) >> PAGE_SHIFT;
- if (array->page_list[p].page) {
- int i = index & (PAGE_SIZE / sizeof (void *) - 1);
- return array->page_list[p].page[i];
- } else
+ if (array->page_list[p].page)
+ return array->page_list[p].page[index & MTHCA_ARRAY_MASK];
+ else
return NULL;
}
@@ -130,8 +131,7 @@
if (!array->page_list[p].page)
return -ENOMEM;
- array->page_list[p].page[index & (PAGE_SIZE / sizeof (void *) - 1)] =
- value;
+ array->page_list[p].page[index & MTHCA_ARRAY_MASK] = value;
++array->page_list[p].used;
return 0;
@@ -144,7 +144,8 @@
if (--array->page_list[p].used == 0) {
free_page((unsigned long) array->page_list[p].page);
array->page_list[p].page = NULL;
- }
+ } else
+ array->page_list[p].page[index & MTHCA_ARRAY_MASK] = NULL;
if (array->page_list[p].used < 0)
pr_debug("Array %p index %d page %d with ref count %d < 0\n",
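As an illustrative aside (not part of the patch above), the MTHCA_ARRAY_MASK change splits a flat index into a page number and a slot within that page. Below is a standalone sketch of that two-level arithmetic, assuming a 4 KiB page and pointer-sized slots purely for illustration.

#include <stdio.h>

#define SKETCH_PAGE_SIZE 4096UL			/* assumed page size, illustration only */
#define SLOT_SIZE sizeof(void *)		/* one pointer per slot */
#define SKETCH_ARRAY_MASK (SKETCH_PAGE_SIZE / SLOT_SIZE - 1)

int main(void)
{
	unsigned long index = 1234;
	/* Page holding this slot: byte offset of the slot divided by the page size. */
	unsigned long page = (index * SLOT_SIZE) / SKETCH_PAGE_SIZE;
	/* Slot within that page: low bits of the index, as MTHCA_ARRAY_MASK selects. */
	unsigned long slot = index & SKETCH_ARRAY_MASK;

	printf("index %lu -> page %lu, slot %lu\n", index, page, slot);
	return 0;
}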
diff --git a/drivers/infiniband/ulp/ipoib/Kconfig b/drivers/infiniband/ulp/ipoib/Kconfig
index 13d6d01..d74653d 100644
--- a/drivers/infiniband/ulp/ipoib/Kconfig
+++ b/drivers/infiniband/ulp/ipoib/Kconfig
@@ -6,8 +6,7 @@
transports IP packets over InfiniBand so you can use your IB
device as a fancy NIC.
- The IPoIB protocol is defined by the IETF ipoib working
- group: <http://www.ietf.org/html.charters/ipoib-charter.html>.
+ See Documentation/infiniband/ipoib.txt for more information
config INFINIBAND_IPOIB_DEBUG
bool "IP-over-InfiniBand debugging" if EMBEDDED
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index 8f472e7..8257d5a 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -77,6 +77,14 @@
static const u8 topspin_oui[3] = { 0x00, 0x05, 0xad };
+static int mellanox_workarounds = 1;
+
+module_param(mellanox_workarounds, int, 0444);
+MODULE_PARM_DESC(mellanox_workarounds,
+ "Enable workarounds for Mellanox SRP target bugs if != 0");
+
+static const u8 mellanox_oui[3] = { 0x00, 0x02, 0xc9 };
+
static void srp_add_one(struct ib_device *device);
static void srp_remove_one(struct ib_device *device);
static void srp_completion(struct ib_cq *cq, void *target_ptr);
@@ -526,8 +534,10 @@
while (ib_poll_cq(target->cq, 1, &wc) > 0)
; /* nothing */
+ spin_lock_irq(target->scsi_host->host_lock);
list_for_each_entry_safe(req, tmp, &target->req_queue, list)
srp_reset_req(target, req);
+ spin_unlock_irq(target->scsi_host->host_lock);
target->rx_head = 0;
target->tx_head = 0;
@@ -567,7 +577,7 @@
return ret;
}
-static int srp_map_fmr(struct srp_device *dev, struct scatterlist *scat,
+static int srp_map_fmr(struct srp_target_port *target, struct scatterlist *scat,
int sg_cnt, struct srp_request *req,
struct srp_direct_buf *buf)
{
@@ -577,10 +587,15 @@
int page_cnt;
int i, j;
int ret;
+ struct srp_device *dev = target->srp_host->dev;
if (!dev->fmr_pool)
return -ENODEV;
+ if ((sg_dma_address(&scat[0]) & ~dev->fmr_page_mask) &&
+ mellanox_workarounds && !memcmp(&target->ioc_guid, mellanox_oui, 3))
+ return -EINVAL;
+
len = page_cnt = 0;
for (i = 0; i < sg_cnt; ++i) {
if (sg_dma_address(&scat[i]) & ~dev->fmr_page_mask) {
@@ -683,7 +698,7 @@
buf->va = cpu_to_be64(sg_dma_address(scat));
buf->key = cpu_to_be32(target->srp_host->dev->mr->rkey);
buf->len = cpu_to_be32(sg_dma_len(scat));
- } else if (srp_map_fmr(target->srp_host->dev, scat, count, req,
+ } else if (srp_map_fmr(target, scat, count, req,
(void *) cmd->add_data)) {
/*
* FMR mapping failed, and the scatterlist has more
diff --git a/drivers/isdn/hardware/eicon/divasync.h b/drivers/isdn/hardware/eicon/divasync.h
index 0a5be7f..af3eb9e 100644
--- a/drivers/isdn/hardware/eicon/divasync.h
+++ b/drivers/isdn/hardware/eicon/divasync.h
@@ -256,7 +256,6 @@
#define NO_ORDER_CHECK_MASK 0x00000010
#define LOW_CHANNEL_MASK 0x00000020
#define NO_HSCX30_MASK 0x00000040
-#define MODE_MASK 0x00000080
#define SET_BOARD 0x00001000
#define SET_CRC4 0x00030000
#define SET_L1_TRISTATE 0x00040000
diff --git a/drivers/md/linear.c b/drivers/md/linear.c
index ff83c9b..b99c19c 100644
--- a/drivers/md/linear.c
+++ b/drivers/md/linear.c
@@ -162,7 +162,7 @@
goto out;
}
- min_spacing = mddev->array_size;
+ min_spacing = conf->array_size;
sector_div(min_spacing, PAGE_SIZE/sizeof(struct dev_info *));
/* min_spacing is the minimum spacing that will fit the hash
@@ -171,7 +171,7 @@
* that is larger than min_spacing as use the size of that as
* the actual spacing
*/
- conf->hash_spacing = mddev->array_size;
+ conf->hash_spacing = conf->array_size;
for (i=0; i < cnt-1 ; i++) {
sector_t sz = 0;
int j;
@@ -228,7 +228,7 @@
curr_offset = 0;
i = 0;
for (curr_offset = 0;
- curr_offset < mddev->array_size;
+ curr_offset < conf->array_size;
curr_offset += conf->hash_spacing) {
while (i < mddev->raid_disks-1 &&
diff --git a/drivers/net/myri10ge/myri10ge.c b/drivers/net/myri10ge/myri10ge.c
index c3e52c8..06440a8 100644
--- a/drivers/net/myri10ge/myri10ge.c
+++ b/drivers/net/myri10ge/myri10ge.c
@@ -177,6 +177,7 @@
struct work_struct watchdog_work;
struct timer_list watchdog_timer;
int watchdog_tx_done;
+ int watchdog_tx_req;
int watchdog_resets;
int tx_linearized;
int pause;
@@ -448,6 +449,7 @@
struct mcp_gen_header *hdr;
size_t hdr_offset;
int status;
+ unsigned i;
if ((status = request_firmware(&fw, mgp->fw_name, dev)) < 0) {
dev_err(dev, "Unable to load %s firmware image via hotplug\n",
@@ -479,18 +481,12 @@
goto abort_with_fw;
crc = crc32(~0, fw->data, fw->size);
- if (mgp->tx.boundary == 2048) {
- /* Avoid PCI burst on chipset with unaligned completions. */
- int i;
- __iomem u32 *ptr = (__iomem u32 *) (mgp->sram +
- MYRI10GE_FW_OFFSET);
- for (i = 0; i < fw->size / 4; i++) {
- __raw_writel(((u32 *) fw->data)[i], ptr + i);
- wmb();
- }
- } else {
- myri10ge_pio_copy(mgp->sram + MYRI10GE_FW_OFFSET, fw->data,
- fw->size);
+ for (i = 0; i < fw->size; i += 256) {
+ myri10ge_pio_copy(mgp->sram + MYRI10GE_FW_OFFSET + i,
+ fw->data + i,
+ min(256U, (unsigned)(fw->size - i)));
+ mb();
+ readb(mgp->sram);
}
/* corruption checking is good for parity recovery and buggy chipset */
memcpy_fromio(fw->data, mgp->sram + MYRI10GE_FW_OFFSET, fw->size);
@@ -2547,7 +2543,8 @@
mgp = (struct myri10ge_priv *)arg;
if (mgp->tx.req != mgp->tx.done &&
- mgp->tx.done == mgp->watchdog_tx_done)
+ mgp->tx.done == mgp->watchdog_tx_done &&
+ mgp->watchdog_tx_req != mgp->watchdog_tx_done)
/* nic seems like it might be stuck.. */
schedule_work(&mgp->watchdog_work);
else
@@ -2556,6 +2553,7 @@
jiffies + myri10ge_watchdog_timeout * HZ);
mgp->watchdog_tx_done = mgp->tx.done;
+ mgp->watchdog_tx_req = mgp->tx.req;
}
static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index 7d5c223..f5aad77 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -419,9 +419,8 @@
/* phy_stop_machine
*
- * description: Stops the state machine timer, sets the state to
- * UP (unless it wasn't up yet), and then frees the interrupt,
- * if it is in use. This function must be called BEFORE
+ * description: Stops the state machine timer, sets the state to UP
+ * (unless it wasn't up yet). This function must be called BEFORE
* phy_detach.
*/
void phy_stop_machine(struct phy_device *phydev)
@@ -433,9 +432,6 @@
phydev->state = PHY_UP;
spin_unlock(&phydev->lock);
- if (phydev->irq != PHY_POLL)
- phy_stop_interrupts(phydev);
-
phydev->adjust_state = NULL;
}
diff --git a/drivers/net/s2io.c b/drivers/net/s2io.c
index e1fe3a0..132ed32 100644
--- a/drivers/net/s2io.c
+++ b/drivers/net/s2io.c
@@ -76,7 +76,7 @@
#include "s2io.h"
#include "s2io-regs.h"
-#define DRV_VERSION "2.0.14.2"
+#define DRV_VERSION "2.0.15.2"
/* S2io Driver name & version. */
static char s2io_driver_name[] = "Neterion";
@@ -370,38 +370,50 @@
END_SIGN
};
+MODULE_AUTHOR("Raghavendra Koushik <raghavendra.koushik@neterion.com>");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_VERSION);
+
+
/* Module Loadable parameters. */
-static unsigned int tx_fifo_num = 1;
+S2IO_PARM_INT(tx_fifo_num, 1);
+S2IO_PARM_INT(rx_ring_num, 1);
+
+
+S2IO_PARM_INT(rx_ring_mode, 1);
+S2IO_PARM_INT(use_continuous_tx_intrs, 1);
+S2IO_PARM_INT(rmac_pause_time, 0x100);
+S2IO_PARM_INT(mc_pause_threshold_q0q3, 187);
+S2IO_PARM_INT(mc_pause_threshold_q4q7, 187);
+S2IO_PARM_INT(shared_splits, 0);
+S2IO_PARM_INT(tmac_util_period, 5);
+S2IO_PARM_INT(rmac_util_period, 5);
+S2IO_PARM_INT(bimodal, 0);
+S2IO_PARM_INT(l3l4hdr_size, 128);
+/* Frequency of Rx desc syncs expressed as power of 2 */
+S2IO_PARM_INT(rxsync_frequency, 3);
+/* Interrupt type. Values can be 0(INTA), 1(MSI), 2(MSI_X) */
+S2IO_PARM_INT(intr_type, 0);
+/* Large receive offload feature */
+S2IO_PARM_INT(lro, 0);
+/* Max pkts to be aggregated by LRO at one time. If not specified,
+ * aggregation happens until we hit max IP pkt size(64K)
+ */
+S2IO_PARM_INT(lro_max_pkts, 0xFFFF);
+#ifndef CONFIG_S2IO_NAPI
+S2IO_PARM_INT(indicate_max_pkts, 0);
+#endif
+
static unsigned int tx_fifo_len[MAX_TX_FIFOS] =
{DEFAULT_FIFO_0_LEN, [1 ...(MAX_TX_FIFOS - 1)] = DEFAULT_FIFO_1_7_LEN};
-static unsigned int rx_ring_num = 1;
static unsigned int rx_ring_sz[MAX_RX_RINGS] =
{[0 ...(MAX_RX_RINGS - 1)] = SMALL_BLK_CNT};
static unsigned int rts_frm_len[MAX_RX_RINGS] =
{[0 ...(MAX_RX_RINGS - 1)] = 0 };
-static unsigned int rx_ring_mode = 1;
-static unsigned int use_continuous_tx_intrs = 1;
-static unsigned int rmac_pause_time = 0x100;
-static unsigned int mc_pause_threshold_q0q3 = 187;
-static unsigned int mc_pause_threshold_q4q7 = 187;
-static unsigned int shared_splits;
-static unsigned int tmac_util_period = 5;
-static unsigned int rmac_util_period = 5;
-static unsigned int bimodal = 0;
-static unsigned int l3l4hdr_size = 128;
-#ifndef CONFIG_S2IO_NAPI
-static unsigned int indicate_max_pkts;
-#endif
-/* Frequency of Rx desc syncs expressed as power of 2 */
-static unsigned int rxsync_frequency = 3;
-/* Interrupt type. Values can be 0(INTA), 1(MSI), 2(MSI_X) */
-static unsigned int intr_type = 0;
-/* Large receive offload feature */
-static unsigned int lro = 0;
-/* Max pkts to be aggregated by LRO at one time. If not specified,
- * aggregation happens until we hit max IP pkt size(64K)
- */
-static unsigned int lro_max_pkts = 0xFFFF;
+
+module_param_array(tx_fifo_len, uint, NULL, 0);
+module_param_array(rx_ring_sz, uint, NULL, 0);
+module_param_array(rts_frm_len, uint, NULL, 0);
/*
* S2IO device table.
@@ -464,10 +476,9 @@
size += config->tx_cfg[i].fifo_len;
}
if (size > MAX_AVAILABLE_TXDS) {
- DBG_PRINT(ERR_DBG, "%s: Requested TxDs too high, ",
- __FUNCTION__);
+ DBG_PRINT(ERR_DBG, "s2io: Requested TxDs too high, ");
DBG_PRINT(ERR_DBG, "Requested: %d, max supported: 8192\n", size);
- return FAILURE;
+ return -EINVAL;
}
lst_size = (sizeof(TxD_t) * config->max_txds);
@@ -547,6 +558,7 @@
nic->ufo_in_band_v = kmalloc((sizeof(u64) * size), GFP_KERNEL);
if (!nic->ufo_in_band_v)
return -ENOMEM;
+ memset(nic->ufo_in_band_v, 0, size);
/* Allocation and initialization of RXDs in Rings */
size = 0;
@@ -1213,7 +1225,7 @@
break;
}
- /* Enable Tx FIFO partition 0. */
+ /* Enable all configured Tx FIFO partitions */
val64 = readq(&bar0->tx_fifo_partition_0);
val64 |= (TX_FIFO_PARTITION_EN);
writeq(val64, &bar0->tx_fifo_partition_0);
@@ -1650,7 +1662,7 @@
writeq(temp64, &bar0->general_int_mask);
/*
* If Hercules adapter enable GPIO otherwise
- * disabled all PCIX, Flash, MDIO, IIC and GPIO
+ * disable all PCIX, Flash, MDIO, IIC and GPIO
* interrupts for now.
* TODO
*/
@@ -2119,7 +2131,7 @@
frag->size, PCI_DMA_TODEVICE);
}
}
- txdlp->Host_Control = 0;
+ memset(txdlp,0, (sizeof(TxD_t) * fifo_data->max_txds));
return(skb);
}
@@ -2371,9 +2383,14 @@
skb->data = (void *) (unsigned long)tmp;
skb->tail = (void *) (unsigned long)tmp;
- ((RxD3_t*)rxdp)->Buffer0_ptr =
- pci_map_single(nic->pdev, ba->ba_0, BUF0_LEN,
+ if (!(((RxD3_t*)rxdp)->Buffer0_ptr))
+ ((RxD3_t*)rxdp)->Buffer0_ptr =
+ pci_map_single(nic->pdev, ba->ba_0, BUF0_LEN,
PCI_DMA_FROMDEVICE);
+ else
+ pci_dma_sync_single_for_device(nic->pdev,
+ (dma_addr_t) ((RxD3_t*)rxdp)->Buffer0_ptr,
+ BUF0_LEN, PCI_DMA_FROMDEVICE);
rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN);
if (nic->rxd_mode == RXD_MODE_3B) {
/* Two buffer mode */
@@ -2386,10 +2403,13 @@
(nic->pdev, skb->data, dev->mtu + 4,
PCI_DMA_FROMDEVICE);
- /* Buffer-1 will be dummy buffer not used */
- ((RxD3_t*)rxdp)->Buffer1_ptr =
- pci_map_single(nic->pdev, ba->ba_1, BUF1_LEN,
- PCI_DMA_FROMDEVICE);
+ /* Buffer-1 will be dummy buffer. Not used */
+ if (!(((RxD3_t*)rxdp)->Buffer1_ptr)) {
+ ((RxD3_t*)rxdp)->Buffer1_ptr =
+ pci_map_single(nic->pdev,
+ ba->ba_1, BUF1_LEN,
+ PCI_DMA_FROMDEVICE);
+ }
rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1);
rxdp->Control_2 |= SET_BUFFER2_SIZE_3
(dev->mtu + 4);
@@ -2614,23 +2634,23 @@
}
#endif
+#ifdef CONFIG_NET_POLL_CONTROLLER
/**
- * s2io_netpoll - Rx interrupt service handler for netpoll support
+ * s2io_netpoll - netpoll event handler entry point
* @dev : pointer to the device structure.
* Description:
- * Polling 'interrupt' - used by things like netconsole to send skbs
- * without having to re-enable interrupts. It's not called while
- * the interrupt routine is executing.
+ * This function is called by the upper layer to check for events on the
+ * interface in situations where interrupts are disabled. It is used for
+ * specific in-kernel networking tasks, such as remote consoles and kernel
+ * debugging over the network (for example, netdump in Red Hat).
*/
-
-#ifdef CONFIG_NET_POLL_CONTROLLER
static void s2io_netpoll(struct net_device *dev)
{
nic_t *nic = dev->priv;
mac_info_t *mac_control;
struct config_param *config;
XENA_dev_config_t __iomem *bar0 = nic->bar0;
- u64 val64;
+ u64 val64 = 0xFFFFFFFFFFFFFFFFULL;
int i;
disable_irq(dev->irq);
@@ -2639,9 +2659,17 @@
mac_control = &nic->mac_control;
config = &nic->config;
- val64 = readq(&bar0->rx_traffic_int);
writeq(val64, &bar0->rx_traffic_int);
+ writeq(val64, &bar0->tx_traffic_int);
+ /* We need to free the transmitted skbs here, or else netpoll will
+ * run out of skbs and netpoll applications such as netdump will
+ * eventually fail.
+ */
+ for (i = 0; i < config->tx_fifo_num; i++)
+ tx_intr_handler(&mac_control->fifos[i]);
+
+ /* check for received packets and pass them up to the network stack */
for (i = 0; i < config->rx_ring_num; i++)
rx_intr_handler(&mac_control->rings[i]);
@@ -2708,7 +2736,7 @@
/* If you are next to the put index then it's a FIFO full condition */
if ((get_block == put_block) &&
(get_info.offset + 1) == put_info.offset) {
- DBG_PRINT(ERR_DBG, "%s: Ring Full\n",dev->name);
+ DBG_PRINT(INTR_DBG, "%s: Ring Full\n",dev->name);
break;
}
skb = (struct sk_buff *) ((unsigned long)rxdp->Host_Control);
@@ -2728,18 +2756,15 @@
HEADER_SNAP_SIZE,
PCI_DMA_FROMDEVICE);
} else if (nic->rxd_mode == RXD_MODE_3B) {
- pci_unmap_single(nic->pdev, (dma_addr_t)
+ pci_dma_sync_single_for_cpu(nic->pdev, (dma_addr_t)
((RxD3_t*)rxdp)->Buffer0_ptr,
BUF0_LEN, PCI_DMA_FROMDEVICE);
pci_unmap_single(nic->pdev, (dma_addr_t)
- ((RxD3_t*)rxdp)->Buffer1_ptr,
- BUF1_LEN, PCI_DMA_FROMDEVICE);
- pci_unmap_single(nic->pdev, (dma_addr_t)
((RxD3_t*)rxdp)->Buffer2_ptr,
dev->mtu + 4,
PCI_DMA_FROMDEVICE);
} else {
- pci_unmap_single(nic->pdev, (dma_addr_t)
+ pci_dma_sync_single_for_cpu(nic->pdev, (dma_addr_t)
((RxD3_t*)rxdp)->Buffer0_ptr, BUF0_LEN,
PCI_DMA_FROMDEVICE);
pci_unmap_single(nic->pdev, (dma_addr_t)
@@ -3327,7 +3352,7 @@
/* Clear certain PCI/PCI-X fields after reset */
if (sp->device_type == XFRAME_II_DEVICE) {
- /* Clear parity err detect bit */
+ /* Clear "detected parity error" bit */
pci_write_config_word(sp->pdev, PCI_STATUS, 0x8000);
/* Clearing PCIX Ecc status register */
@@ -3528,7 +3553,7 @@
u64 val64;
int i;
- for (i=0; i< nic->avail_msix_vectors; i++) {
+ for (i=0; i < MAX_REQUESTED_MSI_X; i++) {
writeq(nic->msix_info[i].addr, &bar0->xmsi_address);
writeq(nic->msix_info[i].data, &bar0->xmsi_data);
val64 = (BIT(7) | BIT(15) | vBIT(i, 26, 6));
@@ -3547,7 +3572,7 @@
int i;
/* Store and display */
- for (i=0; i< nic->avail_msix_vectors; i++) {
+ for (i=0; i < MAX_REQUESTED_MSI_X; i++) {
val64 = (BIT(15) | vBIT(i, 26, 6));
writeq(val64, &bar0->xmsi_access);
if (wait_for_msix_trans(nic, i)) {
@@ -3808,13 +3833,11 @@
TxD_t *txdp;
TxFIFO_element_t __iomem *tx_fifo;
unsigned long flags;
-#ifdef NETIF_F_TSO
- int mss;
-#endif
u16 vlan_tag = 0;
int vlan_priority = 0;
mac_info_t *mac_control;
struct config_param *config;
+ int offload_type;
mac_control = &sp->mac_control;
config = &sp->config;
@@ -3862,13 +3885,11 @@
return 0;
}
- txdp->Control_1 = 0;
- txdp->Control_2 = 0;
+ offload_type = s2io_offload_type(skb);
#ifdef NETIF_F_TSO
- mss = skb_shinfo(skb)->gso_size;
- if (skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
+ if (offload_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
txdp->Control_1 |= TXD_TCP_LSO_EN;
- txdp->Control_1 |= TXD_TCP_LSO_MSS(mss);
+ txdp->Control_1 |= TXD_TCP_LSO_MSS(s2io_tcp_mss(skb));
}
#endif
if (skb->ip_summed == CHECKSUM_HW) {
@@ -3886,10 +3907,10 @@
}
frg_len = skb->len - skb->data_len;
- if (skb_shinfo(skb)->gso_type == SKB_GSO_UDP) {
+ if (offload_type == SKB_GSO_UDP) {
int ufo_size;
- ufo_size = skb_shinfo(skb)->gso_size;
+ ufo_size = s2io_udp_mss(skb);
ufo_size &= ~7;
txdp->Control_1 |= TXD_UFO_EN;
txdp->Control_1 |= TXD_UFO_MSS(ufo_size);
@@ -3906,16 +3927,13 @@
sp->ufo_in_band_v,
sizeof(u64), PCI_DMA_TODEVICE);
txdp++;
- txdp->Control_1 = 0;
- txdp->Control_2 = 0;
}
txdp->Buffer_Pointer = pci_map_single
(sp->pdev, skb->data, frg_len, PCI_DMA_TODEVICE);
txdp->Host_Control = (unsigned long) skb;
txdp->Control_1 |= TXD_BUFFER0_SIZE(frg_len);
-
- if (skb_shinfo(skb)->gso_type == SKB_GSO_UDP)
+ if (offload_type == SKB_GSO_UDP)
txdp->Control_1 |= TXD_UFO_EN;
frg_cnt = skb_shinfo(skb)->nr_frags;
@@ -3930,12 +3948,12 @@
(sp->pdev, frag->page, frag->page_offset,
frag->size, PCI_DMA_TODEVICE);
txdp->Control_1 = TXD_BUFFER0_SIZE(frag->size);
- if (skb_shinfo(skb)->gso_type == SKB_GSO_UDP)
+ if (offload_type == SKB_GSO_UDP)
txdp->Control_1 |= TXD_UFO_EN;
}
txdp->Control_1 |= TXD_GATHER_CODE_LAST;
- if (skb_shinfo(skb)->gso_type == SKB_GSO_UDP)
+ if (offload_type == SKB_GSO_UDP)
frg_cnt++; /* as Txd0 was used for inband header */
tx_fifo = mac_control->tx_FIFO_start[queue];
@@ -3944,13 +3962,9 @@
val64 = (TX_FIFO_LAST_TXD_NUM(frg_cnt) | TX_FIFO_FIRST_LIST |
TX_FIFO_LAST_LIST);
+ if (offload_type)
+ val64 |= TX_FIFO_SPECIAL_FUNC;
-#ifdef NETIF_F_TSO
- if (mss)
- val64 |= TX_FIFO_SPECIAL_FUNC;
-#endif
- if (skb_shinfo(skb)->gso_type == SKB_GSO_UDP)
- val64 |= TX_FIFO_SPECIAL_FUNC;
writeq(val64, &tx_fifo->List_Control);
mmiowb();
@@ -3984,13 +3998,41 @@
mod_timer(&sp->alarm_timer, jiffies + HZ / 2);
}
+static int s2io_chk_rx_buffers(nic_t *sp, int rng_n)
+{
+ int rxb_size, level;
+
+ if (!sp->lro) {
+ rxb_size = atomic_read(&sp->rx_bufs_left[rng_n]);
+ level = rx_buffer_level(sp, rxb_size, rng_n);
+
+ if ((level == PANIC) && (!TASKLET_IN_USE)) {
+ int ret;
+ DBG_PRINT(INTR_DBG, "%s: Rx BD hit ", __FUNCTION__);
+ DBG_PRINT(INTR_DBG, "PANIC levels\n");
+ if ((ret = fill_rx_buffers(sp, rng_n)) == -ENOMEM) {
+ DBG_PRINT(ERR_DBG, "Out of memory in %s",
+ __FUNCTION__);
+ clear_bit(0, (&sp->tasklet_status));
+ return -1;
+ }
+ clear_bit(0, (&sp->tasklet_status));
+ } else if (level == LOW)
+ tasklet_schedule(&sp->task);
+
+ } else if (fill_rx_buffers(sp, rng_n) == -ENOMEM) {
+ DBG_PRINT(ERR_DBG, "%s:Out of memory", sp->dev->name);
+ DBG_PRINT(ERR_DBG, " in Rx Intr!!\n");
+ }
+ return 0;
+}
+
static irqreturn_t
s2io_msi_handle(int irq, void *dev_id, struct pt_regs *regs)
{
struct net_device *dev = (struct net_device *) dev_id;
nic_t *sp = dev->priv;
int i;
- int ret;
mac_info_t *mac_control;
struct config_param *config;
@@ -4012,35 +4054,8 @@
* reallocate the buffers from the interrupt handler itself,
* else schedule a tasklet to reallocate the buffers.
*/
- for (i = 0; i < config->rx_ring_num; i++) {
- if (!sp->lro) {
- int rxb_size = atomic_read(&sp->rx_bufs_left[i]);
- int level = rx_buffer_level(sp, rxb_size, i);
-
- if ((level == PANIC) && (!TASKLET_IN_USE)) {
- DBG_PRINT(INTR_DBG, "%s: Rx BD hit ",
- dev->name);
- DBG_PRINT(INTR_DBG, "PANIC levels\n");
- if ((ret = fill_rx_buffers(sp, i)) == -ENOMEM) {
- DBG_PRINT(ERR_DBG, "%s:Out of memory",
- dev->name);
- DBG_PRINT(ERR_DBG, " in ISR!!\n");
- clear_bit(0, (&sp->tasklet_status));
- atomic_dec(&sp->isr_cnt);
- return IRQ_HANDLED;
- }
- clear_bit(0, (&sp->tasklet_status));
- } else if (level == LOW) {
- tasklet_schedule(&sp->task);
- }
- }
- else if (fill_rx_buffers(sp, i) == -ENOMEM) {
- DBG_PRINT(ERR_DBG, "%s:Out of memory",
- dev->name);
- DBG_PRINT(ERR_DBG, " in Rx Intr!!\n");
- break;
- }
- }
+ for (i = 0; i < config->rx_ring_num; i++)
+ s2io_chk_rx_buffers(sp, i);
atomic_dec(&sp->isr_cnt);
return IRQ_HANDLED;
@@ -4051,39 +4066,13 @@
{
ring_info_t *ring = (ring_info_t *)dev_id;
nic_t *sp = ring->nic;
- struct net_device *dev = (struct net_device *) dev_id;
- int rxb_size, level, rng_n;
atomic_inc(&sp->isr_cnt);
+
rx_intr_handler(ring);
-
- rng_n = ring->ring_no;
- if (!sp->lro) {
- rxb_size = atomic_read(&sp->rx_bufs_left[rng_n]);
- level = rx_buffer_level(sp, rxb_size, rng_n);
-
- if ((level == PANIC) && (!TASKLET_IN_USE)) {
- int ret;
- DBG_PRINT(INTR_DBG, "%s: Rx BD hit ", __FUNCTION__);
- DBG_PRINT(INTR_DBG, "PANIC levels\n");
- if ((ret = fill_rx_buffers(sp, rng_n)) == -ENOMEM) {
- DBG_PRINT(ERR_DBG, "Out of memory in %s",
- __FUNCTION__);
- clear_bit(0, (&sp->tasklet_status));
- return IRQ_HANDLED;
- }
- clear_bit(0, (&sp->tasklet_status));
- } else if (level == LOW) {
- tasklet_schedule(&sp->task);
- }
- }
- else if (fill_rx_buffers(sp, rng_n) == -ENOMEM) {
- DBG_PRINT(ERR_DBG, "%s:Out of memory", dev->name);
- DBG_PRINT(ERR_DBG, " in Rx Intr!!\n");
- }
+ s2io_chk_rx_buffers(sp, ring->ring_no);
atomic_dec(&sp->isr_cnt);
-
return IRQ_HANDLED;
}
@@ -4248,37 +4237,8 @@
* else schedule a tasklet to reallocate the buffers.
*/
#ifndef CONFIG_S2IO_NAPI
- for (i = 0; i < config->rx_ring_num; i++) {
- if (!sp->lro) {
- int ret;
- int rxb_size = atomic_read(&sp->rx_bufs_left[i]);
- int level = rx_buffer_level(sp, rxb_size, i);
-
- if ((level == PANIC) && (!TASKLET_IN_USE)) {
- DBG_PRINT(INTR_DBG, "%s: Rx BD hit ",
- dev->name);
- DBG_PRINT(INTR_DBG, "PANIC levels\n");
- if ((ret = fill_rx_buffers(sp, i)) == -ENOMEM) {
- DBG_PRINT(ERR_DBG, "%s:Out of memory",
- dev->name);
- DBG_PRINT(ERR_DBG, " in ISR!!\n");
- clear_bit(0, (&sp->tasklet_status));
- atomic_dec(&sp->isr_cnt);
- writeq(org_mask, &bar0->general_int_mask);
- return IRQ_HANDLED;
- }
- clear_bit(0, (&sp->tasklet_status));
- } else if (level == LOW) {
- tasklet_schedule(&sp->task);
- }
- }
- else if (fill_rx_buffers(sp, i) == -ENOMEM) {
- DBG_PRINT(ERR_DBG, "%s:Out of memory",
- dev->name);
- DBG_PRINT(ERR_DBG, " in Rx intr!!\n");
- break;
- }
- }
+ for (i = 0; i < config->rx_ring_num; i++)
+ s2io_chk_rx_buffers(sp, i);
#endif
writeq(org_mask, &bar0->general_int_mask);
atomic_dec(&sp->isr_cnt);
@@ -4308,6 +4268,8 @@
if (cnt == 5)
break; /* Updt failed */
} while(1);
+ } else {
+ memset(sp->mac_control.stats_info, 0, sizeof(StatInfo_t));
}
}
@@ -4942,7 +4904,8 @@
}
static void s2io_vpd_read(nic_t *nic)
{
- u8 vpd_data[256],data;
+ u8 *vpd_data;
+ u8 data;
int i=0, cnt, fail = 0;
int vpd_addr = 0x80;
@@ -4955,6 +4918,10 @@
vpd_addr = 0x50;
}
+ vpd_data = kmalloc(256, GFP_KERNEL);
+ if (!vpd_data)
+ return;
+
for (i = 0; i < 256; i +=4 ) {
pci_write_config_byte(nic->pdev, (vpd_addr + 2), i);
pci_read_config_byte(nic->pdev, (vpd_addr + 2), &data);
@@ -4977,6 +4944,7 @@
memset(nic->product_name, 0, vpd_data[1]);
memcpy(nic->product_name, &vpd_data[3], vpd_data[1]);
}
+ kfree(vpd_data);
}
/**
@@ -5295,7 +5263,7 @@
else
*data = 0;
- return 0;
+ return *data;
}
/**
@@ -5753,6 +5721,19 @@
return 0;
}
+static u32 s2io_ethtool_op_get_tso(struct net_device *dev)
+{
+ return (dev->features & NETIF_F_TSO) != 0;
+}
+static int s2io_ethtool_op_set_tso(struct net_device *dev, u32 data)
+{
+ if (data)
+ dev->features |= (NETIF_F_TSO | NETIF_F_TSO6);
+ else
+ dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
+
+ return 0;
+}
static struct ethtool_ops netdev_ethtool_ops = {
.get_settings = s2io_ethtool_gset,
@@ -5773,8 +5754,8 @@
.get_sg = ethtool_op_get_sg,
.set_sg = ethtool_op_set_sg,
#ifdef NETIF_F_TSO
- .get_tso = ethtool_op_get_tso,
- .set_tso = ethtool_op_set_tso,
+ .get_tso = s2io_ethtool_op_get_tso,
+ .set_tso = s2io_ethtool_op_set_tso,
#endif
.get_ufo = ethtool_op_get_ufo,
.set_ufo = ethtool_op_set_ufo,
@@ -6337,7 +6318,7 @@
s2io_set_multicast(dev);
if (sp->lro) {
- /* Initialize max aggregatable pkts based on MTU */
+ /* Initialize max aggregatable pkts per session based on MTU */
sp->lro_max_aggr_per_sess = ((1<<16) - 1) / dev->mtu;
/* Check if we can use(if specified) user provided value */
if (lro_max_pkts < sp->lro_max_aggr_per_sess)
@@ -6438,7 +6419,7 @@
* @cksum : FCS checksum of the frame.
* @ring_no : the ring from which this RxD was extracted.
* Description:
- * This function is called by the Tx interrupt serivce routine to perform
+ * This function is called by the Rx interrupt service routine to perform
* some OS related operations on the SKB before passing it to the upper
* layers. It mainly checks if the checksum is OK, if so adds it to the
* SKBs cksum variable, increments the Rx packet count and passes the SKB
@@ -6698,33 +6679,6 @@
pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
}
-MODULE_AUTHOR("Raghavendra Koushik <raghavendra.koushik@neterion.com>");
-MODULE_LICENSE("GPL");
-MODULE_VERSION(DRV_VERSION);
-
-module_param(tx_fifo_num, int, 0);
-module_param(rx_ring_num, int, 0);
-module_param(rx_ring_mode, int, 0);
-module_param_array(tx_fifo_len, uint, NULL, 0);
-module_param_array(rx_ring_sz, uint, NULL, 0);
-module_param_array(rts_frm_len, uint, NULL, 0);
-module_param(use_continuous_tx_intrs, int, 1);
-module_param(rmac_pause_time, int, 0);
-module_param(mc_pause_threshold_q0q3, int, 0);
-module_param(mc_pause_threshold_q4q7, int, 0);
-module_param(shared_splits, int, 0);
-module_param(tmac_util_period, int, 0);
-module_param(rmac_util_period, int, 0);
-module_param(bimodal, bool, 0);
-module_param(l3l4hdr_size, int , 0);
-#ifndef CONFIG_S2IO_NAPI
-module_param(indicate_max_pkts, int, 0);
-#endif
-module_param(rxsync_frequency, int, 0);
-module_param(intr_type, int, 0);
-module_param(lro, int, 0);
-module_param(lro_max_pkts, int, 0);
-
static int s2io_verify_parm(struct pci_dev *pdev, u8 *dev_intr_type)
{
if ( tx_fifo_num > 8) {
@@ -6832,8 +6786,8 @@
}
if (dev_intr_type != MSI_X) {
if (pci_request_regions(pdev, s2io_driver_name)) {
- DBG_PRINT(ERR_DBG, "Request Regions failed\n"),
- pci_disable_device(pdev);
+ DBG_PRINT(ERR_DBG, "Request Regions failed\n");
+ pci_disable_device(pdev);
return -ENODEV;
}
}
@@ -6957,7 +6911,7 @@
/* initialize the shared memory used by the NIC and the host */
if (init_shared_mem(sp)) {
DBG_PRINT(ERR_DBG, "%s: Memory allocation failed\n",
- __FUNCTION__);
+ dev->name);
ret = -ENOMEM;
goto mem_alloc_failed;
}
@@ -7094,6 +7048,9 @@
dev->addr_len = ETH_ALEN;
memcpy(dev->dev_addr, sp->def_mac_addr, ETH_ALEN);
+ /* reset Nic and bring it to known state */
+ s2io_reset(sp);
+
/*
* Initialize the tasklet status and link state flags
* and the card state parameter
@@ -7131,11 +7088,11 @@
goto register_failed;
}
s2io_vpd_read(sp);
- DBG_PRINT(ERR_DBG, "%s: Neterion %s",dev->name, sp->product_name);
- DBG_PRINT(ERR_DBG, "(rev %d), Driver version %s\n",
- get_xena_rev_id(sp->pdev),
- s2io_driver_version);
DBG_PRINT(ERR_DBG, "Copyright(c) 2002-2005 Neterion Inc.\n");
+ DBG_PRINT(ERR_DBG, "%s: Neterion %s (rev %d)\n",dev->name,
+ sp->product_name, get_xena_rev_id(sp->pdev));
+ DBG_PRINT(ERR_DBG, "%s: Driver version %s\n", dev->name,
+ s2io_driver_version);
DBG_PRINT(ERR_DBG, "%s: MAC ADDR: "
"%02x:%02x:%02x:%02x:%02x:%02x\n", dev->name,
sp->def_mac_addr[0].mac_addr[0],
@@ -7436,8 +7393,13 @@
if (ip->ihl != 5) /* IP has options */
return -1;
+ /* If we see CE codepoint in IP header, packet is not mergeable */
+ if (INET_ECN_is_ce(ipv4_get_dsfield(ip)))
+ return -1;
+
+ /* If we see ECE or CWR flags in TCP header, packet is not mergeable */
if (tcp->urg || tcp->psh || tcp->rst || tcp->syn || tcp->fin ||
- !tcp->ack) {
+ tcp->ece || tcp->cwr || !tcp->ack) {
/*
* Currently recognize only the ack control word and
* any other control field being set would result in
@@ -7591,18 +7553,16 @@
static void lro_append_pkt(nic_t *sp, lro_t *lro, struct sk_buff *skb,
u32 tcp_len)
{
- struct sk_buff *tmp, *first = lro->parent;
+ struct sk_buff *first = lro->parent;
first->len += tcp_len;
first->data_len = lro->frags_len;
skb_pull(skb, (skb->len - tcp_len));
- if ((tmp = skb_shinfo(first)->frag_list)) {
- while (tmp->next)
- tmp = tmp->next;
- tmp->next = skb;
- }
+ if (skb_shinfo(first)->frag_list)
+ lro->last_frag->next = skb;
else
skb_shinfo(first)->frag_list = skb;
+ lro->last_frag = skb;
sp->mac_control.stats_info->sw_stat.clubbed_frms_cnt++;
return;
}
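
A note on the lro_append_pkt() hunk above: instead of walking the whole frag_list to find its tail on every append, the patch caches the tail in lro->last_frag (the field is added to struct lro in the s2io.h diff below), turning each append into a constant-time pointer update. A minimal standalone sketch of the same tail-pointer append pattern, with hypothetical names and a generic singly linked list rather than sk_buffs:

#include <stddef.h>

struct frag {
	struct frag *next;
};

struct session {
	struct frag *frag_list;	/* head of the chain ("frag_list" in the driver) */
	struct frag *last_frag;	/* cached tail, so appends are O(1) */
};

/* Append without re-walking the list, mirroring the new lro_append_pkt() */
static void session_append(struct session *s, struct frag *f)
{
	f->next = NULL;
	if (s->frag_list)
		s->last_frag->next = f;	/* link after the cached tail */
	else
		s->frag_list = f;	/* first fragment becomes the head */
	s->last_frag = f;		/* the new fragment is always the new tail */
}
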
diff --git a/drivers/net/s2io.h b/drivers/net/s2io.h
index 217097b..5ed49c3 100644
--- a/drivers/net/s2io.h
+++ b/drivers/net/s2io.h
@@ -719,6 +719,7 @@
/* Data structure to represent a LRO session */
typedef struct lro {
struct sk_buff *parent;
+ struct sk_buff *last_frag;
u8 *l2h;
struct iphdr *iph;
struct tcphdr *tcph;
@@ -1011,4 +1012,13 @@
static void queue_rx_frame(struct sk_buff *skb);
static void update_L3L4_header(nic_t *sp, lro_t *lro);
static void lro_append_pkt(nic_t *sp, lro_t *lro, struct sk_buff *skb, u32 tcp_len);
+
+#define s2io_tcp_mss(skb) skb_shinfo(skb)->gso_size
+#define s2io_udp_mss(skb) skb_shinfo(skb)->gso_size
+#define s2io_offload_type(skb) skb_shinfo(skb)->gso_type
+
+#define S2IO_PARM_INT(X, def_val) \
+ static unsigned int X = def_val;\
+ module_param(X , uint, 0);
+
#endif /* _S2IO_H */
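
For reference, the S2IO_PARM_INT() helper added just above is what replaces the long list of individual "static unsigned int ...;" plus module_param() pairs removed from s2io.c earlier in this patch. Expanding one invocation by hand (nothing new here, just the macro from the hunk above applied to a single parameter):

/* S2IO_PARM_INT(rxsync_frequency, 3); expands to roughly: */
static unsigned int rxsync_frequency = 3;
module_param(rxsync_frequency, uint, 0);	/* perm 0: settable at load time, not shown in sysfs */
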
diff --git a/drivers/net/wireless/zd1211rw/zd_chip.c b/drivers/net/wireless/zd1211rw/zd_chip.c
index efc9c4b..da9d06b 100644
--- a/drivers/net/wireless/zd1211rw/zd_chip.c
+++ b/drivers/net/wireless/zd1211rw/zd_chip.c
@@ -797,7 +797,7 @@
{ CR_ADDA_MBIAS_WARMTIME, 0x30000808 },
{ CR_ZD1211_RETRY_MAX, 0x2 },
{ CR_SNIFFER_ON, 0 },
- { CR_RX_FILTER, AP_RX_FILTER },
+ { CR_RX_FILTER, STA_RX_FILTER },
{ CR_GROUP_HASH_P1, 0x00 },
{ CR_GROUP_HASH_P2, 0x80000000 },
{ CR_REG1, 0xa4 },
@@ -844,7 +844,7 @@
{ CR_ZD1211B_AIFS_CTL2, 0x008C003C },
{ CR_ZD1211B_TXOP, 0x01800824 },
{ CR_SNIFFER_ON, 0 },
- { CR_RX_FILTER, AP_RX_FILTER },
+ { CR_RX_FILTER, STA_RX_FILTER },
{ CR_GROUP_HASH_P1, 0x00 },
{ CR_GROUP_HASH_P2, 0x80000000 },
{ CR_REG1, 0xa4 },
diff --git a/drivers/net/wireless/zd1211rw/zd_chip.h b/drivers/net/wireless/zd1211rw/zd_chip.h
index 8051210..069d2b4 100644
--- a/drivers/net/wireless/zd1211rw/zd_chip.h
+++ b/drivers/net/wireless/zd1211rw/zd_chip.h
@@ -461,10 +461,15 @@
#define CR_RX_FILTER CTL_REG(0x068c)
#define RX_FILTER_ASSOC_RESPONSE 0x0002
+#define RX_FILTER_REASSOC_RESPONSE 0x0008
#define RX_FILTER_PROBE_RESPONSE 0x0020
#define RX_FILTER_BEACON 0x0100
+#define RX_FILTER_DISASSOC 0x0400
#define RX_FILTER_AUTH 0x0800
-/* Sniff modus sets filter to 0xfffff */
+#define AP_RX_FILTER 0x0400feff
+#define STA_RX_FILTER 0x0000ffff
+
+/* Monitor mode sets filter to 0xfffff */
#define CR_ACK_TIMEOUT_EXT CTL_REG(0x0690)
#define CR_BCN_FIFO_SEMAPHORE CTL_REG(0x0694)
@@ -546,9 +551,6 @@
#define CR_ZD1211B_TXOP CTL_REG(0x0b20)
#define CR_ZD1211B_RETRY_MAX CTL_REG(0x0b28)
-#define AP_RX_FILTER 0x0400feff
-#define STA_RX_FILTER 0x0000ffff
-
#define CWIN_SIZE 0x007f043f
diff --git a/drivers/net/wireless/zd1211rw/zd_mac.c b/drivers/net/wireless/zd1211rw/zd_mac.c
index 3bdc54d..d6f3e02 100644
--- a/drivers/net/wireless/zd1211rw/zd_mac.c
+++ b/drivers/net/wireless/zd1211rw/zd_mac.c
@@ -108,7 +108,9 @@
if (r)
goto disable_int;
- r = zd_set_encryption_type(chip, NO_WEP);
+ /* We must inform the device that we are doing encryption/decryption in
+ * software at the moment. */
+ r = zd_set_encryption_type(chip, ENC_SNIFFER);
if (r)
goto disable_int;
@@ -136,10 +138,8 @@
{
struct ieee80211_device *ieee = zd_mac_to_ieee80211(mac);
struct zd_ioreq32 ioreqs[3] = {
- { CR_RX_FILTER, RX_FILTER_BEACON|RX_FILTER_PROBE_RESPONSE|
- RX_FILTER_AUTH|RX_FILTER_ASSOC_RESPONSE },
+ { CR_RX_FILTER, STA_RX_FILTER },
{ CR_SNIFFER_ON, 0U },
- { CR_ENCRYPTION_TYPE, NO_WEP },
};
if (ieee->iw_mode == IW_MODE_MONITOR) {
@@ -713,10 +713,10 @@
struct zd_rt_hdr {
struct ieee80211_radiotap_header rt_hdr;
u8 rt_flags;
+ u8 rt_rate;
u16 rt_channel;
u16 rt_chbitmask;
- u16 rt_rate;
-};
+} __attribute__((packed));
static void fill_rt_header(void *buffer, struct zd_mac *mac,
const struct ieee80211_rx_stats *stats,
@@ -735,14 +735,14 @@
if (status->decryption_type & (ZD_RX_WEP64|ZD_RX_WEP128|ZD_RX_WEP256))
hdr->rt_flags |= IEEE80211_RADIOTAP_F_WEP;
+ hdr->rt_rate = stats->rate / 5;
+
/* FIXME: 802.11a */
hdr->rt_channel = cpu_to_le16(ieee80211chan2mhz(
_zd_chip_get_channel(&mac->chip)));
hdr->rt_chbitmask = cpu_to_le16(IEEE80211_CHAN_2GHZ |
((status->frame_status & ZD_RX_FRAME_MODULATION_MASK) ==
ZD_RX_OFDM ? IEEE80211_CHAN_OFDM : IEEE80211_CHAN_CCK));
-
- hdr->rt_rate = stats->rate / 5;
}
/* Returns 1 if the data packet is for us and 0 otherwise. */
diff --git a/drivers/net/wireless/zd1211rw/zd_usb.c b/drivers/net/wireless/zd1211rw/zd_usb.c
index 72f9052..6320984 100644
--- a/drivers/net/wireless/zd1211rw/zd_usb.c
+++ b/drivers/net/wireless/zd1211rw/zd_usb.c
@@ -323,7 +323,6 @@
{
struct zd_usb_interrupt *intr = &usb->intr;
- ZD_ASSERT(in_interrupt());
spin_lock(&intr->lock);
intr->read_regs_enabled = 0;
spin_unlock(&intr->lock);
@@ -545,11 +544,11 @@
* be padded. Unaligned access might also happen if the length_info
* structure is not present.
*/
- if (get_unaligned(&length_info->tag) == RX_LENGTH_INFO_TAG) {
+ if (get_unaligned(&length_info->tag) == cpu_to_le16(RX_LENGTH_INFO_TAG))
+ {
unsigned int l, k, n;
for (i = 0, l = 0;; i++) {
- k = le16_to_cpu(get_unaligned(
- &length_info->length[i]));
+ k = le16_to_cpu(get_unaligned(&length_info->length[i]));
n = l+k;
if (n > length)
return;
diff --git a/drivers/pci/hotplug/acpiphp_core.c b/drivers/pci/hotplug/acpiphp_core.c
index 34de569..e2fef60 100644
--- a/drivers/pci/hotplug/acpiphp_core.c
+++ b/drivers/pci/hotplug/acpiphp_core.c
@@ -27,8 +27,7 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*
- * Send feedback to <gregkh@us.ibm.com>,
- * <t-kochi@bq.jp.nec.com>
+ * Send feedback to <kristen.c.accardi@intel.com>
*
*/
diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c
index ef95d12..ae67a8f 100644
--- a/drivers/pci/hotplug/acpiphp_glue.c
+++ b/drivers/pci/hotplug/acpiphp_glue.c
@@ -26,7 +26,7 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*
- * Send feedback to <t-kochi@bq.jp.nec.com>
+ * Send feedback to <kristen.c.accardi@intel.com>
*
*/
diff --git a/drivers/pci/pcie/portdrv_pci.c b/drivers/pci/pcie/portdrv_pci.c
index 50bfc1b..478d0d2 100644
--- a/drivers/pci/pcie/portdrv_pci.c
+++ b/drivers/pci/pcie/portdrv_pci.c
@@ -30,23 +30,6 @@
/* global data */
static const char device_name[] = "pcieport-driver";
-static int pcie_portdrv_save_config(struct pci_dev *dev)
-{
- return pci_save_state(dev);
-}
-
-static int pcie_portdrv_restore_config(struct pci_dev *dev)
-{
- int retval;
-
- pci_restore_state(dev);
- retval = pci_enable_device(dev);
- if (retval)
- return retval;
- pci_set_master(dev);
- return 0;
-}
-
/*
* pcie_portdrv_probe - Probe PCI-Express port devices
* @dev: PCI-Express port device being probed
@@ -73,8 +56,10 @@
"%s->Dev[%04x:%04x] has invalid IRQ. Check vendor BIOS\n",
__FUNCTION__, dev->device, dev->vendor);
}
- if (pcie_port_device_register(dev))
+ if (pcie_port_device_register(dev)) {
+ pci_disable_device(dev);
return -ENOMEM;
+ }
return 0;
}
@@ -86,6 +71,23 @@
}
#ifdef CONFIG_PM
+static int pcie_portdrv_save_config(struct pci_dev *dev)
+{
+ return pci_save_state(dev);
+}
+
+static int pcie_portdrv_restore_config(struct pci_dev *dev)
+{
+ int retval;
+
+ pci_restore_state(dev);
+ retval = pci_enable_device(dev);
+ if (retval)
+ return retval;
+ pci_set_master(dev);
+ return 0;
+}
+
static int pcie_portdrv_suspend (struct pci_dev *dev, pm_message_t state)
{
int ret = pcie_port_device_suspend(dev, state);
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index e3c78c3..fb08bc9 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -990,6 +990,11 @@
case 0x8070: /* P4G8X Deluxe */
asus_hides_smbus = 1;
}
+ if (dev->device == PCI_DEVICE_ID_INTEL_E7501_MCH)
+ switch (dev->subsystem_device) {
+ case 0x80c9: /* PU-DLS */
+ asus_hides_smbus = 1;
+ }
if (dev->device == PCI_DEVICE_ID_INTEL_82855GM_HB)
switch (dev->subsystem_device) {
case 0x1751: /* M2N notebook */
@@ -1058,6 +1063,7 @@
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82850_HB, asus_hides_smbus_hostbridge );
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82865_HB, asus_hides_smbus_hostbridge );
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_7205_0, asus_hides_smbus_hostbridge );
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7501_MCH, asus_hides_smbus_hostbridge );
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82855PM_HB, asus_hides_smbus_hostbridge );
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82855GM_HB, asus_hides_smbus_hostbridge );
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82915GM_HB, asus_hides_smbus_hostbridge );
@@ -1081,6 +1087,7 @@
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_0, asus_hides_smbus_lpc );
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_0, asus_hides_smbus_lpc );
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_0, asus_hides_smbus_lpc );
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_12, asus_hides_smbus_lpc );
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_12, asus_hides_smbus_lpc );
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801EB_0, asus_hides_smbus_lpc );
diff --git a/drivers/pci/search.c b/drivers/pci/search.c
index f8ae2b7..d529462 100644
--- a/drivers/pci/search.c
+++ b/drivers/pci/search.c
@@ -41,7 +41,7 @@
* in the global list of PCI buses. If the bus is found, a pointer to its
* data structure is returned. If no bus is found, %NULL is returned.
*/
-struct pci_bus * __devinit pci_find_bus(int domain, int busnr)
+struct pci_bus * pci_find_bus(int domain, int busnr)
{
struct pci_bus *bus = NULL;
struct pci_bus *tmp_bus;
diff --git a/drivers/pnp/interface.c b/drivers/pnp/interface.c
index 3163e3d..9d8b415 100644
--- a/drivers/pnp/interface.c
+++ b/drivers/pnp/interface.c
@@ -265,8 +265,8 @@
pnp_printf(buffer," disabled\n");
else
pnp_printf(buffer," 0x%llx-0x%llx\n",
- pnp_port_start(dev, i),
- pnp_port_end(dev, i));
+ (unsigned long long)pnp_port_start(dev, i),
+ (unsigned long long)pnp_port_end(dev, i));
}
}
for (i = 0; i < PNP_MAX_MEM; i++) {
@@ -276,8 +276,8 @@
pnp_printf(buffer," disabled\n");
else
pnp_printf(buffer," 0x%llx-0x%llx\n",
- pnp_mem_start(dev, i),
- pnp_mem_end(dev, i));
+ (unsigned long long)pnp_mem_start(dev, i),
+ (unsigned long long)pnp_mem_end(dev, i));
}
}
for (i = 0; i < PNP_MAX_IRQ; i++) {
@@ -287,7 +287,7 @@
pnp_printf(buffer," disabled\n");
else
pnp_printf(buffer," %lld\n",
- pnp_irq(dev, i));
+ (unsigned long long)pnp_irq(dev, i));
}
}
for (i = 0; i < PNP_MAX_DMA; i++) {
@@ -297,7 +297,7 @@
pnp_printf(buffer," disabled\n");
else
pnp_printf(buffer," %lld\n",
- pnp_dma(dev, i));
+ (unsigned long long)pnp_dma(dev, i));
}
}
ret = (buffer->curr - buf);
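
The three pnp/interface.c hunks above all add explicit casts in front of a %llx or %lld conversion. The underlying rule: a printf-style format that consumes an unsigned long long must be handed exactly that type, because default argument promotion will not widen a narrower resource value for you. A standalone sketch with a made-up value standing in for pnp_port_start():

#include <stdio.h>

int main(void)
{
	unsigned long start = 0xfebf0000UL;	/* stand-in for a pnp resource value */

	/* printf("0x%llx\n", start);  -- mismatched width on 32-bit builds: undefined */
	printf("0x%llx\n", (unsigned long long)start);	/* always well-defined */
	return 0;
}
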
diff --git a/drivers/pnp/pnpacpi/rsparser.c b/drivers/pnp/pnpacpi/rsparser.c
index 2122688..dc79b0a 100644
--- a/drivers/pnp/pnpacpi/rsparser.c
+++ b/drivers/pnp/pnpacpi/rsparser.c
@@ -173,6 +173,9 @@
return;
}
+ if (p->producer_consumer == ACPI_PRODUCER)
+ return;
+
if (p->resource_type == ACPI_MEMORY_RANGE)
pnpacpi_parse_allocated_memresource(res_table,
p->minimum, p->address_length);
@@ -252,9 +255,14 @@
break;
case ACPI_RESOURCE_TYPE_EXTENDED_ADDRESS64:
+ if (res->data.ext_address64.producer_consumer == ACPI_PRODUCER)
+ return AE_OK;
break;
case ACPI_RESOURCE_TYPE_EXTENDED_IRQ:
+ if (res->data.extended_irq.producer_consumer == ACPI_PRODUCER)
+ return AE_OK;
+
for (i = 0; i < res->data.extended_irq.interrupt_count; i++) {
pnpacpi_parse_allocated_irqresource(res_table,
res->data.extended_irq.interrupts[i],
diff --git a/drivers/scsi/ahci.c b/drivers/scsi/ahci.c
index 77e7202a..904c25f 100644
--- a/drivers/scsi/ahci.c
+++ b/drivers/scsi/ahci.c
@@ -940,14 +940,8 @@
return;
/* ignore interim PIO setup fis interrupts */
- if (ata_tag_valid(ap->active_tag)) {
- struct ata_queued_cmd *qc =
- ata_qc_from_tag(ap, ap->active_tag);
-
- if (qc && qc->tf.protocol == ATA_PROT_PIO &&
- (status & PORT_IRQ_PIOS_FIS))
- return;
- }
+ if (ata_tag_valid(ap->active_tag) && (status & PORT_IRQ_PIOS_FIS))
+ return;
if (ata_ratelimit())
ata_port_printk(ap, KERN_INFO, "spurious interrupt "
diff --git a/drivers/video/aty/aty128fb.c b/drivers/video/aty/aty128fb.c
index 8b08121..3e827e0 100644
--- a/drivers/video/aty/aty128fb.c
+++ b/drivers/video/aty/aty128fb.c
@@ -1913,9 +1913,6 @@
u8 chip_rev;
u32 dac;
- if (!par->vram_size) /* may have already been probed */
- par->vram_size = aty_ld_le32(CONFIG_MEMSIZE) & 0x03FFFFFF;
-
/* Get the chip revision */
chip_rev = (aty_ld_le32(CONFIG_CNTL) >> 16) & 0x1F;
@@ -2028,9 +2025,6 @@
aty128_init_engine(par);
- if (register_framebuffer(info) < 0)
- return 0;
-
par->pm_reg = pci_find_capability(pdev, PCI_CAP_ID_PM);
par->pdev = pdev;
par->asleep = 0;
@@ -2040,6 +2034,9 @@
aty128_bl_init(par);
#endif
+ if (register_framebuffer(info) < 0)
+ return 0;
+
printk(KERN_INFO "fb%d: %s frame buffer device on %s\n",
info->node, info->fix.id, video_card);
@@ -2089,7 +2086,6 @@
par = info->par;
info->pseudo_palette = par->pseudo_palette;
- info->fix = aty128fb_fix;
/* Virtualize mmio region */
info->fix.mmio_start = reg_addr;
diff --git a/drivers/video/au1100fb.c b/drivers/video/au1100fb.c
index a92a91f..f25d5d6 100644
--- a/drivers/video/au1100fb.c
+++ b/drivers/video/au1100fb.c
@@ -156,7 +156,7 @@
info->fix.visual = FB_VISUAL_TRUECOLOR;
info->fix.line_length = info->var.xres_virtual << 1; /* depth=16 */
- }
+ }
} else {
/* mono */
info->fix.visual = FB_VISUAL_MONO10;
@@ -164,20 +164,16 @@
}
info->screen_size = info->fix.line_length * info->var.yres_virtual;
+ info->var.rotate = ((fbdev->panel->control_base&LCD_CONTROL_SM_MASK) \
+ >> LCD_CONTROL_SM_BIT) * 90;
/* Determine BPP mode and format */
- fbdev->regs->lcd_control = fbdev->panel->control_base |
- ((info->var.rotate/90) << LCD_CONTROL_SM_BIT);
-
+ fbdev->regs->lcd_control = fbdev->panel->control_base;
+ fbdev->regs->lcd_horztiming = fbdev->panel->horztiming;
+ fbdev->regs->lcd_verttiming = fbdev->panel->verttiming;
+ fbdev->regs->lcd_clkcontrol = fbdev->panel->clkcontrol_base;
fbdev->regs->lcd_intenable = 0;
fbdev->regs->lcd_intstatus = 0;
-
- fbdev->regs->lcd_horztiming = fbdev->panel->horztiming;
-
- fbdev->regs->lcd_verttiming = fbdev->panel->verttiming;
-
- fbdev->regs->lcd_clkcontrol = fbdev->panel->clkcontrol_base;
-
fbdev->regs->lcd_dmaaddr0 = LCD_DMA_SA_N(fbdev->fb_phys);
if (panel_is_dual(fbdev->panel)) {
@@ -206,6 +202,8 @@
/* Resume controller */
fbdev->regs->lcd_control |= LCD_CONTROL_GO;
+ mdelay(10);
+ au1100fb_fb_blank(VESA_NO_BLANKING, info);
return 0;
}
diff --git a/fs/befs/linuxvfs.c b/fs/befs/linuxvfs.c
index fcaeead..50cfca5 100644
--- a/fs/befs/linuxvfs.c
+++ b/fs/befs/linuxvfs.c
@@ -512,7 +512,11 @@
wchar_t uni;
int unilen, utflen;
char *result;
- int maxlen = in_len; /* The utf8->nls conversion can't make more chars */
+ /* The utf8->nls conversion won't make the final nls string longer
+ * than the utf8 one, but if the string is pure ascii they'll have the
+ * same length and an extra byte is needed to store the trailing \0
+ */
+ int maxlen = in_len + 1;
befs_debug(sb, "---> utf2nls()");
@@ -588,7 +592,10 @@
wchar_t uni;
int unilen, utflen;
char *result;
- int maxlen = 3 * in_len;
+ /* There are nls characters that translate to 3-byte-wide UTF-8
+ * characters; an additional byte is needed to store the final \0
+ * in special cases */
+ int maxlen = (3 * in_len) + 1;
befs_debug(sb, "---> nls2utf()\n");
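
A short worked check of the two buffer bounds introduced above, kept as a sketch rather than driver code: utf2nls() can at worst keep the length (a pure-ASCII UTF-8 name of in_len bytes maps 1:1, so the result still needs in_len bytes plus the trailing NUL), while nls2utf() can expand each NLS character to at most a 3-byte UTF-8 sequence.

/* Worst-case output sizes implied by the comments above (NUL included). */
static inline int utf2nls_maxlen(int in_len)
{
	return in_len + 1;		/* 1:1 at worst, plus '\0' */
}

static inline int nls2utf_maxlen(int in_len)
{
	return 3 * in_len + 1;		/* 3 bytes per character at worst, plus '\0' */
}
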
diff --git a/fs/lockd/svclock.c b/fs/lockd/svclock.c
index baf5ae5..c9d4197 100644
--- a/fs/lockd/svclock.c
+++ b/fs/lockd/svclock.c
@@ -638,9 +638,6 @@
if (task->tk_status < 0) {
/* RPC error: Re-insert for retransmission */
timeout = 10 * HZ;
- } else if (block->b_done) {
- /* Block already removed, kill it for real */
- timeout = 0;
} else {
/* Call was successful, now wait for client callback */
timeout = 60 * HZ;
@@ -709,13 +706,10 @@
break;
if (time_after(block->b_when,jiffies))
break;
- dprintk("nlmsvc_retry_blocked(%p, when=%ld, done=%d)\n",
- block, block->b_when, block->b_done);
+ dprintk("nlmsvc_retry_blocked(%p, when=%ld)\n",
+ block, block->b_when);
kref_get(&block->b_count);
- if (block->b_done)
- nlmsvc_unlink_block(block);
- else
- nlmsvc_grant_blocked(block);
+ nlmsvc_grant_blocked(block);
nlmsvc_release_block(block);
}
diff --git a/fs/namei.c b/fs/namei.c
index e01070d..55a1312 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -159,7 +159,7 @@
#ifdef CONFIG_AUDITSYSCALL
void putname(const char *name)
{
- if (unlikely(current->audit_context))
+ if (unlikely(!audit_dummy_context()))
audit_putname(name);
else
__putname(name);
@@ -1125,7 +1125,7 @@
retval = link_path_walk(name, nd);
out:
if (likely(retval == 0)) {
- if (unlikely(current->audit_context && nd && nd->dentry &&
+ if (unlikely(!audit_dummy_context() && nd && nd->dentry &&
nd->dentry->d_inode))
audit_inode(name, nd->dentry->d_inode);
}
@@ -1357,7 +1357,7 @@
return -ENOENT;
BUG_ON(victim->d_parent->d_inode != dir);
- audit_inode_child(victim->d_name.name, victim->d_inode, dir->i_ino);
+ audit_inode_child(victim->d_name.name, victim->d_inode, dir);
error = permission(dir,MAY_WRITE | MAY_EXEC, NULL);
if (error)
@@ -1659,6 +1659,7 @@
* It already exists.
*/
mutex_unlock(&dir->d_inode->i_mutex);
+ audit_inode_update(path.dentry->d_inode);
error = -EEXIST;
if (flag & O_EXCL)
@@ -1669,6 +1670,7 @@
if (flag & O_NOFOLLOW)
goto exit_dput;
}
+
error = -ENOENT;
if (!path.dentry->d_inode)
goto exit_dput;
diff --git a/fs/nfs/namespace.c b/fs/nfs/namespace.c
index 19b98ca..86b3169 100644
--- a/fs/nfs/namespace.c
+++ b/fs/nfs/namespace.c
@@ -51,7 +51,7 @@
namelen = dentry->d_name.len;
buflen -= namelen + 1;
if (buflen < 0)
- goto Elong;
+ goto Elong_unlock;
end -= namelen;
memcpy(end, dentry->d_name.name, namelen);
*--end = '/';
@@ -68,6 +68,8 @@
end -= namelen;
memcpy(end, base, namelen);
return end;
+Elong_unlock:
+ spin_unlock(&dcache_lock);
Elong:
return ERR_PTR(-ENAMETOOLONG);
}
diff --git a/fs/nfs/read.c b/fs/nfs/read.c
index 52bf634..65c0c5b 100644
--- a/fs/nfs/read.c
+++ b/fs/nfs/read.c
@@ -63,7 +63,7 @@
return p;
}
-void nfs_readdata_free(struct nfs_read_data *p)
+static void nfs_readdata_free(struct nfs_read_data *p)
{
if (p && (p->pagevec != &p->page_array[0]))
kfree(p->pagevec);
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index 86bac6a..5077499 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -137,7 +137,7 @@
return p;
}
-void nfs_writedata_free(struct nfs_write_data *p)
+static void nfs_writedata_free(struct nfs_write_data *p)
{
if (p && (p->pagevec != &p->page_array[0]))
kfree(p->pagevec);
diff --git a/fs/reiserfs/file.c b/fs/reiserfs/file.c
index f318b58..1627edd 100644
--- a/fs/reiserfs/file.c
+++ b/fs/reiserfs/file.c
@@ -48,8 +48,8 @@
return 0;
}
- reiserfs_write_lock(inode->i_sb);
mutex_lock(&inode->i_mutex);
+ reiserfs_write_lock(inode->i_sb);
/* freeing preallocation only involves relogging blocks that
* are already in the current transaction. preallocation gets
* freed at the end of each transaction, so it is impossible for
diff --git a/fs/reiserfs/inode.c b/fs/reiserfs/inode.c
index 12dfdcf..52f1e21 100644
--- a/fs/reiserfs/inode.c
+++ b/fs/reiserfs/inode.c
@@ -39,14 +39,10 @@
/* The = 0 happens when we abort creating a new inode for some reason like lack of space.. */
if (!(inode->i_state & I_NEW) && INODE_PKEY(inode)->k_objectid != 0) { /* also handles bad_inode case */
- mutex_lock(&inode->i_mutex);
-
reiserfs_delete_xattrs(inode);
- if (journal_begin(&th, inode->i_sb, jbegin_count)) {
- mutex_unlock(&inode->i_mutex);
+ if (journal_begin(&th, inode->i_sb, jbegin_count))
goto out;
- }
reiserfs_update_inode_transaction(inode);
err = reiserfs_delete_object(&th, inode);
@@ -57,12 +53,8 @@
if (!err)
DQUOT_FREE_INODE(inode);
- if (journal_end(&th, inode->i_sb, jbegin_count)) {
- mutex_unlock(&inode->i_mutex);
+ if (journal_end(&th, inode->i_sb, jbegin_count))
goto out;
- }
-
- mutex_unlock(&inode->i_mutex);
/* check return value from reiserfs_delete_object after
* ending the transaction
@@ -2348,6 +2340,7 @@
unsigned long end_index = inode->i_size >> PAGE_CACHE_SHIFT;
int error = 0;
unsigned long block;
+ sector_t last_block;
struct buffer_head *head, *bh;
int partial = 0;
int nr = 0;
@@ -2395,10 +2388,19 @@
}
bh = head;
block = page->index << (PAGE_CACHE_SHIFT - s->s_blocksize_bits);
+ last_block = (i_size_read(inode) - 1) >> inode->i_blkbits;
/* first map all the buffers, logging any direct items we find */
do {
- if ((checked || buffer_dirty(bh)) && (!buffer_mapped(bh) ||
- (buffer_mapped(bh)
+ if (block > last_block) {
+ /*
+ * This can happen when the block size is less than
+ * the page size. The corresponding bytes in the page
+ * were zero filled above
+ */
+ clear_buffer_dirty(bh);
+ set_buffer_uptodate(bh);
+ } else if ((checked || buffer_dirty(bh)) &&
+ (!buffer_mapped(bh) || (buffer_mapped(bh)
&& bh->b_blocknr ==
0))) {
/* not mapped yet, or it points to a direct item, search
diff --git a/fs/reiserfs/ioctl.c b/fs/reiserfs/ioctl.c
index 745c881..a986b5e 100644
--- a/fs/reiserfs/ioctl.c
+++ b/fs/reiserfs/ioctl.c
@@ -116,12 +116,12 @@
if (REISERFS_I(inode)->i_flags & i_nopack_mask) {
return 0;
}
- reiserfs_write_lock(inode->i_sb);
/* we need to make sure nobody is changing the file size beneath
** us
*/
mutex_lock(&inode->i_mutex);
+ reiserfs_write_lock(inode->i_sb);
write_from = inode->i_size & (blocksize - 1);
/* if we are on a block boundary, we are already unpacked. */
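
Both reiserfs hunks above (file.c and this ioctl.c one) swap the two acquisitions so that i_mutex is taken before reiserfs_write_lock(). The patch does not spell out the reason, but the usual motivation for such a swap is to keep every path that needs both locks acquiring them in one global order, which rules out an ABBA deadlock. A generic userspace sketch of that rule, with hypothetical lock names:

#include <pthread.h>

static pthread_mutex_t lock_a = PTHREAD_MUTEX_INITIALIZER;	/* plays i_mutex */
static pthread_mutex_t lock_b = PTHREAD_MUTEX_INITIALIZER;	/* plays the write lock */

/* Every path takes A then B; no path ever takes B then A, so no two
 * threads can each hold one lock while waiting for the other. */
static void do_work(void)
{
	pthread_mutex_lock(&lock_a);
	pthread_mutex_lock(&lock_b);
	/* ... critical section ... */
	pthread_mutex_unlock(&lock_b);
	pthread_mutex_unlock(&lock_a);
}
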
diff --git a/fs/udf/ialloc.c b/fs/udf/ialloc.c
index 3873c67..3332347 100644
--- a/fs/udf/ialloc.c
+++ b/fs/udf/ialloc.c
@@ -75,6 +75,12 @@
}
*err = -ENOSPC;
+ UDF_I_UNIQUE(inode) = 0;
+ UDF_I_LENEXTENTS(inode) = 0;
+ UDF_I_NEXT_ALLOC_BLOCK(inode) = 0;
+ UDF_I_NEXT_ALLOC_GOAL(inode) = 0;
+ UDF_I_STRAT4096(inode) = 0;
+
block = udf_new_block(dir->i_sb, NULL, UDF_I_LOCATION(dir).partitionReferenceNum,
start, err);
if (*err)
@@ -84,11 +90,6 @@
}
mutex_lock(&sbi->s_alloc_mutex);
- UDF_I_UNIQUE(inode) = 0;
- UDF_I_LENEXTENTS(inode) = 0;
- UDF_I_NEXT_ALLOC_BLOCK(inode) = 0;
- UDF_I_NEXT_ALLOC_GOAL(inode) = 0;
- UDF_I_STRAT4096(inode) = 0;
if (UDF_SB_LVIDBH(sb))
{
struct logicalVolHeaderDesc *lvhd;
diff --git a/fs/ufs/balloc.c b/fs/ufs/balloc.c
index b01804b..b823814 100644
--- a/fs/ufs/balloc.c
+++ b/fs/ufs/balloc.c
@@ -248,7 +248,7 @@
if (likely(cur_index != index)) {
page = ufs_get_locked_page(mapping, index);
- if (IS_ERR(page))
+ if (!page || IS_ERR(page)) /* it was truncated or EIO */
continue;
} else
page = locked_page;
diff --git a/fs/ufs/util.c b/fs/ufs/util.c
index 337cf2c..22f820a 100644
--- a/fs/ufs/util.c
+++ b/fs/ufs/util.c
@@ -251,12 +251,12 @@
{
struct page *page;
-try_again:
page = find_lock_page(mapping, index);
if (!page) {
page = read_cache_page(mapping, index,
(filler_t*)mapping->a_ops->readpage,
NULL);
+
if (IS_ERR(page)) {
printk(KERN_ERR "ufs_change_blocknr: "
"read_cache_page error: ino %lu, index: %lu\n",
@@ -266,6 +266,14 @@
lock_page(page);
+ if (unlikely(page->mapping == NULL)) {
+ /* Truncate got there first */
+ unlock_page(page);
+ page_cache_release(page);
+ page = NULL;
+ goto out;
+ }
+
if (!PageUptodate(page) || PageError(page)) {
unlock_page(page);
page_cache_release(page);
@@ -275,15 +283,8 @@
mapping->host->i_ino, index);
page = ERR_PTR(-EIO);
- goto out;
}
}
-
- if (unlikely(!page->mapping || !page_has_buffers(page))) {
- unlock_page(page);
- page_cache_release(page);
- goto try_again;/*we really need these buffers*/
- }
out:
return page;
}
diff --git a/include/asm-ia64/meminit.h b/include/asm-ia64/meminit.h
index 894bc4d..6a33a07 100644
--- a/include/asm-ia64/meminit.h
+++ b/include/asm-ia64/meminit.h
@@ -56,6 +56,11 @@
extern struct page *vmem_map;
extern int find_largest_hole (u64 start, u64 end, void *arg);
extern int create_mem_map_page_table (u64 start, u64 end, void *arg);
+ extern int vmemmap_find_next_valid_pfn(int, int);
+#else
+static inline int vmemmap_find_next_valid_pfn(int node, int i)
+{
+ return i + 1;
+}
#endif
-
#endif /* meminit_h */
diff --git a/include/asm-ia64/pal.h b/include/asm-ia64/pal.h
index 37e52a2..20a8d61 100644
--- a/include/asm-ia64/pal.h
+++ b/include/asm-ia64/pal.h
@@ -1433,7 +1433,12 @@
} pal_version_u_t;
-/* Return PAL version information */
+/*
+ * Return PAL version information. While the documentation states that
+ * PAL_VERSION can be called in either physical or virtual mode, some
+ * implementations only allow physical calls. We don't call it very often,
+ * so the overhead isn't worth eliminating.
+ */
static inline s64
ia64_pal_version (pal_version_u_t *pal_min_version, pal_version_u_t *pal_cur_version)
{
diff --git a/include/asm-ia64/sn/xpc.h b/include/asm-ia64/sn/xpc.h
index 8406f1e..b72af59 100644
--- a/include/asm-ia64/sn/xpc.h
+++ b/include/asm-ia64/sn/xpc.h
@@ -1124,8 +1124,8 @@
#define XPC_GET_IPI_FLAGS(_amo, _c) ((u8) (((_amo) >> ((_c) * 8)) & 0xff))
#define XPC_SET_IPI_FLAGS(_amo, _c, _f) (_amo) |= ((u64) (_f) << ((_c) * 8))
-#define XPC_ANY_OPENCLOSE_IPI_FLAGS_SET(_amo) ((_amo) & 0x0f0f0f0f0f0f0f0f)
-#define XPC_ANY_MSG_IPI_FLAGS_SET(_amo) ((_amo) & 0x1010101010101010)
+#define XPC_ANY_OPENCLOSE_IPI_FLAGS_SET(_amo) ((_amo) & __IA64_UL_CONST(0x0f0f0f0f0f0f0f0f))
+#define XPC_ANY_MSG_IPI_FLAGS_SET(_amo) ((_amo) & __IA64_UL_CONST(0x1010101010101010))
static inline void
diff --git a/include/asm-ia64/system.h b/include/asm-ia64/system.h
index fc9677b..384fbf7 100644
--- a/include/asm-ia64/system.h
+++ b/include/asm-ia64/system.h
@@ -24,7 +24,7 @@
* 0xa000000000000000+2*PERCPU_PAGE_SIZE
* - 0xa000000000000000+3*PERCPU_PAGE_SIZE remain unmapped (guard page)
*/
-#define KERNEL_START (GATE_ADDR+0x100000000)
+#define KERNEL_START (GATE_ADDR+__IA64_UL_CONST(0x100000000))
#define PERCPU_ADDR (-PERCPU_PAGE_SIZE)
#ifndef __ASSEMBLY__
diff --git a/include/linux/audit.h b/include/linux/audit.h
index b27d7de..64f9f9e 100644
--- a/include/linux/audit.h
+++ b/include/linux/audit.h
@@ -327,21 +327,31 @@
extern void audit_putname(const char *name);
extern void __audit_inode(const char *name, const struct inode *inode);
extern void __audit_inode_child(const char *dname, const struct inode *inode,
- unsigned long pino);
+ const struct inode *parent);
+extern void __audit_inode_update(const struct inode *inode);
+static inline int audit_dummy_context(void)
+{
+ void *p = current->audit_context;
+ return !p || *(int *)p;
+}
static inline void audit_getname(const char *name)
{
- if (unlikely(current->audit_context))
+ if (unlikely(!audit_dummy_context()))
__audit_getname(name);
}
static inline void audit_inode(const char *name, const struct inode *inode) {
- if (unlikely(current->audit_context))
+ if (unlikely(!audit_dummy_context()))
__audit_inode(name, inode);
}
static inline void audit_inode_child(const char *dname,
- const struct inode *inode,
- unsigned long pino) {
- if (unlikely(current->audit_context))
- __audit_inode_child(dname, inode, pino);
+ const struct inode *inode,
+ const struct inode *parent) {
+ if (unlikely(!audit_dummy_context()))
+ __audit_inode_child(dname, inode, parent);
+}
+static inline void audit_inode_update(const struct inode *inode) {
+ if (unlikely(!audit_dummy_context()))
+ __audit_inode_update(inode);
}
/* Private API (for audit.c only) */
@@ -365,57 +375,61 @@
static inline int audit_ipc_obj(struct kern_ipc_perm *ipcp)
{
- if (unlikely(current->audit_context))
+ if (unlikely(!audit_dummy_context()))
return __audit_ipc_obj(ipcp);
return 0;
}
static inline int audit_ipc_set_perm(unsigned long qbytes, uid_t uid, gid_t gid, mode_t mode)
{
- if (unlikely(current->audit_context))
+ if (unlikely(!audit_dummy_context()))
return __audit_ipc_set_perm(qbytes, uid, gid, mode);
return 0;
}
static inline int audit_mq_open(int oflag, mode_t mode, struct mq_attr __user *u_attr)
{
- if (unlikely(current->audit_context))
+ if (unlikely(!audit_dummy_context()))
return __audit_mq_open(oflag, mode, u_attr);
return 0;
}
static inline int audit_mq_timedsend(mqd_t mqdes, size_t msg_len, unsigned int msg_prio, const struct timespec __user *u_abs_timeout)
{
- if (unlikely(current->audit_context))
+ if (unlikely(!audit_dummy_context()))
return __audit_mq_timedsend(mqdes, msg_len, msg_prio, u_abs_timeout);
return 0;
}
static inline int audit_mq_timedreceive(mqd_t mqdes, size_t msg_len, unsigned int __user *u_msg_prio, const struct timespec __user *u_abs_timeout)
{
- if (unlikely(current->audit_context))
+ if (unlikely(!audit_dummy_context()))
return __audit_mq_timedreceive(mqdes, msg_len, u_msg_prio, u_abs_timeout);
return 0;
}
static inline int audit_mq_notify(mqd_t mqdes, const struct sigevent __user *u_notification)
{
- if (unlikely(current->audit_context))
+ if (unlikely(!audit_dummy_context()))
return __audit_mq_notify(mqdes, u_notification);
return 0;
}
static inline int audit_mq_getsetattr(mqd_t mqdes, struct mq_attr *mqstat)
{
- if (unlikely(current->audit_context))
+ if (unlikely(!audit_dummy_context()))
return __audit_mq_getsetattr(mqdes, mqstat);
return 0;
}
+extern int audit_n_rules;
#else
#define audit_alloc(t) ({ 0; })
#define audit_free(t) do { ; } while (0)
#define audit_syscall_entry(ta,a,b,c,d,e) do { ; } while (0)
#define audit_syscall_exit(f,r) do { ; } while (0)
+#define audit_dummy_context() 1
#define audit_getname(n) do { ; } while (0)
#define audit_putname(n) do { ; } while (0)
#define __audit_inode(n,i) do { ; } while (0)
#define __audit_inode_child(d,i,p) do { ; } while (0)
+#define __audit_inode_update(i) do { ; } while (0)
#define audit_inode(n,i) do { ; } while (0)
#define audit_inode_child(d,i,p) do { ; } while (0)
+#define audit_inode_update(i) do { ; } while (0)
#define auditsc_get_stamp(c,t,s) do { BUG(); } while (0)
#define audit_get_loginuid(c) ({ -1; })
#define audit_ipc_obj(i) ({ 0; })
@@ -430,6 +444,7 @@
#define audit_mq_timedreceive(d,l,p,t) ({ 0; })
#define audit_mq_notify(d,n) ({ 0; })
#define audit_mq_getsetattr(d,s) ({ 0; })
+#define audit_n_rules 0
#endif
#ifdef CONFIG_AUDIT
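
The audit_dummy_context() inline added above relies on a layout guarantee rather than on seeing struct audit_context, which stays private to kernel/auditsc.c: the auditsc.c hunk later in this patch adds "int dummy" as the first member (marked "must be the first element"), so casting the opaque context pointer to int * reads exactly that flag. A stripped-down sketch of the same trick, with hypothetical names:

/* Private definition, visible only to one .c file. */
struct example_context {
	int dummy;		/* must stay the first member */
	int in_syscall;
	/* ... */
};

/* Public inline sees only an opaque pointer, yet can test the flag. */
static inline int example_dummy_context(const void *ctx)
{
	return !ctx || *(const int *)ctx;	/* no context, or context marked dummy */
}
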
diff --git a/include/linux/debug_locks.h b/include/linux/debug_locks.h
index 6a70478..88dafa2 100644
--- a/include/linux/debug_locks.h
+++ b/include/linux/debug_locks.h
@@ -1,6 +1,8 @@
#ifndef __LINUX_DEBUG_LOCKING_H
#define __LINUX_DEBUG_LOCKING_H
+struct task_struct;
+
extern int debug_locks;
extern int debug_locks_silent;
diff --git a/include/linux/fsnotify.h b/include/linux/fsnotify.h
index cc5dec7..d4f219f 100644
--- a/include/linux/fsnotify.h
+++ b/include/linux/fsnotify.h
@@ -67,7 +67,7 @@
if (source) {
inotify_inode_queue_event(source, IN_MOVE_SELF, 0, NULL, NULL);
}
- audit_inode_child(new_name, source, new_dir->i_ino);
+ audit_inode_child(new_name, source, new_dir);
}
/*
@@ -98,7 +98,7 @@
inode_dir_notify(inode, DN_CREATE);
inotify_inode_queue_event(inode, IN_CREATE, 0, dentry->d_name.name,
dentry->d_inode);
- audit_inode_child(dentry->d_name.name, dentry->d_inode, inode->i_ino);
+ audit_inode_child(dentry->d_name.name, dentry->d_inode, inode);
}
/*
@@ -109,7 +109,7 @@
inode_dir_notify(inode, DN_CREATE);
inotify_inode_queue_event(inode, IN_CREATE | IN_ISDIR, 0,
dentry->d_name.name, dentry->d_inode);
- audit_inode_child(dentry->d_name.name, dentry->d_inode, inode->i_ino);
+ audit_inode_child(dentry->d_name.name, dentry->d_inode, inode);
}
/*
diff --git a/include/linux/kobject.h b/include/linux/kobject.h
index 0503b2e..2d22932 100644
--- a/include/linux/kobject.h
+++ b/include/linux/kobject.h
@@ -46,8 +46,6 @@
KOBJ_UMOUNT = (__force kobject_action_t) 0x05, /* umount event for block devices (broken) */
KOBJ_OFFLINE = (__force kobject_action_t) 0x06, /* device offline */
KOBJ_ONLINE = (__force kobject_action_t) 0x07, /* device online */
- KOBJ_UNDOCK = (__force kobject_action_t) 0x08, /* undocking */
- KOBJ_DOCK = (__force kobject_action_t) 0x09, /* dock */
};
struct kobject {
diff --git a/include/linux/lockd/lockd.h b/include/linux/lockd/lockd.h
index aa4fe90..0d92c46 100644
--- a/include/linux/lockd/lockd.h
+++ b/include/linux/lockd/lockd.h
@@ -123,7 +123,6 @@
unsigned int b_id; /* block id */
unsigned char b_queued; /* re-queued */
unsigned char b_granted; /* VFS granted lock */
- unsigned char b_done; /* callback complete */
struct nlm_file * b_file; /* file in question */
};
diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h
index 55ea853..2474345 100644
--- a/include/linux/nfs_fs.h
+++ b/include/linux/nfs_fs.h
@@ -476,10 +476,9 @@
}
/*
- * Allocate and free nfs_write_data structures
+ * Allocate nfs_write_data structures
*/
extern struct nfs_write_data *nfs_writedata_alloc(unsigned int pagecount);
-extern void nfs_writedata_free(struct nfs_write_data *p);
/*
* linux/fs/nfs/read.c
@@ -491,10 +490,9 @@
extern void nfs_readdata_release(void *data);
/*
- * Allocate and free nfs_read_data structures
+ * Allocate nfs_read_data structures
*/
extern struct nfs_read_data *nfs_readdata_alloc(unsigned int pagecount);
-extern void nfs_readdata_free(struct nfs_read_data *p);
/*
* linux/fs/nfs3proc.c
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index c09396d..4eae06b 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -2142,6 +2142,7 @@
#define PCI_DEVICE_ID_INTEL_82820_UP_HB 0x2501
#define PCI_DEVICE_ID_INTEL_82850_HB 0x2530
#define PCI_DEVICE_ID_INTEL_82860_HB 0x2531
+#define PCI_DEVICE_ID_INTEL_E7501_MCH 0x254c
#define PCI_DEVICE_ID_INTEL_82845G_HB 0x2560
#define PCI_DEVICE_ID_INTEL_82845G_IG 0x2562
#define PCI_DEVICE_ID_INTEL_82865_HB 0x2570
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 6afa72e..6674fc1 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1558,6 +1558,14 @@
}
/*
+ * Sometimes we may need to cancel the previous 'freeze' request
+ */
+static inline void do_not_freeze(struct task_struct *p)
+{
+ p->flags &= ~PF_FREEZE;
+}
+
+/*
* Wake up a frozen process
*/
static inline int thaw_process(struct task_struct *p)
diff --git a/include/linux/sunrpc/xprt.h b/include/linux/sunrpc/xprt.h
index e8bbe81..840e47a 100644
--- a/include/linux/sunrpc/xprt.h
+++ b/include/linux/sunrpc/xprt.h
@@ -229,7 +229,7 @@
int xprt_reserve_xprt_cong(struct rpc_task *task);
int xprt_prepare_transmit(struct rpc_task *task);
void xprt_transmit(struct rpc_task *task);
-void xprt_abort_transmit(struct rpc_task *task);
+void xprt_end_transmit(struct rpc_task *task);
int xprt_adjust_timeout(struct rpc_rqst *req);
void xprt_release_xprt(struct rpc_xprt *xprt, struct rpc_task *task);
void xprt_release_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task);
diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
index 1ab806c..2d9b1b6 100644
--- a/include/linux/vmstat.h
+++ b/include/linux/vmstat.h
@@ -41,23 +41,23 @@
static inline void __count_vm_event(enum vm_event_item item)
{
- __get_cpu_var(vm_event_states.event[item])++;
+ __get_cpu_var(vm_event_states).event[item]++;
}
static inline void count_vm_event(enum vm_event_item item)
{
- get_cpu_var(vm_event_states.event[item])++;
+ get_cpu_var(vm_event_states).event[item]++;
put_cpu();
}
static inline void __count_vm_events(enum vm_event_item item, long delta)
{
- __get_cpu_var(vm_event_states.event[item]) += delta;
+ __get_cpu_var(vm_event_states).event[item] += delta;
}
static inline void count_vm_events(enum vm_event_item item, long delta)
{
- get_cpu_var(vm_event_states.event[item]) += delta;
+ get_cpu_var(vm_event_states).event[item] += delta;
put_cpu();
}
diff --git a/include/net/red.h b/include/net/red.h
index 5ccdbb3..a4eb379 100644
--- a/include/net/red.h
+++ b/include/net/red.h
@@ -212,7 +212,7 @@
* Seems, it is the best solution to
* problem of too coarse exponent tabulation.
*/
- us_idle = (p->qavg * us_idle) >> p->Scell_log;
+ us_idle = (p->qavg * (u64)us_idle) >> p->Scell_log;
if (us_idle < (p->qavg >> 1))
return p->qavg - us_idle;
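
The net/red.h one-liner above casts us_idle to u64 before multiplying it with p->qavg, presumably so the product is formed in 64 bits instead of wrapping in a 32-bit intermediate before the right shift. A standalone sketch with made-up numbers showing the difference:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t qavg = 200000, us_idle = 300000, scell_log = 6;

	uint32_t narrow = (qavg * us_idle) >> scell_log;		/* product wraps mod 2^32 */
	uint64_t wide   = ((uint64_t)qavg * us_idle) >> scell_log;	/* full 64-bit product kept */

	printf("narrow=%u wide=%llu\n", narrow, (unsigned long long)wide);
	return 0;
}
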
diff --git a/kernel/audit.c b/kernel/audit.c
index d417ca1..0a36091e 100644
--- a/kernel/audit.c
+++ b/kernel/audit.c
@@ -690,9 +690,7 @@
/* Initialize audit support at boot time. */
static int __init audit_init(void)
{
-#ifdef CONFIG_AUDITSYSCALL
int i;
-#endif
printk(KERN_INFO "audit: initializing netlink socket (%s)\n",
audit_default ? "enabled" : "disabled");
@@ -717,10 +715,10 @@
audit_ih = inotify_init(&audit_inotify_ops);
if (IS_ERR(audit_ih))
audit_panic("cannot initialize inotify handle");
+#endif
for (i = 0; i < AUDIT_INODE_BUCKETS; i++)
INIT_LIST_HEAD(&audit_inode_hash[i]);
-#endif
return 0;
}
diff --git a/kernel/auditfilter.c b/kernel/auditfilter.c
index 5b4e162..6a9a5c5a 100644
--- a/kernel/auditfilter.c
+++ b/kernel/auditfilter.c
@@ -442,6 +442,7 @@
case AUDIT_EQUAL:
break;
default:
+ err = -EINVAL;
goto exit_free;
}
}
@@ -579,6 +580,7 @@
case AUDIT_EQUAL:
break;
default:
+ err = -EINVAL;
goto exit_free;
}
}
@@ -1134,6 +1136,14 @@
struct audit_watch *watch = entry->rule.watch;
struct nameidata *ndp, *ndw;
int h, err, putnd_needed = 0;
+#ifdef CONFIG_AUDITSYSCALL
+ int dont_count = 0;
+
+ /* If either of these, don't count towards total */
+ if (entry->rule.listnr == AUDIT_FILTER_USER ||
+ entry->rule.listnr == AUDIT_FILTER_TYPE)
+ dont_count = 1;
+#endif
if (inode_f) {
h = audit_hash_ino(inode_f->val);
@@ -1174,6 +1184,10 @@
} else {
list_add_tail_rcu(&entry->list, list);
}
+#ifdef CONFIG_AUDITSYSCALL
+ if (!dont_count)
+ audit_n_rules++;
+#endif
mutex_unlock(&audit_filter_mutex);
if (putnd_needed)
@@ -1198,6 +1212,14 @@
struct audit_watch *watch, *tmp_watch = entry->rule.watch;
LIST_HEAD(inotify_list);
int h, ret = 0;
+#ifdef CONFIG_AUDITSYSCALL
+ int dont_count = 0;
+
+ /* If either of these, don't count towards total */
+ if (entry->rule.listnr == AUDIT_FILTER_USER ||
+ entry->rule.listnr == AUDIT_FILTER_TYPE)
+ dont_count = 1;
+#endif
if (inode_f) {
h = audit_hash_ino(inode_f->val);
@@ -1235,6 +1257,10 @@
list_del_rcu(&e->list);
call_rcu(&e->rcu, audit_free_rule_rcu);
+#ifdef CONFIG_AUDITSYSCALL
+ if (!dont_count)
+ audit_n_rules--;
+#endif
mutex_unlock(&audit_filter_mutex);
if (!list_empty(&inotify_list))
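
The auditfilter.c changes above and the auditsc.c changes below cooperate: adding or removing any rule outside the USER and TYPE lists adjusts audit_n_rules, and at syscall entry the context is marked dummy when that counter is zero, so rule-free systems skip per-syscall filtering and name collection. A condensed sketch of the pattern, with hypothetical names:

/* Global count of installed rules (audit_n_rules in the patch). */
static int n_rules;

struct ctx {
	int dummy;	/* set when there is nothing to match against */
};

static void syscall_entry(struct ctx *c)
{
	c->dummy = !n_rules;	/* no rules: mark this context as a no-op */
	if (c->dummy)
		return;		/* skip per-syscall filtering and bookkeeping */
	/* ... run the entry filters, record names, etc. ... */
}
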
diff --git a/kernel/auditsc.c b/kernel/auditsc.c
index ae40ac8..efc1b74 100644
--- a/kernel/auditsc.c
+++ b/kernel/auditsc.c
@@ -85,6 +85,9 @@
/* Indicates that audit should log the full pathname. */
#define AUDIT_NAME_FULL -1
+/* number of audit rules */
+int audit_n_rules;
+
/* When fs/namei.c:getname() is called, we store the pointer in name and
* we don't let putname() free it (instead we free all of the saved
* pointers at syscall exit time).
@@ -174,6 +177,7 @@
/* The per-task audit context. */
struct audit_context {
+ int dummy; /* must be the first element */
int in_syscall; /* 1 if task is in a syscall */
enum audit_state state;
unsigned int serial; /* serial number for record */
@@ -514,7 +518,7 @@
context->return_valid = return_valid;
context->return_code = return_code;
- if (context->in_syscall && !context->auditable) {
+ if (context->in_syscall && !context->dummy && !context->auditable) {
enum audit_state state;
state = audit_filter_syscall(tsk, context, &audit_filter_list[AUDIT_FILTER_EXIT]);
@@ -530,17 +534,7 @@
}
get_context:
- context->pid = tsk->pid;
- context->ppid = sys_getppid(); /* sic. tsk == current in all cases */
- context->uid = tsk->uid;
- context->gid = tsk->gid;
- context->euid = tsk->euid;
- context->suid = tsk->suid;
- context->fsuid = tsk->fsuid;
- context->egid = tsk->egid;
- context->sgid = tsk->sgid;
- context->fsgid = tsk->fsgid;
- context->personality = tsk->personality;
+
tsk->audit_context = NULL;
return context;
}
@@ -749,6 +743,17 @@
const char *tty;
/* tsk == current */
+ context->pid = tsk->pid;
+ context->ppid = sys_getppid(); /* sic. tsk == current in all cases */
+ context->uid = tsk->uid;
+ context->gid = tsk->gid;
+ context->euid = tsk->euid;
+ context->suid = tsk->suid;
+ context->fsuid = tsk->fsuid;
+ context->egid = tsk->egid;
+ context->sgid = tsk->sgid;
+ context->fsgid = tsk->fsgid;
+ context->personality = tsk->personality;
ab = audit_log_start(context, GFP_KERNEL, AUDIT_SYSCALL);
if (!ab)
@@ -1066,7 +1071,8 @@
context->argv[3] = a4;
state = context->state;
- if (state == AUDIT_SETUP_CONTEXT || state == AUDIT_BUILD_CONTEXT)
+ context->dummy = !audit_n_rules;
+ if (!context->dummy && (state == AUDIT_SETUP_CONTEXT || state == AUDIT_BUILD_CONTEXT))
state = audit_filter_syscall(tsk, context, &audit_filter_list[AUDIT_FILTER_ENTRY]);
if (likely(state == AUDIT_DISABLED))
return;
@@ -1199,14 +1205,18 @@
#endif
}
-static void audit_inode_context(int idx, const struct inode *inode)
+/* Copy inode data into an audit_names. */
+static void audit_copy_inode(struct audit_names *name, const struct inode *inode)
{
- struct audit_context *context = current->audit_context;
-
- selinux_get_inode_sid(inode, &context->names[idx].osid);
+ name->ino = inode->i_ino;
+ name->dev = inode->i_sb->s_dev;
+ name->mode = inode->i_mode;
+ name->uid = inode->i_uid;
+ name->gid = inode->i_gid;
+ name->rdev = inode->i_rdev;
+ selinux_get_inode_sid(inode, &name->osid);
}
-
/**
* audit_inode - store the inode and device from a lookup
* @name: name being audited
@@ -1240,20 +1250,14 @@
++context->ino_count;
#endif
}
- context->names[idx].ino = inode->i_ino;
- context->names[idx].dev = inode->i_sb->s_dev;
- context->names[idx].mode = inode->i_mode;
- context->names[idx].uid = inode->i_uid;
- context->names[idx].gid = inode->i_gid;
- context->names[idx].rdev = inode->i_rdev;
- audit_inode_context(idx, inode);
+ audit_copy_inode(&context->names[idx], inode);
}
/**
* audit_inode_child - collect inode info for created/removed objects
* @dname: inode's dentry name
* @inode: inode being audited
- * @pino: inode number of dentry parent
+ * @parent: inode of dentry parent
*
* For syscalls that create or remove filesystem objects, audit_inode
* can only collect information for the filesystem object's parent.
@@ -1264,7 +1268,7 @@
* unsuccessful attempts.
*/
void __audit_inode_child(const char *dname, const struct inode *inode,
- unsigned long pino)
+ const struct inode *parent)
{
int idx;
struct audit_context *context = current->audit_context;
@@ -1278,7 +1282,7 @@
if (!dname)
goto update_context;
for (idx = 0; idx < context->name_count; idx++)
- if (context->names[idx].ino == pino) {
+ if (context->names[idx].ino == parent->i_ino) {
const char *name = context->names[idx].name;
if (!name)
@@ -1302,16 +1306,47 @@
context->names[idx].name_len = AUDIT_NAME_FULL;
context->names[idx].name_put = 0; /* don't call __putname() */
- if (inode) {
- context->names[idx].ino = inode->i_ino;
- context->names[idx].dev = inode->i_sb->s_dev;
- context->names[idx].mode = inode->i_mode;
- context->names[idx].uid = inode->i_uid;
- context->names[idx].gid = inode->i_gid;
- context->names[idx].rdev = inode->i_rdev;
- audit_inode_context(idx, inode);
- } else
- context->names[idx].ino = (unsigned long)-1;
+ if (!inode)
+ context->names[idx].ino = (unsigned long)-1;
+ else
+ audit_copy_inode(&context->names[idx], inode);
+
+ /* A parent was not found in audit_names, so copy the inode data for the
+ * provided parent. */
+ if (!found_name) {
+ idx = context->name_count++;
+#if AUDIT_DEBUG
+ context->ino_count++;
+#endif
+ audit_copy_inode(&context->names[idx], parent);
+ }
+}
+
+/**
+ * audit_inode_update - update inode info for last collected name
+ * @inode: inode being audited
+ *
+ * When open() is called on an existing object with the O_CREAT flag, the inode
+ * data audit initially collects is incorrect. This additional hook ensures
+ * audit has the inode data for the actual object to be opened.
+ */
+void __audit_inode_update(const struct inode *inode)
+{
+ struct audit_context *context = current->audit_context;
+ int idx;
+
+ if (!context->in_syscall || !inode)
+ return;
+
+ if (context->name_count == 0) {
+ context->name_count++;
+#if AUDIT_DEBUG
+ context->ino_count++;
+#endif
+ }
+ idx = context->name_count - 1;
+
+ audit_copy_inode(&context->names[idx], inode);
}
/**
@@ -1642,7 +1677,7 @@
unsigned long p, next;
void *to;
- if (likely(!audit_enabled || !context))
+ if (likely(!audit_enabled || !context || context->dummy))
return 0;
ax = kmalloc(sizeof(*ax) + PAGE_SIZE * MAX_ARG_PAGES - bprm->p,
@@ -1680,7 +1715,7 @@
struct audit_aux_data_socketcall *ax;
struct audit_context *context = current->audit_context;
- if (likely(!context))
+ if (likely(!context || context->dummy))
return 0;
ax = kmalloc(sizeof(*ax) + nargs * sizeof(unsigned long), GFP_KERNEL);
@@ -1708,7 +1743,7 @@
struct audit_aux_data_sockaddr *ax;
struct audit_context *context = current->audit_context;
- if (likely(!context))
+ if (likely(!context || context->dummy))
return 0;
ax = kmalloc(sizeof(*ax) + len, GFP_KERNEL);
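The new context->dummy flag ties these hunks together: at syscall entry the context is marked dummy when audit_n_rules is zero, and the aux-data collectors above (execve arguments, socketcall, sockaddr) then return without allocating anything. A rough model of the short circuit, with simplified stand-in types rather than the real audit_context:

static int audit_n_rules;               /* stands in for the real counter */

struct toy_context {
        int dummy;                      /* no rules were loaded at syscall entry */
        int in_syscall;
};

static void toy_syscall_entry(struct toy_context *ctx)
{
        ctx->in_syscall = 1;
        ctx->dummy = !audit_n_rules;    /* cheap test instead of running the filters */
}

static int toy_collect_aux(const struct toy_context *ctx)
{
        if (!ctx || ctx->dummy)         /* mirrors "!context || context->dummy" */
                return 0;               /* skip allocation and copying entirely */
        /* ... allocate and fill aux data here ... */
        return 1;
}

int main(void)
{
        struct toy_context ctx = { 0, 0 };

        toy_syscall_entry(&ctx);        /* with audit_n_rules == 0 ... */
        return toy_collect_aux(&ctx);   /* ... nothing is collected */
}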
diff --git a/kernel/fork.c b/kernel/fork.c
index 1b0f7b1..aa36c43 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1387,8 +1387,10 @@
if (clone_flags & CLONE_VFORK) {
wait_for_completion(&vfork);
- if (unlikely (current->ptrace & PT_TRACE_VFORK_DONE))
+ if (unlikely (current->ptrace & PT_TRACE_VFORK_DONE)) {
+ current->ptrace_message = nr;
ptrace_notify ((PTRACE_EVENT_VFORK_DONE << 8) | SIGTRAP);
+ }
}
} else {
free_pid(pid);
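Storing the child's pid in current->ptrace_message before the PTRACE_EVENT_VFORK_DONE notification lets a tracer retrieve it with PTRACE_GETEVENTMSG, as it already can for the other fork events. A hedged sketch of the tracer side (the helper name is invented):

#include <sys/ptrace.h>
#include <sys/types.h>

/* Call while the tracee is stopped at the PTRACE_EVENT_VFORK_DONE event. */
pid_t child_pid_after_vfork_done(pid_t tracee)
{
        unsigned long msg = 0;

        if (ptrace(PTRACE_GETEVENTMSG, tracee, 0, &msg) == -1)
                return -1;              /* not traced, or not stopped at an event */
        return (pid_t)msg;              /* the value stored in ->ptrace_message */
}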
diff --git a/kernel/futex.c b/kernel/futex.c
index dda2049..c2b2e0b 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -948,6 +948,7 @@
/* In the common case we don't take the spinlock, which is nice. */
retry:
lock_ptr = q->lock_ptr;
+ barrier();
if (lock_ptr != 0) {
spin_lock(lock_ptr);
/*
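The barrier() keeps the compiler from re-reading q->lock_ptr after the NULL test; the field can be cleared concurrently by the task doing the wakeup, and only the local snapshot is safe to pass to spin_lock(). A stripped-down illustration of the idiom (toy types, gcc-style compiler barrier):

#define barrier()       __asm__ __volatile__("" ::: "memory")   /* gcc compiler barrier */

struct toy_q {
        void *lock_ptr;                 /* may be set to NULL by another CPU */
};

void *snapshot_lock_ptr(struct toy_q *q)
{
        void *lock_ptr = q->lock_ptr;   /* read the field once ... */

        barrier();                      /* ... and keep the compiler from re-reading it */
        if (lock_ptr != 0)
                return lock_ptr;        /* only the snapshot is ever used */
        return 0;
}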
diff --git a/kernel/futex_compat.c b/kernel/futex_compat.c
index d1aab1a..c5cca3f 100644
--- a/kernel/futex_compat.c
+++ b/kernel/futex_compat.c
@@ -39,7 +39,7 @@
{
struct compat_robust_list_head __user *head = curr->compat_robust_list;
struct robust_list __user *entry, *pending;
- unsigned int limit = ROBUST_LIST_LIMIT, pi;
+ unsigned int limit = ROBUST_LIST_LIMIT, pi, pip;
compat_uptr_t uentry, upending;
compat_long_t futex_offset;
@@ -59,10 +59,10 @@
* if it exists:
*/
if (fetch_robust_entry(&upending, &pending,
- &head->list_op_pending, &pi))
+ &head->list_op_pending, &pip))
return;
if (upending)
- handle_futex_death((void *)pending + futex_offset, curr, pi);
+ handle_futex_death((void *)pending + futex_offset, curr, pip);
while (compat_ptr(uentry) != &head->list) {
/*
diff --git a/kernel/power/process.c b/kernel/power/process.c
index b2a5f67..72e72d2 100644
--- a/kernel/power/process.c
+++ b/kernel/power/process.c
@@ -66,13 +66,25 @@
}
}
+static void cancel_freezing(struct task_struct *p)
+{
+ unsigned long flags;
+
+ if (freezing(p)) {
+ pr_debug(" clean up: %s\n", p->comm);
+ do_not_freeze(p);
+ spin_lock_irqsave(&p->sighand->siglock, flags);
+ recalc_sigpending_tsk(p);
+ spin_unlock_irqrestore(&p->sighand->siglock, flags);
+ }
+}
+
/* 0 = success, else # of processes that we failed to stop */
int freeze_processes(void)
{
int todo, nr_user, user_frozen;
unsigned long start_time;
struct task_struct *g, *p;
- unsigned long flags;
printk( "Stopping tasks: " );
start_time = jiffies;
@@ -85,6 +97,10 @@
continue;
if (frozen(p))
continue;
+ if (p->state == TASK_TRACED && frozen(p->parent)) {
+ cancel_freezing(p);
+ continue;
+ }
if (p->mm && !(p->flags & PF_BORROWED_MM)) {
/* The task is a user-space one.
* Freeze it unless there's a vfork completion
@@ -126,13 +142,7 @@
do_each_thread(g, p) {
if (freezeable(p) && !frozen(p))
printk(KERN_ERR " %s\n", p->comm);
- if (freezing(p)) {
- pr_debug(" clean up: %s\n", p->comm);
- p->flags &= ~PF_FREEZE;
- spin_lock_irqsave(&p->sighand->siglock, flags);
- recalc_sigpending_tsk(p);
- spin_unlock_irqrestore(&p->sighand->siglock, flags);
- }
+ cancel_freezing(p);
} while_each_thread(g, p);
read_unlock(&tasklist_lock);
return todo;
diff --git a/kernel/printk.c b/kernel/printk.c
index 65ca068..1149365 100644
--- a/kernel/printk.c
+++ b/kernel/printk.c
@@ -799,6 +799,9 @@
up(&secondary_console_sem);
return;
}
+
+ console_may_schedule = 0;
+
for ( ; ; ) {
spin_lock_irqsave(&logbuf_lock, flags);
wake_klogd |= log_start - log_end;
@@ -812,7 +815,6 @@
local_irq_restore(flags);
}
console_locked = 0;
- console_may_schedule = 0;
up(&console_sem);
spin_unlock_irqrestore(&logbuf_lock, flags);
if (wake_klogd && !oops_in_progress && waitqueue_active(&log_wait)) {
diff --git a/kernel/resource.c b/kernel/resource.c
index 0dd3a85..4628643 100644
--- a/kernel/resource.c
+++ b/kernel/resource.c
@@ -244,6 +244,7 @@
start = res->start;
end = res->end;
+ BUG_ON(start >= end);
read_lock(&resource_lock);
for (p = iomem_resource.child; p ; p = p->sibling) {
@@ -254,15 +255,17 @@
p = NULL;
break;
}
- if (p->start >= start)
+ if ((p->end >= start) && (p->start < end))
break;
}
read_unlock(&resource_lock);
if (!p)
return -1;
/* copy data */
- res->start = p->start;
- res->end = p->end;
+ if (res->start < p->start)
+ res->start = p->start;
+ if (res->end > p->end)
+ res->end = p->end;
return 0;
}
#endif
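The corrected test accepts any child resource that overlaps the requested range, not just one starting at or beyond it, and the copy-out clamps the result to the intersection. The same logic as a standalone toy, keeping the hunk's convention that the end of a range is inclusive:

struct toy_res {
        unsigned long start, end;       /* end is inclusive, as in struct resource */
};

/* Returns 0 and clamps *want to the intersection, or -1 when *found
 * does not overlap the requested range at all. */
int clamp_to_overlap(struct toy_res *want, const struct toy_res *found)
{
        if (found->end < want->start || found->start >= want->end)
                return -1;                      /* no overlap */
        if (want->start < found->start)
                want->start = found->start;     /* trim the low end */
        if (want->end > found->end)
                want->end = found->end;         /* trim the high end */
        return 0;
}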
diff --git a/lib/kobject_uevent.c b/lib/kobject_uevent.c
index 2b1530f..7f20e7b 100644
--- a/lib/kobject_uevent.c
+++ b/lib/kobject_uevent.c
@@ -50,10 +50,6 @@
return "offline";
case KOBJ_ONLINE:
return "online";
- case KOBJ_DOCK:
- return "dock";
- case KOBJ_UNDOCK:
- return "undock";
default:
return NULL;
}
diff --git a/lib/spinlock_debug.c b/lib/spinlock_debug.c
index 3d9c4dc..58c577d 100644
--- a/lib/spinlock_debug.c
+++ b/lib/spinlock_debug.c
@@ -162,6 +162,7 @@
#define RWLOCK_BUG_ON(cond, lock, msg) if (unlikely(cond)) rwlock_bug(lock, msg)
+#if 0 /* __write_lock_debug() can lock up - maybe this can too? */
static void __read_lock_debug(rwlock_t *lock)
{
int print_once = 1;
@@ -184,12 +185,12 @@
}
}
}
+#endif
void _raw_read_lock(rwlock_t *lock)
{
RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic");
- if (unlikely(!__raw_read_trylock(&lock->raw_lock)))
- __read_lock_debug(lock);
+ __raw_read_lock(&lock->raw_lock);
}
int _raw_read_trylock(rwlock_t *lock)
@@ -235,6 +236,7 @@
lock->owner_cpu = -1;
}
+#if 0 /* This can cause lockups */
static void __write_lock_debug(rwlock_t *lock)
{
int print_once = 1;
@@ -257,12 +259,12 @@
}
}
}
+#endif
void _raw_write_lock(rwlock_t *lock)
{
debug_write_lock_before(lock);
- if (unlikely(!__raw_write_trylock(&lock->raw_lock)))
- __write_lock_debug(lock);
+ __raw_write_lock(&lock->raw_lock);
debug_write_lock_after(lock);
}
diff --git a/mm/fadvise.c b/mm/fadvise.c
index 60a5d55..168c78a 100644
--- a/mm/fadvise.c
+++ b/mm/fadvise.c
@@ -73,7 +73,6 @@
file->f_ra.ra_pages = bdi->ra_pages * 2;
break;
case POSIX_FADV_WILLNEED:
- case POSIX_FADV_NOREUSE:
if (!mapping->a_ops->readpage) {
ret = -EINVAL;
break;
@@ -94,6 +93,8 @@
if (ret > 0)
ret = 0;
break;
+ case POSIX_FADV_NOREUSE:
+ break;
case POSIX_FADV_DONTNEED:
if (!bdi_write_congested(mapping->backing_dev_info))
filemap_flush(mapping);
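Seen from userspace, POSIX_FADV_NOREUSE is now accepted as a successful no-op instead of being handled like WILLNEED and forcing read-ahead. A minimal caller (the file path is invented):

#define _XOPEN_SOURCE 600
#include <fcntl.h>
#include <stdio.h>

int main(void)
{
        int err, fd = open("/tmp/example.dat", O_RDONLY);       /* hypothetical file */

        if (fd < 0)
                return 1;
        /* With this change the advice is accepted and simply does nothing. */
        err = posix_fadvise(fd, 0, 0, POSIX_FADV_NOREUSE);
        if (err != 0)
                fprintf(stderr, "posix_fadvise: error %d\n", err);
        return 0;
}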
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 01c9fb9..c373195 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -52,6 +52,9 @@
int nr_pages = PAGES_PER_SECTION;
int ret;
+ if (pfn_valid(phys_start_pfn))
+ return -EEXIST;
+
ret = sparse_add_one_section(zone, phys_start_pfn, nr_pages);
if (ret < 0)
@@ -76,15 +79,22 @@
{
unsigned long i;
int err = 0;
+ int start_sec, end_sec;
+ /* when initializing mem_map, align the hot-added range to sections */
+ start_sec = pfn_to_section_nr(phys_start_pfn);
+ end_sec = pfn_to_section_nr(phys_start_pfn + nr_pages - 1);
- for (i = 0; i < nr_pages; i += PAGES_PER_SECTION) {
- err = __add_section(zone, phys_start_pfn + i);
+ for (i = start_sec; i <= end_sec; i++) {
+ err = __add_section(zone, i << PFN_SECTION_SHIFT);
- /* We want to keep adding the rest of the
- * sections if the first ones already exist
+ /*
+ * -EEXIST is ultimately dealt with by the ioresource collision
+ * check; see add_memory() => register_memory_resource().
+ * A warning will be printed if there is a collision.
*/
if (err && (err != -EEXIST))
break;
+ err = 0;
}
return err;
@@ -156,7 +166,7 @@
res.flags = IORESOURCE_MEM; /* we just need system ram */
section_end = res.end;
- while (find_next_system_ram(&res) >= 0) {
+ while ((res.start < res.end) && (find_next_system_ram(&res) >= 0)) {
start_pfn = (unsigned long)(res.start >> PAGE_SHIFT);
nr_pages = (unsigned long)
((res.end + 1 - res.start) >> PAGE_SHIFT);
@@ -213,10 +223,9 @@
}
/* add this memory to iomem resource */
-static void register_memory_resource(u64 start, u64 size)
+static struct resource *register_memory_resource(u64 start, u64 size)
{
struct resource *res;
-
res = kzalloc(sizeof(struct resource), GFP_KERNEL);
BUG_ON(!res);
@@ -228,7 +237,18 @@
printk("System RAM resource %llx - %llx cannot be added\n",
(unsigned long long)res->start, (unsigned long long)res->end);
kfree(res);
+ res = NULL;
}
+ return res;
+}
+
+static void release_memory_resource(struct resource *res)
+{
+ if (!res)
+ return;
+ release_resource(res);
+ kfree(res);
+ return;
}
@@ -237,8 +257,13 @@
{
pg_data_t *pgdat = NULL;
int new_pgdat = 0;
+ struct resource *res;
int ret;
+ res = register_memory_resource(start, size);
+ if (!res)
+ return -EEXIST;
+
if (!node_online(nid)) {
pgdat = hotadd_new_pgdat(nid, start);
if (!pgdat)
@@ -268,14 +293,13 @@
BUG_ON(ret);
}
- /* register this memory as resource */
- register_memory_resource(start, size);
-
return ret;
error:
/* rollback pgdat allocation and others */
if (new_pgdat)
rollback_node_hotadd(nid, pgdat);
+ if (res)
+ release_memory_resource(res);
return ret;
}
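The rewritten loop walks whole sparsemem sections covering the hot-added range instead of stepping through it in raw page counts, so a range that does not start on a section boundary is still handled. The arithmetic in isolation, with an assumed section size (the real PFN_SECTION_SHIFT comes from the sparsemem configuration):

#include <stdio.h>

#define TOY_PFN_SECTION_SHIFT   15      /* assumption: 32768 pages per section */

static unsigned long toy_pfn_to_section_nr(unsigned long pfn)
{
        return pfn >> TOY_PFN_SECTION_SHIFT;
}

int main(void)
{
        unsigned long start_pfn = 40000, nr_pages = 70000;      /* hypothetical range */
        unsigned long start_sec = toy_pfn_to_section_nr(start_pfn);
        unsigned long end_sec = toy_pfn_to_section_nr(start_pfn + nr_pages - 1);
        unsigned long i;

        for (i = start_sec; i <= end_sec; i++)
                printf("add section %lu starting at pfn %lu\n",
                       i, i << TOY_PFN_SECTION_SHIFT);
        return 0;
}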
diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c
index 06abb66..53086fb 100644
--- a/net/bridge/br_netlink.c
+++ b/net/bridge/br_netlink.c
@@ -85,7 +85,7 @@
goto err_out;
err = br_fill_ifinfo(skb, port, current->pid, 0, event, 0);
- if (err)
+ if (err < 0)
goto err_kfree;
NETLINK_CB(skb).dst_group = RTNLGRP_LINK;
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 738dad9..104af5d5 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -3541,7 +3541,8 @@
if (inet_csk(sk)->icsk_ca_state == TCP_CA_Open &&
sk->sk_socket && !test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) {
/* Limited by application or receiver window. */
- u32 win_used = max(tp->snd_cwnd_used, 2U);
+ u32 init_win = tcp_init_cwnd(tp, __sk_dst_get(sk));
+ u32 win_used = max(tp->snd_cwnd_used, init_win);
if (win_used < tp->snd_cwnd) {
tp->snd_ssthresh = tcp_current_ssthresh(sk);
tp->snd_cwnd = (tp->snd_cwnd + win_used) >> 1;
diff --git a/net/lapb/lapb_iface.c b/net/lapb/lapb_iface.c
index d504eed..7e6bc41 100644
--- a/net/lapb/lapb_iface.c
+++ b/net/lapb/lapb_iface.c
@@ -238,11 +238,13 @@
goto out_put;
if (lapb->state == LAPB_STATE_0) {
- if (((parms->mode & LAPB_EXTENDED) &&
- (parms->window < 1 || parms->window > 127)) ||
- (parms->window < 1 || parms->window > 7))
- goto out_put;
-
+ if (parms->mode & LAPB_EXTENDED) {
+ if (parms->window < 1 || parms->window > 127)
+ goto out_put;
+ } else {
+ if (parms->window < 1 || parms->window > 7)
+ goto out_put;
+ }
lapb->mode = parms->mode;
lapb->window = parms->window;
}
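The original combined condition rejected every valid extended-mode window larger than 7, because the second range check was applied unconditionally; splitting the test restores the intended limits. The corrected predicate as a standalone toy (the flag value is illustrative):

enum { TOY_LAPB_EXTENDED = 0x01 };      /* illustrative flag bit */

/* Returns 1 when the requested window is acceptable for the mode. */
int lapb_window_ok(unsigned int mode, unsigned int window)
{
        if (mode & TOY_LAPB_EXTENDED)
                return window >= 1 && window <= 127;    /* modulo-128 operation */
        return window >= 1 && window <= 7;              /* modulo-8 operation */
}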
diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c
index d6cfe84..2652ead 100644
--- a/net/llc/af_llc.c
+++ b/net/llc/af_llc.c
@@ -784,24 +784,20 @@
copied += used;
len -= used;
- if (used + offset < skb->len)
- continue;
-
if (!(flags & MSG_PEEK)) {
sk_eat_skb(sk, skb, 0);
*seq = 0;
}
+
+ /* For non-stream protocols we get one packet per recvmsg call */
+ if (sk->sk_type != SOCK_STREAM)
+ goto copy_uaddr;
+
+ /* Partial read */
+ if (used + offset < skb->len)
+ continue;
} while (len > 0);
- /*
- * According to UNIX98, msg_name/msg_namelen are ignored
- * on connected socket. -ANK
- * But... af_llc still doesn't have separate sets of methods for
- * SOCK_DGRAM and SOCK_STREAM :-( So we have to do this test, will
- * eventually fix this tho :-) -acme
- */
- if (sk->sk_type == SOCK_DGRAM)
- goto copy_uaddr;
out:
release_sock(sk);
return copied;
diff --git a/net/llc/llc_sap.c b/net/llc/llc_sap.c
index 20c4eb5..42eb0c3 100644
--- a/net/llc/llc_sap.c
+++ b/net/llc/llc_sap.c
@@ -51,10 +51,10 @@
{
struct sockaddr_llc *addr;
- if (skb->sk->sk_type == SOCK_STREAM) /* See UNIX98 */
- return;
/* save primitive for use by the user. */
addr = llc_ui_skb_cb(skb);
+
+ memset(addr, 0, sizeof(*addr));
addr->sllc_family = sk->sk_family;
addr->sllc_arphrd = skb->dev->type;
addr->sllc_test = prim == LLC_TEST_PRIM;
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index c7844ba..a19eff1 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -430,7 +430,7 @@
}
#endif
- err = -EINVAL;
+ err = -ENOENT;
if (ops == NULL)
goto err_out;
diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c
index 7026b08..00cb388 100644
--- a/net/sunrpc/cache.c
+++ b/net/sunrpc/cache.c
@@ -71,7 +71,12 @@
new = detail->alloc();
if (!new)
return NULL;
+ /* must fully initialise 'new', else
+ * we might lose it if we need to
+ * cache_put it soon.
+ */
cache_init(new);
+ detail->init(new, key);
write_lock(&detail->hash_lock);
@@ -85,7 +90,6 @@
return tmp;
}
}
- detail->init(new, key);
new->next = *head;
*head = new;
detail->entries++;
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
index 4ba271f..d6409e7 100644
--- a/net/sunrpc/clnt.c
+++ b/net/sunrpc/clnt.c
@@ -921,26 +921,43 @@
task->tk_status = xprt_prepare_transmit(task);
if (task->tk_status != 0)
return;
+ task->tk_action = call_transmit_status;
/* Encode here so that rpcsec_gss can use correct sequence number. */
if (rpc_task_need_encode(task)) {
- task->tk_rqstp->rq_bytes_sent = 0;
+ BUG_ON(task->tk_rqstp->rq_bytes_sent != 0);
call_encode(task);
/* Did the encode result in an error condition? */
if (task->tk_status != 0)
- goto out_nosend;
+ return;
}
- task->tk_action = call_transmit_status;
xprt_transmit(task);
if (task->tk_status < 0)
return;
- if (!task->tk_msg.rpc_proc->p_decode) {
- task->tk_action = rpc_exit_task;
- rpc_wake_up_task(task);
- }
- return;
-out_nosend:
- /* release socket write lock before attempting to handle error */
- xprt_abort_transmit(task);
+ /*
+ * On success, ensure that we call xprt_end_transmit() before sleeping
+ * in order to allow access to the socket to other RPC requests.
+ */
+ call_transmit_status(task);
+ if (task->tk_msg.rpc_proc->p_decode != NULL)
+ return;
+ task->tk_action = rpc_exit_task;
+ rpc_wake_up_task(task);
+}
+
+/*
+ * 5a. Handle cleanup after a transmission
+ */
+static void
+call_transmit_status(struct rpc_task *task)
+{
+ task->tk_action = call_status;
+ /*
+ * Special case: if we've been waiting on the socket's write_space()
+ * callback, then don't call xprt_end_transmit().
+ */
+ if (task->tk_status == -EAGAIN)
+ return;
+ xprt_end_transmit(task);
rpc_task_force_reencode(task);
}
@@ -992,18 +1009,7 @@
}
/*
- * 6a. Handle transmission errors.
- */
-static void
-call_transmit_status(struct rpc_task *task)
-{
- if (task->tk_status != -EAGAIN)
- rpc_task_force_reencode(task);
- call_status(task);
-}
-
-/*
- * 6b. Handle RPC timeout
+ * 6a. Handle RPC timeout
* We do not release the request slot, so we keep using the
* same XID for all retransmits.
*/
diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c
index dc6cb93..a3bd2db 100644
--- a/net/sunrpc/rpc_pipe.c
+++ b/net/sunrpc/rpc_pipe.c
@@ -667,10 +667,11 @@
RPCAUTH_info, RPCAUTH_EOF);
if (error)
goto err_depopulate;
+ dget(dentry);
out:
mutex_unlock(&dir->i_mutex);
rpc_release_path(&nd);
- return dget(dentry);
+ return dentry;
err_depopulate:
rpc_depopulate(dentry);
__rpc_rmdir(dir, dentry);
@@ -731,10 +732,11 @@
rpci->flags = flags;
rpci->ops = ops;
inode_dir_notify(dir, DN_CREATE);
+ dget(dentry);
out:
mutex_unlock(&dir->i_mutex);
rpc_release_path(&nd);
- return dget(dentry);
+ return dentry;
err_dput:
dput(dentry);
dentry = ERR_PTR(-ENOMEM);
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
index 313b68d..e8c2bc4 100644
--- a/net/sunrpc/xprt.c
+++ b/net/sunrpc/xprt.c
@@ -707,12 +707,9 @@
return err;
}
-void
-xprt_abort_transmit(struct rpc_task *task)
+void xprt_end_transmit(struct rpc_task *task)
{
- struct rpc_xprt *xprt = task->tk_xprt;
-
- xprt_release_write(xprt, task);
+ xprt_release_write(task->tk_xprt, task);
}
/**
@@ -761,8 +758,6 @@
task->tk_status = -ENOTCONN;
else if (!req->rq_received)
rpc_sleep_on(&xprt->pending, task, NULL, xprt_timer);
-
- xprt->ops->release_xprt(xprt, task);
spin_unlock_bh(&xprt->transport_lock);
return;
}
@@ -772,18 +767,8 @@
* schedq, and being picked up by a parallel run of rpciod().
*/
task->tk_status = status;
-
- switch (status) {
- case -ECONNREFUSED:
+ if (status == -ECONNREFUSED)
rpc_sleep_on(&xprt->sending, task, NULL, NULL);
- case -EAGAIN:
- case -ENOTCONN:
- return;
- default:
- break;
- }
- xprt_release_write(xprt, task);
- return;
}
static inline void do_xprt_reserve(struct rpc_task *task)
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index ee678ed..441bd53 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -414,6 +414,33 @@
}
/**
+ * xs_tcp_release_xprt - clean up after a tcp transmission
+ * @xprt: transport
+ * @task: rpc task
+ *
+ * This cleans up if an error causes us to abort the transmission of a request.
+ * In this case, the socket may need to be reset in order to avoid confusing
+ * the server.
+ */
+static void xs_tcp_release_xprt(struct rpc_xprt *xprt, struct rpc_task *task)
+{
+ struct rpc_rqst *req;
+
+ if (task != xprt->snd_task)
+ return;
+ if (task == NULL)
+ goto out_release;
+ req = task->tk_rqstp;
+ if (req->rq_bytes_sent == 0)
+ goto out_release;
+ if (req->rq_bytes_sent == req->rq_snd_buf.len)
+ goto out_release;
+ set_bit(XPRT_CLOSE_WAIT, &task->tk_xprt->state);
+out_release:
+ xprt_release_xprt(xprt, task);
+}
+
+/**
* xs_close - close a socket
* @xprt: transport
*
@@ -1250,7 +1277,7 @@
static struct rpc_xprt_ops xs_tcp_ops = {
.reserve_xprt = xprt_reserve_xprt,
- .release_xprt = xprt_release_xprt,
+ .release_xprt = xs_tcp_release_xprt,
.set_port = xs_set_port,
.connect = xs_connect,
.buf_alloc = rpc_malloc,