Merge branch 'sched-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'sched-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
sched: Fix update_curr_rt()
sched, docs: Update schedstats documentation to version 15
diff --git a/Documentation/feature-removal-schedule.txt b/Documentation/feature-removal-schedule.txt
index b959659..b3f35e5 100644
--- a/Documentation/feature-removal-schedule.txt
+++ b/Documentation/feature-removal-schedule.txt
@@ -603,3 +603,19 @@
Who: Jean Delvare <khali@linux-fr.org>
----------------------------
+
+What: noswapaccount kernel command line parameter
+When: 2.6.40
+Why: The original implementation of the memsw feature enabled by
+ CONFIG_CGROUP_MEM_RES_CTLR_SWAP could be disabled by the noswapaccount
+ kernel parameter (introduced in 2.6.29-rc1). Later on, this turned out
+ to be less than ideal because we cannot have the feature compiled in
+ and disabled by default while letting only interested users enable it
+ (e.g. general distribution kernels might need it). Therefore we added
+ the swapaccount[=0|1] parameter (introduced in 2.6.37), which provides
+ both possibilities. If we remove noswapaccount we will have fewer
+ command line parameters with the same functionality and we can also
+ clean up the parameter handling a bit.
+Who: Michal Hocko <mhocko@suse.cz>
+
+----------------------------
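The swapaccount[=0|1] handling referred to above is wired up as an early __setup()
handler; the real one lives in mm/memcontrol.c and is touched by a later hunk of this
very patch. As a hedged sketch only (illustrative names, not part of the patch), such a
handler typically looks like:

    #include <linux/init.h>
    #include <linux/string.h>

    static int example_do_swap_account __initdata = 1;	/* illustrative knob */

    /* "swapaccount" and "swapaccount=1" enable accounting, "swapaccount=0" disables it */
    static int __init example_swapaccount_setup(char *s)
    {
            if (!*s || !strcmp(s, "=1"))
                    example_do_swap_account = 1;
            else if (!strcmp(s, "=0"))
                    example_do_swap_account = 0;
            return 1;
    }
    __setup("swapaccount", example_swapaccount_setup);

The deprecated noswapaccount parameter then simply maps onto the "=0" case, which is
what the mm/memcontrol.c hunk later in this patch does.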
diff --git a/MAINTAINERS b/MAINTAINERS
index 445537d..9511bff 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -978,6 +978,8 @@
F: arch/arm/plat-samsung/
F: arch/arm/plat-s3c24xx/
F: arch/arm/plat-s5p/
+F: drivers/*/*s3c2410*
+F: drivers/*/*/*s3c2410*
ARM/S3C2410 ARM ARCHITECTURE
M: Ben Dooks <ben-linux@fluff.org>
@@ -5614,18 +5616,20 @@
SIMTEC EB110ATX (Chalice CATS)
P: Ben Dooks
-M: Vincent Sanders <support@simtec.co.uk>
+P: Vincent Sanders <vince@simtec.co.uk>
+M: Simtec Linux Team <linux@simtec.co.uk>
W: http://www.simtec.co.uk/products/EB110ATX/
S: Supported
SIMTEC EB2410ITX (BAST)
P: Ben Dooks
-M: Vincent Sanders <support@simtec.co.uk>
+P: Vincent Sanders <vince@simtec.co.uk>
+M: Simtec Linux Team <linux@simtec.co.uk>
W: http://www.simtec.co.uk/products/EB2410ITX/
S: Supported
-F: arch/arm/mach-s3c2410/
-F: drivers/*/*s3c2410*
-F: drivers/*/*/*s3c2410*
+F: arch/arm/mach-s3c2410/mach-bast.c
+F: arch/arm/mach-s3c2410/bast-ide.c
+F: arch/arm/mach-s3c2410/bast-irq.c
TI DAVINCI MACHINE SUPPORT
M: Kevin Hilman <khilman@deeprootsystems.com>
diff --git a/arch/microblaze/kernel/head.S b/arch/microblaze/kernel/head.S
index 4243400..0db20b5 100644
--- a/arch/microblaze/kernel/head.S
+++ b/arch/microblaze/kernel/head.S
@@ -77,8 +77,18 @@
We ensure r7 points to a valid FDT, just in case the bootloader
is broken or non-existent */
beqi r7, no_fdt_arg /* NULL pointer? don't copy */
- lw r11, r0, r7 /* Does r7 point to a */
- rsubi r11, r11, OF_DT_HEADER /* valid FDT? */
+/* Does r7 point to a valid FDT? Load HEADER magic number */
+ /* Run-time detection of a big/little endian platform */
+ /* Save 1 as a word and load a byte back - 0 means BIG endian, 1 means LITTLE */
+ addik r11, r0, 0x1 /* BIG/LITTLE checking value */
+ /* __bss_start will be zeroed later - it is just temp location */
+ swi r11, r0, TOPHYS(__bss_start)
+ lbui r11, r0, TOPHYS(__bss_start)
+ beqid r11, big_endian /* DO NOT break delay stop dependency */
+ lw r11, r0, r7 /* Big endian load in delay slot */
+ lwr r11, r0, r7 /* Little endian load */
+big_endian:
+ rsubi r11, r11, OF_DT_HEADER /* Check FDT header */
beqi r11, _prepare_copy_fdt
or r7, r0, r0 /* clear R7 when not valid DTB */
bnei r11, no_fdt_arg /* No - get out of here */
diff --git a/arch/microblaze/kernel/hw_exception_handler.S b/arch/microblaze/kernel/hw_exception_handler.S
index 25f6e07..782680d 100644
--- a/arch/microblaze/kernel/hw_exception_handler.S
+++ b/arch/microblaze/kernel/hw_exception_handler.S
@@ -147,10 +147,6 @@
#if CONFIG_XILINX_MICROBLAZE0_USE_BARREL > 0
#define BSRLI(rD, rA, imm) \
bsrli rD, rA, imm
- #elif CONFIG_XILINX_MICROBLAZE0_USE_DIV > 0
- #define BSRLI(rD, rA, imm) \
- ori rD, r0, (1 << imm); \
- idivu rD, rD, rA
#else
#define BSRLI(rD, rA, imm) BSRLI ## imm (rD, rA)
/* Only the used shift constants defined here - add more if needed */
diff --git a/arch/microblaze/lib/fastcopy.S b/arch/microblaze/lib/fastcopy.S
index fdc48bb..62021d7 100644
--- a/arch/microblaze/lib/fastcopy.S
+++ b/arch/microblaze/lib/fastcopy.S
@@ -29,6 +29,10 @@
* between mem locations with size of xfer spec'd in bytes
*/
+#ifdef __MICROBLAZEEL__
+#error Microblaze LE does not support the ASM-optimized lib functions; disable OPT_LIB_ASM.
+#endif
+
#include <linux/linkage.h>
.text
.globl memcpy
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index ff19efdf..636bcb8 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -406,7 +406,7 @@
If unsure, say Y.
config CHSC_SCH
- def_tristate y
+ def_tristate m
prompt "Support for CHSC subchannels"
help
This driver allows usage of CHSC subchannels. A CHSC subchannel
diff --git a/arch/s390/include/asm/cacheflush.h b/arch/s390/include/asm/cacheflush.h
index 405cc97..7e1f776 100644
--- a/arch/s390/include/asm/cacheflush.h
+++ b/arch/s390/include/asm/cacheflush.h
@@ -1,29 +1,8 @@
#ifndef _S390_CACHEFLUSH_H
#define _S390_CACHEFLUSH_H
-/* Keep includes the same across arches. */
-#include <linux/mm.h>
-
/* Caches aren't brain-dead on the s390. */
-#define flush_cache_all() do { } while (0)
-#define flush_cache_mm(mm) do { } while (0)
-#define flush_cache_dup_mm(mm) do { } while (0)
-#define flush_cache_range(vma, start, end) do { } while (0)
-#define flush_cache_page(vma, vmaddr, pfn) do { } while (0)
-#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0
-#define flush_dcache_page(page) do { } while (0)
-#define flush_dcache_mmap_lock(mapping) do { } while (0)
-#define flush_dcache_mmap_unlock(mapping) do { } while (0)
-#define flush_icache_range(start, end) do { } while (0)
-#define flush_icache_page(vma,pg) do { } while (0)
-#define flush_icache_user_range(vma,pg,adr,len) do { } while (0)
-#define flush_cache_vmap(start, end) do { } while (0)
-#define flush_cache_vunmap(start, end) do { } while (0)
-
-#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
- memcpy(dst, src, len)
-#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
- memcpy(dst, src, len)
+#include <asm-generic/cacheflush.h>
#ifdef CONFIG_DEBUG_PAGEALLOC
void kernel_map_pages(struct page *page, int numpages, int enable);
diff --git a/arch/s390/include/asm/tlb.h b/arch/s390/include/asm/tlb.h
index f1f644f..9074a54 100644
--- a/arch/s390/include/asm/tlb.h
+++ b/arch/s390/include/asm/tlb.h
@@ -22,6 +22,7 @@
*/
#include <linux/mm.h>
+#include <linux/pagemap.h>
#include <linux/swap.h>
#include <asm/processor.h>
#include <asm/pgalloc.h>
diff --git a/arch/s390/lib/uaccess_std.c b/arch/s390/lib/uaccess_std.c
index 07deaee..a6c4f7e 100644
--- a/arch/s390/lib/uaccess_std.c
+++ b/arch/s390/lib/uaccess_std.c
@@ -125,9 +125,9 @@
unsigned long tmp1;
asm volatile(
+ " sacf 256\n"
" "AHI" %0,-1\n"
" jo 5f\n"
- " sacf 256\n"
" bras %3,3f\n"
"0:"AHI" %0,257\n"
"1: mvc 0(1,%1),0(%2)\n"
@@ -142,9 +142,8 @@
"3:"AHI" %0,-256\n"
" jnm 2b\n"
"4: ex %0,1b-0b(%3)\n"
- " sacf 0\n"
"5: "SLR" %0,%0\n"
- "6:\n"
+ "6: sacf 0\n"
EX_TABLE(1b,6b) EX_TABLE(2b,0b) EX_TABLE(4b,0b)
: "+a" (size), "+a" (to), "+a" (from), "=a" (tmp1)
: : "cc", "memory");
@@ -156,9 +155,9 @@
unsigned long tmp1, tmp2;
asm volatile(
+ " sacf 256\n"
" "AHI" %0,-1\n"
" jo 5f\n"
- " sacf 256\n"
" bras %3,3f\n"
" xc 0(1,%1),0(%1)\n"
"0:"AHI" %0,257\n"
@@ -178,9 +177,8 @@
"3:"AHI" %0,-256\n"
" jnm 2b\n"
"4: ex %0,0(%3)\n"
- " sacf 0\n"
"5: "SLR" %0,%0\n"
- "6:\n"
+ "6: sacf 0\n"
EX_TABLE(1b,6b) EX_TABLE(2b,0b) EX_TABLE(4b,0b)
: "+a" (size), "+a" (to), "=a" (tmp1), "=a" (tmp2)
: : "cc", "memory");
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index 0c719c6..e1850c2 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -336,7 +336,8 @@
page->flags ^= bits;
if (page->flags & FRAG_MASK) {
/* Page now has some free pgtable fragments. */
- list_move(&page->lru, &mm->context.pgtable_list);
+ if (!list_empty(&page->lru))
+ list_move(&page->lru, &mm->context.pgtable_list);
page = NULL;
} else
/* All fragments of the 4K page have been freed. */
diff --git a/arch/x86/kernel/cpu/perf_event_p4.c b/arch/x86/kernel/cpu/perf_event_p4.c
index e56b9bf..f7a0993 100644
--- a/arch/x86/kernel/cpu/perf_event_p4.c
+++ b/arch/x86/kernel/cpu/perf_event_p4.c
@@ -682,7 +682,7 @@
* if an event is shared across the logical threads
* the user needs special permissions to be able to use it
*/
- if (p4_event_bind_map[v].shared) {
+ if (p4_ht_active() && p4_event_bind_map[v].shared) {
if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN))
return -EACCES;
}
@@ -727,7 +727,8 @@
event->hw.config = p4_set_ht_bit(event->hw.config);
if (event->attr.type == PERF_TYPE_RAW) {
-
+ struct p4_event_bind *bind;
+ unsigned int esel;
/*
* Clear bits we reserve to be managed by kernel itself
* and never allowed from a user space
@@ -743,6 +744,13 @@
* bits since we keep additional info here (for cache events and etc)
*/
event->hw.config |= event->attr.config;
+ bind = p4_config_get_bind(event->attr.config);
+ if (!bind) {
+ rc = -EINVAL;
+ goto out;
+ }
+ esel = P4_OPCODE_ESEL(bind->opcode);
+ event->hw.config |= p4_config_pack_cccr(P4_CCCR_ESEL(esel));
}
rc = x86_setup_perfctr(event);
diff --git a/drivers/gpu/stub/Kconfig b/drivers/gpu/stub/Kconfig
index 09aea5f..70e60a4 100644
--- a/drivers/gpu/stub/Kconfig
+++ b/drivers/gpu/stub/Kconfig
@@ -1,11 +1,13 @@
config STUB_POULSBO
tristate "Intel GMA500 Stub Driver"
depends on PCI
+ depends on NET # for THERMAL
# Poulsbo stub depends on ACPI_VIDEO when ACPI is enabled
# but for select to work, need to select ACPI_VIDEO's dependencies, ick
select BACKLIGHT_CLASS_DEVICE if ACPI
select INPUT if ACPI
select ACPI_VIDEO if ACPI
+ select THERMAL if ACPI
help
Choose this option if you have a system that has Intel GMA500
(Poulsbo) integrated graphics. If M is selected, the module will
diff --git a/drivers/media/rc/ir-lirc-codec.c b/drivers/media/rc/ir-lirc-codec.c
index f011c5d..1c5cc65 100644
--- a/drivers/media/rc/ir-lirc-codec.c
+++ b/drivers/media/rc/ir-lirc-codec.c
@@ -1,4 +1,4 @@
-/* ir-lirc-codec.c - ir-core to classic lirc interface bridge
+/* ir-lirc-codec.c - rc-core to classic lirc interface bridge
*
* Copyright (C) 2010 by Jarod Wilson <jarod@redhat.com>
*
@@ -47,6 +47,7 @@
/* Carrier reports */
if (ev.carrier_report) {
sample = LIRC_FREQUENCY(ev.carrier);
+ IR_dprintk(2, "carrier report (freq: %d)\n", sample);
/* Packet end */
} else if (ev.timeout) {
@@ -62,6 +63,7 @@
return 0;
sample = LIRC_TIMEOUT(ev.duration / 1000);
+ IR_dprintk(2, "timeout report (duration: %d)\n", sample);
/* Normal sample */
} else {
@@ -85,6 +87,8 @@
sample = ev.pulse ? LIRC_PULSE(ev.duration / 1000) :
LIRC_SPACE(ev.duration / 1000);
+ IR_dprintk(2, "delivering %uus %s to lirc_dev\n",
+ TO_US(ev.duration), TO_STR(ev.pulse));
}
lirc_buffer_write(dev->raw->lirc.drv->rbuf,
diff --git a/drivers/media/rc/keymaps/rc-rc6-mce.c b/drivers/media/rc/keymaps/rc-rc6-mce.c
index 3bf3337..2f5dc06 100644
--- a/drivers/media/rc/keymaps/rc-rc6-mce.c
+++ b/drivers/media/rc/keymaps/rc-rc6-mce.c
@@ -3,6 +3,9 @@
*
* Copyright (c) 2010 by Jarod Wilson <jarod@redhat.com>
*
+ * See http://mediacenterguides.com/book/export/html/31 for details on
+ * key mappings.
+ *
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
@@ -60,6 +63,9 @@
{ 0x800f0426, KEY_EPG }, /* Guide */
{ 0x800f0427, KEY_ZOOM }, /* Aspect */
+ { 0x800f0432, KEY_MODE }, /* Visualization */
+ { 0x800f0433, KEY_PRESENTATION }, /* Slide Show */
+ { 0x800f0434, KEY_EJECTCD },
{ 0x800f043a, KEY_BRIGHTNESSUP },
{ 0x800f0446, KEY_TV },
diff --git a/drivers/media/rc/mceusb.c b/drivers/media/rc/mceusb.c
index 079353e..6df0a49 100644
--- a/drivers/media/rc/mceusb.c
+++ b/drivers/media/rc/mceusb.c
@@ -816,7 +816,7 @@
switch (ir->buf_in[index]) {
/* 2-byte return value commands */
case MCE_CMD_S_TIMEOUT:
- ir->rc->timeout = MS_TO_NS((hi << 8 | lo) / 2);
+ ir->rc->timeout = US_TO_NS((hi << 8 | lo) / 2);
break;
/* 1-byte return value commands */
@@ -855,9 +855,10 @@
break;
case PARSE_IRDATA:
ir->rem--;
+ init_ir_raw_event(&rawir);
rawir.pulse = ((ir->buf_in[i] & MCE_PULSE_BIT) != 0);
rawir.duration = (ir->buf_in[i] & MCE_PULSE_MASK)
- * MS_TO_US(MCE_TIME_UNIT);
+ * US_TO_NS(MCE_TIME_UNIT);
dev_dbg(ir->dev, "Storing %s with duration %d\n",
rawir.pulse ? "pulse" : "space",
@@ -883,6 +884,8 @@
i, ir->rem + 1, false);
if (ir->rem)
ir->parser_state = PARSE_IRDATA;
+ else
+ ir_raw_event_reset(ir->rc);
break;
}
@@ -1060,7 +1063,7 @@
rc->priv = ir;
rc->driver_type = RC_DRIVER_IR_RAW;
rc->allowed_protos = RC_TYPE_ALL;
- rc->timeout = MS_TO_NS(1000);
+ rc->timeout = US_TO_NS(1000);
if (!ir->flags.no_tx) {
rc->s_tx_mask = mceusb_set_tx_mask;
rc->s_tx_carrier = mceusb_set_tx_carrier;
diff --git a/drivers/media/rc/nuvoton-cir.c b/drivers/media/rc/nuvoton-cir.c
index dd4caf8..273d9d6 100644
--- a/drivers/media/rc/nuvoton-cir.c
+++ b/drivers/media/rc/nuvoton-cir.c
@@ -460,7 +460,7 @@
return 0;
}
- carrier = (count * 1000000) / duration;
+ carrier = MS_TO_NS(count) / duration;
if ((carrier > MAX_CARRIER) || (carrier < MIN_CARRIER))
nvt_dbg("WTF? Carrier frequency out of range!");
@@ -612,8 +612,8 @@
sample = nvt->buf[i];
rawir.pulse = ((sample & BUF_PULSE_BIT) != 0);
- rawir.duration = (sample & BUF_LEN_MASK)
- * SAMPLE_PERIOD * 1000;
+ rawir.duration = US_TO_NS((sample & BUF_LEN_MASK)
+ * SAMPLE_PERIOD);
if ((sample & BUF_LEN_MASK) == BUF_LEN_MASK) {
if (nvt->rawir.pulse == rawir.pulse)
diff --git a/drivers/media/rc/streamzap.c b/drivers/media/rc/streamzap.c
index 6e2911c..e435d94 100644
--- a/drivers/media/rc/streamzap.c
+++ b/drivers/media/rc/streamzap.c
@@ -164,7 +164,7 @@
sz->signal_start.tv_usec -
sz->signal_last.tv_usec);
rawir.duration -= sz->sum;
- rawir.duration *= 1000;
+ rawir.duration = US_TO_NS(rawir.duration);
rawir.duration &= IR_MAX_DURATION;
}
sz_push(sz, rawir);
@@ -177,7 +177,7 @@
rawir.duration = ((int) value) * SZ_RESOLUTION;
rawir.duration += SZ_RESOLUTION / 2;
sz->sum += rawir.duration;
- rawir.duration *= 1000;
+ rawir.duration = US_TO_NS(rawir.duration);
rawir.duration &= IR_MAX_DURATION;
sz_push(sz, rawir);
}
@@ -197,7 +197,7 @@
rawir.duration = ((int) value) * SZ_RESOLUTION;
rawir.duration += SZ_RESOLUTION / 2;
sz->sum += rawir.duration;
- rawir.duration *= 1000;
+ rawir.duration = US_TO_NS(rawir.duration);
sz_push(sz, rawir);
}
@@ -273,6 +273,7 @@
if (sz->timeout_enabled)
sz_push(sz, rawir);
ir_raw_event_handle(sz->rdev);
+ ir_raw_event_reset(sz->rdev);
} else {
sz_push_full_space(sz, sz->buf_in[i]);
}
@@ -290,6 +291,7 @@
}
}
+ ir_raw_event_handle(sz->rdev);
usb_submit_urb(urb, GFP_ATOMIC);
return;
@@ -430,13 +432,13 @@
sz->decoder_state = PulseSpace;
/* FIXME: don't yet have a way to set this */
sz->timeout_enabled = true;
- sz->rdev->timeout = (((SZ_TIMEOUT * SZ_RESOLUTION * 1000) &
+ sz->rdev->timeout = ((US_TO_NS(SZ_TIMEOUT * SZ_RESOLUTION) &
IR_MAX_DURATION) | 0x03000000);
#if 0
/* not yet supported, depends on patches from maxim */
/* see also: LIRC_GET_REC_RESOLUTION and LIRC_SET_REC_TIMEOUT */
- sz->min_timeout = SZ_TIMEOUT * SZ_RESOLUTION * 1000;
- sz->max_timeout = SZ_TIMEOUT * SZ_RESOLUTION * 1000;
+ sz->min_timeout = US_TO_NS(SZ_TIMEOUT * SZ_RESOLUTION);
+ sz->max_timeout = US_TO_NS(SZ_TIMEOUT * SZ_RESOLUTION);
#endif
do_gettimeofday(&sz->signal_start);
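The streamzap, mceusb and nuvoton changes above all stem from rc-core keeping raw IR
durations in nanoseconds while hardware samples arrive in microseconds or milliseconds.
Assuming the usual definitions of the conversion helpers (their exact location is not
part of this patch), the substitutions for the open-coded "* 1000" amount to:

    /* assumed helper definitions, shown for orientation only */
    #define MS_TO_US(msec)	((msec) * 1000)
    #define US_TO_NS(usec)	((usec) * 1000)
    #define MS_TO_NS(msec)	((msec) * 1000 * 1000)

    /* e.g. a 50 us space becomes US_TO_NS(50) == 50000 ns before being
     * stored in rawir.duration */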
diff --git a/drivers/media/video/gspca/zc3xx.c b/drivers/media/video/gspca/zc3xx.c
index 865216e..47236a5 100644
--- a/drivers/media/video/gspca/zc3xx.c
+++ b/drivers/media/video/gspca/zc3xx.c
@@ -5793,7 +5793,7 @@
break;
default:
/* case 0xdd: * delay */
- msleep(action->val / 64 + 10);
+ msleep(action->idx);
break;
}
action++;
@@ -5830,7 +5830,7 @@
[SENSOR_GC0305] = gc0305_matrix,
[SENSOR_HDCS2020b] = NULL,
[SENSOR_HV7131B] = NULL,
- [SENSOR_HV7131R] = NULL,
+ [SENSOR_HV7131R] = po2030_matrix,
[SENSOR_ICM105A] = po2030_matrix,
[SENSOR_MC501CB] = NULL,
[SENSOR_MT9V111_1] = gc0305_matrix,
@@ -5936,6 +5936,7 @@
case SENSOR_ADCM2700:
case SENSOR_GC0305:
case SENSOR_HV7131B:
+ case SENSOR_HV7131R:
case SENSOR_OV7620:
case SENSOR_PAS202B:
case SENSOR_PO2030:
@@ -6108,11 +6109,13 @@
reg_w(gspca_dev, 0x02, 0x003b);
reg_w(gspca_dev, 0x00, 0x0038);
break;
+ case SENSOR_HV7131R:
case SENSOR_PAS202B:
reg_w(gspca_dev, 0x03, 0x003b);
reg_w(gspca_dev, 0x0c, 0x003a);
reg_w(gspca_dev, 0x0b, 0x0039);
- reg_w(gspca_dev, 0x0b, 0x0038);
+ if (sensor == SENSOR_PAS202B)
+ reg_w(gspca_dev, 0x0b, 0x0038);
break;
}
}
@@ -6704,10 +6707,13 @@
reg_w(gspca_dev, 0x02, 0x003b);
reg_w(gspca_dev, 0x00, 0x0038);
break;
+ case SENSOR_HV7131R:
case SENSOR_PAS202B:
reg_w(gspca_dev, 0x03, 0x003b);
reg_w(gspca_dev, 0x0c, 0x003a);
reg_w(gspca_dev, 0x0b, 0x0039);
+ if (sd->sensor == SENSOR_HV7131R)
+ reg_w(gspca_dev, 0x50, ZC3XX_R11D_GLOBALGAIN);
break;
}
@@ -6720,6 +6726,7 @@
break;
case SENSOR_PAS202B:
case SENSOR_GC0305:
+ case SENSOR_HV7131R:
case SENSOR_TAS5130C:
reg_r(gspca_dev, 0x0008);
/* fall thru */
@@ -6760,6 +6767,12 @@
/* ms-win + */
reg_w(gspca_dev, 0x40, 0x0117);
break;
+ case SENSOR_HV7131R:
+ i2c_write(gspca_dev, 0x25, 0x04, 0x00); /* exposure */
+ i2c_write(gspca_dev, 0x26, 0x93, 0x00);
+ i2c_write(gspca_dev, 0x27, 0xe0, 0x00);
+ reg_w(gspca_dev, 0x00, ZC3XX_R1A7_CALCGLOBALMEAN);
+ break;
case SENSOR_GC0305:
case SENSOR_TAS5130C:
reg_w(gspca_dev, 0x09, 0x01ad); /* (from win traces) */
@@ -6808,9 +6821,17 @@
{
struct sd *sd = (struct sd *) gspca_dev;
- if (data[0] == 0xff && data[1] == 0xd8) { /* start of frame */
+ /* check the JPEG end of frame */
+ if (len >= 3
+ && data[len - 3] == 0xff && data[len - 2] == 0xd9) {
+/* FIXME: what does the last byte mean? */
gspca_frame_add(gspca_dev, LAST_PACKET,
- NULL, 0);
+ data, len - 1);
+ return;
+ }
+
+ /* check the JPEG start of a frame */
+ if (data[0] == 0xff && data[1] == 0xd8) {
/* put the JPEG header in the new frame */
gspca_frame_add(gspca_dev, FIRST_PACKET,
sd->jpeg_hdr, JPEG_HDR_SZ);
diff --git a/drivers/media/video/hdpvr/hdpvr-core.c b/drivers/media/video/hdpvr/hdpvr-core.c
index a6572e5..a27d93b 100644
--- a/drivers/media/video/hdpvr/hdpvr-core.c
+++ b/drivers/media/video/hdpvr/hdpvr-core.c
@@ -283,6 +283,7 @@
struct hdpvr_device *dev;
struct usb_host_interface *iface_desc;
struct usb_endpoint_descriptor *endpoint;
+ struct i2c_client *client;
size_t buffer_size;
int i;
int retval = -ENOMEM;
@@ -381,13 +382,21 @@
#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
retval = hdpvr_register_i2c_adapter(dev);
if (retval < 0) {
- v4l2_err(&dev->v4l2_dev, "registering i2c adapter failed\n");
+ v4l2_err(&dev->v4l2_dev, "i2c adapter register failed\n");
goto error;
}
- retval = hdpvr_register_i2c_ir(dev);
- if (retval < 0)
- v4l2_err(&dev->v4l2_dev, "registering i2c IR devices failed\n");
+ client = hdpvr_register_ir_rx_i2c(dev);
+ if (!client) {
+ v4l2_err(&dev->v4l2_dev, "i2c IR RX device register failed\n");
+ goto reg_fail;
+ }
+
+ client = hdpvr_register_ir_tx_i2c(dev);
+ if (!client) {
+ v4l2_err(&dev->v4l2_dev, "i2c IR TX device register failed\n");
+ goto reg_fail;
+ }
#endif
/* let the user know what node this device is now attached to */
@@ -395,6 +404,10 @@
video_device_node_name(dev->video_dev));
return 0;
+reg_fail:
+#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
+ i2c_del_adapter(&dev->i2c_adapter);
+#endif
error:
if (dev) {
/* Destroy single thread */
@@ -424,6 +437,9 @@
mutex_lock(&dev->io_mutex);
hdpvr_cancel_queue(dev);
mutex_unlock(&dev->io_mutex);
+#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
+ i2c_del_adapter(&dev->i2c_adapter);
+#endif
video_unregister_device(dev->video_dev);
atomic_dec(&dev_nr);
}
diff --git a/drivers/media/video/hdpvr/hdpvr-i2c.c b/drivers/media/video/hdpvr/hdpvr-i2c.c
index 89b71fa..e53fa55 100644
--- a/drivers/media/video/hdpvr/hdpvr-i2c.c
+++ b/drivers/media/video/hdpvr/hdpvr-i2c.c
@@ -31,26 +31,34 @@
#define Z8F0811_IR_RX_I2C_ADDR 0x71
-static struct i2c_board_info hdpvr_i2c_board_info = {
- I2C_BOARD_INFO("ir_tx_z8f0811_hdpvr", Z8F0811_IR_TX_I2C_ADDR),
- I2C_BOARD_INFO("ir_rx_z8f0811_hdpvr", Z8F0811_IR_RX_I2C_ADDR),
-};
-
-int hdpvr_register_i2c_ir(struct hdpvr_device *dev)
+struct i2c_client *hdpvr_register_ir_tx_i2c(struct hdpvr_device *dev)
{
- struct i2c_client *c;
struct IR_i2c_init_data *init_data = &dev->ir_i2c_init_data;
+ struct i2c_board_info hdpvr_ir_tx_i2c_board_info = {
+ I2C_BOARD_INFO("ir_tx_z8f0811_hdpvr", Z8F0811_IR_TX_I2C_ADDR),
+ };
+
+ init_data->name = "HD-PVR";
+ hdpvr_ir_tx_i2c_board_info.platform_data = init_data;
+
+ return i2c_new_device(&dev->i2c_adapter, &hdpvr_ir_tx_i2c_board_info);
+}
+
+struct i2c_client *hdpvr_register_ir_rx_i2c(struct hdpvr_device *dev)
+{
+ struct IR_i2c_init_data *init_data = &dev->ir_i2c_init_data;
+ struct i2c_board_info hdpvr_ir_rx_i2c_board_info = {
+ I2C_BOARD_INFO("ir_rx_z8f0811_hdpvr", Z8F0811_IR_RX_I2C_ADDR),
+ };
/* Our default information for ir-kbd-i2c.c to use */
init_data->ir_codes = RC_MAP_HAUPPAUGE_NEW;
init_data->internal_get_key_func = IR_KBD_GET_KEY_HAUP_XVR;
init_data->type = RC_TYPE_RC5;
- init_data->name = "HD PVR";
- hdpvr_i2c_board_info.platform_data = init_data;
+ init_data->name = "HD-PVR";
+ hdpvr_ir_rx_i2c_board_info.platform_data = init_data;
- c = i2c_new_device(&dev->i2c_adapter, &hdpvr_i2c_board_info);
-
- return (c == NULL) ? -ENODEV : 0;
+ return i2c_new_device(&dev->i2c_adapter, &hdpvr_ir_rx_i2c_board_info);
}
static int hdpvr_i2c_read(struct hdpvr_device *dev, int bus,
diff --git a/drivers/media/video/hdpvr/hdpvr.h b/drivers/media/video/hdpvr/hdpvr.h
index ee74e3b..072f23c 100644
--- a/drivers/media/video/hdpvr/hdpvr.h
+++ b/drivers/media/video/hdpvr/hdpvr.h
@@ -313,7 +313,8 @@
/* i2c adapter registration */
int hdpvr_register_i2c_adapter(struct hdpvr_device *dev);
-int hdpvr_register_i2c_ir(struct hdpvr_device *dev);
+struct i2c_client *hdpvr_register_ir_rx_i2c(struct hdpvr_device *dev);
+struct i2c_client *hdpvr_register_ir_tx_i2c(struct hdpvr_device *dev);
/*========================================================================*/
/* buffer management */
diff --git a/drivers/media/video/ir-kbd-i2c.c b/drivers/media/video/ir-kbd-i2c.c
index d2b20ad..a221ad6 100644
--- a/drivers/media/video/ir-kbd-i2c.c
+++ b/drivers/media/video/ir-kbd-i2c.c
@@ -128,6 +128,19 @@
static int get_key_haup_xvr(struct IR_i2c *ir, u32 *ir_key, u32 *ir_raw)
{
+ int ret;
+ unsigned char buf[1] = { 0 };
+
+ /*
+ * This is the same apparent "are you ready?" poll command observed
+ * watching Windows driver traffic and implemented in lirc_zilog. With
+ * this added, we get far saner remote behavior with z8 chips on usb
+ * connected devices, even with the default polling interval of 100ms.
+ */
+ ret = i2c_master_send(ir->c, buf, 1);
+ if (ret != 1)
+ return (ret < 0) ? ret : -EINVAL;
+
return get_key_haup_common (ir, ir_key, ir_raw, 6, 3);
}
diff --git a/drivers/media/video/pvrusb2/pvrusb2-i2c-core.c b/drivers/media/video/pvrusb2/pvrusb2-i2c-core.c
index ccc8849..451ecd4 100644
--- a/drivers/media/video/pvrusb2/pvrusb2-i2c-core.c
+++ b/drivers/media/video/pvrusb2/pvrusb2-i2c-core.c
@@ -597,7 +597,6 @@
init_data->internal_get_key_func = IR_KBD_GET_KEY_HAUP_XVR;
init_data->type = RC_TYPE_RC5;
init_data->name = hdw->hdw_desc->description;
- init_data->polling_interval = 260; /* ms From lirc_zilog */
/* IR Receiver */
info.addr = 0x71;
info.platform_data = init_data;
diff --git a/drivers/media/video/saa7115.c b/drivers/media/video/saa7115.c
index f35459d..0db9092 100644
--- a/drivers/media/video/saa7115.c
+++ b/drivers/media/video/saa7115.c
@@ -1565,7 +1565,7 @@
chip_id = name[5];
/* Check whether this chip is part of the saa711x series */
- if (memcmp(name, "1f711", 5)) {
+ if (memcmp(name + 1, "f711", 4)) {
v4l_dbg(1, debug, client, "chip found @ 0x%x (ID %s) does not match a known saa711x chip.\n",
client->addr << 1, name);
return -ENODEV;
diff --git a/drivers/s390/block/dasd_alias.c b/drivers/s390/block/dasd_alias.c
index 4155805..2b771f1 100644
--- a/drivers/s390/block/dasd_alias.c
+++ b/drivers/s390/block/dasd_alias.c
@@ -319,6 +319,9 @@
private = (struct dasd_eckd_private *) device->private;
lcu = private->lcu;
+ /* nothing to do if already disconnected */
+ if (!lcu)
+ return;
device->discipline->get_uid(device, &uid);
spin_lock_irqsave(&lcu->lock, flags);
list_del_init(&device->alias_list);
@@ -680,6 +683,9 @@
private = (struct dasd_eckd_private *) device->private;
lcu = private->lcu;
+ /* nothing to do if already removed */
+ if (!lcu)
+ return 0;
spin_lock_irqsave(&lcu->lock, flags);
_remove_device_from_lcu(lcu, device);
spin_unlock_irqrestore(&lcu->lock, flags);
diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c
index e9fff2b..5640c89 100644
--- a/drivers/s390/cio/qdio_main.c
+++ b/drivers/s390/cio/qdio_main.c
@@ -476,7 +476,7 @@
static int get_inbound_buffer_frontier(struct qdio_q *q)
{
int count, stop;
- unsigned char state;
+ unsigned char state = 0;
/*
* Don't check 128 buffers, as otherwise qdio_inbound_q_moved
@@ -643,7 +643,7 @@
static int get_outbound_buffer_frontier(struct qdio_q *q)
{
int count, stop;
- unsigned char state;
+ unsigned char state = 0;
if (need_siga_sync(q))
if (((queue_type(q) != QDIO_IQDIO_QFMT) &&
diff --git a/drivers/staging/lirc/lirc_zilog.c b/drivers/staging/lirc/lirc_zilog.c
index 3fe5f41..0aad0d7 100644
--- a/drivers/staging/lirc/lirc_zilog.c
+++ b/drivers/staging/lirc/lirc_zilog.c
@@ -495,7 +495,7 @@
/* send boot data to the IR TX device */
static int send_boot_data(struct IR_tx *tx)
{
- int ret;
+ int ret, i;
unsigned char buf[4];
/* send the boot block */
@@ -503,7 +503,7 @@
if (ret != 0)
return ret;
- /* kick it off? */
+ /* Hit the go button to activate the new boot data */
buf[0] = 0x00;
buf[1] = 0x20;
ret = i2c_master_send(tx->c, buf, 2);
@@ -511,7 +511,19 @@
zilog_error("i2c_master_send failed with %d\n", ret);
return ret < 0 ? ret : -EFAULT;
}
- ret = i2c_master_send(tx->c, buf, 1);
+
+ /*
+ * Wait for the Zilog to settle after hitting go, post boot block
+ * upload. Without this delay, the HD-PVR and HVR-1950 both return
+ * -EIO when we attempt to get the firmware revision, and the TX probe thus fails.
+ */
+ for (i = 0; i < 10; i++) {
+ ret = i2c_master_send(tx->c, buf, 1);
+ if (ret == 1)
+ break;
+ udelay(100);
+ }
+
if (ret != 1) {
zilog_error("i2c_master_send failed with %d\n", ret);
return ret < 0 ? ret : -EFAULT;
@@ -523,8 +535,8 @@
zilog_error("i2c_master_recv failed with %d\n", ret);
return 0;
}
- if (buf[0] != 0x80) {
- zilog_error("unexpected IR TX response: %02x\n", buf[0]);
+ if ((buf[0] != 0x80) && (buf[0] != 0xa0)) {
+ zilog_error("unexpected IR TX init response: %02x\n", buf[0]);
return 0;
}
zilog_notify("Zilog/Hauppauge IR blaster firmware version "
@@ -827,7 +839,15 @@
zilog_error("i2c_master_send failed with %d\n", ret);
return ret < 0 ? ret : -EFAULT;
}
- ret = i2c_master_send(tx->c, buf, 1);
+
+ /* Give the z8 a moment to process the data block */
+ for (i = 0; i < 10; i++) {
+ ret = i2c_master_send(tx->c, buf, 1);
+ if (ret == 1)
+ break;
+ udelay(100);
+ }
+
if (ret != 1) {
zilog_error("i2c_master_send failed with %d\n", ret);
return ret < 0 ? ret : -EFAULT;
diff --git a/fs/cifs/Kconfig b/fs/cifs/Kconfig
index ee45648..7cb0f7f 100644
--- a/fs/cifs/Kconfig
+++ b/fs/cifs/Kconfig
@@ -3,6 +3,7 @@
depends on INET
select NLS
select CRYPTO
+ select CRYPTO_MD4
select CRYPTO_MD5
select CRYPTO_HMAC
select CRYPTO_ARC4
diff --git a/fs/cifs/cifs_dfs_ref.c b/fs/cifs/cifs_dfs_ref.c
index f1c6862..0a265ad 100644
--- a/fs/cifs/cifs_dfs_ref.c
+++ b/fs/cifs/cifs_dfs_ref.c
@@ -282,8 +282,6 @@
cFYI(1, "in %s", __func__);
BUG_ON(IS_ROOT(mntpt));
- xid = GetXid();
-
/*
* The MSDFS spec states that paths in DFS referral requests and
* responses must be prefixed by a single '\' character instead of
@@ -293,7 +291,7 @@
mnt = ERR_PTR(-ENOMEM);
full_path = build_path_from_dentry(mntpt);
if (full_path == NULL)
- goto free_xid;
+ goto cdda_exit;
cifs_sb = CIFS_SB(mntpt->d_inode->i_sb);
tlink = cifs_sb_tlink(cifs_sb);
@@ -303,9 +301,11 @@
}
ses = tlink_tcon(tlink)->ses;
+ xid = GetXid();
rc = get_dfs_path(xid, ses, full_path + 1, cifs_sb->local_nls,
&num_referrals, &referrals,
cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR);
+ FreeXid(xid);
cifs_put_tlink(tlink);
@@ -338,8 +338,7 @@
free_dfs_info_array(referrals, num_referrals);
free_full_path:
kfree(full_path);
-free_xid:
- FreeXid(xid);
+cdda_exit:
cFYI(1, "leaving %s" , __func__);
return mnt;
}
diff --git a/fs/cifs/cifsencrypt.c b/fs/cifs/cifsencrypt.c
index 0db5f1d..a51585f 100644
--- a/fs/cifs/cifsencrypt.c
+++ b/fs/cifs/cifsencrypt.c
@@ -657,9 +657,10 @@
get_random_bytes(sec_key, CIFS_SESS_KEY_SIZE);
tfm_arc4 = crypto_alloc_blkcipher("ecb(arc4)", 0, CRYPTO_ALG_ASYNC);
- if (!tfm_arc4 || IS_ERR(tfm_arc4)) {
+ if (IS_ERR(tfm_arc4)) {
+ rc = PTR_ERR(tfm_arc4);
cERROR(1, "could not allocate crypto API arc4\n");
- return PTR_ERR(tfm_arc4);
+ return rc;
}
desc.tfm = tfm_arc4;
diff --git a/fs/cifs/cifsfs.h b/fs/cifs/cifsfs.h
index 14789a9..4a33302 100644
--- a/fs/cifs/cifsfs.h
+++ b/fs/cifs/cifsfs.h
@@ -127,5 +127,5 @@
extern const struct export_operations cifs_export_ops;
#endif /* EXPERIMENTAL */
-#define CIFS_VERSION "1.69"
+#define CIFS_VERSION "1.70"
#endif /* _CIFSFS_H */
diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
index 3106f5e..46c66ed 100644
--- a/fs/cifs/cifssmb.c
+++ b/fs/cifs/cifssmb.c
@@ -4914,7 +4914,6 @@
__u16 fid, __u32 pid_of_opener, bool SetAllocation)
{
struct smb_com_transaction2_sfi_req *pSMB = NULL;
- char *data_offset;
struct file_end_of_file_info *parm_data;
int rc = 0;
__u16 params, param_offset, offset, byte_count, count;
@@ -4938,8 +4937,6 @@
param_offset = offsetof(struct smb_com_transaction2_sfi_req, Fid) - 4;
offset = param_offset + params;
- data_offset = (char *) (&pSMB->hdr.Protocol) + offset;
-
count = sizeof(struct file_end_of_file_info);
pSMB->MaxParameterCount = cpu_to_le16(2);
/* BB find exact max SMB PDU from sess structure BB */
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index 0de17c1..74c0a28 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -346,7 +346,6 @@
struct cifsTconInfo *tcon;
struct tcon_link *tlink;
struct cifsFileInfo *pCifsFile = NULL;
- struct cifsInodeInfo *pCifsInode;
char *full_path = NULL;
bool posix_open_ok = false;
__u16 netfid;
@@ -361,8 +360,6 @@
}
tcon = tlink_tcon(tlink);
- pCifsInode = CIFS_I(file->f_path.dentry->d_inode);
-
full_path = build_path_from_dentry(file->f_path.dentry);
if (full_path == NULL) {
rc = -ENOMEM;
@@ -1146,7 +1143,6 @@
char *write_data;
int rc = -EFAULT;
int bytes_written = 0;
- struct cifs_sb_info *cifs_sb;
struct inode *inode;
struct cifsFileInfo *open_file;
@@ -1154,7 +1150,6 @@
return -EFAULT;
inode = page->mapping->host;
- cifs_sb = CIFS_SB(inode->i_sb);
offset += (loff_t)from;
write_data = kmap(page);
@@ -1667,7 +1662,8 @@
cifs_iovec_write(struct file *file, const struct iovec *iov,
unsigned long nr_segs, loff_t *poffset)
{
- size_t total_written = 0, written = 0;
+ size_t total_written = 0;
+ unsigned int written = 0;
unsigned long num_pages, npages;
size_t copied, len, cur_len, i;
struct kvec *to_send;
diff --git a/fs/cifs/link.c b/fs/cifs/link.c
index 02cd60a..e8804d3 100644
--- a/fs/cifs/link.c
+++ b/fs/cifs/link.c
@@ -55,8 +55,9 @@
md5 = crypto_alloc_shash("md5", 0, 0);
if (IS_ERR(md5)) {
+ rc = PTR_ERR(md5);
cERROR(1, "%s: Crypto md5 allocation error %d\n", __func__, rc);
- return PTR_ERR(md5);
+ return rc;
}
size = sizeof(struct shash_desc) + crypto_shash_descsize(md5);
sdescmd5 = kmalloc(size, GFP_KERNEL);
diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
index a09e077..2a930a7 100644
--- a/fs/cifs/misc.c
+++ b/fs/cifs/misc.c
@@ -236,10 +236,7 @@
{
__u16 mid = 0;
__u16 last_mid;
- int collision;
-
- if (server == NULL)
- return mid;
+ bool collision;
spin_lock(&GlobalMid_Lock);
last_mid = server->CurrentMid; /* we do not want to loop forever */
@@ -252,24 +249,38 @@
(and it would also have to have been a request that
did not time out) */
while (server->CurrentMid != last_mid) {
- struct list_head *tmp;
struct mid_q_entry *mid_entry;
+ unsigned int num_mids;
- collision = 0;
+ collision = false;
if (server->CurrentMid == 0)
server->CurrentMid++;
- list_for_each(tmp, &server->pending_mid_q) {
- mid_entry = list_entry(tmp, struct mid_q_entry, qhead);
-
- if ((mid_entry->mid == server->CurrentMid) &&
- (mid_entry->midState == MID_REQUEST_SUBMITTED)) {
+ num_mids = 0;
+ list_for_each_entry(mid_entry, &server->pending_mid_q, qhead) {
+ ++num_mids;
+ if (mid_entry->mid == server->CurrentMid &&
+ mid_entry->midState == MID_REQUEST_SUBMITTED) {
/* This mid is in use, try a different one */
- collision = 1;
+ collision = true;
break;
}
}
- if (collision == 0) {
+
+ /*
+ * if we have more than 32k mids in the list, then something
+ * is very wrong. Possibly a local user is trying to DoS the
+ * box by issuing long-running calls and SIGKILL'ing them. If
+ * we get to 2^16 mids then we're in big trouble as this
+ * function could loop forever.
+ *
+ * Go ahead and assign out the mid in this situation, but force
+ * an eventual reconnect to clean out the pending_mid_q.
+ */
+ if (num_mids > 32768)
+ server->tcpStatus = CifsNeedReconnect;
+
+ if (!collision) {
mid = server->CurrentMid;
break;
}
@@ -381,29 +392,31 @@
}
static int
-checkSMBhdr(struct smb_hdr *smb, __u16 mid)
+check_smb_hdr(struct smb_hdr *smb, __u16 mid)
{
- /* Make sure that this really is an SMB, that it is a response,
- and that the message ids match */
- if ((*(__le32 *) smb->Protocol == cpu_to_le32(0x424d53ff)) &&
- (mid == smb->Mid)) {
- if (smb->Flags & SMBFLG_RESPONSE)
- return 0;
- else {
- /* only one valid case where server sends us request */
- if (smb->Command == SMB_COM_LOCKING_ANDX)
- return 0;
- else
- cERROR(1, "Received Request not response");
- }
- } else { /* bad signature or mid */
- if (*(__le32 *) smb->Protocol != cpu_to_le32(0x424d53ff))
- cERROR(1, "Bad protocol string signature header %x",
- *(unsigned int *) smb->Protocol);
- if (mid != smb->Mid)
- cERROR(1, "Mids do not match");
+ /* does it have the right SMB "signature" ? */
+ if (*(__le32 *) smb->Protocol != cpu_to_le32(0x424d53ff)) {
+ cERROR(1, "Bad protocol string signature header 0x%x",
+ *(unsigned int *)smb->Protocol);
+ return 1;
}
- cERROR(1, "bad smb detected. The Mid=%d", smb->Mid);
+
+ /* Make sure that message ids match */
+ if (mid != smb->Mid) {
+ cERROR(1, "Mids do not match. received=%u expected=%u",
+ smb->Mid, mid);
+ return 1;
+ }
+
+ /* if it's a response then accept */
+ if (smb->Flags & SMBFLG_RESPONSE)
+ return 0;
+
+ /* only one valid case where server sends us request */
+ if (smb->Command == SMB_COM_LOCKING_ANDX)
+ return 0;
+
+ cERROR(1, "Server sent request, not response. mid=%u", smb->Mid);
return 1;
}
@@ -448,7 +461,7 @@
return 1;
}
- if (checkSMBhdr(smb, mid))
+ if (check_smb_hdr(smb, mid))
return 1;
clc_len = smbCalcSize_LE(smb);
@@ -465,25 +478,26 @@
if (((4 + len) & 0xFFFF) == (clc_len & 0xFFFF))
return 0; /* bcc wrapped */
}
- cFYI(1, "Calculated size %d vs length %d mismatch for mid %d",
+ cFYI(1, "Calculated size %u vs length %u mismatch for mid=%u",
clc_len, 4 + len, smb->Mid);
- /* Windows XP can return a few bytes too much, presumably
- an illegal pad, at the end of byte range lock responses
- so we allow for that three byte pad, as long as actual
- received length is as long or longer than calculated length */
- /* We have now had to extend this more, since there is a
- case in which it needs to be bigger still to handle a
- malformed response to transact2 findfirst from WinXP when
- access denied is returned and thus bcc and wct are zero
- but server says length is 0x21 bytes too long as if the server
- forget to reset the smb rfc1001 length when it reset the
- wct and bcc to minimum size and drop the t2 parms and data */
- if ((4+len > clc_len) && (len <= clc_len + 512))
- return 0;
- else {
- cERROR(1, "RFC1001 size %d bigger than SMB for Mid=%d",
+
+ if (4 + len < clc_len) {
+ cERROR(1, "RFC1001 size %u smaller than SMB for mid=%u",
len, smb->Mid);
return 1;
+ } else if (len > clc_len + 512) {
+ /*
+ * Some servers (Windows XP in particular) send more
+ * data than the lengths in the SMB packet would
+ * indicate on certain calls (byte range locks and
+ * trans2 find first calls in particular). While the
+ * client can handle such a frame by ignoring the
+ * trailing data, we choose to limit the amount of extra
+ * data to 512 bytes.
+ */
+ cERROR(1, "RFC1001 size %u more than 512 bytes larger "
+ "than SMB for mid=%u", len, smb->Mid);
+ return 1;
}
}
return 0;
diff --git a/fs/cifs/readdir.c b/fs/cifs/readdir.c
index 7f25cc3..f8e4cd2 100644
--- a/fs/cifs/readdir.c
+++ b/fs/cifs/readdir.c
@@ -764,7 +764,6 @@
{
int rc = 0;
int xid, i;
- struct cifs_sb_info *cifs_sb;
struct cifsTconInfo *pTcon;
struct cifsFileInfo *cifsFile = NULL;
char *current_entry;
@@ -775,8 +774,6 @@
xid = GetXid();
- cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
-
/*
* Ensure FindFirst doesn't fail before doing filldir() for '.' and
* '..'. Otherwise we won't be able to notify VFS in case of failure.
diff --git a/fs/cifs/smbencrypt.c b/fs/cifs/smbencrypt.c
index b5450e9..b5041c8 100644
--- a/fs/cifs/smbencrypt.c
+++ b/fs/cifs/smbencrypt.c
@@ -58,8 +58,9 @@
md4 = crypto_alloc_shash("md4", 0, 0);
if (IS_ERR(md4)) {
+ rc = PTR_ERR(md4);
cERROR(1, "%s: Crypto md4 allocation error %d\n", __func__, rc);
- return PTR_ERR(md4);
+ return rc;
}
size = sizeof(struct shash_desc) + crypto_shash_descsize(md4);
sdescmd4 = kmalloc(size, GFP_KERNEL);
diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c
index c1ccca1..b8c5e2e 100644
--- a/fs/cifs/transport.c
+++ b/fs/cifs/transport.c
@@ -236,9 +236,9 @@
server->tcpStatus = CifsNeedReconnect;
}
- if (rc < 0) {
+ if (rc < 0 && rc != -EINTR)
cERROR(1, "Error %d sending data on socket to server", rc);
- } else
+ else
rc = 0;
/* Don't want to modify the buffer as a
@@ -570,17 +570,33 @@
#endif
mutex_unlock(&ses->server->srv_mutex);
- cifs_small_buf_release(in_buf);
- if (rc < 0)
+ if (rc < 0) {
+ cifs_small_buf_release(in_buf);
goto out;
+ }
- if (long_op == CIFS_ASYNC_OP)
+ if (long_op == CIFS_ASYNC_OP) {
+ cifs_small_buf_release(in_buf);
goto out;
+ }
rc = wait_for_response(ses->server, midQ);
- if (rc != 0)
- goto out;
+ if (rc != 0) {
+ send_nt_cancel(ses->server, in_buf, midQ);
+ spin_lock(&GlobalMid_Lock);
+ if (midQ->midState == MID_REQUEST_SUBMITTED) {
+ midQ->callback = DeleteMidQEntry;
+ spin_unlock(&GlobalMid_Lock);
+ cifs_small_buf_release(in_buf);
+ atomic_dec(&ses->server->inFlight);
+ wake_up(&ses->server->request_q);
+ return rc;
+ }
+ spin_unlock(&GlobalMid_Lock);
+ }
+
+ cifs_small_buf_release(in_buf);
rc = sync_mid_result(midQ, ses->server);
if (rc != 0) {
@@ -724,8 +740,19 @@
goto out;
rc = wait_for_response(ses->server, midQ);
- if (rc != 0)
- goto out;
+ if (rc != 0) {
+ send_nt_cancel(ses->server, in_buf, midQ);
+ spin_lock(&GlobalMid_Lock);
+ if (midQ->midState == MID_REQUEST_SUBMITTED) {
+ /* no longer considered to be "in-flight" */
+ midQ->callback = DeleteMidQEntry;
+ spin_unlock(&GlobalMid_Lock);
+ atomic_dec(&ses->server->inFlight);
+ wake_up(&ses->server->request_q);
+ return rc;
+ }
+ spin_unlock(&GlobalMid_Lock);
+ }
rc = sync_mid_result(midQ, ses->server);
if (rc != 0) {
@@ -922,10 +949,21 @@
}
}
- if (wait_for_response(ses->server, midQ) == 0) {
- /* We got the response - restart system call. */
- rstart = 1;
+ rc = wait_for_response(ses->server, midQ);
+ if (rc) {
+ send_nt_cancel(ses->server, in_buf, midQ);
+ spin_lock(&GlobalMid_Lock);
+ if (midQ->midState == MID_REQUEST_SUBMITTED) {
+ /* no longer considered to be "in-flight" */
+ midQ->callback = DeleteMidQEntry;
+ spin_unlock(&GlobalMid_Lock);
+ return rc;
+ }
+ spin_unlock(&GlobalMid_Lock);
}
+
+ /* We got the response - restart system call. */
+ rstart = 1;
}
rc = sync_mid_result(midQ, ses->server);
diff --git a/fs/eventpoll.c b/fs/eventpoll.c
index cc8a9b7..267d0ad 100644
--- a/fs/eventpoll.c
+++ b/fs/eventpoll.c
@@ -1114,6 +1114,17 @@
return ep_scan_ready_list(ep, ep_send_events_proc, &esed);
}
+static inline struct timespec ep_set_mstimeout(long ms)
+{
+ struct timespec now, ts = {
+ .tv_sec = ms / MSEC_PER_SEC,
+ .tv_nsec = NSEC_PER_MSEC * (ms % MSEC_PER_SEC),
+ };
+
+ ktime_get_ts(&now);
+ return timespec_add_safe(now, ts);
+}
+
static int ep_poll(struct eventpoll *ep, struct epoll_event __user *events,
int maxevents, long timeout)
{
@@ -1121,12 +1132,11 @@
unsigned long flags;
long slack;
wait_queue_t wait;
- struct timespec end_time;
ktime_t expires, *to = NULL;
if (timeout > 0) {
- ktime_get_ts(&end_time);
- timespec_add_ns(&end_time, (u64)timeout * NSEC_PER_MSEC);
+ struct timespec end_time = ep_set_mstimeout(timeout);
+
slack = select_estimate_accuracy(&end_time);
to = &expires;
*to = timespec_to_ktime(end_time);
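To make the arithmetic of the new helper concrete, here is a sketch of what
ep_set_mstimeout() computes for a 1300 ms timeout (the "now" value is simply whatever
ktime_get_ts() returns at call time):

    /* illustrative only */
    struct timespec ts = {
    	.tv_sec  = 1300 / MSEC_PER_SEC,				/* 1 s */
    	.tv_nsec = NSEC_PER_MSEC * (1300 % MSEC_PER_SEC),	/* 300,000,000 ns */
    };
    /* timespec_add_safe(now, ts) then yields an absolute deadline, clamping
     * rather than overflowing for very large timeout values */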
diff --git a/fs/exec.c b/fs/exec.c
index c62efcb..52a447d 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -120,7 +120,7 @@
goto out;
file = do_filp_open(AT_FDCWD, tmp,
- O_LARGEFILE | O_RDONLY | FMODE_EXEC, 0,
+ O_LARGEFILE | O_RDONLY | __FMODE_EXEC, 0,
MAY_READ | MAY_EXEC | MAY_OPEN);
putname(tmp);
error = PTR_ERR(file);
@@ -723,7 +723,7 @@
int err;
file = do_filp_open(AT_FDCWD, name,
- O_LARGEFILE | O_RDONLY | FMODE_EXEC, 0,
+ O_LARGEFILE | O_RDONLY | __FMODE_EXEC, 0,
MAY_EXEC | MAY_OPEN);
if (IS_ERR(file))
goto out;
diff --git a/fs/exofs/inode.c b/fs/exofs/inode.c
index 4268542..a755523 100644
--- a/fs/exofs/inode.c
+++ b/fs/exofs/inode.c
@@ -1030,7 +1030,6 @@
memcpy(oi->i_data, fcb.i_data, sizeof(fcb.i_data));
}
- inode->i_mapping->backing_dev_info = sb->s_bdi;
if (S_ISREG(inode->i_mode)) {
inode->i_op = &exofs_file_inode_operations;
inode->i_fop = &exofs_file_operations;
@@ -1131,7 +1130,6 @@
sbi = sb->s_fs_info;
- inode->i_mapping->backing_dev_info = sb->s_bdi;
sb->s_dirt = 1;
inode_init_owner(inode, dir, mode);
inode->i_ino = sbi->s_nextid++;
diff --git a/fs/fcntl.c b/fs/fcntl.c
index ecc8b39..cb10261 100644
--- a/fs/fcntl.c
+++ b/fs/fcntl.c
@@ -815,7 +815,7 @@
__O_SYNC | O_DSYNC | FASYNC |
O_DIRECT | O_LARGEFILE | O_DIRECTORY |
O_NOFOLLOW | O_NOATIME | O_CLOEXEC |
- FMODE_EXEC
+ __FMODE_EXEC
));
fasync_cache = kmem_cache_create("fasync_cache",
diff --git a/fs/ioctl.c b/fs/ioctl.c
index a59635e..1eebeb7 100644
--- a/fs/ioctl.c
+++ b/fs/ioctl.c
@@ -273,6 +273,13 @@
len = isize;
}
+ /*
+ * Some filesystems can't deal with being asked to map less than
+ * blocksize, so make sure our len is at least block length.
+ */
+ if (logical_to_blk(inode, len) == 0)
+ len = blk_to_logical(inode, 1);
+
start_blk = logical_to_blk(inode, start);
last_blk = logical_to_blk(inode, start + len - 1);
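A quick worked example of the clamp above, assuming a filesystem with a 4096-byte
block size:

    /* illustrative only: FIEMAP request of len = 512 on a 4k-block filesystem */
    logical_to_blk(inode, 512);		/* == 0, i.e. less than one block */
    len = blk_to_logical(inode, 1);	/* len is bumped to 4096 */
    /* start_blk and last_blk are then computed over at least one full block,
     * so filesystems that cannot map sub-block ranges still get a sane request */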
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 32b38cd..bd32159 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -2555,9 +2555,12 @@
void __user *buffer, size_t *lenp, loff_t *ppos);
int __init get_filesystem_list(char *buf);
+#define __FMODE_EXEC ((__force int) FMODE_EXEC)
+#define __FMODE_NONOTIFY ((__force int) FMODE_NONOTIFY)
+
#define ACC_MODE(x) ("\004\002\006\006"[(x)&O_ACCMODE])
#define OPEN_FMODE(flag) ((__force fmode_t)(((flag + 1) & O_ACCMODE) | \
- (flag & FMODE_NONOTIFY)))
+ (flag & __FMODE_NONOTIFY)))
#endif /* __KERNEL__ */
#endif /* _LINUX_FS_H */
diff --git a/include/linux/res_counter.h b/include/linux/res_counter.h
index fcb9884..a5930cb 100644
--- a/include/linux/res_counter.h
+++ b/include/linux/res_counter.h
@@ -182,6 +182,26 @@
return ret;
}
+/**
+ * res_counter_check_margin - check if the counter allows charging
+ * @cnt: the resource counter to check
+ * @bytes: the number of bytes to check the remaining space against
+ *
+ * Returns a boolean value on whether the counter can be charged
+ * @bytes or whether this would exceed the limit.
+ */
+static inline bool res_counter_check_margin(struct res_counter *cnt,
+ unsigned long bytes)
+{
+ bool ret;
+ unsigned long flags;
+
+ spin_lock_irqsave(&cnt->lock, flags);
+ ret = cnt->limit - cnt->usage >= bytes;
+ spin_unlock_irqrestore(&cnt->lock, flags);
+ return ret;
+}
+
static inline bool res_counter_check_under_soft_limit(struct res_counter *cnt)
{
bool ret;
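A hedged sketch of how the new res_counter_check_margin() helper is meant to be used
by a charge path (the real caller, mem_cgroup_check_margin(), is added to
mm/memcontrol.c later in this patch; cnt and nr_bytes below are illustrative names):

    /* after a reclaim pass, only retry the charge when there is headroom */
    if (res_counter_check_margin(&cnt, nr_bytes))
    	return CHARGE_RETRY;	/* enough free space for the pending charge */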
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index 126a302..999835b 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -1901,11 +1901,12 @@
return;
raw_spin_lock(&ctx->lock);
- update_context_time(ctx);
+ if (ctx->is_active)
+ update_context_time(ctx);
update_event_times(event);
+ if (event->state == PERF_EVENT_STATE_ACTIVE)
+ event->pmu->read(event);
raw_spin_unlock(&ctx->lock);
-
- event->pmu->read(event);
}
static inline u64 perf_event_count(struct perf_event *event)
@@ -1999,8 +2000,7 @@
* accessed from NMI. Use a temporary manual per cpu allocation
* until that gets sorted out.
*/
- size = sizeof(*entries) + sizeof(struct perf_callchain_entry *) *
- num_possible_cpus();
+ size = offsetof(struct callchain_cpus_entries, cpu_entries[nr_cpu_ids]);
entries = kzalloc(size, GFP_KERNEL);
if (!entries)
diff --git a/kernel/watchdog.c b/kernel/watchdog.c
index d7ebdf4..f37f974 100644
--- a/kernel/watchdog.c
+++ b/kernel/watchdog.c
@@ -27,7 +27,7 @@
#include <asm/irq_regs.h>
#include <linux/perf_event.h>
-int watchdog_enabled;
+int watchdog_enabled = 1;
int __read_mostly softlockup_thresh = 60;
static DEFINE_PER_CPU(unsigned long, watchdog_touch_ts);
@@ -43,9 +43,6 @@
static DEFINE_PER_CPU(struct perf_event *, watchdog_ev);
#endif
-static int no_watchdog;
-
-
/* boot commands */
/*
* Should we panic when a soft-lockup or hard-lockup occurs:
@@ -58,7 +55,7 @@
if (!strncmp(str, "panic", 5))
hardlockup_panic = 1;
else if (!strncmp(str, "0", 1))
- no_watchdog = 1;
+ watchdog_enabled = 0;
return 1;
}
__setup("nmi_watchdog=", hardlockup_panic_setup);
@@ -77,7 +74,7 @@
static int __init nowatchdog_setup(char *str)
{
- no_watchdog = 1;
+ watchdog_enabled = 0;
return 1;
}
__setup("nowatchdog", nowatchdog_setup);
@@ -85,7 +82,7 @@
/* deprecated */
static int __init nosoftlockup_setup(char *str)
{
- no_watchdog = 1;
+ watchdog_enabled = 0;
return 1;
}
__setup("nosoftlockup", nosoftlockup_setup);
@@ -432,9 +429,6 @@
wake_up_process(p);
}
- /* if any cpu succeeds, watchdog is considered enabled for the system */
- watchdog_enabled = 1;
-
return 0;
}
@@ -462,12 +456,16 @@
static void watchdog_enable_all_cpus(void)
{
int cpu;
- int result = 0;
+
+ watchdog_enabled = 0;
for_each_online_cpu(cpu)
- result += watchdog_enable(cpu);
+ if (!watchdog_enable(cpu))
+ /* if any cpu succeeds, watchdog is considered
+ enabled for the system */
+ watchdog_enabled = 1;
- if (result)
+ if (!watchdog_enabled)
printk(KERN_ERR "watchdog: failed to be enabled on some cpus\n");
}
@@ -476,9 +474,6 @@
{
int cpu;
- if (no_watchdog)
- return;
-
for_each_online_cpu(cpu)
watchdog_disable(cpu);
@@ -498,10 +493,12 @@
{
proc_dointvec(table, write, buffer, length, ppos);
- if (watchdog_enabled)
- watchdog_enable_all_cpus();
- else
- watchdog_disable_all_cpus();
+ if (write) {
+ if (watchdog_enabled)
+ watchdog_enable_all_cpus();
+ else
+ watchdog_disable_all_cpus();
+ }
return 0;
}
@@ -530,7 +527,8 @@
break;
case CPU_ONLINE:
case CPU_ONLINE_FROZEN:
- err = watchdog_enable(hotcpu);
+ if (watchdog_enabled)
+ err = watchdog_enable(hotcpu);
break;
#ifdef CONFIG_HOTPLUG_CPU
case CPU_UP_CANCELED:
@@ -555,9 +553,6 @@
void *cpu = (void *)(long)smp_processor_id();
int err;
- if (no_watchdog)
- return;
-
err = cpu_callback(&cpu_nfb, CPU_UP_PREPARE, cpu);
WARN_ON(notifier_to_errno(err));
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index e187454..b6c1ce3 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1162,7 +1162,12 @@
/* after clearing PageTail the gup refcount can be released */
smp_mb();
- page_tail->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
+ /*
+ * retain hwpoison flag of the poisoned tail page:
+ * fix for the unsuitable process killed on Guest Machine(KVM)
+ * by the memory-failure.
+ */
+ page_tail->flags &= ~PAGE_FLAGS_CHECK_AT_PREP | __PG_HWPOISON;
page_tail->flags |= (page->flags &
((1L << PG_referenced) |
(1L << PG_swapbacked) |
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 3878cfe..da53a25 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -612,8 +612,10 @@
/* pagein of a big page is an event. So, ignore page size */
if (nr_pages > 0)
__this_cpu_inc(mem->stat->count[MEM_CGROUP_STAT_PGPGIN_COUNT]);
- else
+ else {
__this_cpu_inc(mem->stat->count[MEM_CGROUP_STAT_PGPGOUT_COUNT]);
+ nr_pages = -nr_pages; /* for event */
+ }
__this_cpu_add(mem->stat->count[MEM_CGROUP_EVENTS], nr_pages);
@@ -1111,6 +1113,23 @@
return false;
}
+/**
+ * mem_cgroup_check_margin - check if the memory cgroup allows charging
+ * @mem: memory cgroup to check
+ * @bytes: the number of bytes the caller intends to charge
+ *
+ * Returns a boolean value on whether @mem can be charged @bytes or
+ * whether this would exceed the limit.
+ */
+static bool mem_cgroup_check_margin(struct mem_cgroup *mem, unsigned long bytes)
+{
+ if (!res_counter_check_margin(&mem->res, bytes))
+ return false;
+ if (do_swap_account && !res_counter_check_margin(&mem->memsw, bytes))
+ return false;
+ return true;
+}
+
static unsigned int get_swappiness(struct mem_cgroup *memcg)
{
struct cgroup *cgrp = memcg->css.cgroup;
@@ -1837,23 +1856,34 @@
flags |= MEM_CGROUP_RECLAIM_NOSWAP;
} else
mem_over_limit = mem_cgroup_from_res_counter(fail_res, res);
-
- if (csize > PAGE_SIZE) /* change csize and retry */
+ /*
+ * csize can be either a huge page (HPAGE_SIZE), a batch of
+ * regular pages (CHARGE_SIZE), or a single regular page
+ * (PAGE_SIZE).
+ *
+ * Never reclaim on behalf of optional batching, retry with a
+ * single page instead.
+ */
+ if (csize == CHARGE_SIZE)
return CHARGE_RETRY;
if (!(gfp_mask & __GFP_WAIT))
return CHARGE_WOULDBLOCK;
ret = mem_cgroup_hierarchical_reclaim(mem_over_limit, NULL,
- gfp_mask, flags);
+ gfp_mask, flags);
+ if (mem_cgroup_check_margin(mem_over_limit, csize))
+ return CHARGE_RETRY;
/*
- * try_to_free_mem_cgroup_pages() might not give us a full
- * picture of reclaim. Some pages are reclaimed and might be
- * moved to swap cache or just unmapped from the cgroup.
- * Check the limit again to see if the reclaim reduced the
- * current usage of the cgroup before giving up
+ * Even though the limit is exceeded at this point, reclaim
+ * may have been able to free some pages. Retry the charge
+ * before killing the task.
+ *
+ * Only for regular pages, though: huge pages are rather
+ * unlikely to succeed so close to the limit, and we fall back
+ * to regular pages anyway in case of failure.
*/
- if (ret || mem_cgroup_check_under_limit(mem_over_limit))
+ if (csize == PAGE_SIZE && ret)
return CHARGE_RETRY;
/*
@@ -2323,13 +2353,19 @@
gfp_t gfp_mask, enum charge_type ctype)
{
struct mem_cgroup *mem = NULL;
- struct page_cgroup *pc;
- int ret;
int page_size = PAGE_SIZE;
+ struct page_cgroup *pc;
+ bool oom = true;
+ int ret;
if (PageTransHuge(page)) {
page_size <<= compound_order(page);
VM_BUG_ON(!PageTransHuge(page));
+ /*
+ * Never OOM-kill a process for a huge page. The
+ * fault handler will fall back to regular pages.
+ */
+ oom = false;
}
pc = lookup_page_cgroup(page);
@@ -2338,7 +2374,7 @@
return 0;
prefetchw(pc);
- ret = __mem_cgroup_try_charge(mm, gfp_mask, &mem, true, page_size);
+ ret = __mem_cgroup_try_charge(mm, gfp_mask, &mem, oom, page_size);
if (ret || !mem)
return ret;
@@ -5024,9 +5060,9 @@
static int __init enable_swap_account(char *s)
{
/* consider enabled if no parameter or 1 is given */
- if (!s || !strcmp(s, "1"))
+ if (!(*s) || !strcmp(s, "=1"))
really_do_swap_account = 1;
- else if (!strcmp(s, "0"))
+ else if (!strcmp(s, "=0"))
really_do_swap_account = 0;
return 1;
}
@@ -5034,7 +5070,8 @@
static int __init disable_swap_account(char *s)
{
- enable_swap_account("0");
+ printk_once("noswapaccount is deprecated and will be removed in 2.6.40. Use swapaccount=0 instead\n");
+ enable_swap_account("=0");
return 1;
}
__setup("noswapaccount", disable_swap_account);
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 548fbd7..0207c2f 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -233,8 +233,8 @@
}
/*
- * Only all shrink_slab here (which would also
- * shrink other caches) if access is not potentially fatal.
+ * Only call shrink_slab here (which would also shrink other caches) if
+ * access is not potentially fatal.
*/
if (access) {
int nr;
@@ -386,8 +386,6 @@
struct task_struct *tsk;
struct anon_vma *av;
- if (!PageHuge(page) && unlikely(split_huge_page(page)))
- return;
read_lock(&tasklist_lock);
av = page_lock_anon_vma(page);
if (av == NULL) /* Not actually mapped anymore */
@@ -856,6 +854,7 @@
int ret;
int kill = 1;
struct page *hpage = compound_head(p);
+ struct page *ppage;
if (PageReserved(p) || PageSlab(p))
return SWAP_SUCCESS;
@@ -897,6 +896,44 @@
}
/*
+ * ppage: the poisoned page to operate on.
+ * If p is a regular (4k) page,
+ * ppage == the real poisoned page;
+ * else p is hugetlb or THP and ppage == the head page.
+ */
+ ppage = hpage;
+
+ if (PageTransHuge(hpage)) {
+ /*
+ * Verify that this isn't a hugetlbfs head page, the check for
+ * PageAnon is just to avoid tripping a split_huge_page
+ * internal debug check, as split_huge_page refuses to deal with
+ * anything that isn't an anon page. PageAnon can't go away from
+ * under us because we hold a refcount on the hpage; without a
+ * refcount on the hpage, split_huge_page can't be safely called
+ * in the first place, and having a refcount on the tail isn't
+ * enough to be safe.
+ */
+ if (!PageHuge(hpage) && PageAnon(hpage)) {
+ if (unlikely(split_huge_page(hpage))) {
+ /*
+ * FIXME: if splitting the THP failed, it would be
+ * better to stop the following operation rather
+ * than risk a panic by unmapping. The system might
+ * survive if the page is freed later.
+ */
+ printk(KERN_INFO
+ "MCE %#lx: failed to split THP\n", pfn);
+
+ BUG_ON(!PageHWPoison(p));
+ return SWAP_FAIL;
+ }
+ /* THP is split, so ppage should be the real poisoned page. */
+ ppage = p;
+ }
+ }
+
+ /*
* First collect all the processes that have the page
* mapped in dirty form. This has to be done before try_to_unmap,
* because ttu takes the rmap data structures down.
@@ -905,12 +942,18 @@
* there's nothing that can be done.
*/
if (kill)
- collect_procs(hpage, &tokill);
+ collect_procs(ppage, &tokill);
- ret = try_to_unmap(hpage, ttu);
+ if (hpage != ppage)
+ lock_page_nosync(ppage);
+
+ ret = try_to_unmap(ppage, ttu);
if (ret != SWAP_SUCCESS)
printk(KERN_ERR "MCE %#lx: failed to unmap page (mapcount=%d)\n",
- pfn, page_mapcount(hpage));
+ pfn, page_mapcount(ppage));
+
+ if (hpage != ppage)
+ unlock_page(ppage);
/*
* Now that the dirty bit has been propagated to the
@@ -921,7 +964,7 @@
* use a more force-full uncatchable kill to prevent
* any accesses to the poisoned memory.
*/
- kill_procs_ao(&tokill, !!PageDirty(hpage), trapno,
+ kill_procs_ao(&tokill, !!PageDirty(ppage), trapno,
ret != SWAP_SUCCESS, p, pfn);
return ret;
@@ -1022,19 +1065,22 @@
* The check (unnecessarily) ignores LRU pages being isolated and
* walked by the page reclaim code, however that's not a big loss.
*/
- if (!PageLRU(p) && !PageHuge(p))
- shake_page(p, 0);
- if (!PageLRU(p) && !PageHuge(p)) {
- /*
- * shake_page could have turned it free.
- */
- if (is_free_buddy_page(p)) {
- action_result(pfn, "free buddy, 2nd try", DELAYED);
- return 0;
+ if (!PageHuge(p) && !PageTransCompound(p)) {
+ if (!PageLRU(p))
+ shake_page(p, 0);
+ if (!PageLRU(p)) {
+ /*
+ * shake_page could have turned it free.
+ */
+ if (is_free_buddy_page(p)) {
+ action_result(pfn, "free buddy, 2nd try",
+ DELAYED);
+ return 0;
+ }
+ action_result(pfn, "non LRU", IGNORED);
+ put_page(p);
+ return -EBUSY;
}
- action_result(pfn, "non LRU", IGNORED);
- put_page(p);
- return -EBUSY;
}
/*
@@ -1064,7 +1110,7 @@
* For error on the tail page, we should set PG_hwpoison
* on the head page to show that the hugepage is hwpoisoned
*/
- if (PageTail(p) && TestSetPageHWPoison(hpage)) {
+ if (PageHuge(p) && PageTail(p) && TestSetPageHWPoison(hpage)) {
action_result(pfn, "hugepage already hardware poisoned",
IGNORED);
unlock_page(hpage);
@@ -1295,7 +1341,10 @@
ret = migrate_huge_pages(&pagelist, new_page, MPOL_MF_MOVE_ALL, 0,
true);
if (ret) {
- putback_lru_pages(&pagelist);
+ struct page *page1, *page2;
+ list_for_each_entry_safe(page1, page2, &pagelist, lru)
+ put_page(page1);
+
pr_debug("soft offline: %#lx: migration failed %d, type %lx\n",
pfn, ret, page->flags);
if (ret > 0)
@@ -1419,6 +1468,7 @@
ret = migrate_pages(&pagelist, new_page, MPOL_MF_MOVE_ALL,
0, true);
if (ret) {
+ putback_lru_pages(&pagelist);
pr_info("soft offline: %#lx: migration failed %d, type %lx\n",
pfn, ret, page->flags);
if (ret > 0)
diff --git a/mm/migrate.c b/mm/migrate.c
index 9f29a3b..7661152 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -772,6 +772,7 @@
unlock:
unlock_page(page);
+move_newpage:
if (rc != -EAGAIN) {
/*
* A page that has been migrated has all references
@@ -785,8 +786,6 @@
putback_lru_page(page);
}
-move_newpage:
-
/*
* Move the new page to the LRU. If migration was not successful
* then this will free the page.
@@ -981,10 +980,6 @@
}
rc = 0;
out:
-
- list_for_each_entry_safe(page, page2, from, lru)
- put_page(page);
-
if (rc)
return rc;
diff --git a/mm/mlock.c b/mm/mlock.c
index 13e81ee..c3924c7f 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -178,6 +178,13 @@
if ((vma->vm_flags & (VM_WRITE | VM_SHARED)) == VM_WRITE)
gup_flags |= FOLL_WRITE;
+ /*
+ * We want mlock to succeed for regions that have any permissions
+ * other than PROT_NONE.
+ */
+ if (vma->vm_flags & (VM_READ | VM_WRITE | VM_EXEC))
+ gup_flags |= FOLL_FORCE;
+
if (vma->vm_flags & VM_LOCKED)
gup_flags |= FOLL_MLOCK;