Merge "msm: idle-v7: flush non secure L1 cache lines for l2 GDHS LPM mode"
diff --git a/arch/arm/boot/dts/dsi-panel-hx8389b-qhd-video.dtsi b/arch/arm/boot/dts/dsi-panel-hx8389b-qhd-video.dtsi
index 7e63014..8f94502 100755
--- a/arch/arm/boot/dts/dsi-panel-hx8389b-qhd-video.dtsi
+++ b/arch/arm/boot/dts/dsi-panel-hx8389b-qhd-video.dtsi
@@ -124,7 +124,7 @@
qcom,mdss-dsi-panel-timings = [97 23 17 00 4B 53 1C 27 27 03 04 00];
qcom,mdss-dsi-t-clk-post = <0x04>;
qcom,mdss-dsi-t-clk-pre = <0x1b>;
- qcom,mdss-dsi-bl-min-level = <1>;
+ qcom,mdss-dsi-bl-min-level = <26>;
qcom,mdss-dsi-bl-max-level = <255>;
qcom,mdss-dsi-dma-trigger = <4>;
qcom,mdss-dsi-mdp-trigger = <0>;
diff --git a/arch/arm/boot/dts/dsi-panel-otm8018b-fwvga-video.dtsi b/arch/arm/boot/dts/dsi-panel-otm8018b-fwvga-video.dtsi
index 9477c56..393419b 100644
--- a/arch/arm/boot/dts/dsi-panel-otm8018b-fwvga-video.dtsi
+++ b/arch/arm/boot/dts/dsi-panel-otm8018b-fwvga-video.dtsi
@@ -256,7 +256,7 @@
qcom,mdss-dsi-panel-timings = [8B 1F 14 00 45 4A 19 23 23 03 04 00];
qcom,mdss-dsi-t-clk-post = <0x04>;
qcom,mdss-dsi-t-clk-pre = <0x1b>;
- qcom,mdss-dsi-bl-min-level = <1>;
+ qcom,mdss-dsi-bl-min-level = <26>;
qcom,mdss-dsi-bl-max-level = <255>;
qcom,mdss-dsi-dma-trigger = <4>;
qcom,mdss-dsi-mdp-trigger = <0>;
diff --git a/arch/arm/boot/dts/msm8226-1080p-cdp.dtsi b/arch/arm/boot/dts/msm8226-1080p-cdp.dtsi
index b2ff209..e310c17 100644
--- a/arch/arm/boot/dts/msm8226-1080p-cdp.dtsi
+++ b/arch/arm/boot/dts/msm8226-1080p-cdp.dtsi
@@ -477,3 +477,16 @@
qcom,fast-avg-setup = <0>;
};
};
+
+&mdss_mdp {
+ qcom,mdss-pref-prim-intf = "dsi";
+};
+
+&mdss_dsi0 {
+ qcom,dsi-pref-prim-pan = <&dsi_jdi_1080_vid>;
+ qcom,platform-enable-gpio = <&msmgpio 109 0>;
+};
+
+&dsi_jdi_1080_vid {
+ qcom,cont-splash-enabled;
+};
diff --git a/arch/arm/boot/dts/msm8226-1080p-mtp.dtsi b/arch/arm/boot/dts/msm8226-1080p-mtp.dtsi
index 789e4d8..e58c321 100644
--- a/arch/arm/boot/dts/msm8226-1080p-mtp.dtsi
+++ b/arch/arm/boot/dts/msm8226-1080p-mtp.dtsi
@@ -494,3 +494,16 @@
qcom,cdc-micbias1-ext-cap;
};
};
+
+&mdss_mdp {
+ qcom,mdss-pref-prim-intf = "dsi";
+};
+
+&mdss_dsi0 {
+ qcom,dsi-pref-prim-pan = <&dsi_jdi_1080_vid>;
+ qcom,platform-enable-gpio = <&msmgpio 109 0>;
+};
+
+&dsi_jdi_1080_vid {
+ qcom,cont-splash-enabled;
+};
diff --git a/arch/arm/boot/dts/msm8226-mdss-panels.dtsi b/arch/arm/boot/dts/msm8226-mdss-panels.dtsi
index eeec175..0731a9a 100644
--- a/arch/arm/boot/dts/msm8226-mdss-panels.dtsi
+++ b/arch/arm/boot/dts/msm8226-mdss-panels.dtsi
@@ -16,3 +16,4 @@
/include/ "dsi-panel-nt35596-1080p-video.dtsi"
/include/ "dsi-panel-nt35590-720p-cmd.dtsi"
/include/ "dsi-panel-ssd2080m-720p-video.dtsi"
+/include/ "dsi-panel-jdi-1080p-video.dtsi"
diff --git a/arch/arm/boot/dts/msm8610-qrd.dtsi b/arch/arm/boot/dts/msm8610-qrd.dtsi
index 71748ea..85bd746 100644
--- a/arch/arm/boot/dts/msm8610-qrd.dtsi
+++ b/arch/arm/boot/dts/msm8610-qrd.dtsi
@@ -309,6 +309,7 @@
qcom,chgr@1000 {
status = "ok";
+ qcom,tchg-mins = <250>;
};
qcom,buck@1100 {
diff --git a/arch/arm/mach-msm/board-8226-gpiomux.c b/arch/arm/mach-msm/board-8226-gpiomux.c
index 34e23d1..9767746 100644
--- a/arch/arm/mach-msm/board-8226-gpiomux.c
+++ b/arch/arm/mach-msm/board-8226-gpiomux.c
@@ -185,7 +185,14 @@
static struct msm_gpiomux_config msm_lcd_configs[] __initdata = {
{
- .gpio = 25,
+ .gpio = 25, /* LCD Reset */
+ .settings = {
+ [GPIOMUX_ACTIVE] = &lcd_rst_act_cfg,
+ [GPIOMUX_SUSPENDED] = &lcd_rst_sus_cfg,
+ },
+ },
+ {
+ .gpio = 109, /* LCD Enable */
.settings = {
[GPIOMUX_ACTIVE] = &lcd_rst_act_cfg,
[GPIOMUX_SUSPENDED] = &lcd_rst_sus_cfg,
diff --git a/arch/sparc/mm/init_32.c b/arch/sparc/mm/init_32.c
index c5f9021..fbf4eeb 100644
--- a/arch/sparc/mm/init_32.c
+++ b/arch/sparc/mm/init_32.c
@@ -77,7 +77,7 @@
printk("Mem-info:\n");
show_free_areas(filter);
printk("Free swap: %6ldkB\n",
- nr_swap_pages << (PAGE_SHIFT-10));
+ get_nr_swap_pages() << (PAGE_SHIFT-10));
printk("%ld pages of RAM\n", totalram_pages);
printk("%ld free pages\n", nr_free_pages());
#if 0 /* undefined pgtable_cache_size, pgd_cache_size */
diff --git a/arch/tile/mm/pgtable.c b/arch/tile/mm/pgtable.c
index 2410aa8..ea7dd38 100644
--- a/arch/tile/mm/pgtable.c
+++ b/arch/tile/mm/pgtable.c
@@ -61,7 +61,7 @@
global_page_state(NR_PAGETABLE),
global_page_state(NR_BOUNCE),
global_page_state(NR_FILE_PAGES),
- nr_swap_pages);
+ get_nr_swap_pages());
for_each_zone(zone) {
unsigned long flags, order, total = 0, largest_order = -1;
diff --git a/drivers/video/msm/mdss/mdss_dsi_panel.c b/drivers/video/msm/mdss/mdss_dsi_panel.c
index 9932186..890066e 100644
--- a/drivers/video/msm/mdss/mdss_dsi_panel.c
+++ b/drivers/video/msm/mdss/mdss_dsi_panel.c
@@ -293,6 +293,15 @@
ctrl_pdata = container_of(pdata, struct mdss_dsi_ctrl_pdata,
panel_data);
+ /*
+ * Some backlight controllers specify a minimum duty cycle
+	 * for the backlight brightness. If the requested brightness
+	 * falls below that minimum, the controller can malfunction.
+ */
+
+ if ((bl_level < pdata->panel_info.bl_min) && (bl_level != 0))
+ bl_level = pdata->panel_info.bl_min;
+
switch (ctrl_pdata->bklt_ctrl) {
case BL_WLED:
led_trigger_event(bl_led_trigger, bl_level);
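For reference, the clamp above keeps zero meaning "backlight off" and only raises non-zero requests to the controller's minimum. A minimal standalone sketch of that behavior, using a hypothetical BL_MIN of 26 to match the qcom,mdss-dsi-bl-min-level values set in the panel DTS files above:

#include <stdio.h>

/* Hypothetical stand-in for pdata->panel_info.bl_min (26 per the DTS). */
#define BL_MIN 26

/* Same clamp as the hunk above: 0 still turns the backlight off;
 * any other level below BL_MIN is raised to BL_MIN. */
static int clamp_bl_level(int bl_level)
{
	if ((bl_level < BL_MIN) && (bl_level != 0))
		bl_level = BL_MIN;
	return bl_level;
}

int main(void)
{
	printf("%d\n", clamp_bl_level(0));   /* 0:   stays off */
	printf("%d\n", clamp_bl_level(10));  /* 26:  raised to the minimum */
	printf("%d\n", clamp_bl_level(128)); /* 128: unchanged */
	return 0;
}
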
diff --git a/include/linux/mm_inline.h b/include/linux/mm_inline.h
index 227fd3e..9a584c0 100644
--- a/include/linux/mm_inline.h
+++ b/include/linux/mm_inline.h
@@ -2,6 +2,7 @@
#define LINUX_MM_INLINE_H
#include <linux/huge_mm.h>
+#include <linux/swap.h>
/**
* page_is_file_cache - should the page be on a file LRU or anon LRU?
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 7539e03..381fff5 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -375,7 +375,6 @@
* free areas of different sizes
*/
spinlock_t lock;
- int all_unreclaimable; /* All pages pinned */
#if defined CONFIG_COMPACTION || defined CONFIG_CMA
/* Set to true when the PG_migrate_skip bits should be cleared */
bool compact_blockskip_flush;
diff --git a/include/linux/swap.h b/include/linux/swap.h
index 036b107..c1fcf34 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -197,6 +197,18 @@
struct block_device *bdev; /* swap device or bdev of swap file */
struct file *swap_file; /* seldom referenced */
unsigned int old_block_size; /* seldom referenced */
+ spinlock_t lock; /*
+ * protect map scan related fields like
+ * swap_map, lowest_bit, highest_bit,
+ * inuse_pages, cluster_next,
+ * cluster_nr, lowest_alloc and
+					 * highest_alloc. Other fields are only
+					 * changed at swapon/swapoff, so are
+					 * protected by swap_lock. Changing
+					 * flags requires holding both this
+					 * lock and swap_lock; when both are
+					 * needed, take swap_lock first.
+ */
};
struct swap_list_t {
@@ -204,9 +216,6 @@
int next; /* swapfile to be used next */
};
-/* Swap 50% full? Release swapcache more aggressively.. */
-#define vm_swap_full() (nr_swap_pages*2 < total_swap_pages)
-
/* linux/mm/page_alloc.c */
extern unsigned long totalram_pages;
extern unsigned long totalreserve_pages;
@@ -335,8 +344,20 @@
struct vm_area_struct *vma, unsigned long addr);
/* linux/mm/swapfile.c */
-extern long nr_swap_pages;
+extern atomic_long_t nr_swap_pages;
extern long total_swap_pages;
+
+/* Swap 50% full? Release swapcache more aggressively.. */
+static inline bool vm_swap_full(void)
+{
+ return atomic_long_read(&nr_swap_pages) * 2 < total_swap_pages;
+}
+
+static inline long get_nr_swap_pages(void)
+{
+ return atomic_long_read(&nr_swap_pages);
+}
+
extern void si_swapinfo(struct sysinfo *);
extern swp_entry_t get_swap_page(void);
extern swp_entry_t get_swap_page_of_type(int);
@@ -370,9 +391,10 @@
#else /* CONFIG_SWAP */
-#define nr_swap_pages 0L
+#define get_nr_swap_pages() 0L
#define total_swap_pages 0L
#define total_swapcache_pages 0UL
+#define vm_swap_full() 0
#define si_swapinfo(val) \
do { (val)->freeswap = (val)->totalswap = 0; } while (0)
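A note on the nr_swap_pages conversion above: making the counter an atomic_long_t lets vm_swap_full() and get_nr_swap_pages() read it without taking swap_lock, while writers (swapon/swapoff, swap_entry_free()) update it with atomic add/sub. A minimal userspace sketch of the same pattern, with illustrative names standing in for the kernel's:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-ins for the kernel counters, not the kernel API. */
static atomic_long nr_swap_pages;
static long total_swap_pages;

static long get_nr_swap_pages(void)
{
	return atomic_load(&nr_swap_pages);	/* lock-free snapshot */
}

/* Swap 50% full? Same test as the new inline in swap.h. */
static bool vm_swap_full(void)
{
	return get_nr_swap_pages() * 2 < total_swap_pages;
}

int main(void)
{
	total_swap_pages = 1000;
	atomic_fetch_add(&nr_swap_pages, 400);	/* as swapon does with p->pages */
	printf("swap full: %d\n", vm_swap_full());	/* 400*2 < 1000 -> 1 */
	return 0;
}
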
diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
index 1d10474..2d59889 100644
--- a/include/linux/vmstat.h
+++ b/include/linux/vmstat.h
@@ -140,7 +140,6 @@
}
extern unsigned long global_reclaimable_pages(void);
-extern unsigned long zone_reclaimable_pages(struct zone *zone);
#ifdef CONFIG_NUMA
/*
diff --git a/mm/internal.h b/mm/internal.h
index 3439ef4..f5369cc 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -90,6 +90,8 @@
*/
extern int isolate_lru_page(struct page *page);
extern void putback_lru_page(struct page *page);
+extern unsigned long zone_reclaimable_pages(struct zone *zone);
+extern bool zone_reclaimable(struct zone *zone);
/*
* in mm/page_alloc.c
diff --git a/mm/mmap.c b/mm/mmap.c
index 848ef52..9932edb 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -133,7 +133,7 @@
*/
free -= global_page_state(NR_SHMEM);
- free += nr_swap_pages;
+ free += get_nr_swap_pages();
/*
* Any slabs which are created with the
diff --git a/mm/nommu.c b/mm/nommu.c
index bb8f4f0..6bb7042 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -1928,7 +1928,7 @@
*/
free -= global_page_state(NR_SHMEM);
- free += nr_swap_pages;
+ free += get_nr_swap_pages();
/*
* Any slabs which are created with the
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 26adea8..a5e8dc2 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -34,8 +34,11 @@
#include <linux/syscalls.h>
#include <linux/buffer_head.h> /* __set_page_dirty_buffers */
#include <linux/pagevec.h>
+#include <linux/mm_inline.h>
#include <trace/events/writeback.h>
+#include "internal.h"
+
/*
* Sleep at most 200ms at a time in balance_dirty_pages().
*/
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 46ccd2f..a1e4f77 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -57,6 +57,7 @@
#include <linux/ftrace_event.h>
#include <linux/memcontrol.h>
#include <linux/prefetch.h>
+#include <linux/mm_inline.h>
#include <linux/migrate.h>
#include <linux/page-debug-flags.h>
@@ -647,7 +648,6 @@
int mt = 0;
spin_lock(&zone->lock);
- zone->all_unreclaimable = 0;
zone->pages_scanned = 0;
while (to_free) {
@@ -693,7 +693,6 @@
int migratetype)
{
spin_lock(&zone->lock);
- zone->all_unreclaimable = 0;
zone->pages_scanned = 0;
__free_one_page(page, zone, order, migratetype);
@@ -2997,7 +2996,7 @@
K(zone_page_state(zone, NR_FREE_CMA_PAGES)),
K(zone_page_state(zone, NR_WRITEBACK_TEMP)),
zone->pages_scanned,
- (zone->all_unreclaimable ? "yes" : "no")
+ (!zone_reclaimable(zone) ? "yes" : "no")
);
printk("lowmem_reserve[]:");
for (i = 0; i < MAX_NR_ZONES; i++)
diff --git a/mm/swap_state.c b/mm/swap_state.c
index 4c5ff7f..eb6a79c 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -58,7 +58,8 @@
printk("Swap cache stats: add %lu, delete %lu, find %lu/%lu\n",
swap_cache_info.add_total, swap_cache_info.del_total,
swap_cache_info.find_success, swap_cache_info.find_total);
- printk("Free swap = %ldkB\n", nr_swap_pages << (PAGE_SHIFT - 10));
+ printk("Free swap = %ldkB\n",
+ get_nr_swap_pages() << (PAGE_SHIFT - 10));
printk("Total swap = %lukB\n", total_swap_pages << (PAGE_SHIFT - 10));
}
diff --git a/mm/swapfile.c b/mm/swapfile.c
index a6c07fd..9ae4c8d 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -45,9 +45,11 @@
static DEFINE_SPINLOCK(swap_lock);
static unsigned int nr_swapfiles;
-long nr_swap_pages;
+atomic_long_t nr_swap_pages;
+/* protected by swap_lock. reading it in vm_swap_full() doesn't need the lock */
long total_swap_pages;
static int least_priority;
+static atomic_t highest_priority_index = ATOMIC_INIT(-1);
static const char Bad_file[] = "Bad swap file entry ";
static const char Unused_file[] = "Unused swap file entry ";
@@ -221,7 +223,7 @@
si->lowest_alloc = si->max;
si->highest_alloc = 0;
}
- spin_unlock(&swap_lock);
+ spin_unlock(&si->lock);
/*
* If seek is expensive, start searching for new cluster from
@@ -240,7 +242,7 @@
if (si->swap_map[offset])
last_in_cluster = offset + SWAPFILE_CLUSTER;
else if (offset == last_in_cluster) {
- spin_lock(&swap_lock);
+ spin_lock(&si->lock);
offset -= SWAPFILE_CLUSTER - 1;
si->cluster_next = offset;
si->cluster_nr = SWAPFILE_CLUSTER - 1;
@@ -261,7 +263,7 @@
if (si->swap_map[offset])
last_in_cluster = offset + SWAPFILE_CLUSTER;
else if (offset == last_in_cluster) {
- spin_lock(&swap_lock);
+ spin_lock(&si->lock);
offset -= SWAPFILE_CLUSTER - 1;
si->cluster_next = offset;
si->cluster_nr = SWAPFILE_CLUSTER - 1;
@@ -275,7 +277,7 @@
}
offset = scan_base;
- spin_lock(&swap_lock);
+ spin_lock(&si->lock);
si->cluster_nr = SWAPFILE_CLUSTER - 1;
si->lowest_alloc = 0;
}
@@ -291,9 +293,9 @@
/* reuse swap entry of cache-only swap if not busy. */
if (vm_swap_full() && si->swap_map[offset] == SWAP_HAS_CACHE) {
int swap_was_freed;
- spin_unlock(&swap_lock);
+ spin_unlock(&si->lock);
swap_was_freed = __try_to_reclaim_swap(si, offset);
- spin_lock(&swap_lock);
+ spin_lock(&si->lock);
/* entry was freed successfully, try to use this again */
if (swap_was_freed)
goto checks;
@@ -333,13 +335,13 @@
si->lowest_alloc <= last_in_cluster)
last_in_cluster = si->lowest_alloc - 1;
si->flags |= SWP_DISCARDING;
- spin_unlock(&swap_lock);
+ spin_unlock(&si->lock);
if (offset < last_in_cluster)
discard_swap_cluster(si, offset,
last_in_cluster - offset + 1);
- spin_lock(&swap_lock);
+ spin_lock(&si->lock);
si->lowest_alloc = 0;
si->flags &= ~SWP_DISCARDING;
@@ -353,10 +355,10 @@
* could defer that delay until swap_writepage,
* but it's easier to keep this self-contained.
*/
- spin_unlock(&swap_lock);
+ spin_unlock(&si->lock);
wait_on_bit(&si->flags, ilog2(SWP_DISCARDING),
wait_for_discard, TASK_UNINTERRUPTIBLE);
- spin_lock(&swap_lock);
+ spin_lock(&si->lock);
} else {
/*
* Note pages allocated by racing tasks while
@@ -372,14 +374,14 @@
return offset;
scan:
- spin_unlock(&swap_lock);
+ spin_unlock(&si->lock);
while (++offset <= si->highest_bit) {
if (!si->swap_map[offset]) {
- spin_lock(&swap_lock);
+ spin_lock(&si->lock);
goto checks;
}
if (vm_swap_full() && si->swap_map[offset] == SWAP_HAS_CACHE) {
- spin_lock(&swap_lock);
+ spin_lock(&si->lock);
goto checks;
}
if (unlikely(--latency_ration < 0)) {
@@ -390,11 +392,11 @@
offset = si->lowest_bit;
while (++offset < scan_base) {
if (!si->swap_map[offset]) {
- spin_lock(&swap_lock);
+ spin_lock(&si->lock);
goto checks;
}
if (vm_swap_full() && si->swap_map[offset] == SWAP_HAS_CACHE) {
- spin_lock(&swap_lock);
+ spin_lock(&si->lock);
goto checks;
}
if (unlikely(--latency_ration < 0)) {
@@ -402,7 +404,7 @@
latency_ration = LATENCY_LIMIT;
}
}
- spin_lock(&swap_lock);
+ spin_lock(&si->lock);
no_page:
si->flags -= SWP_SCANNING;
@@ -415,13 +417,34 @@
pgoff_t offset;
int type, next;
int wrapped = 0;
+ int hp_index;
spin_lock(&swap_lock);
- if (nr_swap_pages <= 0)
+ if (atomic_long_read(&nr_swap_pages) <= 0)
goto noswap;
- nr_swap_pages--;
+ atomic_long_dec(&nr_swap_pages);
for (type = swap_list.next; type >= 0 && wrapped < 2; type = next) {
+ hp_index = atomic_xchg(&highest_priority_index, -1);
+ /*
+		 * highest_priority_index records the highest-priority swap
+		 * type that recently freed swap entries. If its priority is
+		 * higher than that of the swap_list.next swap type, we use
+		 * it. It isn't protected by swap_lock, so it can hold a stale
+		 * value if the corresponding swap type has been swapped off.
+		 * We double-check the flags here. It's even possible a swap
+		 * type was swapped off and on again with a changed priority;
+		 * in that rare case a low-priority swap type might be used,
+		 * but the high-priority one wins after several rounds of
+		 * swapping.
+ */
+ if (hp_index != -1 && hp_index != type &&
+ swap_info[type]->prio < swap_info[hp_index]->prio &&
+ (swap_info[hp_index]->flags & SWP_WRITEOK)) {
+ type = hp_index;
+ swap_list.next = type;
+ }
+
si = swap_info[type];
next = si->next;
if (next < 0 ||
@@ -430,22 +453,29 @@
wrapped++;
}
- if (!si->highest_bit)
+ spin_lock(&si->lock);
+ if (!si->highest_bit) {
+ spin_unlock(&si->lock);
continue;
- if (!(si->flags & SWP_WRITEOK))
+ }
+ if (!(si->flags & SWP_WRITEOK)) {
+ spin_unlock(&si->lock);
continue;
+ }
swap_list.next = next;
+
+ spin_unlock(&swap_lock);
/* This is called for allocating swap entry for cache */
offset = scan_swap_map(si, SWAP_HAS_CACHE);
- if (offset) {
- spin_unlock(&swap_lock);
+ spin_unlock(&si->lock);
+ if (offset)
return swp_entry(type, offset);
- }
+ spin_lock(&swap_lock);
next = swap_list.next;
}
- nr_swap_pages++;
+ atomic_long_inc(&nr_swap_pages);
noswap:
spin_unlock(&swap_lock);
return (swp_entry_t) {0};
@@ -457,19 +487,19 @@
struct swap_info_struct *si;
pgoff_t offset;
- spin_lock(&swap_lock);
si = swap_info[type];
+ spin_lock(&si->lock);
if (si && (si->flags & SWP_WRITEOK)) {
- nr_swap_pages--;
+ atomic_long_dec(&nr_swap_pages);
/* This is called for allocating swap entry, not cache */
offset = scan_swap_map(si, 1);
if (offset) {
- spin_unlock(&swap_lock);
+ spin_unlock(&si->lock);
return swp_entry(type, offset);
}
- nr_swap_pages++;
+ atomic_long_inc(&nr_swap_pages);
}
- spin_unlock(&swap_lock);
+ spin_unlock(&si->lock);
return (swp_entry_t) {0};
}
@@ -491,7 +521,7 @@
goto bad_offset;
if (!p->swap_map[offset])
goto bad_free;
- spin_lock(&swap_lock);
+ spin_lock(&p->lock);
return p;
bad_free:
@@ -509,6 +539,27 @@
return NULL;
}
+/*
+ * This swap type just freed a swap entry; check whether it is now the
+ * highest-priority swap type that recently freed entries. get_swap_page()
+ * uses highest_priority_index to find the highest-priority swap type.
+ * swap_info_struct.lock can't protect us when multiple swap types are
+ * active, so we use atomic_cmpxchg().
+ */
+static void set_highest_priority_index(int type)
+{
+ int old_hp_index, new_hp_index;
+
+ do {
+ old_hp_index = atomic_read(&highest_priority_index);
+ if (old_hp_index != -1 &&
+ swap_info[old_hp_index]->prio >= swap_info[type]->prio)
+ break;
+ new_hp_index = type;
+ } while (atomic_cmpxchg(&highest_priority_index,
+ old_hp_index, new_hp_index) != old_hp_index);
+}
+
static unsigned char swap_entry_free(struct swap_info_struct *p,
swp_entry_t entry, unsigned char usage)
{
@@ -552,10 +603,8 @@
p->lowest_bit = offset;
if (offset > p->highest_bit)
p->highest_bit = offset;
- if (swap_list.next >= 0 &&
- p->prio > swap_info[swap_list.next]->prio)
- swap_list.next = p->type;
- nr_swap_pages++;
+ set_highest_priority_index(p->type);
+ atomic_long_inc(&nr_swap_pages);
p->inuse_pages--;
if ((p->flags & SWP_BLKDEV) &&
disk->fops->swap_slot_free_notify)
@@ -576,7 +625,7 @@
p = swap_info_get(entry);
if (p) {
swap_entry_free(p, entry, 1);
- spin_unlock(&swap_lock);
+ spin_unlock(&p->lock);
}
}
@@ -593,7 +642,7 @@
count = swap_entry_free(p, entry, SWAP_HAS_CACHE);
if (page)
mem_cgroup_uncharge_swapcache(page, entry, count != 0);
- spin_unlock(&swap_lock);
+ spin_unlock(&p->lock);
}
}
@@ -612,7 +661,7 @@
p = swap_info_get(entry);
if (p) {
count = swap_count(p->swap_map[swp_offset(entry)]);
- spin_unlock(&swap_lock);
+ spin_unlock(&p->lock);
}
return count;
}
@@ -700,7 +749,7 @@
page = NULL;
}
}
- spin_unlock(&swap_lock);
+ spin_unlock(&p->lock);
}
if (page) {
/*
@@ -829,11 +878,13 @@
if ((unsigned int)type < nr_swapfiles) {
struct swap_info_struct *sis = swap_info[type];
+ spin_lock(&sis->lock);
if (sis->flags & SWP_WRITEOK) {
n = sis->pages;
if (free)
n -= sis->inuse_pages;
}
+ spin_unlock(&sis->lock);
}
spin_unlock(&swap_lock);
return n;
@@ -1529,7 +1580,7 @@
p->prio = --least_priority;
p->swap_map = swap_map;
p->flags |= SWP_WRITEOK;
- nr_swap_pages += p->pages;
+ atomic_long_add(p->pages, &nr_swap_pages);
total_swap_pages += p->pages;
/* insert swap space into swap_list: */
@@ -1606,14 +1657,16 @@
/* just pick something that's safe... */
swap_list.next = swap_list.head;
}
+ spin_lock(&p->lock);
if (p->prio < 0) {
for (i = p->next; i >= 0; i = swap_info[i]->next)
swap_info[i]->prio = p->prio--;
least_priority++;
}
- nr_swap_pages -= p->pages;
+ atomic_long_sub(p->pages, &nr_swap_pages);
total_swap_pages -= p->pages;
p->flags &= ~SWP_WRITEOK;
+ spin_unlock(&p->lock);
spin_unlock(&swap_lock);
oom_score_adj = test_set_oom_score_adj(OOM_SCORE_ADJ_MAX);
@@ -1638,14 +1691,17 @@
mutex_lock(&swapon_mutex);
spin_lock(&swap_lock);
+ spin_lock(&p->lock);
drain_mmlist();
/* wait for anyone still in scan_swap_map */
p->highest_bit = 0; /* cuts scans short */
while (p->flags >= SWP_SCANNING) {
+ spin_unlock(&p->lock);
spin_unlock(&swap_lock);
schedule_timeout_uninterruptible(1);
spin_lock(&swap_lock);
+ spin_lock(&p->lock);
}
swap_file = p->swap_file;
@@ -1654,6 +1710,7 @@
swap_map = p->swap_map;
p->swap_map = NULL;
p->flags = 0;
+ spin_unlock(&p->lock);
spin_unlock(&swap_lock);
mutex_unlock(&swapon_mutex);
vfree(swap_map);
@@ -1857,6 +1914,7 @@
p->flags = SWP_USED;
p->next = -1;
spin_unlock(&swap_lock);
+ spin_lock_init(&p->lock);
return p;
}
@@ -2178,7 +2236,7 @@
if ((si->flags & SWP_USED) && !(si->flags & SWP_WRITEOK))
nr_to_be_unused += si->inuse_pages;
}
- val->freeswap = nr_swap_pages + nr_to_be_unused;
+ val->freeswap = atomic_long_read(&nr_swap_pages) + nr_to_be_unused;
val->totalswap = total_swap_pages + nr_to_be_unused;
spin_unlock(&swap_lock);
}
@@ -2211,7 +2269,7 @@
p = swap_info[type];
offset = swp_offset(entry);
- spin_lock(&swap_lock);
+ spin_lock(&p->lock);
if (unlikely(offset >= p->max))
goto unlock_out;
@@ -2246,7 +2304,7 @@
p->swap_map[offset] = count | has_cache;
unlock_out:
- spin_unlock(&swap_lock);
+ spin_unlock(&p->lock);
out:
return err;
@@ -2371,7 +2429,7 @@
}
if (!page) {
- spin_unlock(&swap_lock);
+ spin_unlock(&si->lock);
return -ENOMEM;
}
@@ -2419,7 +2477,7 @@
list_add_tail(&page->lru, &head->lru);
page = NULL; /* now it's attached, don't free it */
out:
- spin_unlock(&swap_lock);
+ spin_unlock(&si->lock);
outer:
if (page)
__free_page(page);
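The hunks above follow the lock ordering documented in swap.h: when both swap_lock and a swap_info_struct's lock are needed (as in the swapoff path), swap_lock is taken first. A minimal userspace sketch of that hierarchy, with pthread mutexes standing in for the kernel spinlocks and illustrative names throughout:

#include <pthread.h>
#include <stdio.h>

/* Stand-in for the global swap_lock guarding the swap type list. */
static pthread_mutex_t swap_lock = PTHREAD_MUTEX_INITIALIZER;

/* Stand-in for swap_info_struct: the per-device lock guards the
 * scan-related fields (swap_map, lowest_bit, highest_bit, ...). */
struct swap_info {
	pthread_mutex_t lock;
	int highest_bit;
};

/* Mirrors the swapoff hunk: both locks held, swap_lock first, so no
 * lock-order inversion against other two-lock paths is possible. */
static void disable_swap(struct swap_info *si)
{
	pthread_mutex_lock(&swap_lock);
	pthread_mutex_lock(&si->lock);
	si->highest_bit = 0;	/* cuts scans short, as in sys_swapoff() */
	pthread_mutex_unlock(&si->lock);
	pthread_mutex_unlock(&swap_lock);
}

int main(void)
{
	struct swap_info si = { PTHREAD_MUTEX_INITIALIZER, 100 };
	disable_swap(&si);
	printf("highest_bit=%d\n", si.highest_bit);
	return 0;
}
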
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 1438de9..9e95109 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -158,8 +158,28 @@
return &mz->zone->reclaim_stat;
}
+unsigned long zone_reclaimable_pages(struct zone *zone)
+{
+ int nr;
+
+ nr = zone_page_state(zone, NR_ACTIVE_FILE) +
+ zone_page_state(zone, NR_INACTIVE_FILE);
+
+ if (get_nr_swap_pages() > 0)
+ nr += zone_page_state(zone, NR_ACTIVE_ANON) +
+ zone_page_state(zone, NR_INACTIVE_ANON);
+
+ return nr;
+}
+
+bool zone_reclaimable(struct zone *zone)
+{
+ return zone->pages_scanned < zone_reclaimable_pages(zone) * 6;
+}
+
static unsigned long zone_nr_lru_pages(struct mem_cgroup_zone *mz,
enum lru_list lru)
{
if (!mem_cgroup_disabled())
return mem_cgroup_zone_nr_lru_pages(mz->mem_cgroup,
@@ -1648,13 +1668,13 @@
* latencies, so it's better to scan a minimum amount there as
* well.
*/
- if (current_is_kswapd() && mz->zone->all_unreclaimable)
+ if (current_is_kswapd() && !zone_reclaimable(mz->zone))
force_scan = true;
if (!global_reclaim(sc))
force_scan = true;
/* If we have no swap space, do not bother scanning anon pages. */
- if (!sc->may_swap || (nr_swap_pages <= 0)) {
+ if (!sc->may_swap || (get_nr_swap_pages() <= 0)) {
noswap = 1;
fraction[0] = 0;
fraction[1] = 1;
@@ -1798,7 +1818,7 @@
*/
pages_for_compaction = (2UL << sc->order);
inactive_lru_pages = zone_nr_lru_pages(mz, LRU_INACTIVE_FILE);
- if (nr_swap_pages > 0)
+ if (get_nr_swap_pages() > 0)
inactive_lru_pages += zone_nr_lru_pages(mz, LRU_INACTIVE_ANON);
if (sc->nr_reclaimed < pages_for_compaction &&
inactive_lru_pages > pages_for_compaction)
@@ -1995,8 +2015,8 @@
if (global_reclaim(sc)) {
if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
continue;
- if (zone->all_unreclaimable &&
- sc->priority != DEF_PRIORITY)
+ if (sc->priority != DEF_PRIORITY &&
+ !zone_reclaimable(zone))
continue; /* Let kswapd poll it */
if (COMPACTION_BUILD) {
/*
@@ -2034,11 +2054,6 @@
return aborted_reclaim;
}
-static bool zone_reclaimable(struct zone *zone)
-{
- return zone->pages_scanned < zone_reclaimable_pages(zone) * 6;
-}
-
/* All zones in zonelist are unreclaimable? */
static bool all_unreclaimable(struct zonelist *zonelist,
struct scan_control *sc)
@@ -2052,7 +2067,7 @@
continue;
if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
continue;
- if (!zone->all_unreclaimable)
+ if (zone_reclaimable(zone))
return false;
}
@@ -2377,7 +2392,7 @@
* they must be considered balanced here as well if kswapd
* is to sleep
*/
- if (zone->all_unreclaimable) {
+ if (!zone_reclaimable(zone)) {
balanced += zone->present_pages;
continue;
}
@@ -2470,8 +2485,8 @@
if (!populated_zone(zone))
continue;
- if (zone->all_unreclaimable &&
- sc.priority != DEF_PRIORITY)
+ if (sc.priority != DEF_PRIORITY &&
+ !zone_reclaimable(zone))
continue;
/*
@@ -2525,8 +2540,8 @@
if (!populated_zone(zone))
continue;
- if (zone->all_unreclaimable &&
- sc.priority != DEF_PRIORITY)
+ if (sc.priority != DEF_PRIORITY &&
+ !zone_reclaimable(zone))
continue;
sc.nr_scanned = 0;
@@ -2576,8 +2591,6 @@
sc.nr_reclaimed += reclaim_state->reclaimed_slab;
total_scanned += sc.nr_scanned;
- if (nr_slab == 0 && !zone_reclaimable(zone))
- zone->all_unreclaimable = 1;
}
/*
@@ -2589,7 +2602,7 @@
total_scanned > sc.nr_reclaimed + sc.nr_reclaimed / 2)
sc.may_writepage = 1;
- if (zone->all_unreclaimable) {
+ if (!zone_reclaimable(zone)) {
if (end_zone && end_zone == i)
end_zone--;
continue;
@@ -2912,27 +2925,13 @@
nr = global_page_state(NR_ACTIVE_FILE) +
global_page_state(NR_INACTIVE_FILE);
- if (nr_swap_pages > 0)
+ if (get_nr_swap_pages() > 0)
nr += global_page_state(NR_ACTIVE_ANON) +
global_page_state(NR_INACTIVE_ANON);
return nr;
}
-unsigned long zone_reclaimable_pages(struct zone *zone)
-{
- int nr;
-
- nr = zone_page_state(zone, NR_ACTIVE_FILE) +
- zone_page_state(zone, NR_INACTIVE_FILE);
-
- if (nr_swap_pages > 0)
- nr += zone_page_state(zone, NR_ACTIVE_ANON) +
- zone_page_state(zone, NR_INACTIVE_ANON);
-
- return nr;
-}
-
#ifdef CONFIG_HIBERNATION
/*
* Try to free `nr_to_reclaim' of memory, system-wide, and return the number of
@@ -3227,7 +3226,7 @@
zone_page_state(zone, NR_SLAB_RECLAIMABLE) <= zone->min_slab_pages)
return ZONE_RECLAIM_FULL;
- if (zone->all_unreclaimable)
+ if (!zone_reclaimable(zone))
return ZONE_RECLAIM_FULL;
/*
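On the zone_reclaimable() test that replaces the zone->all_unreclaimable flag throughout the hunks above: a zone is treated as unreclaimable once pages_scanned reaches six times its reclaimable pages, and the state heals on its own once pages are freed (the free paths reset zone->pages_scanned to 0) instead of waiting for a sticky flag to be cleared. A small worked sketch with illustrative numbers:

#include <stdbool.h>
#include <stdio.h>

/* Illustrative model of the heuristic added to mm/vmscan.c above. */
struct zone_model {
	unsigned long pages_scanned;
	unsigned long reclaimable;	/* file LRU pages, plus anon if swap is available */
};

static bool zone_reclaimable(const struct zone_model *z)
{
	return z->pages_scanned < z->reclaimable * 6;
}

int main(void)
{
	struct zone_model z = { .pages_scanned = 5999, .reclaimable = 1000 };

	printf("%d\n", zone_reclaimable(&z));	/* 1: 5999 < 6000, keep scanning */
	z.pages_scanned = 6000;
	printf("%d\n", zone_reclaimable(&z));	/* 0: treated as unreclaimable */
	return 0;
}
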
diff --git a/mm/vmstat.c b/mm/vmstat.c
index 959a558..9559032 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -19,6 +19,9 @@
#include <linux/math64.h>
#include <linux/writeback.h>
#include <linux/compaction.h>
+#include <linux/mm_inline.h>
+
+#include "internal.h"
#ifdef CONFIG_VM_EVENT_COUNTERS
DEFINE_PER_CPU(struct vm_event_state, vm_event_states) = {{0}};
@@ -1027,7 +1030,7 @@
"\n all_unreclaimable: %u"
"\n start_pfn: %lu"
"\n inactive_ratio: %u",
- zone->all_unreclaimable,
+ !zone_reclaimable(zone),
zone->zone_start_pfn,
zone->inactive_ratio);
seq_putc(m, '\n');