mm: reuse_swap_page replaces can_share_swap_page

A good place to free up old swap is where do_wp_page(), or do_swap_page(),
is about to redirty the page: the data on disk is then stale and won't be
read again; and if we do decide to write the page out later, using the
previous swap location makes an unnecessary disk seek very likely.

So give can_share_swap_page() the side-effect of delete_from_swap_cache()
when it safely can. And can_share_swap_page() was always a misleading
name, the more so if it has a side-effect: rename it reuse_swap_page().

Irrelevant cleanup nearby: remove swap_token_default_timeout definition
from swap.h: it's used nowhere.

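Roughly, the intended calling pattern on the write-fault paths is the
following (a simplified sketch of the memory.c changes below, not the
literal code):

	/* page is locked here; reuse_swap_page() requires PageLocked */
	if (write_access && reuse_swap_page(page)) {
		/* sole owner: stale swap freed, write in place, no COW copy */
		pte = maybe_mkwrite(pte_mkdirty(pte), vma);
		write_access = 0;
	}
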
Signed-off-by: Hugh Dickins <hugh@veritas.com>
Cc: Lee Schermerhorn <lee.schermerhorn@hp.com>
Acked-by: Rik van Riel <riel@redhat.com>
Cc: Nick Piggin <nickpiggin@yahoo.com.au>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Robin Holt <holt@sgi.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

diff --git a/include/linux/swap.h b/include/linux/swap.h
index 48f309d..366556c 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -304,7 +304,7 @@
extern sector_t map_swap_page(struct swap_info_struct *, pgoff_t);
extern sector_t swapdev_block(int, pgoff_t);
extern struct swap_info_struct *get_swap_info_struct(unsigned);
-extern int can_share_swap_page(struct page *);
+extern int reuse_swap_page(struct page *);
extern int remove_exclusive_swap_page(struct page *);
extern int remove_exclusive_swap_page_ref(struct page *);
struct backing_dev_info;
@@ -372,8 +372,6 @@
return NULL;
}
-#define can_share_swap_page(p) (page_mapcount(p) == 1)
-
static inline int add_to_swap_cache(struct page *page, swp_entry_t entry,
gfp_t gfp_mask)
{
@@ -388,7 +386,7 @@
{
}
-#define swap_token_default_timeout 0
+#define reuse_swap_page(page) (page_mapcount(page) == 1)
static inline int remove_exclusive_swap_page(struct page *p)
{
diff --git a/mm/memory.c b/mm/memory.c
index 3922ffc..8f471ed 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1861,7 +1861,7 @@
}
page_cache_release(old_page);
}
- reuse = can_share_swap_page(old_page);
+ reuse = reuse_swap_page(old_page);
unlock_page(old_page);
} else if (unlikely((vma->vm_flags & (VM_WRITE|VM_SHARED)) ==
(VM_WRITE|VM_SHARED))) {
@@ -2392,7 +2392,7 @@
inc_mm_counter(mm, anon_rss);
pte = mk_pte(page, vma->vm_page_prot);
- if (write_access && can_share_swap_page(page)) {
+ if (write_access && reuse_swap_page(page)) {
pte = maybe_mkwrite(pte_mkdirty(pte), vma);
write_access = 0;
}
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 214e90b..bfd4ee5 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -326,17 +326,24 @@
}
/*
- * We can use this swap cache entry directly
- * if there are no other references to it.
+ * We can write to an anon page without COW if there are no other references
+ * to it. And as a side-effect, free up its swap: because the old content
+ * on disk will never be read, and seeking back there to write new content
+ * later would only waste time away from clustering.
*/
-int can_share_swap_page(struct page *page)
+int reuse_swap_page(struct page *page)
{
int count;
VM_BUG_ON(!PageLocked(page));
count = page_mapcount(page);
- if (count <= 1 && PageSwapCache(page))
+ if (count <= 1 && PageSwapCache(page)) {
count += page_swapcount(page);
+ if (count == 1 && !PageWriteback(page)) {
+ delete_from_swap_cache(page);
+ SetPageDirty(page);
+ }
+ }
return count == 1;
}
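
For reference, the ownership test above amounts to the following (a sketch
with added comments, not the literal code; it assumes page_swapcount() does
not count the swap cache's own reference, matching the existing helper in
this file):

	count = page_mapcount(page);		/* ptes mapping the page */
	if (count <= 1 && PageSwapCache(page)) {
		/* add references held via swap entries; assumed to exclude
		 * the swap cache's own reference */
		count += page_swapcount(page);
		if (count == 1 && !PageWriteback(page)) {
			/*
			 * We are the only user and no writeback to the old
			 * slot is in flight: the on-disk copy can never be
			 * read again, so free the swap slot now and keep
			 * the data dirty in memory instead.
			 */
			delete_from_swap_cache(page);
			SetPageDirty(page);
		}
	}
	return count == 1;	/* nonzero: caller may reuse the page in place */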