#ifndef _LINUX_SWAP_H
#define _LINUX_SWAP_H

#include <linux/spinlock.h>
#include <linux/linkage.h>
#include <linux/mmzone.h>
#include <linux/list.h>
#include <linux/memcontrol.h>
#include <linux/sched.h>
#include <linux/node.h>
#include <linux/fs.h>
#include <linux/atomic.h>
#include <linux/page-flags.h>
#include <asm/page.h>

struct notifier_block;

struct bio;

#define SWAP_FLAG_PREFER	0x8000	/* set if swap priority specified */
#define SWAP_FLAG_PRIO_MASK	0x7fff
#define SWAP_FLAG_PRIO_SHIFT	0
#define SWAP_FLAG_DISCARD	0x10000 /* discard swap cluster after use */

#define SWAP_FLAGS_VALID	(SWAP_FLAG_PRIO_MASK | SWAP_FLAG_PREFER | \
				 SWAP_FLAG_DISCARD)

static inline int current_is_kswapd(void)
{
	return current->flags & PF_KSWAPD;
}

/*
 * MAX_SWAPFILES defines the maximum number of swaptypes: things which can
 * be swapped to.  The swap type and the offset into that swap type are
 * encoded into pte's and into pgoff_t's in the swapcache.  Using five bits
 * for the type means that the maximum number of swapcache pages is 27 bits
 * on 32-bit-pgoff_t architectures.  And that assumes that the architecture
 * packs the type/offset into the pte as 5/27 as well.
 */
#define MAX_SWAPFILES_SHIFT	5

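/*
 * Worked example (an editor's sketch, not compiled): under the 5/27 split
 * described above, 2^5 = 32 type values exist before any are reserved, and
 * each type can address 2^27 pages -- with 4 KiB pages that is 512 GiB of
 * swap per type.
 */
#if 0
unsigned int type_bits = MAX_SWAPFILES_SHIFT;		/* 5 */
unsigned int offset_bits = 32 - MAX_SWAPFILES_SHIFT;	/* 27 on 32-bit pgoff_t */
unsigned long max_types = 1UL << type_bits;		/* 32 swap types */
unsigned long max_pages = 1UL << offset_bits;		/* 2^27 pages per type */
#endif
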
/*
 * Use some of the swap files numbers for other purposes. This
 * is a convenient way to hook into the VM to trigger special
 * actions on faults.
 */

/*
 * NUMA node memory migration support
 */
#ifdef CONFIG_MIGRATION
#define SWP_MIGRATION_NUM 2
#define SWP_MIGRATION_READ	(MAX_SWAPFILES + SWP_HWPOISON_NUM)
#define SWP_MIGRATION_WRITE	(MAX_SWAPFILES + SWP_HWPOISON_NUM + 1)
#else
#define SWP_MIGRATION_NUM 0
#endif

/*
 * Handling of hardware poisoned pages with memory corruption.
 */
#ifdef CONFIG_MEMORY_FAILURE
#define SWP_HWPOISON_NUM 1
#define SWP_HWPOISON		MAX_SWAPFILES
#else
#define SWP_HWPOISON_NUM 0
#endif

#define MAX_SWAPFILES \
	((1 << MAX_SWAPFILES_SHIFT) - SWP_MIGRATION_NUM - SWP_HWPOISON_NUM)

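/*
 * Sketch (not compiled) of how the top type values are carved out when both
 * CONFIG_MEMORY_FAILURE and CONFIG_MIGRATION are enabled: MAX_SWAPFILES is
 * then (1 << 5) - 2 - 1 = 29, so types 0..28 index real swap areas, type 29
 * is SWP_HWPOISON, and types 30/31 are the migration entries.
 */
#if 0
BUILD_BUG_ON(MAX_SWAPFILES != 29);
BUILD_BUG_ON(SWP_HWPOISON != MAX_SWAPFILES);
BUILD_BUG_ON(SWP_MIGRATION_WRITE != SWP_MIGRATION_READ + 1);
#endif
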
/*
 * Magic header for a swap area. The first part of the union is
 * what the swap magic looks like for the old (limited to 128MB)
 * swap area format, the second part of the union adds - in the
 * old reserved area - some extra information. Note that the first
 * kilobyte is reserved for boot loader or disk label stuff...
 *
 * Having the magic at the end of the PAGE_SIZE makes detecting swap
 * areas somewhat tricky on machines that support multiple page sizes.
 * For 2.5 we'll probably want to move the magic to just beyond the
 * bootbits...
 */
union swap_header {
	struct {
		char reserved[PAGE_SIZE - 10];
		char magic[10];			/* SWAP-SPACE or SWAPSPACE2 */
	} magic;
	struct {
		char		bootbits[1024];	/* Space for disklabel etc. */
		__u32		version;
		__u32		last_page;
		__u32		nr_badpages;
		unsigned char	sws_uuid[16];
		unsigned char	sws_volume[16];
		__u32		padding[117];
		__u32		badpages[1];
	} info;
};

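/*
 * Hedged sketch (not compiled) of how the magic is probed; mm/swapfile.c's
 * swapon path does roughly this on the first page read from the device.
 * `page' here is a hypothetical struct page holding that first page.
 */
#if 0
union swap_header *hdr = page_address(page);
if (!memcmp("SWAP-SPACE", hdr->magic.magic, 10))
	/* old v0 format, limited to 128MB */;
else if (!memcmp("SWAPSPACE2", hdr->magic.magic, 10))
	/* v1 format: consult hdr->info.version, last_page, nr_badpages... */;
#endif
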
/*
 * A swap entry has to fit into an "unsigned long", as the entry is
 * hidden in the "index" field of the swapper address space.
 */
typedef struct {
	unsigned long val;
} swp_entry_t;

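/*
 * The pack/unpack helpers for swp_entry_t live in <linux/swapops.h>; a
 * rough sketch of their use (the actual bit layout is defined there):
 */
#if 0
swp_entry_t entry = swp_entry(type, offset);	/* pack type + offset */
unsigned swap_type = swp_type(entry);		/* recover the type bits */
pgoff_t swap_off = swp_offset(entry);		/* recover the offset bits */
#endif
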
/*
 * current->reclaim_state points to one of these when a task is running
 * memory reclaim
 */
struct reclaim_state {
	unsigned long reclaimed_slab;
};

#ifdef __KERNEL__

struct address_space;
struct sysinfo;
struct writeback_control;
struct zone;

/*
 * A swap extent maps a range of a swapfile's PAGE_SIZE pages onto a range of
 * disk blocks.  A list of swap extents maps the entire swapfile.  (Here the
 * term `swapfile' refers to either a blockdevice or an IS_REG file; apart
 * from setup, they're handled identically.)
 *
 * We always assume that blocks are of size PAGE_SIZE.
 */
struct swap_extent {
	struct list_head list;
	pgoff_t start_page;
	pgoff_t nr_pages;
	sector_t start_block;
};

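/*
 * Sketch of the extent lookup performed by map_swap_page() in mm/swapfile.c:
 * once `se' is the extent covering swap page `offset', the disk block is a
 * simple linear translation (blocks are PAGE_SIZE units here).
 */
#if 0
sector_t block = se->start_block + (offset - se->start_page);
#endif
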
/*
 * Max bad pages in the new format.
 */
#define __swapoffset(x) ((unsigned long)&((union swap_header *)0)->x)
#define MAX_SWAP_BADPAGES \
	((__swapoffset(magic.magic) - __swapoffset(info.badpages)) / sizeof(int))

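/*
 * Worked example with 4 KiB pages: __swapoffset(magic.magic) is
 * PAGE_SIZE - 10 = 4086 and __swapoffset(info.badpages) is
 * 1024 + 3*4 + 2*16 + 117*4 = 1536, so MAX_SWAP_BADPAGES is
 * (4086 - 1536) / sizeof(int) = 637.
 */
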
enum {
	SWP_USED	= (1 << 0),	/* is slot in swap_info[] used? */
	SWP_WRITEOK	= (1 << 1),	/* ok to write to this swap?	*/
	SWP_DISCARDABLE = (1 << 2),	/* swapon+blkdev support discard */
	SWP_DISCARDING	= (1 << 3),	/* now discarding a free cluster */
	SWP_SOLIDSTATE	= (1 << 4),	/* blkdev seeks are cheap */
	SWP_CONTINUED	= (1 << 5),	/* swap_map has count continuation */
	SWP_BLKDEV	= (1 << 6),	/* it's a block device */
	SWP_FILE	= (1 << 7),	/* set after swap_activate success */
					/* add others here before... */
	SWP_SCANNING	= (1 << 8),	/* refcount in scan_swap_map */
};

#define SWAP_CLUSTER_MAX 32UL
#define COMPACT_CLUSTER_MAX SWAP_CLUSTER_MAX

/*
 * Ratio between the present memory in the zone and the "gap" that we allow
 * kswapd to shrink in addition to the per-zone high wmark, even for zones
 * that already have the high wmark satisfied, in order to provide better
 * per-zone lru behavior.  We spend no more than 1% of the zone's memory on
 * this balancing "gap".
 */
#define KSWAPD_ZONE_BALANCE_GAP_RATIO 100

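/*
 * Example: a zone with 4 GiB present (1048576 pages of 4 KiB) gets a
 * balance gap of 1048576 / 100 = 10485 pages, i.e. kswapd may reclaim up
 * to roughly 40 MiB beyond the high watermark in that zone.
 */
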
#define SWAP_MAP_MAX	0x3e	/* Max duplication count, in first swap_map */
#define SWAP_MAP_BAD	0x3f	/* Note pageblock is bad, in first swap_map */
#define SWAP_HAS_CACHE	0x40	/* Flag page is cached, in first swap_map */
#define SWAP_CONT_MAX	0x7f	/* Max count, in each swap_map continuation */
#define COUNT_CONTINUED	0x80	/* See swap_map continuation for full count */
#define SWAP_MAP_SHMEM	0xbf	/* Owned by shmem/tmpfs, in first swap_map */

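/*
 * Sketch (not compiled) of how a swap_map byte is decoded; mm/swapfile.c's
 * swap_count() masks off SWAP_HAS_CACHE in essentially this way, with
 * SWAP_MAP_BAD and SWAP_MAP_SHMEM treated as magic values rather than
 * counts.
 */
#if 0
unsigned char ent = si->swap_map[offset];
bool in_swapcache = ent & SWAP_HAS_CACHE;	/* page is in swap cache */
unsigned char count = ent & ~SWAP_HAS_CACHE;	/* map count, or a magic value */
if (count == SWAP_MAP_BAD)
	/* slot is unusable */;
else if (count & COUNT_CONTINUED)
	/* full count lives in continuation pages */;
#endif
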
/*
 * The in-memory structure used to track swap areas.
 */
struct swap_info_struct {
	unsigned long	flags;		/* SWP_USED etc: see above */
	signed short	prio;		/* swap priority of this type */
	signed char	type;		/* strange name for an index */
	signed char	next;		/* next type on the swap list */
	unsigned int	max;		/* extent of the swap_map */
	unsigned char *swap_map;	/* vmalloc'ed array of usage counts */
	unsigned int lowest_bit;	/* index of first free in swap_map */
	unsigned int highest_bit;	/* index of last free in swap_map */
	unsigned int pages;		/* total of usable pages of swap */
	unsigned int inuse_pages;	/* number of those currently in use */
	unsigned int cluster_next;	/* likely index for next allocation */
	unsigned int cluster_nr;	/* countdown to next cluster search */
	unsigned int lowest_alloc;	/* while preparing discard cluster */
	unsigned int highest_alloc;	/* while preparing discard cluster */
	struct swap_extent *curr_swap_extent;
	struct swap_extent first_swap_extent;
	struct block_device *bdev;	/* swap device or bdev of swap file */
	struct file *swap_file;		/* seldom referenced */
	unsigned int old_block_size;	/* seldom referenced */
#ifdef CONFIG_FRONTSWAP
	unsigned long *frontswap_map;	/* frontswap in-use, one bit per page */
	atomic_t frontswap_pages;	/* frontswap pages in-use counter */
#endif
	spinlock_t lock;		/*
					 * protect map scan related fields like
					 * swap_map, lowest_bit, highest_bit,
					 * inuse_pages, cluster_next,
					 * cluster_nr, lowest_alloc and
					 * highest_alloc.  Other fields are
					 * only changed at swapon/swapoff, so
					 * are protected by swap_lock.
					 * Changing flags requires holding
					 * both this lock and swap_lock; when
					 * both are needed, take swap_lock
					 * first.
					 */
};

struct swap_list_t {
	int head;	/* head of priority-ordered swapfile list */
	int next;	/* swapfile to be used next */
};

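/*
 * Sketch (not compiled) of how get_swap_page() in mm/swapfile.c walks the
 * priority-ordered list these two fields describe: swap_list.next indexes
 * swap_info[], each entry's ->next chains to the next-lower priority area,
 * and -1 terminates the list.
 */
#if 0
for (type = swap_list.next; type >= 0; type = swap_info[type]->next) {
	struct swap_info_struct *si = swap_info[type];
	if (si->flags & SWP_WRITEOK)
		/* try to allocate a page slot from si */;
}
#endif
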
/* linux/mm/page_alloc.c */
extern unsigned long totalram_pages;
extern unsigned long totalreserve_pages;
extern unsigned long dirty_balance_reserve;
extern unsigned long nr_free_buffer_pages(void);
extern unsigned long nr_free_pagecache_pages(void);

/* Definition of global_page_state not available yet */
#define nr_free_pages() global_page_state(NR_FREE_PAGES)

Linus Torvalds1da177e2005-04-16 15:20:36 -0700236/* linux/mm/swap.c */
Mel Gormanc53954a2013-07-03 15:02:34 -0700237extern void __lru_cache_add(struct page *);
238extern void lru_cache_add(struct page *);
Hugh Dickinsfa9add62012-05-29 15:07:09 -0700239extern void lru_add_page_tail(struct page *page, struct page *page_tail,
Shaohua Li5bc7b8a2013-04-29 15:08:36 -0700240 struct lruvec *lruvec, struct list_head *head);
Harvey Harrisonb3c97522008-02-13 15:03:15 -0800241extern void activate_page(struct page *);
242extern void mark_page_accessed(struct page *);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700243extern void lru_add_drain(void);
Konstantin Khlebnikovf0cb3c72012-03-21 16:34:06 -0700244extern void lru_add_drain_cpu(int cpu);
Nick Piggin053837f2006-01-18 17:42:27 -0800245extern int lru_add_drain_all(void);
Miklos Szerediac6aadb2008-04-28 02:12:38 -0700246extern void rotate_reclaimable_page(struct page *page);
Minchan Kim31560182011-03-22 16:32:52 -0700247extern void deactivate_page(struct page *page);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700248extern void swap_setup(void);
249
Lee Schermerhorn894bc312008-10-18 20:26:39 -0700250extern void add_page_to_unevictable_list(struct page *page);
251
/**
 * lru_cache_add_anon - add a page to the page lists
 * @page: the page to add
 */
static inline void lru_cache_add_anon(struct page *page)
{
	ClearPageActive(page);
	__lru_cache_add(page);
}

static inline void lru_cache_add_file(struct page *page)
{
	ClearPageActive(page);
	__lru_cache_add(page);
}

/* linux/mm/vmscan.c */
extern unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
					gfp_t gfp_mask, nodemask_t *mask);
extern int __isolate_lru_page(struct page *page, isolate_mode_t mode);
extern unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem,
						  gfp_t gfp_mask, bool noswap);
extern unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *mem,
						gfp_t gfp_mask, bool noswap,
						struct zone *zone,
						unsigned long *nr_scanned);
extern unsigned long shrink_all_memory(unsigned long nr_pages);
extern int vm_swappiness;
extern int remove_mapping(struct address_space *mapping, struct page *page);
extern unsigned long vm_total_pages;

#ifdef CONFIG_NUMA
extern int zone_reclaim_mode;
extern int sysctl_min_unmapped_ratio;
extern int sysctl_min_slab_ratio;
extern int zone_reclaim(struct zone *, gfp_t, unsigned int);
#else
#define zone_reclaim_mode 0
static inline int zone_reclaim(struct zone *z, gfp_t mask, unsigned int order)
{
	return 0;
}
#endif

extern int page_evictable(struct page *page);
extern void check_move_unevictable_pages(struct page **, int nr_pages);

extern unsigned long scan_unevictable_pages;
extern int scan_unevictable_handler(struct ctl_table *, int,
				    void __user *, size_t *, loff_t *);
#ifdef CONFIG_NUMA
extern int scan_unevictable_register_node(struct node *node);
extern void scan_unevictable_unregister_node(struct node *node);
#else
static inline int scan_unevictable_register_node(struct node *node)
{
	return 0;
}
static inline void scan_unevictable_unregister_node(struct node *node)
{
}
#endif

extern int kswapd_run(int nid);
extern void kswapd_stop(int nid);
#ifdef CONFIG_MEMCG
extern int mem_cgroup_swappiness(struct mem_cgroup *mem);
#else
static inline int mem_cgroup_swappiness(struct mem_cgroup *mem)
{
	return vm_swappiness;
}
#endif
#ifdef CONFIG_MEMCG_SWAP
extern void mem_cgroup_uncharge_swap(swp_entry_t ent);
#else
static inline void mem_cgroup_uncharge_swap(swp_entry_t ent)
{
}
#endif
#ifdef CONFIG_SWAP
/* linux/mm/page_io.c */
extern int swap_readpage(struct page *);
extern int swap_writepage(struct page *page, struct writeback_control *wbc);
extern void end_swap_bio_write(struct bio *bio, int err);
extern int __swap_writepage(struct page *page, struct writeback_control *wbc,
	void (*end_write_func)(struct bio *, int));
extern int swap_set_page_dirty(struct page *page);
extern void end_swap_bio_read(struct bio *bio, int err);

int add_swap_extent(struct swap_info_struct *sis, unsigned long start_page,
		unsigned long nr_pages, sector_t start_block);
int generic_swapfile_activate(struct swap_info_struct *, struct file *,
		sector_t *);

/* linux/mm/swap_state.c */
extern struct address_space swapper_spaces[];
#define swap_address_space(entry) (&swapper_spaces[swp_type(entry)])
extern unsigned long total_swapcache_pages(void);
extern void show_swap_cache_info(void);
extern int add_to_swap(struct page *, struct list_head *list);
extern int add_to_swap_cache(struct page *, swp_entry_t, gfp_t);
extern int __add_to_swap_cache(struct page *page, swp_entry_t entry);
extern void __delete_from_swap_cache(struct page *);
extern void delete_from_swap_cache(struct page *);
extern void free_page_and_swap_cache(struct page *);
extern void free_pages_and_swap_cache(struct page **, int);
extern struct page *lookup_swap_cache(swp_entry_t);
extern struct page *read_swap_cache_async(swp_entry_t, gfp_t,
			struct vm_area_struct *vma, unsigned long addr);
extern struct page *swapin_readahead(swp_entry_t, gfp_t,
			struct vm_area_struct *vma, unsigned long addr);

/* linux/mm/swapfile.c */
extern atomic_long_t nr_swap_pages;
extern long total_swap_pages;

/* Swap 50% full?  Release swapcache more aggressively. */
static inline bool vm_swap_full(void)
{
	return atomic_long_read(&nr_swap_pages) * 2 < total_swap_pages;
}

static inline long get_nr_swap_pages(void)
{
	return atomic_long_read(&nr_swap_pages);
}

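/*
 * Example: with total_swap_pages = 1000, vm_swap_full() turns true once
 * fewer than 500 swap pages remain free (free * 2 < total), at which point
 * callers such as try_to_free_swap() drop swapcache copies more eagerly.
 */
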
extern void si_swapinfo(struct sysinfo *);
extern swp_entry_t get_swap_page(void);
extern swp_entry_t get_swap_page_of_type(int);
extern int add_swap_count_continuation(swp_entry_t, gfp_t);
extern void swap_shmem_alloc(swp_entry_t);
extern int swap_duplicate(swp_entry_t);
extern int swapcache_prepare(swp_entry_t);
extern void swap_free(swp_entry_t);
extern void swapcache_free(swp_entry_t, struct page *page);
extern int free_swap_and_cache(swp_entry_t);
extern int swap_type_of(dev_t, sector_t, struct block_device **);
extern unsigned int count_swap_pages(int, int);
extern sector_t map_swap_page(struct page *, struct block_device **);
extern sector_t swapdev_block(int, pgoff_t);
extern int page_swapcount(struct page *);
extern struct swap_info_struct *page_swap_info(struct page *);
extern int reuse_swap_page(struct page *);
extern int try_to_free_swap(struct page *);
struct backing_dev_info;

#ifdef CONFIG_MEMCG
extern void
mem_cgroup_uncharge_swapcache(struct page *page, swp_entry_t ent, bool swapout);
#else
static inline void
mem_cgroup_uncharge_swapcache(struct page *page, swp_entry_t ent, bool swapout)
{
}
#endif

#else /* CONFIG_SWAP */

#define get_nr_swap_pages()		0L
#define total_swap_pages		0L
#define total_swapcache_pages()		0UL
#define vm_swap_full()			0

#define si_swapinfo(val) \
	do { (val)->freeswap = (val)->totalswap = 0; } while (0)
/* only sparc cannot include linux/pagemap.h in this file
 * so leave page_cache_release and release_pages undeclared... */
#define free_page_and_swap_cache(page) \
	page_cache_release(page)
#define free_pages_and_swap_cache(pages, nr) \
	release_pages((pages), (nr), 0);

static inline void show_swap_cache_info(void)
{
}

#define free_swap_and_cache(swp)	is_migration_entry(swp)
#define swapcache_prepare(swp)		is_migration_entry(swp)

static inline int add_swap_count_continuation(swp_entry_t swp, gfp_t gfp_mask)
{
	return 0;
}

static inline void swap_shmem_alloc(swp_entry_t swp)
{
}

static inline int swap_duplicate(swp_entry_t swp)
{
	return 0;
}

static inline void swap_free(swp_entry_t swp)
{
}

static inline void swapcache_free(swp_entry_t swp, struct page *page)
{
}

static inline struct page *swapin_readahead(swp_entry_t swp, gfp_t gfp_mask,
			struct vm_area_struct *vma, unsigned long addr)
{
	return NULL;
}

static inline int swap_writepage(struct page *p, struct writeback_control *wbc)
{
	return 0;
}

static inline struct page *lookup_swap_cache(swp_entry_t swp)
{
	return NULL;
}

static inline int add_to_swap(struct page *page, struct list_head *list)
{
	return 0;
}

static inline int add_to_swap_cache(struct page *page, swp_entry_t entry,
							gfp_t gfp_mask)
{
	return -1;
}

static inline void __delete_from_swap_cache(struct page *page)
{
}

static inline void delete_from_swap_cache(struct page *page)
{
}

static inline int page_swapcount(struct page *page)
{
	return 0;
}

#define reuse_swap_page(page)	(page_mapcount(page) == 1)

static inline int try_to_free_swap(struct page *page)
{
	return 0;
}

static inline swp_entry_t get_swap_page(void)
{
	swp_entry_t entry;
	entry.val = 0;
	return entry;
}

static inline void
mem_cgroup_uncharge_swapcache(struct page *page, swp_entry_t ent, bool swapout)
{
}

#endif /* CONFIG_SWAP */
#endif /* __KERNEL__*/
#endif /* _LINUX_SWAP_H */