#ifndef _LINUX_SWAP_H
#define _LINUX_SWAP_H

#include <linux/spinlock.h>
#include <linux/linkage.h>
#include <linux/mmzone.h>
#include <linux/list.h>
#include <linux/memcontrol.h>
#include <linux/sched.h>
#include <linux/node.h>

#include <linux/atomic.h>
#include <asm/page.h>

struct notifier_block;

struct bio;

#define SWAP_FLAG_PREFER	0x8000	/* set if swap priority specified */
#define SWAP_FLAG_PRIO_MASK	0x7fff
#define SWAP_FLAG_PRIO_SHIFT	0
#define SWAP_FLAG_DISCARD	0x10000 /* discard swap cluster after use */

static inline int current_is_kswapd(void)
{
	return current->flags & PF_KSWAPD;
}

/*
 * MAX_SWAPFILES defines the maximum number of swaptypes: things which can
 * be swapped to.  The swap type and the offset into that swap type are
 * encoded into pte's and into pgoff_t's in the swapcache.  Using five bits
 * for the type means that the maximum number of swapcache pages is 27 bits
 * on 32-bit-pgoff_t architectures.  And that assumes that the architecture
 * packs the type/offset into the pte as 5/27 as well.
 */
#define MAX_SWAPFILES_SHIFT	5
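/*
 * Illustrative arithmetic (not from the original comment; assumes the
 * common 4KB PAGE_SIZE): with a 5/27 type/offset split, each swap area
 * is limited to 2^27 pages, i.e. 128M pages * 4KB = 512GB of swap per
 * type on a 32-bit-pgoff_t architecture.
 */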

/*
 * Use some of the swap file numbers for other purposes. This
 * is a convenient way to hook into the VM to trigger special
 * actions on faults.
 */

/*
 * NUMA node memory migration support
 */
#ifdef CONFIG_MIGRATION
#define SWP_MIGRATION_NUM 2
#define SWP_MIGRATION_READ	(MAX_SWAPFILES + SWP_HWPOISON_NUM)
#define SWP_MIGRATION_WRITE	(MAX_SWAPFILES + SWP_HWPOISON_NUM + 1)
#else
#define SWP_MIGRATION_NUM 0
#endif

/*
 * Handling of hardware poisoned pages with memory corruption.
 */
#ifdef CONFIG_MEMORY_FAILURE
#define SWP_HWPOISON_NUM 1
#define SWP_HWPOISON		MAX_SWAPFILES
#else
#define SWP_HWPOISON_NUM 0
#endif

#define MAX_SWAPFILES \
	((1 << MAX_SWAPFILES_SHIFT) - SWP_MIGRATION_NUM - SWP_HWPOISON_NUM)
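/*
 * Worked example (illustrative, assuming both CONFIG_MIGRATION and
 * CONFIG_MEMORY_FAILURE are enabled): 1 << 5 = 32 type values, of which
 * 2 + 1 are reserved, so MAX_SWAPFILES = 29 real swap areas and the top
 * values become SWP_HWPOISON = 29, SWP_MIGRATION_READ = 30 and
 * SWP_MIGRATION_WRITE = 31.
 */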

/*
 * Magic header for a swap area. The first part of the union is
 * what the swap magic looks like for the old (limited to 128MB)
 * swap area format, the second part of the union adds - in the
 * old reserved area - some extra information. Note that the first
 * kilobyte is reserved for boot loader or disk label stuff...
 *
 * Having the magic at the end of the PAGE_SIZE makes detecting swap
 * areas somewhat tricky on machines that support multiple page sizes.
 * For 2.5 we'll probably want to move the magic to just beyond the
 * bootbits...
 */
union swap_header {
	struct {
		char reserved[PAGE_SIZE - 10];
		char magic[10];			/* SWAP-SPACE or SWAPSPACE2 */
	} magic;
	struct {
		char		bootbits[1024];	/* Space for disklabel etc. */
		__u32		version;
		__u32		last_page;
		__u32		nr_badpages;
		unsigned char	sws_uuid[16];
		unsigned char	sws_volume[16];
		__u32		padding[117];
		__u32		badpages[1];
	} info;
};
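/*
 * Illustrative note (assuming 4KB pages): mkswap writes the "SWAPSPACE2"
 * magic into the last 10 bytes of the first page of the swap area, i.e.
 * at byte offset PAGE_SIZE - 10 = 4086, which is where swapon looks for
 * it when validating the header.
 */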

 /* A swap entry has to fit into an "unsigned long", as
  * the entry is hidden in the "index" field of the
  * swapper address space.
  */
typedef struct {
	unsigned long val;
} swp_entry_t;
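/*
 * Illustrative sketch only (the example_* names are hypothetical and
 * merely mirror the real swp_entry()/swp_type() helpers that live in
 * <linux/swapops.h>): the swap type occupies the top MAX_SWAPFILES_SHIFT
 * bits of val and the page offset the remaining low bits; the offset is
 * assumed to already fit in those bits.
 */
static inline swp_entry_t example_swp_entry(unsigned long type, pgoff_t offset)
{
	swp_entry_t entry;

	entry.val = (type << (sizeof(entry.val) * 8 - MAX_SWAPFILES_SHIFT)) |
		    offset;
	return entry;
}

static inline unsigned long example_swp_type(swp_entry_t entry)
{
	return entry.val >> (sizeof(entry.val) * 8 - MAX_SWAPFILES_SHIFT);
}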

/*
 * current->reclaim_state points to one of these when a task is running
 * memory reclaim
 */
struct reclaim_state {
	unsigned long reclaimed_slab;
};

#ifdef __KERNEL__

struct address_space;
struct sysinfo;
struct writeback_control;
struct zone;

/*
 * A swap extent maps a range of a swapfile's PAGE_SIZE pages onto a range of
 * disk blocks.  A list of swap extents maps the entire swapfile.  (Where the
 * term `swapfile' refers to either a blockdevice or an IS_REG file.  Apart
 * from setup, they're handled identically.)
 *
 * We always assume that blocks are of size PAGE_SIZE.
 */
struct swap_extent {
	struct list_head list;
	pgoff_t start_page;
	pgoff_t nr_pages;
	sector_t start_block;
};
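/*
 * Illustrative sketch only (example_extent_block() is hypothetical): once
 * the extent covering a swap page offset has been found, the backing disk
 * block is just a fixed offset from the extent's start; the real lookup in
 * mm/swapfile.c walks the extent list and then does the equivalent sum.
 */
static inline sector_t example_extent_block(struct swap_extent *se,
					    pgoff_t swap_page)
{
	return se->start_block + (swap_page - se->start_page);
}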

/*
 * Max bad pages in the new format..
 */
#define __swapoffset(x) ((unsigned long)&((union swap_header *)0)->x)
#define MAX_SWAP_BADPAGES \
	((__swapoffset(magic.magic) - __swapoffset(info.badpages)) / sizeof(int))
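/*
 * Worked example (illustrative, assuming 4KB pages and 4-byte int):
 * __swapoffset(magic.magic) = 4096 - 10 = 4086 and
 * __swapoffset(info.badpages) = 1024 + 3*4 + 2*16 + 117*4 = 1536,
 * so MAX_SWAP_BADPAGES = (4086 - 1536) / 4 = 637 entries.
 */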

enum {
	SWP_USED	= (1 << 0),	/* is slot in swap_info[] used? */
	SWP_WRITEOK	= (1 << 1),	/* ok to write to this swap?	*/
	SWP_DISCARDABLE = (1 << 2),	/* swapon+blkdev support discard */
	SWP_DISCARDING	= (1 << 3),	/* now discarding a free cluster */
	SWP_SOLIDSTATE	= (1 << 4),	/* blkdev seeks are cheap */
	SWP_CONTINUED	= (1 << 5),	/* swap_map has count continuation */
	SWP_BLKDEV	= (1 << 6),	/* it's a block device */
					/* add others here before... */
	SWP_SCANNING	= (1 << 8),	/* refcount in scan_swap_map */
};

#define SWAP_CLUSTER_MAX 32
#define COMPACT_CLUSTER_MAX SWAP_CLUSTER_MAX

/*
 * Ratio between the present memory in the zone and the "gap" that
 * we're allowing kswapd to shrink in addition to the per-zone high
 * wmark, even for zones that already have the high wmark satisfied,
 * in order to provide better per-zone lru behavior. We are ok to
 * spend not more than 1% of the memory for this zone balancing "gap".
 */
#define KSWAPD_ZONE_BALANCE_GAP_RATIO 100
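/*
 * Worked example (illustrative): with a ratio of 100, a zone with
 * 1,000,000 present pages is allowed a balance gap of roughly
 * 1,000,000 / 100 = 10,000 pages on top of its high watermark before
 * kswapd considers it balanced.
 */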

#define SWAP_MAP_MAX	0x3e	/* Max duplication count, in first swap_map */
#define SWAP_MAP_BAD	0x3f	/* Note pageblock is bad, in first swap_map */
#define SWAP_HAS_CACHE	0x40	/* Flag page is cached, in first swap_map */
#define SWAP_CONT_MAX	0x7f	/* Max count, in each swap_map continuation */
#define COUNT_CONTINUED	0x80	/* See swap_map continuation for full count */
#define SWAP_MAP_SHMEM	0xbf	/* Owned by shmem/tmpfs, in first swap_map */
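/*
 * Illustrative sketch only (hypothetical example_* name, mirroring
 * swap_count() in mm/swapfile.c): a swap_map byte holds the duplication
 * count in its low bits, SWAP_HAS_CACHE flags a swapcache page, and
 * COUNT_CONTINUED left in the masked value means the rest of the count
 * lives in a continuation page.
 */
static inline unsigned char example_swap_count(unsigned char ent)
{
	return ent & ~SWAP_HAS_CACHE;	/* drop the cache flag, keep the count */
}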

/*
 * The in-memory structure used to track swap areas.
 */
struct swap_info_struct {
	unsigned long	flags;		/* SWP_USED etc: see above */
	signed short	prio;		/* swap priority of this type */
	signed char	type;		/* strange name for an index */
	signed char	next;		/* next type on the swap list */
	unsigned int	max;		/* extent of the swap_map */
	unsigned char *swap_map;	/* vmalloc'ed array of usage counts */
	unsigned int lowest_bit;	/* index of first free in swap_map */
	unsigned int highest_bit;	/* index of last free in swap_map */
	unsigned int pages;		/* total of usable pages of swap */
	unsigned int inuse_pages;	/* number of those currently in use */
	unsigned int cluster_next;	/* likely index for next allocation */
	unsigned int cluster_nr;	/* countdown to next cluster search */
	unsigned int lowest_alloc;	/* while preparing discard cluster */
	unsigned int highest_alloc;	/* while preparing discard cluster */
	struct swap_extent *curr_swap_extent;
	struct swap_extent first_swap_extent;
	struct block_device *bdev;	/* swap device or bdev of swap file */
	struct file *swap_file;		/* seldom referenced */
	unsigned int old_block_size;	/* seldom referenced */
};

struct swap_list_t {
	int head;	/* head of priority-ordered swapfile list */
	int next;	/* swapfile to be used next */
};

/* Swap 50% full? Release swapcache more aggressively.. */
#define vm_swap_full() (nr_swap_pages*2 < total_swap_pages)

/* linux/mm/page_alloc.c */
extern unsigned long totalram_pages;
extern unsigned long totalreserve_pages;
extern unsigned int nr_free_buffer_pages(void);
extern unsigned int nr_free_pagecache_pages(void);

/* Definition of global_page_state not available yet */
#define nr_free_pages() global_page_state(NR_FREE_PAGES)


/* linux/mm/swap.c */
extern void __lru_cache_add(struct page *, enum lru_list lru);
extern void lru_cache_add_lru(struct page *, enum lru_list lru);
extern void lru_add_page_tail(struct zone* zone,
			      struct page *page, struct page *page_tail);
extern void activate_page(struct page *);
extern void mark_page_accessed(struct page *);
extern void lru_add_drain(void);
extern int lru_add_drain_all(void);
extern void rotate_reclaimable_page(struct page *page);
extern void deactivate_page(struct page *page);
extern void swap_setup(void);

extern void add_page_to_unevictable_list(struct page *page);

/**
 * lru_cache_add: add a page to the page lists
 * @page: the page to add
 */
static inline void lru_cache_add_anon(struct page *page)
{
	__lru_cache_add(page, LRU_INACTIVE_ANON);
}

static inline void lru_cache_add_file(struct page *page)
{
	__lru_cache_add(page, LRU_INACTIVE_FILE);
}

/* LRU Isolation modes. */
#define ISOLATE_INACTIVE 0	/* Isolate inactive pages. */
#define ISOLATE_ACTIVE 1	/* Isolate active pages. */
#define ISOLATE_BOTH 2		/* Isolate both active and inactive pages. */

/* linux/mm/vmscan.c */
extern unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
					gfp_t gfp_mask, nodemask_t *mask);
extern int __isolate_lru_page(struct page *page, int mode, int file);
extern unsigned long shrink_all_memory(unsigned long nr_pages);
extern int vm_swappiness;
extern int remove_mapping(struct address_space *mapping, struct page *page);
extern long vm_total_pages;

#ifdef CONFIG_NUMA
extern int zone_reclaim_mode;
extern int sysctl_min_unmapped_ratio;
extern int sysctl_min_slab_ratio;
extern int zone_reclaim(struct zone *, gfp_t, unsigned int);
#else
#define zone_reclaim_mode 0
static inline int zone_reclaim(struct zone *z, gfp_t mask, unsigned int order)
{
	return 0;
}
#endif

extern int page_evictable(struct page *page, struct vm_area_struct *vma);
extern void scan_mapping_unevictable_pages(struct address_space *);

extern unsigned long scan_unevictable_pages;
extern int scan_unevictable_handler(struct ctl_table *, int,
					void __user *, size_t *, loff_t *);
#ifdef CONFIG_NUMA
extern int scan_unevictable_register_node(struct node *node);
extern void scan_unevictable_unregister_node(struct node *node);
#else
static inline int scan_unevictable_register_node(struct node *node)
{
	return 0;
}
static inline void scan_unevictable_unregister_node(struct node *node)
{
}
#endif

extern int kswapd_run(int nid);
extern void kswapd_stop(int nid);
#ifdef CONFIG_CGROUP_MEM_RES_CTLR
extern int mem_cgroup_swappiness(struct mem_cgroup *mem);
#else
static inline int mem_cgroup_swappiness(struct mem_cgroup *mem)
{
	return vm_swappiness;
}
#endif
#ifdef CONFIG_SWAP
/* linux/mm/page_io.c */
extern int swap_readpage(struct page *);
extern int swap_writepage(struct page *page, struct writeback_control *wbc);
extern void end_swap_bio_read(struct bio *bio, int err);

/* linux/mm/swap_state.c */
extern struct address_space swapper_space;
#define total_swapcache_pages swapper_space.nrpages
extern void show_swap_cache_info(void);
extern int add_to_swap(struct page *);
extern int add_to_swap_cache(struct page *, swp_entry_t, gfp_t);
extern void __delete_from_swap_cache(struct page *);
extern void delete_from_swap_cache(struct page *);
extern void free_page_and_swap_cache(struct page *);
extern void free_pages_and_swap_cache(struct page **, int);
extern struct page *lookup_swap_cache(swp_entry_t);
extern struct page *read_swap_cache_async(swp_entry_t, gfp_t,
			struct vm_area_struct *vma, unsigned long addr);
extern struct page *swapin_readahead(swp_entry_t, gfp_t,
			struct vm_area_struct *vma, unsigned long addr);

/* linux/mm/swapfile.c */
extern long nr_swap_pages;
extern long total_swap_pages;
extern void si_swapinfo(struct sysinfo *);
extern swp_entry_t get_swap_page(void);
extern swp_entry_t get_swap_page_of_type(int);
extern int valid_swaphandles(swp_entry_t, unsigned long *);
extern int add_swap_count_continuation(swp_entry_t, gfp_t);
extern void swap_shmem_alloc(swp_entry_t);
extern int swap_duplicate(swp_entry_t);
extern int swapcache_prepare(swp_entry_t);
extern void swap_free(swp_entry_t);
extern void swapcache_free(swp_entry_t, struct page *page);
extern int free_swap_and_cache(swp_entry_t);
extern int swap_type_of(dev_t, sector_t, struct block_device **);
extern unsigned int count_swap_pages(int, int);
extern sector_t map_swap_page(struct page *, struct block_device **);
extern sector_t swapdev_block(int, pgoff_t);
extern int reuse_swap_page(struct page *);
extern int try_to_free_swap(struct page *);
struct backing_dev_info;

/* linux/mm/thrash.c */
extern struct mm_struct *swap_token_mm;
extern void grab_swap_token(struct mm_struct *);
extern void __put_swap_token(struct mm_struct *);
extern void disable_swap_token(struct mem_cgroup *memcg);

static inline int has_swap_token(struct mm_struct *mm)
{
	return (mm == swap_token_mm);
}

static inline void put_swap_token(struct mm_struct *mm)
{
	if (has_swap_token(mm))
		__put_swap_token(mm);
}

#ifdef CONFIG_CGROUP_MEM_RES_CTLR
extern void
mem_cgroup_uncharge_swapcache(struct page *page, swp_entry_t ent, bool swapout);
extern int mem_cgroup_count_swap_user(swp_entry_t ent, struct page **pagep);
#else
static inline void
mem_cgroup_uncharge_swapcache(struct page *page, swp_entry_t ent, bool swapout)
{
}
#endif
#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
extern void mem_cgroup_uncharge_swap(swp_entry_t ent);
#else
static inline void mem_cgroup_uncharge_swap(swp_entry_t ent)
{
}
#endif

#else /* CONFIG_SWAP */

#define nr_swap_pages				0L
#define total_swap_pages			0L
#define total_swapcache_pages			0UL

#define si_swapinfo(val) \
	do { (val)->freeswap = (val)->totalswap = 0; } while (0)
/* only sparc can not include linux/pagemap.h in this file
 * so leave page_cache_release and release_pages undeclared... */
#define free_page_and_swap_cache(page) \
	page_cache_release(page)
#define free_pages_and_swap_cache(pages, nr) \
	release_pages((pages), (nr), 0);

static inline void show_swap_cache_info(void)
{
}

#define free_swap_and_cache(swp)	is_migration_entry(swp)
#define swapcache_prepare(swp)		is_migration_entry(swp)

static inline int add_swap_count_continuation(swp_entry_t swp, gfp_t gfp_mask)
{
	return 0;
}

static inline void swap_shmem_alloc(swp_entry_t swp)
{
}

static inline int swap_duplicate(swp_entry_t swp)
{
	return 0;
}

static inline void swap_free(swp_entry_t swp)
{
}

static inline void swapcache_free(swp_entry_t swp, struct page *page)
{
}

static inline struct page *swapin_readahead(swp_entry_t swp, gfp_t gfp_mask,
			struct vm_area_struct *vma, unsigned long addr)
{
	return NULL;
}

static inline int swap_writepage(struct page *p, struct writeback_control *wbc)
{
	return 0;
}

static inline struct page *lookup_swap_cache(swp_entry_t swp)
{
	return NULL;
}

static inline int add_to_swap(struct page *page)
{
	return 0;
}

static inline int add_to_swap_cache(struct page *page, swp_entry_t entry,
							gfp_t gfp_mask)
{
	return -1;
}

static inline void __delete_from_swap_cache(struct page *page)
{
}

static inline void delete_from_swap_cache(struct page *page)
{
}

#define reuse_swap_page(page)	(page_mapcount(page) == 1)

static inline int try_to_free_swap(struct page *page)
{
	return 0;
}

static inline swp_entry_t get_swap_page(void)
{
	swp_entry_t entry;
	entry.val = 0;
	return entry;
}

/* linux/mm/thrash.c */
static inline void put_swap_token(struct mm_struct *mm)
{
}

static inline void grab_swap_token(struct mm_struct *mm)
{
}

static inline int has_swap_token(struct mm_struct *mm)
{
	return 0;
}

static inline void disable_swap_token(struct mem_cgroup *memcg)
{
}

static inline void
mem_cgroup_uncharge_swapcache(struct page *page, swp_entry_t ent)
{
}

#ifdef CONFIG_CGROUP_MEM_RES_CTLR
static inline int
mem_cgroup_count_swap_user(swp_entry_t ent, struct page **pagep)
{
	return 0;
}
#endif

#endif /* CONFIG_SWAP */
#endif /* __KERNEL__*/
#endif /* _LINUX_SWAP_H */