#include <linux/swap_cgroup.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>

#include <linux/swapops.h> /* depends on mm.h include */

static DEFINE_MUTEX(swap_cgroup_mutex);
struct swap_cgroup_ctrl {
        struct page **map;
        unsigned long length;
        spinlock_t lock;
};

static struct swap_cgroup_ctrl swap_cgroup_ctrl[MAX_SWAPFILES];

struct swap_cgroup {
        unsigned short id;
};
#define SC_PER_PAGE	(PAGE_SIZE/sizeof(struct swap_cgroup))
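
/*
 * Illustrative arithmetic (assuming 4 KiB pages, not spelled out in this
 * file): sizeof(struct swap_cgroup) == 2, so SC_PER_PAGE works out to
 * 4096 / 2 == 2048 records per backing page.
 */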

/*
 * SwapCgroup implements "lookup" and "exchange" operations.
 * In typical usage, swap_cgroup is accessed via memcg's charge/uncharge
 * operations against SwapCache. At swap_free(), it is accessed directly
 * from swap.
 *
 * This means:
 *  - there is no race in "exchange" when accessed via SwapCache, because
 *    SwapCache (and its swp_entry) is under lock.
 *  - when called via swap_free(), there is no user of the entry and hence
 *    no race.
 * Therefore, no lock is needed around "exchange".
 *
 * TODO: these buffers could be pushed out to HIGHMEM.
 */
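
/*
 * Minimal usage sketch (illustrative only; the caller shown is an
 * assumption, not code from this file): the charge path records the
 * owning memcg's id, and swap-in/swap_free read it back later:
 *
 *	unsigned short old_id, id;
 *
 *	old_id = swap_cgroup_record(ent, memcg_id);	(at charge time)
 *	id = lookup_swap_cgroup_id(ent);		(at lookup time)
 */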

/*
 * allocate buffer for swap_cgroup.
 */
static int swap_cgroup_prepare(int type)
{
        struct page *page;
        struct swap_cgroup_ctrl *ctrl;
        unsigned long idx, max;

        ctrl = &swap_cgroup_ctrl[type];

        for (idx = 0; idx < ctrl->length; idx++) {
                page = alloc_page(GFP_KERNEL | __GFP_ZERO);
                if (!page)
                        goto not_enough_page;
                ctrl->map[idx] = page;

                if (!(idx % SWAP_CLUSTER_MAX))
                        cond_resched();
        }
        return 0;
not_enough_page:
        max = idx;
        for (idx = 0; idx < max; idx++)
                __free_page(ctrl->map[idx]);

        return -ENOMEM;
}
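
/*
 * Note: SWAP_CLUSTER_MAX is 32, so the loop above yields to the scheduler
 * after every 32 page allocations (swap_cgroup_swapoff() below does the
 * same while freeing), keeping latency bounded on very large swap areas.
 */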

static struct swap_cgroup *lookup_swap_cgroup(swp_entry_t ent,
                                        struct swap_cgroup_ctrl **ctrlp)
{
        pgoff_t offset = swp_offset(ent);
        struct swap_cgroup_ctrl *ctrl;
        struct page *mappage;
        struct swap_cgroup *sc;

        ctrl = &swap_cgroup_ctrl[swp_type(ent)];
        if (ctrlp)
                *ctrlp = ctrl;

        mappage = ctrl->map[offset / SC_PER_PAGE];
        sc = page_address(mappage);
        return sc + offset % SC_PER_PAGE;
}
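
/*
 * Worked example of the index math above (assuming 4 KiB pages, so
 * SC_PER_PAGE == 2048): a swap offset of 5000 maps to backing page
 * map[5000 / 2048] == map[2], at slot 5000 % 2048 == 904 within that page.
 */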

/**
 * swap_cgroup_cmpxchg - cmpxchg mem_cgroup's id for this swp_entry.
 * @ent: swap entry to be cmpxchged
 * @old: old id
 * @new: new id
 *
 * Returns the old id on success, 0 on failure.
 * (No mem_cgroup uses 0 as its id.)
 */
unsigned short swap_cgroup_cmpxchg(swp_entry_t ent,
                                   unsigned short old, unsigned short new)
{
        struct swap_cgroup_ctrl *ctrl;
        struct swap_cgroup *sc;
        unsigned long flags;
        unsigned short retval;

        sc = lookup_swap_cgroup(ent, &ctrl);

        spin_lock_irqsave(&ctrl->lock, flags);
        retval = sc->id;
        if (retval == old)
                sc->id = new;
        else
                retval = 0;
        spin_unlock_irqrestore(&ctrl->lock, flags);
        return retval;
}
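
/*
 * Illustrative caller pattern (an assumption, not code from this file):
 * move a swap charge to a new owner only if the entry still belongs to
 * the expected one:
 *
 *	if (swap_cgroup_cmpxchg(ent, old_id, new_id) == old_id)
 *		(the charge was moved)
 *	else
 *		(the entry was re-owned meanwhile; retry or give up)
 */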

/**
 * swap_cgroup_record - record mem_cgroup for this swp_entry.
 * @ent: swap entry to be recorded into
 * @id: mem_cgroup id to be recorded
 *
 * Returns the old value on success, 0 on failure.
 * (Of course, the old value can itself be 0.)
 */
unsigned short swap_cgroup_record(swp_entry_t ent, unsigned short id)
{
        struct swap_cgroup_ctrl *ctrl;
        struct swap_cgroup *sc;
        unsigned short old;
        unsigned long flags;

        sc = lookup_swap_cgroup(ent, &ctrl);

        spin_lock_irqsave(&ctrl->lock, flags);
        old = sc->id;
        sc->id = id;
        spin_unlock_irqrestore(&ctrl->lock, flags);

        return old;
}

/**
 * lookup_swap_cgroup_id - look up the mem_cgroup id tied to a swap entry
 * @ent: swap entry to be looked up.
 *
 * Returns the ID of the mem_cgroup on success, 0 on failure
 * (0 is an invalid ID).
 */
unsigned short lookup_swap_cgroup_id(swp_entry_t ent)
{
        return lookup_swap_cgroup(ent, NULL)->id;
}

int swap_cgroup_swapon(int type, unsigned long max_pages)
{
        void *array;
        unsigned long array_size;
        unsigned long length;
        struct swap_cgroup_ctrl *ctrl;

        if (!do_swap_account)
                return 0;

        length = DIV_ROUND_UP(max_pages, SC_PER_PAGE);
        array_size = length * sizeof(void *);

        array = vzalloc(array_size);
        if (!array)
                goto nomem;

        ctrl = &swap_cgroup_ctrl[type];
        mutex_lock(&swap_cgroup_mutex);
        ctrl->length = length;
        ctrl->map = array;
        spin_lock_init(&ctrl->lock);
        if (swap_cgroup_prepare(type)) {
                /* memory shortage */
                ctrl->map = NULL;
                ctrl->length = 0;
                mutex_unlock(&swap_cgroup_mutex);
                vfree(array);
                goto nomem;
        }
        mutex_unlock(&swap_cgroup_mutex);

        return 0;
nomem:
        pr_info("couldn't allocate enough memory for swap_cgroup\n");
        pr_info("swap_cgroup can be disabled by swapaccount=0 boot option\n");
        return -ENOMEM;
}
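
/*
 * Sizing example (assuming 4 KiB pages and a 64-bit kernel, so
 * SC_PER_PAGE == 2048): a 1 GiB swap device has max_pages == 262144,
 * giving length == 128 backing pages, i.e. 512 KiB of records plus a
 * 1 KiB map[] pointer array.
 */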

void swap_cgroup_swapoff(int type)
{
        struct page **map;
        unsigned long i, length;
        struct swap_cgroup_ctrl *ctrl;

        if (!do_swap_account)
                return;

        mutex_lock(&swap_cgroup_mutex);
        ctrl = &swap_cgroup_ctrl[type];
        map = ctrl->map;
        length = ctrl->length;
        ctrl->map = NULL;
        ctrl->length = 0;
        mutex_unlock(&swap_cgroup_mutex);

        if (map) {
                for (i = 0; i < length; i++) {
                        struct page *page = map[i];

                        if (page)
                                __free_page(page);
                        if (!(i % SWAP_CLUSTER_MAX))
                                cond_resched();
                }
                vfree(map);
        }
}