#include <linux/swap_cgroup.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>

#include <linux/swapops.h> /* depends on mm.h include */

static DEFINE_MUTEX(swap_cgroup_mutex);
struct swap_cgroup_ctrl {
	struct page **map;
	unsigned long length;
	spinlock_t lock;
};

static struct swap_cgroup_ctrl swap_cgroup_ctrl[MAX_SWAPFILES];

struct swap_cgroup {
	unsigned short id;
};
#define SC_PER_PAGE	(PAGE_SIZE/sizeof(struct swap_cgroup))
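
/*
 * Worked example (illustrative, not part of the original file): with 4KiB
 * pages and a 2-byte id, SC_PER_PAGE is 4096 / 2 = 2048 swap entries
 * tracked per backing page.
 */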

/*
 * SwapCgroup implements "lookup" and "exchange" operations.
 * In typical usage, swap_cgroup is accessed via memcg's charge/uncharge
 * operations against SwapCache. At swap_free(), it is accessed directly
 * from the swap code.
 *
 * This means:
 * - there is no race in "exchange" when accessed via SwapCache, because
 *   SwapCache (and its swp_entry) is under lock.
 * - when called via swap_free(), there is no other user of the entry, so
 *   again there is no race.
 * Hence, no lock is needed around "exchange".
 *
 * TODO: these buffers could be pushed out to HIGHMEM.
 */

/*
 * allocate buffer for swap_cgroup.
 */
static int swap_cgroup_prepare(int type)
{
	struct page *page;
	struct swap_cgroup_ctrl *ctrl;
	unsigned long idx, max;

	ctrl = &swap_cgroup_ctrl[type];

	for (idx = 0; idx < ctrl->length; idx++) {
		page = alloc_page(GFP_KERNEL | __GFP_ZERO);
		if (!page)
			goto not_enough_page;
		ctrl->map[idx] = page;

		if (!(idx % SWAP_CLUSTER_MAX))
			cond_resched();
	}
	return 0;
not_enough_page:
	max = idx;
	for (idx = 0; idx < max; idx++)
		__free_page(ctrl->map[idx]);

	return -ENOMEM;
}
static struct swap_cgroup *__lookup_swap_cgroup(struct swap_cgroup_ctrl *ctrl,
						pgoff_t offset)
{
	struct page *mappage;
	struct swap_cgroup *sc;

	mappage = ctrl->map[offset / SC_PER_PAGE];
	sc = page_address(mappage);
	return sc + offset % SC_PER_PAGE;
}
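
/*
 * Worked example (illustrative, assuming 4KiB pages, so SC_PER_PAGE == 2048):
 * offset 5000 lands in map[5000 / 2048] == map[2], at slot
 * 5000 % 2048 == 904 within that page.
 */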

static struct swap_cgroup *lookup_swap_cgroup(swp_entry_t ent,
					struct swap_cgroup_ctrl **ctrlp)
{
	pgoff_t offset = swp_offset(ent);
	struct swap_cgroup_ctrl *ctrl;

	ctrl = &swap_cgroup_ctrl[swp_type(ent)];
	if (ctrlp)
		*ctrlp = ctrl;
	return __lookup_swap_cgroup(ctrl, offset);
}

/**
 * swap_cgroup_cmpxchg - cmpxchg mem_cgroup's id for this swp_entry.
 * @ent: swap entry to be cmpxchged
 * @old: old id
 * @new: new id
 *
 * Returns the old id on success, 0 on failure.
 * (No mem_cgroup uses 0 as its id.)
 */
unsigned short swap_cgroup_cmpxchg(swp_entry_t ent,
					unsigned short old, unsigned short new)
{
	struct swap_cgroup_ctrl *ctrl;
	struct swap_cgroup *sc;
	unsigned long flags;
	unsigned short retval;

	sc = lookup_swap_cgroup(ent, &ctrl);

	spin_lock_irqsave(&ctrl->lock, flags);
	retval = sc->id;
	if (retval == old)
		sc->id = new;
	else
		retval = 0;
	spin_unlock_irqrestore(&ctrl->lock, flags);
	return retval;
}
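
/*
 * Example (illustrative, not part of the original file): a charge-moving
 * path could hand a swapped-out entry from one memcg to another, using the
 * return value to detect a concurrent change of owner. old_id and new_id
 * are hypothetical caller-side values (both nonzero):
 *
 *	if (swap_cgroup_cmpxchg(ent, old_id, new_id) == old_id)
 *		moved = true;
 */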

/**
 * swap_cgroup_record - record mem_cgroup for a set of swap entries
 * @ent: the first swap entry to be recorded into
 * @id: mem_cgroup id to be recorded
 * @nr_ents: number of swap entries to be recorded
 *
 * Returns the old value on success, 0 on failure.
 * (Of course, the old value may itself be 0.)
 */
unsigned short swap_cgroup_record(swp_entry_t ent, unsigned short id,
				  unsigned int nr_ents)
{
	struct swap_cgroup_ctrl *ctrl;
	struct swap_cgroup *sc;
	unsigned short old;
	unsigned long flags;
	pgoff_t offset = swp_offset(ent);
	pgoff_t end = offset + nr_ents;

	sc = lookup_swap_cgroup(ent, &ctrl);

	spin_lock_irqsave(&ctrl->lock, flags);
	old = sc->id;
	for (;;) {
		VM_BUG_ON(sc->id != old);
		sc->id = id;
		offset++;
		if (offset == end)
			break;
		if (offset % SC_PER_PAGE)
			sc++;
		else
			sc = __lookup_swap_cgroup(ctrl, offset);
	}
	spin_unlock_irqrestore(&ctrl->lock, flags);

	return old;
}
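
/*
 * Example (illustrative, not part of the original file): a swap-out path
 * might stamp all entries of a contiguous swap cluster with the owning
 * memcg in one call, expecting no previous owner. mem_cgroup_id() is
 * assumed to return the memcg's nonzero short id:
 *
 *	oldid = swap_cgroup_record(entry, mem_cgroup_id(memcg), nr_pages);
 *	VM_BUG_ON(oldid);
 */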

/**
 * lookup_swap_cgroup_id - lookup mem_cgroup id tied to swap entry
 * @ent: swap entry to be looked up.
 *
 * Returns the ID of the mem_cgroup on success, 0 on failure.
 * (0 is an invalid ID.)
 */
unsigned short lookup_swap_cgroup_id(swp_entry_t ent)
{
	return lookup_swap_cgroup(ent, NULL)->id;
}
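
/*
 * Example (illustrative, not part of the original file): a swap-in charge
 * path can map the recorded id back to a memcg. mem_cgroup_from_id() is
 * assumed available and, like other id-to-memcg lookups, used under RCU:
 *
 *	rcu_read_lock();
 *	memcg = mem_cgroup_from_id(lookup_swap_cgroup_id(ent));
 *	... use memcg ...
 *	rcu_read_unlock();
 */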

int swap_cgroup_swapon(int type, unsigned long max_pages)
{
	void *array;
	unsigned long array_size;
	unsigned long length;
	struct swap_cgroup_ctrl *ctrl;

	if (!do_swap_account)
		return 0;

	length = DIV_ROUND_UP(max_pages, SC_PER_PAGE);
	array_size = length * sizeof(void *);

	array = vzalloc(array_size);
	if (!array)
		goto nomem;

	ctrl = &swap_cgroup_ctrl[type];
	mutex_lock(&swap_cgroup_mutex);
	ctrl->length = length;
	ctrl->map = array;
	spin_lock_init(&ctrl->lock);
	if (swap_cgroup_prepare(type)) {
		/* memory shortage */
		ctrl->map = NULL;
		ctrl->length = 0;
		mutex_unlock(&swap_cgroup_mutex);
		vfree(array);
		goto nomem;
	}
	mutex_unlock(&swap_cgroup_mutex);

	return 0;
nomem:
	pr_info("couldn't allocate enough memory for swap_cgroup\n");
	pr_info("swap_cgroup can be disabled by swapaccount=0 boot option\n");
	return -ENOMEM;
}
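
/*
 * Worked example (illustrative, assuming 4KiB pages): a 1GiB swap device
 * has max_pages == 262144, so length == DIV_ROUND_UP(262144, 2048) == 128;
 * the map then costs 128 * 8 == 1KiB of pointers plus 128 pages (512KiB)
 * of per-entry id storage.
 */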

void swap_cgroup_swapoff(int type)
{
	struct page **map;
	unsigned long i, length;
	struct swap_cgroup_ctrl *ctrl;

	if (!do_swap_account)
		return;

	mutex_lock(&swap_cgroup_mutex);
	ctrl = &swap_cgroup_ctrl[type];
	map = ctrl->map;
	length = ctrl->length;
	ctrl->map = NULL;
	ctrl->length = 0;
	mutex_unlock(&swap_cgroup_mutex);

	if (map) {
		for (i = 0; i < length; i++) {
			struct page *page = map[i];

			if (page)
				__free_page(page);
			if (!(i % SWAP_CLUSTER_MAX))
				cond_resched();
		}
		vfree(map);
	}
}