/*
 * Linux VM pressure
 *
 * Copyright 2012 Linaro Ltd.
 * Anton Vorontsov <anton.vorontsov@linaro.org>
 *
 * Based on ideas from Andrew Morton, David Rientjes, KOSAKI Motohiro,
 * Leonid Moiseichuk, Mel Gorman, Minchan Kim and Pekka Enberg.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 */

#include <linux/cgroup.h>
#include <linux/fs.h>
#include <linux/log2.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/vmstat.h>
#include <linux/eventfd.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/printk.h>
#include <linux/vmpressure.h>

/*
 * The window size (vmpressure_win) is the number of scanned pages before
 * we try to analyze the scanned/reclaimed ratio. The window is thus used
 * as a rate-limit tunable for the "low" level notification, and also for
 * averaging the ratio for the medium/critical levels. A small window
 * can cause many false positives, but too big a window will delay the
 * notifications.
 *
 * As the vmscan reclaimer logic works with chunks which are multiples of
 * SWAP_CLUSTER_MAX, it makes sense to use it for the window size as well.
 *
 * TODO: Make the window size depend on machine size, as we do for vmstat
 * thresholds. Currently we set it to 512 pages (2MB for 4KB pages).
 */
static const unsigned long vmpressure_win = SWAP_CLUSTER_MAX * 16;

/*
 * These thresholds are used when we account memory pressure through
 * scanned/reclaimed ratio. The current values were chosen empirically. In
 * essence, they are percents: the higher the value, the more unsuccessful
 * reclaims there were. For example, reclaiming only 40 of 100 scanned
 * pages yields a pressure of 60, which is the "medium" threshold.
 */
static const unsigned int vmpressure_level_med = 60;
static const unsigned int vmpressure_level_critical = 95;

/*
 * When there are too few pages left to scan, vmpressure() may miss the
 * critical pressure as the number of pages will be less than the "window
 * size". However, in that case the vmscan priority will rise quickly as
 * the reclaimer tries to scan the LRUs more deeply.
 *
 * The vmscan logic considers these special priorities:
 *
 * prio == DEF_PRIORITY (12): reclaimer starts with that value
 * prio <= DEF_PRIORITY - 2 : kswapd becomes somewhat overwhelmed
 * prio == 0                : close to OOM, kernel scans every page in an lru
 *
 * Any value in this range is acceptable for this tunable (i.e. from 12 to
 * 0). The current value of vmpressure_level_critical_prio was chosen
 * empirically; ilog2(100 / 10) == 3, so in essence we consider the level
 * critical when the scanning depth is ~10% of the lru size (vmscan scans
 * 'lru_size >> prio' pages, so it is actually 12.5%, or one eighth).
 */
static const unsigned int vmpressure_level_critical_prio = ilog2(100 / 10);

static struct vmpressure *work_to_vmpressure(struct work_struct *work)
{
	return container_of(work, struct vmpressure, work);
}

static struct vmpressure *vmpressure_parent(struct vmpressure *vmpr)
{
	struct cgroup_subsys_state *css = vmpressure_to_css(vmpr);
	struct mem_cgroup *memcg = mem_cgroup_from_css(css);

	memcg = parent_mem_cgroup(memcg);
	if (!memcg)
		return NULL;
	return memcg_to_vmpressure(memcg);
}

enum vmpressure_levels {
	VMPRESSURE_LOW = 0,
	VMPRESSURE_MEDIUM,
	VMPRESSURE_CRITICAL,
	VMPRESSURE_NUM_LEVELS,
};

static const char * const vmpressure_str_levels[] = {
	[VMPRESSURE_LOW] = "low",
	[VMPRESSURE_MEDIUM] = "medium",
	[VMPRESSURE_CRITICAL] = "critical",
};

static enum vmpressure_levels vmpressure_level(unsigned long pressure)
{
	if (pressure >= vmpressure_level_critical)
		return VMPRESSURE_CRITICAL;
	else if (pressure >= vmpressure_level_med)
		return VMPRESSURE_MEDIUM;
	return VMPRESSURE_LOW;
}

static enum vmpressure_levels vmpressure_calc_level(unsigned long scanned,
						    unsigned long reclaimed)
{
	unsigned long scale = scanned + reclaimed;
	unsigned long pressure;

	/*
	 * We calculate the ratio (in percent) of how many pages were
	 * scanned vs. reclaimed in a given time frame (window). Note that
	 * time is in VM reclaimer's "ticks", i.e. number of pages
	 * scanned. This makes it possible to set the desired reaction
	 * time, and also serves as a rate limit.
	 */
	pressure = scale - (reclaimed * scale / scanned);
	pressure = pressure * 100 / scale;

	pr_debug("%s: %3lu (s: %lu r: %lu)\n", __func__, pressure,
		 scanned, reclaimed);

	return vmpressure_level(pressure);
}

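/*
 * Worked example for vmpressure_calc_level(), with hypothetical numbers:
 * scanned = 512 and reclaimed = 128 give scale = 640, so
 * pressure = 640 - 128 * 640 / 512 = 480, and 480 * 100 / 640 = 75,
 * i.e. "medium" with the default thresholds. Algebraically the result is
 * simply 100 * (scanned - reclaimed) / scanned; the intermediate scaling
 * helps preserve precision in the integer divisions.
 */
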
struct vmpressure_event {
	struct eventfd_ctx *efd;
	enum vmpressure_levels level;
	struct list_head node;
};

/*
 * Signal every registered eventfd whose threshold is at or below @level;
 * returns true if at least one listener was notified.
 */
static bool vmpressure_event(struct vmpressure *vmpr,
			     enum vmpressure_levels level)
{
	struct vmpressure_event *ev;
	bool signalled = false;

	mutex_lock(&vmpr->events_lock);

	list_for_each_entry(ev, &vmpr->events, node) {
		if (level >= ev->level) {
			eventfd_signal(ev->efd, 1);
			signalled = true;
		}
	}

	mutex_unlock(&vmpr->events_lock);

	return signalled;
}

static void vmpressure_work_fn(struct work_struct *work)
{
	struct vmpressure *vmpr = work_to_vmpressure(work);
	unsigned long scanned;
	unsigned long reclaimed;
	enum vmpressure_levels level;

	spin_lock(&vmpr->sr_lock);
	/*
	 * Several contexts might be calling vmpressure(), so it is
	 * possible that the work was rescheduled again before the old
	 * work context cleared the counters. In that case we will run
	 * just after the old work returns, but then scanned might be
	 * zero here. If so, there is nothing to report yet.
	 */
	scanned = vmpr->tree_scanned;
	if (!scanned) {
		spin_unlock(&vmpr->sr_lock);
		return;
	}

	reclaimed = vmpr->tree_reclaimed;
	vmpr->tree_scanned = 0;
	vmpr->tree_reclaimed = 0;
	spin_unlock(&vmpr->sr_lock);

	level = vmpressure_calc_level(scanned, reclaimed);

	do {
		if (vmpressure_event(vmpr, level))
			break;
		/*
		 * If not handled, propagate the event upward into the
		 * hierarchy.
		 */
	} while ((vmpr = vmpressure_parent(vmpr)));
}

/**
 * vmpressure() - Account memory pressure through scanned/reclaimed ratio
 * @gfp: reclaimer's gfp mask
 * @memcg: cgroup memory controller handle
 * @tree: legacy subtree mode
 * @scanned: number of pages scanned
 * @reclaimed: number of pages reclaimed
 *
 * This function should be called from the vmscan reclaim path to account
 * "instantaneous" memory pressure (scanned/reclaimed ratio). The raw
 * pressure index is then further refined and averaged over time.
 *
 * If @tree is set, vmpressure is in traditional userspace reporting
 * mode: @memcg is considered the pressure root and userspace is
 * notified of the entire subtree's reclaim efficiency.
 *
 * If @tree is not set, reclaim efficiency is recorded for @memcg, and
 * only in-kernel users are notified.
 *
 * This function does not return any value.
 */
void vmpressure(gfp_t gfp, struct mem_cgroup *memcg, bool tree,
		unsigned long scanned, unsigned long reclaimed)
{
	struct vmpressure *vmpr = memcg_to_vmpressure(memcg);

	/*
	 * Here we only want to account pressure that userland is able to
	 * help us with. For example, suppose that DMA zone is under
	 * pressure; if we notify userland about that kind of pressure,
	 * then it will be mostly a waste as it will trigger unnecessary
	 * freeing of memory by userland (since userland is more likely to
	 * have HIGHMEM/MOVABLE pages instead of the DMA fallback). That
	 * is why we include only movable, highmem and FS/IO pages.
	 * Indirect reclaim (kswapd) sets sc->gfp_mask to GFP_KERNEL, so
	 * we account it too.
	 */
	if (!(gfp & (__GFP_HIGHMEM | __GFP_MOVABLE | __GFP_IO | __GFP_FS)))
		return;

	/*
	 * If we got here with no pages scanned, then that is an indicator
	 * that the reclaimer was unable to find any shrinkable LRUs at the
	 * current scanning depth. That does not mean we should report
	 * critical pressure yet: if the scanning priority (scanning depth)
	 * goes too high (deep), we will be notified through
	 * vmpressure_prio(). So far, keep calm.
	 */
	if (!scanned)
		return;

	if (tree) {
		spin_lock(&vmpr->sr_lock);
		scanned = vmpr->tree_scanned += scanned;
		vmpr->tree_reclaimed += reclaimed;
		spin_unlock(&vmpr->sr_lock);

		if (scanned < vmpressure_win)
			return;
		schedule_work(&vmpr->work);
	} else {
		enum vmpressure_levels level;

		/* For now, no users for root-level efficiency */
		if (!memcg || memcg == root_mem_cgroup)
			return;

		spin_lock(&vmpr->sr_lock);
		scanned = vmpr->scanned += scanned;
		reclaimed = vmpr->reclaimed += reclaimed;
		if (scanned < vmpressure_win) {
			spin_unlock(&vmpr->sr_lock);
			return;
		}
		vmpr->scanned = vmpr->reclaimed = 0;
		spin_unlock(&vmpr->sr_lock);

		level = vmpressure_calc_level(scanned, reclaimed);

		if (level > VMPRESSURE_LOW) {
			/*
			 * Let the socket buffer allocator know that
			 * we are having trouble reclaiming LRU pages.
			 *
			 * For hysteresis, keep the pressure state
			 * asserted for a second, in which subsequent
			 * pressure events can occur; it is consumed by
			 * mem_cgroup_under_socket_pressure().
			 */
			memcg->socket_pressure = jiffies + HZ;
		}
	}
}

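/*
 * A sketch of the expected call sites (mm/vmscan.c is authoritative):
 * the reclaimer reports the target memcg's whole subtree with
 * tree == true, and each individually scanned memcg with tree == false,
 * roughly:
 *
 *	vmpressure(sc->gfp_mask, sc->target_mem_cgroup, true,
 *		   sc->nr_scanned - nr_scanned,
 *		   sc->nr_reclaimed - nr_reclaimed);
 */
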
/**
 * vmpressure_prio() - Account memory pressure through reclaimer priority level
 * @gfp: reclaimer's gfp mask
 * @memcg: cgroup memory controller handle
 * @prio: reclaimer's priority
 *
 * This function should be called from the reclaim path every time the
 * vmscan's reclaiming priority (scanning depth) changes.
 *
 * This function does not return any value.
 */
void vmpressure_prio(gfp_t gfp, struct mem_cgroup *memcg, int prio)
{
	/*
	 * We only use prio for accounting the critical level. For more
	 * info see the comment for vmpressure_level_critical_prio above.
	 */
	if (prio > vmpressure_level_critical_prio)
		return;

	/*
	 * OK, the prio is below the threshold, so update the vmpressure
	 * information before the reclaimer dives into a long, deep scan.
	 * Passing scanned = vmpressure_win, reclaimed = 0 to vmpressure()
	 * yields a pressure of 100 * (win - 0) / win = 100, which is above
	 * vmpressure_level_critical, i.e. we signal the 'critical' level.
	 */
	vmpressure(gfp, memcg, true, vmpressure_win, 0);
}

/**
 * vmpressure_register_event() - Bind vmpressure notifications to an eventfd
 * @memcg: memcg that is interested in vmpressure notifications
 * @eventfd: eventfd context to link notifications with
 * @args: event arguments (used to set up a pressure level threshold)
 *
 * This function associates eventfd context with the vmpressure
 * infrastructure, so that the notifications will be delivered to the
 * @eventfd. The @args parameter is a string that denotes the pressure
 * level threshold (one of vmpressure_str_levels, i.e. "low", "medium",
 * or "critical").
 *
 * To be used as a memcg event method.
 */
int vmpressure_register_event(struct mem_cgroup *memcg,
			      struct eventfd_ctx *eventfd, const char *args)
{
	struct vmpressure *vmpr = memcg_to_vmpressure(memcg);
	struct vmpressure_event *ev;
	int level;

	for (level = 0; level < VMPRESSURE_NUM_LEVELS; level++) {
		if (!strcmp(vmpressure_str_levels[level], args))
			break;
	}

	if (level >= VMPRESSURE_NUM_LEVELS)
		return -EINVAL;

	ev = kzalloc(sizeof(*ev), GFP_KERNEL);
	if (!ev)
		return -ENOMEM;

	ev->efd = eventfd;
	ev->level = level;

	mutex_lock(&vmpr->events_lock);
	list_add(&ev->node, &vmpr->events);
	mutex_unlock(&vmpr->events_lock);

	return 0;
}

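/*
 * Minimal userspace sketch of the registration ABI (assuming cgroup v1
 * with the memory controller mounted at /sys/fs/cgroup/memory; see
 * Documentation/cgroups/memory.txt for the authoritative description).
 * Userspace creates an eventfd, writes
 * "<event_fd> <pressure_level_fd> <level>" to cgroup.event_control, and
 * then a read of the eventfd blocks until the chosen level is crossed:
 *
 *	int efd = eventfd(0, 0);
 *	int lfd = open("/sys/fs/cgroup/memory/memory.pressure_level",
 *		       O_RDONLY);
 *	int cfd = open("/sys/fs/cgroup/memory/cgroup.event_control",
 *		       O_WRONLY);
 *	uint64_t counter;
 *	char buf[32];
 *
 *	snprintf(buf, sizeof(buf), "%d %d medium", efd, lfd);
 *	write(cfd, buf, strlen(buf));
 *	read(efd, &counter, sizeof(counter));
 */
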
/**
 * vmpressure_unregister_event() - Unbind eventfd from vmpressure
 * @memcg: memcg handle
 * @eventfd: eventfd context that was used to link vmpressure with the @memcg
 *
 * This function does internal manipulations to detach the @eventfd from
 * the vmpressure notifications, and then frees internal resources
 * associated with the @eventfd (but the @eventfd itself is not freed).
 *
 * To be used as a memcg event method.
 */
void vmpressure_unregister_event(struct mem_cgroup *memcg,
				 struct eventfd_ctx *eventfd)
{
	struct vmpressure *vmpr = memcg_to_vmpressure(memcg);
	struct vmpressure_event *ev;

	mutex_lock(&vmpr->events_lock);
	list_for_each_entry(ev, &vmpr->events, node) {
		if (ev->efd != eventfd)
			continue;
		list_del(&ev->node);
		kfree(ev);
		break;
	}
	mutex_unlock(&vmpr->events_lock);
}

/**
 * vmpressure_init() - Initialize vmpressure control structure
 * @vmpr: Structure to be initialized
 *
 * This function should be called on every allocated vmpressure structure
 * before any usage.
 */
void vmpressure_init(struct vmpressure *vmpr)
{
	spin_lock_init(&vmpr->sr_lock);
	mutex_init(&vmpr->events_lock);
	INIT_LIST_HEAD(&vmpr->events);
	INIT_WORK(&vmpr->work, vmpressure_work_fn);
}

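/*
 * Note: the memory controller embeds one struct vmpressure per memcg
 * (memcg_to_vmpressure() resolves it), and is expected to call
 * vmpressure_init() when a cgroup is created and vmpressure_cleanup()
 * before the structure is freed.
 */
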
/**
 * vmpressure_cleanup() - shuts down vmpressure control structure
 * @vmpr: Structure to be cleaned up
 *
 * This function should be called before the structure in which it is
 * embedded is cleaned up.
 */
void vmpressure_cleanup(struct vmpressure *vmpr)
{
	/*
	 * Make sure there is no pending work before eventfd infrastructure
	 * goes away.
	 */
	flush_work(&vmpr->work);
}