blob: daf6ff6e199a5cbe129a387c2b319c06d62486bc [file] [log] [blame]
/*
 * Quicklist support.
 *
 * Quicklists are light weight lists of pages that have a defined state
 * on alloc and free. Pages must be in the quicklist specific defined state
 * (zero by default) when the page is freed. It seems that the initial idea
 * for such lists first came from Dave Miller and then various other people
 * improved on it.
 *
 * Copyright (C) 2007 SGI,
 *	Christoph Lameter <cl@linux.com>
 *		Generalized, added support for multiple lists and
 *		constructors / destructors.
 */
15#include <linux/kernel.h>
16
Tejun Heo5a0e3ad2010-03-24 17:04:11 +090017#include <linux/gfp.h>
Christoph Lameter6225e932007-05-06 14:49:50 -070018#include <linux/mm.h>
19#include <linux/mmzone.h>
Christoph Lameter6225e932007-05-06 14:49:50 -070020#include <linux/quicklist.h>
21
/* Per-cpu array of CONFIG_NR_QUICK quicklists (one list per page state). */
DEFINE_PER_CPU(struct quicklist [CONFIG_NR_QUICK], quicklist);

/* Cap quicklists at 1/16th of a node's free memory (see max_pages()). */
#define FRACTION_OF_NODE_MEM	16
25
26static unsigned long max_pages(unsigned long min_pages)
27{
28 unsigned long node_free_pages, max;
KOSAKI Motohirob9541852008-09-02 14:35:58 -070029 int node = numa_node_id();
30 struct zone *zones = NODE_DATA(node)->node_zones;
31 int num_cpus_on_node;
Christoph Lameter6225e932007-05-06 14:49:50 -070032
Christoph Lameter96990a42008-01-14 00:55:14 -080033 node_free_pages =
34#ifdef CONFIG_ZONE_DMA
35 zone_page_state(&zones[ZONE_DMA], NR_FREE_PAGES) +
36#endif
37#ifdef CONFIG_ZONE_DMA32
38 zone_page_state(&zones[ZONE_DMA32], NR_FREE_PAGES) +
39#endif
40 zone_page_state(&zones[ZONE_NORMAL], NR_FREE_PAGES);
41
Christoph Lameter6225e932007-05-06 14:49:50 -070042 max = node_free_pages / FRACTION_OF_NODE_MEM;
KOSAKI Motohirob9541852008-09-02 14:35:58 -070043
Rusty Russelldb790782009-09-24 09:34:52 -060044 num_cpus_on_node = cpumask_weight(cpumask_of_node(node));
KOSAKI Motohirob9541852008-09-02 14:35:58 -070045 max /= num_cpus_on_node;
46
Christoph Lameter6225e932007-05-06 14:49:50 -070047 return max(max, min_pages);
48}
49
50static long min_pages_to_free(struct quicklist *q,
51 unsigned long min_pages, long max_free)
52{
53 long pages_to_free;
54
55 pages_to_free = q->nr_pages - max_pages(min_pages);
56
57 return min(pages_to_free, max_free);
58}
59
60/*
61 * Trim down the number of pages in the quicklist
62 */
63void quicklist_trim(int nr, void (*dtor)(void *),
64 unsigned long min_pages, unsigned long max_free)
65{
66 long pages_to_free;
67 struct quicklist *q;
68
69 q = &get_cpu_var(quicklist)[nr];
70 if (q->nr_pages > min_pages) {
71 pages_to_free = min_pages_to_free(q, min_pages, max_free);
72
73 while (pages_to_free > 0) {
74 /*
75 * We pass a gfp_t of 0 to quicklist_alloc here
76 * because we will never call into the page allocator.
77 */
78 void *p = quicklist_alloc(nr, 0, NULL);
79
80 if (dtor)
81 dtor(p);
82 free_page((unsigned long)p);
83 pages_to_free--;
84 }
85 }
86 put_cpu_var(quicklist);
87}
88
89unsigned long quicklist_total_size(void)
90{
91 unsigned long count = 0;
92 int cpu;
93 struct quicklist *ql, *q;
94
95 for_each_online_cpu(cpu) {
96 ql = per_cpu(quicklist, cpu);
97 for (q = ql; q < ql + CONFIG_NR_QUICK; q++)
98 count += q->nr_pages;
99 }
100 return count;
101}
102