/*
 * Quicklist support.
 *
 * Quicklists are lightweight lists of pages that have a defined state
 * on alloc and free. Pages must be in the quicklist-specific defined state
 * (zero by default) when the page is freed. It seems that the initial idea
 * for such lists first came from Dave Miller and then various other people
 * improved on it.
 *
 * Copyright (C) 2007 SGI,
 *	Christoph Lameter <clameter@sgi.com>
 *		Generalized, added support for multiple lists and
 *		constructors / destructors.
 */
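/*
 * Usage sketch (illustrative, not part of this file): an architecture can
 * keep already-zeroed page table pages on quicklist 0 so that allocation
 * avoids both the page allocator and a clear_page(). The function names
 * below are examples only and are not mandated by the quicklist API:
 *
 *	pgd_t *example_pgd_alloc(struct mm_struct *mm)
 *	{
 *		return quicklist_alloc(0, GFP_KERNEL, NULL);
 *	}
 *
 *	void example_pgd_free(struct mm_struct *mm, pgd_t *pgd)
 *	{
 *		quicklist_free(0, NULL, pgd);
 *	}
 */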
#include <linux/kernel.h>

#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/module.h>
#include <linux/quicklist.h>

DEFINE_PER_CPU(struct quicklist, quicklist)[CONFIG_NR_QUICK];

#define FRACTION_OF_NODE_MEM	16

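/*
 * Upper limit on the number of pages kept on a per-cpu quicklist:
 * a fraction (1/FRACTION_OF_NODE_MEM) of this node's free pages in the
 * zones usable for GFP_KERNEL allocations, but never less than min_pages.
 */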
static unsigned long max_pages(unsigned long min_pages)
{
	unsigned long node_free_pages, max;
	struct zone *zones = NODE_DATA(numa_node_id())->node_zones;

	node_free_pages =
#ifdef CONFIG_ZONE_DMA
		zone_page_state(&zones[ZONE_DMA], NR_FREE_PAGES) +
#endif
#ifdef CONFIG_ZONE_DMA32
		zone_page_state(&zones[ZONE_DMA32], NR_FREE_PAGES) +
#endif
		zone_page_state(&zones[ZONE_NORMAL], NR_FREE_PAGES);

	max = node_free_pages / FRACTION_OF_NODE_MEM;
	return max(max, min_pages);
}

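/*
 * Number of pages to release from this quicklist right now: the excess
 * over max_pages(), but no more than max_free in a single pass.
 */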
static long min_pages_to_free(struct quicklist *q,
	unsigned long min_pages, long max_free)
{
	long pages_to_free;

	pages_to_free = q->nr_pages - max_pages(min_pages);

	return min(pages_to_free, max_free);
}

/*
 * Trim down the number of pages in the quicklist
 */
void quicklist_trim(int nr, void (*dtor)(void *),
	unsigned long min_pages, unsigned long max_free)
{
	long pages_to_free;
	struct quicklist *q;

	q = &get_cpu_var(quicklist)[nr];
	if (q->nr_pages > min_pages) {
		pages_to_free = min_pages_to_free(q, min_pages, max_free);

		while (pages_to_free > 0) {
			/*
			 * We pass a gfp_t of 0 to quicklist_alloc here
			 * because we will never call into the page allocator.
			 */
			void *p = quicklist_alloc(nr, 0, NULL);

			if (dtor)
				dtor(p);
			free_page((unsigned long)p);
			pages_to_free--;
		}
	}
	put_cpu_var(quicklist);
}
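/*
 * Illustrative sketch (not part of this file): an architecture that caches
 * page table pages on quicklist 0 might trim it periodically from its
 * check_pgt_cache() hook; the hook name and the thresholds below are
 * examples only:
 *
 *	void check_pgt_cache(void)
 *	{
 *		quicklist_trim(0, NULL, 25, 16);
 *	}
 */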
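/*
 * Total number of pages currently held on the quicklists of all online
 * CPUs, summed over the CONFIG_NR_QUICK lists of each CPU.
 */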
unsigned long quicklist_total_size(void)
{
	unsigned long count = 0;
	int cpu;
	struct quicklist *ql, *q;

	for_each_online_cpu(cpu) {
		ql = per_cpu(quicklist, cpu);
		for (q = ql; q < ql + CONFIG_NR_QUICK; q++)
			count += q->nr_pages;
	}
	return count;
}
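/*
 * Illustrative sketch (not part of this file): quicklist_total_size() is
 * intended for memory accounting. A reader such as /proc/meminfo could
 * report the cached pages in kilobytes, e.g.:
 *
 *	seq_printf(m, "Quicklists:     %8lu kB\n",
 *		   quicklist_total_size() << (PAGE_SHIFT - 10));
 */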