blob: d5bb1796e12b19a1dde4960c2f10d0064a8867b9 [file] [log] [blame]
#ifndef _LINUX_MM_TYPES_H
#define _LINUX_MM_TYPES_H

#include <linux/types.h>
#include <linux/threads.h>
#include <linux/list.h>
#include <linux/spinlock.h>

struct address_space;

/*
 * Each physical page in the system has a struct page associated with
 * it to keep track of whatever it is we are using the page for at the
 * moment. Note that we have no way to track which tasks are using
 * a page, though if it is a pagecache page, rmap structures can tell us
 * who is mapping it.
 */
/*
 * Several groups of fields below are overlaid via anonymous unions:
 * the same storage is reinterpreted according to what the page is
 * currently used for (page cache / anonymous memory / SLUB slab /
 * compound page).  Field order and size are layout-critical: there is
 * one struct page per physical page frame, so every byte here is
 * multiplied by the number of pages in the system.
 */
struct page {
	unsigned long flags;		/* Atomic flags, some possibly
					 * updated asynchronously */
	atomic_t _count;		/* Usage count, see below. */
	union {
		atomic_t _mapcount;	/* Count of ptes mapped in mms,
					 * to show when page is mapped
					 * & limit reverse map searches.
					 */
		struct {		/* SLUB uses: a slab page is never
					 * mapped to userspace, so SLUB
					 * reuses _mapcount's storage for
					 * per-slab bookkeeping. */
			short unsigned int inuse;	/* NOTE(review): presumably the number
							 * of allocated objects in this slab --
							 * confirm against mm/slub.c */
			short unsigned int offset;	/* NOTE(review): presumably the offset
							 * of the free pointer within an object --
							 * confirm against mm/slub.c */
		};
	};
	union {
	    struct {
		unsigned long private;		/* Mapping-private opaque data:
					 	 * usually used for buffer_heads
						 * if PagePrivate set; used for
						 * swp_entry_t if PageSwapCache;
						 * indicates order in the buddy
						 * system if PG_buddy is set.
						 */
		struct address_space *mapping;	/* If low bit clear, points to
						 * inode address_space, or NULL.
						 * If page mapped as anonymous
						 * memory, low bit is set, and
						 * it points to anon_vma object:
						 * see PAGE_MAPPING_ANON below.
						 */
	    };
#if NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS
	    spinlock_t ptl;		/* Split page-table lock: on large
					 * SMP configs the pte lock lives in
					 * the page holding the page table,
					 * overlaying private/mapping. */
#endif
	    struct {			/* SLUB uses */
		void **lockless_freelist;	/* NOTE(review): name and the "freelist"
						 * comment below suggest a cpu-local free
						 * list usable without the slab lock --
						 * confirm against mm/slub.c */
		struct kmem_cache *slab;	/* Pointer to slab */
	    };
	    struct {
		struct page *first_page;	/* Compound pages: NOTE(review)
						 * presumably tail pages point back
						 * at the head page -- confirm */
	    };
	};
	union {
		pgoff_t index;		/* Our offset within mapping. */
		void *freelist;		/* SLUB: freelist req. slab lock */
	};
	struct list_head lru;		/* Pageout list, eg. active_list
					 * protected by zone->lru_lock !
					 */
	/*
	 * On machines where all RAM is mapped into kernel address space,
	 * we can simply calculate the virtual address. On machines with
	 * highmem some memory is mapped into kernel virtual memory
	 * dynamically, so we need a place to store that address.
	 * Note that this field could be 16 bits on x86 ... ;)
	 *
	 * Architectures with slow multiplication can define
	 * WANT_PAGE_VIRTUAL in asm/page.h
	 */
#if defined(WANT_PAGE_VIRTUAL)
	void *virtual;			/* Kernel virtual address (NULL if
					   not kmapped, ie. highmem) */
#endif /* WANT_PAGE_VIRTUAL */
};

#endif /* _LINUX_MM_TYPES_H */