/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for
 * more details.
 *
 * Handle issues around the Tile "home cache" model of coherence.
 */

#ifndef _ASM_TILE_HOMECACHE_H
#define _ASM_TILE_HOMECACHE_H

#include <asm/page.h>
#include <linux/cpumask.h>

struct page;
struct task_struct;
struct vm_area_struct;
struct zone;

/*
 * Coherence point for the page is its memory controller.
 * It is not present in any cache (L1 or L2).
 */
#define PAGE_HOME_UNCACHED -1

/*
 * Is this page immutable (unwritable) and thus able to be cached more
 * widely than would otherwise be possible?  On tile64 this means we
 * mark the PTE to cache locally; on tilepro it means we have "nc" set.
 */
#define PAGE_HOME_IMMUTABLE -2

/*
 * Each cpu considers its own cache to be the home for the page,
 * which makes it incoherent.
 */
#define PAGE_HOME_INCOHERENT -3

/* Home for the page is distributed via hash-for-home. */
#define PAGE_HOME_HASH -4

/* Support wrapper to use instead of explicit hv_flush_remote(). */
extern void flush_remote(unsigned long cache_pfn, unsigned long cache_length,
			 const struct cpumask *cache_cpumask,
			 HV_VirtAddr tlb_va, unsigned long tlb_length,
			 unsigned long tlb_pgsize,
			 const struct cpumask *tlb_cpumask,
			 HV_Remote_ASID *asids, int asidcount);

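/*
 * Illustrative sketch, not part of the original header: a plausible
 * cache-only flush of a single order-0 page across "cache_cpumask",
 * with the TLB-flush arguments and remote-ASID list left empty.  The
 * "page" and "cache_cpumask" variables, and the use of PAGE_SIZE as
 * the cache length, are assumptions for illustration only.
 *
 *	flush_remote(page_to_pfn(page), PAGE_SIZE, cache_cpumask,
 *		     0, 0, 0, NULL, NULL, 0);
 */
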
/* Set homing-related bits in a PTE (can also pass a pgprot_t). */
extern pte_t pte_set_home(pte_t pte, int home);

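/*
 * Illustrative sketch, not part of the original header: re-home a PTE
 * via hash-for-home before installing it, assuming "pte" and "ptep"
 * have already been set up by the caller.
 *
 *	pte = pte_set_home(pte, PAGE_HOME_HASH);
 *	set_pte(ptep, pte);
 */
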
/* Do a cache eviction on the specified cpus. */
extern void homecache_evict(const struct cpumask *mask);

/*
 * Change a kernel page's homecache.  It must not be mapped in user space.
 * If !CONFIG_HOMECACHE, only usable on LOWMEM, and can only be called when
 * no other cpu can reference the page, and causes a full-chip cache/TLB flush.
 */
extern void homecache_change_page_home(struct page *, int order, int home);

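/*
 * Illustrative sketch, not part of the original header: re-home a
 * freshly allocated, otherwise unreferenced lowmem page so that it is
 * uncached, e.g. before sharing it with an agent that bypasses the
 * caches.
 *
 *	struct page *page = alloc_pages(GFP_KERNEL, 0);
 *	if (page)
 *		homecache_change_page_home(page, 0, PAGE_HOME_UNCACHED);
 */
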
/*
 * Flush a page out of whatever cache(s) it is in.
 * This is more than just finv, since it properly handles waiting
 * for the data to reach memory, but it can be quite
 * heavyweight, particularly on incoherent or immutable memory.
 */
extern void homecache_finv_page(struct page *);

/*
 * Flush a page out of the specified home cache.
 * Note that the specified home need not be the actual home of the page,
 * as for example might be the case when coordinating with I/O devices.
 */
extern void homecache_finv_map_page(struct page *, int home);

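/*
 * Illustrative sketch, not part of the original header: push a page's
 * data out of the hash-for-home cache on behalf of an I/O device,
 * regardless of the page's nominal home; "page" is assumed to come
 * from the caller.
 *
 *	homecache_finv_map_page(page, PAGE_HOME_HASH);
 */
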
/*
 * Allocate a page with the given GFP flags, home, and optionally
 * node.  These routines are actually just wrappers around the normal
 * alloc_pages() / alloc_pages_node() functions, which set and clear
 * a per-cpu variable to communicate with homecache_new_kernel_page().
 * If !CONFIG_HOMECACHE, uses homecache_change_page_home().
 */
extern struct page *homecache_alloc_pages(gfp_t gfp_mask,
					  unsigned int order, int home);
extern struct page *homecache_alloc_pages_node(int nid, gfp_t gfp_mask,
					       unsigned int order, int home);
#define homecache_alloc_page(gfp_mask, home) \
	homecache_alloc_pages(gfp_mask, 0, home)

/*
 * These routines are just pass-throughs to free_pages() when
 * we support full homecaching.  If !CONFIG_HOMECACHE, then these
 * routines use homecache_change_page_home() to reset the home
 * back to the default before returning the page to the allocator.
 */
void __homecache_free_pages(struct page *, unsigned int order);
void homecache_free_pages(unsigned long addr, unsigned int order);
#define __homecache_free_page(page) __homecache_free_pages((page), 0)
#define homecache_free_page(page) homecache_free_pages((page), 0)

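/*
 * Illustrative sketch, not part of the original header: allocate an
 * order-0 kernel page homed via hash-for-home, use it, and release it
 * with the matching homecache-aware free routine.
 *
 *	struct page *page = homecache_alloc_page(GFP_KERNEL, PAGE_HOME_HASH);
 *	if (page) {
 *		memset(page_address(page), 0, PAGE_SIZE);
 *		__homecache_free_page(page);
 *	}
 */
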
/*
 * Report the page home for LOWMEM pages by examining their kernel PTE,
 * or for highmem pages as the default home.
 */
extern int page_home(struct page *);

#define homecache_migrate_kthread() do {} while (0)

#define homecache_kpte_lock() 0
#define homecache_kpte_unlock(flags) do {} while (0)


#endif /* _ASM_TILE_HOMECACHE_H */