#ifndef _ASM_X86_AGP_H
#define _ASM_X86_AGP_H

#include <asm/pgtable.h>
#include <asm/cacheflush.h>

/*
 * Functions to keep the agpgart mappings coherent with the MMU. The
 * GART gives the CPU a physical alias of pages in memory. The alias
 * region is mapped uncacheable. Make sure there are no conflicting
 * mappings with different cacheability attributes for the same
 * page. This avoids data corruption on some CPUs.
 */

#define map_page_into_agp(page) set_pages_uc(page, 1)
#define unmap_page_from_agp(page) set_pages_wb(page, 1)
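
/*
 * Hypothetical driver-side usage (sketch only, not part of this header):
 * a GART driver would switch a page's kernel mapping to uncached before
 * pointing an aperture entry at it, and restore write-back when the page
 * is removed again.
 *
 *	struct page *page = alloc_page(GFP_KERNEL);
 *
 *	map_page_into_agp(page);		// set_pages_uc(page, 1)
 *	// ... program a GATT entry with page_to_phys(page) ...
 *	unmap_page_from_agp(page);		// set_pages_wb(page, 1)
 *	__free_page(page);
 */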

/*
 * Could use CLFLUSH here if the CPU supports it. But then it would
 * need to be issued for each cache line of the whole page, so it may
 * not be worth it; it would also need a virtual mapping of the page
 * to flush.
 */
#define flush_agp_cache() wbinvd()
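
/*
 * For reference, the CLFLUSH-based alternative mentioned above could
 * look roughly like the sketch below (flush_agp_page() is a hypothetical
 * helper, assuming the page is already mapped at a kernel virtual
 * address and using clflush_cache_range() from <asm/cacheflush.h>):
 *
 *	static inline void flush_agp_page(void *vaddr)
 *	{
 *		clflush_cache_range(vaddr, PAGE_SIZE);
 *	}
 */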

/* Convert a physical address to an address suitable for the GART. */
#define phys_to_gart(x) (x)
#define gart_to_phys(x) (x)

/* GATT allocation. Returns/accepts GATT kernel virtual address. */
#define alloc_gatt_pages(order) \
	((char *)__get_free_pages(GFP_KERNEL, (order)))
#define free_gatt_pages(table, order) \
	free_pages((unsigned long)(table), (order))
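
/*
 * Hypothetical allocation/teardown sequence for a GART driver (sketch
 * only; "table_size" and the entry fill are placeholders, not part of
 * this header):
 *
 *	char *gatt = alloc_gatt_pages(get_order(table_size));
 *
 *	if (!gatt)
 *		return -ENOMEM;
 *	// ... fill entries with phys_to_gart(page_to_phys(page)) ...
 *	free_gatt_pages(gatt, get_order(table_size));
 */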

#endif /* _ASM_X86_AGP_H */