/*
 * For documentation on the i460 AGP interface, see Chapter 7 (AGP Subsystem) of
 * the "Intel 460GTX Chipset Software Developer's Manual":
 * http://developer.intel.com/design/itanium/downloads/24870401s.htm
 */
/*
 * 460GX support by Chris Ahna <christopher.j.ahna@intel.com>
 * Clean up & simplification by David Mosberger-Tang <davidm@hpl.hp.com>
 */
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/agp_backend.h>

#include "agp.h"

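/*
 * Register offsets in the bridge's PCI config space, the fixed physical address
 * of the GATT SRAMs, and the control bits stored in each GATT entry.
 */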
#define INTEL_I460_BAPBASE		0x98
#define INTEL_I460_GXBCTL		0xa0
#define INTEL_I460_AGPSIZ		0xa2
#define INTEL_I460_ATTBASE		0xfe200000
#define INTEL_I460_GATT_VALID		(1UL << 24)
#define INTEL_I460_GATT_COHERENT	(1UL << 25)

/*
 * The i460 can operate with large (4MB) pages, but there is no sane way to support this
 * within the current kernel/DRM environment, so we disable the relevant code for now.
 * See also comments in ia64_alloc_page()...
 */
#define I460_LARGE_IO_PAGES		0

#if I460_LARGE_IO_PAGES
# define I460_IO_PAGE_SHIFT		i460.io_page_shift
#else
# define I460_IO_PAGE_SHIFT		12
#endif

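/*
 * Only one of the two ratios below is meaningful at any given time: the first
 * when GART (I/O) pages are smaller than kernel pages, the second when they
 * are larger.
 */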
#define I460_IOPAGES_PER_KPAGE		(PAGE_SIZE >> I460_IO_PAGE_SHIFT)
#define I460_KPAGES_PER_IOPAGE		(1 << (I460_IO_PAGE_SHIFT - PAGE_SHIFT))
#define I460_SRAM_IO_DISABLE		(1 << 4)
#define I460_BAPBASE_ENABLE		(1 << 3)
#define I460_AGPSIZ_MASK		0x7
#define I460_4M_PS			(1 << 1)

/* Control bits for Out-Of-GART coherency and Burst Write Combining */
#define I460_GXBCTL_OOG			(1UL << 0)
#define I460_GXBCTL_BWC			(1UL << 2)

/*
 * gatt_table entries are 32 bits wide on the i460; the generic code ought to declare
 * the gatt_table and gatt_table_real pointers as "void *"...
 */
#define RD_GATT(index)		readl((u32 *) i460.gatt + (index))
#define WR_GATT(index, val)	writel((val), (u32 *) i460.gatt + (index))
/*
 * The 460 spec says we have to read the last location written to make sure that all
 * writes have taken effect
 */
#define WR_FLUSH_GATT(index)	RD_GATT(index)

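/* For a power-of-two x, ffz(~x) is the index of its single set bit, i.e. log2(x). */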
#define log2(x)			ffz(~(x))

static struct {
        void *gatt;			/* ioremap'd GATT area */

        /* i460 supports multiple GART page sizes, so GART pageshift is dynamic: */
        u8 io_page_shift;

        /* BIOS configures chipset to one of 2 possible apbase values: */
        u8 dynamic_apbase;

        /* structure for tracking partial use of 4MB GART pages: */
        struct lp_desc {
                unsigned long *alloced_map;	/* bitmap of kernel-pages in use */
                int refcount;			/* number of kernel pages using the large page */
                u64 paddr;			/* physical address of large page */
        } *lp_desc;
} i460;

static struct aper_size_info_8 i460_sizes[3] =
{
        /*
         * The 32GB aperture is only available with a 4M GART page size. Due to the
         * dynamic GART page size, we can't figure out page_order or num_entries until
         * runtime.
         */
        {32768, 0, 0, 4},
        {1024, 0, 0, 2},
        {256, 0, 0, 1}
};

static struct gatt_mask i460_masks[] =
{
        {
                .mask = INTEL_I460_GATT_VALID | INTEL_I460_GATT_COHERENT,
                .type = 0
        }
};

static int i460_fetch_size (void)
{
        int i;
        u8 temp;
        struct aper_size_info_8 *values;

        /* Determine the GART page size */
        pci_read_config_byte(agp_bridge->dev, INTEL_I460_GXBCTL, &temp);
        i460.io_page_shift = (temp & I460_4M_PS) ? 22 : 12;
        pr_debug("i460_fetch_size: io_page_shift=%d\n", i460.io_page_shift);
        if (i460.io_page_shift != I460_IO_PAGE_SHIFT) {
                printk(KERN_ERR PFX
                       "I/O (GART) page-size %luKB doesn't match expected size %luKB\n",
                       1UL << (i460.io_page_shift - 10), 1UL << (I460_IO_PAGE_SHIFT - 10));
                return 0;
        }
        values = A_SIZE_8(agp_bridge->driver->aperture_sizes);

        pci_read_config_byte(agp_bridge->dev, INTEL_I460_AGPSIZ, &temp);

        /* Exit now if the IO drivers for the GART SRAMS are turned off */
        if (temp & I460_SRAM_IO_DISABLE) {
                printk(KERN_ERR PFX "GART SRAMS disabled on 460GX chipset\n");
                printk(KERN_ERR PFX "AGPGART operation not possible\n");
                return 0;
        }

        /* Make sure we don't try to create a 2^23-entry GATT */
        if ((i460.io_page_shift == 12) && ((temp & I460_AGPSIZ_MASK) == 4)) {
                printk(KERN_ERR PFX "We can't have a 32GB aperture with 4KB GART pages\n");
                return 0;
        }

        /* Determine the proper APBASE register */
        if (temp & I460_BAPBASE_ENABLE)
                i460.dynamic_apbase = INTEL_I460_BAPBASE;
        else
                i460.dynamic_apbase = AGP_APBASE;

        for (i = 0; i < agp_bridge->driver->num_aperture_sizes; i++) {
                /*
                 * Dynamically calculate the proper num_entries and page_order values for
                 * the defined aperture sizes. Take care not to shift off the end of
                 * values[i].size.
                 */
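                /*
                 * size is in MB: "<< 8" converts it to a count of 4KB pages, and the
                 * right shift rescales that count for the actual GART page size.
                 */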
                values[i].num_entries = (values[i].size << 8) >> (I460_IO_PAGE_SHIFT - 12);
                values[i].page_order = log2((sizeof(u32)*values[i].num_entries) >> PAGE_SHIFT);
        }

        for (i = 0; i < agp_bridge->driver->num_aperture_sizes; i++) {
                /* Neglect control bits when matching up size_value */
                if ((temp & I460_AGPSIZ_MASK) == values[i].size_value) {
                        agp_bridge->previous_size = agp_bridge->current_size = (void *) (values + i);
                        agp_bridge->aperture_size_idx = i;
                        return values[i].size;
                }
        }

        return 0;
}

/* There isn't anything to do here since 460 has no GART TLB. */
static void i460_tlb_flush (struct agp_memory *mem)
{
        return;
}

/*
 * This utility function is needed to prevent corruption of the control bits
 * which are stored along with the aperture size in 460's AGPSIZ register
 */
static void i460_write_agpsiz (u8 size_value)
{
        u8 temp;

        pci_read_config_byte(agp_bridge->dev, INTEL_I460_AGPSIZ, &temp);
        pci_write_config_byte(agp_bridge->dev, INTEL_I460_AGPSIZ,
                              ((temp & ~I460_AGPSIZ_MASK) | size_value));
}

static void i460_cleanup (void)
{
        struct aper_size_info_8 *previous_size;

        previous_size = A_SIZE_8(agp_bridge->previous_size);
        i460_write_agpsiz(previous_size->size_value);

        if (I460_IO_PAGE_SHIFT > PAGE_SHIFT)
                kfree(i460.lp_desc);
}

static int i460_configure (void)
{
        union {
                u32 small[2];
                u64 large;
        } temp;
        size_t size;
        u8 scratch;
        struct aper_size_info_8 *current_size;

        temp.large = 0;

        current_size = A_SIZE_8(agp_bridge->current_size);
        i460_write_agpsiz(current_size->size_value);

        /*
         * Do the necessary rigmarole to read all eight bytes of APBASE.
         * This has to be done since the AGP aperture can be above 4GB on
         * 460 based systems.
         */
        pci_read_config_dword(agp_bridge->dev, i460.dynamic_apbase, &(temp.small[0]));
        pci_read_config_dword(agp_bridge->dev, i460.dynamic_apbase + 4, &(temp.small[1]));

        /* Clear BAR control bits */
        agp_bridge->gart_bus_addr = temp.large & ~((1UL << 3) - 1);

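        /*
         * Preserve the GART page-size bit (bit 1) of GXBCTL and turn on
         * out-of-GART coherency and burst write combining.
         */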
        pci_read_config_byte(agp_bridge->dev, INTEL_I460_GXBCTL, &scratch);
        pci_write_config_byte(agp_bridge->dev, INTEL_I460_GXBCTL,
                              (scratch & 0x02) | I460_GXBCTL_OOG | I460_GXBCTL_BWC);

        /*
         * Initialize partial allocation trackers if a GART page is bigger than a kernel
         * page.
         */
        if (I460_IO_PAGE_SHIFT > PAGE_SHIFT) {
                size = current_size->num_entries * sizeof(i460.lp_desc[0]);
                i460.lp_desc = kzalloc(size, GFP_KERNEL);
                if (!i460.lp_desc)
                        return -ENOMEM;
        }
        return 0;
}

static int i460_create_gatt_table (struct agp_bridge_data *bridge)
{
        int page_order, num_entries, i;
        void *temp;

        /*
         * Load up the fixed address of the GART SRAMS which hold our GATT table.
         */
        temp = agp_bridge->current_size;
        page_order = A_SIZE_8(temp)->page_order;
        num_entries = A_SIZE_8(temp)->num_entries;

        i460.gatt = ioremap(INTEL_I460_ATTBASE, PAGE_SIZE << page_order);
        if (!i460.gatt) {
                printk(KERN_ERR PFX "ioremap of GART SRAMs failed\n");
                return -ENOMEM;
        }

        /* These are no good; they should be removed from the agp_bridge structure... */
        agp_bridge->gatt_table_real = NULL;
        agp_bridge->gatt_table = NULL;
        agp_bridge->gatt_bus_addr = 0;

        for (i = 0; i < num_entries; ++i)
                WR_GATT(i, 0);
        WR_FLUSH_GATT(i - 1);
        return 0;
}

static int i460_free_gatt_table (struct agp_bridge_data *bridge)
{
        int num_entries, i;
        void *temp;

        temp = agp_bridge->current_size;

        num_entries = A_SIZE_8(temp)->num_entries;

        for (i = 0; i < num_entries; ++i)
                WR_GATT(i, 0);
        WR_FLUSH_GATT(num_entries - 1);

        iounmap(i460.gatt);
        return 0;
}

/*
 * The following functions are called when the I/O (GART) page size is smaller than
 * PAGE_SIZE.
 */

static int i460_insert_memory_small_io_page (struct agp_memory *mem,
                                             off_t pg_start, int type)
{
        unsigned long paddr, io_pg_start, io_page_size;
        int i, j, k, num_entries;
        void *temp;

        pr_debug("i460_insert_memory_small_io_page(mem=%p, pg_start=%ld, type=%d, paddr0=0x%lx)\n",
                 mem, pg_start, type, mem->memory[0]);

        io_pg_start = I460_IOPAGES_PER_KPAGE * pg_start;

        temp = agp_bridge->current_size;
        num_entries = A_SIZE_8(temp)->num_entries;

        if ((io_pg_start + I460_IOPAGES_PER_KPAGE * mem->page_count) > num_entries) {
                printk(KERN_ERR PFX "Looks like we're out of AGP memory\n");
                return -EINVAL;
        }

        j = io_pg_start;
        while (j < (io_pg_start + I460_IOPAGES_PER_KPAGE * mem->page_count)) {
                if (!PGE_EMPTY(agp_bridge, RD_GATT(j))) {
                        pr_debug("i460_insert_memory_small_io_page: GATT[%d]=0x%x is busy\n",
                                 j, RD_GATT(j));
                        return -EBUSY;
                }
                j++;
        }

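        /*
         * Each kernel page is backed by I460_IOPAGES_PER_KPAGE consecutive GART
         * pages, so program that many GATT entries per kernel page.
         */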
        io_page_size = 1UL << I460_IO_PAGE_SHIFT;
        for (i = 0, j = io_pg_start; i < mem->page_count; i++) {
                paddr = mem->memory[i];
                for (k = 0; k < I460_IOPAGES_PER_KPAGE; k++, j++, paddr += io_page_size)
                        WR_GATT(j, agp_bridge->driver->mask_memory(agp_bridge,
                                                                   paddr, mem->type));
        }
        WR_FLUSH_GATT(j - 1);
        return 0;
}

static int i460_remove_memory_small_io_page(struct agp_memory *mem,
                                            off_t pg_start, int type)
{
        int i;

        pr_debug("i460_remove_memory_small_io_page(mem=%p, pg_start=%ld, type=%d)\n",
                 mem, pg_start, type);

        pg_start = I460_IOPAGES_PER_KPAGE * pg_start;

        for (i = pg_start; i < (pg_start + I460_IOPAGES_PER_KPAGE * mem->page_count); i++)
                WR_GATT(i, 0);
        WR_FLUSH_GATT(i - 1);
        return 0;
}

#if I460_LARGE_IO_PAGES

/*
 * These functions are called when the I/O (GART) page size exceeds PAGE_SIZE.
 *
 * This situation is interesting since AGP memory allocations that are smaller than a
 * single GART page are possible. The i460.lp_desc array tracks partial allocation of the
 * large GART pages to work around this issue.
 *
 * i460.lp_desc[pg_num].refcount tracks the number of kernel pages in use within GART page
 * pg_num. i460.lp_desc[pg_num].paddr is the physical address of the large page and
 * i460.lp_desc[pg_num].alloced_map is a bitmap of kernel pages that are in use (allocated).
 */

static int i460_alloc_large_page (struct lp_desc *lp)
{
        unsigned long order = I460_IO_PAGE_SHIFT - PAGE_SHIFT;
        size_t map_size;
        void *lpage;

        lpage = (void *) __get_free_pages(GFP_KERNEL, order);
        if (!lpage) {
                printk(KERN_ERR PFX "Couldn't alloc 4M GART page...\n");
                return -ENOMEM;
        }

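        /*
         * Round the bitmap up to a whole number of longs, then convert from
         * bits to bytes for the allocation below.
         */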
        map_size = ((I460_KPAGES_PER_IOPAGE + BITS_PER_LONG - 1) & -BITS_PER_LONG)/8;
        lp->alloced_map = kzalloc(map_size, GFP_KERNEL);
        if (!lp->alloced_map) {
                free_pages((unsigned long) lpage, order);
                printk(KERN_ERR PFX "Out of memory, we're in trouble...\n");
                return -ENOMEM;
        }

        lp->paddr = virt_to_gart(lpage);
        lp->refcount = 0;
        atomic_add(I460_KPAGES_PER_IOPAGE, &agp_bridge->current_memory_agp);
        return 0;
}

static void i460_free_large_page (struct lp_desc *lp)
{
        kfree(lp->alloced_map);
        lp->alloced_map = NULL;

        free_pages((unsigned long) gart_to_virt(lp->paddr), I460_IO_PAGE_SHIFT - PAGE_SHIFT);
        atomic_sub(I460_KPAGES_PER_IOPAGE, &agp_bridge->current_memory_agp);
}

static int i460_insert_memory_large_io_page (struct agp_memory *mem,
                                             off_t pg_start, int type)
{
        int i, start_offset, end_offset, idx, pg, num_entries;
        struct lp_desc *start, *end, *lp;
        void *temp;

        temp = agp_bridge->current_size;
        num_entries = A_SIZE_8(temp)->num_entries;

        /* Figure out what pg_start means in terms of our large GART pages */
        start = &i460.lp_desc[pg_start / I460_KPAGES_PER_IOPAGE];
        end = &i460.lp_desc[(pg_start + mem->page_count - 1) / I460_KPAGES_PER_IOPAGE];
        start_offset = pg_start % I460_KPAGES_PER_IOPAGE;
        end_offset = (pg_start + mem->page_count - 1) % I460_KPAGES_PER_IOPAGE;

        if (end >= i460.lp_desc + num_entries) {
                printk(KERN_ERR PFX "Looks like we're out of AGP memory\n");
                return -EINVAL;
        }

        /* Check if the requested region of the aperture is free */
        for (lp = start; lp <= end; ++lp) {
                if (!lp->alloced_map)
                        continue;	/* OK, the entire large page is available... */

                for (idx = ((lp == start) ? start_offset : 0);
                     idx < ((lp == end) ? (end_offset + 1) : I460_KPAGES_PER_IOPAGE);
                     idx++)
                {
                        if (test_bit(idx, lp->alloced_map))
                                return -EBUSY;
                }
        }

        for (lp = start, i = 0; lp <= end; ++lp) {
                if (!lp->alloced_map) {
                        /* Allocate new GART pages... */
                        if (i460_alloc_large_page(lp) < 0)
                                return -ENOMEM;
                        pg = lp - i460.lp_desc;
                        WR_GATT(pg, agp_bridge->driver->mask_memory(agp_bridge,
                                                                    lp->paddr, 0));
                        WR_FLUSH_GATT(pg);
                }

                for (idx = ((lp == start) ? start_offset : 0);
                     idx < ((lp == end) ? (end_offset + 1) : I460_KPAGES_PER_IOPAGE);
                     idx++, i++)
                {
                        mem->memory[i] = lp->paddr + idx*PAGE_SIZE;
                        __set_bit(idx, lp->alloced_map);
                        ++lp->refcount;
                }
        }
        return 0;
}

static int i460_remove_memory_large_io_page (struct agp_memory *mem,
                                             off_t pg_start, int type)
{
        int i, pg, start_offset, end_offset, idx, num_entries;
        struct lp_desc *start, *end, *lp;
        void *temp;

        temp = agp_bridge->current_size;
        num_entries = A_SIZE_8(temp)->num_entries;

        /* Figure out what pg_start means in terms of our large GART pages */
        start = &i460.lp_desc[pg_start / I460_KPAGES_PER_IOPAGE];
        end = &i460.lp_desc[(pg_start + mem->page_count - 1) / I460_KPAGES_PER_IOPAGE];
        start_offset = pg_start % I460_KPAGES_PER_IOPAGE;
        end_offset = (pg_start + mem->page_count - 1) % I460_KPAGES_PER_IOPAGE;

        for (i = 0, lp = start; lp <= end; ++lp) {
                for (idx = ((lp == start) ? start_offset : 0);
                     idx < ((lp == end) ? (end_offset + 1) : I460_KPAGES_PER_IOPAGE);
                     idx++, i++)
                {
                        mem->memory[i] = 0;
                        __clear_bit(idx, lp->alloced_map);
                        --lp->refcount;
                }

                /* Free GART pages if they are unused */
                if (lp->refcount == 0) {
                        pg = lp - i460.lp_desc;
                        WR_GATT(pg, 0);
                        WR_FLUSH_GATT(pg);
                        i460_free_large_page(lp);
                }
        }
        return 0;
}

/* Wrapper routines to call the appropriate {small_io_page,large_io_page} function */

static int i460_insert_memory (struct agp_memory *mem,
                               off_t pg_start, int type)
{
        if (I460_IO_PAGE_SHIFT <= PAGE_SHIFT)
                return i460_insert_memory_small_io_page(mem, pg_start, type);
        else
                return i460_insert_memory_large_io_page(mem, pg_start, type);
}

static int i460_remove_memory (struct agp_memory *mem,
                               off_t pg_start, int type)
{
        if (I460_IO_PAGE_SHIFT <= PAGE_SHIFT)
                return i460_remove_memory_small_io_page(mem, pg_start, type);
        else
                return i460_remove_memory_large_io_page(mem, pg_start, type);
}

/*
 * If the I/O (GART) page size is bigger than the kernel page size, we don't want to
 * allocate memory until we know where it is to be bound in the aperture (a
 * multi-kernel-page alloc might fit inside of an already allocated GART page).
 *
 * Let's just hope nobody counts on the allocated AGP memory being there before bind time
 * (I don't think current drivers do)...
 */
static void *i460_alloc_page (struct agp_bridge_data *bridge)
{
        void *page;

        if (I460_IO_PAGE_SHIFT <= PAGE_SHIFT)
                page = agp_generic_alloc_page(agp_bridge);
        else
                /* Returning NULL would cause problems */
                /* AK: really dubious code. */
                page = (void *)~0UL;
        return page;
}

static void i460_destroy_page (void *page)
{
        if (I460_IO_PAGE_SHIFT <= PAGE_SHIFT)
                agp_generic_destroy_page(page);
}

#endif /* I460_LARGE_IO_PAGES */

static unsigned long i460_mask_memory (struct agp_bridge_data *bridge,
                                       unsigned long addr, int type)
{
        /* Make sure the returned address is a valid GATT entry */
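        /*
         * Drop the offset within the GART page, keep physical-address bits 35..12,
         * and place them in bits 23..0 of the entry; the OR adds the VALID and
         * COHERENT control bits (bits 24 and 25).
         */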
        return bridge->driver->masks[0].mask
                | (((addr & ~((1 << I460_IO_PAGE_SHIFT) - 1)) & 0xffffff000) >> 12);
}

struct agp_bridge_driver intel_i460_driver = {
        .owner = THIS_MODULE,
        .aperture_sizes = i460_sizes,
        .size_type = U8_APER_SIZE,
        .num_aperture_sizes = 3,
        .configure = i460_configure,
        .fetch_size = i460_fetch_size,
        .cleanup = i460_cleanup,
        .tlb_flush = i460_tlb_flush,
        .mask_memory = i460_mask_memory,
        .masks = i460_masks,
        .agp_enable = agp_generic_enable,
        .cache_flush = global_cache_flush,
        .create_gatt_table = i460_create_gatt_table,
        .free_gatt_table = i460_free_gatt_table,
#if I460_LARGE_IO_PAGES
        .insert_memory = i460_insert_memory,
        .remove_memory = i460_remove_memory,
        .agp_alloc_page = i460_alloc_page,
        .agp_destroy_page = i460_destroy_page,
#else
        .insert_memory = i460_insert_memory_small_io_page,
        .remove_memory = i460_remove_memory_small_io_page,
        .agp_alloc_page = agp_generic_alloc_page,
        .agp_destroy_page = agp_generic_destroy_page,
#endif
        .alloc_by_type = agp_generic_alloc_by_type,
        .free_by_type = agp_generic_free_by_type,
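        /*
         * The CPU cannot access AGP memory through the i460's aperture, so AGP
         * clients (e.g. DRM) are expected to map the underlying pages directly.
         */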
        .cant_use_aperture = 1,
};

static int __devinit agp_intel_i460_probe(struct pci_dev *pdev,
                                          const struct pci_device_id *ent)
{
        struct agp_bridge_data *bridge;
        u8 cap_ptr;

        cap_ptr = pci_find_capability(pdev, PCI_CAP_ID_AGP);
        if (!cap_ptr)
                return -ENODEV;

        bridge = agp_alloc_bridge();
        if (!bridge)
                return -ENOMEM;

        bridge->driver = &intel_i460_driver;
        bridge->dev = pdev;
        bridge->capndx = cap_ptr;

        printk(KERN_INFO PFX "Detected Intel 460GX chipset\n");

        pci_set_drvdata(pdev, bridge);
        return agp_add_bridge(bridge);
}

static void __devexit agp_intel_i460_remove(struct pci_dev *pdev)
{
        struct agp_bridge_data *bridge = pci_get_drvdata(pdev);

        agp_remove_bridge(bridge);
        agp_put_bridge(bridge);
}

static struct pci_device_id agp_intel_i460_pci_table[] = {
        {
                .class = (PCI_CLASS_BRIDGE_HOST << 8),
                .class_mask = ~0,
                .vendor = PCI_VENDOR_ID_INTEL,
                .device = PCI_DEVICE_ID_INTEL_84460GX,
                .subvendor = PCI_ANY_ID,
                .subdevice = PCI_ANY_ID,
        },
        { }
};

MODULE_DEVICE_TABLE(pci, agp_intel_i460_pci_table);

static struct pci_driver agp_intel_i460_pci_driver = {
        .owner = THIS_MODULE,
        .name = "agpgart-intel-i460",
        .id_table = agp_intel_i460_pci_table,
        .probe = agp_intel_i460_probe,
        .remove = __devexit_p(agp_intel_i460_remove),
};

static int __init agp_intel_i460_init(void)
{
        if (agp_off)
                return -EINVAL;
        return pci_register_driver(&agp_intel_i460_pci_driver);
}

static void __exit agp_intel_i460_cleanup(void)
{
        pci_unregister_driver(&agp_intel_i460_pci_driver);
}

module_init(agp_intel_i460_init);
module_exit(agp_intel_i460_cleanup);

MODULE_AUTHOR("Chris Ahna <Christopher.J.Ahna@intel.com>");
MODULE_LICENSE("GPL and additional rights");