/*
 * Intel GTT (Graphics Translation Table) routines
 *
 * Caveat: This driver implements the linux agp interface, but this is far from
 * an agp driver! GTT support ended up here for purely historical reasons: The
 * old userspace intel graphics drivers needed an interface to map memory into
 * the GTT. And the drm provides a default interface for graphic devices sitting
 * on an agp port. So it made sense to fake the GTT support as an agp port to
 * avoid having to create a new api.
 *
 * With gem this does not make much sense anymore, just needlessly complicates
 * the code. But as long as the old graphics stack is still supported, it's
 * stuck here.
 *
 * /fairy-tale-mode off
 */

/*
 * If we have Intel graphics, we're not going to have anything other than
 * an Intel IOMMU. So make the correct use of the PCI DMA API contingent
 * on the Intel IOMMU support (CONFIG_DMAR).
 * Only newer chipsets need to bother with this, of course.
 */
#ifdef CONFIG_DMAR
#define USE_PCI_DMA_API 1
#endif

static const struct aper_size_info_fixed intel_i810_sizes[] =
{
	{64, 16384, 4},
	/* The 32M mode still requires a 64k gatt */
	{32, 8192, 4}
};
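/*
 * Sanity check on the table above (illustrative note, not from the original
 * source): each entry is {aperture size in MB, number of GTT entries, page
 * order}. 16384 entries * 4KB per page = 64MB and 8192 entries * 4KB = 32MB,
 * so the entry counts are consistent with the advertised aperture sizes.
 */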

#define AGP_DCACHE_MEMORY	1
#define AGP_PHYS_MEMORY		2
#define INTEL_AGP_CACHED_MEMORY 3

static struct gatt_mask intel_i810_masks[] =
{
	{.mask = I810_PTE_VALID, .type = 0},
	{.mask = (I810_PTE_VALID | I810_PTE_LOCAL), .type = AGP_DCACHE_MEMORY},
	{.mask = I810_PTE_VALID, .type = 0},
	{.mask = I810_PTE_VALID | I830_PTE_SYSTEM_CACHED,
	 .type = INTEL_AGP_CACHED_MEMORY}
};

static struct _intel_private {
	struct pci_dev *pcidev;	/* device one */
	u8 __iomem *registers;
	u32 __iomem *gtt;	/* I915G */
	int num_dcache_entries;
	/* gtt_entries is the number of gtt entries that are already mapped
	 * to stolen memory. Stolen memory is larger than the memory mapped
	 * through gtt_entries, as it includes some reserved space for the BIOS
	 * popup and for the GTT.
	 */
	int gtt_entries;	/* i830+ */
	int gtt_total_size;
	union {
		void __iomem *i9xx_flush_page;
		void *i8xx_flush_page;
	};
	struct page *i8xx_page;
	struct resource ifp_resource;
	int resource_valid;
} intel_private;

#ifdef USE_PCI_DMA_API
static int intel_agp_map_page(struct page *page, dma_addr_t *ret)
{
	*ret = pci_map_page(intel_private.pcidev, page, 0,
			    PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
	if (pci_dma_mapping_error(intel_private.pcidev, *ret))
		return -EINVAL;
	return 0;
}

static void intel_agp_unmap_page(struct page *page, dma_addr_t dma)
{
	pci_unmap_page(intel_private.pcidev, dma,
		       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
}

static void intel_agp_free_sglist(struct agp_memory *mem)
{
	struct sg_table st;

	st.sgl = mem->sg_list;
	st.orig_nents = st.nents = mem->page_count;

	sg_free_table(&st);

	mem->sg_list = NULL;
	mem->num_sg = 0;
}

static int intel_agp_map_memory(struct agp_memory *mem)
{
	struct sg_table st;
	struct scatterlist *sg;
	int i;

	DBG("try mapping %lu pages\n", (unsigned long)mem->page_count);

	if (sg_alloc_table(&st, mem->page_count, GFP_KERNEL))
		return -ENOMEM;

	mem->sg_list = sg = st.sgl;

	for (i = 0; i < mem->page_count; i++, sg = sg_next(sg))
		sg_set_page(sg, mem->pages[i], PAGE_SIZE, 0);

	mem->num_sg = pci_map_sg(intel_private.pcidev, mem->sg_list,
				 mem->page_count, PCI_DMA_BIDIRECTIONAL);
	if (unlikely(!mem->num_sg)) {
		intel_agp_free_sglist(mem);
		return -ENOMEM;
	}
	return 0;
}

static void intel_agp_unmap_memory(struct agp_memory *mem)
{
	DBG("try unmapping %lu pages\n", (unsigned long)mem->page_count);

	pci_unmap_sg(intel_private.pcidev, mem->sg_list,
		     mem->page_count, PCI_DMA_BIDIRECTIONAL);
	intel_agp_free_sglist(mem);
}

static void intel_agp_insert_sg_entries(struct agp_memory *mem,
					off_t pg_start, int mask_type)
{
	struct scatterlist *sg;
	int i, j;

	j = pg_start;

	WARN_ON(!mem->num_sg);

	if (mem->num_sg == mem->page_count) {
		for_each_sg(mem->sg_list, sg, mem->page_count, i) {
			writel(agp_bridge->driver->mask_memory(agp_bridge,
					sg_dma_address(sg), mask_type),
			       intel_private.gtt+j);
			j++;
		}
	} else {
		/* sg may merge pages, but we have to separate
		 * per-page addr for GTT */
		unsigned int len, m;

		for_each_sg(mem->sg_list, sg, mem->num_sg, i) {
			len = sg_dma_len(sg) / PAGE_SIZE;
			for (m = 0; m < len; m++) {
				writel(agp_bridge->driver->mask_memory(agp_bridge,
						sg_dma_address(sg) + m * PAGE_SIZE,
						mask_type),
				       intel_private.gtt+j);
				j++;
			}
		}
	}
	readl(intel_private.gtt+j-1);
}
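/*
 * Illustrative example (not part of the original source): if the IOMMU
 * merged four contiguous pages into one scatterlist entry, that entry has
 * sg_dma_len() == 16384, so the loop above expands it into four GTT PTEs at
 * sg_dma_address(sg) + 0, + 4096, + 8192 and + 12288. The trailing readl()
 * flushes the posted PTE writes out to the chipset.
 */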

#else

static void intel_agp_insert_sg_entries(struct agp_memory *mem,
					off_t pg_start, int mask_type)
{
	int i, j;
	u32 cache_bits = 0;

	if (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB ||
	    agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB)
	{
		cache_bits = I830_PTE_SYSTEM_CACHED;
	}

	for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
		writel(agp_bridge->driver->mask_memory(agp_bridge,
				page_to_phys(mem->pages[i]), mask_type),
		       intel_private.gtt+j);
	}

	readl(intel_private.gtt+j-1);
}

#endif

static int intel_i810_fetch_size(void)
{
	u32 smram_miscc;
	struct aper_size_info_fixed *values;

	pci_read_config_dword(agp_bridge->dev, I810_SMRAM_MISCC, &smram_miscc);
	values = A_SIZE_FIX(agp_bridge->driver->aperture_sizes);

	if ((smram_miscc & I810_GMS) == I810_GMS_DISABLE) {
		dev_warn(&agp_bridge->dev->dev, "i810 is disabled\n");
		return 0;
	}
	if ((smram_miscc & I810_GFX_MEM_WIN_SIZE) == I810_GFX_MEM_WIN_32M) {
		agp_bridge->current_size = (void *) (values + 1);
		agp_bridge->aperture_size_idx = 1;
		return values[1].size;
	} else {
		agp_bridge->current_size = (void *) (values);
		agp_bridge->aperture_size_idx = 0;
		return values[0].size;
	}

	return 0;
}

static int intel_i810_configure(void)
{
	struct aper_size_info_fixed *current_size;
	u32 temp;
	int i;

	current_size = A_SIZE_FIX(agp_bridge->current_size);

	if (!intel_private.registers) {
		pci_read_config_dword(intel_private.pcidev, I810_MMADDR, &temp);
		temp &= 0xfff80000;

		intel_private.registers = ioremap(temp, 128 * 4096);
		if (!intel_private.registers) {
			dev_err(&intel_private.pcidev->dev,
				"can't remap memory\n");
			return -ENOMEM;
		}
	}

	if ((readl(intel_private.registers+I810_DRAM_CTL)
	    & I810_DRAM_ROW_0) == I810_DRAM_ROW_0_SDRAM) {
		/* This will need to be dynamically assigned */
		dev_info(&intel_private.pcidev->dev,
			 "detected 4MB dedicated video ram\n");
		intel_private.num_dcache_entries = 1024;
	}
	pci_read_config_dword(intel_private.pcidev, I810_GMADDR, &temp);
	agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
	writel(agp_bridge->gatt_bus_addr | I810_PGETBL_ENABLED, intel_private.registers+I810_PGETBL_CTL);
	readl(intel_private.registers+I810_PGETBL_CTL);	/* PCI Posting. */

	if (agp_bridge->driver->needs_scratch_page) {
		for (i = 0; i < current_size->num_entries; i++) {
			writel(agp_bridge->scratch_page, intel_private.registers+I810_PTE_BASE+(i*4));
		}
		readl(intel_private.registers+I810_PTE_BASE+((i-1)*4));	/* PCI posting. */
	}
	global_cache_flush();
	return 0;
}

static void intel_i810_cleanup(void)
{
	writel(0, intel_private.registers+I810_PGETBL_CTL);
	readl(intel_private.registers);	/* PCI Posting. */
	iounmap(intel_private.registers);
}

static void intel_i810_agp_enable(struct agp_bridge_data *bridge, u32 mode)
{
	return;
}

/* Exists to support ARGB cursors */
static struct page *i8xx_alloc_pages(void)
{
	struct page *page;

	page = alloc_pages(GFP_KERNEL | GFP_DMA32, 2);
	if (page == NULL)
		return NULL;

	if (set_pages_uc(page, 4) < 0) {
		set_pages_wb(page, 4);
		__free_pages(page, 2);
		return NULL;
	}
	get_page(page);
	atomic_inc(&agp_bridge->current_memory_agp);
	return page;
}
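/*
 * Size check (illustrative, not from the original source): an order-2
 * allocation is 2^2 = 4 pages = 16KB, which is exactly what a 64x64 ARGB
 * cursor needs (64 * 64 pixels * 4 bytes per pixel = 16384 bytes).
 */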

static void i8xx_destroy_pages(struct page *page)
{
	if (page == NULL)
		return;

	set_pages_wb(page, 4);
	put_page(page);
	__free_pages(page, 2);
	atomic_dec(&agp_bridge->current_memory_agp);
}

static int intel_i830_type_to_mask_type(struct agp_bridge_data *bridge,
					int type)
{
	if (type < AGP_USER_TYPES)
		return type;
	else if (type == AGP_USER_CACHED_MEMORY)
		return INTEL_AGP_CACHED_MEMORY;
	else
		return 0;
}

static int intel_i810_insert_entries(struct agp_memory *mem, off_t pg_start,
				     int type)
{
	int i, j, num_entries;
	void *temp;
	int ret = -EINVAL;
	int mask_type;

	if (mem->page_count == 0)
		goto out;

	temp = agp_bridge->current_size;
	num_entries = A_SIZE_FIX(temp)->num_entries;

	if ((pg_start + mem->page_count) > num_entries)
		goto out_err;

	for (j = pg_start; j < (pg_start + mem->page_count); j++) {
		if (!PGE_EMPTY(agp_bridge, readl(agp_bridge->gatt_table+j))) {
			ret = -EBUSY;
			goto out_err;
		}
	}

	if (type != mem->type)
		goto out_err;

	mask_type = agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type);

	switch (mask_type) {
	case AGP_DCACHE_MEMORY:
		if (!mem->is_flushed)
			global_cache_flush();
		for (i = pg_start; i < (pg_start + mem->page_count); i++) {
			writel((i*4096)|I810_PTE_LOCAL|I810_PTE_VALID,
			       intel_private.registers+I810_PTE_BASE+(i*4));
		}
		readl(intel_private.registers+I810_PTE_BASE+((i-1)*4));
		break;
	case AGP_PHYS_MEMORY:
	case AGP_NORMAL_MEMORY:
		if (!mem->is_flushed)
			global_cache_flush();
		for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
			writel(agp_bridge->driver->mask_memory(agp_bridge,
					page_to_phys(mem->pages[i]), mask_type),
			       intel_private.registers+I810_PTE_BASE+(j*4));
		}
		readl(intel_private.registers+I810_PTE_BASE+((j-1)*4));
		break;
	default:
		goto out_err;
	}

out:
	ret = 0;
out_err:
	mem->is_flushed = true;
	return ret;
}

static int intel_i810_remove_entries(struct agp_memory *mem, off_t pg_start,
				     int type)
{
	int i;

	if (mem->page_count == 0)
		return 0;

	for (i = pg_start; i < (mem->page_count + pg_start); i++) {
		writel(agp_bridge->scratch_page, intel_private.registers+I810_PTE_BASE+(i*4));
	}
	readl(intel_private.registers+I810_PTE_BASE+((i-1)*4));

	return 0;
}

/*
 * The i810/i830 requires a physical address to program its mouse
 * pointer into hardware.
 * However, the Xserver still writes to it through the agp aperture.
 */
static struct agp_memory *alloc_agpphysmem_i8xx(size_t pg_count, int type)
{
	struct agp_memory *new;
	struct page *page;

	switch (pg_count) {
	case 1: page = agp_bridge->driver->agp_alloc_page(agp_bridge);
		break;
	case 4:
		/* kludge to get 4 physical pages for ARGB cursor */
		page = i8xx_alloc_pages();
		break;
	default:
		return NULL;
	}

	if (page == NULL)
		return NULL;

	new = agp_create_memory(pg_count);
	if (new == NULL)
		return NULL;

	new->pages[0] = page;
	if (pg_count == 4) {
		/* kludge to get 4 physical pages for ARGB cursor */
		new->pages[1] = new->pages[0] + 1;
		new->pages[2] = new->pages[1] + 1;
		new->pages[3] = new->pages[2] + 1;
	}
	new->page_count = pg_count;
	new->num_scratch_pages = pg_count;
	new->type = AGP_PHYS_MEMORY;
	new->physical = page_to_phys(new->pages[0]);
	return new;
}

static struct agp_memory *intel_i810_alloc_by_type(size_t pg_count, int type)
{
	struct agp_memory *new;

	if (type == AGP_DCACHE_MEMORY) {
		if (pg_count != intel_private.num_dcache_entries)
			return NULL;

		new = agp_create_memory(1);
		if (new == NULL)
			return NULL;

		new->type = AGP_DCACHE_MEMORY;
		new->page_count = pg_count;
		new->num_scratch_pages = 0;
		agp_free_page_array(new);
		return new;
	}
	if (type == AGP_PHYS_MEMORY)
		return alloc_agpphysmem_i8xx(pg_count, type);
	return NULL;
}

static void intel_i810_free_by_type(struct agp_memory *curr)
{
	agp_free_key(curr->key);
	if (curr->type == AGP_PHYS_MEMORY) {
		if (curr->page_count == 4)
			i8xx_destroy_pages(curr->pages[0]);
		else {
			agp_bridge->driver->agp_destroy_page(curr->pages[0],
							     AGP_PAGE_DESTROY_UNMAP);
			agp_bridge->driver->agp_destroy_page(curr->pages[0],
							     AGP_PAGE_DESTROY_FREE);
		}
		agp_free_page_array(curr);
	}
	kfree(curr);
}

static unsigned long intel_i810_mask_memory(struct agp_bridge_data *bridge,
					    dma_addr_t addr, int type)
{
	/* Type checking must be done elsewhere */
	return addr | bridge->driver->masks[type].mask;
}
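/*
 * Worked example (illustrative, not from the original source): for a page
 * at physical address 0x12345000 inserted as AGP_NORMAL_MEMORY (mask_type
 * 0), the mask from intel_i810_masks[0] is I810_PTE_VALID, so the PTE
 * written into the GTT would be
 *
 *	0x12345000 | 0x00000001 == 0x12345001
 *
 * assuming I810_PTE_VALID is bit 0 as defined in intel-agp.h.
 */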

static struct aper_size_info_fixed intel_i830_sizes[] =
{
	{128, 32768, 5},
	/* The 64M mode still requires a 128k gatt */
	{64, 16384, 5},
	{256, 65536, 6},
	{512, 131072, 7},
};

static void intel_i830_init_gtt_entries(void)
{
	u16 gmch_ctrl;
	int gtt_entries = 0;
	u8 rdct;
	int local = 0;
	static const int ddt[4] = { 0, 16, 32, 64 };
	int size;	/* reserved space (in kb) at the top of stolen memory */

	pci_read_config_word(agp_bridge->dev, I830_GMCH_CTRL, &gmch_ctrl);

	if (IS_I965) {
		u32 pgetbl_ctl;
		pgetbl_ctl = readl(intel_private.registers+I810_PGETBL_CTL);

		/* The 965 has a field telling us the size of the GTT,
		 * which may be larger than what is necessary to map the
		 * aperture.
		 */
		switch (pgetbl_ctl & I965_PGETBL_SIZE_MASK) {
		case I965_PGETBL_SIZE_128KB:
			size = 128;
			break;
		case I965_PGETBL_SIZE_256KB:
			size = 256;
			break;
		case I965_PGETBL_SIZE_512KB:
			size = 512;
			break;
		case I965_PGETBL_SIZE_1MB:
			size = 1024;
			break;
		case I965_PGETBL_SIZE_2MB:
			size = 2048;
			break;
		case I965_PGETBL_SIZE_1_5MB:
			size = 1024 + 512;
			break;
		default:
			dev_info(&intel_private.pcidev->dev,
				 "unknown page table size, assuming 512KB\n");
			size = 512;
		}
		size += 4;	/* add in BIOS popup space */
	} else if (IS_G33 && !IS_PINEVIEW) {
		/* G33's GTT size defined in gmch_ctrl */
		switch (gmch_ctrl & G33_PGETBL_SIZE_MASK) {
		case G33_PGETBL_SIZE_1M:
			size = 1024;
			break;
		case G33_PGETBL_SIZE_2M:
			size = 2048;
			break;
		default:
			dev_info(&agp_bridge->dev->dev,
				 "unknown page table size 0x%x, assuming 512KB\n",
				 (gmch_ctrl & G33_PGETBL_SIZE_MASK));
			size = 512;
		}
		size += 4;
	} else if (IS_G4X || IS_PINEVIEW) {
		/* On 4 series hardware, GTT stolen is separate from graphics
		 * stolen, so ignore it when counting stolen gtt entries.
		 * However, 4KB of the stolen memory doesn't get mapped to the
		 * GTT.
		 */
		size = 4;
	} else {
		/* On previous hardware, the GTT size was just what was
		 * required to map the aperture.
		 */
		size = agp_bridge->driver->fetch_size() + 4;
	}

	if (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82830_HB ||
	    agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82845G_HB) {
		switch (gmch_ctrl & I830_GMCH_GMS_MASK) {
		case I830_GMCH_GMS_STOLEN_512:
			gtt_entries = KB(512) - KB(size);
			break;
		case I830_GMCH_GMS_STOLEN_1024:
			gtt_entries = MB(1) - KB(size);
			break;
		case I830_GMCH_GMS_STOLEN_8192:
			gtt_entries = MB(8) - KB(size);
			break;
		case I830_GMCH_GMS_LOCAL:
			rdct = readb(intel_private.registers+I830_RDRAM_CHANNEL_TYPE);
			gtt_entries = (I830_RDRAM_ND(rdct) + 1) *
					MB(ddt[I830_RDRAM_DDT(rdct)]);
			local = 1;
			break;
		default:
			gtt_entries = 0;
			break;
		}
	} else if (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB ||
		   agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB) {
		/*
		 * SandyBridge has a new memory control reg at 0x50.w
		 */
		u16 snb_gmch_ctl;
		pci_read_config_word(intel_private.pcidev, SNB_GMCH_CTRL, &snb_gmch_ctl);
		switch (snb_gmch_ctl & SNB_GMCH_GMS_STOLEN_MASK) {
		case SNB_GMCH_GMS_STOLEN_32M:
			gtt_entries = MB(32) - KB(size);
			break;
		case SNB_GMCH_GMS_STOLEN_64M:
			gtt_entries = MB(64) - KB(size);
			break;
		case SNB_GMCH_GMS_STOLEN_96M:
			gtt_entries = MB(96) - KB(size);
			break;
		case SNB_GMCH_GMS_STOLEN_128M:
			gtt_entries = MB(128) - KB(size);
			break;
		case SNB_GMCH_GMS_STOLEN_160M:
			gtt_entries = MB(160) - KB(size);
			break;
		case SNB_GMCH_GMS_STOLEN_192M:
			gtt_entries = MB(192) - KB(size);
			break;
		case SNB_GMCH_GMS_STOLEN_224M:
			gtt_entries = MB(224) - KB(size);
			break;
		case SNB_GMCH_GMS_STOLEN_256M:
			gtt_entries = MB(256) - KB(size);
			break;
		case SNB_GMCH_GMS_STOLEN_288M:
			gtt_entries = MB(288) - KB(size);
			break;
		case SNB_GMCH_GMS_STOLEN_320M:
			gtt_entries = MB(320) - KB(size);
			break;
		case SNB_GMCH_GMS_STOLEN_352M:
			gtt_entries = MB(352) - KB(size);
			break;
		case SNB_GMCH_GMS_STOLEN_384M:
			gtt_entries = MB(384) - KB(size);
			break;
		case SNB_GMCH_GMS_STOLEN_416M:
			gtt_entries = MB(416) - KB(size);
			break;
		case SNB_GMCH_GMS_STOLEN_448M:
			gtt_entries = MB(448) - KB(size);
			break;
		case SNB_GMCH_GMS_STOLEN_480M:
			gtt_entries = MB(480) - KB(size);
			break;
		case SNB_GMCH_GMS_STOLEN_512M:
			gtt_entries = MB(512) - KB(size);
			break;
		}
	} else {
		switch (gmch_ctrl & I855_GMCH_GMS_MASK) {
		case I855_GMCH_GMS_STOLEN_1M:
			gtt_entries = MB(1) - KB(size);
			break;
		case I855_GMCH_GMS_STOLEN_4M:
			gtt_entries = MB(4) - KB(size);
			break;
		case I855_GMCH_GMS_STOLEN_8M:
			gtt_entries = MB(8) - KB(size);
			break;
		case I855_GMCH_GMS_STOLEN_16M:
			gtt_entries = MB(16) - KB(size);
			break;
		case I855_GMCH_GMS_STOLEN_32M:
			gtt_entries = MB(32) - KB(size);
			break;
		case I915_GMCH_GMS_STOLEN_48M:
			/* Check it's really I915G */
			if (IS_I915 || IS_I965 || IS_G33 || IS_G4X)
				gtt_entries = MB(48) - KB(size);
			else
				gtt_entries = 0;
			break;
		case I915_GMCH_GMS_STOLEN_64M:
			/* Check it's really I915G */
			if (IS_I915 || IS_I965 || IS_G33 || IS_G4X)
				gtt_entries = MB(64) - KB(size);
			else
				gtt_entries = 0;
			break;
		case G33_GMCH_GMS_STOLEN_128M:
			if (IS_G33 || IS_I965 || IS_G4X)
				gtt_entries = MB(128) - KB(size);
			else
				gtt_entries = 0;
			break;
		case G33_GMCH_GMS_STOLEN_256M:
			if (IS_G33 || IS_I965 || IS_G4X)
				gtt_entries = MB(256) - KB(size);
			else
				gtt_entries = 0;
			break;
		case INTEL_GMCH_GMS_STOLEN_96M:
			if (IS_I965 || IS_G4X)
				gtt_entries = MB(96) - KB(size);
			else
				gtt_entries = 0;
			break;
		case INTEL_GMCH_GMS_STOLEN_160M:
			if (IS_I965 || IS_G4X)
				gtt_entries = MB(160) - KB(size);
			else
				gtt_entries = 0;
			break;
		case INTEL_GMCH_GMS_STOLEN_224M:
			if (IS_I965 || IS_G4X)
				gtt_entries = MB(224) - KB(size);
			else
				gtt_entries = 0;
			break;
		case INTEL_GMCH_GMS_STOLEN_352M:
			if (IS_I965 || IS_G4X)
				gtt_entries = MB(352) - KB(size);
			else
				gtt_entries = 0;
			break;
		default:
			gtt_entries = 0;
			break;
		}
	}
	if (gtt_entries > 0) {
		dev_info(&agp_bridge->dev->dev, "detected %dK %s memory\n",
			 gtt_entries / KB(1), local ? "local" : "stolen");
		gtt_entries /= KB(4);
	} else {
		dev_info(&agp_bridge->dev->dev,
			 "no pre-allocated video memory detected\n");
		gtt_entries = 0;
	}

	intel_private.gtt_entries = gtt_entries;
}
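/*
 * Worked example (illustrative, not from chipset documentation, assuming
 * the usual byte-count KB()/MB() macros from intel-agp.h): on an i915 with
 * a 256MB aperture, the else branch above yields size = 256 + 4 = 260, the
 * KB of stolen memory reserved for the GTT itself plus the BIOS popup. If
 * the GMCH reports I855_GMCH_GMS_STOLEN_32M, then
 * gtt_entries = MB(32) - KB(260) = 33288192 bytes, the driver reports
 * "detected 32508K stolen memory", and the division by KB(4) leaves
 * 8127 4KB GTT entries already mapped to stolen memory.
 */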

static void intel_i830_fini_flush(void)
{
	kunmap(intel_private.i8xx_page);
	intel_private.i8xx_flush_page = NULL;
	unmap_page_from_agp(intel_private.i8xx_page);

	__free_page(intel_private.i8xx_page);
	intel_private.i8xx_page = NULL;
}

static void intel_i830_setup_flush(void)
{
	/* return if we've already set the flush mechanism up */
	if (intel_private.i8xx_page)
		return;

	intel_private.i8xx_page = alloc_page(GFP_KERNEL | __GFP_ZERO | GFP_DMA32);
	if (!intel_private.i8xx_page)
		return;

	intel_private.i8xx_flush_page = kmap(intel_private.i8xx_page);
	if (!intel_private.i8xx_flush_page)
		intel_i830_fini_flush();
}

/* The chipset_flush interface needs to get data that has already been
 * flushed out of the CPU all the way out to main memory, because the GPU
 * doesn't snoop those buffers.
 *
 * The 8xx series doesn't have the same lovely interface for flushing the
 * chipset write buffers that the later chips do. According to the 865
 * specs, it's 64 octwords, or 1KB. So, to get those previous things in
 * that buffer out, we just fill 1KB and clflush it out, on the assumption
 * that it'll push whatever was in there out. It appears to work.
 */
static void intel_i830_chipset_flush(struct agp_bridge_data *bridge)
{
	unsigned int *pg = intel_private.i8xx_flush_page;

	memset(pg, 0, 1024);

	if (cpu_has_clflush)
		clflush_cache_range(pg, 1024);
	else if (wbinvd_on_all_cpus() != 0)
		printk(KERN_ERR "Timed out waiting for cache flush.\n");
}
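/*
 * Size check (illustrative): 64 octwords at 16 bytes each is exactly the
 * 1024 bytes the memset()/clflush_cache_range() above cover, so one pass
 * over the flush page displaces the whole chipset write buffer.
 */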

/* The intel i830 automatically initializes the agp aperture during POST.
 * Use the memory already set aside for it in the GTT.
 */
static int intel_i830_create_gatt_table(struct agp_bridge_data *bridge)
{
	int page_order;
	struct aper_size_info_fixed *size;
	int num_entries;
	u32 temp;

	size = agp_bridge->current_size;
	page_order = size->page_order;
	num_entries = size->num_entries;
	agp_bridge->gatt_table_real = NULL;

	pci_read_config_dword(intel_private.pcidev, I810_MMADDR, &temp);
	temp &= 0xfff80000;

	intel_private.registers = ioremap(temp, 128 * 4096);
	if (!intel_private.registers)
		return -ENOMEM;

	temp = readl(intel_private.registers+I810_PGETBL_CTL) & 0xfffff000;
	global_cache_flush();	/* FIXME: ?? */

	/* we have to call this as early as possible after the MMIO base address is known */
	intel_i830_init_gtt_entries();
	if (intel_private.gtt_entries == 0) {
		iounmap(intel_private.registers);
		return -ENOMEM;
	}

	agp_bridge->gatt_table = NULL;

	agp_bridge->gatt_bus_addr = temp;

	return 0;
}

/* Return the gatt table to a sane state. Use the top of stolen
 * memory for the GTT.
 */
static int intel_i830_free_gatt_table(struct agp_bridge_data *bridge)
{
	return 0;
}

static int intel_i830_fetch_size(void)
{
	u16 gmch_ctrl;
	struct aper_size_info_fixed *values;

	values = A_SIZE_FIX(agp_bridge->driver->aperture_sizes);

	if (agp_bridge->dev->device != PCI_DEVICE_ID_INTEL_82830_HB &&
	    agp_bridge->dev->device != PCI_DEVICE_ID_INTEL_82845G_HB) {
		/* 855GM/852GM/865G has 128MB aperture size */
		agp_bridge->current_size = (void *) values;
		agp_bridge->aperture_size_idx = 0;
		return values[0].size;
	}

	pci_read_config_word(agp_bridge->dev, I830_GMCH_CTRL, &gmch_ctrl);

	if ((gmch_ctrl & I830_GMCH_MEM_MASK) == I830_GMCH_MEM_128M) {
		agp_bridge->current_size = (void *) values;
		agp_bridge->aperture_size_idx = 0;
		return values[0].size;
	} else {
		agp_bridge->current_size = (void *) (values + 1);
		agp_bridge->aperture_size_idx = 1;
		return values[1].size;
	}

	return 0;
}

static int intel_i830_configure(void)
{
	struct aper_size_info_fixed *current_size;
	u32 temp;
	u16 gmch_ctrl;
	int i;

	current_size = A_SIZE_FIX(agp_bridge->current_size);

	pci_read_config_dword(intel_private.pcidev, I810_GMADDR, &temp);
	agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);

	pci_read_config_word(agp_bridge->dev, I830_GMCH_CTRL, &gmch_ctrl);
	gmch_ctrl |= I830_GMCH_ENABLED;
	pci_write_config_word(agp_bridge->dev, I830_GMCH_CTRL, gmch_ctrl);

	writel(agp_bridge->gatt_bus_addr|I810_PGETBL_ENABLED, intel_private.registers+I810_PGETBL_CTL);
	readl(intel_private.registers+I810_PGETBL_CTL);	/* PCI Posting. */

	if (agp_bridge->driver->needs_scratch_page) {
		for (i = intel_private.gtt_entries; i < current_size->num_entries; i++) {
			writel(agp_bridge->scratch_page, intel_private.registers+I810_PTE_BASE+(i*4));
		}
		readl(intel_private.registers+I810_PTE_BASE+((i-1)*4));	/* PCI Posting. */
	}

	global_cache_flush();

	intel_i830_setup_flush();
	return 0;
}

static void intel_i830_cleanup(void)
{
	iounmap(intel_private.registers);
}

static int intel_i830_insert_entries(struct agp_memory *mem, off_t pg_start,
				     int type)
{
	int i, j, num_entries;
	void *temp;
	int ret = -EINVAL;
	int mask_type;

	if (mem->page_count == 0)
		goto out;

	temp = agp_bridge->current_size;
	num_entries = A_SIZE_FIX(temp)->num_entries;

	if (pg_start < intel_private.gtt_entries) {
		dev_printk(KERN_DEBUG, &intel_private.pcidev->dev,
			   "pg_start == 0x%.8lx, intel_private.gtt_entries == 0x%.8x\n",
			   pg_start, intel_private.gtt_entries);

		dev_info(&intel_private.pcidev->dev,
			 "trying to insert into local/stolen memory\n");
		goto out_err;
	}

	if ((pg_start + mem->page_count) > num_entries)
		goto out_err;

	/* The i830 can't check the GTT for entries since it's read-only;
	 * depend on the caller to make the correct offset decisions.
	 */

	if (type != mem->type)
		goto out_err;

	mask_type = agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type);

	if (mask_type != 0 && mask_type != AGP_PHYS_MEMORY &&
	    mask_type != INTEL_AGP_CACHED_MEMORY)
		goto out_err;

	if (!mem->is_flushed)
		global_cache_flush();

	for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
		writel(agp_bridge->driver->mask_memory(agp_bridge,
				page_to_phys(mem->pages[i]), mask_type),
		       intel_private.registers+I810_PTE_BASE+(j*4));
	}
	readl(intel_private.registers+I810_PTE_BASE+((j-1)*4));

out:
	ret = 0;
out_err:
	mem->is_flushed = true;
	return ret;
}

static int intel_i830_remove_entries(struct agp_memory *mem, off_t pg_start,
				     int type)
{
	int i;

	if (mem->page_count == 0)
		return 0;

	if (pg_start < intel_private.gtt_entries) {
		dev_info(&intel_private.pcidev->dev,
			 "trying to disable local/stolen memory\n");
		return -EINVAL;
	}

	for (i = pg_start; i < (mem->page_count + pg_start); i++) {
		writel(agp_bridge->scratch_page, intel_private.registers+I810_PTE_BASE+(i*4));
	}
	readl(intel_private.registers+I810_PTE_BASE+((i-1)*4));

	return 0;
}

static struct agp_memory *intel_i830_alloc_by_type(size_t pg_count, int type)
{
	if (type == AGP_PHYS_MEMORY)
		return alloc_agpphysmem_i8xx(pg_count, type);
	/* always return NULL for other allocation types for now */
	return NULL;
}

static int intel_alloc_chipset_flush_resource(void)
{
	int ret;
	ret = pci_bus_alloc_resource(agp_bridge->dev->bus, &intel_private.ifp_resource, PAGE_SIZE,
				     PAGE_SIZE, PCIBIOS_MIN_MEM, 0,
				     pcibios_align_resource, agp_bridge->dev);

	return ret;
}

static void intel_i915_setup_chipset_flush(void)
{
	int ret;
	u32 temp;

	pci_read_config_dword(agp_bridge->dev, I915_IFPADDR, &temp);
	if (!(temp & 0x1)) {
		intel_alloc_chipset_flush_resource();
		intel_private.resource_valid = 1;
		pci_write_config_dword(agp_bridge->dev, I915_IFPADDR, (intel_private.ifp_resource.start & 0xffffffff) | 0x1);
	} else {
		temp &= ~1;

		intel_private.resource_valid = 1;
		intel_private.ifp_resource.start = temp;
		intel_private.ifp_resource.end = temp + PAGE_SIZE;
		ret = request_resource(&iomem_resource, &intel_private.ifp_resource);
		/* some BIOSes reserve this area in a pnp, some don't */
		if (ret)
			intel_private.resource_valid = 0;
	}
}

static void intel_i965_g33_setup_chipset_flush(void)
{
	u32 temp_hi, temp_lo;
	int ret;

	pci_read_config_dword(agp_bridge->dev, I965_IFPADDR + 4, &temp_hi);
	pci_read_config_dword(agp_bridge->dev, I965_IFPADDR, &temp_lo);

	if (!(temp_lo & 0x1)) {

		intel_alloc_chipset_flush_resource();

		intel_private.resource_valid = 1;
		pci_write_config_dword(agp_bridge->dev, I965_IFPADDR + 4,
			upper_32_bits(intel_private.ifp_resource.start));
		pci_write_config_dword(agp_bridge->dev, I965_IFPADDR, (intel_private.ifp_resource.start & 0xffffffff) | 0x1);
	} else {
		u64 l64;

		temp_lo &= ~0x1;
		l64 = ((u64)temp_hi << 32) | temp_lo;

		intel_private.resource_valid = 1;
		intel_private.ifp_resource.start = l64;
		intel_private.ifp_resource.end = l64 + PAGE_SIZE;
		ret = request_resource(&iomem_resource, &intel_private.ifp_resource);
		/* some BIOSes reserve this area in a pnp, some don't */
		if (ret)
			intel_private.resource_valid = 0;
	}
}

static void intel_i9xx_setup_flush(void)
{
	/* return if already configured */
	if (intel_private.ifp_resource.start)
		return;

	if (IS_SNB)
		return;

	/* setup a resource for this object */
	intel_private.ifp_resource.name = "Intel Flush Page";
	intel_private.ifp_resource.flags = IORESOURCE_MEM;

	/* Setup chipset flush for 915 */
	if (IS_I965 || IS_G33 || IS_G4X) {
		intel_i965_g33_setup_chipset_flush();
	} else {
		intel_i915_setup_chipset_flush();
	}

	if (intel_private.ifp_resource.start) {
		intel_private.i9xx_flush_page = ioremap_nocache(intel_private.ifp_resource.start, PAGE_SIZE);
		if (!intel_private.i9xx_flush_page)
			dev_info(&intel_private.pcidev->dev, "can't ioremap flush page - no chipset flushing");
	}
}

static int intel_i9xx_configure(void)
{
	struct aper_size_info_fixed *current_size;
	u32 temp;
	u16 gmch_ctrl;
	int i;

	current_size = A_SIZE_FIX(agp_bridge->current_size);

	pci_read_config_dword(intel_private.pcidev, I915_GMADDR, &temp);

	agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);

	pci_read_config_word(agp_bridge->dev, I830_GMCH_CTRL, &gmch_ctrl);
	gmch_ctrl |= I830_GMCH_ENABLED;
	pci_write_config_word(agp_bridge->dev, I830_GMCH_CTRL, gmch_ctrl);

	writel(agp_bridge->gatt_bus_addr|I810_PGETBL_ENABLED, intel_private.registers+I810_PGETBL_CTL);
	readl(intel_private.registers+I810_PGETBL_CTL);	/* PCI Posting. */

	if (agp_bridge->driver->needs_scratch_page) {
		for (i = intel_private.gtt_entries; i < intel_private.gtt_total_size; i++) {
			writel(agp_bridge->scratch_page, intel_private.gtt+i);
		}
		readl(intel_private.gtt+i-1);	/* PCI Posting. */
	}

	global_cache_flush();

	intel_i9xx_setup_flush();

	return 0;
}

static void intel_i915_cleanup(void)
{
	if (intel_private.i9xx_flush_page)
		iounmap(intel_private.i9xx_flush_page);
	if (intel_private.resource_valid)
		release_resource(&intel_private.ifp_resource);
	intel_private.ifp_resource.start = 0;
	intel_private.resource_valid = 0;
	iounmap(intel_private.gtt);
	iounmap(intel_private.registers);
}

static void intel_i915_chipset_flush(struct agp_bridge_data *bridge)
{
	if (intel_private.i9xx_flush_page)
		writel(1, intel_private.i9xx_flush_page);
}

static int intel_i915_insert_entries(struct agp_memory *mem, off_t pg_start,
				     int type)
{
	int num_entries;
	void *temp;
	int ret = -EINVAL;
	int mask_type;

	if (mem->page_count == 0)
		goto out;

	temp = agp_bridge->current_size;
	num_entries = A_SIZE_FIX(temp)->num_entries;

	if (pg_start < intel_private.gtt_entries) {
		dev_printk(KERN_DEBUG, &intel_private.pcidev->dev,
			   "pg_start == 0x%.8lx, intel_private.gtt_entries == 0x%.8x\n",
			   pg_start, intel_private.gtt_entries);

		dev_info(&intel_private.pcidev->dev,
			 "trying to insert into local/stolen memory\n");
		goto out_err;
	}

	if ((pg_start + mem->page_count) > num_entries)
		goto out_err;

	/* The i915 can't check the GTT for entries since it's read only;
	 * depend on the caller to make the correct offset decisions.
	 */

	if (type != mem->type)
		goto out_err;

	mask_type = agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type);

	if (mask_type != 0 && mask_type != AGP_PHYS_MEMORY &&
	    mask_type != INTEL_AGP_CACHED_MEMORY)
		goto out_err;

	if (!mem->is_flushed)
		global_cache_flush();

	intel_agp_insert_sg_entries(mem, pg_start, mask_type);

 out:
	ret = 0;
 out_err:
	mem->is_flushed = true;
	return ret;
}

static int intel_i915_remove_entries(struct agp_memory *mem, off_t pg_start,
				     int type)
{
	int i;

	if (mem->page_count == 0)
		return 0;

	if (pg_start < intel_private.gtt_entries) {
		dev_info(&intel_private.pcidev->dev,
			 "trying to disable local/stolen memory\n");
		return -EINVAL;
	}

	for (i = pg_start; i < (mem->page_count + pg_start); i++)
		writel(agp_bridge->scratch_page, intel_private.gtt+i);

	readl(intel_private.gtt+i-1);

	return 0;
}

/* Return the aperture size by just checking the resource length. The effect
 * of the MSAC registers, as described in the spec, is just to change the
 * resource size.
 */
static int intel_i9xx_fetch_size(void)
{
	int num_sizes = ARRAY_SIZE(intel_i830_sizes);
	int aper_size;	/* size in megabytes */
	int i;

	aper_size = pci_resource_len(intel_private.pcidev, 2) / MB(1);

	for (i = 0; i < num_sizes; i++) {
		if (aper_size == intel_i830_sizes[i].size) {
			agp_bridge->current_size = intel_i830_sizes + i;
			return aper_size;
		}
	}

	return 0;
}
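/*
 * Illustrative example (not from the original source): if BAR 2 of the
 * graphics device is 256MB long, aper_size is 256, which matches
 * intel_i830_sizes[2] = {256, 65536, 6}, i.e. a 256MB aperture mapped by
 * 65536 GTT entries of 4KB each.
 */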

static int intel_i915_get_gtt_size(void)
{
	int size;

	if (IS_G33) {
		u16 gmch_ctrl;

		/* G33's GTT size defined in gmch_ctrl */
		pci_read_config_word(agp_bridge->dev, I830_GMCH_CTRL, &gmch_ctrl);
		switch (gmch_ctrl & G33_PGETBL_SIZE_MASK) {
		case G33_PGETBL_SIZE_1M:
			size = 1024;
			break;
		case G33_PGETBL_SIZE_2M:
			size = 2048;
			break;
		default:
			dev_info(&agp_bridge->dev->dev,
				 "unknown page table size 0x%x, assuming 512KB\n",
				 (gmch_ctrl & G33_PGETBL_SIZE_MASK));
			size = 512;
		}
	} else {
		/* On previous hardware, the GTT size was just what was
		 * required to map the aperture.
		 */
		size = agp_bridge->driver->fetch_size();
	}

	return KB(size);
}

/* The intel i915 automatically initializes the agp aperture during POST.
 * Use the memory already set aside for it in the GTT.
 */
static int intel_i915_create_gatt_table(struct agp_bridge_data *bridge)
{
	int page_order;
	struct aper_size_info_fixed *size;
	int num_entries;
	u32 temp, temp2;
	int gtt_map_size;

	size = agp_bridge->current_size;
	page_order = size->page_order;
	num_entries = size->num_entries;
	agp_bridge->gatt_table_real = NULL;

	pci_read_config_dword(intel_private.pcidev, I915_MMADDR, &temp);
	pci_read_config_dword(intel_private.pcidev, I915_PTEADDR, &temp2);

	gtt_map_size = intel_i915_get_gtt_size();

	intel_private.gtt = ioremap(temp2, gtt_map_size);
	if (!intel_private.gtt)
		return -ENOMEM;

	intel_private.gtt_total_size = gtt_map_size / 4;

	temp &= 0xfff80000;

	intel_private.registers = ioremap(temp, 128 * 4096);
	if (!intel_private.registers) {
		iounmap(intel_private.gtt);
		return -ENOMEM;
	}

	temp = readl(intel_private.registers+I810_PGETBL_CTL) & 0xfffff000;
	global_cache_flush();	/* FIXME: ? */

	/* we have to call this as early as possible after the MMIO base address is known */
	intel_i830_init_gtt_entries();
	if (intel_private.gtt_entries == 0) {
		iounmap(intel_private.gtt);
		iounmap(intel_private.registers);
		return -ENOMEM;
	}

	agp_bridge->gatt_table = NULL;

	agp_bridge->gatt_bus_addr = temp;

	return 0;
}

/*
 * The i965 supports 36-bit physical addresses, but to keep
 * the format of the GTT the same, the bits that don't fit
 * in a 32-bit word are shifted down to bits 4..7.
 *
 * Gcc is smart enough to notice that "(addr >> 28) & 0xf0"
 * is always zero on 32-bit architectures, so no need to make
 * this conditional.
 */
static unsigned long intel_i965_mask_memory(struct agp_bridge_data *bridge,
					    dma_addr_t addr, int type)
{
	/* Shift high bits down */
	addr |= (addr >> 28) & 0xf0;

	/* Type checking must be done elsewhere */
	return addr | bridge->driver->masks[type].mask;
}
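/*
 * Worked example (illustrative, not from the original source): for a page
 * at physical address 0x3_4000_0000 (address bits 33..32 = 0b11),
 * (addr >> 28) & 0xf0 == 0x30, so those high bits land in PTE bits 5..4.
 * Assuming I810_PTE_VALID is bit 0, the 32-bit PTE actually written into
 * the GTT is 0x40000031.
 */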

static unsigned long intel_gen6_mask_memory(struct agp_bridge_data *bridge,
					    dma_addr_t addr, int type)
{
	/* Shift high bits down */
	addr |= (addr >> 28) & 0xff;

	/* Type checking must be done elsewhere */
	return addr | bridge->driver->masks[type].mask;
}

static void intel_i965_get_gtt_range(int *gtt_offset, int *gtt_size)
{
	u16 snb_gmch_ctl;

	switch (agp_bridge->dev->device) {
	case PCI_DEVICE_ID_INTEL_GM45_HB:
	case PCI_DEVICE_ID_INTEL_EAGLELAKE_HB:
	case PCI_DEVICE_ID_INTEL_Q45_HB:
	case PCI_DEVICE_ID_INTEL_G45_HB:
	case PCI_DEVICE_ID_INTEL_G41_HB:
	case PCI_DEVICE_ID_INTEL_B43_HB:
	case PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB:
	case PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB:
	case PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB:
	case PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB:
		*gtt_offset = *gtt_size = MB(2);
		break;
	case PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB:
	case PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB:
		*gtt_offset = MB(2);

		pci_read_config_word(intel_private.pcidev, SNB_GMCH_CTRL, &snb_gmch_ctl);
		switch (snb_gmch_ctl & SNB_GTT_SIZE_MASK) {
		default:
		case SNB_GTT_SIZE_0M:
			printk(KERN_ERR "Bad GTT size mask: 0x%04x.\n", snb_gmch_ctl);
			*gtt_size = MB(0);
			break;
		case SNB_GTT_SIZE_1M:
			*gtt_size = MB(1);
			break;
		case SNB_GTT_SIZE_2M:
			*gtt_size = MB(2);
			break;
		}
		break;
	default:
		*gtt_offset = *gtt_size = KB(512);
	}
}
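/*
 * Illustrative example (not from chipset documentation): on a GM45 the GTT
 * lives at MMIO base + 2MB and is itself 2MB, i.e. 2MB / 4 bytes per PTE =
 * 512K entries, enough to map 512K * 4KB = 2GB of aperture space.
 */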

/* The intel i965 automatically initializes the agp aperture during POST.
 * Use the memory already set aside for it in the GTT.
 */
static int intel_i965_create_gatt_table(struct agp_bridge_data *bridge)
{
	int page_order;
	struct aper_size_info_fixed *size;
	int num_entries;
	u32 temp;
	int gtt_offset, gtt_size;

	size = agp_bridge->current_size;
	page_order = size->page_order;
	num_entries = size->num_entries;
	agp_bridge->gatt_table_real = NULL;

	pci_read_config_dword(intel_private.pcidev, I915_MMADDR, &temp);

	temp &= 0xfff00000;

	intel_i965_get_gtt_range(&gtt_offset, &gtt_size);

	intel_private.gtt = ioremap((temp + gtt_offset), gtt_size);

	if (!intel_private.gtt)
		return -ENOMEM;

	intel_private.gtt_total_size = gtt_size / 4;

	intel_private.registers = ioremap(temp, 128 * 4096);
	if (!intel_private.registers) {
		iounmap(intel_private.gtt);
		return -ENOMEM;
	}

	temp = readl(intel_private.registers+I810_PGETBL_CTL) & 0xfffff000;
	global_cache_flush();	/* FIXME: ? */

	/* we have to call this as early as possible after the MMIO base address is known */
	intel_i830_init_gtt_entries();
	if (intel_private.gtt_entries == 0) {
		iounmap(intel_private.gtt);
		iounmap(intel_private.registers);
		return -ENOMEM;
	}

	agp_bridge->gatt_table = NULL;

	agp_bridge->gatt_bus_addr = temp;

	return 0;
}

static const struct agp_bridge_driver intel_810_driver = {
	.owner = THIS_MODULE,
	.aperture_sizes = intel_i810_sizes,
	.size_type = FIXED_APER_SIZE,
	.num_aperture_sizes = 2,
	.needs_scratch_page = true,
	.configure = intel_i810_configure,
	.fetch_size = intel_i810_fetch_size,
	.cleanup = intel_i810_cleanup,
	.mask_memory = intel_i810_mask_memory,
	.masks = intel_i810_masks,
	.agp_enable = intel_i810_agp_enable,
	.cache_flush = global_cache_flush,
	.create_gatt_table = agp_generic_create_gatt_table,
	.free_gatt_table = agp_generic_free_gatt_table,
	.insert_memory = intel_i810_insert_entries,
	.remove_memory = intel_i810_remove_entries,
	.alloc_by_type = intel_i810_alloc_by_type,
	.free_by_type = intel_i810_free_by_type,
	.agp_alloc_page = agp_generic_alloc_page,
	.agp_alloc_pages = agp_generic_alloc_pages,
	.agp_destroy_page = agp_generic_destroy_page,
	.agp_destroy_pages = agp_generic_destroy_pages,
	.agp_type_to_mask_type = agp_generic_type_to_mask_type,
};

static const struct agp_bridge_driver intel_830_driver = {
	.owner = THIS_MODULE,
	.aperture_sizes = intel_i830_sizes,
	.size_type = FIXED_APER_SIZE,
	.num_aperture_sizes = 4,
	.needs_scratch_page = true,
	.configure = intel_i830_configure,
	.fetch_size = intel_i830_fetch_size,
	.cleanup = intel_i830_cleanup,
	.mask_memory = intel_i810_mask_memory,
	.masks = intel_i810_masks,
	.agp_enable = intel_i810_agp_enable,
	.cache_flush = global_cache_flush,
	.create_gatt_table = intel_i830_create_gatt_table,
	.free_gatt_table = intel_i830_free_gatt_table,
	.insert_memory = intel_i830_insert_entries,
	.remove_memory = intel_i830_remove_entries,
	.alloc_by_type = intel_i830_alloc_by_type,
	.free_by_type = intel_i810_free_by_type,
	.agp_alloc_page = agp_generic_alloc_page,
	.agp_alloc_pages = agp_generic_alloc_pages,
	.agp_destroy_page = agp_generic_destroy_page,
	.agp_destroy_pages = agp_generic_destroy_pages,
	.agp_type_to_mask_type = intel_i830_type_to_mask_type,
	.chipset_flush = intel_i830_chipset_flush,
};

static const struct agp_bridge_driver intel_915_driver = {
	.owner = THIS_MODULE,
	.aperture_sizes = intel_i830_sizes,
	.size_type = FIXED_APER_SIZE,
	.num_aperture_sizes = 4,
	.needs_scratch_page = true,
	.configure = intel_i9xx_configure,
	.fetch_size = intel_i9xx_fetch_size,
	.cleanup = intel_i915_cleanup,
	.mask_memory = intel_i810_mask_memory,
	.masks = intel_i810_masks,
	.agp_enable = intel_i810_agp_enable,
	.cache_flush = global_cache_flush,
	.create_gatt_table = intel_i915_create_gatt_table,
	.free_gatt_table = intel_i830_free_gatt_table,
	.insert_memory = intel_i915_insert_entries,
	.remove_memory = intel_i915_remove_entries,
	.alloc_by_type = intel_i830_alloc_by_type,
	.free_by_type = intel_i810_free_by_type,
	.agp_alloc_page = agp_generic_alloc_page,
	.agp_alloc_pages = agp_generic_alloc_pages,
	.agp_destroy_page = agp_generic_destroy_page,
	.agp_destroy_pages = agp_generic_destroy_pages,
	.agp_type_to_mask_type = intel_i830_type_to_mask_type,
	.chipset_flush = intel_i915_chipset_flush,
#ifdef USE_PCI_DMA_API
	.agp_map_page = intel_agp_map_page,
	.agp_unmap_page = intel_agp_unmap_page,
	.agp_map_memory = intel_agp_map_memory,
	.agp_unmap_memory = intel_agp_unmap_memory,
#endif
};

static const struct agp_bridge_driver intel_i965_driver = {
	.owner = THIS_MODULE,
	.aperture_sizes = intel_i830_sizes,
	.size_type = FIXED_APER_SIZE,
	.num_aperture_sizes = 4,
	.needs_scratch_page = true,
	.configure = intel_i9xx_configure,
	.fetch_size = intel_i9xx_fetch_size,
	.cleanup = intel_i915_cleanup,
	.mask_memory = intel_i965_mask_memory,
	.masks = intel_i810_masks,
	.agp_enable = intel_i810_agp_enable,
	.cache_flush = global_cache_flush,
	.create_gatt_table = intel_i965_create_gatt_table,
	.free_gatt_table = intel_i830_free_gatt_table,
	.insert_memory = intel_i915_insert_entries,
	.remove_memory = intel_i915_remove_entries,
	.alloc_by_type = intel_i830_alloc_by_type,
	.free_by_type = intel_i810_free_by_type,
	.agp_alloc_page = agp_generic_alloc_page,
	.agp_alloc_pages = agp_generic_alloc_pages,
	.agp_destroy_page = agp_generic_destroy_page,
	.agp_destroy_pages = agp_generic_destroy_pages,
	.agp_type_to_mask_type = intel_i830_type_to_mask_type,
	.chipset_flush = intel_i915_chipset_flush,
#ifdef USE_PCI_DMA_API
	.agp_map_page = intel_agp_map_page,
	.agp_unmap_page = intel_agp_unmap_page,
	.agp_map_memory = intel_agp_map_memory,
	.agp_unmap_memory = intel_agp_unmap_memory,
#endif
};

static const struct agp_bridge_driver intel_gen6_driver = {
	.owner = THIS_MODULE,
	.aperture_sizes = intel_i830_sizes,
	.size_type = FIXED_APER_SIZE,
	.num_aperture_sizes = 4,
	.needs_scratch_page = true,
	.configure = intel_i9xx_configure,
	.fetch_size = intel_i9xx_fetch_size,
	.cleanup = intel_i915_cleanup,
	.mask_memory = intel_gen6_mask_memory,
	.masks = intel_i810_masks,
	.agp_enable = intel_i810_agp_enable,
	.cache_flush = global_cache_flush,
	.create_gatt_table = intel_i965_create_gatt_table,
	.free_gatt_table = intel_i830_free_gatt_table,
	.insert_memory = intel_i915_insert_entries,
	.remove_memory = intel_i915_remove_entries,
	.alloc_by_type = intel_i830_alloc_by_type,
	.free_by_type = intel_i810_free_by_type,
	.agp_alloc_page = agp_generic_alloc_page,
	.agp_alloc_pages = agp_generic_alloc_pages,
	.agp_destroy_page = agp_generic_destroy_page,
	.agp_destroy_pages = agp_generic_destroy_pages,
	.agp_type_to_mask_type = intel_i830_type_to_mask_type,
	.chipset_flush = intel_i915_chipset_flush,
#ifdef USE_PCI_DMA_API
	.agp_map_page = intel_agp_map_page,
	.agp_unmap_page = intel_agp_unmap_page,
	.agp_map_memory = intel_agp_map_memory,
	.agp_unmap_memory = intel_agp_unmap_memory,
#endif
};

static const struct agp_bridge_driver intel_g33_driver = {
	.owner = THIS_MODULE,
	.aperture_sizes = intel_i830_sizes,
	.size_type = FIXED_APER_SIZE,
	.num_aperture_sizes = 4,
	.needs_scratch_page = true,
	.configure = intel_i9xx_configure,
	.fetch_size = intel_i9xx_fetch_size,
	.cleanup = intel_i915_cleanup,
	.mask_memory = intel_i965_mask_memory,
	.masks = intel_i810_masks,
	.agp_enable = intel_i810_agp_enable,
	.cache_flush = global_cache_flush,
	.create_gatt_table = intel_i915_create_gatt_table,
	.free_gatt_table = intel_i830_free_gatt_table,
	.insert_memory = intel_i915_insert_entries,
	.remove_memory = intel_i915_remove_entries,
	.alloc_by_type = intel_i830_alloc_by_type,
	.free_by_type = intel_i810_free_by_type,
	.agp_alloc_page = agp_generic_alloc_page,
	.agp_alloc_pages = agp_generic_alloc_pages,
	.agp_destroy_page = agp_generic_destroy_page,
	.agp_destroy_pages = agp_generic_destroy_pages,
	.agp_type_to_mask_type = intel_i830_type_to_mask_type,
	.chipset_flush = intel_i915_chipset_flush,
#ifdef USE_PCI_DMA_API
	.agp_map_page = intel_agp_map_page,
	.agp_unmap_page = intel_agp_unmap_page,
	.agp_map_memory = intel_agp_map_memory,
	.agp_unmap_memory = intel_agp_unmap_memory,
#endif
};