/*
 * Intel GTT (Graphics Translation Table) routines
 *
 * Caveat: This driver implements the linux agp interface, but this is far from
 * an agp driver! GTT support ended up here for purely historical reasons: The
 * old userspace intel graphics drivers needed an interface to map memory into
 * the GTT. And the drm provides a default interface for graphics devices sitting
 * on an agp port. So it made sense to fake the GTT support as an agp port to
 * avoid having to create a new api.
 *
 * With gem this does not make much sense anymore, just needlessly complicates
 * the code. But as long as the old graphics stack is still supported, it's stuck
 * here.
 *
 * /fairy-tale-mode off
 */

/*
 * If we have Intel graphics, we're not going to have anything other than
 * an Intel IOMMU. So make the correct use of the PCI DMA API contingent
 * on the Intel IOMMU support (CONFIG_DMAR).
 * Only newer chipsets need to bother with this, of course.
 */
#ifdef CONFIG_DMAR
#define USE_PCI_DMA_API 1
#endif

/* Max amount of stolen space, anything above will be returned to Linux */
int intel_max_stolen = 32 * 1024 * 1024;
EXPORT_SYMBOL(intel_max_stolen);

static const struct aper_size_info_fixed intel_i810_sizes[] =
{
	{64, 16384, 4},
	/* The 32M mode still requires a 64k gatt */
	{32, 8192, 4}
};

#define AGP_DCACHE_MEMORY	1
#define AGP_PHYS_MEMORY		2
#define INTEL_AGP_CACHED_MEMORY 3

static struct gatt_mask intel_i810_masks[] =
{
	{.mask = I810_PTE_VALID, .type = 0},
	{.mask = (I810_PTE_VALID | I810_PTE_LOCAL), .type = AGP_DCACHE_MEMORY},
	{.mask = I810_PTE_VALID, .type = 0},
	{.mask = I810_PTE_VALID | I830_PTE_SYSTEM_CACHED,
	 .type = INTEL_AGP_CACHED_MEMORY}
};

static struct _intel_private {
	struct pci_dev *pcidev;	/* device one */
	u8 __iomem *registers;
	u32 __iomem *gtt;	/* I915G */
	int num_dcache_entries;
	/* gtt_entries is the number of gtt entries that are already mapped
	 * to stolen memory. Stolen memory is larger than the memory mapped
	 * through gtt_entries, as it includes some reserved space for the BIOS
	 * popup and for the GTT.
	 */
	int gtt_entries;	/* i830+ */
	int gtt_total_size;
	union {
		void __iomem *i9xx_flush_page;
		void *i8xx_flush_page;
	};
	struct page *i8xx_page;
	struct resource ifp_resource;
	int resource_valid;
} intel_private;

#ifdef USE_PCI_DMA_API
static int intel_agp_map_page(struct page *page, dma_addr_t *ret)
{
	*ret = pci_map_page(intel_private.pcidev, page, 0,
			    PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
	if (pci_dma_mapping_error(intel_private.pcidev, *ret))
		return -EINVAL;
	return 0;
}

static void intel_agp_unmap_page(struct page *page, dma_addr_t dma)
{
	pci_unmap_page(intel_private.pcidev, dma,
		       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
}

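/* mem->sg_list points into a struct sg_table allocated in
 * intel_agp_map_memory(); rebuild one on the stack so that
 * sg_free_table() can release it. */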
static void intel_agp_free_sglist(struct agp_memory *mem)
{
	struct sg_table st;

	st.sgl = mem->sg_list;
	st.orig_nents = st.nents = mem->page_count;

	sg_free_table(&st);

	mem->sg_list = NULL;
	mem->num_sg = 0;
}

static int intel_agp_map_memory(struct agp_memory *mem)
{
	struct sg_table st;
	struct scatterlist *sg;
	int i;

	DBG("try mapping %lu pages\n", (unsigned long)mem->page_count);

	if (sg_alloc_table(&st, mem->page_count, GFP_KERNEL))
		goto err;

	mem->sg_list = sg = st.sgl;

	for (i = 0; i < mem->page_count; i++, sg = sg_next(sg))
		sg_set_page(sg, mem->pages[i], PAGE_SIZE, 0);

	mem->num_sg = pci_map_sg(intel_private.pcidev, mem->sg_list,
				 mem->page_count, PCI_DMA_BIDIRECTIONAL);
	if (unlikely(!mem->num_sg))
		goto err;

	return 0;

err:
	sg_free_table(&st);
	return -ENOMEM;
}

static void intel_agp_unmap_memory(struct agp_memory *mem)
{
	DBG("try unmapping %lu pages\n", (unsigned long)mem->page_count);

	pci_unmap_sg(intel_private.pcidev, mem->sg_list,
		     mem->page_count, PCI_DMA_BIDIRECTIONAL);
	intel_agp_free_sglist(mem);
}

static void intel_agp_insert_sg_entries(struct agp_memory *mem,
					off_t pg_start, int mask_type)
{
	struct scatterlist *sg;
	int i, j;

	j = pg_start;

	WARN_ON(!mem->num_sg);

	if (mem->num_sg == mem->page_count) {
		for_each_sg(mem->sg_list, sg, mem->page_count, i) {
			writel(agp_bridge->driver->mask_memory(agp_bridge,
					sg_dma_address(sg), mask_type),
			       intel_private.gtt+j);
			j++;
		}
	} else {
		/* sg may merge pages, but we have to separate
		 * per-page addr for GTT */
		unsigned int len, m;

		for_each_sg(mem->sg_list, sg, mem->num_sg, i) {
			len = sg_dma_len(sg) / PAGE_SIZE;
			for (m = 0; m < len; m++) {
				writel(agp_bridge->driver->mask_memory(agp_bridge,
						sg_dma_address(sg) + m * PAGE_SIZE,
						mask_type),
				       intel_private.gtt+j);
				j++;
			}
		}
	}
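	/* Read back the last PTE so the posted MMIO writes reach the GTT
	 * before anyone relies on the mapping (PCI posting). */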
	readl(intel_private.gtt+j-1);
}

#else

static void intel_agp_insert_sg_entries(struct agp_memory *mem,
					off_t pg_start, int mask_type)
{
	int i, j;
	u32 cache_bits = 0;

	if (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB ||
	    agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB)
	{
		cache_bits = GEN6_PTE_LLC_MLC;
	}

	for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
		writel(agp_bridge->driver->mask_memory(agp_bridge,
				page_to_phys(mem->pages[i]), mask_type),
		       intel_private.gtt+j);
	}

	readl(intel_private.gtt+j-1);
}

#endif

static int intel_i810_fetch_size(void)
{
	u32 smram_miscc;
	struct aper_size_info_fixed *values;

	pci_read_config_dword(agp_bridge->dev, I810_SMRAM_MISCC, &smram_miscc);
	values = A_SIZE_FIX(agp_bridge->driver->aperture_sizes);

	if ((smram_miscc & I810_GMS) == I810_GMS_DISABLE) {
		dev_warn(&agp_bridge->dev->dev, "i810 is disabled\n");
		return 0;
	}
	if ((smram_miscc & I810_GFX_MEM_WIN_SIZE) == I810_GFX_MEM_WIN_32M) {
		agp_bridge->current_size = (void *) (values + 1);
		agp_bridge->aperture_size_idx = 1;
		return values[1].size;
	} else {
		agp_bridge->current_size = (void *) (values);
		agp_bridge->aperture_size_idx = 0;
		return values[0].size;
	}

	return 0;
}

static int intel_i810_configure(void)
{
	struct aper_size_info_fixed *current_size;
	u32 temp;
	int i;

	current_size = A_SIZE_FIX(agp_bridge->current_size);

	if (!intel_private.registers) {
		pci_read_config_dword(intel_private.pcidev, I810_MMADDR, &temp);
		temp &= 0xfff80000;

		intel_private.registers = ioremap(temp, 128 * 4096);
		if (!intel_private.registers) {
			dev_err(&intel_private.pcidev->dev,
				"can't remap memory\n");
			return -ENOMEM;
		}
	}

	if ((readl(intel_private.registers+I810_DRAM_CTL)
		& I810_DRAM_ROW_0) == I810_DRAM_ROW_0_SDRAM) {
		/* This will need to be dynamically assigned */
		dev_info(&intel_private.pcidev->dev,
			 "detected 4MB dedicated video ram\n");
		intel_private.num_dcache_entries = 1024;
	}
	pci_read_config_dword(intel_private.pcidev, I810_GMADDR, &temp);
	agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
	writel(agp_bridge->gatt_bus_addr | I810_PGETBL_ENABLED, intel_private.registers+I810_PGETBL_CTL);
	readl(intel_private.registers+I810_PGETBL_CTL);	/* PCI Posting. */

	if (agp_bridge->driver->needs_scratch_page) {
		for (i = 0; i < current_size->num_entries; i++) {
			writel(agp_bridge->scratch_page, intel_private.registers+I810_PTE_BASE+(i*4));
		}
		readl(intel_private.registers+I810_PTE_BASE+((i-1)*4));	/* PCI posting. */
	}
	global_cache_flush();
	return 0;
}

static void intel_i810_cleanup(void)
{
	writel(0, intel_private.registers+I810_PGETBL_CTL);
	readl(intel_private.registers);	/* PCI Posting. */
	iounmap(intel_private.registers);
}

static void intel_i810_agp_enable(struct agp_bridge_data *bridge, u32 mode)
{
	return;
}

/* Exists to support ARGB cursors */
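/* Four pages give the 16KB needed for e.g. a 64x64 cursor at 4 bytes
 * per pixel. */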
static struct page *i8xx_alloc_pages(void)
{
	struct page *page;

	page = alloc_pages(GFP_KERNEL | GFP_DMA32, 2);
	if (page == NULL)
		return NULL;

	if (set_pages_uc(page, 4) < 0) {
		set_pages_wb(page, 4);
		__free_pages(page, 2);
		return NULL;
	}
	get_page(page);
	atomic_inc(&agp_bridge->current_memory_agp);
	return page;
}

static void i8xx_destroy_pages(struct page *page)
{
	if (page == NULL)
		return;

	set_pages_wb(page, 4);
	put_page(page);
	__free_pages(page, 2);
	atomic_dec(&agp_bridge->current_memory_agp);
}

static int intel_i830_type_to_mask_type(struct agp_bridge_data *bridge,
					int type)
{
	if (type < AGP_USER_TYPES)
		return type;
	else if (type == AGP_USER_CACHED_MEMORY)
		return INTEL_AGP_CACHED_MEMORY;
	else
		return 0;
}

static int intel_i810_insert_entries(struct agp_memory *mem, off_t pg_start,
				     int type)
{
	int i, j, num_entries;
	void *temp;
	int ret = -EINVAL;
	int mask_type;

	if (mem->page_count == 0)
		goto out;

	temp = agp_bridge->current_size;
	num_entries = A_SIZE_FIX(temp)->num_entries;

	if ((pg_start + mem->page_count) > num_entries)
		goto out_err;

	for (j = pg_start; j < (pg_start + mem->page_count); j++) {
		if (!PGE_EMPTY(agp_bridge, readl(agp_bridge->gatt_table+j))) {
			ret = -EBUSY;
			goto out_err;
		}
	}

	if (type != mem->type)
		goto out_err;

	mask_type = agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type);

	switch (mask_type) {
	case AGP_DCACHE_MEMORY:
		if (!mem->is_flushed)
			global_cache_flush();
		for (i = pg_start; i < (pg_start + mem->page_count); i++) {
			writel((i*4096)|I810_PTE_LOCAL|I810_PTE_VALID,
			       intel_private.registers+I810_PTE_BASE+(i*4));
		}
		readl(intel_private.registers+I810_PTE_BASE+((i-1)*4));
		break;
	case AGP_PHYS_MEMORY:
	case AGP_NORMAL_MEMORY:
		if (!mem->is_flushed)
			global_cache_flush();
		for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
			writel(agp_bridge->driver->mask_memory(agp_bridge,
					page_to_phys(mem->pages[i]), mask_type),
			       intel_private.registers+I810_PTE_BASE+(j*4));
		}
		readl(intel_private.registers+I810_PTE_BASE+((j-1)*4));
		break;
	default:
		goto out_err;
	}

out:
	ret = 0;
out_err:
	mem->is_flushed = true;
	return ret;
}

static int intel_i810_remove_entries(struct agp_memory *mem, off_t pg_start,
				     int type)
{
	int i;

	if (mem->page_count == 0)
		return 0;

	for (i = pg_start; i < (mem->page_count + pg_start); i++) {
		writel(agp_bridge->scratch_page, intel_private.registers+I810_PTE_BASE+(i*4));
	}
	readl(intel_private.registers+I810_PTE_BASE+((i-1)*4));

	return 0;
}

/*
 * The i810/i830 requires a physical address to program its mouse
 * pointer into hardware.
 * However the Xserver still writes to it through the agp aperture.
 */
static struct agp_memory *alloc_agpphysmem_i8xx(size_t pg_count, int type)
{
	struct agp_memory *new;
	struct page *page;

	switch (pg_count) {
	case 1: page = agp_bridge->driver->agp_alloc_page(agp_bridge);
		break;
	case 4:
		/* kludge to get 4 physical pages for ARGB cursor */
		page = i8xx_alloc_pages();
		break;
	default:
		return NULL;
	}

	if (page == NULL)
		return NULL;

	new = agp_create_memory(pg_count);
	if (new == NULL)
		return NULL;

	new->pages[0] = page;
	if (pg_count == 4) {
		/* kludge to get 4 physical pages for ARGB cursor */
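		/* i8xx_alloc_pages() handed back four physically contiguous
		 * pages, so the adjacent struct page pointers are valid. */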
		new->pages[1] = new->pages[0] + 1;
		new->pages[2] = new->pages[1] + 1;
		new->pages[3] = new->pages[2] + 1;
	}
	new->page_count = pg_count;
	new->num_scratch_pages = pg_count;
	new->type = AGP_PHYS_MEMORY;
	new->physical = page_to_phys(new->pages[0]);
	return new;
}

static struct agp_memory *intel_i810_alloc_by_type(size_t pg_count, int type)
{
	struct agp_memory *new;

	if (type == AGP_DCACHE_MEMORY) {
		if (pg_count != intel_private.num_dcache_entries)
			return NULL;

		new = agp_create_memory(1);
		if (new == NULL)
			return NULL;

		new->type = AGP_DCACHE_MEMORY;
		new->page_count = pg_count;
		new->num_scratch_pages = 0;
		agp_free_page_array(new);
		return new;
	}
	if (type == AGP_PHYS_MEMORY)
		return alloc_agpphysmem_i8xx(pg_count, type);
	return NULL;
}

static void intel_i810_free_by_type(struct agp_memory *curr)
{
	agp_free_key(curr->key);
	if (curr->type == AGP_PHYS_MEMORY) {
		if (curr->page_count == 4)
			i8xx_destroy_pages(curr->pages[0]);
		else {
			agp_bridge->driver->agp_destroy_page(curr->pages[0],
							     AGP_PAGE_DESTROY_UNMAP);
			agp_bridge->driver->agp_destroy_page(curr->pages[0],
							     AGP_PAGE_DESTROY_FREE);
		}
		agp_free_page_array(curr);
	}
	kfree(curr);
}

static unsigned long intel_i810_mask_memory(struct agp_bridge_data *bridge,
					    dma_addr_t addr, int type)
{
	/* Type checking must be done elsewhere */
	return addr | bridge->driver->masks[type].mask;
}

static struct aper_size_info_fixed intel_i830_sizes[] =
{
	{128, 32768, 5},
	/* The 64M mode still requires a 128k gatt */
	{64, 16384, 5},
	{256, 65536, 6},
	{512, 131072, 7},
};

static void intel_i830_init_gtt_entries(void)
{
	u16 gmch_ctrl;
	int gtt_entries = 0;
	u8 rdct;
	int local = 0;
	static const int ddt[4] = { 0, 16, 32, 64 };
	int size; /* reserved space (in kb) at the top of stolen memory */

	pci_read_config_word(agp_bridge->dev, I830_GMCH_CTRL, &gmch_ctrl);

	if (IS_I965) {
		u32 pgetbl_ctl;
		pgetbl_ctl = readl(intel_private.registers+I810_PGETBL_CTL);

		/* The 965 has a field telling us the size of the GTT,
		 * which may be larger than what is necessary to map the
		 * aperture.
		 */
		switch (pgetbl_ctl & I965_PGETBL_SIZE_MASK) {
		case I965_PGETBL_SIZE_128KB:
			size = 128;
			break;
		case I965_PGETBL_SIZE_256KB:
			size = 256;
			break;
		case I965_PGETBL_SIZE_512KB:
			size = 512;
			break;
		case I965_PGETBL_SIZE_1MB:
			size = 1024;
			break;
		case I965_PGETBL_SIZE_2MB:
			size = 2048;
			break;
		case I965_PGETBL_SIZE_1_5MB:
			size = 1024 + 512;
			break;
		default:
			dev_info(&intel_private.pcidev->dev,
				 "unknown page table size, assuming 512KB\n");
			size = 512;
		}
		size += 4; /* add in BIOS popup space */
	} else if (IS_G33 && !IS_PINEVIEW) {
		/* G33's GTT size defined in gmch_ctrl */
		switch (gmch_ctrl & G33_PGETBL_SIZE_MASK) {
		case G33_PGETBL_SIZE_1M:
			size = 1024;
			break;
		case G33_PGETBL_SIZE_2M:
			size = 2048;
			break;
		default:
			dev_info(&agp_bridge->dev->dev,
				 "unknown page table size 0x%x, assuming 512KB\n",
				 (gmch_ctrl & G33_PGETBL_SIZE_MASK));
			size = 512;
		}
		size += 4;
	} else if (IS_G4X || IS_PINEVIEW) {
		/* On 4 series hardware, GTT stolen is separate from graphics
		 * stolen, ignore it in stolen gtt entries counting. However,
		 * 4KB of the stolen memory doesn't get mapped to the GTT.
		 */
		size = 4;
	} else {
		/* On previous hardware, the GTT size was just what was
		 * required to map the aperture.
		 */
		size = agp_bridge->driver->fetch_size() + 4;
	}

	if (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82830_HB ||
	    agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82845G_HB) {
		switch (gmch_ctrl & I830_GMCH_GMS_MASK) {
		case I830_GMCH_GMS_STOLEN_512:
			gtt_entries = KB(512) - KB(size);
			break;
		case I830_GMCH_GMS_STOLEN_1024:
			gtt_entries = MB(1) - KB(size);
			break;
		case I830_GMCH_GMS_STOLEN_8192:
			gtt_entries = MB(8) - KB(size);
			break;
		case I830_GMCH_GMS_LOCAL:
			rdct = readb(intel_private.registers+I830_RDRAM_CHANNEL_TYPE);
			gtt_entries = (I830_RDRAM_ND(rdct) + 1) *
					MB(ddt[I830_RDRAM_DDT(rdct)]);
			local = 1;
			break;
		default:
			gtt_entries = 0;
			break;
		}
	} else if (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB ||
		   agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB) {
		/*
		 * SandyBridge has new memory control reg at 0x50.w
		 */
		u16 snb_gmch_ctl;
		pci_read_config_word(intel_private.pcidev, SNB_GMCH_CTRL, &snb_gmch_ctl);
		switch (snb_gmch_ctl & SNB_GMCH_GMS_STOLEN_MASK) {
		case SNB_GMCH_GMS_STOLEN_32M:
			gtt_entries = MB(32) - KB(size);
			break;
		case SNB_GMCH_GMS_STOLEN_64M:
			gtt_entries = MB(64) - KB(size);
			break;
		case SNB_GMCH_GMS_STOLEN_96M:
			gtt_entries = MB(96) - KB(size);
			break;
		case SNB_GMCH_GMS_STOLEN_128M:
			gtt_entries = MB(128) - KB(size);
			break;
		case SNB_GMCH_GMS_STOLEN_160M:
			gtt_entries = MB(160) - KB(size);
			break;
		case SNB_GMCH_GMS_STOLEN_192M:
			gtt_entries = MB(192) - KB(size);
			break;
		case SNB_GMCH_GMS_STOLEN_224M:
			gtt_entries = MB(224) - KB(size);
			break;
		case SNB_GMCH_GMS_STOLEN_256M:
			gtt_entries = MB(256) - KB(size);
			break;
		case SNB_GMCH_GMS_STOLEN_288M:
			gtt_entries = MB(288) - KB(size);
			break;
		case SNB_GMCH_GMS_STOLEN_320M:
			gtt_entries = MB(320) - KB(size);
			break;
		case SNB_GMCH_GMS_STOLEN_352M:
			gtt_entries = MB(352) - KB(size);
			break;
		case SNB_GMCH_GMS_STOLEN_384M:
			gtt_entries = MB(384) - KB(size);
			break;
		case SNB_GMCH_GMS_STOLEN_416M:
			gtt_entries = MB(416) - KB(size);
			break;
		case SNB_GMCH_GMS_STOLEN_448M:
			gtt_entries = MB(448) - KB(size);
			break;
		case SNB_GMCH_GMS_STOLEN_480M:
			gtt_entries = MB(480) - KB(size);
			break;
		case SNB_GMCH_GMS_STOLEN_512M:
			gtt_entries = MB(512) - KB(size);
			break;
		}
	} else {
		switch (gmch_ctrl & I855_GMCH_GMS_MASK) {
		case I855_GMCH_GMS_STOLEN_1M:
			gtt_entries = MB(1) - KB(size);
			break;
		case I855_GMCH_GMS_STOLEN_4M:
			gtt_entries = MB(4) - KB(size);
			break;
		case I855_GMCH_GMS_STOLEN_8M:
			gtt_entries = MB(8) - KB(size);
			break;
		case I855_GMCH_GMS_STOLEN_16M:
			gtt_entries = MB(16) - KB(size);
			break;
		case I855_GMCH_GMS_STOLEN_32M:
			gtt_entries = MB(32) - KB(size);
			break;
		case I915_GMCH_GMS_STOLEN_48M:
			/* Check it's really I915G */
			if (IS_I915 || IS_I965 || IS_G33 || IS_G4X)
				gtt_entries = MB(48) - KB(size);
			else
				gtt_entries = 0;
			break;
		case I915_GMCH_GMS_STOLEN_64M:
			/* Check it's really I915G */
			if (IS_I915 || IS_I965 || IS_G33 || IS_G4X)
				gtt_entries = MB(64) - KB(size);
			else
				gtt_entries = 0;
			break;
		case G33_GMCH_GMS_STOLEN_128M:
			if (IS_G33 || IS_I965 || IS_G4X)
				gtt_entries = MB(128) - KB(size);
			else
				gtt_entries = 0;
			break;
		case G33_GMCH_GMS_STOLEN_256M:
			if (IS_G33 || IS_I965 || IS_G4X)
				gtt_entries = MB(256) - KB(size);
			else
				gtt_entries = 0;
			break;
		case INTEL_GMCH_GMS_STOLEN_96M:
			if (IS_I965 || IS_G4X)
				gtt_entries = MB(96) - KB(size);
			else
				gtt_entries = 0;
			break;
		case INTEL_GMCH_GMS_STOLEN_160M:
			if (IS_I965 || IS_G4X)
				gtt_entries = MB(160) - KB(size);
			else
				gtt_entries = 0;
			break;
		case INTEL_GMCH_GMS_STOLEN_224M:
			if (IS_I965 || IS_G4X)
				gtt_entries = MB(224) - KB(size);
			else
				gtt_entries = 0;
			break;
		case INTEL_GMCH_GMS_STOLEN_352M:
			if (IS_I965 || IS_G4X)
				gtt_entries = MB(352) - KB(size);
			else
				gtt_entries = 0;
			break;
		default:
			gtt_entries = 0;
			break;
		}
	}
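
	/* gtt_entries is in bytes at this point and is converted to a count
	 * of 4KB GTT entries below. E.g. 8MB of stolen memory with size ==
	 * 132 (128KB GTT plus 4KB BIOS popup) yields
	 * (MB(8) - KB(132)) / KB(4) = 2015 entries. */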
	if (!local && gtt_entries > intel_max_stolen) {
		dev_info(&agp_bridge->dev->dev,
			 "detected %dK stolen memory, trimming to %dK\n",
			 gtt_entries / KB(1), intel_max_stolen / KB(1));
		gtt_entries = intel_max_stolen / KB(4);
	} else if (gtt_entries > 0) {
		dev_info(&agp_bridge->dev->dev, "detected %dK %s memory\n",
			 gtt_entries / KB(1), local ? "local" : "stolen");
		gtt_entries /= KB(4);
	} else {
		dev_info(&agp_bridge->dev->dev,
			 "no pre-allocated video memory detected\n");
		gtt_entries = 0;
	}

	intel_private.gtt_entries = gtt_entries;
}

static void intel_i830_fini_flush(void)
{
	kunmap(intel_private.i8xx_page);
	intel_private.i8xx_flush_page = NULL;
	unmap_page_from_agp(intel_private.i8xx_page);

	__free_page(intel_private.i8xx_page);
	intel_private.i8xx_page = NULL;
}

static void intel_i830_setup_flush(void)
{
	/* return if we've already set the flush mechanism up */
	if (intel_private.i8xx_page)
		return;

	intel_private.i8xx_page = alloc_page(GFP_KERNEL | __GFP_ZERO | GFP_DMA32);
	if (!intel_private.i8xx_page)
		return;

	intel_private.i8xx_flush_page = kmap(intel_private.i8xx_page);
	if (!intel_private.i8xx_flush_page)
		intel_i830_fini_flush();
}

/* The chipset_flush interface needs to get data that has already been
 * flushed out of the CPU all the way out to main memory, because the GPU
 * doesn't snoop those buffers.
 *
 * The 8xx series doesn't have the same lovely interface for flushing the
 * chipset write buffers that the later chips do. According to the 865
 * specs, it's 64 octwords, or 1KB. So, to get those previous things in
 * that buffer out, we just fill 1KB and clflush it out, on the assumption
 * that it'll push whatever was in there out. It appears to work.
 */
static void intel_i830_chipset_flush(struct agp_bridge_data *bridge)
{
	unsigned int *pg = intel_private.i8xx_flush_page;

	memset(pg, 0, 1024);

	if (cpu_has_clflush)
		clflush_cache_range(pg, 1024);
	else if (wbinvd_on_all_cpus() != 0)
		printk(KERN_ERR "Timed out waiting for cache flush.\n");
}

/* The intel i830 automatically initializes the agp aperture during POST.
 * Use the memory already set aside for it in the GTT.
 */
static int intel_i830_create_gatt_table(struct agp_bridge_data *bridge)
{
	int page_order;
	struct aper_size_info_fixed *size;
	int num_entries;
	u32 temp;

	size = agp_bridge->current_size;
	page_order = size->page_order;
	num_entries = size->num_entries;
	agp_bridge->gatt_table_real = NULL;

	pci_read_config_dword(intel_private.pcidev, I810_MMADDR, &temp);
	temp &= 0xfff80000;

	intel_private.registers = ioremap(temp, 128 * 4096);
	if (!intel_private.registers)
		return -ENOMEM;

	temp = readl(intel_private.registers+I810_PGETBL_CTL) & 0xfffff000;
	global_cache_flush();	/* FIXME: ?? */

	/* we have to call this as early as possible after the MMIO base address is known */
	intel_i830_init_gtt_entries();
	if (intel_private.gtt_entries == 0) {
		iounmap(intel_private.registers);
		return -ENOMEM;
	}

	agp_bridge->gatt_table = NULL;

	agp_bridge->gatt_bus_addr = temp;

	return 0;
}

/* Return the gatt table to a sane state. Use the top of stolen
 * memory for the GTT.
 */
static int intel_i830_free_gatt_table(struct agp_bridge_data *bridge)
{
	return 0;
}

static int intel_i830_fetch_size(void)
{
	u16 gmch_ctrl;
	struct aper_size_info_fixed *values;

	values = A_SIZE_FIX(agp_bridge->driver->aperture_sizes);

	if (agp_bridge->dev->device != PCI_DEVICE_ID_INTEL_82830_HB &&
	    agp_bridge->dev->device != PCI_DEVICE_ID_INTEL_82845G_HB) {
		/* 855GM/852GM/865G has 128MB aperture size */
		agp_bridge->current_size = (void *) values;
		agp_bridge->aperture_size_idx = 0;
		return values[0].size;
	}

	pci_read_config_word(agp_bridge->dev, I830_GMCH_CTRL, &gmch_ctrl);

	if ((gmch_ctrl & I830_GMCH_MEM_MASK) == I830_GMCH_MEM_128M) {
		agp_bridge->current_size = (void *) values;
		agp_bridge->aperture_size_idx = 0;
		return values[0].size;
	} else {
		agp_bridge->current_size = (void *) (values + 1);
		agp_bridge->aperture_size_idx = 1;
		return values[1].size;
	}

	return 0;
}

static int intel_i830_configure(void)
{
	struct aper_size_info_fixed *current_size;
	u32 temp;
	u16 gmch_ctrl;
	int i;

	current_size = A_SIZE_FIX(agp_bridge->current_size);

	pci_read_config_dword(intel_private.pcidev, I810_GMADDR, &temp);
	agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);

	pci_read_config_word(agp_bridge->dev, I830_GMCH_CTRL, &gmch_ctrl);
	gmch_ctrl |= I830_GMCH_ENABLED;
	pci_write_config_word(agp_bridge->dev, I830_GMCH_CTRL, gmch_ctrl);

	writel(agp_bridge->gatt_bus_addr|I810_PGETBL_ENABLED, intel_private.registers+I810_PGETBL_CTL);
	readl(intel_private.registers+I810_PGETBL_CTL);	/* PCI Posting. */

	if (agp_bridge->driver->needs_scratch_page) {
		for (i = intel_private.gtt_entries; i < current_size->num_entries; i++) {
			writel(agp_bridge->scratch_page, intel_private.registers+I810_PTE_BASE+(i*4));
		}
		readl(intel_private.registers+I810_PTE_BASE+((i-1)*4));	/* PCI Posting. */
	}

	global_cache_flush();

	intel_i830_setup_flush();
	return 0;
}

static void intel_i830_cleanup(void)
{
	iounmap(intel_private.registers);
}

static int intel_i830_insert_entries(struct agp_memory *mem, off_t pg_start,
				     int type)
{
	int i, j, num_entries;
	void *temp;
	int ret = -EINVAL;
	int mask_type;

	if (mem->page_count == 0)
		goto out;

	temp = agp_bridge->current_size;
	num_entries = A_SIZE_FIX(temp)->num_entries;

	if (pg_start < intel_private.gtt_entries) {
		dev_printk(KERN_DEBUG, &intel_private.pcidev->dev,
			   "pg_start == 0x%.8lx, intel_private.gtt_entries == 0x%.8x\n",
			   pg_start, intel_private.gtt_entries);

		dev_info(&intel_private.pcidev->dev,
			 "trying to insert into local/stolen memory\n");
		goto out_err;
	}

	if ((pg_start + mem->page_count) > num_entries)
		goto out_err;

	/* The i830 can't check the GTT for entries since it's read only;
	 * depend on the caller to make the correct offset decisions.
	 */

	if (type != mem->type)
		goto out_err;

	mask_type = agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type);

	if (mask_type != 0 && mask_type != AGP_PHYS_MEMORY &&
	    mask_type != INTEL_AGP_CACHED_MEMORY)
		goto out_err;

	if (!mem->is_flushed)
		global_cache_flush();

	for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
		writel(agp_bridge->driver->mask_memory(agp_bridge,
				page_to_phys(mem->pages[i]), mask_type),
		       intel_private.registers+I810_PTE_BASE+(j*4));
	}
	readl(intel_private.registers+I810_PTE_BASE+((j-1)*4));

out:
	ret = 0;
out_err:
	mem->is_flushed = true;
	return ret;
}

static int intel_i830_remove_entries(struct agp_memory *mem, off_t pg_start,
				     int type)
{
	int i;

	if (mem->page_count == 0)
		return 0;

	if (pg_start < intel_private.gtt_entries) {
		dev_info(&intel_private.pcidev->dev,
			 "trying to disable local/stolen memory\n");
		return -EINVAL;
	}

	for (i = pg_start; i < (mem->page_count + pg_start); i++) {
		writel(agp_bridge->scratch_page, intel_private.registers+I810_PTE_BASE+(i*4));
	}
	readl(intel_private.registers+I810_PTE_BASE+((i-1)*4));

	return 0;
}

static struct agp_memory *intel_i830_alloc_by_type(size_t pg_count, int type)
{
	if (type == AGP_PHYS_MEMORY)
		return alloc_agpphysmem_i8xx(pg_count, type);
	/* always return NULL for other allocation types for now */
	return NULL;
}

static int intel_alloc_chipset_flush_resource(void)
{
	int ret;
	ret = pci_bus_alloc_resource(agp_bridge->dev->bus, &intel_private.ifp_resource, PAGE_SIZE,
				     PAGE_SIZE, PCIBIOS_MIN_MEM, 0,
				     pcibios_align_resource, agp_bridge->dev);

	return ret;
}

static void intel_i915_setup_chipset_flush(void)
{
	int ret;
	u32 temp;

	pci_read_config_dword(agp_bridge->dev, I915_IFPADDR, &temp);
	if (!(temp & 0x1)) {
		intel_alloc_chipset_flush_resource();
		intel_private.resource_valid = 1;
		pci_write_config_dword(agp_bridge->dev, I915_IFPADDR, (intel_private.ifp_resource.start & 0xffffffff) | 0x1);
	} else {
		temp &= ~1;

		intel_private.resource_valid = 1;
		intel_private.ifp_resource.start = temp;
		intel_private.ifp_resource.end = temp + PAGE_SIZE;
		ret = request_resource(&iomem_resource, &intel_private.ifp_resource);
		/* some BIOSes reserve this area in a pnp, some don't */
		if (ret)
			intel_private.resource_valid = 0;
	}
}

static void intel_i965_g33_setup_chipset_flush(void)
{
	u32 temp_hi, temp_lo;
	int ret;

	pci_read_config_dword(agp_bridge->dev, I965_IFPADDR + 4, &temp_hi);
	pci_read_config_dword(agp_bridge->dev, I965_IFPADDR, &temp_lo);

	if (!(temp_lo & 0x1)) {

		intel_alloc_chipset_flush_resource();

		intel_private.resource_valid = 1;
		pci_write_config_dword(agp_bridge->dev, I965_IFPADDR + 4,
			upper_32_bits(intel_private.ifp_resource.start));
		pci_write_config_dword(agp_bridge->dev, I965_IFPADDR, (intel_private.ifp_resource.start & 0xffffffff) | 0x1);
	} else {
		u64 l64;

		temp_lo &= ~0x1;
		l64 = ((u64)temp_hi << 32) | temp_lo;

		intel_private.resource_valid = 1;
		intel_private.ifp_resource.start = l64;
		intel_private.ifp_resource.end = l64 + PAGE_SIZE;
		ret = request_resource(&iomem_resource, &intel_private.ifp_resource);
		/* some BIOSes reserve this area in a pnp, some don't */
		if (ret)
			intel_private.resource_valid = 0;
	}
}

static void intel_i9xx_setup_flush(void)
{
	/* return if already configured */
	if (intel_private.ifp_resource.start)
		return;

	if (IS_SNB)
		return;

	/* setup a resource for this object */
	intel_private.ifp_resource.name = "Intel Flush Page";
	intel_private.ifp_resource.flags = IORESOURCE_MEM;

	/* Setup chipset flush for 915 */
	if (IS_I965 || IS_G33 || IS_G4X) {
		intel_i965_g33_setup_chipset_flush();
	} else {
		intel_i915_setup_chipset_flush();
	}

	if (intel_private.ifp_resource.start)
		intel_private.i9xx_flush_page = ioremap_nocache(intel_private.ifp_resource.start, PAGE_SIZE);
	if (!intel_private.i9xx_flush_page)
		dev_err(&intel_private.pcidev->dev,
			"can't ioremap flush page - no chipset flushing\n");
}

static int intel_i9xx_configure(void)
{
	struct aper_size_info_fixed *current_size;
	u32 temp;
	u16 gmch_ctrl;
	int i;

	current_size = A_SIZE_FIX(agp_bridge->current_size);

	pci_read_config_dword(intel_private.pcidev, I915_GMADDR, &temp);

	agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);

	pci_read_config_word(agp_bridge->dev, I830_GMCH_CTRL, &gmch_ctrl);
	gmch_ctrl |= I830_GMCH_ENABLED;
	pci_write_config_word(agp_bridge->dev, I830_GMCH_CTRL, gmch_ctrl);

	writel(agp_bridge->gatt_bus_addr|I810_PGETBL_ENABLED, intel_private.registers+I810_PGETBL_CTL);
	readl(intel_private.registers+I810_PGETBL_CTL);	/* PCI Posting. */

	if (agp_bridge->driver->needs_scratch_page) {
		for (i = intel_private.gtt_entries; i < intel_private.gtt_total_size; i++) {
			writel(agp_bridge->scratch_page, intel_private.gtt+i);
		}
		readl(intel_private.gtt+i-1);	/* PCI Posting. */
	}

	global_cache_flush();

	intel_i9xx_setup_flush();

	return 0;
}

static void intel_i915_cleanup(void)
{
	if (intel_private.i9xx_flush_page)
		iounmap(intel_private.i9xx_flush_page);
	if (intel_private.resource_valid)
		release_resource(&intel_private.ifp_resource);
	intel_private.ifp_resource.start = 0;
	intel_private.resource_valid = 0;
	iounmap(intel_private.gtt);
	iounmap(intel_private.registers);
}

static void intel_i915_chipset_flush(struct agp_bridge_data *bridge)
{
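	/* Any write to the flush page triggers the chipset write-buffer
	 * flush that intel_i9xx_setup_flush() configured. */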
	if (intel_private.i9xx_flush_page)
		writel(1, intel_private.i9xx_flush_page);
}

static int intel_i915_insert_entries(struct agp_memory *mem, off_t pg_start,
				     int type)
{
	int num_entries;
	void *temp;
	int ret = -EINVAL;
	int mask_type;

	if (mem->page_count == 0)
		goto out;

	temp = agp_bridge->current_size;
	num_entries = A_SIZE_FIX(temp)->num_entries;

	if (pg_start < intel_private.gtt_entries) {
		dev_printk(KERN_DEBUG, &intel_private.pcidev->dev,
			   "pg_start == 0x%.8lx, intel_private.gtt_entries == 0x%.8x\n",
			   pg_start, intel_private.gtt_entries);

		dev_info(&intel_private.pcidev->dev,
			 "trying to insert into local/stolen memory\n");
		goto out_err;
	}

	if ((pg_start + mem->page_count) > num_entries)
		goto out_err;

	/* The i915 can't check the GTT for entries since it's read only;
	 * depend on the caller to make the correct offset decisions.
	 */

	if (type != mem->type)
		goto out_err;

	mask_type = agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type);

	if (mask_type != 0 && mask_type != AGP_PHYS_MEMORY &&
	    mask_type != INTEL_AGP_CACHED_MEMORY)
		goto out_err;

	if (!mem->is_flushed)
		global_cache_flush();

	intel_agp_insert_sg_entries(mem, pg_start, mask_type);

out:
	ret = 0;
out_err:
	mem->is_flushed = true;
	return ret;
}

static int intel_i915_remove_entries(struct agp_memory *mem, off_t pg_start,
				     int type)
{
	int i;

	if (mem->page_count == 0)
		return 0;

	if (pg_start < intel_private.gtt_entries) {
		dev_info(&intel_private.pcidev->dev,
			 "trying to disable local/stolen memory\n");
		return -EINVAL;
	}

	for (i = pg_start; i < (mem->page_count + pg_start); i++)
		writel(agp_bridge->scratch_page, intel_private.gtt+i);

	readl(intel_private.gtt+i-1);

	return 0;
}

/* Return the aperture size by just checking the resource length. The effect
 * described in the spec of the MSAC registers is just changing of the
 * resource size.
 */
static int intel_i9xx_fetch_size(void)
{
	int num_sizes = ARRAY_SIZE(intel_i830_sizes);
	int aper_size;	/* size in megabytes */
	int i;

	aper_size = pci_resource_len(intel_private.pcidev, 2) / MB(1);

	for (i = 0; i < num_sizes; i++) {
		if (aper_size == intel_i830_sizes[i].size) {
			agp_bridge->current_size = intel_i830_sizes + i;
			return aper_size;
		}
	}

	return 0;
}

static int intel_i915_get_gtt_size(void)
{
	int size;

	if (IS_G33) {
		u16 gmch_ctrl;

		/* G33's GTT size defined in gmch_ctrl */
		pci_read_config_word(agp_bridge->dev, I830_GMCH_CTRL, &gmch_ctrl);
		switch (gmch_ctrl & I830_GMCH_GMS_MASK) {
		case I830_GMCH_GMS_STOLEN_512:
			size = 512;
			break;
		case I830_GMCH_GMS_STOLEN_1024:
			size = 1024;
			break;
		case I830_GMCH_GMS_STOLEN_8192:
			size = 8*1024;
			break;
		default:
			dev_info(&agp_bridge->dev->dev,
				 "unknown page table size 0x%x, assuming 512KB\n",
				 (gmch_ctrl & I830_GMCH_GMS_MASK));
			size = 512;
		}
	} else {
		/* On previous hardware, the GTT size was just what was
		 * required to map the aperture.
		 */
		size = agp_bridge->driver->fetch_size();
	}

	return KB(size);
}

/* The intel i915 automatically initializes the agp aperture during POST.
 * Use the memory already set aside for it in the GTT.
 */
static int intel_i915_create_gatt_table(struct agp_bridge_data *bridge)
{
	int page_order;
	struct aper_size_info_fixed *size;
	int num_entries;
	u32 temp, temp2;
	int gtt_map_size;

	size = agp_bridge->current_size;
	page_order = size->page_order;
	num_entries = size->num_entries;
	agp_bridge->gatt_table_real = NULL;

	pci_read_config_dword(intel_private.pcidev, I915_MMADDR, &temp);
	pci_read_config_dword(intel_private.pcidev, I915_PTEADDR, &temp2);

	gtt_map_size = intel_i915_get_gtt_size();

	intel_private.gtt = ioremap(temp2, gtt_map_size);
	if (!intel_private.gtt)
		return -ENOMEM;

	intel_private.gtt_total_size = gtt_map_size / 4;

	temp &= 0xfff80000;

	intel_private.registers = ioremap(temp, 128 * 4096);
	if (!intel_private.registers) {
		iounmap(intel_private.gtt);
		return -ENOMEM;
	}

	temp = readl(intel_private.registers+I810_PGETBL_CTL) & 0xfffff000;
	global_cache_flush();	/* FIXME: ? */

	/* we have to call this as early as possible after the MMIO base address is known */
	intel_i830_init_gtt_entries();
	if (intel_private.gtt_entries == 0) {
		iounmap(intel_private.gtt);
		iounmap(intel_private.registers);
		return -ENOMEM;
	}

	agp_bridge->gatt_table = NULL;

	agp_bridge->gatt_bus_addr = temp;

	return 0;
}

/*
 * The i965 supports 36-bit physical addresses, but to keep
 * the format of the GTT the same, the bits that don't fit
 * in a 32-bit word are shifted down to bits 4..7.
 *
 * Gcc is smart enough to notice that "(addr >> 28) & 0xf0"
 * is always zero on 32-bit architectures, so no need to make
 * this conditional.
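 *
 * Example: for addr 0x3_2000_0000 the expression (addr >> 28) & 0xf0
 * evaluates to 0x30, so physical address bits 35:32 land in PTE bits
 * 7:4 and the low 32 bits written to the GTT become 0x20000030 | mask.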
 */
static unsigned long intel_i965_mask_memory(struct agp_bridge_data *bridge,
					    dma_addr_t addr, int type)
{
	/* Shift high bits down */
	addr |= (addr >> 28) & 0xf0;

	/* Type checking must be done elsewhere */
	return addr | bridge->driver->masks[type].mask;
}

static unsigned long intel_gen6_mask_memory(struct agp_bridge_data *bridge,
					    dma_addr_t addr, int type)
{
	/* Shift high bits down: gen6 stores physical address bits 39:32
	 * in PTE bits 11:4, hence the 0xff0 mask (0xff would corrupt the
	 * low flag bits). */
	addr |= (addr >> 28) & 0xff0;

	/* Type checking must be done elsewhere */
	return addr | bridge->driver->masks[type].mask;
}

static void intel_i965_get_gtt_range(int *gtt_offset, int *gtt_size)
{
	u16 snb_gmch_ctl;

	switch (agp_bridge->dev->device) {
	case PCI_DEVICE_ID_INTEL_GM45_HB:
	case PCI_DEVICE_ID_INTEL_EAGLELAKE_HB:
	case PCI_DEVICE_ID_INTEL_Q45_HB:
	case PCI_DEVICE_ID_INTEL_G45_HB:
	case PCI_DEVICE_ID_INTEL_G41_HB:
	case PCI_DEVICE_ID_INTEL_B43_HB:
	case PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB:
	case PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB:
	case PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB:
	case PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB:
		*gtt_offset = *gtt_size = MB(2);
		break;
	case PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB:
	case PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB:
		*gtt_offset = MB(2);

		pci_read_config_word(intel_private.pcidev, SNB_GMCH_CTRL, &snb_gmch_ctl);
		switch (snb_gmch_ctl & SNB_GTT_SIZE_MASK) {
		default:
		case SNB_GTT_SIZE_0M:
			printk(KERN_ERR "Bad GTT size mask: 0x%04x.\n", snb_gmch_ctl);
			*gtt_size = MB(0);
			break;
		case SNB_GTT_SIZE_1M:
			*gtt_size = MB(1);
			break;
		case SNB_GTT_SIZE_2M:
			*gtt_size = MB(2);
			break;
		}
		break;
	default:
		*gtt_offset = *gtt_size = KB(512);
	}
}

/* The intel i965 automatically initializes the agp aperture during POST.
 * Use the memory already set aside for it in the GTT.
 */
static int intel_i965_create_gatt_table(struct agp_bridge_data *bridge)
{
	int page_order;
	struct aper_size_info_fixed *size;
	int num_entries;
	u32 temp;
	int gtt_offset, gtt_size;

	size = agp_bridge->current_size;
	page_order = size->page_order;
	num_entries = size->num_entries;
	agp_bridge->gatt_table_real = NULL;

	pci_read_config_dword(intel_private.pcidev, I915_MMADDR, &temp);

	temp &= 0xfff00000;

	intel_i965_get_gtt_range(&gtt_offset, &gtt_size);

	intel_private.gtt = ioremap(temp + gtt_offset, gtt_size);

	if (!intel_private.gtt)
		return -ENOMEM;

	intel_private.gtt_total_size = gtt_size / 4;

	intel_private.registers = ioremap(temp, 128 * 4096);
	if (!intel_private.registers) {
		iounmap(intel_private.gtt);
		return -ENOMEM;
	}

	temp = readl(intel_private.registers+I810_PGETBL_CTL) & 0xfffff000;
	global_cache_flush();	/* FIXME: ? */

	/* we have to call this as early as possible after the MMIO base address is known */
	intel_i830_init_gtt_entries();
	if (intel_private.gtt_entries == 0) {
		iounmap(intel_private.gtt);
		iounmap(intel_private.registers);
		return -ENOMEM;
	}

	agp_bridge->gatt_table = NULL;

	agp_bridge->gatt_bus_addr = temp;

	return 0;
}

static const struct agp_bridge_driver intel_810_driver = {
	.owner			= THIS_MODULE,
	.aperture_sizes		= intel_i810_sizes,
	.size_type		= FIXED_APER_SIZE,
	.num_aperture_sizes	= 2,
	.needs_scratch_page	= true,
	.configure		= intel_i810_configure,
	.fetch_size		= intel_i810_fetch_size,
	.cleanup		= intel_i810_cleanup,
	.mask_memory		= intel_i810_mask_memory,
	.masks			= intel_i810_masks,
	.agp_enable		= intel_i810_agp_enable,
	.cache_flush		= global_cache_flush,
	.create_gatt_table	= agp_generic_create_gatt_table,
	.free_gatt_table	= agp_generic_free_gatt_table,
	.insert_memory		= intel_i810_insert_entries,
	.remove_memory		= intel_i810_remove_entries,
	.alloc_by_type		= intel_i810_alloc_by_type,
	.free_by_type		= intel_i810_free_by_type,
	.agp_alloc_page		= agp_generic_alloc_page,
	.agp_alloc_pages	= agp_generic_alloc_pages,
	.agp_destroy_page	= agp_generic_destroy_page,
	.agp_destroy_pages	= agp_generic_destroy_pages,
	.agp_type_to_mask_type	= agp_generic_type_to_mask_type,
};

static const struct agp_bridge_driver intel_830_driver = {
	.owner			= THIS_MODULE,
	.aperture_sizes		= intel_i830_sizes,
	.size_type		= FIXED_APER_SIZE,
	.num_aperture_sizes	= 4,
	.needs_scratch_page	= true,
	.configure		= intel_i830_configure,
	.fetch_size		= intel_i830_fetch_size,
	.cleanup		= intel_i830_cleanup,
	.mask_memory		= intel_i810_mask_memory,
	.masks			= intel_i810_masks,
	.agp_enable		= intel_i810_agp_enable,
	.cache_flush		= global_cache_flush,
	.create_gatt_table	= intel_i830_create_gatt_table,
	.free_gatt_table	= intel_i830_free_gatt_table,
	.insert_memory		= intel_i830_insert_entries,
	.remove_memory		= intel_i830_remove_entries,
	.alloc_by_type		= intel_i830_alloc_by_type,
	.free_by_type		= intel_i810_free_by_type,
	.agp_alloc_page		= agp_generic_alloc_page,
	.agp_alloc_pages	= agp_generic_alloc_pages,
	.agp_destroy_page	= agp_generic_destroy_page,
	.agp_destroy_pages	= agp_generic_destroy_pages,
	.agp_type_to_mask_type	= intel_i830_type_to_mask_type,
	.chipset_flush		= intel_i830_chipset_flush,
};

static const struct agp_bridge_driver intel_915_driver = {
	.owner			= THIS_MODULE,
	.aperture_sizes		= intel_i830_sizes,
	.size_type		= FIXED_APER_SIZE,
	.num_aperture_sizes	= 4,
	.needs_scratch_page	= true,
	.configure		= intel_i9xx_configure,
	.fetch_size		= intel_i9xx_fetch_size,
	.cleanup		= intel_i915_cleanup,
	.mask_memory		= intel_i810_mask_memory,
	.masks			= intel_i810_masks,
	.agp_enable		= intel_i810_agp_enable,
	.cache_flush		= global_cache_flush,
	.create_gatt_table	= intel_i915_create_gatt_table,
	.free_gatt_table	= intel_i830_free_gatt_table,
	.insert_memory		= intel_i915_insert_entries,
	.remove_memory		= intel_i915_remove_entries,
	.alloc_by_type		= intel_i830_alloc_by_type,
	.free_by_type		= intel_i810_free_by_type,
	.agp_alloc_page		= agp_generic_alloc_page,
	.agp_alloc_pages	= agp_generic_alloc_pages,
	.agp_destroy_page	= agp_generic_destroy_page,
	.agp_destroy_pages	= agp_generic_destroy_pages,
	.agp_type_to_mask_type	= intel_i830_type_to_mask_type,
	.chipset_flush		= intel_i915_chipset_flush,
#ifdef USE_PCI_DMA_API
	.agp_map_page		= intel_agp_map_page,
	.agp_unmap_page		= intel_agp_unmap_page,
	.agp_map_memory		= intel_agp_map_memory,
	.agp_unmap_memory	= intel_agp_unmap_memory,
#endif
};

static const struct agp_bridge_driver intel_i965_driver = {
	.owner			= THIS_MODULE,
	.aperture_sizes		= intel_i830_sizes,
	.size_type		= FIXED_APER_SIZE,
	.num_aperture_sizes	= 4,
	.needs_scratch_page	= true,
	.configure		= intel_i9xx_configure,
	.fetch_size		= intel_i9xx_fetch_size,
	.cleanup		= intel_i915_cleanup,
	.mask_memory		= intel_i965_mask_memory,
	.masks			= intel_i810_masks,
	.agp_enable		= intel_i810_agp_enable,
	.cache_flush		= global_cache_flush,
	.create_gatt_table	= intel_i965_create_gatt_table,
	.free_gatt_table	= intel_i830_free_gatt_table,
	.insert_memory		= intel_i915_insert_entries,
	.remove_memory		= intel_i915_remove_entries,
	.alloc_by_type		= intel_i830_alloc_by_type,
	.free_by_type		= intel_i810_free_by_type,
	.agp_alloc_page		= agp_generic_alloc_page,
	.agp_alloc_pages	= agp_generic_alloc_pages,
	.agp_destroy_page	= agp_generic_destroy_page,
	.agp_destroy_pages	= agp_generic_destroy_pages,
	.agp_type_to_mask_type	= intel_i830_type_to_mask_type,
	.chipset_flush		= intel_i915_chipset_flush,
#ifdef USE_PCI_DMA_API
	.agp_map_page		= intel_agp_map_page,
	.agp_unmap_page		= intel_agp_unmap_page,
	.agp_map_memory		= intel_agp_map_memory,
	.agp_unmap_memory	= intel_agp_unmap_memory,
#endif
};

static const struct agp_bridge_driver intel_gen6_driver = {
	.owner			= THIS_MODULE,
	.aperture_sizes		= intel_i830_sizes,
	.size_type		= FIXED_APER_SIZE,
	.num_aperture_sizes	= 4,
	.needs_scratch_page	= true,
	.configure		= intel_i9xx_configure,
	.fetch_size		= intel_i9xx_fetch_size,
	.cleanup		= intel_i915_cleanup,
	.mask_memory		= intel_gen6_mask_memory,
	.masks			= intel_i810_masks,
	.agp_enable		= intel_i810_agp_enable,
	.cache_flush		= global_cache_flush,
	.create_gatt_table	= intel_i965_create_gatt_table,
	.free_gatt_table	= intel_i830_free_gatt_table,
	.insert_memory		= intel_i915_insert_entries,
	.remove_memory		= intel_i915_remove_entries,
	.alloc_by_type		= intel_i830_alloc_by_type,
	.free_by_type		= intel_i810_free_by_type,
	.agp_alloc_page		= agp_generic_alloc_page,
	.agp_alloc_pages	= agp_generic_alloc_pages,
	.agp_destroy_page	= agp_generic_destroy_page,
	.agp_destroy_pages	= agp_generic_destroy_pages,
	.agp_type_to_mask_type	= intel_i830_type_to_mask_type,
	.chipset_flush		= intel_i915_chipset_flush,
#ifdef USE_PCI_DMA_API
	.agp_map_page		= intel_agp_map_page,
	.agp_unmap_page		= intel_agp_unmap_page,
	.agp_map_memory		= intel_agp_map_memory,
	.agp_unmap_memory	= intel_agp_unmap_memory,
#endif
};

static const struct agp_bridge_driver intel_g33_driver = {
	.owner			= THIS_MODULE,
	.aperture_sizes		= intel_i830_sizes,
	.size_type		= FIXED_APER_SIZE,
	.num_aperture_sizes	= 4,
	.needs_scratch_page	= true,
	.configure		= intel_i9xx_configure,
	.fetch_size		= intel_i9xx_fetch_size,
	.cleanup		= intel_i915_cleanup,
	.mask_memory		= intel_i965_mask_memory,
	.masks			= intel_i810_masks,
	.agp_enable		= intel_i810_agp_enable,
	.cache_flush		= global_cache_flush,
	.create_gatt_table	= intel_i915_create_gatt_table,
	.free_gatt_table	= intel_i830_free_gatt_table,
	.insert_memory		= intel_i915_insert_entries,
	.remove_memory		= intel_i915_remove_entries,
	.alloc_by_type		= intel_i830_alloc_by_type,
	.free_by_type		= intel_i810_free_by_type,
	.agp_alloc_page		= agp_generic_alloc_page,
	.agp_alloc_pages	= agp_generic_alloc_pages,
	.agp_destroy_page	= agp_generic_destroy_page,
	.agp_destroy_pages	= agp_generic_destroy_pages,
	.agp_type_to_mask_type	= intel_i830_type_to_mask_type,
	.chipset_flush		= intel_i915_chipset_flush,
#ifdef USE_PCI_DMA_API
	.agp_map_page		= intel_agp_map_page,
	.agp_unmap_page		= intel_agp_unmap_page,
	.agp_map_memory		= intel_agp_map_memory,
	.agp_unmap_memory	= intel_agp_unmap_memory,
#endif
};