#include <linux/pagemap.h>
#include <linux/slab.h>

#include <subdev/fb.h>

#include "nouveau_drm.h"
#include "nouveau_ttm.h"

struct nouveau_sgdma_be {
	/* this has to be the first field so populate/unpopulate in
	 * nouveau_bo.c works properly, otherwise have to move them here
	 */
	struct ttm_dma_tt ttm;
	struct drm_device *dev;
	struct nouveau_mem *node;
};

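/* Tear down the DMA-aware TTM object and free the backend wrapper. */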
static void
nouveau_sgdma_destroy(struct ttm_tt *ttm)
{
	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;

	if (ttm) {
		ttm_dma_tt_fini(&nvbe->ttm);
		kfree(nvbe);
	}
}

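/*
 * Pre-NV50 path: record the backing pages (scatterlist or DMA address
 * array) in the memory node and map it into the GPU VM right away.
 */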
static int
nv04_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
{
	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
	struct nouveau_mem *node = mem->mm_node;

	if (ttm->sg) {
		node->sg = ttm->sg;
		node->pages = NULL;
	} else {
		node->sg = NULL;
		node->pages = nvbe->ttm.dma_address;
	}
	node->size = (mem->num_pages << PAGE_SHIFT) >> 12;

	nouveau_vm_map(&node->vma[0], node);
	nvbe->node = node;
	return 0;
}

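/* Pre-NV50 path: drop the VM mapping created at bind time. */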
static int
nv04_sgdma_unbind(struct ttm_tt *ttm)
{
	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
	nouveau_vm_unmap(&nvbe->node->vma[0]);
	return 0;
}

static struct ttm_backend_func nv04_sgdma_backend = {
	.bind			= nv04_sgdma_bind,
	.unbind			= nv04_sgdma_unbind,
	.destroy		= nouveau_sgdma_destroy
};

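/*
 * NV50+ path: only record the page information here; the actual VM
 * mapping happens in move_notify().
 */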
static int
nv50_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
{
	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
	struct nouveau_mem *node = mem->mm_node;

	/* noop: bound in move_notify() */
	if (ttm->sg) {
		node->sg = ttm->sg;
		node->pages = NULL;
	} else {
		node->sg = NULL;
		node->pages = nvbe->ttm.dma_address;
	}
	node->size = (mem->num_pages << PAGE_SHIFT) >> 12;
	return 0;
}

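/* NV50+ path: nothing to undo here, unmapping happens in move_notify(). */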
static int
nv50_sgdma_unbind(struct ttm_tt *ttm)
{
	/* noop: unbound in move_notify() */
	return 0;
}

static struct ttm_backend_func nv50_sgdma_backend = {
	.bind			= nv50_sgdma_bind,
	.unbind			= nv50_sgdma_unbind,
	.destroy		= nouveau_sgdma_destroy
};

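/*
 * Allocate the TTM backend for a buffer object, selecting the nv04
 * (map at bind time) or nv50 (map via move_notify()) implementation
 * based on the card generation.
 */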
struct ttm_tt *
nouveau_sgdma_create_ttm(struct ttm_bo_device *bdev,
			 unsigned long size, uint32_t page_flags,
			 struct page *dummy_read_page)
{
	struct nouveau_drm *drm = nouveau_bdev(bdev);
	struct nouveau_sgdma_be *nvbe;

	nvbe = kzalloc(sizeof(*nvbe), GFP_KERNEL);
	if (!nvbe)
		return NULL;

	nvbe->dev = drm->dev;
	if (nv_device(drm->device)->card_type < NV_50)
		nvbe->ttm.ttm.func = &nv04_sgdma_backend;
	else
		nvbe->ttm.ttm.func = &nv50_sgdma_backend;

	if (ttm_dma_tt_init(&nvbe->ttm, bdev, size, page_flags, dummy_read_page)) {
		kfree(nvbe);	/* don't leak the backend on init failure */
		return NULL;
	}
	return &nvbe->ttm.ttm;
}