/*
 * Copyright (C) 2012 Red Hat
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License v2. See the file COPYING in the main directory of this archive for
 * more details.
 */

#include "drmP.h"
#include "udl_drv.h"
#include <linux/shmem_fs.h>

struct udl_gem_object *udl_gem_alloc_object(struct drm_device *dev,
					    size_t size)
{
	struct udl_gem_object *obj;

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (obj == NULL)
		return NULL;

	if (drm_gem_object_init(dev, &obj->base, size) != 0) {
		kfree(obj);
		return NULL;
	}

	return obj;
}

static int
udl_gem_create(struct drm_file *file,
	       struct drm_device *dev,
	       uint64_t size,
	       uint32_t *handle_p)
{
	struct udl_gem_object *obj;
	int ret;
	u32 handle;

	size = roundup(size, PAGE_SIZE);

	obj = udl_gem_alloc_object(dev, size);
	if (obj == NULL)
		return -ENOMEM;

	ret = drm_gem_handle_create(file, &obj->base, &handle);
	if (ret) {
		drm_gem_object_release(&obj->base);
		kfree(obj);
		return ret;
	}

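	/*
	 * The handle created above holds its own reference to the object,
	 * so drop the initial allocation reference here; the handle now
	 * keeps the object alive until userspace closes it.
	 */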
	drm_gem_object_unreference(&obj->base);
	*handle_p = handle;
	return 0;
}

int udl_dumb_create(struct drm_file *file,
		    struct drm_device *dev,
		    struct drm_mode_create_dumb *args)
{
	args->pitch = args->width * ((args->bpp + 1) / 8);
	args->size = args->pitch * args->height;
	return udl_gem_create(file, dev,
			      args->size, &args->handle);
}

int udl_dumb_destroy(struct drm_file *file, struct drm_device *dev,
		     uint32_t handle)
{
	return drm_gem_handle_delete(file, handle);
}

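/*
 * mmap entry point for the drm fd: let GEM set up the mapping, then
 * switch the VMA from VM_PFNMAP to VM_MIXEDMAP so the fault handler
 * below can insert shmem-backed struct pages with vm_insert_page().
 */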
int udl_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	int ret;

	ret = drm_gem_mmap(filp, vma);
	if (ret)
		return ret;

	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_flags |= VM_MIXEDMAP;

	return ret;
}

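/*
 * Per-page fault handler: work out which page of the object is being
 * touched and insert the corresponding shmem page into the faulting
 * VMA. The pages array must already have been populated by
 * udl_gem_get_pages(); otherwise the access takes a SIGBUS.
 */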
int udl_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct udl_gem_object *obj = to_udl_bo(vma->vm_private_data);
	struct page *page;
	unsigned int page_offset;
	int ret = 0;

	page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >>
		PAGE_SHIFT;

	if (!obj->pages)
		return VM_FAULT_SIGBUS;

	page = obj->pages[page_offset];
	ret = vm_insert_page(vma, (unsigned long)vmf->virtual_address, page);
	switch (ret) {
	case -EAGAIN:
		set_need_resched();
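		/* fall through: report NOPAGE so the fault is retried */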
	case 0:
	case -ERESTARTSYS:
		return VM_FAULT_NOPAGE;
	case -ENOMEM:
		return VM_FAULT_OOM;
	default:
		return VM_FAULT_SIGBUS;
	}
}

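/*
 * udl creates its objects through udl_gem_alloc_object(), so this
 * legacy ->gem_init_object() hook is never expected to run; trap any
 * caller with BUG().
 */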
int udl_gem_init_object(struct drm_gem_object *obj)
{
	BUG();

	return 0;
}

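/*
 * Pin the object's backing store: read (or allocate) every page of the
 * underlying shmem file and cache the struct page pointers in
 * obj->pages for the fault handler and udl_gem_vmap() to use.
 */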
static int udl_gem_get_pages(struct udl_gem_object *obj, gfp_t gfpmask)
{
	int page_count, i;
	struct page *page;
	struct inode *inode;
	struct address_space *mapping;

	if (obj->pages)
		return 0;

	page_count = obj->base.size / PAGE_SIZE;
	BUG_ON(obj->pages != NULL);
	obj->pages = drm_malloc_ab(page_count, sizeof(struct page *));
	if (obj->pages == NULL)
		return -ENOMEM;

	inode = obj->base.filp->f_path.dentry->d_inode;
	mapping = inode->i_mapping;
	gfpmask |= mapping_gfp_mask(mapping);

	for (i = 0; i < page_count; i++) {
		page = shmem_read_mapping_page_gfp(mapping, i, gfpmask);
		if (IS_ERR(page))
			goto err_pages;
		obj->pages[i] = page;
	}

	return 0;
err_pages:
	while (i--)
		page_cache_release(obj->pages[i]);
	drm_free_large(obj->pages);
	obj->pages = NULL;
	return PTR_ERR(page);
}

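/*
 * Drop the page references taken in udl_gem_get_pages() and free the
 * pointer array.
 */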
static void udl_gem_put_pages(struct udl_gem_object *obj)
{
	int page_count = obj->base.size / PAGE_SIZE;
	int i;

	for (i = 0; i < page_count; i++)
		page_cache_release(obj->pages[i]);

	drm_free_large(obj->pages);
	obj->pages = NULL;
}

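/*
 * Map all of the object's backing pages into one contiguous kernel
 * virtual range so the rest of the driver can address the buffer
 * through obj->vmapping.
 */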
int udl_gem_vmap(struct udl_gem_object *obj)
{
	int page_count = obj->base.size / PAGE_SIZE;
	int ret;

	ret = udl_gem_get_pages(obj, GFP_KERNEL);
	if (ret)
		return ret;

	obj->vmapping = vmap(obj->pages, page_count, 0, PAGE_KERNEL);
	if (!obj->vmapping)
		return -ENOMEM;
	return 0;
}

void udl_gem_vunmap(struct udl_gem_object *obj)
{
	if (obj->vmapping)
		vunmap(obj->vmapping);

	udl_gem_put_pages(obj);
}

void udl_gem_free_object(struct drm_gem_object *gem_obj)
{
	struct udl_gem_object *obj = to_udl_bo(gem_obj);

	if (obj->vmapping)
		udl_gem_vunmap(obj);

	if (obj->pages)
		udl_gem_put_pages(obj);

	if (gem_obj->map_list.map)
		drm_gem_free_mmap_offset(gem_obj);
}

/*
 * The dumb-buffer interface doesn't mmap the GEM object directly;
 * userspace asks this entry point for a fake offset and then does a
 * normal mmap() on the drm fd at that offset.
 */
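/*
 * Illustrative sketch (not part of the driver) of the userspace side,
 * assuming the standard dumb-buffer ioctls:
 *
 *	struct drm_mode_create_dumb creq = { .width = w, .height = h, .bpp = 32 };
 *	ioctl(drm_fd, DRM_IOCTL_MODE_CREATE_DUMB, &creq);
 *
 *	struct drm_mode_map_dumb mreq = { .handle = creq.handle };
 *	ioctl(drm_fd, DRM_IOCTL_MODE_MAP_DUMB, &mreq);
 *
 *	void *map = mmap(NULL, creq.size, PROT_READ | PROT_WRITE,
 *			 MAP_SHARED, drm_fd, mreq.offset);
 *
 * The MAP_DUMB path is what reaches udl_gem_mmap() below, which hands
 * back the fake offset that the final mmap() call uses.
 */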
int udl_gem_mmap(struct drm_file *file, struct drm_device *dev,
		 uint32_t handle, uint64_t *offset)
{
	struct udl_gem_object *gobj;
	struct drm_gem_object *obj;
	int ret = 0;

	mutex_lock(&dev->struct_mutex);
	obj = drm_gem_object_lookup(dev, file, handle);
	if (obj == NULL) {
		ret = -ENOENT;
		goto unlock;
	}
	gobj = to_udl_bo(obj);

	ret = udl_gem_get_pages(gobj, GFP_KERNEL);
	if (ret)
		goto out;	/* drop the reference and the lock on error */
	if (!gobj->base.map_list.map) {
		ret = drm_gem_create_mmap_offset(obj);
		if (ret)
			goto out;
	}

	*offset = (u64)gobj->base.map_list.hash.key << PAGE_SHIFT;

out:
	drm_gem_object_unreference(&gobj->base);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}