blob: 77fcde6f34656863646b09f5bf20c4c9ee3efd48 [file] [log] [blame]
Dave Airlief64122c2013-02-25 14:47:55 +10001/*
2 * Copyright 2013 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Dave Airlie
23 * Alon Levy
24 */
25
26#include "qxl_drv.h"
27#include "qxl_object.h"
28
29/*
30 * TODO: allocating a new gem(in qxl_bo) for each request.
31 * This is wasteful since bo's are page aligned.
32 */
Dave Airlie6d01f1f2013-04-16 13:24:25 +100033static int qxl_alloc_ioctl(struct drm_device *dev, void *data,
34 struct drm_file *file_priv)
Dave Airlief64122c2013-02-25 14:47:55 +100035{
36 struct qxl_device *qdev = dev->dev_private;
37 struct drm_qxl_alloc *qxl_alloc = data;
38 int ret;
39 struct qxl_bo *qobj;
40 uint32_t handle;
41 u32 domain = QXL_GEM_DOMAIN_VRAM;
42
43 if (qxl_alloc->size == 0) {
44 DRM_ERROR("invalid size %d\n", qxl_alloc->size);
45 return -EINVAL;
46 }
47 ret = qxl_gem_object_create_with_handle(qdev, file_priv,
48 domain,
49 qxl_alloc->size,
50 NULL,
51 &qobj, &handle);
52 if (ret) {
53 DRM_ERROR("%s: failed to create gem ret=%d\n",
54 __func__, ret);
55 return -ENOMEM;
56 }
57 qxl_alloc->handle = handle;
58 return 0;
59}
60
Dave Airlie6d01f1f2013-04-16 13:24:25 +100061static int qxl_map_ioctl(struct drm_device *dev, void *data,
62 struct drm_file *file_priv)
Dave Airlief64122c2013-02-25 14:47:55 +100063{
64 struct qxl_device *qdev = dev->dev_private;
65 struct drm_qxl_map *qxl_map = data;
66
67 return qxl_mode_dumb_mmap(file_priv, qdev->ddev, qxl_map->handle,
68 &qxl_map->offset);
69}
70
/* Kernel-side copy of one userspace relocation entry, with the gem
 * handles already resolved to qxl_bo pointers. */
struct qxl_reloc_info {
	int type;		/* QXL_RELOC_TYPE_BO or QXL_RELOC_TYPE_SURF */
	struct qxl_bo *dst_bo;	/* bo that gets patched */
	uint32_t dst_offset;	/* byte offset inside dst_bo to patch */
	struct qxl_bo *src_bo;	/* bo being pointed at (may be NULL) */
	int src_offset;		/* byte offset inside src_bo */
};
78
Dave Airlief64122c2013-02-25 14:47:55 +100079/*
80 * dst must be validated, i.e. whole bo on vram/surfacesram (right now all bo's
81 * are on vram).
82 * *(dst + dst_off) = qxl_bo_physical_address(src, src_off)
83 */
84static void
Dave Airlie8002db62013-07-23 14:16:42 +100085apply_reloc(struct qxl_device *qdev, struct qxl_reloc_info *info)
Dave Airlief64122c2013-02-25 14:47:55 +100086{
87 void *reloc_page;
Dave Airlie8002db62013-07-23 14:16:42 +100088 reloc_page = qxl_bo_kmap_atomic_page(qdev, info->dst_bo, info->dst_offset & PAGE_MASK);
89 *(uint64_t *)(reloc_page + (info->dst_offset & ~PAGE_MASK)) = qxl_bo_physical_address(qdev,
90 info->src_bo,
91 info->src_offset);
92 qxl_bo_kunmap_atomic_page(qdev, info->dst_bo, reloc_page);
Dave Airlief64122c2013-02-25 14:47:55 +100093}
94
95static void
Dave Airlie8002db62013-07-23 14:16:42 +100096apply_surf_reloc(struct qxl_device *qdev, struct qxl_reloc_info *info)
Dave Airlief64122c2013-02-25 14:47:55 +100097{
98 uint32_t id = 0;
99 void *reloc_page;
100
Dave Airlie8002db62013-07-23 14:16:42 +1000101 if (info->src_bo && !info->src_bo->is_primary)
102 id = info->src_bo->surface_id;
Dave Airlief64122c2013-02-25 14:47:55 +1000103
Dave Airlie8002db62013-07-23 14:16:42 +1000104 reloc_page = qxl_bo_kmap_atomic_page(qdev, info->dst_bo, info->dst_offset & PAGE_MASK);
105 *(uint32_t *)(reloc_page + (info->dst_offset & ~PAGE_MASK)) = id;
106 qxl_bo_kunmap_atomic_page(qdev, info->dst_bo, reloc_page);
Dave Airlief64122c2013-02-25 14:47:55 +1000107}
108
109/* return holding the reference to this object */
Dave Airlie6d01f1f2013-04-16 13:24:25 +1000110static struct qxl_bo *qxlhw_handle_to_bo(struct qxl_device *qdev,
111 struct drm_file *file_priv, uint64_t handle,
Dave Airlie8002db62013-07-23 14:16:42 +1000112 struct qxl_release *release)
Dave Airlief64122c2013-02-25 14:47:55 +1000113{
114 struct drm_gem_object *gobj;
115 struct qxl_bo *qobj;
116 int ret;
117
118 gobj = drm_gem_object_lookup(qdev->ddev, file_priv, handle);
Dave Airlie8002db62013-07-23 14:16:42 +1000119 if (!gobj)
Dave Airlief64122c2013-02-25 14:47:55 +1000120 return NULL;
Dave Airlie8002db62013-07-23 14:16:42 +1000121
Dave Airlief64122c2013-02-25 14:47:55 +1000122 qobj = gem_to_qxl_bo(gobj);
123
Dave Airlie8002db62013-07-23 14:16:42 +1000124 ret = qxl_release_list_add(release, qobj);
Frediano Ziglio8451cc92015-06-03 12:09:10 +0100125 if (ret) {
126 drm_gem_object_unreference_unlocked(gobj);
Dave Airlief64122c2013-02-25 14:47:55 +1000127 return NULL;
Frediano Ziglio8451cc92015-06-03 12:09:10 +0100128 }
Dave Airlief64122c2013-02-25 14:47:55 +1000129
130 return qobj;
131}
132
133/*
134 * Usage of execbuffer:
135 * Relocations need to take into account the full QXLDrawable size.
136 * However, the command as passed from user space must *not* contain the initial
137 * QXLReleaseInfo struct (first XXX bytes)
138 */
Dave Airlie8002db62013-07-23 14:16:42 +1000139static int qxl_process_single_command(struct qxl_device *qdev,
140 struct drm_qxl_command *cmd,
141 struct drm_file *file_priv)
142{
143 struct qxl_reloc_info *reloc_info;
144 int release_type;
145 struct qxl_release *release;
146 struct qxl_bo *cmd_bo;
147 void *fb_cmd;
148 int i, j, ret, num_relocs;
149 int unwritten;
150
151 switch (cmd->type) {
152 case QXL_CMD_DRAW:
153 release_type = QXL_RELEASE_DRAWABLE;
154 break;
155 case QXL_CMD_SURFACE:
156 case QXL_CMD_CURSOR:
157 default:
158 DRM_DEBUG("Only draw commands in execbuffers\n");
159 return -EINVAL;
160 break;
161 }
162
163 if (cmd->command_size > PAGE_SIZE - sizeof(union qxl_release_info))
164 return -EINVAL;
165
166 if (!access_ok(VERIFY_READ,
167 (void *)(unsigned long)cmd->command,
168 cmd->command_size))
169 return -EFAULT;
170
171 reloc_info = kmalloc(sizeof(struct qxl_reloc_info) * cmd->relocs_num, GFP_KERNEL);
172 if (!reloc_info)
173 return -ENOMEM;
174
175 ret = qxl_alloc_release_reserved(qdev,
176 sizeof(union qxl_release_info) +
177 cmd->command_size,
178 release_type,
179 &release,
180 &cmd_bo);
181 if (ret)
182 goto out_free_reloc;
183
184 /* TODO copy slow path code from i915 */
185 fb_cmd = qxl_bo_kmap_atomic_page(qdev, cmd_bo, (release->release_offset & PAGE_SIZE));
186 unwritten = __copy_from_user_inatomic_nocache(fb_cmd + sizeof(union qxl_release_info) + (release->release_offset & ~PAGE_SIZE), (void *)(unsigned long)cmd->command, cmd->command_size);
187
188 {
189 struct qxl_drawable *draw = fb_cmd;
190 draw->mm_time = qdev->rom->mm_clock;
191 }
192
193 qxl_bo_kunmap_atomic_page(qdev, cmd_bo, fb_cmd);
194 if (unwritten) {
195 DRM_ERROR("got unwritten %d\n", unwritten);
196 ret = -EFAULT;
197 goto out_free_release;
198 }
199
200 /* fill out reloc info structs */
201 num_relocs = 0;
202 for (i = 0; i < cmd->relocs_num; ++i) {
203 struct drm_qxl_reloc reloc;
204
Daniel Vetter1d6ac182013-12-11 11:34:44 +0100205 if (copy_from_user(&reloc,
Dave Airlie8002db62013-07-23 14:16:42 +1000206 &((struct drm_qxl_reloc *)(uintptr_t)cmd->relocs)[i],
207 sizeof(reloc))) {
208 ret = -EFAULT;
209 goto out_free_bos;
210 }
211
212 /* add the bos to the list of bos to validate -
213 need to validate first then process relocs? */
214 if (reloc.reloc_type != QXL_RELOC_TYPE_BO && reloc.reloc_type != QXL_RELOC_TYPE_SURF) {
Frediano Ziglio55cc3df2015-06-03 12:09:11 +0100215 DRM_DEBUG("unknown reloc type %d\n", reloc.reloc_type);
Dave Airlie8002db62013-07-23 14:16:42 +1000216
217 ret = -EINVAL;
218 goto out_free_bos;
219 }
220 reloc_info[i].type = reloc.reloc_type;
221
222 if (reloc.dst_handle) {
223 reloc_info[i].dst_bo = qxlhw_handle_to_bo(qdev, file_priv,
224 reloc.dst_handle, release);
225 if (!reloc_info[i].dst_bo) {
226 ret = -EINVAL;
227 reloc_info[i].src_bo = NULL;
228 goto out_free_bos;
229 }
230 reloc_info[i].dst_offset = reloc.dst_offset;
231 } else {
232 reloc_info[i].dst_bo = cmd_bo;
233 reloc_info[i].dst_offset = reloc.dst_offset + release->release_offset;
234 }
235 num_relocs++;
236
237 /* reserve and validate the reloc dst bo */
238 if (reloc.reloc_type == QXL_RELOC_TYPE_BO || reloc.src_handle > 0) {
239 reloc_info[i].src_bo =
240 qxlhw_handle_to_bo(qdev, file_priv,
241 reloc.src_handle, release);
242 if (!reloc_info[i].src_bo) {
Dave Airlie8002db62013-07-23 14:16:42 +1000243 ret = -EINVAL;
244 goto out_free_bos;
245 }
246 reloc_info[i].src_offset = reloc.src_offset;
247 } else {
248 reloc_info[i].src_bo = NULL;
249 reloc_info[i].src_offset = 0;
250 }
251 }
252
253 /* validate all buffers */
254 ret = qxl_release_reserve_list(release, false);
255 if (ret)
256 goto out_free_bos;
257
258 for (i = 0; i < cmd->relocs_num; ++i) {
259 if (reloc_info[i].type == QXL_RELOC_TYPE_BO)
260 apply_reloc(qdev, &reloc_info[i]);
261 else if (reloc_info[i].type == QXL_RELOC_TYPE_SURF)
262 apply_surf_reloc(qdev, &reloc_info[i]);
263 }
264
265 ret = qxl_push_command_ring_release(qdev, release, cmd->type, true);
266 if (ret)
267 qxl_release_backoff_reserve_list(release);
268 else
269 qxl_release_fence_buffer_objects(release);
270
271out_free_bos:
272 for (j = 0; j < num_relocs; j++) {
273 if (reloc_info[j].dst_bo != cmd_bo)
274 drm_gem_object_unreference_unlocked(&reloc_info[j].dst_bo->gem_base);
275 if (reloc_info[j].src_bo && reloc_info[j].src_bo != cmd_bo)
276 drm_gem_object_unreference_unlocked(&reloc_info[j].src_bo->gem_base);
277 }
278out_free_release:
279 if (ret)
280 qxl_release_free(qdev, release);
281out_free_reloc:
282 kfree(reloc_info);
283 return ret;
284}
285
Dave Airlie6d01f1f2013-04-16 13:24:25 +1000286static int qxl_execbuffer_ioctl(struct drm_device *dev, void *data,
287 struct drm_file *file_priv)
Dave Airlief64122c2013-02-25 14:47:55 +1000288{
289 struct qxl_device *qdev = dev->dev_private;
290 struct drm_qxl_execbuffer *execbuffer = data;
291 struct drm_qxl_command user_cmd;
292 int cmd_num;
Dave Airlie8002db62013-07-23 14:16:42 +1000293 int ret;
Dave Airlief64122c2013-02-25 14:47:55 +1000294
295 for (cmd_num = 0; cmd_num < execbuffer->commands_num; ++cmd_num) {
Dave Airlie8002db62013-07-23 14:16:42 +1000296
Dave Airlief64122c2013-02-25 14:47:55 +1000297 struct drm_qxl_command *commands =
Dave Airlie970fa982013-05-31 12:45:09 +1000298 (struct drm_qxl_command *)(uintptr_t)execbuffer->commands;
Dave Airlief64122c2013-02-25 14:47:55 +1000299
Daniel Vetter1d6ac182013-12-11 11:34:44 +0100300 if (copy_from_user(&user_cmd, &commands[cmd_num],
Dave Airlief64122c2013-02-25 14:47:55 +1000301 sizeof(user_cmd)))
302 return -EFAULT;
Dave Airlief64122c2013-02-25 14:47:55 +1000303
Dave Airlie8002db62013-07-23 14:16:42 +1000304 ret = qxl_process_single_command(qdev, &user_cmd, file_priv);
Dave Airlief64122c2013-02-25 14:47:55 +1000305 if (ret)
306 return ret;
Dave Airlief64122c2013-02-25 14:47:55 +1000307 }
Dave Airlief64122c2013-02-25 14:47:55 +1000308 return 0;
309}
310
Dave Airlie6d01f1f2013-04-16 13:24:25 +1000311static int qxl_update_area_ioctl(struct drm_device *dev, void *data,
312 struct drm_file *file)
Dave Airlief64122c2013-02-25 14:47:55 +1000313{
314 struct qxl_device *qdev = dev->dev_private;
315 struct drm_qxl_update_area *update_area = data;
316 struct qxl_rect area = {.left = update_area->left,
317 .top = update_area->top,
318 .right = update_area->right,
319 .bottom = update_area->bottom};
320 int ret;
321 struct drm_gem_object *gobj = NULL;
322 struct qxl_bo *qobj = NULL;
323
324 if (update_area->left >= update_area->right ||
325 update_area->top >= update_area->bottom)
326 return -EINVAL;
327
328 gobj = drm_gem_object_lookup(dev, file, update_area->handle);
329 if (gobj == NULL)
330 return -ENOENT;
331
332 qobj = gem_to_qxl_bo(gobj);
333
334 ret = qxl_bo_reserve(qobj, false);
335 if (ret)
336 goto out;
337
338 if (!qobj->pin_count) {
Dave Airlie4f49ec92013-07-23 14:06:07 +1000339 qxl_ttm_placement_from_domain(qobj, qobj->type, false);
Dave Airlief64122c2013-02-25 14:47:55 +1000340 ret = ttm_bo_validate(&qobj->tbo, &qobj->placement,
341 true, false);
342 if (unlikely(ret))
343 goto out;
344 }
345
346 ret = qxl_bo_check_id(qdev, qobj);
347 if (ret)
348 goto out2;
349 if (!qobj->surface_id)
350 DRM_ERROR("got update area for surface with no id %d\n", update_area->handle);
351 ret = qxl_io_update_area(qdev, qobj, &area);
352
353out2:
354 qxl_bo_unreserve(qobj);
355
356out:
357 drm_gem_object_unreference_unlocked(gobj);
358 return ret;
359}
360
361static int qxl_getparam_ioctl(struct drm_device *dev, void *data,
362 struct drm_file *file_priv)
363{
364 struct qxl_device *qdev = dev->dev_private;
365 struct drm_qxl_getparam *param = data;
366
367 switch (param->param) {
368 case QXL_PARAM_NUM_SURFACES:
369 param->value = qdev->rom->n_surfaces;
370 break;
371 case QXL_PARAM_MAX_RELOCS:
372 param->value = QXL_MAX_RES;
373 break;
374 default:
375 return -EINVAL;
376 }
377 return 0;
378}
379
380static int qxl_clientcap_ioctl(struct drm_device *dev, void *data,
381 struct drm_file *file_priv)
382{
383 struct qxl_device *qdev = dev->dev_private;
384 struct drm_qxl_clientcap *param = data;
385 int byte, idx;
386
387 byte = param->index / 8;
388 idx = param->index % 8;
389
390 if (qdev->pdev->revision < 4)
391 return -ENOSYS;
392
Dave Airlie62c8ba72013-04-16 13:36:00 +1000393 if (byte >= 58)
Dave Airlief64122c2013-02-25 14:47:55 +1000394 return -ENOSYS;
395
396 if (qdev->rom->client_capabilities[byte] & (1 << idx))
397 return 0;
398 return -ENOSYS;
399}
400
401static int qxl_alloc_surf_ioctl(struct drm_device *dev, void *data,
402 struct drm_file *file)
403{
404 struct qxl_device *qdev = dev->dev_private;
405 struct drm_qxl_alloc_surf *param = data;
406 struct qxl_bo *qobj;
407 int handle;
408 int ret;
409 int size, actual_stride;
410 struct qxl_surface surf;
411
412 /* work out size allocate bo with handle */
413 actual_stride = param->stride < 0 ? -param->stride : param->stride;
414 size = actual_stride * param->height + actual_stride;
415
416 surf.format = param->format;
417 surf.width = param->width;
418 surf.height = param->height;
419 surf.stride = param->stride;
420 surf.data = 0;
421
422 ret = qxl_gem_object_create_with_handle(qdev, file,
423 QXL_GEM_DOMAIN_SURFACE,
424 size,
425 &surf,
426 &qobj, &handle);
427 if (ret) {
428 DRM_ERROR("%s: failed to create gem ret=%d\n",
429 __func__, ret);
430 return -ENOMEM;
431 } else
432 param->handle = handle;
433 return ret;
434}
435
/* Ioctl dispatch table.  Entry order is significant: it must match the
 * DRM_QXL_* ioctl numbering used by DRM_IOCTL_DEF_DRV — presumably
 * defined in the qxl uapi header; verify before reordering.  All ioctls
 * require DRM_AUTH and run unlocked. */
const struct drm_ioctl_desc qxl_ioctls[] = {
	DRM_IOCTL_DEF_DRV(QXL_ALLOC, qxl_alloc_ioctl, DRM_AUTH|DRM_UNLOCKED),

	DRM_IOCTL_DEF_DRV(QXL_MAP, qxl_map_ioctl, DRM_AUTH|DRM_UNLOCKED),

	DRM_IOCTL_DEF_DRV(QXL_EXECBUFFER, qxl_execbuffer_ioctl,
							DRM_AUTH|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(QXL_UPDATE_AREA, qxl_update_area_ioctl,
							DRM_AUTH|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(QXL_GETPARAM, qxl_getparam_ioctl,
							DRM_AUTH|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(QXL_CLIENTCAP, qxl_clientcap_ioctl,
							DRM_AUTH|DRM_UNLOCKED),

	DRM_IOCTL_DEF_DRV(QXL_ALLOC_SURF, qxl_alloc_surf_ioctl,
							DRM_AUTH|DRM_UNLOCKED),
};

/* number of entries above, consumed by the drm_driver definition */
int qxl_max_ioctls = ARRAY_SIZE(qxl_ioctls);