/**************************************************************************
 *
 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_drv.h"
#include "vmwgfx_reg.h"
#include "ttm/ttm_bo_api.h"
#include "ttm/ttm_placement.h"

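/*
 * Command verifier stubs: vmw_cmd_invalid rejects commands that ordinary
 * user-space may not issue, while vmw_cmd_ok accepts commands that need
 * no further checking.
 */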
static int vmw_cmd_invalid(struct vmw_private *dev_priv,
			   struct vmw_sw_context *sw_context,
			   SVGA3dCmdHeader *header)
{
	/* Privileged clients may issue these commands; everyone else
	 * gets -EINVAL. */
	return capable(CAP_SYS_ADMIN) ? 0 : -EINVAL;
}

static int vmw_cmd_ok(struct vmw_private *dev_priv,
		      struct vmw_sw_context *sw_context,
		      SVGA3dCmdHeader *header)
{
	return 0;
}

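/*
 * vmw_cmd_to_validate_list helper: adds a resource to the per-submission
 * resource list at most once, capped at VMWGFX_MAX_VALIDATIONS entries.
 * On failure, and when the resource is already on the list, the reference
 * held through @p_res is dropped.
 */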
static int vmw_resource_to_validate_list(struct vmw_sw_context *sw_context,
					 struct vmw_resource **p_res)
{
	int ret = 0;
	struct vmw_resource *res = *p_res;

	if (!res->on_validate_list) {
		if (sw_context->num_ref_resources >= VMWGFX_MAX_VALIDATIONS) {
			DRM_ERROR("Too many resources referenced in "
				  "command stream.\n");
			ret = -ENOMEM;
			goto out;
		}
		sw_context->resources[sw_context->num_ref_resources++] = res;
		res->on_validate_list = true;
		return 0;
	}

out:
	vmw_resource_unreference(p_res);
	return ret;
}

/**
 * vmw_bo_to_validate_list - add a bo to a validate list
 *
 * @sw_context: The software context used for this command submission batch.
 * @bo: The buffer object to add.
 * @fence_flags: Fence flags to be or'ed with any other fence flags for
 * this buffer on this submission batch.
 * @p_val_node: If non-NULL, will be updated with the validate node number
 * on return.
 *
 * Returns -EINVAL if the limit of number of buffer objects per command
 * submission is reached.
 */
static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
				   struct ttm_buffer_object *bo,
				   uint32_t fence_flags,
				   uint32_t *p_val_node)
{
	uint32_t val_node;
	struct ttm_validate_buffer *val_buf;

	val_node = vmw_dmabuf_validate_node(bo, sw_context->cur_val_buf);

	if (unlikely(val_node >= VMWGFX_MAX_VALIDATIONS)) {
		DRM_ERROR("Max number of DMA buffers per submission"
			  " exceeded.\n");
		return -EINVAL;
	}

	val_buf = &sw_context->val_bufs[val_node];
	if (unlikely(val_node == sw_context->cur_val_buf)) {
		val_buf->new_sync_obj_arg = NULL;
		val_buf->bo = ttm_bo_reference(bo);
		val_buf->usage = TTM_USAGE_READWRITE;
		list_add_tail(&val_buf->head, &sw_context->validate_nodes);
		++sw_context->cur_val_buf;
	}

	val_buf->new_sync_obj_arg = (void *)
		((unsigned long) val_buf->new_sync_obj_arg | fence_flags);
	sw_context->fence_flags |= fence_flags;

	if (p_val_node)
		*p_val_node = val_node;

	return 0;
}

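/*
 * vmw_cmd_cid_check - Verify a command's context id, caching the most
 * recently validated id to avoid repeated lookups, and add the context
 * resource to the validate list.
 */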
static int vmw_cmd_cid_check(struct vmw_private *dev_priv,
			     struct vmw_sw_context *sw_context,
			     SVGA3dCmdHeader *header)
{
	struct vmw_resource *ctx;
	struct vmw_cid_cmd {
		SVGA3dCmdHeader header;
		__le32 cid;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_cid_cmd, header);
	if (likely(sw_context->cid_valid && cmd->cid == sw_context->last_cid))
		return 0;

	ret = vmw_context_check(dev_priv, sw_context->tfile, cmd->cid,
				&ctx);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Could not find or use context %u\n",
			  (unsigned) cmd->cid);
		return ret;
	}

	sw_context->last_cid = cmd->cid;
	sw_context->cid_valid = true;
	sw_context->cur_ctx = ctx;
	return vmw_resource_to_validate_list(sw_context, &ctx);
}

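/*
 * vmw_cmd_sid_check - Verify a surface id and patch the command stream
 * with the device surface id. The result of the last lookup is cached to
 * speed up repeated references to the same surface.
 */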
static int vmw_cmd_sid_check(struct vmw_private *dev_priv,
			     struct vmw_sw_context *sw_context,
			     uint32_t *sid)
{
	struct vmw_surface *srf;
	int ret;
	struct vmw_resource *res;

	if (*sid == SVGA3D_INVALID_ID)
		return 0;

	if (likely(sw_context->sid_valid &&
		   *sid == sw_context->last_sid)) {
		*sid = sw_context->sid_translation;
		return 0;
	}

	ret = vmw_user_surface_lookup_handle(dev_priv,
					     sw_context->tfile,
					     *sid, &srf);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Could not find or use surface 0x%08x "
			  "address 0x%08lx\n",
			  (unsigned int) *sid,
			  (unsigned long) sid);
		return ret;
	}

	sw_context->last_sid = *sid;
	sw_context->sid_valid = true;
	sw_context->sid_translation = srf->res.id;
	*sid = sw_context->sid_translation;

	res = &srf->res;
	return vmw_resource_to_validate_list(sw_context, &res);
}

static int vmw_cmd_set_render_target_check(struct vmw_private *dev_priv,
					   struct vmw_sw_context *sw_context,
					   SVGA3dCmdHeader *header)
{
	struct vmw_sid_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSetRenderTarget body;
	} *cmd;
	int ret;

	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	cmd = container_of(header, struct vmw_sid_cmd, header);
	ret = vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.target.sid);
	return ret;
}

static int vmw_cmd_surface_copy_check(struct vmw_private *dev_priv,
				      struct vmw_sw_context *sw_context,
				      SVGA3dCmdHeader *header)
{
	struct vmw_sid_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSurfaceCopy body;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_sid_cmd, header);
	ret = vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.src.sid);
	if (unlikely(ret != 0))
		return ret;
	return vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.dest.sid);
}

static int vmw_cmd_stretch_blt_check(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     SVGA3dCmdHeader *header)
{
	struct vmw_sid_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSurfaceStretchBlt body;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_sid_cmd, header);
	ret = vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.src.sid);
	if (unlikely(ret != 0))
		return ret;
	return vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.dest.sid);
}

static int vmw_cmd_blt_surf_screen_check(struct vmw_private *dev_priv,
					 struct vmw_sw_context *sw_context,
					 SVGA3dCmdHeader *header)
{
	struct vmw_sid_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdBlitSurfaceToScreen body;
	} *cmd;

	cmd = container_of(header, struct vmw_sid_cmd, header);

	if (unlikely(!sw_context->kernel)) {
		DRM_ERROR("Kernel-only SVGA3d command: %u.\n", cmd->header.id);
		return -EPERM;
	}

	return vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.srcImage.sid);
}

static int vmw_cmd_present_check(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGA3dCmdHeader *header)
{
	struct vmw_sid_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdPresent body;
	} *cmd;

	cmd = container_of(header, struct vmw_sid_cmd, header);

	if (unlikely(!sw_context->kernel)) {
		DRM_ERROR("Kernel-only SVGA3d command: %u.\n", cmd->header.id);
		return -EPERM;
	}

	return vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.sid);
}

/**
 * vmw_query_bo_switch_prepare - Prepare to switch pinned buffer for queries.
 *
 * @dev_priv: The device private structure.
 * @cid: The hardware context for the next query.
 * @new_query_bo: The new buffer holding query results.
 * @sw_context: The software context used for this command submission.
 *
 * This function checks whether @new_query_bo is suitable for holding
 * query results, and if another buffer currently is pinned for query
 * results. If so, the function prepares the state of @sw_context for
 * switching pinned buffers after successful submission of the current
 * command batch. It also checks whether we're using a new query context.
 * In that case, it makes sure we emit a query barrier for the old
 * context before the current query buffer is fenced.
 */
static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
				       uint32_t cid,
				       struct ttm_buffer_object *new_query_bo,
				       struct vmw_sw_context *sw_context)
{
	int ret;
	bool add_cid = false;
	uint32_t cid_to_add;

	if (unlikely(new_query_bo != sw_context->cur_query_bo)) {
		if (unlikely(new_query_bo->num_pages > 4)) {
			DRM_ERROR("Query buffer too large.\n");
			return -EINVAL;
		}

		if (unlikely(sw_context->cur_query_bo != NULL)) {
			BUG_ON(!sw_context->query_cid_valid);
			add_cid = true;
			cid_to_add = sw_context->cur_query_cid;
			ret = vmw_bo_to_validate_list(sw_context,
						      sw_context->cur_query_bo,
						      DRM_VMW_FENCE_FLAG_EXEC,
						      NULL);
			if (unlikely(ret != 0))
				return ret;
		}
		sw_context->cur_query_bo = new_query_bo;

		ret = vmw_bo_to_validate_list(sw_context,
					      dev_priv->dummy_query_bo,
					      DRM_VMW_FENCE_FLAG_EXEC,
					      NULL);
		if (unlikely(ret != 0))
			return ret;
	}

	if (unlikely(cid != sw_context->cur_query_cid &&
		     sw_context->query_cid_valid)) {
		add_cid = true;
		cid_to_add = sw_context->cur_query_cid;
	}

	sw_context->cur_query_cid = cid;
	sw_context->query_cid_valid = true;

	if (add_cid) {
		struct vmw_resource *ctx = sw_context->cur_ctx;

		if (list_empty(&ctx->query_head))
			list_add_tail(&ctx->query_head,
				      &sw_context->query_list);
		ret = vmw_bo_to_validate_list(sw_context,
					      dev_priv->dummy_query_bo,
					      DRM_VMW_FENCE_FLAG_EXEC,
					      NULL);
		if (unlikely(ret != 0))
			return ret;
	}
	return 0;
}

/**
 * vmw_query_bo_switch_commit - Finalize switching pinned query buffer
 *
 * @dev_priv: The device private structure.
 * @sw_context: The software context used for this command submission batch.
 *
 * This function will check if we're switching query buffers, and will then,
 * if no other query waits are issued in this command submission batch,
 * issue a dummy occlusion query wait used as a query barrier. When the fence
 * object following that query wait has signaled, we are sure that all
 * preceding queries have finished, and the old query buffer can be unpinned.
 * However, since both the new query buffer and the old one are fenced with
 * that fence, we can do an asynchronous unpin now, and be sure that the
 * old query buffer won't be moved until the fence has signaled.
 *
 * As mentioned above, both the new and old query buffers need to be fenced
 * using a sequence emitted *after* calling this function.
 */
static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv,
				       struct vmw_sw_context *sw_context)
{
	struct vmw_resource *ctx, *next_ctx;
	int ret;

	/*
	 * The validate list should still hold references to all
	 * contexts here.
	 */
	list_for_each_entry_safe(ctx, next_ctx, &sw_context->query_list,
				 query_head) {
		list_del_init(&ctx->query_head);

		BUG_ON(!ctx->on_validate_list);

		ret = vmw_fifo_emit_dummy_query(dev_priv, ctx->id);
		if (unlikely(ret != 0))
			DRM_ERROR("Out of fifo space for dummy query.\n");
	}

	if (dev_priv->pinned_bo != sw_context->cur_query_bo) {
		if (dev_priv->pinned_bo) {
			vmw_bo_pin(dev_priv->pinned_bo, false);
			ttm_bo_unref(&dev_priv->pinned_bo);
		}

		vmw_bo_pin(sw_context->cur_query_bo, true);

		/*
		 * We also pin the dummy_query_bo buffer so that we
		 * don't need to validate it when emitting
		 * dummy queries in context destroy paths.
		 */
		vmw_bo_pin(dev_priv->dummy_query_bo, true);
		dev_priv->dummy_query_bo_pinned = true;

		dev_priv->query_cid = sw_context->cur_query_cid;
		dev_priv->pinned_bo =
			ttm_bo_reference(sw_context->cur_query_bo);
	}
}

/**
 * vmw_query_switch_backoff - clear query barrier list
 * @sw_context: The sw context used for this submission batch.
 *
 * This function is used as part of an error path, where a previously
 * set up list of query barriers needs to be cleared.
 */
static void vmw_query_switch_backoff(struct vmw_sw_context *sw_context)
{
	struct list_head *list, *next;

	list_for_each_safe(list, next, &sw_context->query_list) {
		list_del_init(list);
	}
}

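/*
 * vmw_translate_guest_ptr - Look up the DMA buffer backing a guest pointer,
 * add it to the validate list and register a relocation so that its GMR id
 * or VRAM offset can be patched into the command stream after validation.
 */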
static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   SVGAGuestPtr *ptr,
				   struct vmw_dma_buffer **vmw_bo_p)
{
	struct vmw_dma_buffer *vmw_bo = NULL;
	struct ttm_buffer_object *bo;
	uint32_t handle = ptr->gmrId;
	struct vmw_relocation *reloc;
	int ret;

	ret = vmw_user_dmabuf_lookup(sw_context->tfile, handle, &vmw_bo);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Could not find or use GMR region.\n");
		return -EINVAL;
	}
	bo = &vmw_bo->base;

	if (unlikely(sw_context->cur_reloc >= VMWGFX_MAX_RELOCATIONS)) {
		DRM_ERROR("Max number of relocations per submission"
			  " exceeded\n");
		ret = -EINVAL;
		goto out_no_reloc;
	}

	reloc = &sw_context->relocs[sw_context->cur_reloc++];
	reloc->location = ptr;

	ret = vmw_bo_to_validate_list(sw_context, bo, DRM_VMW_FENCE_FLAG_EXEC,
				      &reloc->index);
	if (unlikely(ret != 0))
		goto out_no_reloc;

	*vmw_bo_p = vmw_bo;
	return 0;

out_no_reloc:
	vmw_dmabuf_unreference(&vmw_bo);
	*vmw_bo_p = NULL;
	return ret;
}

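/*
 * vmw_cmd_end_query - Validate an SVGA3D_CMD_END_QUERY command and prepare
 * the switch to the buffer object that will receive the query result.
 */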
static int vmw_cmd_end_query(struct vmw_private *dev_priv,
			     struct vmw_sw_context *sw_context,
			     SVGA3dCmdHeader *header)
{
	struct vmw_dma_buffer *vmw_bo;
	struct vmw_query_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdEndQuery q;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_query_cmd, header);
	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
				      &cmd->q.guestResult,
				      &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_query_bo_switch_prepare(dev_priv, cmd->q.cid,
					  &vmw_bo->base, sw_context);

	vmw_dmabuf_unreference(&vmw_bo);
	return ret;
}

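/*
 * vmw_cmd_wait_query - Validate an SVGA3D_CMD_WAIT_FOR_QUERY command. The
 * wait also acts as a query barrier for the context, so any pending barrier
 * for it can be dropped from the query list.
 */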
static int vmw_cmd_wait_query(struct vmw_private *dev_priv,
			      struct vmw_sw_context *sw_context,
			      SVGA3dCmdHeader *header)
{
	struct vmw_dma_buffer *vmw_bo;
	struct vmw_query_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdWaitForQuery q;
	} *cmd;
	int ret;
	struct vmw_resource *ctx;

	cmd = container_of(header, struct vmw_query_cmd, header);
	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
				      &cmd->q.guestResult,
				      &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	vmw_dmabuf_unreference(&vmw_bo);

	/*
	 * This wait will act as a barrier for previous waits for this
	 * context.
	 */
	ctx = sw_context->cur_ctx;
	if (!list_empty(&ctx->query_head))
		list_del_init(&ctx->query_head);

	return 0;
}

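/*
 * vmw_cmd_dma - Validate an SVGA3D_CMD_SURFACE_DMA command: translate the
 * guest pointer, patch the command with the device surface id and let the
 * cursor code snoop the DMA for cursor updates.
 */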
static int vmw_cmd_dma(struct vmw_private *dev_priv,
		       struct vmw_sw_context *sw_context,
		       SVGA3dCmdHeader *header)
{
	struct vmw_dma_buffer *vmw_bo = NULL;
	struct ttm_buffer_object *bo;
	struct vmw_surface *srf = NULL;
	struct vmw_dma_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSurfaceDMA dma;
	} *cmd;
	int ret;
	struct vmw_resource *res;

	cmd = container_of(header, struct vmw_dma_cmd, header);
	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
				      &cmd->dma.guest.ptr,
				      &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	bo = &vmw_bo->base;
	ret = vmw_user_surface_lookup_handle(dev_priv, sw_context->tfile,
					     cmd->dma.host.sid, &srf);
	if (ret) {
		DRM_ERROR("Could not find surface.\n");
		goto out_no_reloc;
	}

	/*
	 * Patch command stream with device SID.
	 */
	cmd->dma.host.sid = srf->res.id;
	vmw_kms_cursor_snoop(srf, sw_context->tfile, bo, header);

	vmw_dmabuf_unreference(&vmw_bo);

	res = &srf->res;
	return vmw_resource_to_validate_list(sw_context, &res);

out_no_reloc:
	vmw_dmabuf_unreference(&vmw_bo);
	return ret;
}

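/*
 * vmw_cmd_draw - Validate an SVGA3D_CMD_DRAW_PRIMITIVES command, checking
 * the surface ids of all vertex declarations and index ranges against the
 * sizes declared in the command header.
 */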
static int vmw_cmd_draw(struct vmw_private *dev_priv,
			struct vmw_sw_context *sw_context,
			SVGA3dCmdHeader *header)
{
	struct vmw_draw_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdDrawPrimitives body;
	} *cmd;
	SVGA3dVertexDecl *decl = (SVGA3dVertexDecl *)(
		(unsigned long)header + sizeof(*cmd));
	SVGA3dPrimitiveRange *range;
	uint32_t i;
	uint32_t maxnum;
	int ret;

	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	cmd = container_of(header, struct vmw_draw_cmd, header);
	maxnum = (header->size - sizeof(cmd->body)) / sizeof(*decl);

	if (unlikely(cmd->body.numVertexDecls > maxnum)) {
		DRM_ERROR("Illegal number of vertex declarations.\n");
		return -EINVAL;
	}

	for (i = 0; i < cmd->body.numVertexDecls; ++i, ++decl) {
		ret = vmw_cmd_sid_check(dev_priv, sw_context,
					&decl->array.surfaceId);
		if (unlikely(ret != 0))
			return ret;
	}

	maxnum = (header->size - sizeof(cmd->body) -
		  cmd->body.numVertexDecls * sizeof(*decl)) / sizeof(*range);
	if (unlikely(cmd->body.numRanges > maxnum)) {
		DRM_ERROR("Illegal number of index ranges.\n");
		return -EINVAL;
	}

	range = (SVGA3dPrimitiveRange *) decl;
	for (i = 0; i < cmd->body.numRanges; ++i, ++range) {
		ret = vmw_cmd_sid_check(dev_priv, sw_context,
					&range->indexArray.surfaceId);
		if (unlikely(ret != 0))
			return ret;
	}
	return 0;
}

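/*
 * vmw_cmd_tex_state - Validate an SVGA3D_CMD_SETTEXTURESTATE command,
 * checking the surface id of each SVGA3D_TS_BIND_TEXTURE state entry.
 */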
static int vmw_cmd_tex_state(struct vmw_private *dev_priv,
			     struct vmw_sw_context *sw_context,
			     SVGA3dCmdHeader *header)
{
	struct vmw_tex_state_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSetTextureState state;
	};

	SVGA3dTextureState *last_state = (SVGA3dTextureState *)
		((unsigned long) header + header->size + sizeof(*header));
	SVGA3dTextureState *cur_state = (SVGA3dTextureState *)
		((unsigned long) header + sizeof(struct vmw_tex_state_cmd));
	int ret;

	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	for (; cur_state < last_state; ++cur_state) {
		if (likely(cur_state->name != SVGA3D_TS_BIND_TEXTURE))
			continue;

		ret = vmw_cmd_sid_check(dev_priv, sw_context,
					&cur_state->value);
		if (unlikely(ret != 0))
			return ret;
	}

	return 0;
}

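/*
 * vmw_cmd_check_define_gmrfb - Validate an SVGA_CMD_DEFINE_GMRFB command
 * by translating the guest pointer it carries.
 */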
static int vmw_cmd_check_define_gmrfb(struct vmw_private *dev_priv,
				      struct vmw_sw_context *sw_context,
				      void *buf)
{
	struct vmw_dma_buffer *vmw_bo;
	int ret;

	struct {
		uint32_t header;
		SVGAFifoCmdDefineGMRFB body;
	} *cmd = buf;

	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
				      &cmd->body.ptr,
				      &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	vmw_dmabuf_unreference(&vmw_bo);

	return ret;
}

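/*
 * vmw_cmd_check_not_3d - Validate the 2D SVGA commands that are allowed in
 * a command stream: determine the command size, verify it against the
 * remaining stream, and restrict these commands to kernel submissions.
 */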
static int vmw_cmd_check_not_3d(struct vmw_private *dev_priv,
				struct vmw_sw_context *sw_context,
				void *buf, uint32_t *size)
{
	uint32_t size_remaining = *size;
	uint32_t cmd_id;

	cmd_id = le32_to_cpu(((uint32_t *)buf)[0]);
	switch (cmd_id) {
	case SVGA_CMD_UPDATE:
		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdUpdate);
		break;
	case SVGA_CMD_DEFINE_GMRFB:
		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdDefineGMRFB);
		break;
	case SVGA_CMD_BLIT_GMRFB_TO_SCREEN:
		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
		break;
	case SVGA_CMD_BLIT_SCREEN_TO_GMRFB:
		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdBlitScreenToGMRFB);
		break;
	default:
		DRM_ERROR("Unsupported SVGA command: %u.\n", cmd_id);
		return -EINVAL;
	}

	if (*size > size_remaining) {
		DRM_ERROR("Invalid SVGA command (size mismatch):"
			  " %u.\n", cmd_id);
		return -EINVAL;
	}

	if (unlikely(!sw_context->kernel)) {
		DRM_ERROR("Kernel-only SVGA command: %u.\n", cmd_id);
		return -EPERM;
	}

	if (cmd_id == SVGA_CMD_DEFINE_GMRFB)
		return vmw_cmd_check_define_gmrfb(dev_priv, sw_context, buf);

	return 0;
}

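/*
 * Dispatch table mapping SVGA3D command ids, offset by SVGA_3D_CMD_BASE,
 * to their verifier functions. Commands without a dedicated verifier are
 * either context-checked, accepted as-is, or rejected.
 */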
typedef int (*vmw_cmd_func) (struct vmw_private *,
			     struct vmw_sw_context *,
			     SVGA3dCmdHeader *);

#define VMW_CMD_DEF(cmd, func) \
	[cmd - SVGA_3D_CMD_BASE] = func

static vmw_cmd_func vmw_cmd_funcs[SVGA_3D_CMD_MAX] = {
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE, &vmw_cmd_invalid),
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DESTROY, &vmw_cmd_invalid),
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_COPY, &vmw_cmd_surface_copy_check),
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_STRETCHBLT, &vmw_cmd_stretch_blt_check),
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DMA, &vmw_cmd_dma),
	VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DEFINE, &vmw_cmd_invalid),
	VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DESTROY, &vmw_cmd_invalid),
	VMW_CMD_DEF(SVGA_3D_CMD_SETTRANSFORM, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_SETZRANGE, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERSTATE, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERTARGET,
		    &vmw_cmd_set_render_target_check),
	VMW_CMD_DEF(SVGA_3D_CMD_SETTEXTURESTATE, &vmw_cmd_tex_state),
	VMW_CMD_DEF(SVGA_3D_CMD_SETMATERIAL, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTDATA, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTENABLED, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_SETVIEWPORT, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_SETCLIPPLANE, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_CLEAR, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_PRESENT, &vmw_cmd_present_check),
	VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DEFINE, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DESTROY, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER_CONST, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_DRAW_PRIMITIVES, &vmw_cmd_draw),
	VMW_CMD_DEF(SVGA_3D_CMD_SETSCISSORRECT, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_QUERY, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_END_QUERY, &vmw_cmd_end_query),
	VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_QUERY, &vmw_cmd_wait_query),
	VMW_CMD_DEF(SVGA_3D_CMD_PRESENT_READBACK, &vmw_cmd_ok),
	VMW_CMD_DEF(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN,
		    &vmw_cmd_blt_surf_screen_check)
};

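/*
 * vmw_cmd_check - Verify a single command in the stream: 2D commands are
 * handed to vmw_cmd_check_not_3d(), 3D commands are bounds-checked and
 * dispatched through vmw_cmd_funcs. On return, *size holds the size of
 * the checked command.
 */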
static int vmw_cmd_check(struct vmw_private *dev_priv,
			 struct vmw_sw_context *sw_context,
			 void *buf, uint32_t *size)
{
	uint32_t cmd_id;
	uint32_t size_remaining = *size;
	SVGA3dCmdHeader *header = (SVGA3dCmdHeader *) buf;
	int ret;

	cmd_id = le32_to_cpu(((uint32_t *)buf)[0]);
	/* Handle any non-3D commands */
	if (unlikely(cmd_id < SVGA_CMD_MAX))
		return vmw_cmd_check_not_3d(dev_priv, sw_context, buf, size);

	cmd_id = le32_to_cpu(header->id);
	*size = le32_to_cpu(header->size) + sizeof(SVGA3dCmdHeader);

	cmd_id -= SVGA_3D_CMD_BASE;
	if (unlikely(*size > size_remaining))
		goto out_err;

	if (unlikely(cmd_id >= SVGA_3D_CMD_MAX - SVGA_3D_CMD_BASE))
		goto out_err;

	ret = vmw_cmd_funcs[cmd_id](dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		goto out_err;

	return 0;
out_err:
	DRM_ERROR("Illegal / Invalid SVGA3D command: %d\n",
		  cmd_id + SVGA_3D_CMD_BASE);
	return -EINVAL;
}

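/*
 * vmw_cmd_check_all - Walk the command stream and verify each command in
 * turn until the whole stream has been consumed exactly.
 */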
static int vmw_cmd_check_all(struct vmw_private *dev_priv,
			     struct vmw_sw_context *sw_context,
			     void *buf,
			     uint32_t size)
{
	int32_t cur_size = size;
	int ret;

	while (cur_size > 0) {
		size = cur_size;
		ret = vmw_cmd_check(dev_priv, sw_context, buf, &size);
		if (unlikely(ret != 0))
			return ret;
		buf = (void *)((unsigned long) buf + size);
		cur_size -= size;
	}

	if (unlikely(cur_size != 0)) {
		DRM_ERROR("Command verifier out of sync.\n");
		return -EINVAL;
	}

	return 0;
}

static void vmw_free_relocations(struct vmw_sw_context *sw_context)
{
	sw_context->cur_reloc = 0;
}

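/*
 * vmw_apply_relocations - Patch each registered relocation with the GMR id
 * of its validated buffer, or with SVGA_GMR_FRAMEBUFFER and a VRAM offset
 * if the buffer was placed in VRAM.
 */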
static void vmw_apply_relocations(struct vmw_sw_context *sw_context)
{
	uint32_t i;
	struct vmw_relocation *reloc;
	struct ttm_validate_buffer *validate;
	struct ttm_buffer_object *bo;

	for (i = 0; i < sw_context->cur_reloc; ++i) {
		reloc = &sw_context->relocs[i];
		validate = &sw_context->val_bufs[reloc->index];
		bo = validate->bo;
		if (bo->mem.mem_type == TTM_PL_VRAM) {
			reloc->location->offset += bo->offset;
			reloc->location->gmrId = SVGA_GMR_FRAMEBUFFER;
		} else
			reloc->location->gmrId = bo->mem.start;
	}
	vmw_free_relocations(sw_context);
}

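/*
 * vmw_clear_validations - Drop the buffer and resource references held
 * during command submission and empty the validate list.
 */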
static void vmw_clear_validations(struct vmw_sw_context *sw_context)
{
	struct ttm_validate_buffer *entry, *next;
	uint32_t i = sw_context->num_ref_resources;

	/*
	 * Drop references to DMA buffers held during command submission.
	 */
	list_for_each_entry_safe(entry, next, &sw_context->validate_nodes,
				 head) {
		list_del(&entry->head);
		vmw_dmabuf_validate_clear(entry->bo);
		ttm_bo_unref(&entry->bo);
		sw_context->cur_val_buf--;
	}
	BUG_ON(sw_context->cur_val_buf != 0);

	/*
	 * Drop references to resources held during command submission.
	 */
	while (i-- > 0) {
		sw_context->resources[i]->on_validate_list = false;
		vmw_resource_unreference(&sw_context->resources[i]);
	}
}

static int vmw_validate_single_buffer(struct vmw_private *dev_priv,
				      struct ttm_buffer_object *bo)
{
	int ret;

	/*
	 * Don't validate pinned buffers.
	 */
	if (bo == dev_priv->pinned_bo ||
	    (bo == dev_priv->dummy_query_bo &&
	     dev_priv->dummy_query_bo_pinned))
		return 0;

	/*
	 * Put BO in VRAM if there is space, otherwise as a GMR.
	 * If there is no space in VRAM and GMR ids are all used up,
	 * start evicting GMRs to make room. If the DMA buffer can't be
	 * used as a GMR, this will return -ENOMEM.
	 */
	ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, true, false, false);
	if (likely(ret == 0 || ret == -ERESTARTSYS))
		return ret;

	/*
	 * If that failed, try VRAM again, this time evicting
	 * previous contents.
	 */
	DRM_INFO("Falling through to VRAM.\n");
	ret = ttm_bo_validate(bo, &vmw_vram_placement, true, false, false);
	return ret;
}

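/*
 * vmw_validate_buffers - Validate the placement of all buffers on the
 * validate list.
 */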
static int vmw_validate_buffers(struct vmw_private *dev_priv,
				struct vmw_sw_context *sw_context)
{
	struct ttm_validate_buffer *entry;
	int ret;

	list_for_each_entry(entry, &sw_context->validate_nodes, head) {
		ret = vmw_validate_single_buffer(dev_priv, entry->bo);
		if (unlikely(ret != 0))
			return ret;
	}
	return 0;
}

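/*
 * vmw_resize_cmd_bounce - Grow the bounce buffer used for copying in
 * user-space command streams. The size grows by roughly 50% at a time,
 * rounded up to a page multiple.
 */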
static int vmw_resize_cmd_bounce(struct vmw_sw_context *sw_context,
				 uint32_t size)
{
	if (likely(sw_context->cmd_bounce_size >= size))
		return 0;

	if (sw_context->cmd_bounce_size == 0)
		sw_context->cmd_bounce_size = VMWGFX_CMD_BOUNCE_INIT_SIZE;

	while (sw_context->cmd_bounce_size < size) {
		sw_context->cmd_bounce_size =
			PAGE_ALIGN(sw_context->cmd_bounce_size +
				   (sw_context->cmd_bounce_size >> 1));
	}

	if (sw_context->cmd_bounce != NULL)
		vfree(sw_context->cmd_bounce);

	sw_context->cmd_bounce = vmalloc(sw_context->cmd_bounce_size);

	if (sw_context->cmd_bounce == NULL) {
		DRM_ERROR("Failed to allocate command bounce buffer.\n");
		sw_context->cmd_bounce_size = 0;
		return -ENOMEM;
	}

	return 0;
}

/**
 * vmw_execbuf_fence_commands - create and submit a command stream fence
 *
 * Creates a fence object and submits a command stream marker.
 * If this fails for some reason, we sync the fifo and set *p_fence to NULL.
 * It is then safe to fence buffers with a NULL pointer.
 *
 * If @p_handle is not NULL, @file_priv must also not be NULL. Creates
 * a userspace handle if @p_handle is not NULL, otherwise not.
 */
int vmw_execbuf_fence_commands(struct drm_file *file_priv,
			       struct vmw_private *dev_priv,
			       struct vmw_fence_obj **p_fence,
			       uint32_t *p_handle)
{
	uint32_t sequence;
	int ret;
	bool synced = false;

	/* p_handle implies file_priv. */
	BUG_ON(p_handle != NULL && file_priv == NULL);

	ret = vmw_fifo_send_fence(dev_priv, &sequence);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Fence submission error. Syncing.\n");
		synced = true;
	}

	if (p_handle != NULL)
		ret = vmw_user_fence_create(file_priv, dev_priv->fman,
					    sequence,
					    DRM_VMW_FENCE_FLAG_EXEC,
					    p_fence, p_handle);
	else
		ret = vmw_fence_create(dev_priv->fman, sequence,
				       DRM_VMW_FENCE_FLAG_EXEC,
				       p_fence);

	if (unlikely(ret != 0 && !synced)) {
		(void) vmw_fallback_wait(dev_priv, false, false,
					 sequence, false,
					 VMW_FENCE_WAIT_TIMEOUT);
		*p_fence = NULL;
	}

	return 0;
}

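/*
 * vmw_execbuf_process - Validate and submit a command stream. Commands are
 * taken from @kernel_commands if non-NULL, otherwise copied in from
 * @user_commands via the bounce buffer. After verification the stream is
 * copied to the fifo, the query buffer switch is committed and a fence is
 * emitted and optionally exposed to user-space through @user_fence_rep.
 */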
int vmw_execbuf_process(struct drm_file *file_priv,
			struct vmw_private *dev_priv,
			void __user *user_commands,
			void *kernel_commands,
			uint32_t command_size,
			uint64_t throttle_us,
			struct drm_vmw_fence_rep __user *user_fence_rep)
{
	struct vmw_sw_context *sw_context = &dev_priv->ctx;
	struct drm_vmw_fence_rep fence_rep;
	struct vmw_fence_obj *fence;
	uint32_t handle;
	void *cmd;
	int ret;

	ret = mutex_lock_interruptible(&dev_priv->cmdbuf_mutex);
	if (unlikely(ret != 0))
		return -ERESTARTSYS;

	if (kernel_commands == NULL) {
		sw_context->kernel = false;

		ret = vmw_resize_cmd_bounce(sw_context, command_size);
		if (unlikely(ret != 0))
			goto out_unlock;

		ret = copy_from_user(sw_context->cmd_bounce,
				     user_commands, command_size);
		if (unlikely(ret != 0)) {
			ret = -EFAULT;
			DRM_ERROR("Failed copying commands.\n");
			goto out_unlock;
		}
		kernel_commands = sw_context->cmd_bounce;
	} else
		sw_context->kernel = true;

	sw_context->tfile = vmw_fpriv(file_priv)->tfile;
	sw_context->cid_valid = false;
	sw_context->sid_valid = false;
	sw_context->cur_reloc = 0;
	sw_context->cur_val_buf = 0;
	sw_context->num_ref_resources = 0;
	sw_context->fence_flags = 0;
	INIT_LIST_HEAD(&sw_context->query_list);
	sw_context->cur_query_bo = dev_priv->pinned_bo;
	sw_context->cur_query_cid = dev_priv->query_cid;
	sw_context->query_cid_valid = (dev_priv->pinned_bo != NULL);

	INIT_LIST_HEAD(&sw_context->validate_nodes);

	ret = vmw_cmd_check_all(dev_priv, sw_context, kernel_commands,
				command_size);
	if (unlikely(ret != 0))
		goto out_err;

	ret = ttm_eu_reserve_buffers(&sw_context->validate_nodes);
	if (unlikely(ret != 0))
		goto out_err;

	ret = vmw_validate_buffers(dev_priv, sw_context);
	if (unlikely(ret != 0))
		goto out_err;

	vmw_apply_relocations(sw_context);

	if (throttle_us) {
		ret = vmw_wait_lag(dev_priv, &dev_priv->fifo.marker_queue,
				   throttle_us);
		if (unlikely(ret != 0))
			goto out_throttle;
	}

	cmd = vmw_fifo_reserve(dev_priv, command_size);
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving fifo space for commands.\n");
		ret = -ENOMEM;
		goto out_throttle;
	}

	memcpy(cmd, kernel_commands, command_size);
	vmw_fifo_commit(dev_priv, command_size);

	vmw_query_bo_switch_commit(dev_priv, sw_context);
	ret = vmw_execbuf_fence_commands(file_priv, dev_priv,
					 &fence,
					 (user_fence_rep) ? &handle : NULL);
	/*
	 * This error is harmless, because if fence submission fails,
	 * vmw_fifo_send_fence will sync. The error will be propagated to
	 * user-space in @fence_rep
	 */

	if (ret != 0)
		DRM_ERROR("Fence submission error. Syncing.\n");

	ttm_eu_fence_buffer_objects(&sw_context->validate_nodes,
				    (void *) fence);

	vmw_clear_validations(sw_context);

	if (user_fence_rep) {
		fence_rep.error = ret;
		fence_rep.handle = handle;
		fence_rep.seqno = fence->seqno;
		vmw_update_seqno(dev_priv, &dev_priv->fifo);
		fence_rep.passed_seqno = dev_priv->last_read_seqno;

		/*
		 * copy_to_user errors will be detected by user space not
		 * seeing fence_rep::error filled in. Typically
		 * user-space would have pre-set that member to -EFAULT.
		 */
		ret = copy_to_user(user_fence_rep, &fence_rep,
				   sizeof(fence_rep));

		/*
		 * User-space lost the fence object. We need to sync
		 * and unreference the handle.
		 */
		if (unlikely(ret != 0) && (fence_rep.error == 0)) {
			BUG_ON(fence == NULL);

			ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
						  handle, TTM_REF_USAGE);
			DRM_ERROR("Fence copy error. Syncing.\n");
			(void) vmw_fence_obj_wait(fence,
						  fence->signal_mask,
						  false, false,
						  VMW_FENCE_WAIT_TIMEOUT);
		}
	}

	if (likely(fence != NULL))
		vmw_fence_obj_unreference(&fence);

	mutex_unlock(&dev_priv->cmdbuf_mutex);
	return 0;

out_err:
	vmw_free_relocations(sw_context);
out_throttle:
	vmw_query_switch_backoff(sw_context);
	ttm_eu_backoff_reservation(&sw_context->validate_nodes);
	vmw_clear_validations(sw_context);
out_unlock:
	mutex_unlock(&dev_priv->cmdbuf_mutex);
	return ret;
}

/**
 * vmw_execbuf_unpin_panic - Idle the fifo and unpin the query buffer.
 *
 * @dev_priv: The device private structure.
 *
 * This function is called to idle the fifo and unpin the query buffer
 * if the normal way to do this hits an error, which should typically be
 * extremely rare.
 */
static void vmw_execbuf_unpin_panic(struct vmw_private *dev_priv)
{
	DRM_ERROR("Can't unpin query buffer. Trying to recover.\n");

	(void) vmw_fallback_wait(dev_priv, false, true, 0, false, 10*HZ);
	vmw_bo_pin(dev_priv->pinned_bo, false);
	vmw_bo_pin(dev_priv->dummy_query_bo, false);
	dev_priv->dummy_query_bo_pinned = false;
}


/**
 * vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned
 * query bo.
 *
 * @dev_priv: The device private structure.
 * @only_on_cid_match: Only flush and unpin if the current active query cid
 * matches @cid.
 * @cid: Optional context id to match.
 *
 * This function should be used to unpin the pinned query bo, or
 * as a query barrier when we need to make sure that all queries have
 * finished before the next fifo command. (For example on hardware
 * context destructions where the hardware may otherwise leak unfinished
 * queries).
 *
 * This function does not return any failure codes, but makes attempts
 * to do safe unpinning in case of errors.
 *
 * The function will synchronize on the previous query barrier, and will
 * thus not finish until that barrier has executed.
 */
void vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
				   bool only_on_cid_match, uint32_t cid)
{
	int ret = 0;
	struct list_head validate_list;
	struct ttm_validate_buffer pinned_val, query_val;
	struct vmw_fence_obj *fence;

	mutex_lock(&dev_priv->cmdbuf_mutex);

	if (dev_priv->pinned_bo == NULL)
		goto out_unlock;

	if (only_on_cid_match && cid != dev_priv->query_cid)
		goto out_unlock;

	INIT_LIST_HEAD(&validate_list);

	pinned_val.new_sync_obj_arg = (void *)(unsigned long)
		DRM_VMW_FENCE_FLAG_EXEC;
	pinned_val.bo = ttm_bo_reference(dev_priv->pinned_bo);
	list_add_tail(&pinned_val.head, &validate_list);

	query_val.new_sync_obj_arg = pinned_val.new_sync_obj_arg;
	query_val.bo = ttm_bo_reference(dev_priv->dummy_query_bo);
	list_add_tail(&query_val.head, &validate_list);

	do {
		ret = ttm_eu_reserve_buffers(&validate_list);
	} while (ret == -ERESTARTSYS);

	if (unlikely(ret != 0)) {
		vmw_execbuf_unpin_panic(dev_priv);
		goto out_no_reserve;
	}

	ret = vmw_fifo_emit_dummy_query(dev_priv, dev_priv->query_cid);
	if (unlikely(ret != 0)) {
		vmw_execbuf_unpin_panic(dev_priv);
		goto out_no_emit;
	}

	vmw_bo_pin(dev_priv->pinned_bo, false);
	vmw_bo_pin(dev_priv->dummy_query_bo, false);
	dev_priv->dummy_query_bo_pinned = false;

	(void) vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
	ttm_eu_fence_buffer_objects(&validate_list, (void *) fence);

	ttm_bo_unref(&query_val.bo);
	ttm_bo_unref(&pinned_val.bo);
	ttm_bo_unref(&dev_priv->pinned_bo);

out_unlock:
	mutex_unlock(&dev_priv->cmdbuf_mutex);
	return;

out_no_emit:
	ttm_eu_backoff_reservation(&validate_list);
out_no_reserve:
	ttm_bo_unref(&query_val.bo);
	ttm_bo_unref(&pinned_val.bo);
	ttm_bo_unref(&dev_priv->pinned_bo);
	mutex_unlock(&dev_priv->cmdbuf_mutex);
}

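/*
 * vmw_execbuf_ioctl - Entry point for the execbuf ioctl: check the
 * argument version, take the master read lock and hand the user command
 * stream to vmw_execbuf_process().
 */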
int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct drm_vmw_execbuf_arg *arg = (struct drm_vmw_execbuf_arg *)data;
	struct vmw_master *vmaster = vmw_master(file_priv->master);
	int ret;

	/*
	 * This will allow us to extend the ioctl argument while
	 * maintaining backwards compatibility:
	 * We take different code paths depending on the value of
	 * arg->version.
	 */
	if (unlikely(arg->version != DRM_VMW_EXECBUF_VERSION)) {
		DRM_ERROR("Incorrect execbuf version.\n");
		DRM_ERROR("You're running outdated experimental "
			  "vmwgfx user-space drivers.\n");
		return -EINVAL;
	}

	ret = ttm_read_lock(&vmaster->lock, true);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_execbuf_process(file_priv, dev_priv,
				  (void __user *)(unsigned long)arg->commands,
				  NULL, arg->command_size, arg->throttle_us,
				  (void __user *)(unsigned long)arg->fence_rep);

	if (unlikely(ret != 0))
		goto out_unlock;

	vmw_kms_cursor_post_execbuf(dev_priv);

out_unlock:
	ttm_read_unlock(&vmaster->lock);
	return ret;
}