/*
 * Copyright 2014, 2015 Red Hat.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
#include "util/format/u_format.h"
#include "util/u_inlines.h"
#include "util/u_memory.h"
#include "util/u_upload_mgr.h"
#include "virgl_context.h"
#include "virgl_resource.h"
#include "virgl_screen.h"
#include "virgl_staging_mgr.h"

/* A (soft) limit for the amount of memory we want to allow for queued staging
 * resources. This is used to decide when we should force a flush, in order to
 * avoid exhausting virtio-gpu memory.
 */
#define VIRGL_QUEUED_STAGING_RES_SIZE_LIMIT (128 * 1024 * 1024)
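/* For scale (illustrative only): 128 MiB corresponds to roughly sixteen
 * 1920x1080 RGBA8 textures' worth of queued staging data before a flush is
 * forced.
 */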

enum virgl_transfer_map_type {
   VIRGL_TRANSFER_MAP_ERROR = -1,
   VIRGL_TRANSFER_MAP_HW_RES,

   /* Map a range of a staging buffer. The updated contents should be
    * transferred with a copy transfer.
    */
   VIRGL_TRANSFER_MAP_STAGING,

   /* Reallocate the underlying virgl_hw_res. */
   VIRGL_TRANSFER_MAP_REALLOC,
};

/* We need to flush to properly sync the transfer with the current cmdbuf.
 * But there are cases where the flushing can be skipped:
 *
 * - synchronization is disabled
 * - the resource is not referenced by the current cmdbuf
 */
static bool virgl_res_needs_flush(struct virgl_context *vctx,
                                  struct virgl_transfer *trans)
{
   struct virgl_winsys *vws = virgl_screen(vctx->base.screen)->vws;
   struct virgl_resource *res = virgl_resource(trans->base.resource);

   if (trans->base.usage & PIPE_TRANSFER_UNSYNCHRONIZED)
      return false;

   if (!vws->res_is_referenced(vws, vctx->cbuf, res->hw_res))
      return false;

   return true;
}

/* We need to read back from the host storage to make sure the guest storage
 * is up-to-date. But there are cases where the readback can be skipped:
 *
 * - the content can be discarded
 * - the host storage is read-only
 *
 * Note that PIPE_TRANSFER_WRITE without a discard bit still requires a
 * readback: the caller may update only part of the mapped range, so the
 * untouched bytes must already hold valid data. Whether PIPE_TRANSFER_READ
 * is set makes no difference to this decision, and
 * PIPE_TRANSFER_UNSYNCHRONIZED and PIPE_TRANSFER_FLUSH_EXPLICIT are likewise
 * irrelevant.
 */
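/* A quick summary of the rules above, derived from the checks below and
 * assuming a hypothetical dirty level (clean_mask bit clear):
 *
 *   READ                             -> readback
 *   WRITE                            -> readback
 *   READ  | DISCARD_RANGE            -> no readback
 *   WRITE | DISCARD_WHOLE_RESOURCE   -> no readback
 */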
static bool virgl_res_needs_readback(struct virgl_context *vctx,
                                     struct virgl_resource *res,
                                     unsigned usage, unsigned level)
{
   if (usage & (PIPE_TRANSFER_DISCARD_RANGE |
                PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE))
      return false;

   if (res->clean_mask & (1 << level))
      return false;

   return true;
}

static enum virgl_transfer_map_type
virgl_resource_transfer_prepare(struct virgl_context *vctx,
                                struct virgl_transfer *xfer)
{
   struct virgl_screen *vs = virgl_screen(vctx->base.screen);
   struct virgl_winsys *vws = vs->vws;
   struct virgl_resource *res = virgl_resource(xfer->base.resource);
   enum virgl_transfer_map_type map_type = VIRGL_TRANSFER_MAP_HW_RES;
   bool flush;
   bool readback;
   bool wait;

   /* there is no way to map the host storage currently */
   if (xfer->base.usage & PIPE_TRANSFER_MAP_DIRECTLY)
      return VIRGL_TRANSFER_MAP_ERROR;

   /* We break the logic down into four steps:
    *
    * step 1: determine the required operations independently
    * step 2: look for chances to skip the operations
    * step 3: resolve dependencies between the operations
    * step 4: execute the operations
    */

   flush = virgl_res_needs_flush(vctx, xfer);
   readback = virgl_res_needs_readback(vctx, res, xfer->base.usage,
                                       xfer->base.level);
   /* We need to wait for all cmdbufs, current or previous, that access the
    * resource to finish, unless synchronization is disabled.
    */
   wait = !(xfer->base.usage & PIPE_TRANSFER_UNSYNCHRONIZED);

   /* When the transfer range consists of only uninitialized data, we can
    * assume the GPU is not accessing the range and readback is unnecessary.
    * We can proceed as if PIPE_TRANSFER_UNSYNCHRONIZED and
    * PIPE_TRANSFER_DISCARD_RANGE are set.
    */
   if (res->u.b.target == PIPE_BUFFER &&
       !util_ranges_intersect(&res->valid_buffer_range, xfer->base.box.x,
                              xfer->base.box.x + xfer->base.box.width) &&
       likely(!(virgl_debug & VIRGL_DEBUG_XFER))) {
      flush = false;
      readback = false;
      wait = false;
   }

   /* When the resource is busy but its content can be discarded, we can
    * replace its HW resource or use a staging buffer to avoid waiting.
    */
   if (wait &&
       (xfer->base.usage & (PIPE_TRANSFER_DISCARD_RANGE |
                            PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE)) &&
       likely(!(virgl_debug & VIRGL_DEBUG_XFER))) {
      bool can_realloc = false;
      bool can_staging = false;

      /* A PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE transfer may be followed by
       * PIPE_TRANSFER_UNSYNCHRONIZED transfers to non-overlapping regions.
       * It cannot be treated as a PIPE_TRANSFER_DISCARD_RANGE transfer,
       * otherwise those following unsynchronized transfers may overwrite
       * valid data.
       */
      if (xfer->base.usage & PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE) {
         can_realloc = virgl_can_rebind_resource(vctx, &res->u.b);
      } else {
         can_staging = vctx->supports_staging;
      }

      /* discard implies no readback */
      assert(!readback);

      if (can_realloc || can_staging) {
         /* Both map types have some costs. Do them only when the resource is
          * (or will be) busy for real. Otherwise, set wait to false.
          */
         wait = (flush || vws->resource_is_busy(vws, res->hw_res));
         if (wait) {
            map_type = (can_realloc) ?
               VIRGL_TRANSFER_MAP_REALLOC :
               VIRGL_TRANSFER_MAP_STAGING;
            wait = false;

            /* There is normally no need to flush either, unless the amount of
             * memory we are using for staging resources starts growing, in
             * which case we want to flush to keep our memory consumption in
             * check.
             */
            flush = (vctx->queued_staging_res_size >
                     VIRGL_QUEUED_STAGING_RES_SIZE_LIMIT);
         }
      }
   }

   /* readback has some implications */
   if (readback) {
      /* Readback is yet another command and is transparent to the state
       * trackers. It should be waited for in all cases, including when
       * PIPE_TRANSFER_UNSYNCHRONIZED is set.
       */
      wait = true;

      /* When the transfer queue has pending writes to this transfer's region,
       * we have to flush before readback.
       */
      if (!flush && virgl_transfer_queue_is_queued(&vctx->queue, xfer))
         flush = true;
   }

   if (flush)
      vctx->base.flush(&vctx->base, NULL, 0);

   /* If we are not allowed to block, and we know that we will have to wait,
    * either because the resource is busy, or because it will become busy due
    * to a readback, return early to avoid performing an incomplete
    * transfer_get. Such an incomplete transfer_get may finish at any time,
    * during which another unsynchronized map could write to the resource
    * contents, leaving the contents in an undefined state.
    */
   if ((xfer->base.usage & PIPE_TRANSFER_DONTBLOCK) &&
       (readback || (wait && vws->resource_is_busy(vws, res->hw_res))))
      return VIRGL_TRANSFER_MAP_ERROR;

   if (readback) {
      vws->transfer_get(vws, res->hw_res, &xfer->base.box, xfer->base.stride,
                        xfer->l_stride, xfer->offset, xfer->base.level);
   }

   if (wait)
      vws->resource_wait(vws, res->hw_res);

   return map_type;
}

/* Calculate the minimum size of the memory required to service a resource
 * transfer map. Also return the stride and layer_stride for the
 * corresponding layout.
 */
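/* Worked example (hypothetical numbers): mapping a 16x16 box with
 * depth == 4 on a PIPE_TEXTURE_2D_ARRAY resource in a 4-byte format such as
 * PIPE_FORMAT_B8G8R8A8_UNORM gives stride = 16 * 4 = 64,
 * layer_stride = 16 * 64 = 1024, and size = 4 * 1024 = 4096 bytes.
 */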
static unsigned
virgl_transfer_map_size(struct virgl_transfer *vtransfer,
                        unsigned *out_stride,
                        unsigned *out_layer_stride)
{
   struct pipe_resource *pres = vtransfer->base.resource;
   struct pipe_box *box = &vtransfer->base.box;
   unsigned stride;
   unsigned layer_stride;
   unsigned size;

   assert(out_stride);
   assert(out_layer_stride);

   stride = util_format_get_stride(pres->format, box->width);
   layer_stride = util_format_get_2d_size(pres->format, stride, box->height);

   if (pres->target == PIPE_TEXTURE_CUBE ||
       pres->target == PIPE_TEXTURE_CUBE_ARRAY ||
       pres->target == PIPE_TEXTURE_3D ||
       pres->target == PIPE_TEXTURE_2D_ARRAY) {
      size = box->depth * layer_stride;
   } else if (pres->target == PIPE_TEXTURE_1D_ARRAY) {
      size = box->depth * stride;
   } else {
      size = layer_stride;
   }

   *out_stride = stride;
   *out_layer_stride = layer_stride;

   return size;
}

/* Maps a region from staging to service the transfer. */
static void *
virgl_staging_map(struct virgl_context *vctx,
                  struct virgl_transfer *vtransfer)
{
   struct virgl_resource *vres = virgl_resource(vtransfer->base.resource);
   unsigned size;
   unsigned align_offset;
   unsigned stride;
   unsigned layer_stride;
   void *map_addr;
   bool alloc_succeeded;

   assert(vctx->supports_staging);

   size = virgl_transfer_map_size(vtransfer, &stride, &layer_stride);

   /* For buffers we need to ensure that the start of the buffer would be
    * aligned to VIRGL_MAP_BUFFER_ALIGNMENT, even if our transfer doesn't
    * actually include it. To achieve this we may need to allocate a slightly
    * larger range from the upload buffer, and later update the uploader
    * resource offset and map address to point to the requested x coordinate
    * within that range.
    *
    * 0       A       2A      3A
    * |-------|---bbbb|bbbbb--|
    *         |--------| ==> size
    *     |---|           ==> align_offset
    *     |------------|  ==> allocation of size + align_offset
    */
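   /* Numeric illustration (hypothetical values): with an alignment A of 64
    * bytes and box.x == 100, align_offset == 100 % 64 == 36. We then allocate
    * size + 36 bytes at 64-byte alignment and advance both copy_src_offset
    * and the returned map address by 36 below.
    */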
   align_offset = vres->u.b.target == PIPE_BUFFER ?
      vtransfer->base.box.x % VIRGL_MAP_BUFFER_ALIGNMENT :
      0;

   alloc_succeeded =
      virgl_staging_alloc(&vctx->staging, size + align_offset,
                          VIRGL_MAP_BUFFER_ALIGNMENT,
                          &vtransfer->copy_src_offset,
                          &vtransfer->copy_src_hw_res,
                          &map_addr);
   if (alloc_succeeded) {
      /* Update source offset and address to point to the requested x
       * coordinate if we have an align_offset (see above for more
       * information).
       */
      vtransfer->copy_src_offset += align_offset;
      map_addr += align_offset;

      /* Mark as dirty, since we are updating the host side resource
       * without going through the corresponding guest side resource, and
       * hence the two will diverge.
       */
      virgl_resource_dirty(vres, vtransfer->base.level);

      /* We are using the minimum required size to hold the contents,
       * possibly using a layout different from the layout of the resource,
       * so update the transfer strides accordingly.
       */
      vtransfer->base.stride = stride;
      vtransfer->base.layer_stride = layer_stride;

      /* Track the total size of active staging resources. */
      vctx->queued_staging_res_size += size + align_offset;
   }

   return map_addr;
}

static bool
virgl_resource_realloc(struct virgl_context *vctx, struct virgl_resource *res)
{
   struct virgl_screen *vs = virgl_screen(vctx->base.screen);
   const struct pipe_resource *templ = &res->u.b;
   unsigned vbind;
   struct virgl_hw_res *hw_res;

   vbind = pipe_to_virgl_bind(vs, templ->bind, templ->flags);
   hw_res = vs->vws->resource_create(vs->vws,
                                     templ->target,
                                     templ->format,
                                     vbind,
                                     templ->width0,
                                     templ->height0,
                                     templ->depth0,
                                     templ->array_size,
                                     templ->last_level,
                                     templ->nr_samples,
                                     res->metadata.total_size);
   if (!hw_res)
      return false;

   vs->vws->resource_reference(vs->vws, &res->hw_res, NULL);
   res->hw_res = hw_res;

   /* We can safely clear the range here, since it will be repopulated in the
    * following rebind operation, according to the active buffer binds.
    */
   util_range_set_empty(&res->valid_buffer_range);

   /* count toward the staging resource size limit */
   vctx->queued_staging_res_size += res->metadata.total_size;

   virgl_rebind_resource(vctx, &res->u.b);

   return true;
}

void *
virgl_resource_transfer_map(struct pipe_context *ctx,
                            struct pipe_resource *resource,
                            unsigned level,
                            unsigned usage,
                            const struct pipe_box *box,
                            struct pipe_transfer **transfer)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_winsys *vws = virgl_screen(ctx->screen)->vws;
   struct virgl_resource *vres = virgl_resource(resource);
   struct virgl_transfer *trans;
   enum virgl_transfer_map_type map_type;
   void *map_addr;

   /* Multisampled resources require resolve before mapping. */
   assert(resource->nr_samples <= 1);

   trans = virgl_resource_create_transfer(vctx, resource,
                                          &vres->metadata, level, usage, box);

   map_type = virgl_resource_transfer_prepare(vctx, trans);
   switch (map_type) {
   case VIRGL_TRANSFER_MAP_REALLOC:
      if (!virgl_resource_realloc(vctx, vres)) {
         map_addr = NULL;
         break;
      }
      vws->resource_reference(vws, &trans->hw_res, vres->hw_res);
      /* fall through */
   case VIRGL_TRANSFER_MAP_HW_RES:
      trans->hw_res_map = vws->resource_map(vws, vres->hw_res);
      if (trans->hw_res_map)
         map_addr = trans->hw_res_map + trans->offset;
      else
         map_addr = NULL;
      break;
   case VIRGL_TRANSFER_MAP_STAGING:
      map_addr = virgl_staging_map(vctx, trans);
      /* Copy transfers don't make use of hw_res_map at the moment. */
      trans->hw_res_map = NULL;
      break;
   case VIRGL_TRANSFER_MAP_ERROR:
   default:
      trans->hw_res_map = NULL;
      map_addr = NULL;
      break;
   }

   if (!map_addr) {
      virgl_resource_destroy_transfer(vctx, trans);
      return NULL;
   }

   if (vres->u.b.target == PIPE_BUFFER) {
      /* For the checks below to be able to use 'usage', we assume that
       * transfer preparation doesn't affect the usage.
       */
      assert(usage == trans->base.usage);

      /* If we are doing a whole resource discard with a hw_res map, the
       * buffer storage can now be considered unused and we don't care about
       * previous contents. We can thus mark the storage as uninitialized,
       * but only if the buffer is not host writable (in which case we can't
       * clear the valid range, since that would result in missed readbacks
       * in future transfers). We only do this for VIRGL_TRANSFER_MAP_HW_RES,
       * since for VIRGL_TRANSFER_MAP_REALLOC we already take care of the
       * buffer range when reallocating and rebinding, and
       * VIRGL_TRANSFER_MAP_STAGING is not currently used for whole resource
       * discards.
       */
      if (map_type == VIRGL_TRANSFER_MAP_HW_RES &&
          (usage & PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE) &&
          (vres->clean_mask & 1)) {
         util_range_set_empty(&vres->valid_buffer_range);
      }

      if (usage & PIPE_TRANSFER_WRITE)
         util_range_add(&vres->u.b, &vres->valid_buffer_range,
                        box->x, box->x + box->width);
   }

   *transfer = &trans->base;
   return map_addr;
}

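/* Compute per-level strides and offsets for the guest backing store.
 * Illustrative example (hypothetical, with winsys_stride == 0): an 8x8
 * PIPE_TEXTURE_2D with last_level == 1 in a 4-byte format yields
 * stride[0] = 32, layer_stride[0] = 256, level_offset[0] = 0, then
 * stride[1] = 16, layer_stride[1] = 64, level_offset[1] = 256, for a
 * total_size of 320 bytes.
 */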
static void virgl_resource_layout(struct pipe_resource *pt,
                                  struct virgl_resource_metadata *metadata,
                                  uint32_t plane,
                                  uint32_t winsys_stride,
                                  uint32_t plane_offset,
                                  uint32_t modifier)
{
   unsigned level, nblocksy;
   unsigned width = pt->width0;
   unsigned height = pt->height0;
   unsigned depth = pt->depth0;
   unsigned buffer_size = 0;

   for (level = 0; level <= pt->last_level; level++) {
      unsigned slices;

      if (pt->target == PIPE_TEXTURE_CUBE)
         slices = 6;
      else if (pt->target == PIPE_TEXTURE_3D)
         slices = depth;
      else
         slices = pt->array_size;

      nblocksy = util_format_get_nblocksy(pt->format, height);
      metadata->stride[level] = winsys_stride ? winsys_stride :
                                util_format_get_stride(pt->format, width);
      metadata->layer_stride[level] = nblocksy * metadata->stride[level];
      metadata->level_offset[level] = buffer_size;

      buffer_size += slices * metadata->layer_stride[level];

      width = u_minify(width, 1);
      height = u_minify(height, 1);
      depth = u_minify(depth, 1);
   }

   metadata->plane = plane;
   metadata->plane_offset = plane_offset;
   metadata->modifier = modifier;
   if (pt->nr_samples <= 1)
      metadata->total_size = buffer_size;
   else /* don't create guest backing store for MSAA */
      metadata->total_size = 0;
}

static struct pipe_resource *virgl_resource_create(struct pipe_screen *screen,
                                                   const struct pipe_resource *templ)
{
   unsigned vbind;
   struct virgl_screen *vs = virgl_screen(screen);
   struct virgl_resource *res = CALLOC_STRUCT(virgl_resource);

   /* CALLOC_STRUCT may fail */
   if (!res)
      return NULL;

   res->u.b = *templ;
   res->u.b.screen = &vs->base;
   pipe_reference_init(&res->u.b.reference, 1);
   vbind = pipe_to_virgl_bind(vs, templ->bind, templ->flags);
   virgl_resource_layout(&res->u.b, &res->metadata, 0, 0, 0, 0);

   if ((vs->caps.caps.v2.capability_bits & VIRGL_CAP_APP_TWEAK_SUPPORT) &&
       vs->tweak_gles_emulate_bgra &&
       (templ->format == PIPE_FORMAT_B8G8R8A8_SRGB ||
        templ->format == PIPE_FORMAT_B8G8R8A8_UNORM ||
        templ->format == PIPE_FORMAT_B8G8R8X8_SRGB ||
        templ->format == PIPE_FORMAT_B8G8R8X8_UNORM)) {
      vbind |= VIRGL_BIND_PREFER_EMULATED_BGRA;
   }

   res->hw_res = vs->vws->resource_create(vs->vws, templ->target,
                                          templ->format, vbind,
                                          templ->width0,
                                          templ->height0,
                                          templ->depth0,
                                          templ->array_size,
                                          templ->last_level,
                                          templ->nr_samples,
                                          res->metadata.total_size);
   if (!res->hw_res) {
      FREE(res);
      return NULL;
   }

   res->clean_mask = (1 << VR_MAX_TEXTURE_2D_LEVELS) - 1;

   if (templ->target == PIPE_BUFFER) {
      util_range_init(&res->valid_buffer_range);
      virgl_buffer_init(res);
   } else {
      virgl_texture_init(res);
   }

   return &res->u.b;
}

static struct pipe_resource *virgl_resource_from_handle(struct pipe_screen *screen,
                                                        const struct pipe_resource *templ,
                                                        struct winsys_handle *whandle,
                                                        unsigned usage)
{
   uint32_t winsys_stride, plane_offset, plane;
   uint64_t modifier;
   struct virgl_screen *vs = virgl_screen(screen);
   if (templ->target == PIPE_BUFFER)
      return NULL;

   struct virgl_resource *res = CALLOC_STRUCT(virgl_resource);
   /* CALLOC_STRUCT may fail */
   if (!res)
      return NULL;

   res->u.b = *templ;
   res->u.b.screen = &vs->base;
   pipe_reference_init(&res->u.b.reference, 1);

   plane = winsys_stride = plane_offset = modifier = 0;
   res->hw_res = vs->vws->resource_create_from_handle(vs->vws, whandle,
                                                      &plane,
                                                      &winsys_stride,
                                                      &plane_offset,
                                                      &modifier);

   virgl_resource_layout(&res->u.b, &res->metadata, plane, winsys_stride,
                         plane_offset, modifier);
   if (!res->hw_res) {
      FREE(res);
      return NULL;
   }

   virgl_texture_init(res);

   return &res->u.b;
}

void virgl_init_screen_resource_functions(struct pipe_screen *screen)
{
   screen->resource_create = virgl_resource_create;
   screen->resource_from_handle = virgl_resource_from_handle;
   screen->resource_get_handle = u_resource_get_handle_vtbl;
   screen->resource_destroy = u_resource_destroy_vtbl;
}

static void virgl_buffer_subdata(struct pipe_context *pipe,
                                 struct pipe_resource *resource,
                                 unsigned usage, unsigned offset,
                                 unsigned size, const void *data)
{
   struct virgl_context *vctx = virgl_context(pipe);
   struct virgl_resource *vbuf = virgl_resource(resource);

   /* We can try virgl_transfer_queue_extend_buffer when there is no
    * flush/readback/wait required. Based on virgl_resource_transfer_prepare,
    * the simplest way to make sure that is the case is to check the valid
    * buffer range.
    */
   if (!util_ranges_intersect(&vbuf->valid_buffer_range,
                              offset, offset + size) &&
       likely(!(virgl_debug & VIRGL_DEBUG_XFER)) &&
       virgl_transfer_queue_extend_buffer(&vctx->queue,
                                          vbuf->hw_res, offset, size, data)) {
      util_range_add(&vbuf->u.b, &vbuf->valid_buffer_range,
                     offset, offset + size);
      return;
   }

   u_default_buffer_subdata(pipe, resource, usage, offset, size, data);
}

void virgl_init_context_resource_functions(struct pipe_context *ctx)
{
   ctx->transfer_map = u_transfer_map_vtbl;
   ctx->transfer_flush_region = u_transfer_flush_region_vtbl;
   ctx->transfer_unmap = u_transfer_unmap_vtbl;
   ctx->buffer_subdata = virgl_buffer_subdata;
   ctx->texture_subdata = u_default_texture_subdata;
}

struct virgl_transfer *
virgl_resource_create_transfer(struct virgl_context *vctx,
                               struct pipe_resource *pres,
                               const struct virgl_resource_metadata *metadata,
                               unsigned level, unsigned usage,
                               const struct pipe_box *box)
{
   struct virgl_winsys *vws = virgl_screen(vctx->base.screen)->vws;
   struct virgl_transfer *trans;
   enum pipe_format format = pres->format;
   const unsigned blocksy = box->y / util_format_get_blockheight(format);
   const unsigned blocksx = box->x / util_format_get_blockwidth(format);

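   /* Locate the start of the box within the guest backing store. For example
    * (hypothetical numbers, 4-byte format with 1x1 blocks): a box at
    * (x = 8, y = 4, z = 2) on level 1 of a 2D array starts at
    * plane_offset + level_offset[1] + 2 * layer_stride[1] +
    * 4 * stride[1] + 8 * 4 bytes.
    */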
   unsigned offset = metadata->plane_offset + metadata->level_offset[level];
   if (pres->target == PIPE_TEXTURE_CUBE ||
       pres->target == PIPE_TEXTURE_CUBE_ARRAY ||
       pres->target == PIPE_TEXTURE_3D ||
       pres->target == PIPE_TEXTURE_2D_ARRAY) {
      offset += box->z * metadata->layer_stride[level];
   } else if (pres->target == PIPE_TEXTURE_1D_ARRAY) {
      offset += box->z * metadata->stride[level];
      assert(box->y == 0);
   } else if (pres->target == PIPE_BUFFER) {
      assert(box->y == 0 && box->z == 0);
   } else {
      assert(box->z == 0);
   }

   offset += blocksy * metadata->stride[level];
   offset += blocksx * util_format_get_blocksize(format);

   trans = slab_alloc(&vctx->transfer_pool);
   if (!trans)
      return NULL;

   /* note that trans is not zero-initialized */
   trans->base.resource = NULL;
   pipe_resource_reference(&trans->base.resource, pres);
   trans->hw_res = NULL;
   vws->resource_reference(vws, &trans->hw_res, virgl_resource(pres)->hw_res);

   trans->base.level = level;
   trans->base.usage = usage;
   trans->base.box = *box;
   trans->base.stride = metadata->stride[level];
   trans->base.layer_stride = metadata->layer_stride[level];
   trans->offset = offset;
   util_range_init(&trans->range);
   trans->copy_src_hw_res = NULL;
   trans->copy_src_offset = 0;
   trans->resolve_transfer = NULL;

   if (trans->base.resource->target != PIPE_TEXTURE_3D &&
       trans->base.resource->target != PIPE_TEXTURE_CUBE &&
       trans->base.resource->target != PIPE_TEXTURE_1D_ARRAY &&
       trans->base.resource->target != PIPE_TEXTURE_2D_ARRAY &&
       trans->base.resource->target != PIPE_TEXTURE_CUBE_ARRAY)
      trans->l_stride = 0;
   else
      trans->l_stride = trans->base.layer_stride;

   return trans;
}

void virgl_resource_destroy_transfer(struct virgl_context *vctx,
                                     struct virgl_transfer *trans)
{
   struct virgl_winsys *vws = virgl_screen(vctx->base.screen)->vws;

   vws->resource_reference(vws, &trans->copy_src_hw_res, NULL);

   util_range_destroy(&trans->range);
   vws->resource_reference(vws, &trans->hw_res, NULL);
   pipe_resource_reference(&trans->base.resource, NULL);
   slab_free(&vctx->transfer_pool, trans);
}

void virgl_resource_destroy(struct pipe_screen *screen,
                            struct pipe_resource *resource)
{
   struct virgl_screen *vs = virgl_screen(screen);
   struct virgl_resource *res = virgl_resource(resource);

   if (res->u.b.target == PIPE_BUFFER)
      util_range_destroy(&res->valid_buffer_range);

   vs->vws->resource_reference(vs->vws, &res->hw_res, NULL);
   FREE(res);
}

bool virgl_resource_get_handle(struct pipe_screen *screen,
                               struct pipe_resource *resource,
                               struct winsys_handle *whandle)
{
   struct virgl_screen *vs = virgl_screen(screen);
   struct virgl_resource *res = virgl_resource(resource);

   if (res->u.b.target == PIPE_BUFFER)
      return false;

   return vs->vws->resource_get_handle(vs->vws, res->hw_res,
                                       res->metadata.stride[0],
                                       whandle);
}

void virgl_resource_dirty(struct virgl_resource *res, uint32_t level)
{
   if (res) {
      if (res->u.b.target == PIPE_BUFFER)
         res->clean_mask &= ~1;
      else
         res->clean_mask &= ~(1 << level);
   }
}