Akshu Agrawal | 0337d9b | 2016-07-28 15:35:45 +0530 | [diff] [blame] | 1 | /* |
Daniele Castagna | 7a755de | 2016-12-16 17:32:30 -0500 | [diff] [blame] | 2 | * Copyright 2016 The Chromium OS Authors. All rights reserved. |
Akshu Agrawal | 0337d9b | 2016-07-28 15:35:45 +0530 | [diff] [blame] | 3 | * Use of this source code is governed by a BSD-style license that can be |
| 4 | * found in the LICENSE file. |
| 5 | */ |
| 6 | #ifdef DRV_AMDGPU |
| 7 | #include <errno.h> |
| 8 | #include <stdio.h> |
| 9 | #include <stdlib.h> |
| 10 | #include <string.h> |
Pratik Vishwakarma | bc1b535 | 2016-12-12 14:22:10 +0530 | [diff] [blame] | 11 | #include <sys/mman.h> |
Akshu Agrawal | 0337d9b | 2016-07-28 15:35:45 +0530 | [diff] [blame] | 12 | #include <xf86drm.h> |
| 13 | #include <amdgpu_drm.h> |
| 14 | #include <amdgpu.h> |
| 15 | |
| 16 | #include "addrinterface.h" |
| 17 | #include "drv_priv.h" |
| 18 | #include "helpers.h" |
| 19 | #include "util.h" |
| 20 | |
#ifndef CIASICIDGFXENGINE_SOUTHERNISLAND
/* addrlib GFX engine id selecting the Southern-Islands+ (GCN) path. */
#define CIASICIDGFXENGINE_SOUTHERNISLAND 0x0000000A
#endif

/*
 * MMIO register dword offsets, read via AMDGPU_INFO_READ_MMR_REG in
 * amdgpu_read_mm_regs() to seed addrlib's register-value inputs.
 */
#define mmCC_RB_BACKEND_DISABLE 0x263d
#define mmGB_TILE_MODE0 0x2644
#define mmGB_MACROTILE_MODE0 0x2664
#define mmGB_ADDR_CONFIG 0x263e
#define mmMC_ARB_RAMCFG 0x9d8
| 30 | |
/*
 * AMDGPU chip family ids, passed to addrlib via
 * ADDR_CREATE_INPUT.chipFamily (see amdgpu_addrlib_init()).
 * NOTE(review): assumed to mirror the family ids addrlib expects --
 * confirm against the addrlib build in use before reordering.
 */
enum {
	FAMILY_UNKNOWN,
	FAMILY_SI,
	FAMILY_CI,
	FAMILY_KV,
	FAMILY_VI,
	FAMILY_CZ,	/* currently hard-coded in amdgpu_addrlib_init() */
	FAMILY_PI,
	FAMILY_LAST,
};
| 41 | |
Gurchetan Singh | 6b41fb5 | 2017-03-01 20:14:39 -0800 | [diff] [blame^] | 42 | const static uint32_t supported_formats[] = { |
| 43 | DRM_FORMAT_ARGB8888, DRM_FORMAT_XBGR8888, DRM_FORMAT_XRGB8888 |
Gurchetan Singh | 179687e | 2016-10-28 10:07:35 -0700 | [diff] [blame] | 44 | }; |
| 45 | |
Akshu Agrawal | 0337d9b | 2016-07-28 15:35:45 +0530 | [diff] [blame] | 46 | static int amdgpu_set_metadata(int fd, uint32_t handle, |
| 47 | struct amdgpu_bo_metadata *info) |
| 48 | { |
| 49 | struct drm_amdgpu_gem_metadata args = {0}; |
| 50 | |
| 51 | if (!info) |
| 52 | return -EINVAL; |
| 53 | |
| 54 | args.handle = handle; |
| 55 | args.op = AMDGPU_GEM_METADATA_OP_SET_METADATA; |
| 56 | args.data.flags = info->flags; |
| 57 | args.data.tiling_info = info->tiling_info; |
| 58 | |
| 59 | if (info->size_metadata > sizeof(args.data.data)) |
| 60 | return -EINVAL; |
| 61 | |
| 62 | if (info->size_metadata) { |
| 63 | args.data.data_size_bytes = info->size_metadata; |
| 64 | memcpy(args.data.data, info->umd_metadata, info->size_metadata); |
| 65 | } |
| 66 | |
| 67 | return drmCommandWriteRead(fd, DRM_AMDGPU_GEM_METADATA, &args, |
| 68 | sizeof(args)); |
| 69 | } |
| 70 | |
| 71 | static int amdgpu_read_mm_regs(int fd, unsigned dword_offset, |
| 72 | unsigned count, uint32_t instance, |
| 73 | uint32_t flags, uint32_t *values) |
| 74 | { |
| 75 | struct drm_amdgpu_info request; |
| 76 | |
| 77 | memset(&request, 0, sizeof(request)); |
| 78 | request.return_pointer = (uintptr_t) values; |
| 79 | request.return_size = count * sizeof(uint32_t); |
| 80 | request.query = AMDGPU_INFO_READ_MMR_REG; |
| 81 | request.read_mmr_reg.dword_offset = dword_offset; |
| 82 | request.read_mmr_reg.count = count; |
| 83 | request.read_mmr_reg.instance = instance; |
| 84 | request.read_mmr_reg.flags = flags; |
| 85 | |
| 86 | return drmCommandWrite(fd, DRM_AMDGPU_INFO, &request, |
| 87 | sizeof(struct drm_amdgpu_info)); |
| 88 | } |
| 89 | |
| 90 | static int amdgpu_query_gpu(int fd, struct amdgpu_gpu_info *gpu_info) |
| 91 | { |
| 92 | int ret; |
| 93 | uint32_t instance; |
| 94 | |
| 95 | if (!gpu_info) |
| 96 | return -EINVAL; |
| 97 | |
| 98 | instance = AMDGPU_INFO_MMR_SH_INDEX_MASK << |
| 99 | AMDGPU_INFO_MMR_SH_INDEX_SHIFT; |
| 100 | |
| 101 | ret = amdgpu_read_mm_regs(fd, mmCC_RB_BACKEND_DISABLE, 1, instance, 0, |
| 102 | &gpu_info->backend_disable[0]); |
| 103 | if (ret) |
| 104 | return ret; |
| 105 | /* extract bitfield CC_RB_BACKEND_DISABLE.BACKEND_DISABLE */ |
| 106 | gpu_info->backend_disable[0] = |
| 107 | (gpu_info->backend_disable[0] >> 16) & 0xff; |
| 108 | |
| 109 | ret = amdgpu_read_mm_regs(fd, mmGB_TILE_MODE0, 32, 0xffffffff, 0, |
| 110 | gpu_info->gb_tile_mode); |
| 111 | if (ret) |
| 112 | return ret; |
| 113 | |
| 114 | ret = amdgpu_read_mm_regs(fd, mmGB_MACROTILE_MODE0, 16, 0xffffffff, 0, |
| 115 | gpu_info->gb_macro_tile_mode); |
| 116 | if (ret) |
| 117 | return ret; |
| 118 | |
| 119 | ret = amdgpu_read_mm_regs(fd, mmGB_ADDR_CONFIG, 1, 0xffffffff, 0, |
| 120 | &gpu_info->gb_addr_cfg); |
| 121 | if (ret) |
| 122 | return ret; |
| 123 | |
| 124 | ret = amdgpu_read_mm_regs(fd, mmMC_ARB_RAMCFG, 1, 0xffffffff, 0, |
| 125 | &gpu_info->mc_arb_ramcfg); |
| 126 | if (ret) |
| 127 | return ret; |
| 128 | |
| 129 | return 0; |
| 130 | } |
| 131 | |
/*
 * addrlib system-memory allocation callback; thin wrapper over malloc().
 * addrlib handles a NULL return itself -- TODO confirm against the
 * addrlib headers.
 */
static void *ADDR_API alloc_sys_mem(const ADDR_ALLOCSYSMEM_INPUT *in)
{
	return malloc(in->sizeInBytes);
}
| 136 | |
/* addrlib system-memory free callback; always reports success. */
static ADDR_E_RETURNCODE ADDR_API free_sys_mem(const ADDR_FREESYSMEM_INPUT *in)
{
	free(in->pVirtAddr);
	return ADDR_OK;
}
| 142 | |
/*
 * Ask addrlib to compute the surface layout (pitch, size, alignment)
 * for a width x height buffer of the given DRM format and usage, and
 * OR the resulting AMDGPU_TILING_* kernel flags into *tiling_flags.
 * Full layout results are returned through *addr_out.
 *
 * Returns 0 on success, -EINVAL if either addrlib call fails.
 */
static int amdgpu_addrlib_compute(void *addrlib, uint32_t width,
				  uint32_t height, uint32_t format,
				  uint32_t usage, uint32_t *tiling_flags,
				  ADDR_COMPUTE_SURFACE_INFO_OUTPUT *addr_out)
{
	ADDR_COMPUTE_SURFACE_INFO_INPUT addr_surf_info_in = {0};
	ADDR_TILEINFO addr_tile_info = {0};
	ADDR_TILEINFO addr_tile_info_out = {0};

	addr_surf_info_in.size = sizeof(ADDR_COMPUTE_SURFACE_INFO_INPUT);

	/*
	 * Set the requested tiling mode: 2D macro-tiling by default,
	 * linear for cursor/CPU-accessed buffers, 1D tiling for very
	 * small surfaces.
	 */
	addr_surf_info_in.tileMode = ADDR_TM_2D_TILED_THIN1;
	if (usage & (BO_USE_CURSOR | BO_USE_LINEAR | BO_USE_SW_READ_OFTEN |
		     BO_USE_SW_WRITE_OFTEN))
		addr_surf_info_in.tileMode = ADDR_TM_LINEAR_ALIGNED;
	else if (width <= 16 || height <= 16)
		addr_surf_info_in.tileMode = ADDR_TM_1D_TILED_THIN1;

	/* Bits per pixel are derived from the DRM format (plane 0). */
	addr_surf_info_in.bpp = drv_bpp_from_format(format, 0);
	addr_surf_info_in.numSamples = 1;
	addr_surf_info_in.width = width;
	addr_surf_info_in.height = height;
	addr_surf_info_in.numSlices = 1;
	addr_surf_info_in.pTileInfo = &addr_tile_info;
	addr_surf_info_in.tileIndex = -1;

	/* This disables incorrect calculations (hacks) in addrlib. */
	addr_surf_info_in.flags.noStencil = 1;

	/* Set the micro tile type: displayable only for scanout buffers. */
	if (usage & BO_USE_SCANOUT)
		addr_surf_info_in.tileType = ADDR_DISPLAYABLE;
	else
		addr_surf_info_in.tileType = ADDR_NON_DISPLAYABLE;

	addr_out->size = sizeof(ADDR_COMPUTE_SURFACE_INFO_OUTPUT);
	addr_out->pTileInfo = &addr_tile_info_out;

	if (AddrComputeSurfaceInfo(addrlib, &addr_surf_info_in,
				   addr_out) != ADDR_OK)
		return -EINVAL;

	ADDR_CONVERT_TILEINFOTOHW_INPUT s_in = {0};
	ADDR_CONVERT_TILEINFOTOHW_OUTPUT s_out = {0};
	ADDR_TILEINFO s_tile_hw_info_out = {0};

	s_in.size = sizeof(ADDR_CONVERT_TILEINFOTOHW_INPUT);
	/* Convert from real value to HW value (reverse = 0). */
	s_in.reverse = 0;
	s_in.pTileInfo = &addr_tile_info_out;
	s_in.tileIndex = -1;

	s_out.size = sizeof(ADDR_CONVERT_TILEINFOTOHW_OUTPUT);
	s_out.pTileInfo = &s_tile_hw_info_out;

	if (AddrConvertTileInfoToHW(addrlib, &s_in, &s_out) != ADDR_OK)
		return -EINVAL;

	/* Encode the array mode addrlib actually chose (it may differ
	 * from the requested tileMode). */
	if (addr_out->tileMode >= ADDR_TM_2D_TILED_THIN1)
		/* 2D_TILED_THIN1 */
		*tiling_flags |= AMDGPU_TILING_SET(ARRAY_MODE, 4);
	else if (addr_out->tileMode >= ADDR_TM_1D_TILED_THIN1)
		/* 1D_TILED_THIN1 */
		*tiling_flags |= AMDGPU_TILING_SET(ARRAY_MODE, 2);
	else
		/* LINEAR_ALIGNED */
		*tiling_flags |= AMDGPU_TILING_SET(ARRAY_MODE, 1);

	/* Pack bank/pipe geometry (log2 where the kernel expects it,
	 * HW-encoded values from the conversion above otherwise). */
	*tiling_flags |= AMDGPU_TILING_SET(BANK_WIDTH,
			drv_log_base2(addr_tile_info_out.bankWidth));
	*tiling_flags |= AMDGPU_TILING_SET(BANK_HEIGHT,
			drv_log_base2(addr_tile_info_out.bankHeight));
	*tiling_flags |= AMDGPU_TILING_SET(TILE_SPLIT,
			s_tile_hw_info_out.tileSplitBytes);
	*tiling_flags |= AMDGPU_TILING_SET(MACRO_TILE_ASPECT,
			drv_log_base2(addr_tile_info_out.macroAspectRatio));
	*tiling_flags |= AMDGPU_TILING_SET(PIPE_CONFIG,
			s_tile_hw_info_out.pipeConfig);
	*tiling_flags |= AMDGPU_TILING_SET(NUM_BANKS, s_tile_hw_info_out.banks);

	return 0;
}
| 227 | |
/*
 * Query the GPU's tiling registers and create an addrlib instance
 * configured from them. Returns the addrlib handle, or NULL on failure
 * (error already logged to stderr).
 */
static void *amdgpu_addrlib_init(int fd)
{
	int ret;
	ADDR_CREATE_INPUT addr_create_input = {0};
	ADDR_CREATE_OUTPUT addr_create_output = {0};
	ADDR_REGISTER_VALUE reg_value = {0};
	ADDR_CREATE_FLAGS create_flags = { {0} };
	ADDR_E_RETURNCODE addr_ret;

	addr_create_input.size = sizeof(ADDR_CREATE_INPUT);
	addr_create_output.size = sizeof(ADDR_CREATE_OUTPUT);

	struct amdgpu_gpu_info gpu_info = {0};

	ret = amdgpu_query_gpu(fd, &gpu_info);

	if (ret) {
		fprintf(stderr, "[%s]failed with error =%d\n", __func__, ret);
		return NULL;
	}

	/* Bank count (bits 1:0) and rank count (bit 2) come from
	 * MC_ARB_RAMCFG. NOTE(review): bitfield positions assumed from
	 * the masks used here -- confirm against the register spec. */
	reg_value.noOfBanks = gpu_info.mc_arb_ramcfg & 0x3;
	reg_value.gbAddrConfig = gpu_info.gb_addr_cfg;
	reg_value.noOfRanks = (gpu_info.mc_arb_ramcfg & 0x4) >> 2;

	reg_value.backendDisables = gpu_info.backend_disable[0];
	reg_value.pTileConfig = gpu_info.gb_tile_mode;
	reg_value.noOfEntries = sizeof(gpu_info.gb_tile_mode)
			/ sizeof(gpu_info.gb_tile_mode[0]);
	reg_value.pMacroTileConfig = gpu_info.gb_macro_tile_mode;
	reg_value.noOfMacroEntries = sizeof(gpu_info.gb_macro_tile_mode)
			/ sizeof(gpu_info.gb_macro_tile_mode[0]);
	/* Use tile-index based lookups inside addrlib. */
	create_flags.value = 0;
	create_flags.useTileIndex = 1;

	addr_create_input.chipEngine = CIASICIDGFXENGINE_SOUTHERNISLAND;

	/* NOTE(review): chip family is hard-coded to FAMILY_CZ (Carrizo);
	 * confirm this is intended for every ASIC this driver may run on. */
	addr_create_input.chipFamily = FAMILY_CZ;
	addr_create_input.createFlags = create_flags;
	addr_create_input.callbacks.allocSysMem = alloc_sys_mem;
	addr_create_input.callbacks.freeSysMem = free_sys_mem;
	addr_create_input.callbacks.debugPrint = 0;
	addr_create_input.regValue = reg_value;

	addr_ret = AddrCreate(&addr_create_input, &addr_create_output);

	if (addr_ret != ADDR_OK) {
		fprintf(stderr, "[%s]failed error =%d\n", __func__, addr_ret);
		return NULL;
	}

	return addr_create_output.hLib;
}
| 281 | |
/*
 * Backend init: create the addrlib handle (stored in drv->priv) and
 * register the four supported tiling/display combinations for each
 * format, in priority order:
 *   1. linear, displayable      3. 2D-tiled, displayable
 *   2. linear, non-displayable  4. 2D-tiled, non-displayable
 * Returns 0 on success, -1 if addrlib creation fails, or the first
 * failing drv_add_combinations() result.
 */
static int amdgpu_init(struct driver *drv)
{
	int ret;
	void *addrlib;
	struct format_metadata metadata;
	/* Start with all common usages; CPU-access bits are dropped below
	 * before the tiled combinations are added. */
	uint32_t flags = BO_COMMON_USE_MASK;

	addrlib = amdgpu_addrlib_init(drv_get_fd(drv));
	if (!addrlib)
		return -1;

	drv->priv = addrlib;

	/* 1) Linear layout, displayable micro-tile type. The tiling field
	 * packs micro-tile type in the high 16 bits and tile mode in the
	 * low 16 (matches amdgpu_bo_create's usage of addrlib modes). */
	metadata.tiling = ADDR_DISPLAYABLE << 16 | ADDR_TM_LINEAR_ALIGNED;
	metadata.priority = 1;
	metadata.modifier = DRM_FORMAT_MOD_NONE;

	ret = drv_add_combinations(drv, supported_formats,
				   ARRAY_SIZE(supported_formats), &metadata,
				   flags);
	if (ret)
		return ret;

	/* Cursor is only allowed on linear displayable ARGB/XRGB. */
	drv_modify_combination(drv, DRM_FORMAT_ARGB8888, &metadata,
			       BO_USE_CURSOR | BO_USE_SCANOUT);
	drv_modify_combination(drv, DRM_FORMAT_XRGB8888, &metadata,
			       BO_USE_CURSOR | BO_USE_SCANOUT);
	drv_modify_combination(drv, DRM_FORMAT_XBGR8888, &metadata,
			       BO_USE_SCANOUT);

	/* 2) Linear layout, non-displayable micro-tile type. */
	metadata.tiling = ADDR_NON_DISPLAYABLE << 16 | ADDR_TM_LINEAR_ALIGNED;
	metadata.priority = 2;
	metadata.modifier = DRM_FORMAT_MOD_NONE;

	ret = drv_add_combinations(drv, supported_formats,
				   ARRAY_SIZE(supported_formats), &metadata,
				   flags);
	if (ret)
		return ret;

	/* Tiled combinations exclude frequent SW mapping and linear use. */
	flags &= ~BO_USE_SW_WRITE_OFTEN;
	flags &= ~BO_USE_SW_READ_OFTEN;
	flags &= ~BO_USE_LINEAR;

	/* 3) 2D-tiled, displayable (modifier still DRM_FORMAT_MOD_NONE
	 * from the assignment above). */
	metadata.tiling = ADDR_DISPLAYABLE << 16 | ADDR_TM_2D_TILED_THIN1;
	metadata.priority = 3;

	ret = drv_add_combinations(drv, supported_formats,
				   ARRAY_SIZE(supported_formats), &metadata,
				   flags);
	if (ret)
		return ret;

	/* Tiled scanout (no cursor) for all three formats. */
	drv_modify_combination(drv, DRM_FORMAT_ARGB8888, &metadata,
			       BO_USE_SCANOUT);
	drv_modify_combination(drv, DRM_FORMAT_XRGB8888, &metadata,
			       BO_USE_SCANOUT);
	drv_modify_combination(drv, DRM_FORMAT_XBGR8888, &metadata,
			       BO_USE_SCANOUT);

	/* 4) 2D-tiled, non-displayable (lowest priority). */
	metadata.tiling = ADDR_NON_DISPLAYABLE << 16 | ADDR_TM_2D_TILED_THIN1;
	metadata.priority = 4;

	ret = drv_add_combinations(drv, supported_formats,
				   ARRAY_SIZE(supported_formats), &metadata,
				   flags);
	if (ret)
		return ret;

	return ret;
}
| 353 | |
/* Backend teardown: destroy the addrlib handle created in amdgpu_init(). */
static void amdgpu_close(struct driver *drv)
{
	AddrDestroy(drv->priv);
	drv->priv = NULL;
}
| 359 | |
/*
 * Allocate a single-plane GEM buffer: compute the layout with addrlib,
 * create the BO in VRAM via DRM_AMDGPU_GEM_CREATE, then attach the
 * tiling flags as GEM metadata. Returns 0 on success or a negative
 * error code.
 */
static int amdgpu_bo_create(struct bo *bo, uint32_t width, uint32_t height,
			    uint32_t format, uint32_t usage)
{
	void *addrlib = bo->drv->priv;
	union drm_amdgpu_gem_create gem_create;
	struct amdgpu_bo_metadata metadata = {0};
	ADDR_COMPUTE_SURFACE_INFO_OUTPUT addr_out = {0};
	uint32_t tiling_flags = 0;
	uint32_t gem_create_flags = 0;
	int ret;

	if (amdgpu_addrlib_compute(addrlib, width,
				   height, format, usage,
				   &tiling_flags,
				   &addr_out) < 0)
		return -EINVAL;

	bo->tiling = tiling_flags;
	bo->offsets[0] = 0;
	bo->sizes[0] = addr_out.surfSize;
	/* Byte stride = pitch (in pixels) * bytes per pixel. */
	bo->strides[0] = addr_out.pixelPitch
		* DIV_ROUND_UP(addr_out.pixelBits, 8);

	/* Request CPU-visible VRAM only when the usage implies SW access;
	 * otherwise let the kernel place the BO in invisible VRAM. */
	if (usage & (BO_USE_CURSOR | BO_USE_LINEAR | BO_USE_SW_READ_OFTEN |
		     BO_USE_SW_WRITE_OFTEN | BO_USE_SW_WRITE_RARELY |
		     BO_USE_SW_READ_RARELY))
		gem_create_flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
	else
		gem_create_flags |= AMDGPU_GEM_CREATE_NO_CPU_ACCESS;

	memset(&gem_create, 0, sizeof(gem_create));
	gem_create.in.bo_size = bo->sizes[0];
	gem_create.in.alignment = addr_out.baseAlign;
	/* Set the placement. */
	gem_create.in.domains = AMDGPU_GEM_DOMAIN_VRAM;
	gem_create.in.domain_flags = gem_create_flags;

	/* Allocate the buffer with the preferred heap. */
	ret = drmCommandWriteRead(drv_get_fd(bo->drv), DRM_AMDGPU_GEM_CREATE,
				  &gem_create, sizeof(gem_create));

	if (ret < 0)
		return ret;

	bo->handles[0].u32 = gem_create.out.handle;

	/* Record the tiling on the GEM object so it travels with the BO. */
	metadata.tiling_info = tiling_flags;

	ret = amdgpu_set_metadata(drv_get_fd(bo->drv),
				  bo->handles[0].u32, &metadata);

	return ret;
}
| 412 | |
Pratik Vishwakarma | bc1b535 | 2016-12-12 14:22:10 +0530 | [diff] [blame] | 413 | static void *amdgpu_bo_map(struct bo *bo, struct map_info *data, size_t plane) |
| 414 | { |
| 415 | int ret; |
| 416 | union drm_amdgpu_gem_mmap gem_map; |
| 417 | |
| 418 | memset(&gem_map, 0, sizeof(gem_map)); |
| 419 | gem_map.in.handle = bo->handles[0].u32; |
| 420 | |
| 421 | ret = drmIoctl(bo->drv->fd, DRM_IOCTL_AMDGPU_GEM_MMAP, &gem_map); |
| 422 | if (ret) { |
| 423 | fprintf(stderr, "drv: DRM_IOCTL_AMDGPU_GEM_MMAP failed\n"); |
| 424 | return MAP_FAILED; |
| 425 | } |
| 426 | data->length = bo->sizes[0]; |
| 427 | |
| 428 | return mmap(0, bo->sizes[0], PROT_READ | PROT_WRITE, MAP_SHARED, |
| 429 | bo->drv->fd, gem_map.out.addr_ptr); |
| 430 | } |
| 431 | |
/* Backend vtable: amdgpu-specific init/create/map, generic destroy/import. */
struct backend backend_amdgpu = {
	.name = "amdgpu",
	.init = amdgpu_init,
	.close = amdgpu_close,
	.bo_create = amdgpu_bo_create,
	.bo_destroy = drv_gem_bo_destroy,
	.bo_import = drv_prime_bo_import,
	.bo_map = amdgpu_bo_map,
};
| 441 | |
| 442 | #endif |
| 443 | |