/*
 * Copyright 2014 The Chromium OS Authors. All rights reserved.
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#ifdef DRV_TEGRA

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <xf86drm.h>
#include <tegra_drm.h>

#include "drv_priv.h"
#include "helpers.h"
#include "util.h"

/*
 * GOB (Group Of Bytes) is the basic unit of the blocklinear layout.
 * GOBs are arranged into blocks, where the height of the block (measured
 * in GOBs) is configurable.
 */
#define NV_BLOCKLINEAR_GOB_HEIGHT 8
#define NV_BLOCKLINEAR_GOB_WIDTH 64
#define NV_DEFAULT_BLOCK_HEIGHT_LOG2 4
#define NV_PREFERRED_PAGE_SIZE (128 * 1024)

enum nv_mem_kind
{
	NV_MEM_KIND_PITCH = 0,
	NV_MEM_KIND_C32_2CRA = 0xdb,
	NV_MEM_KIND_GENERIC_16Bx2 = 0xfe,
};

enum tegra_map_type {
	TEGRA_READ_TILED_BUFFER = 0,
	TEGRA_WRITE_TILED_BUFFER = 1,
};

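/*
 * Bookkeeping for a mapped blocklinear buffer: "tiled" is the raw GEM
 * mapping and "untiled" is the linear shadow copy handed back to the
 * caller (see tegra_bo_map/tegra_bo_unmap below).
 */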
struct tegra_private_map_data {
	void *tiled;
	void *untiled;
};

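/*
 * Each format is advertised twice: a linear combination for cursor and
 * software access, and a render-target combination that may be allocated
 * blocklinear (see tegra_bo_create).
 */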
static struct supported_combination combos[4] = {
	{DRM_FORMAT_ARGB8888, DRM_FORMAT_MOD_NONE,
	 BO_USE_CURSOR | BO_USE_LINEAR | BO_USE_SW_READ_OFTEN | BO_USE_SW_WRITE_OFTEN},
	{DRM_FORMAT_ARGB8888, DRM_FORMAT_MOD_NONE,
	 BO_USE_RENDERING | BO_USE_SW_READ_RARELY | BO_USE_SW_WRITE_RARELY},
	{DRM_FORMAT_XRGB8888, DRM_FORMAT_MOD_NONE,
	 BO_USE_CURSOR | BO_USE_LINEAR | BO_USE_SW_READ_OFTEN | BO_USE_SW_WRITE_OFTEN},
	{DRM_FORMAT_XRGB8888, DRM_FORMAT_MOD_NONE,
	 BO_USE_RENDERING | BO_USE_SW_READ_RARELY | BO_USE_SW_WRITE_RARELY},
};

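/*
 * Pick the block height (in log2 GOBs), starting from the default of
 * 2^4 = 16 GOBs and halving it while a smaller block would still cover
 * the whole surface height.
 */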
static int compute_block_height_log2(int height)
{
	int block_height_log2 = NV_DEFAULT_BLOCK_HEIGHT_LOG2;

	if (block_height_log2 > 0) {
		/* Shrink, if a smaller block height could cover the whole
		 * surface height. */
		int proposed = NV_BLOCKLINEAR_GOB_HEIGHT << (block_height_log2 - 1);
		while (proposed >= height) {
			block_height_log2--;
			if (block_height_log2 == 0)
				break;
			proposed /= 2;
		}
	}
	return block_height_log2;
}

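/*
 * Example (assuming 4 bytes per pixel for ARGB8888): a 1920x1080 surface
 * gets pitch = 7680 (already a multiple of 64), block_height_log2 = 4,
 * padded_height = ALIGN(1080, 128) = 1152, and a final size of
 * 7680 * 1152 rounded up to 128 KiB, i.e. 8912896 bytes.
 */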
static void compute_layout_blocklinear(int width, int height, int format,
				       enum nv_mem_kind *kind,
				       uint32_t *block_height_log2,
				       uint32_t *stride, uint32_t *size)
{
	int pitch = drv_stride_from_format(format, width, 0);

	/* Align to blocklinear blocks. */
	pitch = ALIGN(pitch, NV_BLOCKLINEAR_GOB_WIDTH);

	/* Compute padded height. */
	*block_height_log2 = compute_block_height_log2(height);
	int block_height = 1 << *block_height_log2;
	int padded_height =
		ALIGN(height, NV_BLOCKLINEAR_GOB_HEIGHT * block_height);

	int bytes = pitch * padded_height;

	/* Pad the allocation to the preferred page size.
	 * This will reduce the required page table size (see discussion in NV
	 * bug 1321091), and also acts as a WAR for NV bug 1325421.
	 */
	bytes = ALIGN(bytes, NV_PREFERRED_PAGE_SIZE);

	*kind = NV_MEM_KIND_C32_2CRA;
	*stride = pitch;
	*size = bytes;
}

static void compute_layout_linear(int width, int height, int format,
				  uint32_t *stride, uint32_t *size)
{
	*stride = ALIGN(drv_stride_from_format(format, width, 0), 64);
	*size = *stride * height;
}

static void transfer_tile(struct bo *bo, uint8_t *tiled, uint8_t *untiled,
			  enum tegra_map_type type, uint32_t bytes_per_pixel,
			  uint32_t gob_top, uint32_t gob_left,
			  uint32_t gob_size_pixels)
{
	uint8_t *tmp;
	uint32_t x, y, k;
	for (k = 0; k < gob_size_pixels; k++) {
		/*
		 * Given the kth pixel starting from the tile specified by
		 * gob_top and gob_left, unswizzle to get the standard (x, y)
		 * representation.
		 */
		x = gob_left + (((k >> 3) & 8) | ((k >> 1) & 4) | (k & 3));
		y = gob_top + ((k >> 7 << 3) | ((k >> 3) & 6) | ((k >> 2) & 1));

		tmp = untiled + (y * bo->strides[0]) + (x * bytes_per_pixel);

		if (type == TEGRA_READ_TILED_BUFFER)
			memcpy(tmp, tiled, bytes_per_pixel);
		else if (type == TEGRA_WRITE_TILED_BUFFER)
			memcpy(tiled, tmp, bytes_per_pixel);

		/* Move on to next pixel. */
		tiled += bytes_per_pixel;
	}
}

static void transfer_tiled_memory(struct bo *bo, uint8_t *tiled,
				  uint8_t *untiled, enum tegra_map_type type)
{
	uint32_t gob_width, gob_height, gob_size_bytes, gob_size_pixels,
		 gob_count_x, gob_count_y, gob_top, gob_left;
	uint32_t i, j, offset;
	uint8_t *tmp;
	uint32_t bytes_per_pixel = drv_stride_from_format(bo->format, 1, 0);

	/*
	 * The blocklinear format consists of 8*(2^n) x 64 byte sized tiles,
	 * where 0 <= n <= 4.
	 */
	gob_width = DIV_ROUND_UP(NV_BLOCKLINEAR_GOB_WIDTH, bytes_per_pixel);
	gob_height = NV_BLOCKLINEAR_GOB_HEIGHT *
		     (1 << NV_DEFAULT_BLOCK_HEIGHT_LOG2);
	/* Start from the maximum possible GOB height and shrink it while it
	 * is at least twice the surface height. */
	while (gob_height > NV_BLOCKLINEAR_GOB_HEIGHT
	       && gob_height >= 2 * bo->height)
		gob_height /= 2;

	gob_size_bytes = gob_height * NV_BLOCKLINEAR_GOB_WIDTH;
	gob_size_pixels = gob_height * gob_width;

	gob_count_x = DIV_ROUND_UP(bo->strides[0], NV_BLOCKLINEAR_GOB_WIDTH);
	gob_count_y = DIV_ROUND_UP(bo->height, gob_height);

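	/*
	 * Walk the surface GOB by GOB in row-major order; in the tiled
	 * buffer, GOBs are stored back to back, gob_size_bytes apart.
	 */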
	offset = 0;
	for (j = 0; j < gob_count_y; j++) {
		gob_top = j * gob_height;
		for (i = 0; i < gob_count_x; i++) {
			tmp = tiled + offset;
			gob_left = i * gob_width;

			transfer_tile(bo, tmp, untiled, type, bytes_per_pixel,
				      gob_top, gob_left, gob_size_pixels);

			offset += gob_size_bytes;
		}
	}
}

static int tegra_init(struct driver *drv)
{
	drv_insert_combinations(drv, combos, ARRAY_SIZE(combos));
	return drv_add_kms_flags(drv);
}

static int tegra_bo_create(struct bo *bo, uint32_t width, uint32_t height,
			   uint32_t format, uint32_t flags)
{
	uint32_t size, stride, block_height_log2 = 0;
	enum nv_mem_kind kind = NV_MEM_KIND_PITCH;
	struct drm_tegra_gem_create gem_create;
	int ret;

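	/*
	 * Render targets are allocated blocklinear; everything else (cursor,
	 * linear software access) stays pitch-linear.
	 */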
	if (flags & BO_USE_RENDERING)
		compute_layout_blocklinear(width, height, format, &kind,
					   &block_height_log2, &stride, &size);
	else
		compute_layout_linear(width, height, format, &stride, &size);

	memset(&gem_create, 0, sizeof(gem_create));
	gem_create.size = size;
	gem_create.flags = 0;

	ret = drmIoctl(bo->drv->fd, DRM_IOCTL_TEGRA_GEM_CREATE, &gem_create);
	if (ret) {
		fprintf(stderr, "drv: DRM_IOCTL_TEGRA_GEM_CREATE failed "
			"(size=%u)\n", size);
		return ret;
	}

	bo->handles[0].u32 = gem_create.handle;
	bo->offsets[0] = 0;
	bo->total_size = bo->sizes[0] = size;
	bo->strides[0] = stride;

	if (kind != NV_MEM_KIND_PITCH) {
		struct drm_tegra_gem_set_tiling gem_tile;

		memset(&gem_tile, 0, sizeof(gem_tile));
		gem_tile.handle = bo->handles[0].u32;
		gem_tile.mode = DRM_TEGRA_GEM_TILING_MODE_BLOCK;
		gem_tile.value = block_height_log2;

		ret = drmCommandWriteRead(bo->drv->fd, DRM_TEGRA_GEM_SET_TILING,
					  &gem_tile, sizeof(gem_tile));
		if (ret < 0) {
			drv_gem_bo_destroy(bo);
			return ret;
		}

		/* Encode blocklinear parameters for EGLImage creation. */
		bo->tiling = (kind & 0xff) |
			     ((block_height_log2 & 0xf) << 8);
		bo->format_modifiers[0] = fourcc_mod_code(NV, bo->tiling);
	}

	return 0;
}

static void *tegra_bo_map(struct bo *bo, struct map_info *data, size_t plane)
{
	int ret;
	struct drm_tegra_gem_mmap gem_map;
	struct tegra_private_map_data *priv;

	memset(&gem_map, 0, sizeof(gem_map));
	gem_map.handle = bo->handles[0].u32;

	ret = drmCommandWriteRead(bo->drv->fd, DRM_TEGRA_GEM_MMAP, &gem_map,
				  sizeof(gem_map));
	if (ret < 0) {
		fprintf(stderr, "drv: DRM_TEGRA_GEM_MMAP failed\n");
		return MAP_FAILED;
	}

	void *addr = mmap(0, bo->total_size, PROT_READ | PROT_WRITE, MAP_SHARED,
			  bo->drv->fd, gem_map.offset);

	data->length = bo->total_size;

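	/*
	 * For blocklinear buffers, detile into a linear shadow copy and hand
	 * that back to the caller; tegra_bo_unmap retiles it on unmap.
	 */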
	if ((bo->tiling & 0xFF) == NV_MEM_KIND_C32_2CRA && addr != MAP_FAILED) {
		priv = calloc(1, sizeof(*priv));
		priv->untiled = calloc(1, bo->total_size);
		priv->tiled = addr;
		data->priv = priv;
		transfer_tiled_memory(bo, priv->tiled, priv->untiled,
				      TEGRA_READ_TILED_BUFFER);
		addr = priv->untiled;
	}

	return addr;
}

static int tegra_bo_unmap(struct bo *bo, struct map_info *data)
{
	if (data->priv) {
		struct tegra_private_map_data *priv = data->priv;
		transfer_tiled_memory(bo, priv->tiled, priv->untiled,
				      TEGRA_WRITE_TILED_BUFFER);
		data->addr = priv->tiled;
		free(priv->untiled);
		free(priv);
		data->priv = NULL;
	}

	return munmap(data->addr, data->length);
}

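/*
 * Destroy and import have no Tegra-specific requirements, so the generic
 * GEM/prime helpers are used directly.
 */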
struct backend backend_tegra =
{
	.name = "tegra",
	.init = tegra_init,
	.bo_create = tegra_bo_create,
	.bo_destroy = drv_gem_bo_destroy,
	.bo_import = drv_prime_bo_import,
	.bo_map = tegra_bo_map,
	.bo_unmap = tegra_bo_unmap,
};

#endif