anv: Rename cpp variable to "bs"
cpp (chars-per-pixel) is an integer and therefore fails to give useful
data about most compressed formats, whose byte count is only
well-defined per block, not per pixel. Instead, rename it to "bs",
which stands for block size (in bytes).
v2: Rename vk_format_for_bs to vk_format_for_size (Chad)
Use "block size" instead of "bs" in error message (Chad)
Reviewed-by: Chad Versace <chad.versace@intel.com>
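The motivation is easiest to see with a block-compressed format such as
BC1, which packs a 4x4 pixel block into 8 bytes (0.5 bytes per pixel, not
representable in an integer cpp field), while a block size in bytes stays
exact. The sketch below is illustrative only and not part of this patch:
the struct, the block_width/block_height fields, and the helper are
hypothetical stand-ins, since anv_format here carries only the bs field.

    /* Illustrative only: why an integer bytes-per-pixel field cannot
     * describe compressed formats, while a block size in bytes can. */
    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    struct example_format {
       uint8_t bs;           /* block size in bytes */
       uint8_t block_width;  /* pixels per block, horizontally */
       uint8_t block_height; /* pixels per block, vertically */
    };

    /* BC1: a 4x4 pixel block occupies 8 bytes. */
    static const struct example_format bc1 = {
       .bs = 8, .block_width = 4, .block_height = 4,
    };

    /* Row stride of a compressed surface, computed from the block size. */
    static uint32_t
    example_row_stride(const struct example_format *f, uint32_t width_px)
    {
       uint32_t blocks_per_row = (width_px + f->block_width - 1) / f->block_width;
       return blocks_per_row * f->bs;
    }

    int
    main(void)
    {
       /* A 256-pixel-wide BC1 surface: 64 blocks * 8 bytes = 512 bytes per row. */
       assert(example_row_stride(&bc1, 256) == 512);
       printf("stride = %u\n", (unsigned)example_row_stride(&bc1, 256));
       return 0;
    }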
diff --git a/src/vulkan/anv_formats.c b/src/vulkan/anv_formats.c
index 4efc537..2821234 100644
--- a/src/vulkan/anv_formats.c
+++ b/src/vulkan/anv_formats.c
@@ -30,7 +30,7 @@
[__vk_fmt] = { .vk_format = __vk_fmt, .name = #__vk_fmt, __VA_ARGS__ }
static const struct anv_format anv_formats[] = {
- fmt(VK_FORMAT_UNDEFINED, RAW, .cpp = 1, .num_channels = 1),
+ fmt(VK_FORMAT_UNDEFINED, RAW, .bs = 1, .num_channels = 1),
fmt(VK_FORMAT_R4G4_UNORM, UNSUPPORTED),
fmt(VK_FORMAT_R4G4_USCALED, UNSUPPORTED),
fmt(VK_FORMAT_R4G4B4A4_UNORM, UNSUPPORTED),
@@ -39,94 +39,94 @@
fmt(VK_FORMAT_R5G6B5_USCALED, UNSUPPORTED),
fmt(VK_FORMAT_R5G5B5A1_UNORM, UNSUPPORTED),
fmt(VK_FORMAT_R5G5B5A1_USCALED, UNSUPPORTED),
- fmt(VK_FORMAT_R8_UNORM, R8_UNORM, .cpp = 1, .num_channels = 1),
- fmt(VK_FORMAT_R8_SNORM, R8_SNORM, .cpp = 1, .num_channels = 1,),
- fmt(VK_FORMAT_R8_USCALED, R8_USCALED, .cpp = 1, .num_channels = 1),
- fmt(VK_FORMAT_R8_SSCALED, R8_SSCALED, .cpp = 1, .num_channels = 1),
- fmt(VK_FORMAT_R8_UINT, R8_UINT, .cpp = 1, .num_channels = 1),
- fmt(VK_FORMAT_R8_SINT, R8_SINT, .cpp = 1, .num_channels = 1),
+ fmt(VK_FORMAT_R8_UNORM, R8_UNORM, .bs = 1, .num_channels = 1),
+ fmt(VK_FORMAT_R8_SNORM, R8_SNORM, .bs = 1, .num_channels = 1,),
+ fmt(VK_FORMAT_R8_USCALED, R8_USCALED, .bs = 1, .num_channels = 1),
+ fmt(VK_FORMAT_R8_SSCALED, R8_SSCALED, .bs = 1, .num_channels = 1),
+ fmt(VK_FORMAT_R8_UINT, R8_UINT, .bs = 1, .num_channels = 1),
+ fmt(VK_FORMAT_R8_SINT, R8_SINT, .bs = 1, .num_channels = 1),
fmt(VK_FORMAT_R8_SRGB, UNSUPPORTED),
- fmt(VK_FORMAT_R8G8_UNORM, R8G8_UNORM, .cpp = 2, .num_channels = 2),
- fmt(VK_FORMAT_R8G8_SNORM, R8G8_SNORM, .cpp = 2, .num_channels = 2),
- fmt(VK_FORMAT_R8G8_USCALED, R8G8_USCALED, .cpp = 2, .num_channels = 2),
- fmt(VK_FORMAT_R8G8_SSCALED, R8G8_SSCALED, .cpp = 2, .num_channels = 2),
- fmt(VK_FORMAT_R8G8_UINT, R8G8_UINT, .cpp = 2, .num_channels = 2),
- fmt(VK_FORMAT_R8G8_SINT, R8G8_SINT, .cpp = 2, .num_channels = 2),
+ fmt(VK_FORMAT_R8G8_UNORM, R8G8_UNORM, .bs = 2, .num_channels = 2),
+ fmt(VK_FORMAT_R8G8_SNORM, R8G8_SNORM, .bs = 2, .num_channels = 2),
+ fmt(VK_FORMAT_R8G8_USCALED, R8G8_USCALED, .bs = 2, .num_channels = 2),
+ fmt(VK_FORMAT_R8G8_SSCALED, R8G8_SSCALED, .bs = 2, .num_channels = 2),
+ fmt(VK_FORMAT_R8G8_UINT, R8G8_UINT, .bs = 2, .num_channels = 2),
+ fmt(VK_FORMAT_R8G8_SINT, R8G8_SINT, .bs = 2, .num_channels = 2),
fmt(VK_FORMAT_R8G8_SRGB, UNSUPPORTED), /* L8A8_UNORM_SRGB */
- fmt(VK_FORMAT_R8G8B8_UNORM, R8G8B8X8_UNORM, .cpp = 3, .num_channels = 3),
- fmt(VK_FORMAT_R8G8B8_SNORM, R8G8B8_SNORM, .cpp = 3, .num_channels = 3),
- fmt(VK_FORMAT_R8G8B8_USCALED, R8G8B8_USCALED, .cpp = 3, .num_channels = 3),
- fmt(VK_FORMAT_R8G8B8_SSCALED, R8G8B8_SSCALED, .cpp = 3, .num_channels = 3),
- fmt(VK_FORMAT_R8G8B8_UINT, R8G8B8_UINT, .cpp = 3, .num_channels = 3),
- fmt(VK_FORMAT_R8G8B8_SINT, R8G8B8_SINT, .cpp = 3, .num_channels = 3),
+ fmt(VK_FORMAT_R8G8B8_UNORM, R8G8B8X8_UNORM, .bs = 3, .num_channels = 3),
+ fmt(VK_FORMAT_R8G8B8_SNORM, R8G8B8_SNORM, .bs = 3, .num_channels = 3),
+ fmt(VK_FORMAT_R8G8B8_USCALED, R8G8B8_USCALED, .bs = 3, .num_channels = 3),
+ fmt(VK_FORMAT_R8G8B8_SSCALED, R8G8B8_SSCALED, .bs = 3, .num_channels = 3),
+ fmt(VK_FORMAT_R8G8B8_UINT, R8G8B8_UINT, .bs = 3, .num_channels = 3),
+ fmt(VK_FORMAT_R8G8B8_SINT, R8G8B8_SINT, .bs = 3, .num_channels = 3),
fmt(VK_FORMAT_R8G8B8_SRGB, UNSUPPORTED), /* B8G8R8A8_UNORM_SRGB */
- fmt(VK_FORMAT_R8G8B8A8_UNORM, R8G8B8A8_UNORM, .cpp = 4, .num_channels = 4),
- fmt(VK_FORMAT_R8G8B8A8_SNORM, R8G8B8A8_SNORM, .cpp = 4, .num_channels = 4),
- fmt(VK_FORMAT_R8G8B8A8_USCALED, R8G8B8A8_USCALED, .cpp = 4, .num_channels = 4),
- fmt(VK_FORMAT_R8G8B8A8_SSCALED, R8G8B8A8_SSCALED, .cpp = 4, .num_channels = 4),
- fmt(VK_FORMAT_R8G8B8A8_UINT, R8G8B8A8_UINT, .cpp = 4, .num_channels = 4),
- fmt(VK_FORMAT_R8G8B8A8_SINT, R8G8B8A8_SINT, .cpp = 4, .num_channels = 4),
- fmt(VK_FORMAT_R8G8B8A8_SRGB, R8G8B8A8_UNORM_SRGB, .cpp = 4, .num_channels = 4),
- fmt(VK_FORMAT_R10G10B10A2_UNORM, R10G10B10A2_UNORM, .cpp = 4, .num_channels = 4),
- fmt(VK_FORMAT_R10G10B10A2_SNORM, R10G10B10A2_SNORM, .cpp = 4, .num_channels = 4),
- fmt(VK_FORMAT_R10G10B10A2_USCALED, R10G10B10A2_USCALED, .cpp = 4, .num_channels = 4),
- fmt(VK_FORMAT_R10G10B10A2_SSCALED, R10G10B10A2_SSCALED, .cpp = 4, .num_channels = 4),
- fmt(VK_FORMAT_R10G10B10A2_UINT, R10G10B10A2_UINT, .cpp = 4, .num_channels = 4),
- fmt(VK_FORMAT_R10G10B10A2_SINT, R10G10B10A2_SINT, .cpp = 4, .num_channels = 4),
- fmt(VK_FORMAT_R16_UNORM, R16_UNORM, .cpp = 2, .num_channels = 1),
- fmt(VK_FORMAT_R16_SNORM, R16_SNORM, .cpp = 2, .num_channels = 1),
- fmt(VK_FORMAT_R16_USCALED, R16_USCALED, .cpp = 2, .num_channels = 1),
- fmt(VK_FORMAT_R16_SSCALED, R16_SSCALED, .cpp = 2, .num_channels = 1),
- fmt(VK_FORMAT_R16_UINT, R16_UINT, .cpp = 2, .num_channels = 1),
- fmt(VK_FORMAT_R16_SINT, R16_SINT, .cpp = 2, .num_channels = 1),
- fmt(VK_FORMAT_R16_SFLOAT, R16_FLOAT, .cpp = 2, .num_channels = 1),
- fmt(VK_FORMAT_R16G16_UNORM, R16G16_UNORM, .cpp = 4, .num_channels = 2),
- fmt(VK_FORMAT_R16G16_SNORM, R16G16_SNORM, .cpp = 4, .num_channels = 2),
- fmt(VK_FORMAT_R16G16_USCALED, R16G16_USCALED, .cpp = 4, .num_channels = 2),
- fmt(VK_FORMAT_R16G16_SSCALED, R16G16_SSCALED, .cpp = 4, .num_channels = 2),
- fmt(VK_FORMAT_R16G16_UINT, R16G16_UINT, .cpp = 4, .num_channels = 2),
- fmt(VK_FORMAT_R16G16_SINT, R16G16_SINT, .cpp = 4, .num_channels = 2),
- fmt(VK_FORMAT_R16G16_SFLOAT, R16G16_FLOAT, .cpp = 4, .num_channels = 2),
- fmt(VK_FORMAT_R16G16B16_UNORM, R16G16B16_UNORM, .cpp = 6, .num_channels = 3),
- fmt(VK_FORMAT_R16G16B16_SNORM, R16G16B16_SNORM, .cpp = 6, .num_channels = 3),
- fmt(VK_FORMAT_R16G16B16_USCALED, R16G16B16_USCALED, .cpp = 6, .num_channels = 3),
- fmt(VK_FORMAT_R16G16B16_SSCALED, R16G16B16_SSCALED, .cpp = 6, .num_channels = 3),
- fmt(VK_FORMAT_R16G16B16_UINT, R16G16B16_UINT, .cpp = 6, .num_channels = 3),
- fmt(VK_FORMAT_R16G16B16_SINT, R16G16B16_SINT, .cpp = 6, .num_channels = 3),
- fmt(VK_FORMAT_R16G16B16_SFLOAT, R16G16B16_FLOAT, .cpp = 6, .num_channels = 3),
- fmt(VK_FORMAT_R16G16B16A16_UNORM, R16G16B16A16_UNORM, .cpp = 8, .num_channels = 4),
- fmt(VK_FORMAT_R16G16B16A16_SNORM, R16G16B16A16_SNORM, .cpp = 8, .num_channels = 4),
- fmt(VK_FORMAT_R16G16B16A16_USCALED, R16G16B16A16_USCALED, .cpp = 8, .num_channels = 4),
- fmt(VK_FORMAT_R16G16B16A16_SSCALED, R16G16B16A16_SSCALED, .cpp = 8, .num_channels = 4),
- fmt(VK_FORMAT_R16G16B16A16_UINT, R16G16B16A16_UINT, .cpp = 8, .num_channels = 4),
- fmt(VK_FORMAT_R16G16B16A16_SINT, R16G16B16A16_SINT, .cpp = 8, .num_channels = 4),
- fmt(VK_FORMAT_R16G16B16A16_SFLOAT, R16G16B16A16_FLOAT, .cpp = 8, .num_channels = 4),
- fmt(VK_FORMAT_R32_UINT, R32_UINT, .cpp = 4, .num_channels = 1,),
- fmt(VK_FORMAT_R32_SINT, R32_SINT, .cpp = 4, .num_channels = 1,),
- fmt(VK_FORMAT_R32_SFLOAT, R32_FLOAT, .cpp = 4, .num_channels = 1,),
- fmt(VK_FORMAT_R32G32_UINT, R32G32_UINT, .cpp = 8, .num_channels = 2,),
- fmt(VK_FORMAT_R32G32_SINT, R32G32_SINT, .cpp = 8, .num_channels = 2,),
- fmt(VK_FORMAT_R32G32_SFLOAT, R32G32_FLOAT, .cpp = 8, .num_channels = 2,),
- fmt(VK_FORMAT_R32G32B32_UINT, R32G32B32_UINT, .cpp = 12, .num_channels = 3,),
- fmt(VK_FORMAT_R32G32B32_SINT, R32G32B32_SINT, .cpp = 12, .num_channels = 3,),
- fmt(VK_FORMAT_R32G32B32_SFLOAT, R32G32B32_FLOAT, .cpp = 12, .num_channels = 3,),
- fmt(VK_FORMAT_R32G32B32A32_UINT, R32G32B32A32_UINT, .cpp = 16, .num_channels = 4,),
- fmt(VK_FORMAT_R32G32B32A32_SINT, R32G32B32A32_SINT, .cpp = 16, .num_channels = 4,),
- fmt(VK_FORMAT_R32G32B32A32_SFLOAT, R32G32B32A32_FLOAT, .cpp = 16, .num_channels = 4,),
- fmt(VK_FORMAT_R64_SFLOAT, R64_FLOAT, .cpp = 8, .num_channels = 1),
- fmt(VK_FORMAT_R64G64_SFLOAT, R64G64_FLOAT, .cpp = 16, .num_channels = 2),
- fmt(VK_FORMAT_R64G64B64_SFLOAT, R64G64B64_FLOAT, .cpp = 24, .num_channels = 3),
- fmt(VK_FORMAT_R64G64B64A64_SFLOAT, R64G64B64A64_FLOAT, .cpp = 32, .num_channels = 4),
- fmt(VK_FORMAT_R11G11B10_UFLOAT, R11G11B10_FLOAT, .cpp = 4, .num_channels = 3),
- fmt(VK_FORMAT_R9G9B9E5_UFLOAT, R9G9B9E5_SHAREDEXP, .cpp = 4, .num_channels = 3),
+ fmt(VK_FORMAT_R8G8B8A8_UNORM, R8G8B8A8_UNORM, .bs = 4, .num_channels = 4),
+ fmt(VK_FORMAT_R8G8B8A8_SNORM, R8G8B8A8_SNORM, .bs = 4, .num_channels = 4),
+ fmt(VK_FORMAT_R8G8B8A8_USCALED, R8G8B8A8_USCALED, .bs = 4, .num_channels = 4),
+ fmt(VK_FORMAT_R8G8B8A8_SSCALED, R8G8B8A8_SSCALED, .bs = 4, .num_channels = 4),
+ fmt(VK_FORMAT_R8G8B8A8_UINT, R8G8B8A8_UINT, .bs = 4, .num_channels = 4),
+ fmt(VK_FORMAT_R8G8B8A8_SINT, R8G8B8A8_SINT, .bs = 4, .num_channels = 4),
+ fmt(VK_FORMAT_R8G8B8A8_SRGB, R8G8B8A8_UNORM_SRGB, .bs = 4, .num_channels = 4),
+ fmt(VK_FORMAT_R10G10B10A2_UNORM, R10G10B10A2_UNORM, .bs = 4, .num_channels = 4),
+ fmt(VK_FORMAT_R10G10B10A2_SNORM, R10G10B10A2_SNORM, .bs = 4, .num_channels = 4),
+ fmt(VK_FORMAT_R10G10B10A2_USCALED, R10G10B10A2_USCALED, .bs = 4, .num_channels = 4),
+ fmt(VK_FORMAT_R10G10B10A2_SSCALED, R10G10B10A2_SSCALED, .bs = 4, .num_channels = 4),
+ fmt(VK_FORMAT_R10G10B10A2_UINT, R10G10B10A2_UINT, .bs = 4, .num_channels = 4),
+ fmt(VK_FORMAT_R10G10B10A2_SINT, R10G10B10A2_SINT, .bs = 4, .num_channels = 4),
+ fmt(VK_FORMAT_R16_UNORM, R16_UNORM, .bs = 2, .num_channels = 1),
+ fmt(VK_FORMAT_R16_SNORM, R16_SNORM, .bs = 2, .num_channels = 1),
+ fmt(VK_FORMAT_R16_USCALED, R16_USCALED, .bs = 2, .num_channels = 1),
+ fmt(VK_FORMAT_R16_SSCALED, R16_SSCALED, .bs = 2, .num_channels = 1),
+ fmt(VK_FORMAT_R16_UINT, R16_UINT, .bs = 2, .num_channels = 1),
+ fmt(VK_FORMAT_R16_SINT, R16_SINT, .bs = 2, .num_channels = 1),
+ fmt(VK_FORMAT_R16_SFLOAT, R16_FLOAT, .bs = 2, .num_channels = 1),
+ fmt(VK_FORMAT_R16G16_UNORM, R16G16_UNORM, .bs = 4, .num_channels = 2),
+ fmt(VK_FORMAT_R16G16_SNORM, R16G16_SNORM, .bs = 4, .num_channels = 2),
+ fmt(VK_FORMAT_R16G16_USCALED, R16G16_USCALED, .bs = 4, .num_channels = 2),
+ fmt(VK_FORMAT_R16G16_SSCALED, R16G16_SSCALED, .bs = 4, .num_channels = 2),
+ fmt(VK_FORMAT_R16G16_UINT, R16G16_UINT, .bs = 4, .num_channels = 2),
+ fmt(VK_FORMAT_R16G16_SINT, R16G16_SINT, .bs = 4, .num_channels = 2),
+ fmt(VK_FORMAT_R16G16_SFLOAT, R16G16_FLOAT, .bs = 4, .num_channels = 2),
+ fmt(VK_FORMAT_R16G16B16_UNORM, R16G16B16_UNORM, .bs = 6, .num_channels = 3),
+ fmt(VK_FORMAT_R16G16B16_SNORM, R16G16B16_SNORM, .bs = 6, .num_channels = 3),
+ fmt(VK_FORMAT_R16G16B16_USCALED, R16G16B16_USCALED, .bs = 6, .num_channels = 3),
+ fmt(VK_FORMAT_R16G16B16_SSCALED, R16G16B16_SSCALED, .bs = 6, .num_channels = 3),
+ fmt(VK_FORMAT_R16G16B16_UINT, R16G16B16_UINT, .bs = 6, .num_channels = 3),
+ fmt(VK_FORMAT_R16G16B16_SINT, R16G16B16_SINT, .bs = 6, .num_channels = 3),
+ fmt(VK_FORMAT_R16G16B16_SFLOAT, R16G16B16_FLOAT, .bs = 6, .num_channels = 3),
+ fmt(VK_FORMAT_R16G16B16A16_UNORM, R16G16B16A16_UNORM, .bs = 8, .num_channels = 4),
+ fmt(VK_FORMAT_R16G16B16A16_SNORM, R16G16B16A16_SNORM, .bs = 8, .num_channels = 4),
+ fmt(VK_FORMAT_R16G16B16A16_USCALED, R16G16B16A16_USCALED, .bs = 8, .num_channels = 4),
+ fmt(VK_FORMAT_R16G16B16A16_SSCALED, R16G16B16A16_SSCALED, .bs = 8, .num_channels = 4),
+ fmt(VK_FORMAT_R16G16B16A16_UINT, R16G16B16A16_UINT, .bs = 8, .num_channels = 4),
+ fmt(VK_FORMAT_R16G16B16A16_SINT, R16G16B16A16_SINT, .bs = 8, .num_channels = 4),
+ fmt(VK_FORMAT_R16G16B16A16_SFLOAT, R16G16B16A16_FLOAT, .bs = 8, .num_channels = 4),
+ fmt(VK_FORMAT_R32_UINT, R32_UINT, .bs = 4, .num_channels = 1,),
+ fmt(VK_FORMAT_R32_SINT, R32_SINT, .bs = 4, .num_channels = 1,),
+ fmt(VK_FORMAT_R32_SFLOAT, R32_FLOAT, .bs = 4, .num_channels = 1,),
+ fmt(VK_FORMAT_R32G32_UINT, R32G32_UINT, .bs = 8, .num_channels = 2,),
+ fmt(VK_FORMAT_R32G32_SINT, R32G32_SINT, .bs = 8, .num_channels = 2,),
+ fmt(VK_FORMAT_R32G32_SFLOAT, R32G32_FLOAT, .bs = 8, .num_channels = 2,),
+ fmt(VK_FORMAT_R32G32B32_UINT, R32G32B32_UINT, .bs = 12, .num_channels = 3,),
+ fmt(VK_FORMAT_R32G32B32_SINT, R32G32B32_SINT, .bs = 12, .num_channels = 3,),
+ fmt(VK_FORMAT_R32G32B32_SFLOAT, R32G32B32_FLOAT, .bs = 12, .num_channels = 3,),
+ fmt(VK_FORMAT_R32G32B32A32_UINT, R32G32B32A32_UINT, .bs = 16, .num_channels = 4,),
+ fmt(VK_FORMAT_R32G32B32A32_SINT, R32G32B32A32_SINT, .bs = 16, .num_channels = 4,),
+ fmt(VK_FORMAT_R32G32B32A32_SFLOAT, R32G32B32A32_FLOAT, .bs = 16, .num_channels = 4,),
+ fmt(VK_FORMAT_R64_SFLOAT, R64_FLOAT, .bs = 8, .num_channels = 1),
+ fmt(VK_FORMAT_R64G64_SFLOAT, R64G64_FLOAT, .bs = 16, .num_channels = 2),
+ fmt(VK_FORMAT_R64G64B64_SFLOAT, R64G64B64_FLOAT, .bs = 24, .num_channels = 3),
+ fmt(VK_FORMAT_R64G64B64A64_SFLOAT, R64G64B64A64_FLOAT, .bs = 32, .num_channels = 4),
+ fmt(VK_FORMAT_R11G11B10_UFLOAT, R11G11B10_FLOAT, .bs = 4, .num_channels = 3),
+ fmt(VK_FORMAT_R9G9B9E5_UFLOAT, R9G9B9E5_SHAREDEXP, .bs = 4, .num_channels = 3),
- fmt(VK_FORMAT_D16_UNORM, R16_UNORM, .cpp = 2, .num_channels = 1, .depth_format = D16_UNORM),
- fmt(VK_FORMAT_D24_UNORM_X8, R24_UNORM_X8_TYPELESS, .cpp = 4, .num_channels = 1, .depth_format = D24_UNORM_X8_UINT),
- fmt(VK_FORMAT_D32_SFLOAT, R32_FLOAT, .cpp = 4, .num_channels = 1, .depth_format = D32_FLOAT),
- fmt(VK_FORMAT_S8_UINT, R8_UINT, .cpp = 1, .num_channels = 1, .has_stencil = true),
- fmt(VK_FORMAT_D16_UNORM_S8_UINT, R16_UNORM, .cpp = 2, .num_channels = 2, .depth_format = D16_UNORM, .has_stencil = true),
- fmt(VK_FORMAT_D24_UNORM_S8_UINT, R24_UNORM_X8_TYPELESS, .cpp = 4, .num_channels = 2, .depth_format = D24_UNORM_X8_UINT, .has_stencil = true),
- fmt(VK_FORMAT_D32_SFLOAT_S8_UINT, R32_FLOAT, .cpp = 4, .num_channels = 2, .depth_format = D32_FLOAT, .has_stencil = true),
+ fmt(VK_FORMAT_D16_UNORM, R16_UNORM, .bs = 2, .num_channels = 1, .depth_format = D16_UNORM),
+ fmt(VK_FORMAT_D24_UNORM_X8, R24_UNORM_X8_TYPELESS, .bs = 4, .num_channels = 1, .depth_format = D24_UNORM_X8_UINT),
+ fmt(VK_FORMAT_D32_SFLOAT, R32_FLOAT, .bs = 4, .num_channels = 1, .depth_format = D32_FLOAT),
+ fmt(VK_FORMAT_S8_UINT, R8_UINT, .bs = 1, .num_channels = 1, .has_stencil = true),
+ fmt(VK_FORMAT_D16_UNORM_S8_UINT, R16_UNORM, .bs = 2, .num_channels = 2, .depth_format = D16_UNORM, .has_stencil = true),
+ fmt(VK_FORMAT_D24_UNORM_S8_UINT, R24_UNORM_X8_TYPELESS, .bs = 4, .num_channels = 2, .depth_format = D24_UNORM_X8_UINT, .has_stencil = true),
+ fmt(VK_FORMAT_D32_SFLOAT_S8_UINT, R32_FLOAT, .bs = 4, .num_channels = 2, .depth_format = D32_FLOAT, .has_stencil = true),
fmt(VK_FORMAT_BC1_RGB_UNORM, UNSUPPORTED),
fmt(VK_FORMAT_BC1_RGB_SRGB, UNSUPPORTED),
@@ -182,9 +182,9 @@
fmt(VK_FORMAT_ASTC_12x10_SRGB, UNSUPPORTED),
fmt(VK_FORMAT_ASTC_12x12_UNORM, UNSUPPORTED),
fmt(VK_FORMAT_ASTC_12x12_SRGB, UNSUPPORTED),
- fmt(VK_FORMAT_B4G4R4A4_UNORM, B4G4R4A4_UNORM, .cpp = 2, .num_channels = 4),
- fmt(VK_FORMAT_B5G5R5A1_UNORM, B5G5R5A1_UNORM, .cpp = 2, .num_channels = 4),
- fmt(VK_FORMAT_B5G6R5_UNORM, B5G6R5_UNORM, .cpp = 2, .num_channels = 3),
+ fmt(VK_FORMAT_B4G4R4A4_UNORM, B4G4R4A4_UNORM, .bs = 2, .num_channels = 4),
+ fmt(VK_FORMAT_B5G5R5A1_UNORM, B5G5R5A1_UNORM, .bs = 2, .num_channels = 4),
+ fmt(VK_FORMAT_B5G6R5_UNORM, B5G6R5_UNORM, .bs = 2, .num_channels = 3),
fmt(VK_FORMAT_B5G6R5_USCALED, UNSUPPORTED),
fmt(VK_FORMAT_B8G8R8_UNORM, UNSUPPORTED),
fmt(VK_FORMAT_B8G8R8_SNORM, UNSUPPORTED),
@@ -193,19 +193,19 @@
fmt(VK_FORMAT_B8G8R8_UINT, UNSUPPORTED),
fmt(VK_FORMAT_B8G8R8_SINT, UNSUPPORTED),
fmt(VK_FORMAT_B8G8R8_SRGB, UNSUPPORTED),
- fmt(VK_FORMAT_B8G8R8A8_UNORM, B8G8R8A8_UNORM, .cpp = 4, .num_channels = 4),
+ fmt(VK_FORMAT_B8G8R8A8_UNORM, B8G8R8A8_UNORM, .bs = 4, .num_channels = 4),
fmt(VK_FORMAT_B8G8R8A8_SNORM, UNSUPPORTED),
fmt(VK_FORMAT_B8G8R8A8_USCALED, UNSUPPORTED),
fmt(VK_FORMAT_B8G8R8A8_SSCALED, UNSUPPORTED),
fmt(VK_FORMAT_B8G8R8A8_UINT, UNSUPPORTED),
fmt(VK_FORMAT_B8G8R8A8_SINT, UNSUPPORTED),
- fmt(VK_FORMAT_B8G8R8A8_SRGB, B8G8R8A8_UNORM_SRGB, .cpp = 4, .num_channels = 4),
- fmt(VK_FORMAT_B10G10R10A2_UNORM, B10G10R10A2_UNORM, .cpp = 4, .num_channels = 4),
- fmt(VK_FORMAT_B10G10R10A2_SNORM, B10G10R10A2_SNORM, .cpp = 4, .num_channels = 4),
- fmt(VK_FORMAT_B10G10R10A2_USCALED, B10G10R10A2_USCALED, .cpp = 4, .num_channels = 4),
- fmt(VK_FORMAT_B10G10R10A2_SSCALED, B10G10R10A2_SSCALED, .cpp = 4, .num_channels = 4),
- fmt(VK_FORMAT_B10G10R10A2_UINT, B10G10R10A2_UINT, .cpp = 4, .num_channels = 4),
- fmt(VK_FORMAT_B10G10R10A2_SINT, B10G10R10A2_SINT, .cpp = 4, .num_channels = 4)
+ fmt(VK_FORMAT_B8G8R8A8_SRGB, B8G8R8A8_UNORM_SRGB, .bs = 4, .num_channels = 4),
+ fmt(VK_FORMAT_B10G10R10A2_UNORM, B10G10R10A2_UNORM, .bs = 4, .num_channels = 4),
+ fmt(VK_FORMAT_B10G10R10A2_SNORM, B10G10R10A2_SNORM, .bs = 4, .num_channels = 4),
+ fmt(VK_FORMAT_B10G10R10A2_USCALED, B10G10R10A2_USCALED, .bs = 4, .num_channels = 4),
+ fmt(VK_FORMAT_B10G10R10A2_SSCALED, B10G10R10A2_SSCALED, .bs = 4, .num_channels = 4),
+ fmt(VK_FORMAT_B10G10R10A2_UINT, B10G10R10A2_UINT, .bs = 4, .num_channels = 4),
+ fmt(VK_FORMAT_B10G10R10A2_SINT, B10G10R10A2_SINT, .bs = 4, .num_channels = 4)
};
#undef fmt
diff --git a/src/vulkan/anv_image.c b/src/vulkan/anv_image.c
index e3991e2..8aa74c2 100644
--- a/src/vulkan/anv_image.c
+++ b/src/vulkan/anv_image.c
@@ -225,7 +225,7 @@
*/
assert(anv_is_aligned(qpitch, j));
- uint32_t stride = align_u32(mt_width * format->cpp, tile_info->width);
+ uint32_t stride = align_u32(mt_width * format->bs, tile_info->width);
if (create_info->stride > 0)
stride = create_info->stride;
@@ -490,14 +490,14 @@
assert(!image->format->has_stencil);
assert(!view_format_info->depth_format);
assert(!view_format_info->has_stencil);
- assert(view_format_info->cpp == image->format->cpp);
+ assert(view_format_info->bs == image->format->bs);
} else if (subresource->aspectMask & ds_flags) {
assert((subresource->aspectMask & ~ds_flags) == 0);
if (subresource->aspectMask & VK_IMAGE_ASPECT_STENCIL_BIT) {
assert(image->format->depth_format);
assert(view_format_info->depth_format);
- assert(view_format_info->cpp == image->format->cpp);
+ assert(view_format_info->bs == image->format->bs);
}
if (subresource->aspectMask & VK_IMAGE_ASPECT_STENCIL) {
diff --git a/src/vulkan/anv_meta.c b/src/vulkan/anv_meta.c
index 0f67039..b0f042f 100644
--- a/src/vulkan/anv_meta.c
+++ b/src/vulkan/anv_meta.c
@@ -630,9 +630,9 @@
}
static VkFormat
-vk_format_for_cpp(int cpp)
+vk_format_for_size(int bs)
{
- switch (cpp) {
+ switch (bs) {
case 1: return VK_FORMAT_R8_UINT;
case 2: return VK_FORMAT_R8G8_UINT;
case 3: return VK_FORMAT_R8G8B8_UINT;
@@ -642,7 +642,7 @@
case 12: return VK_FORMAT_R32G32B32_UINT;
case 16: return VK_FORMAT_R32G32B32A32_UINT;
default:
- unreachable("Invalid format cpp");
+ unreachable("Invalid format block size");
}
}
@@ -770,30 +770,30 @@
/* First, we compute the biggest format that can be used with the
* given offsets and size.
*/
- int cpp = 16;
+ int bs = 16;
int fs = ffs(src_offset) - 1;
if (fs != -1)
- cpp = MIN2(cpp, 1 << fs);
- assert(src_offset % cpp == 0);
+ bs = MIN2(bs, 1 << fs);
+ assert(src_offset % bs == 0);
fs = ffs(dest_offset) - 1;
if (fs != -1)
- cpp = MIN2(cpp, 1 << fs);
- assert(dest_offset % cpp == 0);
+ bs = MIN2(bs, 1 << fs);
+ assert(dest_offset % bs == 0);
fs = ffs(pRegions[r].copySize) - 1;
if (fs != -1)
- cpp = MIN2(cpp, 1 << fs);
- assert(pRegions[r].copySize % cpp == 0);
+ bs = MIN2(bs, 1 << fs);
+ assert(pRegions[r].copySize % bs == 0);
- VkFormat copy_format = vk_format_for_cpp(cpp);
+ VkFormat copy_format = vk_format_for_size(bs);
/* This is maximum possible width/height our HW can handle */
uint64_t max_surface_dim = 1 << 14;
/* First, we make a bunch of max-sized copies */
- uint64_t max_copy_size = max_surface_dim * max_surface_dim * cpp;
+ uint64_t max_copy_size = max_surface_dim * max_surface_dim * bs;
while (copy_size > max_copy_size) {
do_buffer_copy(cmd_buffer, src_buffer->bo, src_offset,
dest_buffer->bo, dest_offset,
@@ -803,10 +803,10 @@
dest_offset += max_copy_size;
}
- uint64_t height = copy_size / (max_surface_dim * cpp);
+ uint64_t height = copy_size / (max_surface_dim * bs);
assert(height < max_surface_dim);
if (height != 0) {
- uint64_t rect_copy_size = height * max_surface_dim * cpp;
+ uint64_t rect_copy_size = height * max_surface_dim * bs;
do_buffer_copy(cmd_buffer, src_buffer->bo, src_offset,
dest_buffer->bo, dest_offset,
max_surface_dim, height, copy_format);
@@ -818,7 +818,7 @@
if (copy_size != 0) {
do_buffer_copy(cmd_buffer, src_buffer->bo, src_offset,
dest_buffer->bo, dest_offset,
- copy_size / cpp, 1, copy_format);
+ copy_size / bs, 1, copy_format);
}
}
diff --git a/src/vulkan/anv_private.h b/src/vulkan/anv_private.h
index 5009047..03d033f 100644
--- a/src/vulkan/anv_private.h
+++ b/src/vulkan/anv_private.h
@@ -1207,7 +1207,7 @@
const VkFormat vk_format;
const char *name;
uint16_t surface_format; /**< RENDER_SURFACE_STATE.SurfaceFormat */
- uint8_t cpp; /**< Bytes-per-pixel of anv_format::surface_format. */
+ uint8_t bs; /**< Block size (in bytes) of anv_format::surface_format. */
uint8_t num_channels;
uint16_t depth_format; /**< 3DSTATE_DEPTH_BUFFER.SurfaceFormat */
bool has_stencil;
diff --git a/src/vulkan/anv_wsi_wayland.c b/src/vulkan/anv_wsi_wayland.c
index f87f3ff..1828b09 100644
--- a/src/vulkan/anv_wsi_wayland.c
+++ b/src/vulkan/anv_wsi_wayland.c
@@ -60,7 +60,7 @@
return;
/* Don't add formats which aren't supported by the driver */
- if (anv_format_for_vk_format(format)->cpp == 0)
+ if (anv_format_for_vk_format(format)->bs == 0)
return;
f = anv_vector_add(&display->formats);