Fix memory allocation tests with device coherent memory

The VK_AMD_device_coherent_memory extension needs to be enabled in order
to use VkPhysicalDeviceCoherentMemoryFeaturesAMD, so chain that structure
into VkPhysicalDeviceFeatures2 only when the extension is supported, and
skip memory types carrying VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD
when the deviceCoherentMemory feature is unavailable.
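
As a rough illustration, the feature-query pattern the fix relies on looks
like the sketch below (plain Vulkan, outside the CTS helpers; the
isExtensionSupported() helper is a hypothetical stand-in for the usual
vkEnumerateDeviceExtensionProperties query):

  #include <vulkan/vulkan.h>

  // Hypothetical helper; in practice query vkEnumerateDeviceExtensionProperties.
  bool isExtensionSupported(VkPhysicalDevice physDev, const char* name);

  bool queryDeviceCoherentMemorySupport(VkPhysicalDevice physDev)
  {
      VkPhysicalDeviceCoherentMemoryFeaturesAMD coherentMemFeatures = {};
      coherentMemFeatures.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_COHERENT_MEMORY_FEATURES_AMD;

      VkPhysicalDeviceFeatures2 features2 = {};
      features2.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2;

      // Chain the AMD feature struct only when the extension is reported,
      // mirroring what this change does in BaseAllocateTestInstance.
      if (isExtensionSupported(physDev, VK_AMD_DEVICE_COHERENT_MEMORY_EXTENSION_NAME))
          features2.pNext = &coherentMemFeatures;

      vkGetPhysicalDeviceFeatures2(physDev, &features2);
      return coherentMemFeatures.deviceCoherentMemory == VK_TRUE;
  }

When the feature comes back VK_FALSE, memory types carrying
VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD are skipped instead of being
allocated from.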

Components: Vulkan

VK-GL-CTS issue: 3973

Affected tests:
dEQP-VK.memory.allocation.*

Change-Id: Ia866ab947bbd1af867f409053f830598965dea12
diff --git a/external/vulkancts/modules/vulkan/memory/vktMemoryAllocationTests.cpp b/external/vulkancts/modules/vulkan/memory/vktMemoryAllocationTests.cpp
index d1381e7..e98b173 100644
--- a/external/vulkancts/modules/vulkan/memory/vktMemoryAllocationTests.cpp
+++ b/external/vulkancts/modules/vulkan/memory/vktMemoryAllocationTests.cpp
@@ -120,12 +120,13 @@
 class BaseAllocateTestInstance : public TestInstance
 {
 public:
-	BaseAllocateTestInstance		(Context& context, bool useDeviceGroups)
-		: TestInstance				(context)
-		, m_useDeviceGroups			(useDeviceGroups)
-		, m_subsetAllocationAllowed	(false)
-		, m_numPhysDevices			(1)
-		, m_memoryProperties		(getPhysicalDeviceMemoryProperties(context.getInstanceInterface(), context.getPhysicalDevice()))
+	BaseAllocateTestInstance			(Context& context, bool useDeviceGroups)
+		: TestInstance					(context)
+		, m_useDeviceGroups				(useDeviceGroups)
+		, m_subsetAllocationAllowed		(false)
+		, m_numPhysDevices				(1)
+		, m_memoryProperties			(getPhysicalDeviceMemoryProperties(context.getInstanceInterface(), context.getPhysicalDevice()))
+		, m_deviceCoherentMemSupported	(false)
 	{
 		if (m_useDeviceGroups)
 			createDeviceGroup();
@@ -149,6 +150,7 @@
 	VkMemoryAllocateFlagsInfo				m_allocFlagsInfo;
 	deUint32								m_numPhysDevices;
 	VkPhysicalDeviceMemoryProperties		m_memoryProperties;
+	bool									m_deviceCoherentMemSupported;
 
 private:
 	CustomInstance					m_deviceGroupInstance;
@@ -164,7 +166,8 @@
 	const float										queuePriority				= 1.0f;
 	deUint32										queueFamilyIndex			= 0;
 	bool											protMemSupported			= false;
-	bool											deviceCoherentMemSupported	= false;
+
+	void* pNext = DE_NULL;
 
 	VkPhysicalDeviceProtectedMemoryFeatures protectedMemoryFeature =
 	{
@@ -172,14 +175,15 @@
 		DE_NULL,													// const void*						pNext
 		VK_FALSE													// VkBool32							protectedMemory;
 	};
+	pNext = &protectedMemoryFeature;
 
 	VkPhysicalDeviceCoherentMemoryFeaturesAMD coherentMemoryFeatures =
 	{
 		VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_COHERENT_MEMORY_FEATURES_AMD, // VkStructureType                                      sType
-		&protectedMemoryFeature,										// const void*                                          pNext
+		pNext,															// const void*                                          pNext
 		VK_FALSE                                                        // VkBool32                                             deviceCoherentMemory;
 	};
-
+	if (m_context.isDeviceFunctionalitySupported("VK_AMD_device_coherent_memory")) pNext = &coherentMemoryFeatures;
 
 	VkPhysicalDeviceFeatures				features;
 	deMemset(&features, 0, sizeof(vk::VkPhysicalDeviceFeatures));
@@ -187,14 +191,14 @@
 	VkPhysicalDeviceFeatures2				features2		=
 	{
 		VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2,				// VkStructureType					sType
-		&coherentMemoryFeatures,									// const void*						pNext
+		pNext,														// const void*						pNext
 		features													// VkPhysicalDeviceFeatures			features
 	};
 
 	// Check if the physical device supports the protected memory feature
 	instanceDriver.getPhysicalDeviceFeatures2(m_context.getPhysicalDevice(), &features2);
-	protMemSupported			= protectedMemoryFeature.protectedMemory;
-	deviceCoherentMemSupported	= coherentMemoryFeatures.deviceCoherentMemory;
+	protMemSupported				= protectedMemoryFeature.protectedMemory;
+	m_deviceCoherentMemSupported	= coherentMemoryFeatures.deviceCoherentMemory;
 
 	VkDeviceQueueCreateFlags queueCreateFlags = protMemSupported ? (vk::VkDeviceQueueCreateFlags)vk::VK_DEVICE_QUEUE_CREATE_PROTECTED_BIT : 0u;
 
@@ -211,7 +215,7 @@
 	const VkDeviceCreateInfo						deviceInfo		=
 	{
 		VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO,											// VkStructureType					sType;
-		(protMemSupported || deviceCoherentMemSupported) ? &features2 : DE_NULL,		// const void*						pNext;
+		(protMemSupported || m_deviceCoherentMemSupported) ? &features2 : DE_NULL,		// const void*						pNext;
 		(VkDeviceCreateFlags)0,															// VkDeviceCreateFlags				flags;
 		1u,																				// uint32_t							queueCreateInfoCount;
 		&queueInfo,																		// const VkDeviceQueueCreateInfo*	pQueueCreateInfos;
@@ -219,7 +223,7 @@
 		DE_NULL,																		// const char* const*				ppEnabledLayerNames;
 		0u,																				// uint32_t							enabledExtensionCount;
 		DE_NULL,																		// const char* const*				ppEnabledExtensionNames;
-		(protMemSupported || deviceCoherentMemSupported) ? DE_NULL : &deviceFeatures	// const VkPhysicalDeviceFeatures*	pEnabledFeatures;
+		(protMemSupported || m_deviceCoherentMemSupported) ? DE_NULL : &deviceFeatures	// const VkPhysicalDeviceFeatures*	pEnabledFeatures;
 	};
 
 	m_logicalDevice		= createCustomDevice(m_context.getTestContext().getCommandLine().isValidationEnabled(), m_context.getPlatformInterface(), instance, instanceDriver, m_context.getPhysicalDevice(), &deviceInfo);
@@ -348,16 +352,23 @@
 			DE_FATAL("Unknown allocation order");
 	}
 
-	try
+	bool memoryTypeSupported = true;
+#ifndef CTS_USES_VULKANSC
+	memoryTypeSupported = !((m_memoryProperties.memoryTypes[m_memoryTypeIndex].propertyFlags & vk::VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD) > 0 && !m_deviceCoherentMemSupported);
+#endif
+
+	if (memoryTypeSupported)
 	{
-		const VkMemoryType		memoryType				= m_memoryProperties.memoryTypes[m_memoryTypeIndex];
-		const VkMemoryHeap		memoryHeap				= m_memoryProperties.memoryHeaps[memoryType.heapIndex];
-
-		// Create a buffer to get the required size
+		try
 		{
-			const VkDeviceSize bufferSize	= m_config.memorySize ? *m_config.memorySize : (VkDeviceSize) (*m_config.memoryPercentage * (float) memoryHeap.size);
+			const VkMemoryType		memoryType = m_memoryProperties.memoryTypes[m_memoryTypeIndex];
+			const VkMemoryHeap		memoryHeap = m_memoryProperties.memoryHeaps[memoryType.heapIndex];
 
-			VkBufferCreateInfo bufferParams =
+			// Create a buffer to get the required size
+			{
+				const VkDeviceSize bufferSize = m_config.memorySize ? *m_config.memorySize : (VkDeviceSize)(*m_config.memoryPercentage * (float)memoryHeap.size);
+
+				VkBufferCreateInfo bufferParams =
 				{
 					VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO,                       // VkStructureType          sType;
 					DE_NULL,                                                    // const void*              pNext;
@@ -369,144 +380,145 @@
 					&queueFamilyIndex,                                          // const uint32_t*          pQueueFamilyIndices;
 				};
 
-			buffer = createBuffer(vkd, device, &bufferParams);
-			vkd.getBufferMemoryRequirements(device, *buffer, &memReqs);
-		}
+				buffer = createBuffer(vkd, device, &bufferParams);
+				vkd.getBufferMemoryRequirements(device, *buffer, &memReqs);
+			}
 
-		const VkDeviceSize		allocationSize	= (m_config.memorySize ? memReqs.size : (VkDeviceSize)(*m_config.memoryPercentage * (float)memoryHeap.size));
-		const VkDeviceSize		roundedUpAllocationSize	 = roundUpToNextMultiple(allocationSize, m_memoryLimits.deviceMemoryAllocationGranularity);
-		vector<VkDeviceMemory>	memoryObjects	(m_config.memoryAllocationCount, (VkDeviceMemory)0);
+			const VkDeviceSize		allocationSize = (m_config.memorySize ? memReqs.size : (VkDeviceSize)(*m_config.memoryPercentage * (float)memoryHeap.size));
+			const VkDeviceSize		roundedUpAllocationSize = roundUpToNextMultiple(allocationSize, m_memoryLimits.deviceMemoryAllocationGranularity);
+			vector<VkDeviceMemory>	memoryObjects(m_config.memoryAllocationCount, (VkDeviceMemory)0);
 
-		log << TestLog::Message << "Memory type index: " << m_memoryTypeIndex << TestLog::EndMessage;
+			log << TestLog::Message << "Memory type index: " << m_memoryTypeIndex << TestLog::EndMessage;
 
-		if (memoryType.heapIndex >= m_memoryProperties.memoryHeapCount)
-			m_result.fail("Invalid heap index defined for memory type.");
+			if (memoryType.heapIndex >= m_memoryProperties.memoryHeapCount)
+				m_result.fail("Invalid heap index defined for memory type.");
 
-		{
-			log << TestLog::Message << "Memory type: " << memoryType << TestLog::EndMessage;
-			log << TestLog::Message << "Memory heap: " << memoryHeap << TestLog::EndMessage;
+			{
+				log << TestLog::Message << "Memory type: " << memoryType << TestLog::EndMessage;
+				log << TestLog::Message << "Memory heap: " << memoryHeap << TestLog::EndMessage;
 
-			if (roundedUpAllocationSize * m_config.memoryAllocationCount > memoryHeap.size)
-				TCU_THROW(NotSupportedError, "Memory heap doesn't have enough memory.");
+				if (roundedUpAllocationSize * m_config.memoryAllocationCount > memoryHeap.size)
+					TCU_THROW(NotSupportedError, "Memory heap doesn't have enough memory.");
 
 #if (DE_PTR_SIZE == 4)
-			// For 32-bit binaries we cap the total host visible allocations to 1.5GB to
-			// avoid exhausting CPU virtual address space and throwing a false negative result.
-			if ((memoryType.propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) &&
-				allocationSize * m_config.memoryAllocationCount * (m_subsetAllocationAllowed ? 1 : m_numPhysDevices) >= 1610612736)
+				// For 32-bit binaries we cap the total host visible allocations to 1.5GB to
+				// avoid exhausting CPU virtual address space and throwing a false negative result.
+				if ((memoryType.propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) &&
+					allocationSize * m_config.memoryAllocationCount * (m_subsetAllocationAllowed ? 1 : m_numPhysDevices) >= 1610612736)
 
-				log << TestLog::Message << "    Skipping: Not enough CPU virtual address space for all host visible allocations." << TestLog::EndMessage;
-			else
-			{
+					log << TestLog::Message << "    Skipping: Not enough CPU virtual address space for all host visible allocations." << TestLog::EndMessage;
+				else
+				{
 #else
-			{
+					{
 #endif
 
-				try
-				{
-					const deUint32 totalDeviceMaskCombinations = m_subsetAllocationAllowed ? (1 << m_numPhysDevices) - 1 : 1;
-					for (deUint32 deviceMask = 1; deviceMask <= totalDeviceMaskCombinations; deviceMask++)
-					{
-						// Allocate on all physical devices if subset allocation is not allowed, do only once.
-						if (!m_subsetAllocationAllowed)
-							deviceMask = (1 << m_numPhysDevices) - 1;
-						m_allocFlagsInfo.deviceMask = deviceMask;
-
-						if (m_config.order == TestConfig::ALLOC_FREE || m_config.order == TestConfig::ALLOC_REVERSE_FREE)
+						try
 						{
-							for (size_t ndx = 0; ndx < m_config.memoryAllocationCount; ndx++)
+							const deUint32 totalDeviceMaskCombinations = m_subsetAllocationAllowed ? (1 << m_numPhysDevices) - 1 : 1;
+							for (deUint32 deviceMask = 1; deviceMask <= totalDeviceMaskCombinations; deviceMask++)
 							{
-								VkMemoryAllocateInfo	alloc	=
+								// Allocate on all physical devices if subset allocation is not allowed, do only once.
+								if (!m_subsetAllocationAllowed)
+									deviceMask = (1 << m_numPhysDevices) - 1;
+								m_allocFlagsInfo.deviceMask = deviceMask;
+
+								if (m_config.order == TestConfig::ALLOC_FREE || m_config.order == TestConfig::ALLOC_REVERSE_FREE)
 								{
-									VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,				// sType
-									m_useDeviceGroups ? &m_allocFlagsInfo : DE_NULL,	// pNext
-									allocationSize,										// allocationSize
-									m_memoryTypeIndex									// memoryTypeIndex;
-								};
-
-								VkResult				res		= vkd.allocateMemory(device, &alloc, (const VkAllocationCallbacks*)DE_NULL, &memoryObjects[ndx]);
-
-								// Some implementations might have limitations on protected heap, and these limitations
-								// don't show up in Vulkan queries. Use a hard coded threshold after which out of memory
-								// is allowed.
-								if (res == VK_ERROR_OUT_OF_DEVICE_MEMORY && memoryType.propertyFlags & vk::VK_MEMORY_PROPERTY_PROTECTED_BIT && ndx > 80)
-									break;
-
-								VK_CHECK(res);
-
-								TCU_CHECK(!!memoryObjects[ndx]);
-							}
-
-							if (m_config.order == TestConfig::ALLOC_FREE)
-							{
-								for (size_t ndx = 0; ndx < m_config.memoryAllocationCount; ndx++)
-								{
-									const VkDeviceMemory mem = memoryObjects[memoryObjects.size() - 1 - ndx];
-
-									if (!!mem)
+									for (size_t ndx = 0; ndx < m_config.memoryAllocationCount; ndx++)
 									{
-										vkd.freeMemory(device, mem, (const VkAllocationCallbacks *) DE_NULL);
-										memoryObjects[memoryObjects.size() - 1 - ndx] = (VkDeviceMemory) 0;
+										VkMemoryAllocateInfo	alloc =
+										{
+											VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,				// sType
+											m_useDeviceGroups ? &m_allocFlagsInfo : DE_NULL,	// pNext
+											allocationSize,										// allocationSize
+											m_memoryTypeIndex									// memoryTypeIndex;
+										};
+
+										VkResult				res = vkd.allocateMemory(device, &alloc, (const VkAllocationCallbacks*)DE_NULL, &memoryObjects[ndx]);
+
+										// Some implementations might have limitations on protected heap, and these limitations
+										// don't show up in Vulkan queries. Use a hard coded threshold after which out of memory
+										// is allowed.
+										if (res == VK_ERROR_OUT_OF_DEVICE_MEMORY && memoryType.propertyFlags & vk::VK_MEMORY_PROPERTY_PROTECTED_BIT && ndx > 80)
+											break;
+
+										VK_CHECK(res);
+
+										TCU_CHECK(!!memoryObjects[ndx]);
+									}
+
+									if (m_config.order == TestConfig::ALLOC_FREE)
+									{
+										for (size_t ndx = 0; ndx < m_config.memoryAllocationCount; ndx++)
+										{
+											const VkDeviceMemory mem = memoryObjects[memoryObjects.size() - 1 - ndx];
+
+											if (!!mem)
+											{
+												vkd.freeMemory(device, mem, (const VkAllocationCallbacks*)DE_NULL);
+												memoryObjects[memoryObjects.size() - 1 - ndx] = (VkDeviceMemory)0;
+											}
+										}
+									}
+									else
+									{
+										for (size_t ndx = 0; ndx < m_config.memoryAllocationCount; ndx++)
+										{
+											const VkDeviceMemory mem = memoryObjects[ndx];
+
+											if (!!mem)
+											{
+												vkd.freeMemory(device, mem, (const VkAllocationCallbacks*)DE_NULL);
+												memoryObjects[ndx] = (VkDeviceMemory)0;
+											}
+										}
 									}
 								}
-							}
-							else
-							{
-								for (size_t ndx = 0; ndx < m_config.memoryAllocationCount; ndx++)
+								else
 								{
-									const VkDeviceMemory mem = memoryObjects[ndx];
-
-									if (!!mem)
+									for (size_t ndx = 0; ndx < m_config.memoryAllocationCount; ndx++)
 									{
-										vkd.freeMemory(device, mem, (const VkAllocationCallbacks *) DE_NULL);
-										memoryObjects[ndx] = (VkDeviceMemory) 0;
+										const VkMemoryAllocateInfo alloc =
+										{
+											VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,				// sType
+											m_useDeviceGroups ? &m_allocFlagsInfo : DE_NULL,	// pNext
+											allocationSize,										// allocationSize
+											m_memoryTypeIndex									// memoryTypeIndex;
+										};
+
+										VK_CHECK(vkd.allocateMemory(device, &alloc, (const VkAllocationCallbacks*)DE_NULL, &memoryObjects[ndx]));
+										TCU_CHECK(!!memoryObjects[ndx]);
+
+										vkd.freeMemory(device, memoryObjects[ndx], (const VkAllocationCallbacks*)DE_NULL);
+										memoryObjects[ndx] = (VkDeviceMemory)0;
 									}
 								}
 							}
 						}
-						else
+						catch (...)
 						{
 							for (size_t ndx = 0; ndx < m_config.memoryAllocationCount; ndx++)
 							{
-								const VkMemoryAllocateInfo alloc =
+								const VkDeviceMemory mem = memoryObjects[ndx];
+
+								if (!!mem)
 								{
-									VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,				// sType
-									m_useDeviceGroups ? &m_allocFlagsInfo : DE_NULL,	// pNext
-									allocationSize,										// allocationSize
-									m_memoryTypeIndex									// memoryTypeIndex;
-								};
-
-								VK_CHECK(vkd.allocateMemory(device, &alloc, (const VkAllocationCallbacks*)DE_NULL, &memoryObjects[ndx]));
-								TCU_CHECK(!!memoryObjects[ndx]);
-
-								vkd.freeMemory(device, memoryObjects[ndx], (const VkAllocationCallbacks*)DE_NULL);
-								memoryObjects[ndx] = (VkDeviceMemory)0;
+									vkd.freeMemory(device, mem, (const VkAllocationCallbacks*)DE_NULL);
+									memoryObjects[ndx] = (VkDeviceMemory)0;
+								}
 							}
+
+							throw;
 						}
 					}
 				}
-				catch (...)
-				{
-					for (size_t ndx = 0; ndx < m_config.memoryAllocationCount; ndx++)
-					{
-						const VkDeviceMemory mem = memoryObjects[ndx];
-
-						if (!!mem)
-						{
-							vkd.freeMemory(device, mem, (const VkAllocationCallbacks*)DE_NULL);
-							memoryObjects[ndx] = (VkDeviceMemory)0;
-						}
-					}
-
-					throw;
-				}
 			}
+		catch (const tcu::TestError& error)
+		{
+			m_result.fail(error.getMessage());
 		}
 	}
-	catch (const tcu::TestError& error)
-	{
-		m_result.fail(error.getMessage());
-	}
 
 	m_memoryTypeIndex++;
 
@@ -626,6 +638,12 @@
 
 		TCU_CHECK(type.type.heapIndex < m_memoryProperties.memoryHeapCount);
 
+		if ((m_memoryProperties.memoryTypes[type.index].propertyFlags & vk::VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD) > 0 && !m_deviceCoherentMemSupported)
+		{
+			continue;
+		}
+
+
 		m_heaps[type.type.heapIndex].types.push_back(type);
 	}
 }