Merge tag 'android-13.0.0_r32' into int/13/fp3
Android 13.0.0 release 32
* tag 'android-13.0.0_r32':
Out of Bounds Read and Write in armnn
Change-Id: Ifdec16b25b6fe5faf4ef6c794f21c7b964edad2c
diff --git a/ArmnnPreparedModel.cpp b/ArmnnPreparedModel.cpp
index 0fd1728..cfa8a74 100644
--- a/ArmnnPreparedModel.cpp
+++ b/ArmnnPreparedModel.cpp
@@ -180,12 +180,32 @@
const armnn::TensorInfo inputTensorInfo = m_Runtime->GetInputTensorInfo(m_NetworkId, i);
const armnn::Tensor inputTensor = GetTensorForRequestArgument(inputArg, inputTensorInfo, *pMemPools);
- if (inputTensor.GetMemoryArea() == nullptr)
+
+ uint32_t poolIndex = inputArg.location.poolIndex;
+ if (poolIndex >= pMemPools->size())
+ {
+ ALOGE("Cannot execute request. Error converting request input %u to tensor: wrong poolIndex", i);
+ return V1_0::ErrorStatus::GENERAL_FAILURE;
+ }
+
+ uint8_t* inputTensorBegin = static_cast<uint8_t*>(inputTensor.GetMemoryArea());
+ if (inputTensorBegin == nullptr)
{
ALOGE("Cannot execute request. Error converting request input %u to tensor", i);
return V1_0::ErrorStatus::GENERAL_FAILURE;
}
+ const size_t inputTensorSize = inputTensorInfo.GetNumBytes();
+ uint8_t* memoryPoolBegin = (*pMemPools)[poolIndex].getBuffer();
+ uint32_t memoryPoolSize = (*pMemPools)[poolIndex].getSize();
+ bool inputTensorIsOutOfMemoryRange = (inputTensorBegin + inputTensorSize) > (memoryPoolBegin + memoryPoolSize);
+
+ if (inputTensorIsOutOfMemoryRange)
+ {
+ ALOGE("Cannot execute request. Error converting request input %u to tensor: out of Memory Pool", i);
+ return V1_0::ErrorStatus::GENERAL_FAILURE;
+ }
+
pInputTensors->emplace_back(i, inputTensor);
}
@@ -196,12 +216,32 @@
const armnn::TensorInfo outputTensorInfo = m_Runtime->GetOutputTensorInfo(m_NetworkId, i);
const armnn::Tensor outputTensor = GetTensorForRequestArgument(outputArg, outputTensorInfo, *pMemPools);
- if (outputTensor.GetMemoryArea() == nullptr)
+
+ uint32_t poolIndex = outputArg.location.poolIndex;
+ if (poolIndex >= pMemPools->size())
+ {
+ ALOGE("Cannot execute request. Error converting request output %u to tensor: wrong poolIndex", i);
+ return V1_0::ErrorStatus::GENERAL_FAILURE;
+ }
+
+ uint8_t* outputTensorBegin = static_cast<uint8_t*>(outputTensor.GetMemoryArea());
+ if (outputTensorBegin == nullptr)
{
ALOGE("Cannot execute request. Error converting request output %u to tensor", i);
return V1_0::ErrorStatus::GENERAL_FAILURE;
}
+ const size_t outputTensorSize = outputTensorInfo.GetNumBytes();
+ uint8_t* memoryPoolBegin = (*pMemPools)[poolIndex].getBuffer();
+ uint32_t memoryPoolSize = (*pMemPools)[poolIndex].getSize();
+ bool outputTensorIsOutOfMemoryRange = (outputTensorBegin + outputTensorSize) > (memoryPoolBegin + memoryPoolSize);
+
+ if (outputTensorIsOutOfMemoryRange)
+ {
+ ALOGE("Cannot execute request. Error converting request output %u to tensor: out of Memory Pool", i);
+ return V1_0::ErrorStatus::GENERAL_FAILURE;
+ }
+
pOutputTensors->emplace_back(i, outputTensor);
}
}
diff --git a/ArmnnPreparedModel_1_2.cpp b/ArmnnPreparedModel_1_2.cpp
index b3ba627..f5c73cf 100644
--- a/ArmnnPreparedModel_1_2.cpp
+++ b/ArmnnPreparedModel_1_2.cpp
@@ -225,12 +225,31 @@
const armnn::TensorInfo inputTensorInfo = m_Runtime->GetInputTensorInfo(m_NetworkId, i);
const armnn::Tensor inputTensor = GetTensorForRequestArgument(inputArg, inputTensorInfo, memPools);
- if (inputTensor.GetMemoryArea() == nullptr)
+ uint32_t poolIndex = inputArg.location.poolIndex;
+ if (poolIndex >= memPools.size())
+ {
+ ALOGE("Cannot execute request. Error converting request input %u to tensor: wrong poolIndex", i);
+ return V1_0::ErrorStatus::GENERAL_FAILURE;
+ }
+
+ uint8_t* inputTensorBegin = static_cast<uint8_t*>(inputTensor.GetMemoryArea());
+ if (inputTensorBegin == nullptr)
{
ALOGE("Cannot execute request. Error converting request input %u to tensor", i);
return V1_0::ErrorStatus::GENERAL_FAILURE;
}
+ const size_t inputTensorSize = inputTensorInfo.GetNumBytes();
+ uint8_t* memoryPoolBegin = memPools[poolIndex].getBuffer();
+ uint32_t memoryPoolSize = memPools[poolIndex].getSize();
+ bool inputTensorIsOutOfMemoryRange = (inputTensorBegin + inputTensorSize) > (memoryPoolBegin + memoryPoolSize);
+
+ if (inputTensorIsOutOfMemoryRange)
+ {
+ ALOGE("Cannot execute request. Error converting request input %u to tensor: out of Memory Pool", i);
+ return V1_0::ErrorStatus::GENERAL_FAILURE;
+ }
+
inputs.emplace_back(i, inputTensor);
}
@@ -251,7 +270,8 @@
const armnn::TensorInfo outputTensorInfo = m_Runtime->GetOutputTensorInfo(m_NetworkId, i);
const armnn::Tensor outputTensor = GetTensorForRequestArgument(outputArg, outputTensorInfo, memPools);
- if (outputTensor.GetMemoryArea() == nullptr)
+ uint8_t* outputTensorBegin = static_cast<uint8_t*>(outputTensor.GetMemoryArea());
+ if (outputTensorBegin == nullptr)
{
ALOGE("Cannot execute request. Error converting request output %u to tensor", i);
return V1_0::ErrorStatus::GENERAL_FAILURE;
@@ -272,6 +292,22 @@
return V1_0::ErrorStatus::OUTPUT_INSUFFICIENT_SIZE;
}
+ uint32_t poolIndex = outputArg.location.poolIndex;
+ if (poolIndex >= memPools.size())
+ {
+ ALOGE("Cannot execute request. Error converting request output %u to tensor: wrong poolIndex", i);
+ return V1_0::ErrorStatus::GENERAL_FAILURE;
+ }
+
+ uint8_t* memoryPoolBegin = memPools[poolIndex].getBuffer();
+ uint32_t memoryPoolSize = memPools[poolIndex].getSize();
+ bool outputTensorIsOutOfMemoryRange = (outputTensorBegin + outputSize) > (memoryPoolBegin + memoryPoolSize);
+ if (outputTensorIsOutOfMemoryRange)
+ {
+ ALOGE("Cannot execute request. Error converting request output %u to tensor: out of Memory Pool", i);
+ return V1_0::ErrorStatus::GENERAL_FAILURE;
+ }
+
outputs.emplace_back(i, outputTensor);
outputShapes[i] = ComputeShape(outputTensorInfo);
}
diff --git a/ArmnnPreparedModel_1_3.cpp b/ArmnnPreparedModel_1_3.cpp
index fd58422..bd5261e 100644
--- a/ArmnnPreparedModel_1_3.cpp
+++ b/ArmnnPreparedModel_1_3.cpp
@@ -418,12 +418,31 @@
const armnn::TensorInfo inputTensorInfo = m_Runtime->GetInputTensorInfo(m_NetworkId, i);
const armnn::Tensor inputTensor = GetTensorForRequestArgument(inputArg, inputTensorInfo, memPools);
- if (inputTensor.GetMemoryArea() == nullptr)
+ uint32_t poolIndex = inputArg.location.poolIndex;
+ if (poolIndex >= memPools.size())
+ {
+ ALOGE("Cannot execute request. Error converting request input %u to tensor: wrong poolIndex", i);
+ return V1_3::ErrorStatus::GENERAL_FAILURE;
+ }
+
+ uint8_t* inputTensorBegin = static_cast<uint8_t*>(inputTensor.GetMemoryArea());
+ if (inputTensorBegin == nullptr)
{
ALOGE("Cannot execute request. Error converting request input %u to tensor", i);
return V1_3::ErrorStatus::GENERAL_FAILURE;
}
+ const size_t inputTensorSize = inputTensorInfo.GetNumBytes();
+ uint8_t* memoryPoolBegin = memPools[poolIndex].getBuffer();
+ uint32_t memoryPoolSize = memPools[poolIndex].getSize();
+ bool inputTensorIsOutOfMemoryRange = (inputTensorBegin + inputTensorSize) > (memoryPoolBegin + memoryPoolSize);
+
+ if (inputTensorIsOutOfMemoryRange)
+ {
+ ALOGE("Cannot execute request. Error converting request input %u to tensor: out of Memory Pool", i);
+ return V1_3::ErrorStatus::GENERAL_FAILURE;
+ }
+
inputs.emplace_back(i, inputTensor);
}
@@ -444,13 +463,29 @@
armnn::TensorInfo outputTensorInfo = m_Runtime->GetOutputTensorInfo(m_NetworkId, i);
const armnn::Tensor outputTensor = GetTensorForRequestArgument(outputArg, outputTensorInfo, memPools);
- if (outputTensor.GetMemoryArea() == nullptr)
+ uint8_t* outputTensorBegin = static_cast<uint8_t*>(outputTensor.GetMemoryArea());
+ if (outputTensorBegin == nullptr)
{
ALOGE("Cannot execute request. Error converting request output %u to tensor", i);
return V1_3::ErrorStatus::GENERAL_FAILURE;
}
const size_t outputSize = outputTensorInfo.GetNumBytes();
+ uint32_t poolIndex = outputArg.location.poolIndex;
+ if (poolIndex >= memPools.size())
+ {
+ ALOGE("Cannot execute request. Error converting request output %u to tensor: wrong poolIndex", i);
+ return V1_3::ErrorStatus::GENERAL_FAILURE;
+ }
+
+ uint8_t* memoryPoolBegin = memPools[poolIndex].getBuffer();
+ uint32_t memoryPoolSize = memPools[poolIndex].getSize();
+ bool outputTensorIsOutOfMemoryRange = (outputTensorBegin + outputSize) > (memoryPoolBegin + memoryPoolSize);
+ if (outputTensorIsOutOfMemoryRange)
+ {
+ ALOGE("Cannot execute request. Error converting request output %u to tensor: out of Memory Pool", i);
+ return V1_3::ErrorStatus::GENERAL_FAILURE;
+ }
unsigned int count = 0;
std::for_each(outputArg.dimensions.begin(), outputArg.dimensions.end(), [&](auto dim)