Fix out-of-bounds read and write in armnn
This change makes sure that each request argument fits within its memory pool,
taking the argument's offset into account.
Bug: 256589676
Test: manual run of the poc in the ticket
Change-Id: I664e1ea2ab7fc9c5d22bcb32ce8ddc2f581fe7a8
(cherry picked from commit 6592f1a4d52747eca26b04e2e7c7e36dd9a7b1db)
Merged-In: I664e1ea2ab7fc9c5d22bcb32ce8ddc2f581fe7a8
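In essence, the validation added for each request argument below boils down to a single bounds check. The following is a minimal sketch; the standalone helper and its name are illustrative only, while the pool's begin pointer and size correspond to the getBuffer()/getSize() accessors used in the diff:

#include <cstddef>
#include <cstdint>

// Illustrative helper (not part of the patch): returns true if a tensor whose
// memory area starts at tensorBegin (already offset into its pool) ends at or
// before the end of that pool, i.e. within [poolBegin, poolBegin + poolSize).
static bool TensorFitsInPool(const uint8_t* tensorBegin,
                             size_t tensorSize,
                             const uint8_t* poolBegin,
                             size_t poolSize)
{
    return (tensorBegin + tensorSize) <= (poolBegin + poolSize);
}

The pool index itself is validated against memPools.size() before the pool's buffer and size are read, so the check above is only performed on a valid pool.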
diff --git a/ArmnnPreparedModel_1_2.cpp b/ArmnnPreparedModel_1_2.cpp
index b3ba627..f5c73cf 100644
--- a/ArmnnPreparedModel_1_2.cpp
+++ b/ArmnnPreparedModel_1_2.cpp
@@ -225,12 +225,31 @@
const armnn::TensorInfo inputTensorInfo = m_Runtime->GetInputTensorInfo(m_NetworkId, i);
const armnn::Tensor inputTensor = GetTensorForRequestArgument(inputArg, inputTensorInfo, memPools);
- if (inputTensor.GetMemoryArea() == nullptr)
+ uint32_t poolIndex = inputArg.location.poolIndex;
+ if (poolIndex >= memPools.size())
+ {
+ ALOGE("Cannot execute request. Error converting request input %u to tensor: wrong poolIndex", i);
+ return V1_0::ErrorStatus::GENERAL_FAILURE;
+ }
+
+ uint8_t* inputTensorBegin = static_cast<uint8_t*>(inputTensor.GetMemoryArea());
+ if (inputTensorBegin == nullptr)
{
ALOGE("Cannot execute request. Error converting request input %u to tensor", i);
return V1_0::ErrorStatus::GENERAL_FAILURE;
}
+ const size_t inputTensorSize = inputTensorInfo.GetNumBytes();
+ uint8_t* memoryPoolBegin = memPools[poolIndex].getBuffer();
+ uint32_t memoryPoolSize = memPools[poolIndex].getSize();
+ bool inputTensorIsOutOfMemoryRange = (inputTensorBegin + inputTensorSize) > (memoryPoolBegin + memoryPoolSize);
+
+ if (inputTensorIsOutOfMemoryRange)
+ {
+ ALOGE("Cannot execute request. Error converting request input %u to tensor: input is outside the memory pool", i);
+ return V1_0::ErrorStatus::GENERAL_FAILURE;
+ }
+
inputs.emplace_back(i, inputTensor);
}
@@ -251,7 +270,8 @@
const armnn::TensorInfo outputTensorInfo = m_Runtime->GetOutputTensorInfo(m_NetworkId, i);
const armnn::Tensor outputTensor = GetTensorForRequestArgument(outputArg, outputTensorInfo, memPools);
- if (outputTensor.GetMemoryArea() == nullptr)
+ uint8_t* outputTensorBegin = static_cast<uint8_t*>(outputTensor.GetMemoryArea());
+ if (outputTensorBegin == nullptr)
{
ALOGE("Cannot execute request. Error converting request output %u to tensor", i);
return V1_0::ErrorStatus::GENERAL_FAILURE;
@@ -272,6 +292,22 @@
return V1_0::ErrorStatus::OUTPUT_INSUFFICIENT_SIZE;
}
+ uint32_t poolIndex = outputArg.location.poolIndex;
+ if (poolIndex >= memPools.size())
+ {
+ ALOGE("Cannot execute request. Error converting request output %u to tensor: wrong poolIndex", i);
+ return V1_0::ErrorStatus::GENERAL_FAILURE;
+ }
+
+ uint8_t* memoryPoolBegin = memPools[poolIndex].getBuffer();
+ uint32_t memoryPoolSize = memPools[poolIndex].getSize();
+ bool outputTensorIsOutOfMemoryRange = (outputTensorBegin + outputSize) > (memoryPoolBegin + memoryPoolSize);
+ if (outputTensorIsOutOfMemoryRange)
+ {
+ ALOGE("Cannot execute request. Error converting request output %u to tensor: output is outside the memory pool", i);
+ return V1_0::ErrorStatus::GENERAL_FAILURE;
+ }
+
outputs.emplace_back(i, outputTensor);
outputShapes[i] = ComputeShape(outputTensorInfo);
}