Make padding explicit instead of SAME / VALID

  - Use explicit padding {padding_left, padding_right, padding_top,
    padding_bottom} for applicable ops.
  - Updated the corresponding tests.

Bug: 63905942
Test: mm
Test: updated NeuralNetworkTest passes
Change-Id: Ifb8ccf59fbe5766dffe1537aa5357192a7c88a70
diff --git a/nn/common/CpuExecutor.cpp b/nn/common/CpuExecutor.cpp
index d974a86..3ff326b 100644
--- a/nn/common/CpuExecutor.cpp
+++ b/nn/common/CpuExecutor.cpp
@@ -314,25 +314,30 @@
             }
         } break;
         case OperationType::DEPTHWISE_CONV_2D: {
-            if (!parameterCountIs(8, 1)) {
+            if (!parameterCountIs(11, 1)) {
                 return ANEURALNETWORKS_BAD_DATA;
             }
             const RunTimeOperandInfo& input  = mOperands[ins[0]];
             const RunTimeOperandInfo& filter = mOperands[ins[1]];
             const RunTimeOperandInfo& bias   = mOperands[ins[2]];
 
-            int32_t padding          = getScalarData<int32_t>(mOperands[ins[3]]);
-            int32_t stride_width     = getScalarData<int32_t>(mOperands[ins[4]]);
-            int32_t stride_height    = getScalarData<int32_t>(mOperands[ins[5]]);
-            int32_t depth_multiplier = getScalarData<int32_t>(mOperands[ins[6]]);
-            int32_t activation       = getScalarData<int32_t>(mOperands[ins[7]]);
+            int32_t padding_left     = getScalarData<int32_t>(mOperands[ins[3]]);
+            int32_t padding_right    = getScalarData<int32_t>(mOperands[ins[4]]);
+            int32_t padding_top      = getScalarData<int32_t>(mOperands[ins[5]]);
+            int32_t padding_bottom   = getScalarData<int32_t>(mOperands[ins[6]]);
+            int32_t stride_width     = getScalarData<int32_t>(mOperands[ins[7]]);
+            int32_t stride_height    = getScalarData<int32_t>(mOperands[ins[8]]);
+            int32_t depth_multiplier = getScalarData<int32_t>(mOperands[ins[9]]);
+            int32_t activation       = getScalarData<int32_t>(mOperands[ins[10]]);
 
             RunTimeOperandInfo& output = mOperands[outs[0]];
             Shape outShape = output.shape();
 
             if (operation.opTuple.operandType == OperandType::TENSOR_FLOAT32) {
                 success = depthwiseConvPrepare(input.shape(), filter.shape(), bias.shape(),
-                                               padding, stride_width, stride_height,
+                                               padding_left, padding_right,
+                                               padding_top, padding_bottom,
+                                               stride_width, stride_height,
                                                &outShape) &&
                           allocateIfNeeded(&output, outShape) &&
                           depthwiseConvFloat32(reinterpret_cast<const float*>(input.buffer),
@@ -341,13 +346,17 @@
                                                filter.shape(),
                                                reinterpret_cast<const float*>(bias.buffer),
                                                bias.shape(),
-                                               padding, stride_width, stride_height,
+                                               padding_left, padding_right,
+                                               padding_top, padding_bottom,
+                                               stride_width, stride_height,
                                                depth_multiplier, activation,
                                                reinterpret_cast<float*>(output.buffer),
                                                outShape);
             } else if (operation.opTuple.operandType == OperandType::TENSOR_QUANT8_ASYMM) {
                 success = depthwiseConvPrepare(input.shape(), filter.shape(), bias.shape(),
-                                               padding, stride_width, stride_height,
+                                               padding_left, padding_right,
+                                               padding_top, padding_bottom,
+                                               stride_width, stride_height,
                                                &outShape) &&
                           allocateIfNeeded(&output, outShape) &&
                           depthwiseConvQuant8(reinterpret_cast<const uint8_t*>(input.buffer),
@@ -356,7 +365,9 @@
                                               filter.shape(),
                                               reinterpret_cast<const int32_t*>(bias.buffer),
                                               bias.shape(),
-                                              padding, stride_width, stride_height,
+                                              padding_left, padding_right,
+                                              padding_top, padding_bottom,
+                                              stride_width, stride_height,
                                               depth_multiplier, activation,
                                               reinterpret_cast<uint8_t*>(output.buffer),
                                               outShape);
@@ -364,34 +375,43 @@
 
         } break;
         case OperationType::CONV_2D: {
-            if (!parameterCountIs(7, 1)) {
+            if (!parameterCountIs(10, 1)) {
                 return ANEURALNETWORKS_BAD_DATA;
             }
             const RunTimeOperandInfo& input  = mOperands[ins[0]];
             const RunTimeOperandInfo& filter = mOperands[ins[1]];
             const RunTimeOperandInfo& bias   = mOperands[ins[2]];
 
-            int32_t padding          = getScalarData<int32_t>(mOperands[ins[3]]);
-            int32_t stride_width     = getScalarData<int32_t>(mOperands[ins[4]]);
-            int32_t stride_height    = getScalarData<int32_t>(mOperands[ins[5]]);
-            int32_t activation       = getScalarData<int32_t>(mOperands[ins[6]]);
+            int32_t padding_left     = getScalarData<int32_t>(mOperands[ins[3]]);
+            int32_t padding_right    = getScalarData<int32_t>(mOperands[ins[4]]);
+            int32_t padding_top      = getScalarData<int32_t>(mOperands[ins[5]]);
+            int32_t padding_bottom   = getScalarData<int32_t>(mOperands[ins[6]]);
+            int32_t stride_width     = getScalarData<int32_t>(mOperands[ins[7]]);
+            int32_t stride_height    = getScalarData<int32_t>(mOperands[ins[8]]);
+            int32_t activation       = getScalarData<int32_t>(mOperands[ins[9]]);
 
             RunTimeOperandInfo& output = mOperands[outs[0]];
             Shape outShape = output.shape();
 
             if (operation.opTuple.operandType == OperandType::TENSOR_FLOAT32) {
                 success = convPrepare(input.shape(), filter.shape(), bias.shape(),
-                                      padding, stride_width, stride_height,
+                                      padding_left, padding_right,
+                                      padding_top, padding_bottom,
+                                      stride_width, stride_height,
                                       &outShape) &&
                           allocateIfNeeded(&output, outShape) &&
                           convFloat32(reinterpret_cast<const float*>(input.buffer), input.shape(),
                                       reinterpret_cast<const float*>(filter.buffer), filter.shape(),
                                       reinterpret_cast<const float*>(bias.buffer), bias.shape(),
-                                      padding, stride_width, stride_height, activation,
+                                      padding_left, padding_right,
+                                      padding_top, padding_bottom,
+                                      stride_width, stride_height, activation,
                                       reinterpret_cast<float*>(output.buffer), outShape);
             } else if (operation.opTuple.operandType == OperandType::TENSOR_QUANT8_ASYMM) {
                 success = convPrepare(input.shape(), filter.shape(), bias.shape(),
-                                      padding, stride_width, stride_height,
+                                      padding_left, padding_right,
+                                      padding_top, padding_bottom,
+                                      stride_width, stride_height,
                                       &outShape) &&
                           allocateIfNeeded(&output, outShape) &&
                           convQuant8(reinterpret_cast<const uint8_t*>(input.buffer),
@@ -400,120 +420,151 @@
                                      filter.shape(),
                                      reinterpret_cast<const int32_t*>(bias.buffer),
                                      bias.shape(),
-                                     padding, stride_width, stride_height, activation,
+                                     padding_left, padding_right,
+                                     padding_top, padding_bottom,
+                                     stride_width, stride_height, activation,
                                      reinterpret_cast<uint8_t*>(output.buffer),
                                      outShape);
             }
         } break;
         case OperationType::AVERAGE_POOL_2D: {
-            if (!parameterCountIs(7, 1)) {
+            if (!parameterCountIs(10, 1)) {
                 return ANEURALNETWORKS_BAD_DATA;
             }
             const RunTimeOperandInfo& input = mOperands[ins[0]];
 
-            int32_t padding          = getScalarData<int32_t>(mOperands[ins[1]]);
-            int32_t stride_width     = getScalarData<int32_t>(mOperands[ins[2]]);
-            int32_t stride_height    = getScalarData<int32_t>(mOperands[ins[3]]);
-            int32_t filter_width     = getScalarData<int32_t>(mOperands[ins[4]]);
-            int32_t filter_height    = getScalarData<int32_t>(mOperands[ins[5]]);
-            int32_t activation       = getScalarData<int32_t>(mOperands[ins[6]]);
+            int32_t padding_left     = getScalarData<int32_t>(mOperands[ins[1]]);
+            int32_t padding_right    = getScalarData<int32_t>(mOperands[ins[2]]);
+            int32_t padding_top      = getScalarData<int32_t>(mOperands[ins[3]]);
+            int32_t padding_bottom   = getScalarData<int32_t>(mOperands[ins[4]]);
+            int32_t stride_width     = getScalarData<int32_t>(mOperands[ins[5]]);
+            int32_t stride_height    = getScalarData<int32_t>(mOperands[ins[6]]);
+            int32_t filter_width     = getScalarData<int32_t>(mOperands[ins[7]]);
+            int32_t filter_height    = getScalarData<int32_t>(mOperands[ins[8]]);
+            int32_t activation       = getScalarData<int32_t>(mOperands[ins[9]]);
 
             RunTimeOperandInfo& output = mOperands[outs[0]];
             Shape outShape = output.shape();
 
             if (operation.opTuple.operandType == OperandType::TENSOR_FLOAT32) {
                 success = genericPoolingPrepare(input.shape(),
-                                                padding, stride_width, stride_height,
+                                                padding_left, padding_right,
+                                                padding_top, padding_bottom,
+                                                stride_width, stride_height,
                                                 filter_width, filter_height,
                                                 &outShape) &&
                           allocateIfNeeded(&output, outShape) &&
                           averagePoolFloat32(reinterpret_cast<const float*>(input.buffer),
                                              input.shape(),
-                                             padding, stride_width, stride_height,
+                                             padding_left, padding_right,
+                                             padding_top, padding_bottom,
+                                             stride_width, stride_height,
                                              filter_width, filter_height, activation,
                                              reinterpret_cast<float*>(output.buffer),
                                              outShape);
             } else if (operation.opTuple.operandType == OperandType::TENSOR_QUANT8_ASYMM) {
                 success = genericPoolingPrepare(input.shape(),
-                                                padding, stride_width, stride_height,
+                                                padding_left, padding_right,
+                                                padding_top, padding_bottom,
+                                                stride_width, stride_height,
                                                 filter_width, filter_height,
                                                 &outShape) &&
                           allocateIfNeeded(&output, outShape) &&
                           averagePoolQuant8(reinterpret_cast<const uint8_t*>(input.buffer),
                                             input.shape(),
-                                            padding, stride_width, stride_height,
+                                            padding_left, padding_right,
+                                            padding_top, padding_bottom,
+                                            stride_width, stride_height,
                                             filter_width, filter_height, activation,
                                             reinterpret_cast<uint8_t*>(output.buffer),
                                             outShape);
             }
         } break;
         case OperationType::L2_POOL_2D: {
-            if (!parameterCountIs(7, 1)) {
+            if (!parameterCountIs(10, 1)) {
                 return ANEURALNETWORKS_BAD_DATA;
             }
             const RunTimeOperandInfo& input = mOperands[ins[0]];
 
-            int32_t padding          = getScalarData<int32_t>(mOperands[ins[1]]);
-            int32_t stride_width     = getScalarData<int32_t>(mOperands[ins[2]]);
-            int32_t stride_height    = getScalarData<int32_t>(mOperands[ins[3]]);
-            int32_t filter_width     = getScalarData<int32_t>(mOperands[ins[4]]);
-            int32_t filter_height    = getScalarData<int32_t>(mOperands[ins[5]]);
-            int32_t activation       = getScalarData<int32_t>(mOperands[ins[6]]);
+            int32_t padding_left     = getScalarData<int32_t>(mOperands[ins[1]]);
+            int32_t padding_right    = getScalarData<int32_t>(mOperands[ins[2]]);
+            int32_t padding_top      = getScalarData<int32_t>(mOperands[ins[3]]);
+            int32_t padding_bottom   = getScalarData<int32_t>(mOperands[ins[4]]);
+            int32_t stride_width     = getScalarData<int32_t>(mOperands[ins[5]]);
+            int32_t stride_height    = getScalarData<int32_t>(mOperands[ins[6]]);
+            int32_t filter_width     = getScalarData<int32_t>(mOperands[ins[7]]);
+            int32_t filter_height    = getScalarData<int32_t>(mOperands[ins[8]]);
+            int32_t activation       = getScalarData<int32_t>(mOperands[ins[9]]);
 
             RunTimeOperandInfo& output = mOperands[outs[0]];
             Shape outShape = output.shape();
 
             if (operation.opTuple.operandType == OperandType::TENSOR_FLOAT32) {
                 success = genericPoolingPrepare(input.shape(),
-                                                padding, stride_width, stride_height,
+                                                padding_left, padding_right,
+                                                padding_top, padding_bottom,
+                                                stride_width, stride_height,
                                                 filter_width, filter_height,
                                                 &outShape) &&
                           allocateIfNeeded(&output, outShape) &&
                           l2PoolFloat32(reinterpret_cast<const float*>(input.buffer),
                                         input.shape(),
-                                        padding, stride_width, stride_height,
+                                        padding_left, padding_right,
+                                        padding_top, padding_bottom,
+                                        stride_width, stride_height,
                                         filter_width, filter_height, activation,
                                         reinterpret_cast<float*>(output.buffer),
                                         outShape);
             }
         } break;
         case OperationType::MAX_POOL_2D: {
-            if (!parameterCountIs(7, 1)) {
+            if (!parameterCountIs(10, 1)) {
                 return ANEURALNETWORKS_BAD_DATA;
             }
             const RunTimeOperandInfo& input = mOperands[ins[0]];
 
-            int32_t padding          = getScalarData<int32_t>(mOperands[ins[1]]);
-            int32_t stride_width     = getScalarData<int32_t>(mOperands[ins[2]]);
-            int32_t stride_height    = getScalarData<int32_t>(mOperands[ins[3]]);
-            int32_t filter_width     = getScalarData<int32_t>(mOperands[ins[4]]);
-            int32_t filter_height    = getScalarData<int32_t>(mOperands[ins[5]]);
-            int32_t activation       = getScalarData<int32_t>(mOperands[ins[6]]);
+            int32_t padding_left     = getScalarData<int32_t>(mOperands[ins[1]]);
+            int32_t padding_right    = getScalarData<int32_t>(mOperands[ins[2]]);
+            int32_t padding_top      = getScalarData<int32_t>(mOperands[ins[3]]);
+            int32_t padding_bottom   = getScalarData<int32_t>(mOperands[ins[4]]);
+            int32_t stride_width     = getScalarData<int32_t>(mOperands[ins[5]]);
+            int32_t stride_height    = getScalarData<int32_t>(mOperands[ins[6]]);
+            int32_t filter_width     = getScalarData<int32_t>(mOperands[ins[7]]);
+            int32_t filter_height    = getScalarData<int32_t>(mOperands[ins[8]]);
+            int32_t activation       = getScalarData<int32_t>(mOperands[ins[9]]);
 
             RunTimeOperandInfo& output = mOperands[outs[0]];
             Shape outShape = output.shape();
 
             if (operation.opTuple.operandType == OperandType::TENSOR_FLOAT32) {
                 success = genericPoolingPrepare(input.shape(),
-                                                padding, stride_width, stride_height,
+                                                padding_left, padding_right,
+                                                padding_top, padding_bottom,
+                                                stride_width, stride_height,
                                                 filter_width, filter_height,
                                                 &outShape) &&
                           allocateIfNeeded(&output, outShape) &&
                           maxPoolFloat32(reinterpret_cast<const float*>(input.buffer),
                                          input.shape(),
-                                         padding, stride_width, stride_height,
+                                         padding_left, padding_right,
+                                         padding_top, padding_bottom,
+                                         stride_width, stride_height,
                                          filter_width, filter_height, activation,
                                          reinterpret_cast<float*>(output.buffer),
                                          outShape);
             } else if (operation.opTuple.operandType == OperandType::TENSOR_QUANT8_ASYMM) {
                 success = genericPoolingPrepare(input.shape(),
-                                                padding, stride_width, stride_height,
+                                                padding_left, padding_right,
+                                                padding_top, padding_bottom,
+                                                stride_width, stride_height,
                                                 filter_width, filter_height,
                                                 &outShape) &&
                           allocateIfNeeded(&output, outShape) &&
                           maxPoolQuant8(reinterpret_cast<const uint8_t*>(input.buffer),
                                         input.shape(),
-                                        padding, stride_width, stride_height,
+                                        padding_left, padding_right,
+                                        padding_top, padding_bottom,
+                                        stride_width, stride_height,
                                         filter_width, filter_height, activation,
                                         reinterpret_cast<uint8_t*>(output.buffer),
                                         outShape);
diff --git a/nn/common/include/Operations.h b/nn/common/include/Operations.h
index 7b5b547..10b6b31 100644
--- a/nn/common/include/Operations.h
+++ b/nn/common/include/Operations.h
@@ -61,66 +61,87 @@
                                const Shape& shape);
 
 bool depthwiseConvPrepare(const Shape& input,
-                                 const Shape& filter,
-                                 const Shape& bias,
-                                 int32_t padding,
-                                 int32_t stride_width, int32_t stride_height,
-                                 Shape* output);
+                          const Shape& filter,
+                          const Shape& bias,
+                          int32_t padding_left, int32_t padding_right,
+                          int32_t padding_top, int32_t padding_bottom,
+                          int32_t stride_width, int32_t stride_height,
+                          Shape* output);
 bool depthwiseConvFloat32(const float* inputData, const Shape& inputShape,
                           const float* filterData, const Shape& filterShape,
                           const float* biasData, const Shape& biasShape,
-                          int32_t padding, int32_t stride_width, int32_t stride_height,
+                          int32_t padding_left, int32_t padding_right,
+                          int32_t padding_top, int32_t padding_bottom,
+                          int32_t stride_width, int32_t stride_height,
                           int32_t depth_multiplier, int32_t activation,
                           float* outputData, const Shape& outputShape);
 bool depthwiseConvQuant8(const uint8_t* inputData, const Shape& inputShape,
                          const uint8_t* filterData, const Shape& filterShape,
                          const int32_t* biasData, const Shape& biasShape,
-                         int32_t padding, int32_t stride_width, int32_t stride_height,
+                         int32_t padding_left, int32_t padding_right,
+                         int32_t padding_top, int32_t padding_bottom,
+                         int32_t stride_width, int32_t stride_height,
                          int32_t depth_multiplier, int32_t activation,
                          uint8_t* outputData, const Shape& outputShape);
 
 bool convPrepare(const Shape& input,
                  const Shape& filter,
                  const Shape& bias,
-                 int32_t padding,
+                 int32_t padding_left, int32_t padding_right,
+                 int32_t padding_top, int32_t padding_bottom,
                  int32_t stride_width, int32_t stride_height,
                  Shape* output);
 bool convFloat32(const float* inputData, const Shape& inputShape,
                  const float* filterData, const Shape& filterShape,
                  const float* biasData, const Shape& biasShape,
-                 int32_t padding, int32_t stride_width, int32_t stride_height,
+                 int32_t padding_left, int32_t padding_right,
+                 int32_t padding_top, int32_t padding_bottom,
+                 int32_t stride_width, int32_t stride_height,
                  int32_t activation,
                  float* outputData, const Shape& outputShape);
 bool convQuant8(const uint8_t* inputData, const Shape& inputShape,
                 const uint8_t* filterData, const Shape& filterShape,
                 const int32_t* biasData, const Shape& biasShape,
-                int32_t padding, int32_t stride_width, int32_t stride_height,
+                int32_t padding_left, int32_t padding_right,
+                int32_t padding_top, int32_t padding_bottom,
+                int32_t stride_width, int32_t stride_height,
                 int32_t activation,
                 uint8_t* outputData, const Shape& outputShape);
 
 bool genericPoolingPrepare(const Shape& input,
-                           int32_t padding,
+                           int32_t padding_left, int32_t padding_right,
+                           int32_t padding_top, int32_t padding_bottom,
                            int32_t stride_width, int32_t stride_height,
                            int32_t filter_width, int32_t filter_height,
                            Shape* output);
 bool averagePoolFloat32(const float* inputData, const Shape& inputShape,
-                        int32_t padding, int32_t stride_width, int32_t stride_height,
+                        int32_t padding_left, int32_t padding_right,
+                        int32_t padding_top, int32_t padding_bottom,
+                        int32_t stride_width, int32_t stride_height,
                         int32_t filter_width, int32_t filter_height, int32_t activation,
                         float* outputData, const Shape& outputShape);
 bool averagePoolQuant8(const uint8_t* inputData, const Shape& inputShape,
-                       int32_t padding, int32_t stride_width, int32_t stride_height,
+                       int32_t padding_left, int32_t padding_right,
+                       int32_t padding_top, int32_t padding_bottom,
+                       int32_t stride_width, int32_t stride_height,
                        int32_t filter_width, int32_t filter_height, int32_t activation,
                        uint8_t* outputData, const Shape& outputShape);
 bool l2PoolFloat32(const float* inputData, const Shape& inputShape,
-                   int32_t padding, int32_t stride_width, int32_t stride_height,
+                   int32_t padding_left, int32_t padding_right,
+                   int32_t padding_top, int32_t padding_bottom,
+                   int32_t stride_width, int32_t stride_height,
                    int32_t filter_width, int32_t filter_height, int32_t activation,
                    float* outputData, const Shape& outputShape);
 bool maxPoolFloat32(const float* inputData, const Shape& inputShape,
-                    int32_t padding, int32_t stride_width, int32_t stride_height,
+                    int32_t padding_left, int32_t padding_right,
+                    int32_t padding_top, int32_t padding_bottom,
+                    int32_t stride_width, int32_t stride_height,
                     int32_t filter_width, int32_t filter_height, int32_t activation,
                     float* outputData, const Shape& outputShape);
 bool maxPoolQuant8(const uint8_t* inputData, const Shape& inputShape,
-                   int32_t padding, int32_t stride_width, int32_t stride_height,
+                   int32_t padding_left, int32_t padding_right,
+                   int32_t padding_top, int32_t padding_bottom,
+                   int32_t stride_width, int32_t stride_height,
                    int32_t filter_width, int32_t filter_height, int32_t activation,
                    uint8_t* outputData, const Shape& outputShape);
 
diff --git a/nn/common/include/OperationsUtils.h b/nn/common/include/OperationsUtils.h
index 02d2d15..60516db 100644
--- a/nn/common/include/OperationsUtils.h
+++ b/nn/common/include/OperationsUtils.h
@@ -47,14 +47,9 @@
 
 uint32_t getSizeOfDimension(const Shape& shape, uint32_t dimensionIdx);
 
-inline uint32_t ComputePadding(uint32_t stride, uint32_t in_size, uint32_t filter_size,
-                               uint32_t out_size) {
-    uint32_t tmp = (out_size - 1) * stride + filter_size;
-    if (tmp > in_size) {
-        return (tmp - in_size) / 2;
-    } else {
-        return 0;
-    }
+inline uint32_t computeOutSize(uint32_t imageSize, uint32_t filterSize, uint32_t stride,
+                               uint32_t paddingHead, uint32_t paddingTail) {
+    return (imageSize - filterSize + stride + paddingHead + paddingTail) / stride;
 }
 
 void QuantizeMultiplierSmallerThanOne(double double_multiplier,
diff --git a/nn/common/operations/Conv2D.cpp b/nn/common/operations/Conv2D.cpp
index e17c446..fbb20a0 100644
--- a/nn/common/operations/Conv2D.cpp
+++ b/nn/common/operations/Conv2D.cpp
@@ -29,7 +29,8 @@
 bool convPrepare(const Shape& input,
                  const Shape& filter,
                  const Shape& bias,
-                 int32_t padding,
+                 int32_t padding_left, int32_t padding_right,
+                 int32_t padding_top, int32_t padding_bottom,
                  int32_t stride_width, int32_t stride_height,
                  Shape* output) {
     DCHECK_EQ(getNumberOfDimensions(input), 4);
@@ -46,19 +47,10 @@
     uint32_t filterHeight = getSizeOfDimension(filter, 1);
     uint32_t batches      = getSizeOfDimension(input, 0);
 
-    // Matching GetWindowedOutputSize in TensorFlow.
-    // TODO: changing this to explicit padding.
-    auto computeOutSize = [padding](uint32_t imageSize, uint32_t filterSize,
-                                    uint32_t stride) -> int {
-        return padding == kPaddingSame
-                   ? (imageSize + stride - 1) / stride
-                   : padding == kPaddingValid
-                         ? (imageSize - filterSize + stride) / stride
-                         : 0;
-    };
-
-    uint32_t outWidth = computeOutSize(width, filterWidth, stride_width);
-    uint32_t outHeight = computeOutSize(height, filterHeight, stride_height);
+    uint32_t outWidth = computeOutSize(width, filterWidth, stride_width,
+                                       padding_left, padding_right);
+    uint32_t outHeight = computeOutSize(height, filterHeight, stride_height,
+                                        padding_top, padding_bottom);
 
     output->type = input.type;
     output->dimensions = {batches, outHeight, outWidth, channels_out};
@@ -74,10 +66,8 @@
     uint32_t outWidth     = getSizeOfDimension(outputShape, 2);                 \
     uint32_t inDepth      = getSizeOfDimension(inputShape, 3);                  \
                                                                                 \
-    uint32_t paddingHeight =                                                    \
-            ComputePadding(stride_height, height, filterHeight, outHeight);     \
-    uint32_t paddingWidth =                                                     \
-            ComputePadding(stride_width, width, filterWidth, outWidth);         \
+    uint32_t paddingHeight = (uint32_t)padding_top;                             \
+    uint32_t paddingWidth = (uint32_t)padding_left;                             \
                                                                                 \
     Dims<4> im2colDim;                                                          \
     im2colDim.sizes[3] = (int)getSizeOfDimension(outputShape, 0);               \
@@ -104,7 +94,10 @@
 bool convFloat32(const float* inputData, const Shape& inputShape,
                  const float* filterData, const Shape& filterShape,
                  const float* biasData, const Shape& biasShape,
-                 int32_t padding, int32_t stride_width, int32_t stride_height, int32_t activation,
+                 int32_t padding_left, int32_t padding_right,
+                 int32_t padding_top, int32_t padding_bottom,
+                 int32_t stride_width, int32_t stride_height,
+                 int32_t activation,
                  float* outputData, const Shape& outputShape) {
 
     ANDROID_NN_CONV_PARAMETERS(float)
@@ -130,7 +123,10 @@
 bool convQuant8(const uint8_t* inputData, const Shape& inputShape,
                 const uint8_t* filterData, const Shape& filterShape,
                 const int32_t* biasData, const Shape& biasShape,
-                int32_t padding, int32_t stride_width, int32_t stride_height, int32_t activation,
+                int32_t padding_left, int32_t padding_right,
+                int32_t padding_top, int32_t padding_bottom,
+                int32_t stride_width, int32_t stride_height,
+                int32_t activation,
                 uint8_t* outputData, const Shape& outputShape) {
 
     ANDROID_NN_CONV_PARAMETERS(uint8_t)
diff --git a/nn/common/operations/DepthwiseConv2D.cpp b/nn/common/operations/DepthwiseConv2D.cpp
index 2b704ba..854e1ed 100644
--- a/nn/common/operations/DepthwiseConv2D.cpp
+++ b/nn/common/operations/DepthwiseConv2D.cpp
@@ -26,7 +26,8 @@
 bool depthwiseConvPrepare(const Shape& input,
                           const Shape& filter,
                           const Shape& bias,
-                          int32_t padding,
+                          int32_t padding_left, int32_t padding_right,
+                          int32_t padding_top, int32_t padding_bottom,
                           int32_t stride_width, int32_t stride_height,
                           Shape* output) {
     DCHECK_EQ(getNumberOfDimensions(input), 4);
@@ -43,18 +44,10 @@
     uint32_t filterHeight = getSizeOfDimension(filter, 1);
     uint32_t batches      = getSizeOfDimension(input, 0);
 
-    // Matching GetWindowedOutputSize in TensorFlow.
-    auto computeOutSize = [padding](uint32_t imageSize, uint32_t filterSize,
-                                    uint32_t stride) -> int {
-        return padding == kPaddingSame
-                   ? (imageSize + stride - 1) / stride
-                   : padding == kPaddingValid
-                         ? (imageSize - filterSize + stride) / stride
-                         : 0;
-    };
-
-    uint32_t outWidth = computeOutSize(width, filterWidth, stride_width);
-    uint32_t outHeight = computeOutSize(height, filterHeight, stride_height);
+    uint32_t outWidth = computeOutSize(width, filterWidth, stride_width,
+                                       padding_left, padding_right);
+    uint32_t outHeight = computeOutSize(height, filterHeight, stride_height,
+                                        padding_top, padding_bottom);
 
     output->type = input.type;
     output->dimensions = {batches, outHeight, outWidth, channels_out};
@@ -70,15 +63,15 @@
     uint32_t outHeight    = getSizeOfDimension(outputShape, 1);                 \
     uint32_t outWidth     = getSizeOfDimension(outputShape, 2);                 \
                                                                                 \
-    uint32_t paddingHeight =                                                    \
-            ComputePadding(stride_height, height, filterHeight, outHeight);     \
-    uint32_t paddingWidth =                                                     \
-            ComputePadding(stride_width, width, filterWidth, outWidth);
+    uint32_t paddingHeight = (uint32_t)padding_top;                             \
+    uint32_t paddingWidth = (uint32_t)padding_left;
 
 bool depthwiseConvFloat32(const float* inputData, const Shape& inputShape,
                           const float* filterData, const Shape& filterShape,
                           const float* biasData, const Shape& biasShape,
-                          int32_t padding, int32_t stride_width, int32_t stride_height,
+                          int32_t padding_left, int32_t padding_right,
+                          int32_t padding_top, int32_t padding_bottom,
+                          int32_t stride_width, int32_t stride_height,
                           int32_t depth_multiplier, int32_t activation,
                           float* outputData, const Shape& outputShape) {
 
@@ -102,7 +95,9 @@
 bool depthwiseConvQuant8(const uint8_t* inputData, const Shape& inputShape,
                          const uint8_t* filterData, const Shape& filterShape,
                          const int32_t* biasData, const Shape& biasShape,
-                         int32_t padding, int32_t stride_width, int32_t stride_height,
+                         int32_t padding_left, int32_t padding_right,
+                         int32_t padding_top, int32_t padding_bottom,
+                         int32_t stride_width, int32_t stride_height,
                          int32_t depth_multiplier, int32_t activation,
                          uint8_t* outputData, const Shape& outputShape) {
 
diff --git a/nn/common/operations/Pooling.cpp b/nn/common/operations/Pooling.cpp
index 14d011c..2ccae56 100644
--- a/nn/common/operations/Pooling.cpp
+++ b/nn/common/operations/Pooling.cpp
@@ -23,7 +23,8 @@
 namespace nn {
 
 bool genericPoolingPrepare(const Shape& input,
-                           int32_t padding,
+                           int32_t padding_left, int32_t padding_right,
+                           int32_t padding_top, int32_t padding_bottom,
                            int32_t stride_width, int32_t stride_height,
                            int32_t filter_width, int32_t filter_height,
                            Shape* output) {
@@ -35,37 +36,30 @@
     uint32_t height       = getSizeOfDimension(input, 1);
     uint32_t channels_out = getSizeOfDimension(input, 3);
 
-    // Matching GetWindowedOutputSize in TensorFlow.
-    auto computeOutSize = [padding](uint32_t imageSize, uint32_t filterSize,
-                                    uint32_t stride) -> int {
-        return padding == kPaddingSame
-                   ? (imageSize + stride - 1) / stride
-                   : padding == kPaddingValid
-                         ? (imageSize - filterSize + stride) / stride
-                         : 0;
-    };
-
-    uint32_t outWidth = computeOutSize(width, filter_width, stride_width);
-    uint32_t outHeight = computeOutSize(height, filter_height, stride_height);
+    uint32_t outWidth = computeOutSize(width, filter_width, stride_width,
+                                       padding_left, padding_right);
+    uint32_t outHeight = computeOutSize(height, filter_height, stride_height,
+                                        padding_top, padding_bottom);
 
     output->type = input.type;
     output->dimensions = {batches, outHeight, outWidth, channels_out};
     return true;
 }
 
+
 #define ANDROID_NN_POOLING_PARAMETERS                                           \
     uint32_t height       = getSizeOfDimension(inputShape, 1);                  \
     uint32_t width        = getSizeOfDimension(inputShape, 2);                  \
     uint32_t outHeight    = getSizeOfDimension(outputShape, 1);                 \
     uint32_t outWidth     = getSizeOfDimension(outputShape, 2);                 \
                                                                                 \
-    uint32_t paddingHeight =                                                    \
-            ComputePadding(stride_height, height, filter_height, outHeight);    \
-    uint32_t paddingWidth =                                                     \
-            ComputePadding(stride_width, width, filter_width, outWidth);
+    uint32_t paddingHeight = (uint32_t)padding_top;                             \
+    uint32_t paddingWidth = (uint32_t)padding_left;
 
 bool averagePoolFloat32(const float* inputData, const Shape& inputShape,
-                        int32_t padding, int32_t stride_width, int32_t stride_height,
+                        int32_t padding_left, int32_t padding_right,
+                        int32_t padding_top, int32_t padding_bottom,
+                        int32_t stride_width, int32_t stride_height,
                         int32_t filter_width, int32_t filter_height, int32_t activation,
                         float* outputData, const Shape& outputShape) {
 
@@ -85,7 +79,9 @@
 }
 
 bool averagePoolQuant8(const uint8_t* inputData, const Shape& inputShape,
-                       int32_t padding, int32_t stride_width, int32_t stride_height,
+                       int32_t padding_left, int32_t padding_right,
+                       int32_t padding_top, int32_t padding_bottom,
+                       int32_t stride_width, int32_t stride_height,
                        int32_t filter_width, int32_t filter_height, int32_t activation,
                        uint8_t* outputData, const Shape& outputShape) {
 
@@ -113,7 +109,9 @@
 }
 
 bool l2PoolFloat32(const float* inputData, const Shape& inputShape,
-                   int32_t padding, int32_t stride_width, int32_t stride_height,
+                   int32_t padding_left, int32_t padding_right,
+                   int32_t padding_top, int32_t padding_bottom,
+                   int32_t stride_width, int32_t stride_height,
                    int32_t filter_width, int32_t filter_height, int32_t activation,
                    float* outputData, const Shape& outputShape) {
 
@@ -133,7 +131,9 @@
 }
 
 bool maxPoolFloat32(const float* inputData, const Shape& inputShape,
-                    int32_t padding, int32_t stride_width, int32_t stride_height,
+                    int32_t padding_left, int32_t padding_right,
+                    int32_t padding_top, int32_t padding_bottom,
+                    int32_t stride_width, int32_t stride_height,
                     int32_t filter_width, int32_t filter_height, int32_t activation,
                     float* outputData, const Shape& outputShape) {
 
@@ -153,7 +153,9 @@
 }
 
 bool maxPoolQuant8(const uint8_t* inputData, const Shape& inputShape,
-                   int32_t padding, int32_t stride_width, int32_t stride_height,
+                   int32_t padding_left, int32_t padding_right,
+                   int32_t padding_top, int32_t padding_bottom,
+                   int32_t stride_width, int32_t stride_height,
                    int32_t filter_width, int32_t filter_height, int32_t activation,
                    uint8_t* outputData, const Shape& outputShape) {
 
diff --git a/nn/runtime/test/generated/models/avg_pool_float.model.cpp b/nn/runtime/test/generated/models/avg_pool_float.model.cpp
index d72ee60..f941795 100644
--- a/nn/runtime/test/generated/models/avg_pool_float.model.cpp
+++ b/nn/runtime/test/generated/models/avg_pool_float.model.cpp
@@ -5,14 +5,17 @@
   // Phase 1, operands
   auto op1 = model->addOperand(&type0);
   auto cons1 = model->addOperand(&type1);
+  auto pad0 = model->addOperand(&type1);
   auto act = model->addOperand(&type1);
   auto op3 = model->addOperand(&type0);
   // Phase 2, operations
   static int32_t cons1_init[] = {1};
   model->setOperandValue(cons1, cons1_init, sizeof(int32_t) * 1);
+  static int32_t pad0_init[] = {0};
+  model->setOperandValue(pad0, pad0_init, sizeof(int32_t) * 1);
   static int32_t act_init[] = {0};
   model->setOperandValue(act, act_init, sizeof(int32_t) * 1);
-  model->addOperation(ANEURALNETWORKS_AVERAGE_POOL_2D, {op1, cons1, cons1, cons1, cons1, cons1, act}, {op3});
+  model->addOperation(ANEURALNETWORKS_AVERAGE_POOL_2D, {op1, pad0, pad0, pad0, pad0, cons1, cons1, cons1, cons1, act}, {op3});
   // Phase 3, inputs and outputs
   model->setInputsAndOutputs(
     {op1},
diff --git a/nn/runtime/test/generated/models/avg_pool_quant8.model.cpp b/nn/runtime/test/generated/models/avg_pool_quant8.model.cpp
index 458077e..b53d8f9 100644
--- a/nn/runtime/test/generated/models/avg_pool_quant8.model.cpp
+++ b/nn/runtime/test/generated/models/avg_pool_quant8.model.cpp
@@ -5,14 +5,17 @@
   // Phase 1, operands
   auto op1 = model->addOperand(&type0);
   auto cons1 = model->addOperand(&type1);
+  auto pad0 = model->addOperand(&type1);
   auto act = model->addOperand(&type1);
   auto op3 = model->addOperand(&type0);
   // Phase 2, operations
   static int32_t cons1_init[] = {1};
   model->setOperandValue(cons1, cons1_init, sizeof(int32_t) * 1);
+  static int32_t pad0_init[] = {0};
+  model->setOperandValue(pad0, pad0_init, sizeof(int32_t) * 1);
   static int32_t act_init[] = {0};
   model->setOperandValue(act, act_init, sizeof(int32_t) * 1);
-  model->addOperation(ANEURALNETWORKS_AVERAGE_POOL_2D, {op1, cons1, cons1, cons1, cons1, cons1, act}, {op3});
+  model->addOperation(ANEURALNETWORKS_AVERAGE_POOL_2D, {op1, pad0, pad0, pad0, pad0, cons1, cons1, cons1, cons1, act}, {op3});
   // Phase 3, inputs and outputs
   model->setInputsAndOutputs(
     {op1},
diff --git a/nn/runtime/test/generated/models/conv_1_h3_w2_SAME.model.cpp b/nn/runtime/test/generated/models/conv_1_h3_w2_SAME.model.cpp
index 97dc907..b12ede5 100644
--- a/nn/runtime/test/generated/models/conv_1_h3_w2_SAME.model.cpp
+++ b/nn/runtime/test/generated/models/conv_1_h3_w2_SAME.model.cpp
@@ -5,7 +5,8 @@
   OperandType type1(Type::TENSOR_FLOAT32, {1, 8, 8, 3});
   OperandType type4(Type::TENSOR_FLOAT32, {1});
   // Phase 1, operands
-  auto b4 = model->addOperand(&type0);
+  auto pad0 = model->addOperand(&type0);
+  auto pad1 = model->addOperand(&type0);
   auto b5 = model->addOperand(&type0);
   auto b6 = model->addOperand(&type0);
   auto b7 = model->addOperand(&type0);
@@ -14,8 +15,10 @@
   auto op0 = model->addOperand(&type3);
   auto op1 = model->addOperand(&type4);
   // Phase 2, operations
-  int32_t b4_init[] = {1};
-  model->setOperandValue(b4, b4_init, sizeof(int32_t) * 1);
+  int32_t pad0_init[] = {0};
+  model->setOperandValue(pad0, pad0_init, sizeof(int32_t) * 1);
+  int32_t pad1_init[] = {1};
+  model->setOperandValue(pad1, pad1_init, sizeof(int32_t) * 1);
   int32_t b5_init[] = {1};
   model->setOperandValue(b5, b5_init, sizeof(int32_t) * 1);
   int32_t b6_init[] = {1};
@@ -26,7 +29,7 @@
   model->setOperandValue(op0, op0_init, sizeof(float) * 18);
   float op1_init[] = {0};
   model->setOperandValue(op1, op1_init, sizeof(float) * 1);
-  model->addOperation(ANEURALNETWORKS_CONV_2D, {op2, op0, op1, b4, b5, b6, b7}, {op3});
+  model->addOperation(ANEURALNETWORKS_CONV_2D, {op2, op0, op1, pad0, pad1, pad1, pad1, b5, b6, b7}, {op3});
   // Phase 3, inputs and outputs
   model->setInputsAndOutputs(
     {op2},
diff --git a/nn/runtime/test/generated/models/conv_1_h3_w2_VALID.model.cpp b/nn/runtime/test/generated/models/conv_1_h3_w2_VALID.model.cpp
index ca427fe..198db9b 100644
--- a/nn/runtime/test/generated/models/conv_1_h3_w2_VALID.model.cpp
+++ b/nn/runtime/test/generated/models/conv_1_h3_w2_VALID.model.cpp
@@ -5,7 +5,7 @@
   OperandType type1(Type::TENSOR_FLOAT32, {1, 8, 8, 3});
   OperandType type4(Type::TENSOR_FLOAT32, {1});
   // Phase 1, operands
-  auto b4 = model->addOperand(&type0);
+  auto pad0 = model->addOperand(&type0);
   auto b5 = model->addOperand(&type0);
   auto b6 = model->addOperand(&type0);
   auto b7 = model->addOperand(&type0);
@@ -14,8 +14,8 @@
   auto op0 = model->addOperand(&type3);
   auto op1 = model->addOperand(&type4);
   // Phase 2, operations
-  int32_t b4_init[] = {2};
-  model->setOperandValue(b4, b4_init, sizeof(int32_t) * 1);
+  int32_t pad0_init[] = {0};
+  model->setOperandValue(pad0, pad0_init, sizeof(int32_t) * 1);
   int32_t b5_init[] = {1};
   model->setOperandValue(b5, b5_init, sizeof(int32_t) * 1);
   int32_t b6_init[] = {1};
@@ -26,7 +26,7 @@
   model->setOperandValue(op0, op0_init, sizeof(float) * 18);
   float op1_init[] = {0};
   model->setOperandValue(op1, op1_init, sizeof(float) * 1);
-  model->addOperation(ANEURALNETWORKS_CONV_2D, {op2, op0, op1, b4, b5, b6, b7}, {op3});
+  model->addOperation(ANEURALNETWORKS_CONV_2D, {op2, op0, op1, pad0, pad0, pad0, pad0, b5, b6, b7}, {op3});
   // Phase 3, inputs and outputs
   model->setInputsAndOutputs(
     {op2},
diff --git a/nn/runtime/test/generated/models/conv_3_h3_w2_SAME.model.cpp b/nn/runtime/test/generated/models/conv_3_h3_w2_SAME.model.cpp
index 6c6b8ae..a3ae5a5 100644
--- a/nn/runtime/test/generated/models/conv_3_h3_w2_SAME.model.cpp
+++ b/nn/runtime/test/generated/models/conv_3_h3_w2_SAME.model.cpp
@@ -4,7 +4,8 @@
   OperandType type2(Type::TENSOR_FLOAT32, {3, 3, 2, 3});
   OperandType type3(Type::TENSOR_FLOAT32, {3});
   // Phase 1, operands
-  auto b4 = model->addOperand(&type0);
+  auto pad0 = model->addOperand(&type0);
+  auto pad1 = model->addOperand(&type0);
   auto b5 = model->addOperand(&type0);
   auto b6 = model->addOperand(&type0);
   auto b7 = model->addOperand(&type0);
@@ -13,8 +14,10 @@
   auto op0 = model->addOperand(&type2);
   auto op1 = model->addOperand(&type3);
   // Phase 2, operations
-  int32_t b4_init[] = {1};
-  model->setOperandValue(b4, b4_init, sizeof(int32_t) * 1);
+  int32_t pad0_init[] = {0};
+  model->setOperandValue(pad0, pad0_init, sizeof(int32_t) * 1);
+  int32_t pad1_init[] = {1};
+  model->setOperandValue(pad1, pad1_init, sizeof(int32_t) * 1);
   int32_t b5_init[] = {1};
   model->setOperandValue(b5, b5_init, sizeof(int32_t) * 1);
   int32_t b6_init[] = {1};
@@ -25,7 +28,7 @@
   model->setOperandValue(op0, op0_init, sizeof(float) * 54);
   float op1_init[] = {0, 0, 0};
   model->setOperandValue(op1, op1_init, sizeof(float) * 3);
-  model->addOperation(ANEURALNETWORKS_CONV_2D, {op2, op0, op1, b4, b5, b6, b7}, {op3});
+  model->addOperation(ANEURALNETWORKS_CONV_2D, {op2, op0, op1, pad0, pad1, pad1, pad1, b5, b6, b7}, {op3});
   // Phase 3, inputs and outputs
   model->setInputsAndOutputs(
     {op2},
diff --git a/nn/runtime/test/generated/models/conv_3_h3_w2_VALID.model.cpp b/nn/runtime/test/generated/models/conv_3_h3_w2_VALID.model.cpp
index 5809b9e..58e4e2e 100644
--- a/nn/runtime/test/generated/models/conv_3_h3_w2_VALID.model.cpp
+++ b/nn/runtime/test/generated/models/conv_3_h3_w2_VALID.model.cpp
@@ -5,7 +5,7 @@
   OperandType type3(Type::TENSOR_FLOAT32, {3, 3, 2, 3});
   OperandType type4(Type::TENSOR_FLOAT32, {3});
   // Phase 1, operands
-  auto b4 = model->addOperand(&type0);
+  auto pad0 = model->addOperand(&type0);
   auto b5 = model->addOperand(&type0);
   auto b6 = model->addOperand(&type0);
   auto b7 = model->addOperand(&type0);
@@ -14,8 +14,8 @@
   auto op0 = model->addOperand(&type3);
   auto op1 = model->addOperand(&type4);
   // Phase 2, operations
-  int32_t b4_init[] = {2};
-  model->setOperandValue(b4, b4_init, sizeof(int32_t) * 1);
+  int32_t pad0_init[] = {0};
+  model->setOperandValue(pad0, pad0_init, sizeof(int32_t) * 1);
   int32_t b5_init[] = {1};
   model->setOperandValue(b5, b5_init, sizeof(int32_t) * 1);
   int32_t b6_init[] = {1};
@@ -26,7 +26,7 @@
   model->setOperandValue(op0, op0_init, sizeof(float) * 54);
   float op1_init[] = {0, 0, 0};
   model->setOperandValue(op1, op1_init, sizeof(float) * 3);
-  model->addOperation(ANEURALNETWORKS_CONV_2D, {op2, op0, op1, b4, b5, b6, b7}, {op3});
+  model->addOperation(ANEURALNETWORKS_CONV_2D, {op2, op0, op1, pad0, pad0, pad0, pad0, b5, b6, b7}, {op3});
   // Phase 3, inputs and outputs
   model->setInputsAndOutputs(
     {op2},
diff --git a/nn/runtime/test/generated/models/depthwise_conv.model.cpp b/nn/runtime/test/generated/models/depthwise_conv.model.cpp
index fe99375..1807efa 100644
--- a/nn/runtime/test/generated/models/depthwise_conv.model.cpp
+++ b/nn/runtime/test/generated/models/depthwise_conv.model.cpp
@@ -4,7 +4,7 @@
   OperandType type1(Type::TENSOR_FLOAT32, {1, 8, 8, 3});
   OperandType type3(Type::TENSOR_FLOAT32, {3});
   // Phase 1, operands
-  auto b4 = model->addOperand(&type0);
+  auto pad0 = model->addOperand(&type0);
   auto b5 = model->addOperand(&type0);
   auto b6 = model->addOperand(&type0);
   auto b7 = model->addOperand(&type0);
@@ -14,8 +14,8 @@
   auto op0 = model->addOperand(&type2);
   auto op1 = model->addOperand(&type3);
   // Phase 2, operations
-  int32_t b4_init[] = {1};
-  model->setOperandValue(b4, b4_init, sizeof(int32_t) * 1);
+  int32_t pad0_init[] = {0};
+  model->setOperandValue(pad0, pad0_init, sizeof(int32_t) * 1);
   int32_t b5_init[] = {1};
   model->setOperandValue(b5, b5_init, sizeof(int32_t) * 1);
   int32_t b6_init[] = {1};
@@ -28,7 +28,7 @@
   model->setOperandValue(op0, op0_init, sizeof(float) * 3);
   float op1_init[] = {0, 0, 0};
   model->setOperandValue(op1, op1_init, sizeof(float) * 3);
-  model->addOperation(ANEURALNETWORKS_DEPTHWISE_CONV_2D, {op2, op0, op1, b4, b5, b6, b7, b8}, {op3});
+  model->addOperation(ANEURALNETWORKS_DEPTHWISE_CONV_2D, {op2, op0, op1, pad0, pad0, pad0, pad0, b5, b6, b7, b8}, {op3});
   // Phase 3, inputs and outputs
   model->setInputsAndOutputs(
     {op2},
diff --git a/nn/runtime/test/generated/models/l2_pool_float.model.cpp b/nn/runtime/test/generated/models/l2_pool_float.model.cpp
index 6c86379..2307942 100644
--- a/nn/runtime/test/generated/models/l2_pool_float.model.cpp
+++ b/nn/runtime/test/generated/models/l2_pool_float.model.cpp
@@ -5,14 +5,17 @@
   // Phase 1, operands
   auto op1 = model->addOperand(&type0);
   auto cons1 = model->addOperand(&type1);
+  auto pad0 = model->addOperand(&type1);
   auto act = model->addOperand(&type1);
   auto op3 = model->addOperand(&type0);
   // Phase 2, operations
   static int32_t cons1_init[] = {1};
   model->setOperandValue(cons1, cons1_init, sizeof(int32_t) * 1);
+  static int32_t pad0_init[] = {0};
+  model->setOperandValue(pad0, pad0_init, sizeof(int32_t) * 1);
   static int32_t act_init[] = {0};
   model->setOperandValue(act, act_init, sizeof(int32_t) * 1);
-  model->addOperation(ANEURALNETWORKS_L2_POOL_2D, {op1, cons1, cons1, cons1, cons1, cons1, act}, {op3});
+  model->addOperation(ANEURALNETWORKS_L2_POOL_2D, {op1, pad0, pad0, pad0, pad0, cons1, cons1, cons1, cons1, act}, {op3});
   // Phase 3, inputs and outputs
   model->setInputsAndOutputs(
     {op1},
diff --git a/nn/runtime/test/generated/models/max_pool_float.model.cpp b/nn/runtime/test/generated/models/max_pool_float.model.cpp
index ee6555b..e5925db 100644
--- a/nn/runtime/test/generated/models/max_pool_float.model.cpp
+++ b/nn/runtime/test/generated/models/max_pool_float.model.cpp
@@ -5,14 +5,17 @@
   // Phase 1, operands
   auto op1 = model->addOperand(&type0);
   auto cons1 = model->addOperand(&type1);
+  auto pad0 = model->addOperand(&type1);
   auto act = model->addOperand(&type1);
   auto op3 = model->addOperand(&type0);
   // Phase 2, operations
   static int32_t cons1_init[] = {1};
   model->setOperandValue(cons1, cons1_init, sizeof(int32_t) * 1);
+  static int32_t pad0_init[] = {0};
+  model->setOperandValue(pad0, pad0_init, sizeof(int32_t) * 1);
   static int32_t act_init[] = {0};
   model->setOperandValue(act, act_init, sizeof(int32_t) * 1);
-  model->addOperation(ANEURALNETWORKS_MAX_POOL_2D, {op1, cons1, cons1, cons1, cons1, cons1, act}, {op3});
+  model->addOperation(ANEURALNETWORKS_MAX_POOL_2D, {op1, pad0, pad0, pad0, pad0, cons1, cons1, cons1, cons1, act}, {op3});
   // Phase 3, inputs and outputs
   model->setInputsAndOutputs(
     {op1},
diff --git a/nn/runtime/test/generated/models/mobilenet_224_gender_basic_fixed.model.cpp b/nn/runtime/test/generated/models/mobilenet_224_gender_basic_fixed.model.cpp
index d035324..4d7192d 100644
--- a/nn/runtime/test/generated/models/mobilenet_224_gender_basic_fixed.model.cpp
+++ b/nn/runtime/test/generated/models/mobilenet_224_gender_basic_fixed.model.cpp
@@ -35,6 +35,8 @@
   OperandType type29(Type::TENSOR_FLOAT32, {64, 1, 1, 64});
   OperandType type26(Type::TENSOR_FLOAT32, {64});
   // Phase 1, operands
+  auto pad0 = model->addOperand(&type0);
+  auto pad1 = model->addOperand(&type0);
   auto b87 = model->addOperand(&type0);
   auto b88 = model->addOperand(&type0);
   auto b89 = model->addOperand(&type0);
@@ -254,6 +256,10 @@
   auto op83 = model->addOperand(&type33);
   auto op84 = model->addOperand(&type34);
   // Phase 2, operations
+  int32_t pad0_init[] = {0};
+  model->setOperandValue(pad0, pad0_init, sizeof(int32_t) * 1);
+  int32_t pad1_init[] = {1};
+  model->setOperandValue(pad1, pad1_init, sizeof(int32_t) * 1);
   int32_t b87_init[] = {1};
   model->setOperandValue(b87, b87_init, sizeof(int32_t) * 1);
   int32_t b88_init[] = {2};
@@ -628,35 +634,35 @@
   model->setOperandValue(op83, op83_init, sizeof(float) * 11);
   float op84_init[] = {0.0984852f, 0.672424f, 0.392549f, -0.262182f, 0.303914f, -0.0118188f, 0.027943f, 0.0164078f, 0.200583f, -0.808626f, 0.234772f, -0.0253635f, -0.198519f, -0.176824f, -0.580674f, 0.0681573f, -0.0134279f, 0.172173f, -0.284882f, -0.0895141f, 0.0142356f, -0.0479431f, 0.0736678f, 0.00298977f, 0.152355f, -0.0370715f, 0.463201f, 0.0146613f, 0.0971624f, -0.0791196f, 0.556621f, -0.00950762f, 0.0160531f, 0.091037f, 0.376353f, -0.0996081f, -0.0418334f, -0.427482f, -0.202679f, -0.197079f, 0.021873f, -0.105617f, 0.36447f, 0.389277f, 0.0429815f, 0.0480496f, -0.170086f, -0.191548f, -0.237921f, 0.155838f, -0.100796f, 0.0539355f, 0.103154f, 0.0441985f, -0.20672f, 0.358565f, -0.105794f, -0.635905f, 0.193301f, 0.112419f, -0.184668f, 0.157954f, -0.301095f, -0.153072f, -0.0535615f, -0.0661999f, -0.197056f, -0.0835003f, -0.074932f, -0.111766f, -0.356266f, 0.649165f, -0.0527003f, -0.0597135f, 0.109839f, -0.270809f, 0.0333183f, -0.211454f, 0.0594729f, -0.166949f, 0.21057f, 0.224925f, -0.222835f, -0.0178217f, 0.127268f, 0.229248f, 0.262987f, 0.0318244f, 0.293201f, -0.361254f, -0.0624992f, -0.0696259f, 0.0456531f, -0.0287401f, 0.0863351f, -0.106142f, 0.81137f, 0.305728f, 0.398482f, -0.0190696f, -0.133965f, -0.223757f, -0.153242f, -0.261303f, 0.111363f, -0.113733f, 0.0028724f, -0.0878969f, 0.0498853f, -0.000613516f, -0.0819123f, -0.0154599f, -0.0938842f, -0.108295f, 0.340323f, -0.139579f, -0.117066f, 0.145283f, -0.106254f, 0.201248f, -0.152479f, 0.162457f, -0.0751263f, 0.00127508f, -0.0218281f, 0.126278f, -0.100075f, 0.426783f, -0.108719f, 0.207569f, -0.327427f, 0.277309f, 0.0404061f, -0.334901f, 0.154047f, -0.287619f, 0.0161922f, -0.00054208f, -0.233675f, 0.564603f, 0.201628f, 0.0510375f, -0.16502f, -0.0155493f, -0.125359f, -0.0996153f, 0.0133961f, -0.492208f, 0.109118f, -0.136327f, 0.0252329f, 0.0556799f, -0.196804f, -0.0612012f, -0.0392273f, 0.133385f, 0.253763f, -0.208136f, -0.00507434f, -0.0584744f, 0.0855089f, -0.00321895f, -0.209376f, 0.0618401f, 0.0129248f, 
-0.130721f, -0.168413f, 0.122652f, 0.0927544f, -0.180775f, -0.0463842f, -0.626248f, -0.00596579f, 0.0822374f, -0.254325f, -0.361624f, 0.778701f, -0.0705549f, 0.40832f, 0.0932269f, 0.10348f, 0.258843f, -0.117135f, 0.131713f, -0.457018f, -0.364692f, 0.0741725f, 0.168267f, 0.0904773f, -0.333243f, 0.18358f, -0.0407786f, -0.0115824f, 0.304328f, 0.177285f, 0.206312f, -0.503914f, 0.310439f, 0.533919f, 0.0925376f, 0.449889f, -0.45417f, 0.89017f, -0.00580558f, 0.317744f, 0.0176692f, -0.0267303f, -0.0657997f, -0.333455f, -0.0895455f, -0.0203959f, -0.329956f, 0.0542947f, -0.03533f, 0.0496151f, 0.145015f, 0.135449f, -0.239986f, -0.442413f, -0.0922021f, 0.396803f, 0.0695849f, -0.00921835f, 0.405834f, 0.477558f, 0.08952f, 0.101425f, -0.0264703f, -0.124621f, 0.070554f, -0.101953f, 0.224768f, 0.021384f, 0.293433f, -0.297231f, 0.0841252f, 0.0290684f, -0.211267f, -0.116215f, 0.433678f, -0.626231f, -0.139838f, 0.0290375f, -0.24486f, 0.282119f, -0.486426f, -0.402424f, -0.561959f, -0.450933f, 0.0501238f, -0.194682f, -0.231145f, -0.210372f, -0.0802564f, -0.170723f, -0.248902f, -0.0122576f, 0.0776341f, 0.197615f, 0.094212f, 0.0318287f, -0.237544f, 0.135516f, -0.537321f, -0.0906906f, 0.172587f, 0.179816f, 0.0792088f, 0.354531f, 0.0801259f, 0.0145845f, -0.14874f, 0.0367363f, -0.0733148f, -0.125755f, -0.252037f, -0.101672f, -0.14809f, -0.188341f, -0.264003f, -0.201581f, -0.0605842f, 0.0142779f, -0.322517f, -0.130978f, 0.301363f, -0.276394f, 0.0248554f, -0.168732f, 0.158651f, 0.150037f, -0.0472578f, 0.241238f, -0.109832f, -0.500172f, -0.0574687f, 0.143137f, 0.177313f, 0.0489008f, 0.24142f, -0.0742049f, -0.103464f, -0.0383113f, -0.0148622f, -0.101849f, 0.0425005f, 0.0543708f, 0.0710147f, 0.169901f, 0.304119f, 0.180413f, -0.330647f, -0.250029f, 0.0651902f, 0.173465f, -0.475872f, 0.393697f, 0.147345f, -0.00802343f, -0.0545821f, -0.119f, -0.0282713f, 0.0414947f, 0.0618215f, -0.132909f, 0.480818f, -0.124287f, -0.0484199f, -0.344362f, 0.071471f, 0.267047f, -0.279627f, -0.289336f, 0.0609794f, 
0.339502f, -0.0956702f, -0.361749f, -0.0153208f, -0.102628f, 0.0936787f, -0.130392f, 0.348396f, 0.200636f, -0.249164f, -0.177583f, -0.0716032f, 0.118703f, 0.123365f, -0.0366422f, 0.231096f, 0.0022177f, 0.128202f, 0.222367f, -0.176409f, -0.153065f, -0.0287899f, -0.355792f, -0.543125f, 0.177245f, 0.116598f, 0.0451388f, -0.0286715f, -0.174033f, 0.476808f, 0.298325f, -0.0593149f, -0.0491401f, 0.0263619f, 0.0565123f, 0.0500395f, -0.40961f, -0.0481743f, -0.0744737f, -0.050528f, -0.428685f, -0.0457881f, -0.105794f, 0.0951161f, -0.299268f, -0.229566f, -0.206985f, -0.0780657f, -0.0322681f, 0.266195f, -0.0781984f, -0.598814f, -0.280207f, 0.0516518f, -0.0447187f, 0.0980521f, 0.0216666f, 0.038809f, 0.147272f, -0.357397f, 0.0504251f, 0.126596f, -0.0935991f, -0.142778f, 0.0864683f, -0.116768f, -0.164657f, -0.380078f, 0.00184015f, -0.0684899f, -0.134349f, 0.184285f, -0.281853f, -0.185581f, 0.347765f, 0.301739f, -0.17311f, -0.0586592f, -0.253355f, 0.135704f, -0.025141f, -0.398732f, 0.176819f, 0.164295f, -0.0964961f, 0.235867f, -0.162969f, -0.365092f, 0.0342f, 0.305977f, 0.192868f, -0.150942f, 0.132645f, 0.220341f, -0.158242f, -0.168888f, 0.103491f, -0.1672f, 0.0127892f, -0.0176947f, 0.230234f, -0.129157f, -0.319789f, -0.188887f, 0.469657f, 0.0599872f, 0.173128f, 0.207658f, -0.257826f, 0.422512f, 0.0304435f, -0.0700446f, 0.00292699f, -0.254277f, -0.0987592f, 0.0906241f, -0.234816f, 0.030083f, -0.00973596f, 0.120037f, -0.317601f, -0.12708f, 0.102184f, 0.0740557f, 0.191923f, 0.215419f, 0.090792f, -0.416807f, -0.211088f, -0.0667573f, -0.042666f, 0.00698668f, -0.187608f, 0.11397f, 0.0282127f, -0.0646227f, -0.0786383f, 0.338181f, -0.158486f, -0.0404435f, -0.148313f, 0.129857f, 0.036822f, 0.214085f, 0.0271965f, 0.0712011f, -0.0142654f, 0.21793f, -0.101845f, -0.0134659f, -0.386899f, -0.253225f, -0.201138f, -0.168f, -0.111886f, 0.149919f, -0.252716f, -0.312013f, -0.494531f, 0.20132f, 0.1455f, -0.0390248f, -0.2497f, 0.0187322f, 0.212352f, 0.176346f, -0.0186768f, -0.0587664f, 0.140535f, 
0.130711f, -0.048937f, -0.0333832f, 0.146999f, -0.0536035f, -0.210655f, 0.277771f, 0.136683f, -0.458041f, 0.106529f, -0.152398f, -0.0336699f, 0.151721f, -0.0533765f, -0.168961f, 0.175815f, -0.24888f, 0.0907924f, -0.0133408f, 0.175644f, -0.0246879f, -0.00687254f, 0.185182f, -0.256385f, -0.163355f, -0.256984f, -0.315761f, -0.181824f, -0.0306672f, 0.152588f, -0.0713595f, -0.0721906f, -0.332328f, -0.322698f, -0.00929737f, 0.0818944f, 0.0742352f, -0.166805f, 0.0944738f, -0.167636f, 0.0871255f, 0.0792785f, 0.0354259f, 0.293364f, 0.215322f, 0.272799f, -0.0492312f, -0.269483f, -0.220346f, -0.0881883f, -0.105395f, 0.170322f, 0.0396378f, 0.0702321f, 0.0164758f, -0.0229642f, -0.120222f, -0.00534489f, 0.138123f, -0.141178f, 0.00600586f, 0.0114309f, 0.160046f, -0.0782422f, -0.221657f, -0.222359f, -0.0160572f, -0.0427344f, -0.0939852f, 0.19013f, 0.128755f, 0.0826387f, 0.0959137f, -0.121338f, 0.116419f, -0.0815084f, -0.148231f, -0.102396f, -0.302046f, -0.0136386f, 0.146457f, -0.273797f, -0.0766018f, 0.103427f, -0.0941844f, -0.236219f, -0.106905f, 0.188707f, -0.119065f, -0.109619f, -0.376718f, -0.250552f, -0.119213f, -0.0698239f, 0.0548951f, -0.0984231f, -0.274015f, 0.0116218f, -0.0560431f, -0.0176495f, 0.106143f, 0.191658f, -0.291245f, 0.198666f, -0.1415f, 0.121305f, 0.00787936f, -0.161106f, -0.0559996f, -0.025235f, -0.227444f, 0.124586f, 0.153714f, 0.0339968f, -0.0791643f, -0.204395f, -0.139891f, -0.136988f, -0.182275f, 0.059441f, -0.135392f, -0.0206536f, -0.177236f, -0.0461415f, 0.0707632f, 0.279827f, -0.00538458f, -0.0227107f, -0.0780397f, 0.0654234f, -0.00893195f, -0.111956f, -0.298613f, -0.35016f, 0.0515563f, -0.257037f, 0.139683f, -0.0568245f, -0.18912f, 0.054686f, 0.230304f, 0.0682762f, -0.104554f, -0.267018f, -0.00695182f, -0.42745f, -0.118246f, 0.240312f, -0.0283745f, -0.0410208f, -0.204045f, 0.0536799f, 0.158019f, -0.217282f, -0.255996f, -0.130733f, -0.0754242f, -0.205957f, -0.042236f, -0.237091f, -0.0547223f, 0.318243f, 0.114416f, -0.135642f, -0.0316242f, -0.347453f, 
0.101281f, 0.012845f, -0.212307f, 0.135502f, -0.217902f, -0.0520036f, -0.169676f, 0.0155753f, -0.378887f, -0.120698f, 0.278682f, -0.208085f, 0.0188473f, -0.167479f, 0.3823f, -0.262327f, 0.0653896f, 0.0837105f, -0.175588f, -0.172008f, 0.279217f, 0.109674f, -0.0610411f, -0.261709f, -0.12329f, -0.214598f, 0.0449085f, 0.0995378f, 0.123743f, -0.20637f, 0.0336271f, 0.179009f, -0.103686f, -0.0319235f, 0.0991055f, -0.15149f, 0.11167f, -0.0458526f, -0.216373f, 0.0944096f, 0.257391f, -0.138348f, -0.0792016f, 0.236858f, -0.177544f, 0.00179313f, -0.0475954f, -0.325425f, -0.443611f, 0.269018f, 0.0823181f, -0.189893f, -0.00310759f, 0.38809f, -0.0297613f, -0.0772569f, 0.117555f, -0.0146545f, 0.24652f, -0.124915f, -0.0226053f, -0.00351846f, 0.123489f, 0.374272f, 0.00411916f, -0.0530559f, -0.459548f, -0.068397f, 0.351112f, 0.20717f, -0.169705f, -0.191568f, -0.0149611f, -0.200327f, -0.0366789f, -0.000831896f, 0.0329813f, 0.0928899f, -0.217083f, -0.1015f, -0.108356f, -0.155276f, -0.224902f, -0.161009f, -0.195741f, -0.196345f, 0.0696936f, -0.0903938f, 0.0346839f, 0.0342342f, 0.108802f, 0.0224264f, -0.116966f, -0.0868056f, 0.41173f, -0.139741f, 0.0816925f, 0.0206459f, -0.0857387f, -0.0889723f, 0.0252684f, 0.122225f, 0.281325f, -0.0975601f, -0.0890313f, -0.202703f, -0.232747f, -0.16356f, -0.109103f, -0.000627448f, -0.281988f, 0.133017f, 0.199669f, -0.305566f, -0.298914f, -0.120265f, -0.0757179f, -0.298619f, 0.183222f, -0.142981f, 0.0896671f, 0.175904f, 0.0175519f, -0.16538f, -0.0520677f, -0.0670482f, -0.00336189f, -0.223379f, -0.0609024f, -0.27571f, -0.0763393f, 0.295597f, 0.00951529f, 0.127656f, 0.323394f, 0.321615f, 0.184786f, 0.120165f, 0.0270615f, 0.232585f, -0.378135f, 0.00705762f, -0.152686f, -0.25289f, 0.0996134f, 0.0515323f, 0.0147273f, -0.746546f, -0.161453f, 0.0907721f, 0.015299f, -0.0842891f, -0.0432424f, -0.523789f, -0.271467f, 0.0367782f, -0.24899f, 0.207861f, 0.0755162f, 0.173391f, 0.222453f, -0.113516f, -0.24137f, 0.100824f, -0.0606065f, 0.00548546f, 0.0558509f, 
-0.0575758f, 0.245029f, 0.178345f, 0.143839f, -0.244105f, -0.172561f, -0.338056f, -0.127348f, 0.31021f, -0.115489f, -0.0672434f, -0.0625748f, -0.180578f, -0.227379f, 0.11236f, 0.10313f, 0.166569f, 0.158167f, -0.0638876f, 0.161796f, 0.0371649f, -0.328319f, -0.336786f, -0.211983f, 0.0293737f, -0.115773f, 0.00937545f, -0.246018f, 0.35231f, 0.195708f, 0.0478146f, -0.103948f, -0.106301f, 0.211148f, 0.379093f, 0.416716f, -0.174341f, -0.0187881f, -0.510292f, 0.0914475f, 0.0227487f, -0.100022f, -0.141782f, -0.0911218f, 0.0475971f, -0.244332f, -0.0995312f, -0.209683f, 0.0118146f, -0.333827f, 0.0784702f, 0.152256f, -0.0219116f, 0.138452f, -0.0222356f, -0.0565779f, 0.158486f, -0.24482f, -0.00680468f, 0.197839f, 0.0154492f, -0.00997484f, -0.221046f, -0.0717462f, -0.174674f, -0.121365f, -0.225961f, 0.0249583f, -0.012674f, -0.0461503f, 0.326105f, 0.159991f, 0.0172039f, -0.33672f, -0.0282964f, 0.340149f, -0.102354f, -0.32463f, 0.0968813f, 0.142316f, -0.0457009f, -0.449412f, 0.010723f, 0.234789f, -0.0556804f, 0.13699f, 0.346469f, 0.0485624f, 0.158279f, -0.064993f, -0.103656f, -0.058024f, -0.160934f, -0.154483f, -0.208516f, 0.171658f, -0.105681f, -0.0694062f, -0.430509f, 0.0281458f, -0.145734f, 0.00672611f, -0.263346f, 0.398998f, -0.107815f, 0.0612669f, 0.229766f, -0.0120696f, 0.221093f, -0.172262f, 0.0251312f, -0.0730561f, -0.316371f, 0.188185f, -0.046221f, -0.199885f, 0.119867f, 0.218638f, -0.329465f, -0.324384f, -0.141421f, 0.0441414f, 0.0694141f, 0.255176f, 0.0668514f, -0.0346173f, -0.00232405f, 0.194615f, 0.281005f, -0.0199741f, 0.035436f, 0.130112f, -0.0913306f, 0.329646f, -0.0752686f, 0.109595f, 0.0791733f, -0.0692778f, 0.305223f, -0.203791f, 0.124741f, 0.235692f, 0.0366247f, 0.0102351f, 0.0518547f, -0.0949171f, 0.149521f, -0.0588182f, -0.0129089f, -0.232551f, -0.0145967f, -0.0175136f, -0.0871548f, 0.0947253f, 0.0243044f, -0.0628619f, -0.0492656f, -0.299999f, -0.217482f, -0.140209f, -0.0874081f, 0.0812857f, 0.0233994f, -0.389155f, 0.200308f, -0.131029f, 0.299059f, 
-0.110117f, -0.289113f, -0.0365339f, -0.233167f, -0.108743f, -0.261932f, -0.159673f, -0.106053f, 0.199852f, -0.106121f, 0.0759607f, 0.472064f, -0.163932f, -0.31763f, 0.0104898f, -0.0210451f, -0.0787518f, 0.155917f, 0.102614f, -0.0425018f, 0.104758f, 0.0857415f, -0.155914f, 0.239264f, -0.144245f, 0.0138479f, -0.196582f, -0.225119f, 0.119061f, 0.0667646f, 0.0661826f, -0.190163f, 0.146226f, 0.0857013f, -0.39394f, 0.00735058f, 0.17735f, 0.244409f, 0.06301f, 0.169556f, -0.178062f, 0.12862f, 0.416925f, 0.0967157f, -0.00742805f, -0.000430865f, 0.151077f, -0.135911f, -0.259045f, -0.367174f, -0.13922f, 0.23333f, -0.219153f, -0.101108f, -0.108457f, -0.0457349f, -0.0666834f, 0.222968f, 0.0223704f, 0.0866147f, 0.0902093f, 0.141006f, 0.230202f, 0.0586954f, 0.26749f, 0.0443342f, 0.424975f, -0.159726f, -0.16713f, -0.10332f, 0.126135f, 0.125221f, 0.220837f, -0.121812f, -0.20649f, 0.161173f, -0.0608088f, 0.751833f, 0.177478f, -0.107548f, 0.0103489f, -0.212986f, 0.177713f, -0.353158f, -0.0872167f, 0.126602f, 0.0343864f, 0.0116791f, 0.0520713f, 0.00361525f, 0.194245f, -0.114742f, 0.020037f, -0.114726f, 0.126897f, 0.039019f, 0.445555f, -0.0193091f, 0.0637067f, -0.128501f, -0.0345904f, 0.0988956f, 0.178154f, -0.0259671f, -0.0257689f, -0.091025f, 0.0684302f, 0.131971f, 0.0459931f, 0.278118f, -0.0376653f, -0.156248f, -0.0789752f, -0.160455f, 0.353474f, 0.0503084f, -0.194132f, 0.124681f, -0.0915903f, 0.117273f, 0.0232574f, -0.0337332f, 0.0175596f, -0.203004f, 0.132872f, -0.200533f, 0.111507f, 0.452312f, 0.0770053f, 0.201455f, -0.267448f, 0.0539831f, -0.187271f, -0.0896206f, -0.0906231f, 0.174122f, 0.00151794f, -0.44301f, -0.038296f, -0.179995f, -0.0717158f, -0.136493f, -0.163935f, -0.0208884f, 0.361374f, 0.219308f, -0.0691815f, 0.20319f, -0.0567725f, 0.272091f, 0.228685f, 0.0701021f, -0.122392f, -0.280011f, 0.0584825f, -0.054271f, 0.00700558f, 0.0727541f, 0.0566045f, -0.197892f, 0.024467f, -0.192888f, -0.0819263f, -0.0201281f, 0.248612f, 0.0373216f, 0.0864792f, 0.283391f, 0.189835f, 
0.0781828f, -0.0364776f, -0.00516293f, -0.136433f, -0.0563264f, 0.184467f, -0.103843f, 0.143026f, 0.153189f, -0.0523581f, 0.213201f, 0.144222f, -0.368817f, 0.150695f, 0.0357488f, 0.44351f, -0.167891f, 0.289154f, -0.227813f, -0.321075f, 0.0209248f, 0.00428332f, 0.0969976f, -0.108528f, 0.0284129f, 0.0762366f, 0.107821f, 0.119178f, 0.213134f, -0.061735f, -0.172152f, 0.161251f, -0.0093495f, 0.32946f, 0.219039f, -0.287137f, -0.0450728f, -0.0452836f, -0.212494f, -0.107495f, -0.188338f, 0.0459348f, -0.0377559f, -0.0839975f, -0.00428969f, -0.0232576f, 0.0289588f, 0.164926f, -0.0425852f, -0.0543849f, 0.11673f, 0.158114f, 0.159165f, 0.0941762f, -0.0546047f, 0.237165f, -0.0486095f, -0.146102f, -0.196763f, -0.300198f, 0.0103576f, -0.309314f, -0.122579f, -0.147076f, -0.252579f, -0.00101733f, -0.288208f, -0.22112f, 0.311517f, -0.112453f, 0.129476f, -0.324617f, -0.122931f, -0.123137f, 0.000923043f, -0.117103f, 0.0235433f, -0.271816f, 0.141558f, -0.057682f, -0.120304f, -0.106198f, 0.0265892f, 0.254805f, 0.173984f, -0.266907f, 0.0103511f, -0.0901396f, -0.164973f, -0.226945f, 0.0137655f, 0.0133529f, -0.151525f, 0.256784f, 0.132003f, 0.24828f, -0.0647662f, 0.143638f, 0.0600663f, -0.18841f, -0.0538587f, 0.293896f, -0.103811f, -0.389949f, 0.073149f, 0.102529f, 0.00501293f, 0.315232f, 0.231291f, -0.176493f, -0.140862f, -0.133106f, 0.0161411f, -0.210105f, -0.125995f, -0.0174128f, 0.00283163f, -0.16739f, -0.00931349f, -0.26984f, -0.315777f, -0.248987f, -0.144968f, 0.166966f, 0.169746f, -0.220713f, -0.0312972f, 0.156324f, -0.0407818f, -0.139328f, -0.440265f, -0.0850991f, 0.188168f, 0.106694f, 0.154731f, 0.159212f, -0.200953f, -0.037807f, 0.36218f, -0.123355f, 0.396598f, -0.036044f, -0.071492f, 0.189546f, -0.115796f, -0.0827317f, -0.0544022f, -0.222727f, 0.0347514f, -0.0295377f, 0.101372f, -0.0471416f, 0.218466f, -0.0403298f, -0.0743297f, -0.0607741f, -0.0177818f, -0.0976377f, 0.182365f, -0.26278f, 0.0619466f, 0.335466f, -0.039433f, -0.214658f, -0.00413142f, 0.118605f, -0.0871774f, 
-0.013047f, -0.0139049f, -0.0566686f, -0.0765434f, -0.0230406f, -0.10839f, -0.164259f, -0.110342f, -0.0567072f, 0.0359454f, 0.161352f, -0.271192f, 0.0673184f, -0.0400687f, -0.0291176f, -0.0505437f, -0.167017f, -0.244246f, 0.0127467f, -0.188325f, -0.171548f, 0.0819252f, -0.184143f, -0.0280647f, -0.175439f, -0.0298673f, 0.0928547f, -0.114129f, 0.160686f, 0.124866f, -0.0799349f, -0.0461555f, -0.0569828f, -0.07544f, -0.254674f, 0.200119f, 0.395232f, -0.104755f, -0.0705698f, -0.168159f, -0.363371f, -0.28949f, -0.157786f, 0.0803677f, 0.253256f, 0.183266f, -0.098531f, -0.217913f, -0.277753f, -0.0412087f, 0.0929791f, 0.0416587f, -0.393095f, -0.194569f, 0.115027f, 0.00374004f, -0.230992f, 0.178052f, 0.11554f, -0.112156f, -0.136296f, 0.147941f, 0.160641f, -0.0988691f, -0.156255f, -0.183889f, -0.198891f, 0.0487718f, -0.10064f, 0.0618672f, 0.129453f, 0.245253f, -0.0609817f, -0.0423283f, 0.209125f, -0.00764558f, -0.207093f, 0.090427f, 0.344761f, -0.210035f, 0.0190305f, 0.177226f, -0.478754f, 0.102217f, -0.0815951f, 0.184152f, -0.0708748f, -0.288034f, 0.212553f, -0.00799922f, 0.0402337f, -0.0634731f, -0.0157662f, 0.0380505f, 0.297157f, -0.102219f, 0.270945f, -0.0364033f, -0.223053f, -0.313967f, -0.256362f, 0.00947424f, 0.1584f, 0.0508195f, 0.127063f, 0.161099f, -0.176547f, -0.06178f, 0.28597f, 0.0661753f, 0.115497f, -0.266217f, 0.207641f, 0.288968f, -0.147556f, 0.00127605f, 0.25902f, 0.0888035f, -0.172818f, 0.0106958f, -0.259761f, -0.0210704f, 0.11259f, 0.118585f, -0.131654f, 0.0889418f, -0.141959f, 0.0686276f, 0.119914f, -0.315549f, -0.106624f, 0.356014f, 0.0856996f, -0.121974f, -0.0188067f, -0.150179f, -0.0971979f, -0.15594f, 0.15098f, -0.111329f, -0.258716f, -0.390928f, 0.105128f, -0.170122f, -0.114675f, -0.119159f, 0.0893049f, 0.0829629f, -0.174787f, -0.020651f, 0.059119f, -0.120192f, -0.192243f, 0.22854f, 0.0524963f, -0.17855f, 0.129937f, 0.0181097f, 0.151171f, -0.104886f, -0.195503f, 0.166139f, -0.132779f, -0.0952646f, -0.238117f, -0.120478f, 0.250843f, 0.0198936f, 
-0.16349f, 0.00793157f, -0.139775f, 0.0621653f, 0.102649f, 0.0159358f, -0.173693f, 0.000424589f, 0.0499097f, -0.213681f, 0.000829991f, 0.0470139f, -0.104087f, -0.104971f, 0.154429f, -0.0514045f, 0.021679f, 0.0637851f, 0.0263575f, -0.0773971f, 0.0792207f, 0.0289109f, -0.190421f, -0.114429f, -0.0980095f, 0.0697401f, -0.128251f, 0.0884518f, 0.215688f, -0.503879f, -0.0634976f, -0.0256412f, 0.26015f, -0.082886f, 0.0134682f, -0.1982f, 0.203755f, 0.237095f, -0.178199f, -0.110421f, -0.123333f, 0.0505219f, 0.0872408f, 0.134674f, -0.151414f, -0.20904f, 0.0162698f, -0.0281258f, -0.0696107f, 0.0384256f, -0.316446f, -0.0999238f, -0.0215575f, -0.16317f, -0.422117f, -0.401993f, 0.0318225f, 0.179985f, 0.0327708f, 0.237595f, 0.00156168f, 0.190076f, 0.0242173f, -0.149916f, -0.0292071f, -0.0634601f, -0.353369f, 0.191598f, 0.268846f, 0.0919142f, -0.0838139f, 0.041469f, 0.195228f, -0.304184f, -0.0524774f, 0.0257366f, -0.0669865f, 0.0712212f, -0.165418f, -0.0485386f, 0.135066f, 0.178966f, -0.315931f, -0.160149f, 0.198644f, 0.117106f, -0.130927f, -0.254406f, -0.151422f, 0.0451171f, 0.0421164f, -0.120035f, 0.0517401f, 0.0150269f, 0.0749926f, 0.268662f, -0.213943f, -0.0568393f, 0.122747f, 0.154528f, -0.0203424f, -0.0819281f, -0.201227f, 0.155029f, -0.285458f, -0.081893f, 0.141846f, 0.12811f, 0.17107f, -0.262672f, -0.112772f, -0.186101f, -0.257387f, -0.169401f, -0.263488f, 0.370405f, -0.462936f, -0.188147f, -0.332351f, 0.0125391f, 0.215156f, -0.513405f, -0.289543f, -0.443262f, -0.0851796f, -0.157583f, -0.22628f, 0.0640168f, 0.0691075f, 0.169624f, -0.0885214f, 0.0678881f, -0.178388f, 0.11724f, -0.0459048f, 0.0283356f, 0.135743f, 0.21108f, 0.197132f, -0.298021f, -0.127577f, -0.0454851f, -0.295987f, -0.113867f, 0.0862119f, -0.0201072f, -0.290276f, 0.0147507f, -0.247042f, 0.420167f, -0.376847f, 0.203432f, -0.158043f, 0.0810597f, -0.566199f, 0.218187f, -0.318247f, -0.400209f, -0.219316f, -0.0448023f, -0.357235f, -0.26102f, -0.303588f, 0.00072887f, -0.205802f, -0.175228f, -0.0968084f, 
-0.0754828f, 0.047413f, 0.131296f, -0.112247f, 0.183774f, 0.0840453f, -0.0239575f, 0.0597386f, 0.0678879f, 0.208753f, -0.381256f, 0.0543436f, 0.0230677f, -0.275275f, 0.197361f, 0.318349f, 0.230976f, -0.0475114f, 0.0923948f, 0.270554f, 0.0193927f, -0.0845898f, -0.074267f, -0.185875f, 0.329959f, -0.00671641f, -0.19907f, -0.208328f, 0.089362f, 0.0418336f, -0.054819f, 0.138547f, 0.318673f, 0.300046f, -0.149823f, -0.146389f, -0.178329f, 0.260826f, -0.0446269f, 0.22329f, 0.0233915f, -0.408598f, -0.210239f, -0.0839846f, -0.210073f, -0.203917f, 0.333065f, 0.0654963f, -0.110438f, 0.0976637f, -0.171706f, -0.0396424f, 0.196927f, 0.107167f, -0.526091f, -0.272819f, -0.0621517f, -0.360691f, -0.0803204f, -0.0894648f, -0.215345f, 0.0738301f, -0.165395f, -0.505362f, -0.510371f, 0.495546f, 0.281085f, -0.349988f, -0.102217f, 0.29955f, 0.101695f, 0.216987f, 0.220804f, -0.264158f, 0.208857f, 0.490646f, -0.235616f, 0.0697848f, -0.0828848f, -0.0676367f, -0.137579f, 0.0101326f, -0.0646971f, -0.245946f, -0.0958766f, -0.274682f, -0.467907f, 0.0970127f, -0.254426f, 0.03253f, 0.0122821f, -0.0339391f, -0.364834f, 0.164962f, -0.180429f, -0.378582f, -0.00960021f, -0.228418f, -0.0264938f, 0.0259812f, -0.295185f, -0.357585f, -0.380096f, 0.0525056f, -0.233331f, 0.13387f, 0.105961f, 0.243387f, 0.258494f, 0.0371437f, 0.0632561f, 0.110992f, -0.208983f, -0.185678f, 0.292418f, 0.0286353f, -0.00408131f, 0.102217f, -0.136994f, 0.0622825f, 0.395963f, -0.348133f, -0.223302f, 0.273627f, -0.193556f, 0.338264f, -0.159462f, -0.491361f, 0.161778f, 0.156135f, 0.0641617f, 0.0999903f, -0.529532f, -0.285966f, -0.135576f, 0.236579f, -0.130519f, -0.0764042f, 0.493032f, -0.0883978f, 0.150384f, 0.106229f, 0.02975f, 0.318695f, 0.265394f, 0.130223f, -0.0455514f, -0.115114f, 0.107133f, -0.250837f, -0.0966183f, -0.123644f, 0.342727f, -0.0986773f, -0.0127951f, -0.434297f, -0.0685123f, 0.0869741f, -0.269507f, 0.396272f, 0.305987f, 0.145169f, -0.250147f, 0.0425825f, -0.27173f, -0.0943471f, -0.401917f, -0.0518213f, 0.220465f, 
-0.00776957f, -0.308669f, 0.151246f, 0.040435f, -0.246938f, 0.161326f, -0.657021f, -0.029663f, -0.156154f, -0.0231731f, -0.0567502f, -0.149723f, -0.157589f, -0.0150168f, 0.143093f, 0.0119803f, -0.282194f, 0.00609295f, 0.133509f, -0.238658f, 0.469585f, -0.15437f, 0.123749f, -0.438739f, -0.235357f, 0.196981f, -0.178078f, 0.179464f, -0.360465f, 0.146581f, -0.0722637f, -0.359168f, -0.0213761f, -0.0719016f, 0.228349f, 0.00872679f, -0.0720084f, 0.0129347f, -0.0606057f, 0.209901f, 0.261428f, 0.318637f, 0.0668506f, 0.262152f, -0.188527f, 0.017398f, 0.238802f, -0.119243f, -0.335925f, -0.0708997f, 0.0131007f, -0.183616f, 0.139393f, 0.229401f, -0.0356139f, 0.117969f, -0.0359544f, -0.0976415f, -0.261919f, -0.132652f, 0.0511542f, 0.0250922f, -0.202336f, 0.156581f, -0.21006f, -0.164616f, 0.49608f, -0.143283f, 0.0167009f, 0.0382558f, -0.192059f, -0.0298086f, 0.16408f, 0.0327906f, -0.0112998f, 0.107964f, -0.805638f, 0.341425f, 0.104876f, -0.379418f, -0.16812f, 0.0873235f, -0.591176f, 0.347932f, -0.092094f, -0.0951583f, -0.079231f, -0.102f, 0.430467f, -0.0629909f, 0.103386f, -0.394243f, 0.0921294f, -0.303268f, -0.0878409f, 0.0222568f, 0.177541f, 0.05269f, -0.245371f, -0.394972f, 0.169095f, -0.0322228f, 0.0854907f, -0.277685f, 0.169834f, -0.157112f, -0.125601f, -0.123642f, 0.287326f, -0.11461f, -0.0400871f, 0.0935002f, -0.239499f, -0.00406349f, 0.116467f, 0.195647f, 0.0169376f, 0.108949f, -0.256211f, 0.199251f, -0.22503f, 0.183724f, -0.0459538f, -0.0573185f, -0.135267f, -0.17563f, -0.105615f, -0.216777f, 0.136895f, -0.131041f, 0.143448f, 0.116321f, 0.341659f, 0.04663f, -0.138582f, 0.113484f, 0.000281706f, 0.183075f, -0.205364f, 0.217528f, -0.0325774f, -0.0481017f, -0.00686094f, -0.13989f, 0.0995296f, -0.476637f, 0.120914f, 0.178213f, 0.11095f, -0.154424f, 0.169363f, 0.288232f, 0.105104f, 0.440652f, 0.0404736f, -0.163574f, -0.0724218f, -0.174028f, 0.137715f, 0.255176f, -0.133188f, -0.10359f, -0.150963f, -0.0850369f, 0.162774f, -0.00694466f, -0.523244f, -0.400547f, -0.11478f, 
0.0923003f, 0.00922158f, 0.165169f, 0.114364f, 0.396211f, 0.0621255f, 0.413189f, 0.0759307f, -0.148507f, 0.243803f, 0.066523f, -0.0649491f, 0.0867938f, 0.134912f, -0.44741f, 0.133082f, 0.0237098f, -0.327549f, -0.0172026f, -0.104394f, -0.204443f, 0.0804548f, -0.25669f, -0.280141f, 0.184742f, -0.182915f, -0.301567f, -0.132653f, -0.362342f, -0.0867399f, -0.248574f, 0.018783f, -0.0144377f, -0.193732f, -0.0568637f, 0.0212203f, 0.145462f, -0.04467f, 0.188485f, -0.0192423f, -0.162427f, -0.431459f, -0.316196f, -0.0197834f, 0.142554f, 0.161446f, -0.204556f, 0.10123f, 0.136505f, -0.0421437f, 0.0382004f, -0.0105015f, 0.26352f, 0.128504f, 0.220373f, -0.0459283f, -0.0794771f, 0.126873f, 0.102329f, 0.160555f, -0.344226f, 0.11844f, -0.152884f, -0.369259f, -0.732194f, -0.285659f, 0.27297f, 0.0434638f, -0.115029f, -0.178296f, -0.010171f, -0.108856f, 0.243398f, -0.120003f, 0.0617609f, -0.0377697f, 0.0882623f, 0.317397f, -0.142634f, 0.0613519f, 0.0625693f, 0.29804f, -0.276065f, -0.283755f, -0.0586926f, 0.0609932f, 0.172328f, 0.380084f, 0.0817355f, -0.0889897f, 0.16975f, -0.0727911f, 0.558122f, 0.129139f, 0.0967012f, -0.00808779f, -0.281368f, 0.229454f, -0.0657459f, 0.110639f, 0.0990761f, -0.0734602f, -0.124961f, 0.120193f, 0.0117927f, -0.00164934f, -0.068704f, 0.0934271f, -0.150389f, 0.267866f, 0.111924f, 0.22073f, -0.0826743f, 0.0181881f, 0.164808f, 0.08553f, 0.0064627f, -0.100066f, -0.196847f, -0.260685f, -0.161078f, -0.0889612f, 0.267343f, -0.183189f, 0.099878f, 0.206179f, -0.134037f, -0.0753274f, 0.073361f, 0.123856f, -0.11014f, -0.23651f, -0.079332f, -0.179564f, -0.0953625f, 0.0816014f, -0.0153009f, 0.0216921f, -0.214616f, 0.0721763f, -0.337629f, 0.113998f, 0.30383f, 0.213949f, 0.0748996f, -0.154083f, 0.082343f, 0.0915755f, -0.165324f, -0.161256f, -0.0732527f, -0.0771391f, -0.179746f, 0.148814f, -0.229269f, -0.00684043f, -0.0877735f, -0.232043f, 0.0358457f, 0.0860737f, -0.016937f, 0.0052483f, 0.203986f, -0.0327027f, 0.0828824f, 0.0515511f, -0.0446207f, 0.0495584f, 0.06504f, 
-0.0502581f, -0.0989093f, -0.242931f, -0.161322f, 0.0412978f, 0.0882053f, -0.0868244f, 0.0333411f, 0.0033292f, 0.0956053f, 0.224343f, -0.0605414f, 0.200487f, 0.139677f, 0.0741737f, 0.131144f, -0.0156217f, 0.119855f, -0.0672591f, 0.0646749f, 0.0212678f, -0.0612522f, 0.127438f, 0.165742f, 0.149455f, 0.120228f, 0.245928f, -0.536011f, -0.0221017f, 0.0210271f, 0.196356f, 0.0401149f, -0.00733165f, -0.270396f, -0.00968083f, -0.0709557f, -0.120717f, 0.140489f, 0.0935343f, -0.172696f, 0.301435f, -0.0935873f, -0.0353977f, 0.0539549f, -0.0338224f, -0.239903f, -0.0209894f, -0.17114f, 0.267786f, 0.20251f, -0.0980189f, -0.04852f, -0.207071f, -0.253257f, -0.0564701f, -0.0518127f, -0.0537929f, -0.390881f, 0.0470064f, 0.0306878f, 0.104422f, 0.150282f, 0.0117885f, -0.093087f, -0.0377776f, -0.0618607f, -0.0869537f, 0.137726f, 0.0903727f, 0.0346921f, 0.0111f, -0.241767f, -0.201946f, 0.09471f, -0.156048f, -0.0978701f, -0.239229f, -0.0308635f, -0.122071f, -0.433478f, -0.0514787f, -0.182472f, -0.181954f, 0.0416541f, -0.0883368f, 0.157402f, -0.462445f, -0.103609f, -0.160994f, -0.0133393f, -0.096508f, 0.100438f, 0.00418135f, -0.0122206f, 0.172408f, 0.0437795f, -0.172367f, -0.0189107f, -0.0304423f, 0.0780768f, -0.116228f, -0.0305065f, -0.0440305f, 0.00286725f, -0.157059f, 0.132452f, -0.101883f, -0.138483f, 0.00723927f, 0.0342281f, 0.206677f, -0.0770022f, 0.0227105f, -0.111016f, -0.170921f, 0.055846f, 0.246527f, -0.142554f, -0.380108f, -0.0346903f, 0.138706f, -0.176424f, 0.112018f, 0.0435032f, -0.127998f, -0.169885f, -0.0509104f, -0.0870096f, -0.535699f, -0.0638343f, -0.0311837f, 0.078099f, -0.0342351f, 0.0749799f, 0.3883f, -0.154977f, 0.224178f, 0.0550229f, 0.107375f, 0.33049f, 0.0969202f, 0.0756623f, -0.233299f, -0.104361f, 0.442374f, 0.0844492f, 0.0705411f, -0.140545f, -0.0663961f, -0.0728755f, -0.0621244f, -0.0819853f, -0.112193f, -0.176114f, -0.0938139f, -0.214228f, 0.0190762f, -0.213562f, -0.190233f, 0.133314f, -0.148665f, 0.0915799f, 0.187216f, -0.284974f, 0.00733069f, 0.0156916f, 
0.015107f, 0.0318654f, 0.346104f, -0.124227f, 0.137341f, 0.0592528f, -0.387351f, -0.221991f, 0.360592f, -0.0931174f, -0.0492834f, 0.199867f, -0.0852204f, 0.150399f, 0.0413833f, 0.235906f, -0.0706518f, -0.166653f, -0.0586646f, -0.109711f, -0.0823073f, 0.257342f, -0.224644f, -0.430506f, -0.105588f, 0.0250296f, -0.042311f, -0.0996558f, -0.115579f, -0.286667f, -0.154598f, -0.137322f, 0.176363f, 0.088216f, 0.161978f, 0.255623f, -0.0123169f, -0.00387241f, -0.318043f, -0.21894f, -0.412465f, -0.415855f, 0.255024f, 0.361044f, 0.0300423f, -0.119439f, 0.0657428f, -0.238206f, 0.340391f, 0.201176f, 0.102395f, 0.216324f, -0.121531f, 0.265799f, 0.0327802f, 0.194072f, -0.0792337f, 0.456093f, 0.0971469f, -0.0170099f, -0.0294468f, -0.318039f, -0.242527f, -0.1083f, 0.295943f, -0.0284033f, -0.156199f, -0.20311f, -0.075091f, 0.528829f, -0.165604f, 0.0532403f, 0.0505752f, -0.413034f, 0.175453f, -0.0970195f, -0.029351f, 0.103333f, 0.271092f, 0.0511197f, -0.182135f, 0.112932f, -0.32439f, 0.294457f, -0.0818895f, 0.0914322f, 0.185025f, 0.0543957f, -0.0167575f, 0.504046f, -0.0647153f, -0.166975f, 0.0248059f, 0.0379442f, 0.0980366f, -0.178135f, 0.143822f, 0.45732f, -0.0912428f, -0.179338f, 0.349726f, -0.0596313f, -0.299861f, 0.112567f, 0.0666395f, 0.345303f, 0.164124f, -0.00265316f, -0.0732412f, 0.348079f, -0.249414f, 0.0465329f, 0.0693596f, 0.0799214f, 0.000123214f, 0.180679f, 0.0912923f, -0.300121f, -0.288428f, 0.150135f, 0.112936f, 0.104813f, -0.0555879f, -0.00205972f, -0.0251151f, -0.0788264f, -0.016778f, -0.110796f, -0.083048f, -0.212734f, 0.288568f, -0.114228f, -0.113358f, 0.110789f, 0.118645f, 0.133466f, -0.0298552f, -0.241374f, 0.157257f, 0.0861554f, -0.0909277f, 0.00156177f, 0.106539f, -0.209104f, -0.106974f, 0.0203283f, -0.18111f, -0.311602f, -0.00371812f, 0.0711113f, -0.206721f, 0.286076f, 0.139713f, 0.116621f, 0.182792f, 0.0246107f, -0.17972f, 0.041917f, 0.0724635f, 0.266344f, 0.0989191f, 0.0723898f, 0.0257298f, 0.104898f, 0.0681826f, -0.0704781f, 0.00212139f, -0.363547f, 
0.0274255f, -0.106295f, -0.363965f, 0.127051f, -0.0575343f, -0.200952f, -0.0666189f, -0.139465f, -0.0171747f, 0.253794f, -0.258602f, -0.166356f, -0.107649f, 0.267331f, 0.104521f, -0.020921f, -0.0780469f, 0.125002f, 0.0202556f, -0.0899181f, -0.126559f, -0.297855f, 0.121539f, -0.0671643f, -0.0444782f, 0.334408f, 0.0882725f, -0.0879492f, -0.00277655f, -0.0616985f, 0.0564236f, -0.11618f, -0.22836f, 0.112953f, 0.176082f, 0.09988f, -0.00635589f, -0.114234f, 0.241135f, 0.0966775f, -0.0961065f, 0.137214f, -0.0832349f, -0.54299f, -0.2335f, -0.033801f, -0.11505f, -0.366386f, -0.238099f, -0.0951656f, 0.263106f, 0.129292f, -0.14762f, 0.0700404f, 0.0195349f, -0.286227f, -0.273371f, 0.0587288f, -0.257152f, -0.136248f, -0.13336f, -0.248086f, 0.273973f, -0.302625f, -0.085841f, -0.0839808f, -0.130464f, 0.252972f, -0.0415149f, -0.0695038f, -0.091557f, -0.262375f, -0.0645785f, 0.188566f, -0.202261f, -0.112712f, 0.00631479f, 0.0132917f, -0.0130675f, -0.302285f, 0.0556928f, -0.0211812f, -0.0555546f, 0.0291112f, 0.168815f, 0.143654f, -0.00564186f, -0.0614248f, -0.0939664f, 0.0959667f, -0.209823f, -0.103889f, -0.206011f, -0.0394793f, 0.0545815f, -0.0348762f, -0.132075f, -0.0489917f, -0.177563f, -0.164591f, -0.0174372f, -0.276844f, -0.132214f, -0.236278f, -0.0614254f, -0.230962f, -0.409367f, -0.08959f, 0.182197f, -0.341314f, -0.0645579f, -0.0161434f, -0.166644f, -0.0784324f, -0.387537f, 0.236617f, -0.115318f, -0.11315f, -0.109817f, -0.0949309f, -0.253715f, -0.254404f, -0.0876592f, -0.243118f, -0.219172f, 0.0341202f, 0.0203343f, 0.0435131f, -0.0266338f, 0.140304f, -0.20669f, -0.130739f, 0.0213059f, 0.182793f, -0.0711616f, -0.165651f, -0.212373f, -0.0972764f, -0.284464f, -0.0834676f, -0.129573f, -0.133945f, 0.0684521f, -0.133913f, 0.165726f, -0.176839f, -0.0940447f, -0.145421f, -0.0471074f, 0.00950449f, 0.0308656f, -0.00761046f, -0.19397f, -0.161623f, 0.10975f, -0.0398157f, 0.00168868f, 0.0626417f, -0.118388f, -0.134741f, -0.243707f, 0.146451f, -0.165854f, 0.0585878f, 0.0269307f, 0.163195f, 
-0.197056f, 0.0438799f, -0.152668f, -0.178631f, -0.167278f, 0.0258257f, -0.22958f, -0.101918f, 0.0360034f, -0.165612f, -0.112482f, -0.419959f, -0.369384f, 0.0468117f, 0.202511f, 0.161559f, 0.0360435f, -0.211843f, 0.0480519f, -0.252478f, -0.0951382f, 0.100791f, -0.379245f, -0.129869f, -0.036501f, 0.0685223f, 0.0247177f, -0.0751386f, -0.12451f, 0.244585f, -0.0103249f, -0.346383f, -0.300614f, 0.230366f, -0.187795f, -0.0326416f, 0.0735751f, -0.0136039f, -0.0219528f, 0.0629145f, -0.0308739f, -0.101514f, -0.169444f, 0.058706f, -0.133274f, -0.200294f, -0.372511f, -0.214898f, -0.184366f, 0.253648f, -0.0362453f, 0.0618937f, 0.0838244f, -0.0386255f, 0.129191f, -0.147435f, -0.180809f, -0.0797491f, -0.286544f, -0.273005f, 0.116222f, -0.255255f, -0.0504643f, -0.0567216f, -0.0204081f, 0.206331f, -0.225266f, -0.211665f, -0.259216f, -0.0676753f, -0.176153f, 0.285802f, -0.00560349f, -0.0253936f, -0.182537f, -0.344487f, -0.341246f, -0.171879f, 0.24462f, 0.015354f, -0.0255803f, -0.0855239f, -0.151488f, -0.0329621f, 0.311794f, 0.0889872f, -0.142655f, -0.00124048f, 0.0175189f, 0.0459686f, 0.279491f, -0.237445f, 0.0570048f, -0.00665275f, -0.0558817f, 0.0731352f, 0.0291331f, 0.0918153f, 0.0276626f, -0.135103f, -0.303909f, 0.0283329f, -0.203482f, -0.0849922f, -0.284485f, -0.214908f, 0.0836636f, -0.219738f, 0.136157f, 0.0332432f, -0.143305f, 0.0283252f, -0.178703f, -0.0742534f, -0.153174f, 0.02235f, -0.0753622f, -0.210102f, -0.0915751f, -0.0189732f, -0.239039f, -0.135349f, -0.104589f, -0.0658414f, -0.183206f, -0.123006f, 0.0835748f, -0.0703047f, -0.207461f, -0.274129f, -0.225327f, -0.113485f, 0.13316f, 0.0295303f, -0.0958281f};
   model->setOperandValue(op84, op84_init, sizeof(float) * 2816);
-  model->addOperation(ANEURALNETWORKS_CONV_2D, {op86, op2, op1, b87, b88, b89, b90}, {op0});
-  model->addOperation(ANEURALNETWORKS_DEPTHWISE_CONV_2D, {op0, op29, op28, b91, b92, b93, b94, b95}, {op27});
-  model->addOperation(ANEURALNETWORKS_CONV_2D, {op27, op32, op31, b96, b97, b98, b99}, {op30});
-  model->addOperation(ANEURALNETWORKS_DEPTHWISE_CONV_2D, {op30, op35, op34, b100, b101, b102, b103, b104}, {op33});
-  model->addOperation(ANEURALNETWORKS_CONV_2D, {op33, op38, op37, b105, b106, b107, b108}, {op36});
-  model->addOperation(ANEURALNETWORKS_DEPTHWISE_CONV_2D, {op36, op41, op40, b109, b110, b111, b112, b113}, {op39});
-  model->addOperation(ANEURALNETWORKS_CONV_2D, {op39, op44, op43, b114, b115, b116, b117}, {op42});
-  model->addOperation(ANEURALNETWORKS_DEPTHWISE_CONV_2D, {op42, op47, op46, b118, b119, b120, b121, b122}, {op45});
-  model->addOperation(ANEURALNETWORKS_CONV_2D, {op45, op50, op49, b123, b124, b125, b126}, {op48});
-  model->addOperation(ANEURALNETWORKS_DEPTHWISE_CONV_2D, {op48, op53, op52, b127, b128, b129, b130, b131}, {op51});
-  model->addOperation(ANEURALNETWORKS_CONV_2D, {op51, op56, op55, b132, b133, b134, b135}, {op54});
-  model->addOperation(ANEURALNETWORKS_DEPTHWISE_CONV_2D, {op54, op59, op58, b136, b137, b138, b139, b140}, {op57});
-  model->addOperation(ANEURALNETWORKS_CONV_2D, {op57, op62, op61, b141, b142, b143, b144}, {op60});
-  model->addOperation(ANEURALNETWORKS_DEPTHWISE_CONV_2D, {op60, op65, op64, b145, b146, b147, b148, b149}, {op63});
-  model->addOperation(ANEURALNETWORKS_CONV_2D, {op63, op68, op67, b150, b151, b152, b153}, {op66});
-  model->addOperation(ANEURALNETWORKS_DEPTHWISE_CONV_2D, {op66, op71, op70, b154, b155, b156, b157, b158}, {op69});
-  model->addOperation(ANEURALNETWORKS_CONV_2D, {op69, op74, op73, b159, b160, b161, b162}, {op72});
-  model->addOperation(ANEURALNETWORKS_DEPTHWISE_CONV_2D, {op72, op77, op76, b163, b164, b165, b166, b167}, {op75});
-  model->addOperation(ANEURALNETWORKS_CONV_2D, {op75, op80, op79, b168, b169, b170, b171}, {op78});
-  model->addOperation(ANEURALNETWORKS_DEPTHWISE_CONV_2D, {op78, op5, op4, b172, b173, b174, b175, b176}, {op3});
-  model->addOperation(ANEURALNETWORKS_CONV_2D, {op3, op8, op7, b177, b178, b179, b180}, {op6});
-  model->addOperation(ANEURALNETWORKS_DEPTHWISE_CONV_2D, {op6, op11, op10, b181, b182, b183, b184, b185}, {op9});
-  model->addOperation(ANEURALNETWORKS_CONV_2D, {op9, op14, op13, b186, b187, b188, b189}, {op12});
-  model->addOperation(ANEURALNETWORKS_DEPTHWISE_CONV_2D, {op12, op17, op16, b190, b191, b192, b193, b194}, {op15});
-  model->addOperation(ANEURALNETWORKS_CONV_2D, {op15, op20, op19, b195, b196, b197, b198}, {op18});
-  model->addOperation(ANEURALNETWORKS_DEPTHWISE_CONV_2D, {op18, op23, op22, b199, b200, b201, b202, b203}, {op21});
-  model->addOperation(ANEURALNETWORKS_CONV_2D, {op21, op26, op25, b204, b205, b206, b207}, {op24});
-  model->addOperation(ANEURALNETWORKS_AVERAGE_POOL_2D, {op24, b208, b209, b210, b211, b212, b213}, {op81});
-  model->addOperation(ANEURALNETWORKS_CONV_2D, {op81, op84, op83, b214, b215, b216, b217}, {op82});
+  model->addOperation(ANEURALNETWORKS_CONV_2D, {op86, op2, op1, pad0, pad1, pad0, pad1, b88, b89, b90}, {op0});
+  model->addOperation(ANEURALNETWORKS_DEPTHWISE_CONV_2D, {op0, op29, op28, pad1, pad1, pad1, pad1, b92, b93, b94, b95}, {op27});
+  model->addOperation(ANEURALNETWORKS_CONV_2D, {op27, op32, op31, pad0, pad0, pad0, pad0, b97, b98, b99}, {op30});
+  model->addOperation(ANEURALNETWORKS_DEPTHWISE_CONV_2D, {op30, op35, op34, pad0, pad1, pad0, pad1, b101, b102, b103, b104}, {op33});
+  model->addOperation(ANEURALNETWORKS_CONV_2D, {op33, op38, op37, pad0, pad0, pad0, pad0, b106, b107, b108}, {op36});
+  model->addOperation(ANEURALNETWORKS_DEPTHWISE_CONV_2D, {op36, op41, op40, pad1, pad1, pad1, pad1, b110, b111, b112, b113}, {op39});
+  model->addOperation(ANEURALNETWORKS_CONV_2D, {op39, op44, op43, pad0, pad0, pad0, pad0, b115, b116, b117}, {op42});
+  model->addOperation(ANEURALNETWORKS_DEPTHWISE_CONV_2D, {op42, op47, op46, pad0, pad1, pad0, pad1, b119, b120, b121, b122}, {op45});
+  model->addOperation(ANEURALNETWORKS_CONV_2D, {op45, op50, op49, pad0, pad0, pad0, pad0, b124, b125, b126}, {op48});
+  model->addOperation(ANEURALNETWORKS_DEPTHWISE_CONV_2D, {op48, op53, op52, pad1, pad1, pad1, pad1, b128, b129, b130, b131}, {op51});
+  model->addOperation(ANEURALNETWORKS_CONV_2D, {op51, op56, op55, pad0, pad0, pad0, pad0, b133, b134, b135}, {op54});
+  model->addOperation(ANEURALNETWORKS_DEPTHWISE_CONV_2D, {op54, op59, op58, pad0, pad1, pad0, pad1, b137, b138, b139, b140}, {op57});
+  model->addOperation(ANEURALNETWORKS_CONV_2D, {op57, op62, op61, pad0, pad0, pad0, pad0, b142, b143, b144}, {op60});
+  model->addOperation(ANEURALNETWORKS_DEPTHWISE_CONV_2D, {op60, op65, op64, pad1, pad1, pad1, pad1, b146, b147, b148, b149}, {op63});
+  model->addOperation(ANEURALNETWORKS_CONV_2D, {op63, op68, op67, pad0, pad0, pad0, pad0, b151, b152, b153}, {op66});
+  model->addOperation(ANEURALNETWORKS_DEPTHWISE_CONV_2D, {op66, op71, op70, pad1, pad1, pad1, pad1, b155, b156, b157, b158}, {op69});
+  model->addOperation(ANEURALNETWORKS_CONV_2D, {op69, op74, op73, pad0, pad0, pad0, pad0, b160, b161, b162}, {op72});
+  model->addOperation(ANEURALNETWORKS_DEPTHWISE_CONV_2D, {op72, op77, op76, pad1, pad1, pad1, pad1, b164, b165, b166, b167}, {op75});
+  model->addOperation(ANEURALNETWORKS_CONV_2D, {op75, op80, op79, pad0, pad0, pad0, pad0, b169, b170, b171}, {op78});
+  model->addOperation(ANEURALNETWORKS_DEPTHWISE_CONV_2D, {op78, op5, op4, pad1, pad1, pad1, pad1, b173, b174, b175, b176}, {op3});
+  model->addOperation(ANEURALNETWORKS_CONV_2D, {op3, op8, op7, pad0, pad0, pad0, pad0, b178, b179, b180}, {op6});
+  model->addOperation(ANEURALNETWORKS_DEPTHWISE_CONV_2D, {op6, op11, op10, pad1, pad1, pad1, pad1, b182, b183, b184, b185}, {op9});
+  model->addOperation(ANEURALNETWORKS_CONV_2D, {op9, op14, op13, pad0, pad0, pad0, pad0, b187, b188, b189}, {op12});
+  model->addOperation(ANEURALNETWORKS_DEPTHWISE_CONV_2D, {op12, op17, op16, pad0, pad1, pad0, pad1, b191, b192, b193, b194}, {op15});
+  model->addOperation(ANEURALNETWORKS_CONV_2D, {op15, op20, op19, pad0, pad0, pad0, pad0, b196, b197, b198}, {op18});
+  model->addOperation(ANEURALNETWORKS_DEPTHWISE_CONV_2D, {op18, op23, op22, pad1, pad1, pad1, pad1, b200, b201, b202, b203}, {op21});
+  model->addOperation(ANEURALNETWORKS_CONV_2D, {op21, op26, op25, pad0, pad0, pad0, pad0, b205, b206, b207}, {op24});
+  model->addOperation(ANEURALNETWORKS_AVERAGE_POOL_2D, {op24, pad0, pad0, pad0, pad0, b209, b210, b211, b212, b213}, {op81});
+  model->addOperation(ANEURALNETWORKS_CONV_2D, {op81, op84, op83, pad0, pad0, pad0, pad0, b215, b216, b217}, {op82});
   model->addOperation(ANEURALNETWORKS_LOGISTIC, {op82}, {op85});
   // Phase 3, inputs and outputs
   model->setInputsAndOutputs(
diff --git a/nn/runtime/test/specs/avg_pool_float.mod.py b/nn/runtime/test/specs/avg_pool_float.mod.py
index 6134739..7d84d7f 100644
--- a/nn/runtime/test/specs/avg_pool_float.mod.py
+++ b/nn/runtime/test/specs/avg_pool_float.mod.py
@@ -2,9 +2,10 @@
 model = Model()
 i1 = Input("op1", "TENSOR_FLOAT32", "{1, 2, 2, 1}") # input 0
 cons1 = Int32Scalar("cons1", 1)
+pad0 = Int32Scalar("pad0", 0)
 act = Int32Scalar("act", 0)
 i3 = Output("op3", "TENSOR_FLOAT32", "{1, 2, 2, 1}") # output 0
-model = model.Operation("AVERAGE_POOL_2D", i1, cons1, cons1, cons1, cons1, cons1, act).To(i3)
+model = model.Operation("AVERAGE_POOL_2D", i1, pad0, pad0, pad0, pad0, cons1, cons1, cons1, cons1, act).To(i3)
 # Example 1. Input in operand 0,
 input0 = {i1: # input 0
           [1.0, 2.0, 3.0, 4.0]}
diff --git a/nn/runtime/test/specs/avg_pool_quant8.mod.py b/nn/runtime/test/specs/avg_pool_quant8.mod.py
index 8304c39..88da209 100644
--- a/nn/runtime/test/specs/avg_pool_quant8.mod.py
+++ b/nn/runtime/test/specs/avg_pool_quant8.mod.py
@@ -2,9 +2,10 @@
 model = Model()
 i1 = Input("op1", "TENSOR_QUANT8_ASYMM", "0.0f, 127.5f, {1, 2, 2, 1}")
 cons1 = Int32Scalar("cons1", 1)
+pad0 = Int32Scalar("pad0", 0)
 act = Int32Scalar("act", 0)
 o = Output("op3", "TENSOR_QUANT8_ASYMM", "0.0f, 127.5f, {1, 2, 2, 1}")
-model = model.Operation("AVERAGE_POOL_2D", i1, cons1, cons1, cons1, cons1, cons1, act).To(o)
+model = model.Operation("AVERAGE_POOL_2D", i1, pad0, pad0, pad0, pad0, cons1, cons1, cons1, cons1, act).To(o)
 
 # Example 1. Input in operand 0,
 input0 = {i1: # input 0
diff --git a/nn/runtime/test/specs/l2_pool_float.mod.py b/nn/runtime/test/specs/l2_pool_float.mod.py
index 65e8240..599b5f7 100644
--- a/nn/runtime/test/specs/l2_pool_float.mod.py
+++ b/nn/runtime/test/specs/l2_pool_float.mod.py
@@ -2,9 +2,10 @@
 model = Model()
 i1 = Input("op1", "TENSOR_FLOAT32", "{1, 2, 2, 1}") # input 0
 cons1 = Int32Scalar("cons1", 1)
+pad0 = Int32Scalar("pad0", 0)
 act = Int32Scalar("act", 0)
 i3 = Output("op3", "TENSOR_FLOAT32", "{1, 2, 2, 1}") # output 0
-model = model.Operation("L2_POOL_2D", i1, cons1, cons1, cons1, cons1, cons1, act).To(i3)
+model = model.Operation("L2_POOL_2D", i1, pad0, pad0, pad0, pad0, cons1, cons1, cons1, cons1, act).To(i3)
 # Example 1. Input in operand 0,
 input0 = {i1: # input 0
           [1.0, 2.0, 3.0, 4.0]}
diff --git a/nn/runtime/test/specs/max_pool_float.mod.py b/nn/runtime/test/specs/max_pool_float.mod.py
index f2f37fc..2c3ed90 100644
--- a/nn/runtime/test/specs/max_pool_float.mod.py
+++ b/nn/runtime/test/specs/max_pool_float.mod.py
@@ -2,9 +2,10 @@
 model = Model()
 i1 = Input("op1", "TENSOR_FLOAT32", "{1, 2, 2, 1}") # input 0
 cons1 = Int32Scalar("cons1", 1)
+pad0 = Int32Scalar("pad0", 0)
 act = Int32Scalar("act", 0)
 i3 = Output("op3", "TENSOR_FLOAT32", "{1, 2, 2, 1}") # output 0
-model = model.Operation("MAX_POOL_2D", i1, cons1, cons1, cons1, cons1, cons1, act).To(i3)
+model = model.Operation("MAX_POOL_2D", i1, pad0, pad0, pad0, pad0, cons1, cons1, cons1, cons1, act).To(i3)
 # Example 1. Input in operand 0,
 input0 = {i1: # input 0
           [1.0, 2.0, 3.0, 4.0]}