Merge "NNAPI: validate that FmqResultDatum padding is 0 -- runtime" into qt-dev
diff --git a/nn/common/ExecutionBurstServer.cpp b/nn/common/ExecutionBurstServer.cpp
index 7139899..9bdcfdb 100644
--- a/nn/common/ExecutionBurstServer.cpp
+++ b/nn/common/ExecutionBurstServer.cpp
@@ -121,10 +121,12 @@
     // package output shape data
     for (const auto& operand : outputShapes) {
         // package operand information
+        FmqResultDatum::OperandInformation info{};
+        info.isSufficient = operand.isSufficient;
+        info.numberOfDimensions = static_cast<uint32_t>(operand.dimensions.size());
+
         FmqResultDatum datum;
-        datum.operandInformation(
-                {/*.isSufficient=*/operand.isSufficient,
-                 /*.numberOfDimensions=*/static_cast<uint32_t>(operand.dimensions.size())});
+        datum.operandInformation(info);
         data.push_back(datum);
 
         // package operand dimensions
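For context, value-initializing the struct with {} and then assigning only the meaningful fields is a common way to make sure that any member left unset reads back as zero, which matches the padding-is-zero requirement named in the commit title. A minimal standalone sketch of that pattern, using a hypothetical PackedInfo struct standing in for FmqResultDatum::OperandInformation:

    #include <cassert>
    #include <cstdint>

    struct PackedInfo {
        bool isSufficient;
        uint32_t numberOfDimensions;
        uint32_t padding;  // stand-in for the FMQ padding field being validated
    };

    int main() {
        PackedInfo info{};            // value-initialization zeroes every member
        info.isSufficient = true;     // only the meaningful fields are then assigned
        info.numberOfDimensions = 4;
        assert(info.padding == 0);    // members left untouched stay zero
        return 0;
    }
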
diff --git a/nn/common/OperationsUtils.cpp b/nn/common/OperationsUtils.cpp
index 3338493..99c1178 100644
--- a/nn/common/OperationsUtils.cpp
+++ b/nn/common/OperationsUtils.cpp
@@ -210,20 +210,18 @@
     return true;
 }
 
-bool GetQuantizedConvolutionMultipler(const Shape& inputShape,
-                                      const Shape& filterShape,
-                                      const Shape& biasShape,
-                                      const Shape& outputShape,
-                                      float* multiplier) {
-    const float input_product_scale = inputShape.scale * filterShape.scale;
-    const float bias_scale = biasShape.scale;
-    const float output_scale = outputShape.scale;
+bool GetQuantizedConvolutionMultipler(const Shape& inputShape, const Shape& filterShape,
+                                      const Shape& biasShape, const Shape& outputShape,
+                                      double* multiplier) {
+    // Upcast input_product_scale and bias_scale to double.
+    const double input_product_scale = inputShape.scale * filterShape.scale;
+    const double bias_scale = biasShape.scale;
 
     // The following conditions must be guaranteed by the training pipeline.
     NN_OPS_CHECK(std::abs(input_product_scale - bias_scale) <=
               1e-6 * std::min(input_product_scale, bias_scale));
     NN_OPS_CHECK(input_product_scale >= 0);
-    *multiplier = input_product_scale / output_scale;
+    *multiplier = input_product_scale / outputShape.scale;
     return true;
 }
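The switch to double matters because the requantization multiplier is a ratio of small float scales, input_scale * filter_scale / output_scale, and it is later split into a 31-bit fixed-point value plus a shift; intermediate rounding at float precision can shift which fixed-point value is chosen. A short arithmetic sketch (illustrative scale values only, not taken from any test):

    #include <cstdio>

    int main() {
        // multiplier = input_scale * filter_scale / output_scale
        const float  in_f = 0.5f, filt_f = 0.00784313772f, out_f = 0.00392156886f;
        const double in_d = in_f, filt_d = filt_f, out_d = out_f;

        const float  mult_f = in_f * filt_f / out_f;   // two roundings at 24-bit precision
        const double mult_d = in_d * filt_d / out_d;   // keeps ~53 bits through the ratio

        std::printf("float multiplier:  %.12f\n", mult_f);
        std::printf("double multiplier: %.12f\n", mult_d);
        return 0;
    }
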
 
diff --git a/nn/common/Utils.cpp b/nn/common/Utils.cpp
index c747a05..bac31cc 100644
--- a/nn/common/Utils.cpp
+++ b/nn/common/Utils.cpp
@@ -652,6 +652,20 @@
                            << getOperationName(opType);
                 return ANEURALNETWORKS_BAD_DATA;
             }
+
+            // NeuralNetworks.h specifies that ANEURALNETWORKS_DEPTHWISE_CONV_2D's output must
+            // meet "outputScale > inputScale * filterScale" for the operand type
+            // ANEURALNETWORKS_TENSOR_QUANT8_ASYMM before API level 29. For other
+            // operand types (e.g., ANEURALNETWORKS_TENSOR_FLOAT32), this constraint
+            // does not apply, so by default the constraint is met.
+            bool meetsQuantizedScaleConstraintBeforeV1_2 = true;
+            if (inputType == OperandType::TENSOR_QUANT8_ASYMM) {
+                const float inputScale = operands[inputIndexes[0]].scale;
+                const float filterScale = operands[inputIndexes[1]].scale;
+                const float outputScale = operands[outputIndexes[0]].scale;
+                meetsQuantizedScaleConstraintBeforeV1_2 = (outputScale > inputScale * filterScale);
+            }
+
             bool withExplicitPadding = false;
             bool withLayout = false;
             bool withDilation = false;
@@ -682,7 +696,7 @@
 
             if (inputType == OperandType::TENSOR_FLOAT16 ||
                 filterType == OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL || withLayout ||
-                withDilation) {
+                withDilation || !meetsQuantizedScaleConstraintBeforeV1_2) {
                 NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
             } else {
                 NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_0));
@@ -1005,6 +1019,7 @@
                 inExpectedTypes.push_back(OperandType::FLOAT32);
                 inExpectedTypes.push_back(OperandType::FLOAT32);
             } else {
+                NN_RETURN_IF_ERROR(validateHalVersion(opType, halVersion, HalVersion::V1_2));
                 inExpectedTypes.push_back(OperandType::FLOAT16);
                 inExpectedTypes.push_back(OperandType::FLOAT16);
             }
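The new validation keeps the V1_0 path for models that honour the pre-API-29 rule and only requires a 1.2 driver when the rule is violated. A minimal sketch of that decision, with a hypothetical QuantParams struct instead of the runtime's Operand type:

    #include <cassert>

    enum class HalVersion { V1_0, V1_2 };

    struct QuantParams {
        float inputScale;
        float filterScale;
        float outputScale;
    };

    // Before API level 29 the spec required outputScale > inputScale * filterScale
    // for TENSOR_QUANT8_ASYMM; models that break the rule need the V1_2 HAL.
    HalVersion minimalHalVersion(const QuantParams& p) {
        const bool meetsLegacyConstraint = p.outputScale > p.inputScale * p.filterScale;
        return meetsLegacyConstraint ? HalVersion::V1_0 : HalVersion::V1_2;
    }

    int main() {
        assert(minimalHalVersion({0.5f, 0.5f, 1.0f}) == HalVersion::V1_0);
        assert(minimalHalVersion({0.5f, 0.5f, 0.25f}) == HalVersion::V1_2);  // multiplier == 1
        return 0;
    }
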
diff --git a/nn/common/include/OperationsUtils.h b/nn/common/include/OperationsUtils.h
index 7beaf08..879fbcc 100644
--- a/nn/common/include/OperationsUtils.h
+++ b/nn/common/include/OperationsUtils.h
@@ -188,12 +188,9 @@
                                       int32_t* quantized_multiplier,
                                       int* left_shift);
 
-__wur
-bool GetQuantizedConvolutionMultipler(const Shape& inputShape,
-                                      const Shape& filterShape,
-                                      const Shape& biasShape,
-                                      const Shape& outputShape,
-                                      float* multiplier);
+__wur bool GetQuantizedConvolutionMultipler(const Shape& inputShape, const Shape& filterShape,
+                                            const Shape& biasShape, const Shape& outputShape,
+                                            double* multiplier);
 
 void CalculateActivationRangeUint8(int32_t activation,
                                    const Shape& outputShape,
diff --git a/nn/common/operations/Broadcast.cpp b/nn/common/operations/Broadcast.cpp
index a575a7e..2570672 100644
--- a/nn/common/operations/Broadcast.cpp
+++ b/nn/common/operations/Broadcast.cpp
@@ -363,6 +363,12 @@
             NN_RET_CHECK(validateHalVersion(context, std::max(HalVersion::V1_2, opIntroducedAt)));
         } else if (opType == OperationType::DIV) {
             NN_RET_CHECK_FAIL() << "Unsupported tensor type for operation DIV";
+        } else if (opType == OperationType::MUL) {
+            Shape output = context->getOutputShape(kOutputTensor);
+            Shape input1 = context->getInputShape(kInputTensor1);
+            Shape input2 = context->getInputShape(kInputTensor2);
+            NN_RET_CHECK_GT(output.scale, input1.scale * input2.scale);
+            NN_RET_CHECK(validateHalVersion(context, std::max(HalVersion::V1_0, opIntroducedAt)));
         } else {
             NN_RET_CHECK(validateHalVersion(context, std::max(HalVersion::V1_0, opIntroducedAt)));
         }
diff --git a/nn/common/operations/Conv2D.cpp b/nn/common/operations/Conv2D.cpp
index 6bfb214..3490a4c 100644
--- a/nn/common/operations/Conv2D.cpp
+++ b/nn/common/operations/Conv2D.cpp
@@ -200,7 +200,7 @@
     int32_t filterOffset = -filterShape.offset;
     int32_t outputOffset = outputShape.offset;
 
-    float real_multiplier = 0.0;
+    double real_multiplier = 0.0;
     int32_t output_multiplier = 0;
     int32_t output_shift = 0;
     int32_t output_activation_min = 0;
@@ -303,7 +303,7 @@
     int32_t inputOffset = -inputShape.offset;
     int32_t outputOffset = outputShape.offset;
 
-    auto realMultiplier = std::vector<float>(outputDepth, .0f);
+    auto realMultiplier = std::vector<double>(outputDepth, .0f);
     auto outputMultiplier = std::vector<int32_t>(outputDepth, 0);
     auto outputShift = std::vector<int32_t>(outputDepth, .0f);
 
diff --git a/nn/common/operations/DepthwiseConv2D.cpp b/nn/common/operations/DepthwiseConv2D.cpp
index 40a70d8..1add079 100644
--- a/nn/common/operations/DepthwiseConv2D.cpp
+++ b/nn/common/operations/DepthwiseConv2D.cpp
@@ -106,7 +106,7 @@
 
     ANDROID_NN_DEPTHWISE_CONV_PARAMETERS
 
-    float real_multiplier = 0.0;
+    double real_multiplier = 0.0;
     int32_t output_multiplier = 0;
     int32_t output_shift = 0;
     int32_t output_activation_min = 0;
@@ -172,7 +172,7 @@
     int32_t inputOffset = -inputShape.offset;
     int32_t outputOffset = outputShape.offset;
 
-    auto realMultiplier = std::vector<float>(outputDepth, .0f);
+    auto realMultiplier = std::vector<double>(outputDepth, .0f);
     auto outputMultiplier = std::vector<int32_t>(outputDepth, 0);
     auto outputShift = std::vector<int32_t>(outputDepth, .0f);
 
diff --git a/nn/common/operations/FullyConnected.cpp b/nn/common/operations/FullyConnected.cpp
index cfe27bc..542fb53 100644
--- a/nn/common/operations/FullyConnected.cpp
+++ b/nn/common/operations/FullyConnected.cpp
@@ -104,7 +104,7 @@
     int32_t weightsOffset = -weightsShape.offset;
     int32_t outputOffset = outputShape.offset;
 
-    float realMultiplier = 0.0;
+    double realMultiplier = 0.0;
     int32_t outputMultiplier = 0;
     int32_t outputShift = 0;
     int32_t outputActivationMin = 0;
@@ -161,7 +161,20 @@
                 OperandType::INT32,
         };
     } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM) {
-        NN_RET_CHECK(validateHalVersion(context, HalVersion::V1_0));
+        // NeuralNetworks.h specifies that ANEURALNETWORKS_FULLY_CONNECTED's output must
+        // meet "outputScale > inputScale * weightsScale" for the operand type
+        // ANEURALNETWORKS_TENSOR_QUANT8_ASYMM before API level 29.
+        const float inputScale = context->getInputShape(kInputTensor).scale;
+        const float weightsScale = context->getInputShape(kWeightsTensor).scale;
+        const float outputScale = context->getOutputShape(kOutputTensor).scale;
+        bool meetsQuantizedScaleConstraintBeforeV1_2 = (outputScale > inputScale * weightsScale);
+
+        if (!meetsQuantizedScaleConstraintBeforeV1_2) {
+            NN_RET_CHECK(validateHalVersion(context, HalVersion::V1_2));
+        } else {
+            NN_RET_CHECK(validateHalVersion(context, HalVersion::V1_0));
+        }
+
         inExpectedTypes = {
                 OperandType::TENSOR_QUANT8_ASYMM,
                 OperandType::TENSOR_QUANT8_ASYMM,
diff --git a/nn/common/operations/GroupedConv2D.cpp b/nn/common/operations/GroupedConv2D.cpp
index 4d709a1..2644f80 100644
--- a/nn/common/operations/GroupedConv2D.cpp
+++ b/nn/common/operations/GroupedConv2D.cpp
@@ -109,7 +109,7 @@
     int32_t filterOffset = -filterShape.offset;
     int32_t outputOffset = outputShape.offset;
 
-    float realMultiplier = 0.0;
+    double realMultiplier = 0.0;
     int32_t outputMultiplier = 0;
     int32_t outputShift = 0;
     NN_RET_CHECK(GetQuantizedConvolutionMultipler(inputShape, filterShape, biasShape, outputShape,
@@ -187,7 +187,7 @@
     int32_t inputOffset = -inputShape.offset;
     int32_t outputOffset = outputShape.offset;
 
-    auto realMultiplier = std::vector<float>(outputDepth, .0f);
+    auto realMultiplier = std::vector<double>(outputDepth, .0f);
     auto outputMultiplier = std::vector<int32_t>(outputDepth, 0);
     auto outputShift = std::vector<int32_t>(outputDepth, 0);
 
diff --git a/nn/common/operations/Pooling.cpp b/nn/common/operations/Pooling.cpp
index 979da65..c32617f 100644
--- a/nn/common/operations/Pooling.cpp
+++ b/nn/common/operations/Pooling.cpp
@@ -319,8 +319,7 @@
     uint32_t outHeight = computeOutSize(height, param.filter_height, param.stride_height,
                                         param.padding_top, param.padding_bottom);
 
-    Shape output = context->getOutputShape(kOutputTensor);
-    output.type = input.type;
+    Shape output = input;
     if (param.useNchw) {
         output.dimensions = {batches, channels, outHeight, outWidth};
     } else {
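Pooling never rescales values, so the output operand is expected to keep the input's type, scale, and zero point; copying the whole input Shape and then overwriting only the dimensions expresses that directly (and is also why the fuzz-test signatures further down call setSameQuantization on the pooling output). A small sketch with a hypothetical Shape struct:

    #include <cassert>
    #include <cstdint>
    #include <vector>

    struct Shape {
        int type;                          // stand-in for OperandType
        std::vector<uint32_t> dimensions;
        float scale;
        int32_t offset;
    };

    Shape poolingOutputShape(const Shape& input, std::vector<uint32_t> outDims) {
        Shape output = input;                    // inherit type, scale and offset
        output.dimensions = std::move(outDims);  // only the spatial extent changes
        return output;
    }

    int main() {
        Shape input{/*type=*/3, {1, 4, 4, 2}, 0.25f, 100};
        Shape output = poolingOutputShape(input, {1, 2, 2, 2});
        assert(output.scale == input.scale && output.offset == input.offset);
        return 0;
    }
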
diff --git a/nn/common/operations/Reshape.cpp b/nn/common/operations/Reshape.cpp
index 4eda089..2633812 100644
--- a/nn/common/operations/Reshape.cpp
+++ b/nn/common/operations/Reshape.cpp
@@ -210,11 +210,12 @@
                          const int32_t* padding, const Shape& paddingShape, T* outputData,
                          const Shape& outputShape) {
     // Needed by low level implementation, but not really used.
-    tflite::Dims<4> blockSizeDim;
+    tflite::RuntimeShape blockSizeDim;
     NNTRACE_COMP("optimized_ops::SpaceToBatchND");
-    tflite::optimized_ops::SpaceToBatchND(inputData, convertShapeToDims(inputShape), blockSize,
-                                          blockSizeDim, padding, convertShapeToDims(paddingShape),
-                                          outputData, convertShapeToDims(outputShape));
+    tflite::optimized_ops::SpaceToBatchND(
+            {.output_offset = outputShape.offset}, convertShapeToTflshape(inputShape), inputData,
+            blockSizeDim, blockSize, convertShapeToTflshape(paddingShape), padding,
+            convertShapeToTflshape(outputShape), outputData);
     return true;
 }
 template bool spaceToBatchGeneric<float>(const float* inputData, const Shape& inputShape,
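The params argument added to the SpaceToBatchND call carries the output offset, so the padded region of a quantized tensor can be filled with its zero point, i.e. the value that dequantizes to 0.0, rather than the raw byte 0; this is the "logical zero" wording used in the NeuralNetworks.h note further down. A self-contained sketch of the relationship (plain C++, not the TFLite kernel):

    #include <cassert>
    #include <cstdint>

    // For an asymmetric 8-bit tensor, real = scale * (quantized - zeroPoint),
    // so the quantized value representing real 0.0 is exactly zeroPoint.
    float dequantize(uint8_t q, float scale, int32_t zeroPoint) {
        return scale * static_cast<float>(static_cast<int32_t>(q) - zeroPoint);
    }

    int main() {
        const float scale = 0.5f;
        const int32_t zeroPoint = 128;
        assert(dequantize(128, scale, zeroPoint) == 0.0f);   // logical zero
        assert(dequantize(0, scale, zeroPoint) == -64.0f);   // raw 0 is not
        return 0;
    }
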
diff --git a/nn/common/operations/TransposeConv2D.cpp b/nn/common/operations/TransposeConv2D.cpp
index 3f857ae..40115fa 100644
--- a/nn/common/operations/TransposeConv2D.cpp
+++ b/nn/common/operations/TransposeConv2D.cpp
@@ -193,7 +193,7 @@
     int32_t filterOffset = -filterShape.offset;
     int32_t outputOffset = outputShape.offset;
 
-    float realMultiplier = 0.0;
+    double realMultiplier = 0.0;
     int32_t outputMultiplier = 0;
     int32_t outputShift = 0;
     NN_RET_CHECK(GetQuantizedConvolutionMultipler(inputShape, filterShape, biasShape, outputShape,
@@ -327,7 +327,7 @@
     int32_t inputOffset = -inputShape.offset;
     int32_t outputOffset = outputShape.offset;
 
-    std::vector<float> realMultiplier(outputDepth, 0.0);
+    std::vector<double> realMultiplier(outputDepth, 0.0);
     std::vector<int32_t> outputMultiplier(outputDepth, 0);
     std::vector<int32_t> outputShift(outputDepth, 0);
     for (int i = 0; i < outputDepth; ++i) {
diff --git a/nn/runtime/include/NeuralNetworks.h b/nn/runtime/include/NeuralNetworks.h
index 81a79a2..1961c24 100644
--- a/nn/runtime/include/NeuralNetworks.h
+++ b/nn/runtime/include/NeuralNetworks.h
@@ -2075,7 +2075,8 @@
      * Supported tensor {@link OperandCode}:
      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since API level 29)
      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
-     * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
+     * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} (full support since API
+     *   level 29, see the output section)
      *
      * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout.
      * With the default data layout NHWC, the data is stored in the order of:
@@ -2102,6 +2103,10 @@
      * Outputs:
      * * 0: A tensor of the same {@link OperandCode} as input0.
      *
+     *      NOTE: Before API level 29, the pad value for
+     *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} is undefined.
+     *      Since API level 29, the pad value is always the logical zero.
+     *
      * Available since API level 28.
      */
     ANEURALNETWORKS_SPACE_TO_BATCH_ND = 33,
diff --git a/nn/runtime/test/TestCompliance.cpp b/nn/runtime/test/TestCompliance.cpp
index 0497ccf..cbded0c 100644
--- a/nn/runtime/test/TestCompliance.cpp
+++ b/nn/runtime/test/TestCompliance.cpp
@@ -74,7 +74,9 @@
 TEST_AVAILABLE_SINCE_V1_2(sub_v1_2, quant8)
 TEST_AVAILABLE_SINCE_V1_2(conv2d_v1_2, nchw)
 TEST_AVAILABLE_SINCE_V1_2(conv2d_v1_2, quant_output_multiplier_gt_1)
+TEST_AVAILABLE_SINCE_V1_2(fully_connected_v1_2, quant8_mult_gt_1)
 TEST_AVAILABLE_SINCE_V1_2(depthwise_conv2d_v1_2, nchw)
+TEST_AVAILABLE_SINCE_V1_2(depthwise_conv2d_v1_2, quant_output_multiplier_gt_1)
 TEST_AVAILABLE_SINCE_V1_2(avg_pool_v1_2, nchw)
 TEST_AVAILABLE_SINCE_V1_2(l2_pool_v1_2, nchw)
 TEST_AVAILABLE_SINCE_V1_2(max_pool_v1_2, nchw)
@@ -90,6 +92,7 @@
 TEST_AVAILABLE_SINCE_V1_2(local_response_normalization_v1_2, axis_dim4_axis0)
 TEST_AVAILABLE_SINCE_V1_2(softmax_v1_2, dim1_axis0)
 TEST_AVAILABLE_SINCE_V1_2(softmax_v1_2, axis_dim4_axis0)
+TEST_AVAILABLE_SINCE_V1_2(lstm_float16)
 
 TEST_AVAILABLE_SINCE_V1_1(div)
 TEST_AVAILABLE_SINCE_V1_1(sub)
diff --git a/nn/runtime/test/TestValidateOperations.cpp b/nn/runtime/test/TestValidateOperations.cpp
index 4452eb9..9967525 100644
--- a/nn/runtime/test/TestValidateOperations.cpp
+++ b/nn/runtime/test/TestValidateOperations.cpp
@@ -130,6 +130,11 @@
         EXPECT_TRUE(testMutatingOutputOperandCounts());
     }
 
+    void testFailure(int32_t expectedResult) {
+        int32_t result = addOperation(mValidInputs, mValidOutputs);
+        EXPECT_EQ(expectedResult, result);
+    }
+
     bool testSuccess() {
         int32_t result = addOperation(mValidInputs, mValidOutputs);
         return ANEURALNETWORKS_NO_ERROR == result;
@@ -630,6 +635,25 @@
     simpleMathOpTest(ANEURALNETWORKS_MUL, ANEURALNETWORKS_TENSOR_QUANT8_ASYMM);
 }
 
+TEST(OperationValidationTest, MUL_quant8_bad_output_scale) {
+    uint32_t inputDimensions[4] = {2, 2, 2, 2};
+    ANeuralNetworksOperandType input1 =
+            getOpType(ANEURALNETWORKS_TENSOR_QUANT8_ASYMM, 4, inputDimensions);
+    ANeuralNetworksOperandType input2 = input1;
+    ANeuralNetworksOperandType output = input1;
+    input1.scale = 1.0f;
+    input2.scale = 1.0f;
+    output.scale = 0.5f;
+    ANeuralNetworksOperandType activation = {.type = ANEURALNETWORKS_INT32,
+                                             .dimensionCount = 0,
+                                             .dimensions = nullptr,
+                                             .scale = 0.0f,
+                                             .zeroPoint = 0};
+
+    OperationTestBase mulTest(ANEURALNETWORKS_MUL, {input1, input2, activation}, {output});
+    mulTest.testFailure(ANEURALNETWORKS_BAD_DATA);
+}
+
 void binaryOpTest(ANeuralNetworksOperationType operationCode, int32_t operandCode) {
     uint32_t inputDimensions[] = {2, 2, 2, 2, 2};
     ANeuralNetworksOperandType input1 = getOpType(operandCode, 5, inputDimensions);
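The MUL_quant8_bad_output_scale test added above chooses scales that break the quantized MUL requirement: with input scales of 1.0 and 1.0 the product is 1.0, and an output scale of 0.5 is not greater than it, so validation is expected to report ANEURALNETWORKS_BAD_DATA. A self-contained sketch of the check (a hypothetical helper, not the runtime's validator; the result-code values are assumed):

    #include <cassert>

    constexpr int ANEURALNETWORKS_NO_ERROR = 0;  // values assumed for this sketch
    constexpr int ANEURALNETWORKS_BAD_DATA = 4;

    // Quantized MUL requires output.scale > input1.scale * input2.scale, so that the
    // requantization multiplier (in1 * in2 / out) stays below 1.
    int validateMulScales(float in1Scale, float in2Scale, float outScale) {
        return (outScale > in1Scale * in2Scale) ? ANEURALNETWORKS_NO_ERROR
                                                : ANEURALNETWORKS_BAD_DATA;
    }

    int main() {
        assert(validateMulScales(1.0f, 1.0f, 0.5f) == ANEURALNETWORKS_BAD_DATA);   // the case above
        assert(validateMulScales(1.0f, 1.0f, 1.5f) == ANEURALNETWORKS_NO_ERROR);
        return 0;
    }
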
diff --git a/nn/runtime/test/fuzzing/TestRandomGraph.cpp b/nn/runtime/test/fuzzing/TestRandomGraph.cpp
index 67ccbec..4a80b0e 100644
--- a/nn/runtime/test/fuzzing/TestRandomGraph.cpp
+++ b/nn/runtime/test/fuzzing/TestRandomGraph.cpp
@@ -130,11 +130,15 @@
         CompilationForDevice compilation;
         ASSERT_TRUE(compilation.initialize(model, device));
         Result compileReturn = compilation.finish();
-        if (featureLevel >= __ANDROID_API_Q__) {
-            ASSERT_EQ(compileReturn, Result::NO_ERROR);
-        } else {
-            ASSERT_TRUE(compileReturn == Result::NO_ERROR || compileReturn == Result::OP_FAILED);
+        // Even if the model is fully supported, the compilation may still fail, e.g. each
+        // operation is supported, but the model is too big (too many operations and/or
+        // too-large constants) for the device.
+        if (compileReturn == Result::OP_FAILED) {
+            ASSERT_FALSE(isRef);
+            std::cout << "[          ]   SKIP: " << name << " failed at compilation step.\n";
+            return;
         }
+        ASSERT_EQ(compileReturn, Result::NO_ERROR);
 
         // Create request.
         test_wrapper::Execution execution(&compilation);
@@ -147,11 +151,17 @@
 
         // Compute result.
         Result executeReturn = execution.compute();
-        if (featureLevel >= __ANDROID_API_Q__) {
-            ASSERT_EQ(executeReturn, Result::NO_ERROR);
-            if (!isRef) mGraph.checkResults(outputs, mCriteria);
-        } else {
-            ASSERT_TRUE(executeReturn == Result::NO_ERROR || executeReturn == Result::OP_FAILED);
+        // Even if the model is fully supported and the compilation succeeds, the execution may
+        // still fail, e.g. some operand shapes may be unknown until execution time and may then
+        // turn out to be too big.
+        if (executeReturn == Result::OP_FAILED) {
+            ASSERT_FALSE(isRef);
+            std::cout << "[          ]   SKIP: " << name << " failed at execution step.\n";
+            return;
+        }
+        ASSERT_EQ(executeReturn, Result::NO_ERROR);
+        if (featureLevel >= __ANDROID_API_Q__ && !isRef) {
+            mGraph.checkResults(outputs, mCriteria);
         }
     }
 
@@ -305,7 +315,8 @@
 TEST_SINGLE_OPERATION(CONCATENATION, V1_2, kMediumCriteria);
 TEST_SINGLE_OPERATION(DEPTH_TO_SPACE, V1_2, kStrictCriteria);
 TEST_SINGLE_OPERATION(SPACE_TO_DEPTH, V1_2, kStrictCriteria);
-TEST_SINGLE_OPERATION(BATCH_TO_SPACE_ND, V1_2, kStrictCriteria);
+// TODO: Re-enable the test once b/130757689 is fixed.
+// TEST_SINGLE_OPERATION(BATCH_TO_SPACE_ND, V1_2, kStrictCriteria);
 TEST_SINGLE_OPERATION(SPACE_TO_BATCH_ND, V1_2, kStrictCriteria);
 TEST_SINGLE_OPERATION(FULLY_CONNECTED, V1_2, kRelaxedCriteria);
 TEST_SINGLE_OPERATION(RESHAPE, V1_2, kStrictCriteria);
diff --git a/nn/runtime/test/fuzzing/operation_signatures/Poolings.cpp b/nn/runtime/test/fuzzing/operation_signatures/Poolings.cpp
index 6bfbf58..56a7214 100644
--- a/nn/runtime/test/fuzzing/operation_signatures/Poolings.cpp
+++ b/nn/runtime/test/fuzzing/operation_signatures/Poolings.cpp
@@ -58,6 +58,8 @@
     // width
     explicitPadding(op->inputs[0]->dimensions[widthIndex], filterWidth, strideWidth, /*dilation=*/1,
                     paddingLeft, paddingRight, &op->outputs[0]->dimensions[widthIndex]);
+
+    setSameQuantization(op->outputs[0], op->inputs[0]);
 }
 
 // For pooling ops with implicit padding.
@@ -92,6 +94,8 @@
                     /*dilation=*/1, paddingScheme, &op->outputs[0]->dimensions[heightIndex]);
     implicitPadding(op->inputs[0]->dimensions[widthIndex], filterWidth, strideWidth,
                     /*dilation=*/1, paddingScheme, &op->outputs[0]->dimensions[widthIndex]);
+
+    setSameQuantization(op->outputs[0], op->inputs[0]);
 }
 
 #define DEFINE_POOLING_SIGNATURE(op, ver, ...)                         \
diff --git a/nn/runtime/test/generated/all_generated_V1_2_vts_tests.cpp b/nn/runtime/test/generated/all_generated_V1_2_vts_tests.cpp
index 3fda934..0ef741c 100644
--- a/nn/runtime/test/generated/all_generated_V1_2_vts_tests.cpp
+++ b/nn/runtime/test/generated/all_generated_V1_2_vts_tests.cpp
@@ -20737,13 +20737,13 @@
 #endif
 TEST_F(NeuralnetworksHidlTest, depthwise_conv2d_v1_2_quant_output_multiplier_gt_1) {
   generated_tests::Execute(device,
-                           depthwise_conv2d_v1_2::createTestModel,
-                           depthwise_conv2d_v1_2::is_ignored,
+                           depthwise_conv2d_v1_2::createTestModel_quant_output_multiplier_gt_1,
+                           depthwise_conv2d_v1_2::is_ignored_quant_output_multiplier_gt_1,
                            depthwise_conv2d_v1_2::get_examples_quant_output_multiplier_gt_1());
 }
 
 TEST_F(ValidationTest, depthwise_conv2d_v1_2_quant_output_multiplier_gt_1) {
-  const Model model = depthwise_conv2d_v1_2::createTestModel();
+  const Model model = depthwise_conv2d_v1_2::createTestModel_quant_output_multiplier_gt_1();
   const std::vector<Request> requests = createRequests(depthwise_conv2d_v1_2::get_examples_quant_output_multiplier_gt_1());
   validateEverything(model, requests);
 }
@@ -20751,13 +20751,13 @@
 
 TEST_F(NeuralnetworksHidlTest, depthwise_conv2d_v1_2_quant_output_multiplier_gt_1_relaxed) {
   generated_tests::Execute(device,
-                           depthwise_conv2d_v1_2::createTestModel_relaxed,
-                           depthwise_conv2d_v1_2::is_ignored_relaxed,
+                           depthwise_conv2d_v1_2::createTestModel_quant_output_multiplier_gt_1_relaxed,
+                           depthwise_conv2d_v1_2::is_ignored_quant_output_multiplier_gt_1_relaxed,
                            depthwise_conv2d_v1_2::get_examples_quant_output_multiplier_gt_1_relaxed());
 }
 
 TEST_F(ValidationTest, depthwise_conv2d_v1_2_quant_output_multiplier_gt_1_relaxed) {
-  const Model model = depthwise_conv2d_v1_2::createTestModel_relaxed();
+  const Model model = depthwise_conv2d_v1_2::createTestModel_quant_output_multiplier_gt_1_relaxed();
   const std::vector<Request> requests = createRequests(depthwise_conv2d_v1_2::get_examples_quant_output_multiplier_gt_1_relaxed());
   validateEverything(model, requests);
 }
@@ -20765,13 +20765,13 @@
 
 TEST_F(NeuralnetworksHidlTest, depthwise_conv2d_v1_2_quant_output_multiplier_gt_1_weight_as_input) {
   generated_tests::Execute(device,
-                           depthwise_conv2d_v1_2::createTestModel_weight_as_input,
-                           depthwise_conv2d_v1_2::is_ignored_weight_as_input,
+                           depthwise_conv2d_v1_2::createTestModel_quant_output_multiplier_gt_1_weight_as_input,
+                           depthwise_conv2d_v1_2::is_ignored_quant_output_multiplier_gt_1_weight_as_input,
                            depthwise_conv2d_v1_2::get_examples_quant_output_multiplier_gt_1_weight_as_input());
 }
 
 TEST_F(ValidationTest, depthwise_conv2d_v1_2_quant_output_multiplier_gt_1_weight_as_input) {
-  const Model model = depthwise_conv2d_v1_2::createTestModel_weight_as_input();
+  const Model model = depthwise_conv2d_v1_2::createTestModel_quant_output_multiplier_gt_1_weight_as_input();
   const std::vector<Request> requests = createRequests(depthwise_conv2d_v1_2::get_examples_quant_output_multiplier_gt_1_weight_as_input());
   validateEverything(model, requests);
 }
@@ -20779,13 +20779,13 @@
 
 TEST_F(NeuralnetworksHidlTest, depthwise_conv2d_v1_2_quant_output_multiplier_gt_1_weight_as_input_relaxed) {
   generated_tests::Execute(device,
-                           depthwise_conv2d_v1_2::createTestModel_weight_as_input_relaxed,
-                           depthwise_conv2d_v1_2::is_ignored_weight_as_input_relaxed,
+                           depthwise_conv2d_v1_2::createTestModel_quant_output_multiplier_gt_1_weight_as_input_relaxed,
+                           depthwise_conv2d_v1_2::is_ignored_quant_output_multiplier_gt_1_weight_as_input_relaxed,
                            depthwise_conv2d_v1_2::get_examples_quant_output_multiplier_gt_1_weight_as_input_relaxed());
 }
 
 TEST_F(ValidationTest, depthwise_conv2d_v1_2_quant_output_multiplier_gt_1_weight_as_input_relaxed) {
-  const Model model = depthwise_conv2d_v1_2::createTestModel_weight_as_input_relaxed();
+  const Model model = depthwise_conv2d_v1_2::createTestModel_quant_output_multiplier_gt_1_weight_as_input_relaxed();
   const std::vector<Request> requests = createRequests(depthwise_conv2d_v1_2::get_examples_quant_output_multiplier_gt_1_weight_as_input_relaxed());
   validateEverything(model, requests);
 }
@@ -20794,13 +20794,13 @@
 #ifdef NN_TEST_DYNAMIC_OUTPUT_SHAPE
 TEST_F(DynamicOutputShapeTest, depthwise_conv2d_v1_2_quant_output_multiplier_gt_1_dynamic_output_shape) {
   generated_tests::Execute(device,
-                           depthwise_conv2d_v1_2::createTestModel_dynamic_output_shape,
-                           depthwise_conv2d_v1_2::is_ignored_dynamic_output_shape,
+                           depthwise_conv2d_v1_2::createTestModel_quant_output_multiplier_gt_1_dynamic_output_shape,
+                           depthwise_conv2d_v1_2::is_ignored_quant_output_multiplier_gt_1_dynamic_output_shape,
                            depthwise_conv2d_v1_2::get_examples_quant_output_multiplier_gt_1_dynamic_output_shape(), true);
 }
 
 TEST_F(ValidationTest, depthwise_conv2d_v1_2_quant_output_multiplier_gt_1_dynamic_output_shape) {
-  const Model model = depthwise_conv2d_v1_2::createTestModel_dynamic_output_shape();
+  const Model model = depthwise_conv2d_v1_2::createTestModel_quant_output_multiplier_gt_1_dynamic_output_shape();
   const std::vector<Request> requests = createRequests(depthwise_conv2d_v1_2::get_examples_quant_output_multiplier_gt_1_dynamic_output_shape());
   validateEverything(model, requests);
 }
@@ -20810,13 +20810,13 @@
 #ifdef NN_TEST_DYNAMIC_OUTPUT_SHAPE
 TEST_F(DynamicOutputShapeTest, depthwise_conv2d_v1_2_quant_output_multiplier_gt_1_dynamic_output_shape_relaxed) {
   generated_tests::Execute(device,
-                           depthwise_conv2d_v1_2::createTestModel_dynamic_output_shape_relaxed,
-                           depthwise_conv2d_v1_2::is_ignored_dynamic_output_shape_relaxed,
+                           depthwise_conv2d_v1_2::createTestModel_quant_output_multiplier_gt_1_dynamic_output_shape_relaxed,
+                           depthwise_conv2d_v1_2::is_ignored_quant_output_multiplier_gt_1_dynamic_output_shape_relaxed,
                            depthwise_conv2d_v1_2::get_examples_quant_output_multiplier_gt_1_dynamic_output_shape_relaxed(), true);
 }
 
 TEST_F(ValidationTest, depthwise_conv2d_v1_2_quant_output_multiplier_gt_1_dynamic_output_shape_relaxed) {
-  const Model model = depthwise_conv2d_v1_2::createTestModel_dynamic_output_shape_relaxed();
+  const Model model = depthwise_conv2d_v1_2::createTestModel_quant_output_multiplier_gt_1_dynamic_output_shape_relaxed();
   const std::vector<Request> requests = createRequests(depthwise_conv2d_v1_2::get_examples_quant_output_multiplier_gt_1_dynamic_output_shape_relaxed());
   validateEverything(model, requests);
 }
@@ -20826,13 +20826,13 @@
 #ifdef NN_TEST_DYNAMIC_OUTPUT_SHAPE
 TEST_F(DynamicOutputShapeTest, depthwise_conv2d_v1_2_quant_output_multiplier_gt_1_dynamic_output_shape_weight_as_input) {
   generated_tests::Execute(device,
-                           depthwise_conv2d_v1_2::createTestModel_dynamic_output_shape_weight_as_input,
-                           depthwise_conv2d_v1_2::is_ignored_dynamic_output_shape_weight_as_input,
+                           depthwise_conv2d_v1_2::createTestModel_quant_output_multiplier_gt_1_dynamic_output_shape_weight_as_input,
+                           depthwise_conv2d_v1_2::is_ignored_quant_output_multiplier_gt_1_dynamic_output_shape_weight_as_input,
                            depthwise_conv2d_v1_2::get_examples_quant_output_multiplier_gt_1_dynamic_output_shape_weight_as_input(), true);
 }
 
 TEST_F(ValidationTest, depthwise_conv2d_v1_2_quant_output_multiplier_gt_1_dynamic_output_shape_weight_as_input) {
-  const Model model = depthwise_conv2d_v1_2::createTestModel_dynamic_output_shape_weight_as_input();
+  const Model model = depthwise_conv2d_v1_2::createTestModel_quant_output_multiplier_gt_1_dynamic_output_shape_weight_as_input();
   const std::vector<Request> requests = createRequests(depthwise_conv2d_v1_2::get_examples_quant_output_multiplier_gt_1_dynamic_output_shape_weight_as_input());
   validateEverything(model, requests);
 }
@@ -20842,13 +20842,13 @@
 #ifdef NN_TEST_DYNAMIC_OUTPUT_SHAPE
 TEST_F(DynamicOutputShapeTest, depthwise_conv2d_v1_2_quant_output_multiplier_gt_1_dynamic_output_shape_weight_as_input_relaxed) {
   generated_tests::Execute(device,
-                           depthwise_conv2d_v1_2::createTestModel_dynamic_output_shape_weight_as_input_relaxed,
-                           depthwise_conv2d_v1_2::is_ignored_dynamic_output_shape_weight_as_input_relaxed,
+                           depthwise_conv2d_v1_2::createTestModel_quant_output_multiplier_gt_1_dynamic_output_shape_weight_as_input_relaxed,
+                           depthwise_conv2d_v1_2::is_ignored_quant_output_multiplier_gt_1_dynamic_output_shape_weight_as_input_relaxed,
                            depthwise_conv2d_v1_2::get_examples_quant_output_multiplier_gt_1_dynamic_output_shape_weight_as_input_relaxed(), true);
 }
 
 TEST_F(ValidationTest, depthwise_conv2d_v1_2_quant_output_multiplier_gt_1_dynamic_output_shape_weight_as_input_relaxed) {
-  const Model model = depthwise_conv2d_v1_2::createTestModel_dynamic_output_shape_weight_as_input_relaxed();
+  const Model model = depthwise_conv2d_v1_2::createTestModel_quant_output_multiplier_gt_1_dynamic_output_shape_weight_as_input_relaxed();
   const std::vector<Request> requests = createRequests(depthwise_conv2d_v1_2::get_examples_quant_output_multiplier_gt_1_dynamic_output_shape_weight_as_input_relaxed());
   validateEverything(model, requests);
 }
diff --git a/nn/runtime/test/generated/examples/avg_pool_float_2_relaxed.example.cpp b/nn/runtime/test/generated/examples/avg_pool_float_2_relaxed.example.cpp
index fb90caa..2590172 100644
--- a/nn/runtime/test/generated/examples/avg_pool_float_2_relaxed.example.cpp
+++ b/nn/runtime/test/generated/examples/avg_pool_float_2_relaxed.example.cpp
@@ -31,9 +31,9 @@
 //Output(s)
 { // See tools/test_generator/include/TestHarness.h:MixedTyped
   // int -> Dimensions map
-  .operandDimensions = {{0, {5, 11, 13, 3}}},
+  .operandDimensions = {{0, {5, 16, 18, 3}}},
   // int -> FLOAT32 map
-  .float32Operands = {{0, {1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 
1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 
1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 
1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f}}},
+  .float32Operands = {{0, {1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 
1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 
1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 
1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 
1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 
1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 
1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 
1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f}}},
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
@@ -88,9 +88,9 @@
 //Output(s)
 { // See tools/test_generator/include/TestHarness.h:MixedTyped
   // int -> Dimensions map
-  .operandDimensions = {{0, {5, 11, 13, 3}}},
+  .operandDimensions = {{0, {5, 16, 18, 3}}},
   // int -> FLOAT32 map
-  .float32Operands = {{0, {1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 
1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 
1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 
1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f}}},
+  .float32Operands = {{0, {1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 
1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 
1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 
1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 
1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 
1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 
1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 
1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f}}},
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
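
The hunk above only swaps regenerated expected data: the output shape grows from {5, 11, 13, 3} to {5, 16, 18, 3} and the all-ones reference output grows with it. The new and old spatial sizes are consistent with the standard NNAPI output-size formulas if one assumes a 16x18 spatial input, a 6x6 filter, and stride 1; those parameters are not visible in this hunk, so the sketch below is only an illustration under that assumption.

#include <cstdio>

// ceil(in / stride): output size with implicit SAME padding.
int outSame(int in, int stride) { return (in + stride - 1) / stride; }
// ceil((in - filter + 1) / stride): output size with VALID padding.
int outValid(int in, int filter, int stride) { return (in - filter + stride) / stride; }

int main() {
    // Assumed parameters (hypothetical, chosen to match the dimension change):
    // spatial input 16x18, 6x6 filter, stride 1.
    std::printf("VALID: %d x %d\n", outValid(16, 6, 1), outValid(18, 6, 1));  // 11 x 13
    std::printf("SAME:  %d x %d\n", outSame(16, 1), outSame(18, 1));          // 16 x 18
    return 0;
}
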
diff --git a/nn/runtime/test/generated/examples/depthwise_conv2d_v1_2.example.cpp b/nn/runtime/test/generated/examples/depthwise_conv2d_v1_2.example.cpp
index dddc173..88c3f1b 100644
--- a/nn/runtime/test/generated/examples/depthwise_conv2d_v1_2.example.cpp
+++ b/nn/runtime/test/generated/examples/depthwise_conv2d_v1_2.example.cpp
@@ -9613,7 +9613,7 @@
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
-  .quant8AsymmOperands = {{0, {198, 93, 227, 107, 219, 101, 255, 123}}},
+  .quant8AsymmOperands = {{0, {255, 58, 255, 87, 255, 74, 255, 119}}},
   // int -> QUANT16_SYMM map
   .quant16SymmOperands = {},
   // int -> FLOAT16 map
@@ -9670,7 +9670,7 @@
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
-  .quant8AsymmOperands = {{0, {198, 93, 227, 107, 219, 101, 255, 123}}},
+  .quant8AsymmOperands = {{0, {255, 58, 255, 87, 255, 74, 255, 119}}},
   // int -> QUANT16_SYMM map
   .quant16SymmOperands = {},
   // int -> FLOAT16 map
@@ -9727,7 +9727,7 @@
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
-  .quant8AsymmOperands = {{0, {198, 93, 227, 107, 219, 101, 255, 123}}},
+  .quant8AsymmOperands = {{0, {255, 58, 255, 87, 255, 74, 255, 119}}},
   // int -> QUANT16_SYMM map
   .quant16SymmOperands = {},
   // int -> FLOAT16 map
@@ -9784,7 +9784,7 @@
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
-  .quant8AsymmOperands = {{0, {198, 93, 227, 107, 219, 101, 255, 123}}},
+  .quant8AsymmOperands = {{0, {255, 58, 255, 87, 255, 74, 255, 119}}},
   // int -> QUANT16_SYMM map
   .quant16SymmOperands = {},
   // int -> FLOAT16 map
@@ -9841,7 +9841,7 @@
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
-  .quant8AsymmOperands = {{0, {198, 93, 227, 107, 219, 101, 255, 123}}},
+  .quant8AsymmOperands = {{0, {255, 58, 255, 87, 255, 74, 255, 119}}},
   // int -> QUANT16_SYMM map
   .quant16SymmOperands = {},
   // int -> FLOAT16 map
@@ -9898,7 +9898,7 @@
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
-  .quant8AsymmOperands = {{0, {198, 93, 227, 107, 219, 101, 255, 123}}},
+  .quant8AsymmOperands = {{0, {255, 58, 255, 87, 255, 74, 255, 119}}},
   // int -> QUANT16_SYMM map
   .quant16SymmOperands = {},
   // int -> FLOAT16 map
@@ -9955,7 +9955,7 @@
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
-  .quant8AsymmOperands = {{0, {198, 93, 227, 107, 219, 101, 255, 123}}},
+  .quant8AsymmOperands = {{0, {255, 58, 255, 87, 255, 74, 255, 119}}},
   // int -> QUANT16_SYMM map
   .quant16SymmOperands = {},
   // int -> FLOAT16 map
@@ -10012,7 +10012,7 @@
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
-  .quant8AsymmOperands = {{0, {198, 93, 227, 107, 219, 101, 255, 123}}},
+  .quant8AsymmOperands = {{0, {255, 58, 255, 87, 255, 74, 255, 119}}},
   // int -> QUANT16_SYMM map
   .quant16SymmOperands = {},
   // int -> FLOAT16 map
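
The eight hunks above replace regenerated TENSOR_QUANT8_ASYMM expected outputs, and half of the entries now sit at 255. One plausible reading, not stated in the patch itself, is that the regenerated golden data uses a smaller output scale, so the same real-valued outputs clip at the top of the uint8 range. A minimal sketch of the standard asymmetric quantization formula, with made-up scale and zero-point values, illustrates the effect.

#include <algorithm>
#include <cmath>
#include <cstdint>
#include <cstdio>

// TENSOR_QUANT8_ASYMM affine quantization: real = (q - zeroPoint) * scale,
// with q clamped to [0, 255].
uint8_t quantize(float real, float scale, int32_t zeroPoint) {
    const int32_t q = static_cast<int32_t>(std::round(real / scale)) + zeroPoint;
    return static_cast<uint8_t>(std::min(255, std::max(0, q)));
}

int main() {
    // Made-up values for illustration only: the same real output lands
    // mid-range with a larger scale but saturates with a smaller one.
    std::printf("%d\n", quantize(99.0f, 0.5f, 0));   // 198
    std::printf("%d\n", quantize(99.0f, 0.25f, 0));  // 255 (clamped)
    return 0;
}
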
diff --git a/nn/runtime/test/generated/examples/softmax_v1_2.example.cpp b/nn/runtime/test/generated/examples/softmax_v1_2.example.cpp
index fb0c718..0c9ad57 100644
--- a/nn/runtime/test/generated/examples/softmax_v1_2.example.cpp
+++ b/nn/runtime/test/generated/examples/softmax_v1_2.example.cpp
@@ -10,7 +10,7 @@
   // int -> Dimensions map
   .operandDimensions = {{0, {2, 2, 2, 5}}},
   // int -> FLOAT32 map
-  .float32Operands = {{0, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, -1.0f, -2.0f, -3.0f, -4.0f, -5.0f, 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, -1.0f, -2.0f, -3.0f, -4.0f, -5.0f, 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, -1.0f, -2.0f, -3.0f, -4.0f, -5.0f, 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, -1.0f, -2.0f, -3.0f, -4.0f, -5.0f}}},
+  .float32Operands = {{0, {17.0f, 16.0f, 15.0f, 14.0f, 1.0f, -1.0f, -2.0f, -3.0f, -4.0f, -17.0f, 17.0f, 16.0f, 15.0f, 14.0f, 1.0f, -1.0f, -2.0f, -3.0f, -4.0f, -17.0f, 17.0f, 16.0f, 15.0f, 14.0f, 1.0f, -1.0f, -2.0f, -3.0f, -4.0f, -17.0f, 17.0f, 16.0f, 15.0f, 14.0f, 1.0f, -1.0f, -2.0f, -3.0f, -4.0f, -17.0f}}},
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
@@ -33,7 +33,7 @@
   // int -> Dimensions map
   .operandDimensions = {{0, {2, 2, 2, 5}}},
   // int -> FLOAT32 map
-  .float32Operands = {{0, {0.011656231f, 0.031684921f, 0.086128544f, 0.234121657f, 0.636408647f, 0.636408647f, 0.234121657f, 0.086128544f, 0.031684921f, 0.011656231f, 0.011656231f, 0.031684921f, 0.086128544f, 0.234121657f, 0.636408647f, 0.636408647f, 0.234121657f, 0.086128544f, 0.031684921f, 0.011656231f, 0.011656231f, 0.031684921f, 0.086128544f, 0.234121657f, 0.636408647f, 0.636408647f, 0.234121657f, 0.086128544f, 0.031684921f, 0.011656231f, 0.011656231f, 0.031684921f, 0.086128544f, 0.234121657f, 0.636408647f, 0.636408647f, 0.234121657f, 0.086128544f, 0.031684921f, 0.011656231f}}},
+  .float32Operands = {{0, {0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f, 7.246299848982885e-08f, 0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f, 7.246299848982885e-08f, 0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f, 7.246299848982885e-08f, 0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f, 7.246299848982885e-08f, 0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f, 7.246299848982885e-08f, 0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f, 7.246299848982885e-08f, 0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f, 7.246299848982885e-08f, 0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f, 7.246299848982885e-08f}}},
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
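
The two hunks above change the softmax test's logits from {1, 2, 3, 4, 5, -1, ...} to {17, 16, 15, 14, 1, -1, ...} and update the golden outputs to match. The new expected values agree, to double precision, with a plain softmax over {17, 16, 15, 14, 1} with beta = 1; assuming that is how they were regenerated, this standalone sketch reproduces them.

#include <cmath>
#include <cstdio>

int main() {
    // New logits along the softmax axis (beta = 1).
    const double logits[5] = {17.0, 16.0, 15.0, 14.0, 1.0};
    double sum = 0.0;
    for (double x : logits) sum += std::exp(x);
    for (double x : logits) {
        // Prints ~0.643914213, 0.236882801, 0.087144312, 0.032058601,
        // 7.2463e-08 -- matching the updated expected outputs.
        std::printf("%.15g\n", std::exp(x) / sum);
    }
    return 0;
}
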
@@ -67,7 +67,7 @@
   // int -> Dimensions map
   .operandDimensions = {{0, {5}}},
   // int -> FLOAT32 map
-  .float32Operands = {{0, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f}}},
+  .float32Operands = {{0, {17.0f, 16.0f, 15.0f, 14.0f, 1.0f}}},
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
@@ -90,7 +90,7 @@
   // int -> Dimensions map
   .operandDimensions = {{0, {5}}},
   // int -> FLOAT32 map
-  .float32Operands = {{0, {0.011656231f, 0.031684921f, 0.086128544f, 0.234121657f, 0.636408647f}}},
+  .float32Operands = {{0, {0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f, 7.246299848982885e-08f}}},
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
@@ -124,7 +124,7 @@
   // int -> Dimensions map
   .operandDimensions = {{0, {2, 2, 5}}},
   // int -> FLOAT32 map
-  .float32Operands = {{0, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, -1.0f, -2.0f, -3.0f, -4.0f, -5.0f, 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, -1.0f, -2.0f, -3.0f, -4.0f, -5.0f}}},
+  .float32Operands = {{0, {17.0f, 16.0f, 15.0f, 14.0f, 1.0f, -1.0f, -2.0f, -3.0f, -4.0f, -17.0f, 17.0f, 16.0f, 15.0f, 14.0f, 1.0f, -1.0f, -2.0f, -3.0f, -4.0f, -17.0f}}},
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
@@ -147,7 +147,7 @@
   // int -> Dimensions map
   .operandDimensions = {{0, {2, 2, 5}}},
   // int -> FLOAT32 map
-  .float32Operands = {{0, {0.011656231f, 0.031684921f, 0.086128544f, 0.234121657f, 0.636408647f, 0.636408647f, 0.234121657f, 0.086128544f, 0.031684921f, 0.011656231f, 0.011656231f, 0.031684921f, 0.086128544f, 0.234121657f, 0.636408647f, 0.636408647f, 0.234121657f, 0.086128544f, 0.031684921f, 0.011656231f}}},
+  .float32Operands = {{0, {0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f, 7.246299848982885e-08f, 0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f, 7.246299848982885e-08f, 0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f, 7.246299848982885e-08f, 0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f, 7.246299848982885e-08f}}},
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
@@ -181,7 +181,7 @@
   // int -> Dimensions map
   .operandDimensions = {{0, {2, 2, 2, 5}}},
   // int -> FLOAT32 map
-  .float32Operands = {{0, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, -1.0f, -2.0f, -3.0f, -4.0f, -5.0f, 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, -1.0f, -2.0f, -3.0f, -4.0f, -5.0f, 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, -1.0f, -2.0f, -3.0f, -4.0f, -5.0f, 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, -1.0f, -2.0f, -3.0f, -4.0f, -5.0f}}},
+  .float32Operands = {{0, {17.0f, 16.0f, 15.0f, 14.0f, 1.0f, -1.0f, -2.0f, -3.0f, -4.0f, -17.0f, 17.0f, 16.0f, 15.0f, 14.0f, 1.0f, -1.0f, -2.0f, -3.0f, -4.0f, -17.0f, 17.0f, 16.0f, 15.0f, 14.0f, 1.0f, -1.0f, -2.0f, -3.0f, -4.0f, -17.0f, 17.0f, 16.0f, 15.0f, 14.0f, 1.0f, -1.0f, -2.0f, -3.0f, -4.0f, -17.0f}}},
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
@@ -204,7 +204,7 @@
   // int -> Dimensions map
   .operandDimensions = {{0, {2, 2, 2, 5}}},
   // int -> FLOAT32 map
-  .float32Operands = {{0, {0.011656231f, 0.031684921f, 0.086128544f, 0.234121657f, 0.636408647f, 0.636408647f, 0.234121657f, 0.086128544f, 0.031684921f, 0.011656231f, 0.011656231f, 0.031684921f, 0.086128544f, 0.234121657f, 0.636408647f, 0.636408647f, 0.234121657f, 0.086128544f, 0.031684921f, 0.011656231f, 0.011656231f, 0.031684921f, 0.086128544f, 0.234121657f, 0.636408647f, 0.636408647f, 0.234121657f, 0.086128544f, 0.031684921f, 0.011656231f, 0.011656231f, 0.031684921f, 0.086128544f, 0.234121657f, 0.636408647f, 0.636408647f, 0.234121657f, 0.086128544f, 0.031684921f, 0.011656231f}}},
+  .float32Operands = {{0, {0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f, 7.246299848982885e-08f, 0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f, 7.246299848982885e-08f, 0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f, 7.246299848982885e-08f, 0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f, 7.246299848982885e-08f, 0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f, 7.246299848982885e-08f, 0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f, 7.246299848982885e-08f, 0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f, 7.246299848982885e-08f, 0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f, 7.246299848982885e-08f}}},
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
@@ -238,7 +238,7 @@
   // int -> Dimensions map
   .operandDimensions = {{0, {5}}},
   // int -> FLOAT32 map
-  .float32Operands = {{0, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f}}},
+  .float32Operands = {{0, {17.0f, 16.0f, 15.0f, 14.0f, 1.0f}}},
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
@@ -261,7 +261,7 @@
   // int -> Dimensions map
   .operandDimensions = {{0, {5}}},
   // int -> FLOAT32 map
-  .float32Operands = {{0, {0.011656231f, 0.031684921f, 0.086128544f, 0.234121657f, 0.636408647f}}},
+  .float32Operands = {{0, {0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f, 7.246299848982885e-08f}}},
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
@@ -295,7 +295,7 @@
   // int -> Dimensions map
   .operandDimensions = {{0, {2, 2, 5}}},
   // int -> FLOAT32 map
-  .float32Operands = {{0, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, -1.0f, -2.0f, -3.0f, -4.0f, -5.0f, 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, -1.0f, -2.0f, -3.0f, -4.0f, -5.0f}}},
+  .float32Operands = {{0, {17.0f, 16.0f, 15.0f, 14.0f, 1.0f, -1.0f, -2.0f, -3.0f, -4.0f, -17.0f, 17.0f, 16.0f, 15.0f, 14.0f, 1.0f, -1.0f, -2.0f, -3.0f, -4.0f, -17.0f}}},
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
@@ -318,7 +318,7 @@
   // int -> Dimensions map
   .operandDimensions = {{0, {2, 2, 5}}},
   // int -> FLOAT32 map
-  .float32Operands = {{0, {0.011656231f, 0.031684921f, 0.086128544f, 0.234121657f, 0.636408647f, 0.636408647f, 0.234121657f, 0.086128544f, 0.031684921f, 0.011656231f, 0.011656231f, 0.031684921f, 0.086128544f, 0.234121657f, 0.636408647f, 0.636408647f, 0.234121657f, 0.086128544f, 0.031684921f, 0.011656231f}}},
+  .float32Operands = {{0, {0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f, 7.246299848982885e-08f, 0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f, 7.246299848982885e-08f, 0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f, 7.246299848982885e-08f, 0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f, 7.246299848982885e-08f}}},
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
@@ -360,7 +360,7 @@
   // int -> QUANT16_SYMM map
   .quant16SymmOperands = {},
   // int -> FLOAT16 map
-  .float16Operands = {{0, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, -1.0f, -2.0f, -3.0f, -4.0f, -5.0f, 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, -1.0f, -2.0f, -3.0f, -4.0f, -5.0f, 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, -1.0f, -2.0f, -3.0f, -4.0f, -5.0f, 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, -1.0f, -2.0f, -3.0f, -4.0f, -5.0f}}},
+  .float16Operands = {{0, {17.0f, 16.0f, 15.0f, 14.0f, 1.0f, -1.0f, -2.0f, -3.0f, -4.0f, -17.0f, 17.0f, 16.0f, 15.0f, 14.0f, 1.0f, -1.0f, -2.0f, -3.0f, -4.0f, -17.0f, 17.0f, 16.0f, 15.0f, 14.0f, 1.0f, -1.0f, -2.0f, -3.0f, -4.0f, -17.0f, 17.0f, 16.0f, 15.0f, 14.0f, 1.0f, -1.0f, -2.0f, -3.0f, -4.0f, -17.0f}}},
   // int -> BOOL8 map
   .bool8Operands = {},
   // int -> QUANT8_SYMM_PER_CHANNEL map
@@ -383,7 +383,7 @@
   // int -> QUANT16_SYMM map
   .quant16SymmOperands = {},
   // int -> FLOAT16 map
-  .float16Operands = {{0, {0.011656231246888638f, 0.03168492019176483f, 0.08612854033708572f, 0.2341216504573822f, 0.6364086270332336f, 0.6364086270332336f, 0.2341216504573822f, 0.08612854033708572f, 0.03168492019176483f, 0.011656231246888638f, 0.011656231246888638f, 0.03168492019176483f, 0.08612854033708572f, 0.2341216504573822f, 0.6364086270332336f, 0.6364086270332336f, 0.2341216504573822f, 0.08612854033708572f, 0.03168492019176483f, 0.011656231246888638f, 0.011656231246888638f, 0.03168492019176483f, 0.08612854033708572f, 0.2341216504573822f, 0.6364086270332336f, 0.6364086270332336f, 0.2341216504573822f, 0.08612854033708572f, 0.03168492019176483f, 0.011656231246888638f, 0.011656231246888638f, 0.03168492019176483f, 0.08612854033708572f, 0.2341216504573822f, 0.6364086270332336f, 0.6364086270332336f, 0.2341216504573822f, 0.08612854033708572f, 0.03168492019176483f, 0.011656231246888638f}}},
+  .float16Operands = {{0, {0.6439142227172852f, 0.23688280582427979f, 0.08714431524276733f, 0.03205860033631325f, 7.246299560392799e-08f, 0.6439142227172852f, 0.23688280582427979f, 0.08714431524276733f, 0.03205860033631325f, 7.246299560392799e-08f, 0.6439142227172852f, 0.23688280582427979f, 0.08714431524276733f, 0.03205860033631325f, 7.246299560392799e-08f, 0.6439142227172852f, 0.23688280582427979f, 0.08714431524276733f, 0.03205860033631325f, 7.246299560392799e-08f, 0.6439142227172852f, 0.23688280582427979f, 0.08714431524276733f, 0.03205860033631325f, 7.246299560392799e-08f, 0.6439142227172852f, 0.23688280582427979f, 0.08714431524276733f, 0.03205860033631325f, 7.246299560392799e-08f, 0.6439142227172852f, 0.23688280582427979f, 0.08714431524276733f, 0.03205860033631325f, 7.246299560392799e-08f, 0.6439142227172852f, 0.23688280582427979f, 0.08714431524276733f, 0.03205860033631325f, 7.246299560392799e-08f}}},
   // int -> BOOL8 map
   .bool8Operands = {},
   // int -> QUANT8_SYMM_PER_CHANNEL map
@@ -417,7 +417,7 @@
   // int -> QUANT16_SYMM map
   .quant16SymmOperands = {},
   // int -> FLOAT16 map
-  .float16Operands = {{0, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f}}},
+  .float16Operands = {{0, {17.0f, 16.0f, 15.0f, 14.0f, 1.0f}}},
   // int -> BOOL8 map
   .bool8Operands = {},
   // int -> QUANT8_SYMM_PER_CHANNEL map
@@ -440,7 +440,7 @@
   // int -> QUANT16_SYMM map
   .quant16SymmOperands = {},
   // int -> FLOAT16 map
-  .float16Operands = {{0, {0.011656231246888638f, 0.03168492019176483f, 0.08612854033708572f, 0.2341216504573822f, 0.6364086270332336f}}},
+  .float16Operands = {{0, {0.6439142227172852f, 0.23688280582427979f, 0.08714431524276733f, 0.03205860033631325f, 7.246299560392799e-08f}}},
   // int -> BOOL8 map
   .bool8Operands = {},
   // int -> QUANT8_SYMM_PER_CHANNEL map
@@ -474,7 +474,7 @@
   // int -> QUANT16_SYMM map
   .quant16SymmOperands = {},
   // int -> FLOAT16 map
-  .float16Operands = {{0, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, -1.0f, -2.0f, -3.0f, -4.0f, -5.0f, 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, -1.0f, -2.0f, -3.0f, -4.0f, -5.0f}}},
+  .float16Operands = {{0, {17.0f, 16.0f, 15.0f, 14.0f, 1.0f, -1.0f, -2.0f, -3.0f, -4.0f, -17.0f, 17.0f, 16.0f, 15.0f, 14.0f, 1.0f, -1.0f, -2.0f, -3.0f, -4.0f, -17.0f}}},
   // int -> BOOL8 map
   .bool8Operands = {},
   // int -> QUANT8_SYMM_PER_CHANNEL map
@@ -497,7 +497,7 @@
   // int -> QUANT16_SYMM map
   .quant16SymmOperands = {},
   // int -> FLOAT16 map
-  .float16Operands = {{0, {0.011656231246888638f, 0.03168492019176483f, 0.08612854033708572f, 0.2341216504573822f, 0.6364086270332336f, 0.6364086270332336f, 0.2341216504573822f, 0.08612854033708572f, 0.03168492019176483f, 0.011656231246888638f, 0.011656231246888638f, 0.03168492019176483f, 0.08612854033708572f, 0.2341216504573822f, 0.6364086270332336f, 0.6364086270332336f, 0.2341216504573822f, 0.08612854033708572f, 0.03168492019176483f, 0.011656231246888638f}}},
+  .float16Operands = {{0, {0.6439142227172852f, 0.23688280582427979f, 0.08714431524276733f, 0.03205860033631325f, 7.246299560392799e-08f, 0.6439142227172852f, 0.23688280582427979f, 0.08714431524276733f, 0.03205860033631325f, 7.246299560392799e-08f, 0.6439142227172852f, 0.23688280582427979f, 0.08714431524276733f, 0.03205860033631325f, 7.246299560392799e-08f, 0.6439142227172852f, 0.23688280582427979f, 0.08714431524276733f, 0.03205860033631325f, 7.246299560392799e-08f}}},
   // int -> BOOL8 map
   .bool8Operands = {},
   // int -> QUANT8_SYMM_PER_CHANNEL map
@@ -527,7 +527,7 @@
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
-  .quant8AsymmOperands = {{0, {132, 136, 140, 144, 148, 124, 120, 116, 112, 108, 132, 136, 140, 144, 148, 124, 120, 116, 112, 108, 132, 136, 140, 144, 148, 124, 120, 116, 112, 108, 132, 136, 140, 144, 148, 124, 120, 116, 112, 108}}},
+  .quant8AsymmOperands = {{0, {196, 192, 188, 184, 132, 124, 120, 116, 112, 60, 196, 192, 188, 184, 132, 124, 120, 116, 112, 60, 196, 192, 188, 184, 132, 124, 120, 116, 112, 60, 196, 192, 188, 184, 132, 124, 120, 116, 112, 60}}},
   // int -> QUANT16_SYMM map
   .quant16SymmOperands = {},
   // int -> FLOAT16 map
@@ -550,7 +550,7 @@
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
-  .quant8AsymmOperands = {{0, {3, 8, 22, 60, 163, 163, 60, 22, 8, 3, 3, 8, 22, 60, 163, 163, 60, 22, 8, 3, 3, 8, 22, 60, 163, 163, 60, 22, 8, 3, 3, 8, 22, 60, 163, 163, 60, 22, 8, 3}}},
+  .quant8AsymmOperands = {{0, {165, 61, 22, 8, 0, 165, 61, 22, 8, 0, 165, 61, 22, 8, 0, 165, 61, 22, 8, 0, 165, 61, 22, 8, 0, 165, 61, 22, 8, 0, 165, 61, 22, 8, 0, 165, 61, 22, 8, 0}}},
   // int -> QUANT16_SYMM map
   .quant16SymmOperands = {},
   // int -> FLOAT16 map
@@ -584,7 +584,7 @@
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
-  .quant8AsymmOperands = {{0, {132, 136, 140, 144, 148}}},
+  .quant8AsymmOperands = {{0, {196, 192, 188, 184, 132}}},
   // int -> QUANT16_SYMM map
   .quant16SymmOperands = {},
   // int -> FLOAT16 map
@@ -607,7 +607,7 @@
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
-  .quant8AsymmOperands = {{0, {3, 8, 22, 60, 163}}},
+  .quant8AsymmOperands = {{0, {165, 61, 22, 8, 0}}},
   // int -> QUANT16_SYMM map
   .quant16SymmOperands = {},
   // int -> FLOAT16 map
@@ -641,7 +641,7 @@
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
-  .quant8AsymmOperands = {{0, {132, 136, 140, 144, 148, 124, 120, 116, 112, 108, 132, 136, 140, 144, 148, 124, 120, 116, 112, 108}}},
+  .quant8AsymmOperands = {{0, {196, 192, 188, 184, 132, 124, 120, 116, 112, 60, 196, 192, 188, 184, 132, 124, 120, 116, 112, 60}}},
   // int -> QUANT16_SYMM map
   .quant16SymmOperands = {},
   // int -> FLOAT16 map
@@ -664,7 +664,7 @@
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
-  .quant8AsymmOperands = {{0, {3, 8, 22, 60, 163, 163, 60, 22, 8, 3, 3, 8, 22, 60, 163, 163, 60, 22, 8, 3}}},
+  .quant8AsymmOperands = {{0, {165, 61, 22, 8, 0, 165, 61, 22, 8, 0, 165, 61, 22, 8, 0, 165, 61, 22, 8, 0}}},
   // int -> QUANT16_SYMM map
   .quant16SymmOperands = {},
   // int -> FLOAT16 map
@@ -694,7 +694,7 @@
   // int -> Dimensions map
   .operandDimensions = {{0, {2, 2, 2, 5}}},
   // int -> FLOAT32 map
-  .float32Operands = {{0, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, -1.0f, -2.0f, -3.0f, -4.0f, -5.0f, 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, -1.0f, -2.0f, -3.0f, -4.0f, -5.0f, 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, -1.0f, -2.0f, -3.0f, -4.0f, -5.0f, 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, -1.0f, -2.0f, -3.0f, -4.0f, -5.0f}}},
+  .float32Operands = {{0, {17.0f, 16.0f, 15.0f, 14.0f, 1.0f, -1.0f, -2.0f, -3.0f, -4.0f, -17.0f, 17.0f, 16.0f, 15.0f, 14.0f, 1.0f, -1.0f, -2.0f, -3.0f, -4.0f, -17.0f, 17.0f, 16.0f, 15.0f, 14.0f, 1.0f, -1.0f, -2.0f, -3.0f, -4.0f, -17.0f, 17.0f, 16.0f, 15.0f, 14.0f, 1.0f, -1.0f, -2.0f, -3.0f, -4.0f, -17.0f}}},
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
@@ -717,7 +717,7 @@
   // int -> Dimensions map
   .operandDimensions = {{0, {2, 2, 2, 5}}},
   // int -> FLOAT32 map
-  .float32Operands = {{0, {0.011656231f, 0.031684921f, 0.086128544f, 0.234121657f, 0.636408647f, 0.636408647f, 0.234121657f, 0.086128544f, 0.031684921f, 0.011656231f, 0.011656231f, 0.031684921f, 0.086128544f, 0.234121657f, 0.636408647f, 0.636408647f, 0.234121657f, 0.086128544f, 0.031684921f, 0.011656231f, 0.011656231f, 0.031684921f, 0.086128544f, 0.234121657f, 0.636408647f, 0.636408647f, 0.234121657f, 0.086128544f, 0.031684921f, 0.011656231f, 0.011656231f, 0.031684921f, 0.086128544f, 0.234121657f, 0.636408647f, 0.636408647f, 0.234121657f, 0.086128544f, 0.031684921f, 0.011656231f}}},
+  .float32Operands = {{0, {0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f, 7.246299848982885e-08f, 0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f, 7.246299848982885e-08f, 0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f, 7.246299848982885e-08f, 0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f, 7.246299848982885e-08f, 0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f, 7.246299848982885e-08f, 0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f, 7.246299848982885e-08f, 0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f, 7.246299848982885e-08f, 0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f, 7.246299848982885e-08f}}},
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
@@ -751,7 +751,7 @@
   // int -> Dimensions map
   .operandDimensions = {{0, {5}}},
   // int -> FLOAT32 map
-  .float32Operands = {{0, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f}}},
+  .float32Operands = {{0, {17.0f, 16.0f, 15.0f, 14.0f, 1.0f}}},
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
@@ -774,7 +774,7 @@
   // int -> Dimensions map
   .operandDimensions = {{0, {5}}},
   // int -> FLOAT32 map
-  .float32Operands = {{0, {0.011656231f, 0.031684921f, 0.086128544f, 0.234121657f, 0.636408647f}}},
+  .float32Operands = {{0, {0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f, 7.246299848982885e-08f}}},
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
@@ -808,7 +808,7 @@
   // int -> Dimensions map
   .operandDimensions = {{0, {2, 2, 5}}},
   // int -> FLOAT32 map
-  .float32Operands = {{0, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, -1.0f, -2.0f, -3.0f, -4.0f, -5.0f, 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, -1.0f, -2.0f, -3.0f, -4.0f, -5.0f}}},
+  .float32Operands = {{0, {17.0f, 16.0f, 15.0f, 14.0f, 1.0f, -1.0f, -2.0f, -3.0f, -4.0f, -17.0f, 17.0f, 16.0f, 15.0f, 14.0f, 1.0f, -1.0f, -2.0f, -3.0f, -4.0f, -17.0f}}},
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
@@ -831,7 +831,7 @@
   // int -> Dimensions map
   .operandDimensions = {{0, {2, 2, 5}}},
   // int -> FLOAT32 map
-  .float32Operands = {{0, {0.011656231f, 0.031684921f, 0.086128544f, 0.234121657f, 0.636408647f, 0.636408647f, 0.234121657f, 0.086128544f, 0.031684921f, 0.011656231f, 0.011656231f, 0.031684921f, 0.086128544f, 0.234121657f, 0.636408647f, 0.636408647f, 0.234121657f, 0.086128544f, 0.031684921f, 0.011656231f}}},
+  .float32Operands = {{0, {0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f, 7.246299848982885e-08f, 0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f, 7.246299848982885e-08f, 0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f, 7.246299848982885e-08f, 0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f, 7.246299848982885e-08f}}},
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
@@ -865,7 +865,7 @@
   // int -> Dimensions map
   .operandDimensions = {{0, {2, 2, 2, 5}}},
   // int -> FLOAT32 map
-  .float32Operands = {{0, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, -1.0f, -2.0f, -3.0f, -4.0f, -5.0f, 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, -1.0f, -2.0f, -3.0f, -4.0f, -5.0f, 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, -1.0f, -2.0f, -3.0f, -4.0f, -5.0f, 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, -1.0f, -2.0f, -3.0f, -4.0f, -5.0f}}},
+  .float32Operands = {{0, {17.0f, 16.0f, 15.0f, 14.0f, 1.0f, -1.0f, -2.0f, -3.0f, -4.0f, -17.0f, 17.0f, 16.0f, 15.0f, 14.0f, 1.0f, -1.0f, -2.0f, -3.0f, -4.0f, -17.0f, 17.0f, 16.0f, 15.0f, 14.0f, 1.0f, -1.0f, -2.0f, -3.0f, -4.0f, -17.0f, 17.0f, 16.0f, 15.0f, 14.0f, 1.0f, -1.0f, -2.0f, -3.0f, -4.0f, -17.0f}}},
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
@@ -888,7 +888,7 @@
   // int -> Dimensions map
   .operandDimensions = {{0, {2, 2, 2, 5}}},
   // int -> FLOAT32 map
-  .float32Operands = {{0, {0.011656231f, 0.031684921f, 0.086128544f, 0.234121657f, 0.636408647f, 0.636408647f, 0.234121657f, 0.086128544f, 0.031684921f, 0.011656231f, 0.011656231f, 0.031684921f, 0.086128544f, 0.234121657f, 0.636408647f, 0.636408647f, 0.234121657f, 0.086128544f, 0.031684921f, 0.011656231f, 0.011656231f, 0.031684921f, 0.086128544f, 0.234121657f, 0.636408647f, 0.636408647f, 0.234121657f, 0.086128544f, 0.031684921f, 0.011656231f, 0.011656231f, 0.031684921f, 0.086128544f, 0.234121657f, 0.636408647f, 0.636408647f, 0.234121657f, 0.086128544f, 0.031684921f, 0.011656231f}}},
+  .float32Operands = {{0, {0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f, 7.246299848982885e-08f, 0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f, 7.246299848982885e-08f, 0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f, 7.246299848982885e-08f, 0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f, 7.246299848982885e-08f, 0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f, 7.246299848982885e-08f, 0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f, 7.246299848982885e-08f, 0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f, 7.246299848982885e-08f, 0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f, 7.246299848982885e-08f}}},
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
@@ -922,7 +922,7 @@
   // int -> Dimensions map
   .operandDimensions = {{0, {5}}},
   // int -> FLOAT32 map
-  .float32Operands = {{0, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f}}},
+  .float32Operands = {{0, {17.0f, 16.0f, 15.0f, 14.0f, 1.0f}}},
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
@@ -945,7 +945,7 @@
   // int -> Dimensions map
   .operandDimensions = {{0, {5}}},
   // int -> FLOAT32 map
-  .float32Operands = {{0, {0.011656231f, 0.031684921f, 0.086128544f, 0.234121657f, 0.636408647f}}},
+  .float32Operands = {{0, {0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f, 7.246299848982885e-08f}}},
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
@@ -979,7 +979,7 @@
   // int -> Dimensions map
   .operandDimensions = {{0, {2, 2, 5}}},
   // int -> FLOAT32 map
-  .float32Operands = {{0, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, -1.0f, -2.0f, -3.0f, -4.0f, -5.0f, 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, -1.0f, -2.0f, -3.0f, -4.0f, -5.0f}}},
+  .float32Operands = {{0, {17.0f, 16.0f, 15.0f, 14.0f, 1.0f, -1.0f, -2.0f, -3.0f, -4.0f, -17.0f, 17.0f, 16.0f, 15.0f, 14.0f, 1.0f, -1.0f, -2.0f, -3.0f, -4.0f, -17.0f}}},
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
@@ -1002,7 +1002,7 @@
   // int -> Dimensions map
   .operandDimensions = {{0, {2, 2, 5}}},
   // int -> FLOAT32 map
-  .float32Operands = {{0, {0.011656231f, 0.031684921f, 0.086128544f, 0.234121657f, 0.636408647f, 0.636408647f, 0.234121657f, 0.086128544f, 0.031684921f, 0.011656231f, 0.011656231f, 0.031684921f, 0.086128544f, 0.234121657f, 0.636408647f, 0.636408647f, 0.234121657f, 0.086128544f, 0.031684921f, 0.011656231f}}},
+  .float32Operands = {{0, {0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f, 7.246299848982885e-08f, 0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f, 7.246299848982885e-08f, 0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f, 7.246299848982885e-08f, 0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f, 7.246299848982885e-08f}}},
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
@@ -1044,7 +1044,7 @@
   // int -> QUANT16_SYMM map
   .quant16SymmOperands = {},
   // int -> FLOAT16 map
-  .float16Operands = {{0, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, -1.0f, -2.0f, -3.0f, -4.0f, -5.0f, 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, -1.0f, -2.0f, -3.0f, -4.0f, -5.0f, 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, -1.0f, -2.0f, -3.0f, -4.0f, -5.0f, 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, -1.0f, -2.0f, -3.0f, -4.0f, -5.0f}}},
+  .float16Operands = {{0, {17.0f, 16.0f, 15.0f, 14.0f, 1.0f, -1.0f, -2.0f, -3.0f, -4.0f, -17.0f, 17.0f, 16.0f, 15.0f, 14.0f, 1.0f, -1.0f, -2.0f, -3.0f, -4.0f, -17.0f, 17.0f, 16.0f, 15.0f, 14.0f, 1.0f, -1.0f, -2.0f, -3.0f, -4.0f, -17.0f, 17.0f, 16.0f, 15.0f, 14.0f, 1.0f, -1.0f, -2.0f, -3.0f, -4.0f, -17.0f}}},
   // int -> BOOL8 map
   .bool8Operands = {},
   // int -> QUANT8_SYMM_PER_CHANNEL map
@@ -1067,7 +1067,7 @@
   // int -> QUANT16_SYMM map
   .quant16SymmOperands = {},
   // int -> FLOAT16 map
-  .float16Operands = {{0, {0.011656231246888638f, 0.03168492019176483f, 0.08612854033708572f, 0.2341216504573822f, 0.6364086270332336f, 0.6364086270332336f, 0.2341216504573822f, 0.08612854033708572f, 0.03168492019176483f, 0.011656231246888638f, 0.011656231246888638f, 0.03168492019176483f, 0.08612854033708572f, 0.2341216504573822f, 0.6364086270332336f, 0.6364086270332336f, 0.2341216504573822f, 0.08612854033708572f, 0.03168492019176483f, 0.011656231246888638f, 0.011656231246888638f, 0.03168492019176483f, 0.08612854033708572f, 0.2341216504573822f, 0.6364086270332336f, 0.6364086270332336f, 0.2341216504573822f, 0.08612854033708572f, 0.03168492019176483f, 0.011656231246888638f, 0.011656231246888638f, 0.03168492019176483f, 0.08612854033708572f, 0.2341216504573822f, 0.6364086270332336f, 0.6364086270332336f, 0.2341216504573822f, 0.08612854033708572f, 0.03168492019176483f, 0.011656231246888638f}}},
+  .float16Operands = {{0, {0.6439142227172852f, 0.23688280582427979f, 0.08714431524276733f, 0.03205860033631325f, 7.246299560392799e-08f, 0.6439142227172852f, 0.23688280582427979f, 0.08714431524276733f, 0.03205860033631325f, 7.246299560392799e-08f, 0.6439142227172852f, 0.23688280582427979f, 0.08714431524276733f, 0.03205860033631325f, 7.246299560392799e-08f, 0.6439142227172852f, 0.23688280582427979f, 0.08714431524276733f, 0.03205860033631325f, 7.246299560392799e-08f, 0.6439142227172852f, 0.23688280582427979f, 0.08714431524276733f, 0.03205860033631325f, 7.246299560392799e-08f, 0.6439142227172852f, 0.23688280582427979f, 0.08714431524276733f, 0.03205860033631325f, 7.246299560392799e-08f, 0.6439142227172852f, 0.23688280582427979f, 0.08714431524276733f, 0.03205860033631325f, 7.246299560392799e-08f, 0.6439142227172852f, 0.23688280582427979f, 0.08714431524276733f, 0.03205860033631325f, 7.246299560392799e-08f}}},
   // int -> BOOL8 map
   .bool8Operands = {},
   // int -> QUANT8_SYMM_PER_CHANNEL map
@@ -1101,7 +1101,7 @@
   // int -> QUANT16_SYMM map
   .quant16SymmOperands = {},
   // int -> FLOAT16 map
-  .float16Operands = {{0, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f}}},
+  .float16Operands = {{0, {17.0f, 16.0f, 15.0f, 14.0f, 1.0f}}},
   // int -> BOOL8 map
   .bool8Operands = {},
   // int -> QUANT8_SYMM_PER_CHANNEL map
@@ -1124,7 +1124,7 @@
   // int -> QUANT16_SYMM map
   .quant16SymmOperands = {},
   // int -> FLOAT16 map
-  .float16Operands = {{0, {0.011656231246888638f, 0.03168492019176483f, 0.08612854033708572f, 0.2341216504573822f, 0.6364086270332336f}}},
+  .float16Operands = {{0, {0.6439142227172852f, 0.23688280582427979f, 0.08714431524276733f, 0.03205860033631325f, 7.246299560392799e-08f}}},
   // int -> BOOL8 map
   .bool8Operands = {},
   // int -> QUANT8_SYMM_PER_CHANNEL map
@@ -1158,7 +1158,7 @@
   // int -> QUANT16_SYMM map
   .quant16SymmOperands = {},
   // int -> FLOAT16 map
-  .float16Operands = {{0, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, -1.0f, -2.0f, -3.0f, -4.0f, -5.0f, 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, -1.0f, -2.0f, -3.0f, -4.0f, -5.0f}}},
+  .float16Operands = {{0, {17.0f, 16.0f, 15.0f, 14.0f, 1.0f, -1.0f, -2.0f, -3.0f, -4.0f, -17.0f, 17.0f, 16.0f, 15.0f, 14.0f, 1.0f, -1.0f, -2.0f, -3.0f, -4.0f, -17.0f}}},
   // int -> BOOL8 map
   .bool8Operands = {},
   // int -> QUANT8_SYMM_PER_CHANNEL map
@@ -1181,7 +1181,7 @@
   // int -> QUANT16_SYMM map
   .quant16SymmOperands = {},
   // int -> FLOAT16 map
-  .float16Operands = {{0, {0.011656231246888638f, 0.03168492019176483f, 0.08612854033708572f, 0.2341216504573822f, 0.6364086270332336f, 0.6364086270332336f, 0.2341216504573822f, 0.08612854033708572f, 0.03168492019176483f, 0.011656231246888638f, 0.011656231246888638f, 0.03168492019176483f, 0.08612854033708572f, 0.2341216504573822f, 0.6364086270332336f, 0.6364086270332336f, 0.2341216504573822f, 0.08612854033708572f, 0.03168492019176483f, 0.011656231246888638f}}},
+  .float16Operands = {{0, {0.6439142227172852f, 0.23688280582427979f, 0.08714431524276733f, 0.03205860033631325f, 7.246299560392799e-08f, 0.6439142227172852f, 0.23688280582427979f, 0.08714431524276733f, 0.03205860033631325f, 7.246299560392799e-08f, 0.6439142227172852f, 0.23688280582427979f, 0.08714431524276733f, 0.03205860033631325f, 7.246299560392799e-08f, 0.6439142227172852f, 0.23688280582427979f, 0.08714431524276733f, 0.03205860033631325f, 7.246299560392799e-08f}}},
   // int -> BOOL8 map
   .bool8Operands = {},
   // int -> QUANT8_SYMM_PER_CHANNEL map
@@ -1211,7 +1211,7 @@
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
-  .quant8AsymmOperands = {{0, {132, 136, 140, 144, 148, 124, 120, 116, 112, 108, 132, 136, 140, 144, 148, 124, 120, 116, 112, 108, 132, 136, 140, 144, 148, 124, 120, 116, 112, 108, 132, 136, 140, 144, 148, 124, 120, 116, 112, 108}}},
+  .quant8AsymmOperands = {{0, {196, 192, 188, 184, 132, 124, 120, 116, 112, 60, 196, 192, 188, 184, 132, 124, 120, 116, 112, 60, 196, 192, 188, 184, 132, 124, 120, 116, 112, 60, 196, 192, 188, 184, 132, 124, 120, 116, 112, 60}}},
   // int -> QUANT16_SYMM map
   .quant16SymmOperands = {},
   // int -> FLOAT16 map
@@ -1234,7 +1234,7 @@
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
-  .quant8AsymmOperands = {{0, {3, 8, 22, 60, 163, 163, 60, 22, 8, 3, 3, 8, 22, 60, 163, 163, 60, 22, 8, 3, 3, 8, 22, 60, 163, 163, 60, 22, 8, 3, 3, 8, 22, 60, 163, 163, 60, 22, 8, 3}}},
+  .quant8AsymmOperands = {{0, {165, 61, 22, 8, 0, 165, 61, 22, 8, 0, 165, 61, 22, 8, 0, 165, 61, 22, 8, 0, 165, 61, 22, 8, 0, 165, 61, 22, 8, 0, 165, 61, 22, 8, 0, 165, 61, 22, 8, 0}}},
   // int -> QUANT16_SYMM map
   .quant16SymmOperands = {},
   // int -> FLOAT16 map
@@ -1268,7 +1268,7 @@
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
-  .quant8AsymmOperands = {{0, {132, 136, 140, 144, 148}}},
+  .quant8AsymmOperands = {{0, {196, 192, 188, 184, 132}}},
   // int -> QUANT16_SYMM map
   .quant16SymmOperands = {},
   // int -> FLOAT16 map
@@ -1291,7 +1291,7 @@
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
-  .quant8AsymmOperands = {{0, {3, 8, 22, 60, 163}}},
+  .quant8AsymmOperands = {{0, {165, 61, 22, 8, 0}}},
   // int -> QUANT16_SYMM map
   .quant16SymmOperands = {},
   // int -> FLOAT16 map
@@ -1325,7 +1325,7 @@
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
-  .quant8AsymmOperands = {{0, {132, 136, 140, 144, 148, 124, 120, 116, 112, 108, 132, 136, 140, 144, 148, 124, 120, 116, 112, 108}}},
+  .quant8AsymmOperands = {{0, {196, 192, 188, 184, 132, 124, 120, 116, 112, 60, 196, 192, 188, 184, 132, 124, 120, 116, 112, 60}}},
   // int -> QUANT16_SYMM map
   .quant16SymmOperands = {},
   // int -> FLOAT16 map
@@ -1348,7 +1348,7 @@
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
-  .quant8AsymmOperands = {{0, {3, 8, 22, 60, 163, 163, 60, 22, 8, 3, 3, 8, 22, 60, 163, 163, 60, 22, 8, 3}}},
+  .quant8AsymmOperands = {{0, {165, 61, 22, 8, 0, 165, 61, 22, 8, 0, 165, 61, 22, 8, 0, 165, 61, 22, 8, 0}}},
   // int -> QUANT16_SYMM map
   .quant16SymmOperands = {},
   // int -> FLOAT16 map
@@ -2746,7 +2746,7 @@
   // int -> Dimensions map
   .operandDimensions = {{0, {5, 2, 2, 2}}},
   // int -> FLOAT32 map
-  .float32Operands = {{0, {1.0f, -1.0f, 1.0f, -1.0f, 1.0f, -1.0f, 1.0f, -1.0f, 2.0f, -2.0f, 2.0f, -2.0f, 2.0f, -2.0f, 2.0f, -2.0f, 3.0f, -3.0f, 3.0f, -3.0f, 3.0f, -3.0f, 3.0f, -3.0f, 4.0f, -4.0f, 4.0f, -4.0f, 4.0f, -4.0f, 4.0f, -4.0f, 5.0f, -5.0f, 5.0f, -5.0f, 5.0f, -5.0f, 5.0f, -5.0f}}},
+  .float32Operands = {{0, {17.0f, -1.0f, 17.0f, -1.0f, 17.0f, -1.0f, 17.0f, -1.0f, 16.0f, -2.0f, 16.0f, -2.0f, 16.0f, -2.0f, 16.0f, -2.0f, 15.0f, -3.0f, 15.0f, -3.0f, 15.0f, -3.0f, 15.0f, -3.0f, 14.0f, -4.0f, 14.0f, -4.0f, 14.0f, -4.0f, 14.0f, -4.0f, 1.0f, -17.0f, 1.0f, -17.0f, 1.0f, -17.0f, 1.0f, -17.0f}}},
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
@@ -2769,7 +2769,7 @@
   // int -> Dimensions map
   .operandDimensions = {{0, {5, 2, 2, 2}}},
   // int -> FLOAT32 map
-  .float32Operands = {{0, {0.011656231f, 0.636408647f, 0.011656231f, 0.636408647f, 0.011656231f, 0.636408647f, 0.011656231f, 0.636408647f, 0.031684921f, 0.234121657f, 0.031684921f, 0.234121657f, 0.031684921f, 0.234121657f, 0.031684921f, 0.234121657f, 0.086128544f, 0.086128544f, 0.086128544f, 0.086128544f, 0.086128544f, 0.086128544f, 0.086128544f, 0.086128544f, 0.234121657f, 0.031684921f, 0.234121657f, 0.031684921f, 0.234121657f, 0.031684921f, 0.234121657f, 0.031684921f, 0.636408647f, 0.011656231f, 0.636408647f, 0.011656231f, 0.636408647f, 0.011656231f, 0.636408647f, 0.011656231f}}},
+  .float32Operands = {{0, {0.643914213228014f, 0.643914213228014f, 0.643914213228014f, 0.643914213228014f, 0.643914213228014f, 0.643914213228014f, 0.643914213228014f, 0.643914213228014f, 0.236882800924671f, 0.236882800924671f, 0.236882800924671f, 0.236882800924671f, 0.236882800924671f, 0.236882800924671f, 0.236882800924671f, 0.236882800924671f, 0.087144312427294f, 0.087144312427294f, 0.087144312427294f, 0.087144312427294f, 0.087144312427294f, 0.087144312427294f, 0.087144312427294f, 0.087144312427294f, 0.032058600957022f, 0.032058600957022f, 0.032058600957022f, 0.032058600957022f, 0.032058600957022f, 0.032058600957022f, 0.032058600957022f, 0.032058600957022f, 7.246299848982885e-08f, 7.246299848982885e-08f, 7.246299848982885e-08f, 7.246299848982885e-08f, 7.246299848982885e-08f, 7.246299848982885e-08f, 7.246299848982885e-08f, 7.246299848982885e-08f}}},
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
@@ -2803,7 +2803,7 @@
   // int -> Dimensions map
   .operandDimensions = {{0, {5, 2, 2, 2}}},
   // int -> FLOAT32 map
-  .float32Operands = {{0, {1.0f, -1.0f, 1.0f, -1.0f, 1.0f, -1.0f, 1.0f, -1.0f, 2.0f, -2.0f, 2.0f, -2.0f, 2.0f, -2.0f, 2.0f, -2.0f, 3.0f, -3.0f, 3.0f, -3.0f, 3.0f, -3.0f, 3.0f, -3.0f, 4.0f, -4.0f, 4.0f, -4.0f, 4.0f, -4.0f, 4.0f, -4.0f, 5.0f, -5.0f, 5.0f, -5.0f, 5.0f, -5.0f, 5.0f, -5.0f}}},
+  .float32Operands = {{0, {17.0f, -1.0f, 17.0f, -1.0f, 17.0f, -1.0f, 17.0f, -1.0f, 16.0f, -2.0f, 16.0f, -2.0f, 16.0f, -2.0f, 16.0f, -2.0f, 15.0f, -3.0f, 15.0f, -3.0f, 15.0f, -3.0f, 15.0f, -3.0f, 14.0f, -4.0f, 14.0f, -4.0f, 14.0f, -4.0f, 14.0f, -4.0f, 1.0f, -17.0f, 1.0f, -17.0f, 1.0f, -17.0f, 1.0f, -17.0f}}},
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
@@ -2826,7 +2826,7 @@
   // int -> Dimensions map
   .operandDimensions = {{0, {5, 2, 2, 2}}},
   // int -> FLOAT32 map
-  .float32Operands = {{0, {0.011656231f, 0.636408647f, 0.011656231f, 0.636408647f, 0.011656231f, 0.636408647f, 0.011656231f, 0.636408647f, 0.031684921f, 0.234121657f, 0.031684921f, 0.234121657f, 0.031684921f, 0.234121657f, 0.031684921f, 0.234121657f, 0.086128544f, 0.086128544f, 0.086128544f, 0.086128544f, 0.086128544f, 0.086128544f, 0.086128544f, 0.086128544f, 0.234121657f, 0.031684921f, 0.234121657f, 0.031684921f, 0.234121657f, 0.031684921f, 0.234121657f, 0.031684921f, 0.636408647f, 0.011656231f, 0.636408647f, 0.011656231f, 0.636408647f, 0.011656231f, 0.636408647f, 0.011656231f}}},
+  .float32Operands = {{0, {0.643914213228014f, 0.643914213228014f, 0.643914213228014f, 0.643914213228014f, 0.643914213228014f, 0.643914213228014f, 0.643914213228014f, 0.643914213228014f, 0.236882800924671f, 0.236882800924671f, 0.236882800924671f, 0.236882800924671f, 0.236882800924671f, 0.236882800924671f, 0.236882800924671f, 0.236882800924671f, 0.087144312427294f, 0.087144312427294f, 0.087144312427294f, 0.087144312427294f, 0.087144312427294f, 0.087144312427294f, 0.087144312427294f, 0.087144312427294f, 0.032058600957022f, 0.032058600957022f, 0.032058600957022f, 0.032058600957022f, 0.032058600957022f, 0.032058600957022f, 0.032058600957022f, 0.032058600957022f, 7.246299848982885e-08f, 7.246299848982885e-08f, 7.246299848982885e-08f, 7.246299848982885e-08f, 7.246299848982885e-08f, 7.246299848982885e-08f, 7.246299848982885e-08f, 7.246299848982885e-08f}}},
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
@@ -2860,7 +2860,7 @@
   // int -> Dimensions map
   .operandDimensions = {{0, {2, 5, 2, 2}}},
   // int -> FLOAT32 map
-  .float32Operands = {{0, {1.0f, -1.0f, 1.0f, -1.0f, 2.0f, -2.0f, 2.0f, -2.0f, 3.0f, -3.0f, 3.0f, -3.0f, 4.0f, -4.0f, 4.0f, -4.0f, 5.0f, -5.0f, 5.0f, -5.0f, 1.0f, -1.0f, 1.0f, -1.0f, 2.0f, -2.0f, 2.0f, -2.0f, 3.0f, -3.0f, 3.0f, -3.0f, 4.0f, -4.0f, 4.0f, -4.0f, 5.0f, -5.0f, 5.0f, -5.0f}}},
+  .float32Operands = {{0, {17.0f, -1.0f, 17.0f, -1.0f, 16.0f, -2.0f, 16.0f, -2.0f, 15.0f, -3.0f, 15.0f, -3.0f, 14.0f, -4.0f, 14.0f, -4.0f, 1.0f, -17.0f, 1.0f, -17.0f, 17.0f, -1.0f, 17.0f, -1.0f, 16.0f, -2.0f, 16.0f, -2.0f, 15.0f, -3.0f, 15.0f, -3.0f, 14.0f, -4.0f, 14.0f, -4.0f, 1.0f, -17.0f, 1.0f, -17.0f}}},
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
@@ -2883,7 +2883,7 @@
   // int -> Dimensions map
   .operandDimensions = {{0, {2, 5, 2, 2}}},
   // int -> FLOAT32 map
-  .float32Operands = {{0, {0.011656231f, 0.636408647f, 0.011656231f, 0.636408647f, 0.031684921f, 0.234121657f, 0.031684921f, 0.234121657f, 0.086128544f, 0.086128544f, 0.086128544f, 0.086128544f, 0.234121657f, 0.031684921f, 0.234121657f, 0.031684921f, 0.636408647f, 0.011656231f, 0.636408647f, 0.011656231f, 0.011656231f, 0.636408647f, 0.011656231f, 0.636408647f, 0.031684921f, 0.234121657f, 0.031684921f, 0.234121657f, 0.086128544f, 0.086128544f, 0.086128544f, 0.086128544f, 0.234121657f, 0.031684921f, 0.234121657f, 0.031684921f, 0.636408647f, 0.011656231f, 0.636408647f, 0.011656231f}}},
+  .float32Operands = {{0, {0.643914213228014f, 0.643914213228014f, 0.643914213228014f, 0.643914213228014f, 0.236882800924671f, 0.236882800924671f, 0.236882800924671f, 0.236882800924671f, 0.087144312427294f, 0.087144312427294f, 0.087144312427294f, 0.087144312427294f, 0.032058600957022f, 0.032058600957022f, 0.032058600957022f, 0.032058600957022f, 7.246299848982885e-08f, 7.246299848982885e-08f, 7.246299848982885e-08f, 7.246299848982885e-08f, 0.643914213228014f, 0.643914213228014f, 0.643914213228014f, 0.643914213228014f, 0.236882800924671f, 0.236882800924671f, 0.236882800924671f, 0.236882800924671f, 0.087144312427294f, 0.087144312427294f, 0.087144312427294f, 0.087144312427294f, 0.032058600957022f, 0.032058600957022f, 0.032058600957022f, 0.032058600957022f, 7.246299848982885e-08f, 7.246299848982885e-08f, 7.246299848982885e-08f, 7.246299848982885e-08f}}},
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
@@ -2917,7 +2917,7 @@
   // int -> Dimensions map
   .operandDimensions = {{0, {2, 5, 2, 2}}},
   // int -> FLOAT32 map
-  .float32Operands = {{0, {1.0f, -1.0f, 1.0f, -1.0f, 2.0f, -2.0f, 2.0f, -2.0f, 3.0f, -3.0f, 3.0f, -3.0f, 4.0f, -4.0f, 4.0f, -4.0f, 5.0f, -5.0f, 5.0f, -5.0f, 1.0f, -1.0f, 1.0f, -1.0f, 2.0f, -2.0f, 2.0f, -2.0f, 3.0f, -3.0f, 3.0f, -3.0f, 4.0f, -4.0f, 4.0f, -4.0f, 5.0f, -5.0f, 5.0f, -5.0f}}},
+  .float32Operands = {{0, {17.0f, -1.0f, 17.0f, -1.0f, 16.0f, -2.0f, 16.0f, -2.0f, 15.0f, -3.0f, 15.0f, -3.0f, 14.0f, -4.0f, 14.0f, -4.0f, 1.0f, -17.0f, 1.0f, -17.0f, 17.0f, -1.0f, 17.0f, -1.0f, 16.0f, -2.0f, 16.0f, -2.0f, 15.0f, -3.0f, 15.0f, -3.0f, 14.0f, -4.0f, 14.0f, -4.0f, 1.0f, -17.0f, 1.0f, -17.0f}}},
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
@@ -2940,7 +2940,7 @@
   // int -> Dimensions map
   .operandDimensions = {{0, {2, 5, 2, 2}}},
   // int -> FLOAT32 map
-  .float32Operands = {{0, {0.011656231f, 0.636408647f, 0.011656231f, 0.636408647f, 0.031684921f, 0.234121657f, 0.031684921f, 0.234121657f, 0.086128544f, 0.086128544f, 0.086128544f, 0.086128544f, 0.234121657f, 0.031684921f, 0.234121657f, 0.031684921f, 0.636408647f, 0.011656231f, 0.636408647f, 0.011656231f, 0.011656231f, 0.636408647f, 0.011656231f, 0.636408647f, 0.031684921f, 0.234121657f, 0.031684921f, 0.234121657f, 0.086128544f, 0.086128544f, 0.086128544f, 0.086128544f, 0.234121657f, 0.031684921f, 0.234121657f, 0.031684921f, 0.636408647f, 0.011656231f, 0.636408647f, 0.011656231f}}},
+  .float32Operands = {{0, {0.643914213228014f, 0.643914213228014f, 0.643914213228014f, 0.643914213228014f, 0.236882800924671f, 0.236882800924671f, 0.236882800924671f, 0.236882800924671f, 0.087144312427294f, 0.087144312427294f, 0.087144312427294f, 0.087144312427294f, 0.032058600957022f, 0.032058600957022f, 0.032058600957022f, 0.032058600957022f, 7.246299848982885e-08f, 7.246299848982885e-08f, 7.246299848982885e-08f, 7.246299848982885e-08f, 0.643914213228014f, 0.643914213228014f, 0.643914213228014f, 0.643914213228014f, 0.236882800924671f, 0.236882800924671f, 0.236882800924671f, 0.236882800924671f, 0.087144312427294f, 0.087144312427294f, 0.087144312427294f, 0.087144312427294f, 0.032058600957022f, 0.032058600957022f, 0.032058600957022f, 0.032058600957022f, 7.246299848982885e-08f, 7.246299848982885e-08f, 7.246299848982885e-08f, 7.246299848982885e-08f}}},
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
@@ -2974,7 +2974,7 @@
   // int -> Dimensions map
   .operandDimensions = {{0, {2, 2, 5, 2}}},
   // int -> FLOAT32 map
-  .float32Operands = {{0, {1.0f, -1.0f, 2.0f, -2.0f, 3.0f, -3.0f, 4.0f, -4.0f, 5.0f, -5.0f, 1.0f, -1.0f, 2.0f, -2.0f, 3.0f, -3.0f, 4.0f, -4.0f, 5.0f, -5.0f, 1.0f, -1.0f, 2.0f, -2.0f, 3.0f, -3.0f, 4.0f, -4.0f, 5.0f, -5.0f, 1.0f, -1.0f, 2.0f, -2.0f, 3.0f, -3.0f, 4.0f, -4.0f, 5.0f, -5.0f}}},
+  .float32Operands = {{0, {17.0f, -1.0f, 16.0f, -2.0f, 15.0f, -3.0f, 14.0f, -4.0f, 1.0f, -17.0f, 17.0f, -1.0f, 16.0f, -2.0f, 15.0f, -3.0f, 14.0f, -4.0f, 1.0f, -17.0f, 17.0f, -1.0f, 16.0f, -2.0f, 15.0f, -3.0f, 14.0f, -4.0f, 1.0f, -17.0f, 17.0f, -1.0f, 16.0f, -2.0f, 15.0f, -3.0f, 14.0f, -4.0f, 1.0f, -17.0f}}},
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
@@ -2997,7 +2997,7 @@
   // int -> Dimensions map
   .operandDimensions = {{0, {2, 2, 5, 2}}},
   // int -> FLOAT32 map
-  .float32Operands = {{0, {0.011656231f, 0.636408647f, 0.031684921f, 0.234121657f, 0.086128544f, 0.086128544f, 0.234121657f, 0.031684921f, 0.636408647f, 0.011656231f, 0.011656231f, 0.636408647f, 0.031684921f, 0.234121657f, 0.086128544f, 0.086128544f, 0.234121657f, 0.031684921f, 0.636408647f, 0.011656231f, 0.011656231f, 0.636408647f, 0.031684921f, 0.234121657f, 0.086128544f, 0.086128544f, 0.234121657f, 0.031684921f, 0.636408647f, 0.011656231f, 0.011656231f, 0.636408647f, 0.031684921f, 0.234121657f, 0.086128544f, 0.086128544f, 0.234121657f, 0.031684921f, 0.636408647f, 0.011656231f}}},
+  .float32Operands = {{0, {0.643914213228014f, 0.643914213228014f, 0.236882800924671f, 0.236882800924671f, 0.087144312427294f, 0.087144312427294f, 0.032058600957022f, 0.032058600957022f, 7.246299848982885e-08f, 7.246299848982885e-08f, 0.643914213228014f, 0.643914213228014f, 0.236882800924671f, 0.236882800924671f, 0.087144312427294f, 0.087144312427294f, 0.032058600957022f, 0.032058600957022f, 7.246299848982885e-08f, 7.246299848982885e-08f, 0.643914213228014f, 0.643914213228014f, 0.236882800924671f, 0.236882800924671f, 0.087144312427294f, 0.087144312427294f, 0.032058600957022f, 0.032058600957022f, 7.246299848982885e-08f, 7.246299848982885e-08f, 0.643914213228014f, 0.643914213228014f, 0.236882800924671f, 0.236882800924671f, 0.087144312427294f, 0.087144312427294f, 0.032058600957022f, 0.032058600957022f, 7.246299848982885e-08f, 7.246299848982885e-08f}}},
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
@@ -3031,7 +3031,7 @@
   // int -> Dimensions map
   .operandDimensions = {{0, {2, 2, 5, 2}}},
   // int -> FLOAT32 map
-  .float32Operands = {{0, {1.0f, -1.0f, 2.0f, -2.0f, 3.0f, -3.0f, 4.0f, -4.0f, 5.0f, -5.0f, 1.0f, -1.0f, 2.0f, -2.0f, 3.0f, -3.0f, 4.0f, -4.0f, 5.0f, -5.0f, 1.0f, -1.0f, 2.0f, -2.0f, 3.0f, -3.0f, 4.0f, -4.0f, 5.0f, -5.0f, 1.0f, -1.0f, 2.0f, -2.0f, 3.0f, -3.0f, 4.0f, -4.0f, 5.0f, -5.0f}}},
+  .float32Operands = {{0, {17.0f, -1.0f, 16.0f, -2.0f, 15.0f, -3.0f, 14.0f, -4.0f, 1.0f, -17.0f, 17.0f, -1.0f, 16.0f, -2.0f, 15.0f, -3.0f, 14.0f, -4.0f, 1.0f, -17.0f, 17.0f, -1.0f, 16.0f, -2.0f, 15.0f, -3.0f, 14.0f, -4.0f, 1.0f, -17.0f, 17.0f, -1.0f, 16.0f, -2.0f, 15.0f, -3.0f, 14.0f, -4.0f, 1.0f, -17.0f}}},
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
@@ -3054,7 +3054,7 @@
   // int -> Dimensions map
   .operandDimensions = {{0, {2, 2, 5, 2}}},
   // int -> FLOAT32 map
-  .float32Operands = {{0, {0.011656231f, 0.636408647f, 0.031684921f, 0.234121657f, 0.086128544f, 0.086128544f, 0.234121657f, 0.031684921f, 0.636408647f, 0.011656231f, 0.011656231f, 0.636408647f, 0.031684921f, 0.234121657f, 0.086128544f, 0.086128544f, 0.234121657f, 0.031684921f, 0.636408647f, 0.011656231f, 0.011656231f, 0.636408647f, 0.031684921f, 0.234121657f, 0.086128544f, 0.086128544f, 0.234121657f, 0.031684921f, 0.636408647f, 0.011656231f, 0.011656231f, 0.636408647f, 0.031684921f, 0.234121657f, 0.086128544f, 0.086128544f, 0.234121657f, 0.031684921f, 0.636408647f, 0.011656231f}}},
+  .float32Operands = {{0, {0.643914213228014f, 0.643914213228014f, 0.236882800924671f, 0.236882800924671f, 0.087144312427294f, 0.087144312427294f, 0.032058600957022f, 0.032058600957022f, 7.246299848982885e-08f, 7.246299848982885e-08f, 0.643914213228014f, 0.643914213228014f, 0.236882800924671f, 0.236882800924671f, 0.087144312427294f, 0.087144312427294f, 0.032058600957022f, 0.032058600957022f, 7.246299848982885e-08f, 7.246299848982885e-08f, 0.643914213228014f, 0.643914213228014f, 0.236882800924671f, 0.236882800924671f, 0.087144312427294f, 0.087144312427294f, 0.032058600957022f, 0.032058600957022f, 7.246299848982885e-08f, 7.246299848982885e-08f, 0.643914213228014f, 0.643914213228014f, 0.236882800924671f, 0.236882800924671f, 0.087144312427294f, 0.087144312427294f, 0.032058600957022f, 0.032058600957022f, 7.246299848982885e-08f, 7.246299848982885e-08f}}},
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
@@ -3088,7 +3088,7 @@
   // int -> Dimensions map
   .operandDimensions = {{0, {2, 2, 2, 5}}},
   // int -> FLOAT32 map
-  .float32Operands = {{0, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, -1.0f, -2.0f, -3.0f, -4.0f, -5.0f, 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, -1.0f, -2.0f, -3.0f, -4.0f, -5.0f, 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, -1.0f, -2.0f, -3.0f, -4.0f, -5.0f, 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, -1.0f, -2.0f, -3.0f, -4.0f, -5.0f}}},
+  .float32Operands = {{0, {17.0f, 16.0f, 15.0f, 14.0f, 1.0f, -1.0f, -2.0f, -3.0f, -4.0f, -17.0f, 17.0f, 16.0f, 15.0f, 14.0f, 1.0f, -1.0f, -2.0f, -3.0f, -4.0f, -17.0f, 17.0f, 16.0f, 15.0f, 14.0f, 1.0f, -1.0f, -2.0f, -3.0f, -4.0f, -17.0f, 17.0f, 16.0f, 15.0f, 14.0f, 1.0f, -1.0f, -2.0f, -3.0f, -4.0f, -17.0f}}},
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
@@ -3111,7 +3111,7 @@
   // int -> Dimensions map
   .operandDimensions = {{0, {2, 2, 2, 5}}},
   // int -> FLOAT32 map
-  .float32Operands = {{0, {0.011656231f, 0.031684921f, 0.086128544f, 0.234121657f, 0.636408647f, 0.636408647f, 0.234121657f, 0.086128544f, 0.031684921f, 0.011656231f, 0.011656231f, 0.031684921f, 0.086128544f, 0.234121657f, 0.636408647f, 0.636408647f, 0.234121657f, 0.086128544f, 0.031684921f, 0.011656231f, 0.011656231f, 0.031684921f, 0.086128544f, 0.234121657f, 0.636408647f, 0.636408647f, 0.234121657f, 0.086128544f, 0.031684921f, 0.011656231f, 0.011656231f, 0.031684921f, 0.086128544f, 0.234121657f, 0.636408647f, 0.636408647f, 0.234121657f, 0.086128544f, 0.031684921f, 0.011656231f}}},
+  .float32Operands = {{0, {0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f, 7.246299848982885e-08f, 0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f, 7.246299848982885e-08f, 0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f, 7.246299848982885e-08f, 0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f, 7.246299848982885e-08f, 0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f, 7.246299848982885e-08f, 0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f, 7.246299848982885e-08f, 0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f, 7.246299848982885e-08f, 0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f, 7.246299848982885e-08f}}},
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
@@ -3145,7 +3145,7 @@
   // int -> Dimensions map
   .operandDimensions = {{0, {2, 2, 2, 5}}},
   // int -> FLOAT32 map
-  .float32Operands = {{0, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, -1.0f, -2.0f, -3.0f, -4.0f, -5.0f, 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, -1.0f, -2.0f, -3.0f, -4.0f, -5.0f, 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, -1.0f, -2.0f, -3.0f, -4.0f, -5.0f, 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, -1.0f, -2.0f, -3.0f, -4.0f, -5.0f}}},
+  .float32Operands = {{0, {17.0f, 16.0f, 15.0f, 14.0f, 1.0f, -1.0f, -2.0f, -3.0f, -4.0f, -17.0f, 17.0f, 16.0f, 15.0f, 14.0f, 1.0f, -1.0f, -2.0f, -3.0f, -4.0f, -17.0f, 17.0f, 16.0f, 15.0f, 14.0f, 1.0f, -1.0f, -2.0f, -3.0f, -4.0f, -17.0f, 17.0f, 16.0f, 15.0f, 14.0f, 1.0f, -1.0f, -2.0f, -3.0f, -4.0f, -17.0f}}},
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
@@ -3168,7 +3168,7 @@
   // int -> Dimensions map
   .operandDimensions = {{0, {2, 2, 2, 5}}},
   // int -> FLOAT32 map
-  .float32Operands = {{0, {0.011656231f, 0.031684921f, 0.086128544f, 0.234121657f, 0.636408647f, 0.636408647f, 0.234121657f, 0.086128544f, 0.031684921f, 0.011656231f, 0.011656231f, 0.031684921f, 0.086128544f, 0.234121657f, 0.636408647f, 0.636408647f, 0.234121657f, 0.086128544f, 0.031684921f, 0.011656231f, 0.011656231f, 0.031684921f, 0.086128544f, 0.234121657f, 0.636408647f, 0.636408647f, 0.234121657f, 0.086128544f, 0.031684921f, 0.011656231f, 0.011656231f, 0.031684921f, 0.086128544f, 0.234121657f, 0.636408647f, 0.636408647f, 0.234121657f, 0.086128544f, 0.031684921f, 0.011656231f}}},
+  .float32Operands = {{0, {0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f, 7.246299848982885e-08f, 0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f, 7.246299848982885e-08f, 0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f, 7.246299848982885e-08f, 0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f, 7.246299848982885e-08f, 0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f, 7.246299848982885e-08f, 0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f, 7.246299848982885e-08f, 0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f, 7.246299848982885e-08f, 0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f, 7.246299848982885e-08f}}},
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
@@ -3202,7 +3202,7 @@
   // int -> Dimensions map
   .operandDimensions = {{0, {5, 2, 2}}},
   // int -> FLOAT32 map
-  .float32Operands = {{0, {1.0f, -1.0f, 1.0f, -1.0f, 2.0f, -2.0f, 2.0f, -2.0f, 3.0f, -3.0f, 3.0f, -3.0f, 4.0f, -4.0f, 4.0f, -4.0f, 5.0f, -5.0f, 5.0f, -5.0f}}},
+  .float32Operands = {{0, {17.0f, -1.0f, 17.0f, -1.0f, 16.0f, -2.0f, 16.0f, -2.0f, 15.0f, -3.0f, 15.0f, -3.0f, 14.0f, -4.0f, 14.0f, -4.0f, 1.0f, -17.0f, 1.0f, -17.0f}}},
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
@@ -3225,7 +3225,7 @@
   // int -> Dimensions map
   .operandDimensions = {{0, {5, 2, 2}}},
   // int -> FLOAT32 map
-  .float32Operands = {{0, {0.011656231f, 0.636408647f, 0.011656231f, 0.636408647f, 0.031684921f, 0.234121657f, 0.031684921f, 0.234121657f, 0.086128544f, 0.086128544f, 0.086128544f, 0.086128544f, 0.234121657f, 0.031684921f, 0.234121657f, 0.031684921f, 0.636408647f, 0.011656231f, 0.636408647f, 0.011656231f}}},
+  .float32Operands = {{0, {0.643914213228014f, 0.643914213228014f, 0.643914213228014f, 0.643914213228014f, 0.236882800924671f, 0.236882800924671f, 0.236882800924671f, 0.236882800924671f, 0.087144312427294f, 0.087144312427294f, 0.087144312427294f, 0.087144312427294f, 0.032058600957022f, 0.032058600957022f, 0.032058600957022f, 0.032058600957022f, 7.246299848982885e-08f, 7.246299848982885e-08f, 7.246299848982885e-08f, 7.246299848982885e-08f}}},
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
@@ -3259,7 +3259,7 @@
   // int -> Dimensions map
   .operandDimensions = {{0, {5, 2, 2}}},
   // int -> FLOAT32 map
-  .float32Operands = {{0, {1.0f, -1.0f, 1.0f, -1.0f, 2.0f, -2.0f, 2.0f, -2.0f, 3.0f, -3.0f, 3.0f, -3.0f, 4.0f, -4.0f, 4.0f, -4.0f, 5.0f, -5.0f, 5.0f, -5.0f}}},
+  .float32Operands = {{0, {17.0f, -1.0f, 17.0f, -1.0f, 16.0f, -2.0f, 16.0f, -2.0f, 15.0f, -3.0f, 15.0f, -3.0f, 14.0f, -4.0f, 14.0f, -4.0f, 1.0f, -17.0f, 1.0f, -17.0f}}},
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
@@ -3282,7 +3282,7 @@
   // int -> Dimensions map
   .operandDimensions = {{0, {5, 2, 2}}},
   // int -> FLOAT32 map
-  .float32Operands = {{0, {0.011656231f, 0.636408647f, 0.011656231f, 0.636408647f, 0.031684921f, 0.234121657f, 0.031684921f, 0.234121657f, 0.086128544f, 0.086128544f, 0.086128544f, 0.086128544f, 0.234121657f, 0.031684921f, 0.234121657f, 0.031684921f, 0.636408647f, 0.011656231f, 0.636408647f, 0.011656231f}}},
+  .float32Operands = {{0, {0.643914213228014f, 0.643914213228014f, 0.643914213228014f, 0.643914213228014f, 0.236882800924671f, 0.236882800924671f, 0.236882800924671f, 0.236882800924671f, 0.087144312427294f, 0.087144312427294f, 0.087144312427294f, 0.087144312427294f, 0.032058600957022f, 0.032058600957022f, 0.032058600957022f, 0.032058600957022f, 7.246299848982885e-08f, 7.246299848982885e-08f, 7.246299848982885e-08f, 7.246299848982885e-08f}}},
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
@@ -3316,7 +3316,7 @@
   // int -> Dimensions map
   .operandDimensions = {{0, {2, 5, 2}}},
   // int -> FLOAT32 map
-  .float32Operands = {{0, {1.0f, -1.0f, 2.0f, -2.0f, 3.0f, -3.0f, 4.0f, -4.0f, 5.0f, -5.0f, 1.0f, -1.0f, 2.0f, -2.0f, 3.0f, -3.0f, 4.0f, -4.0f, 5.0f, -5.0f}}},
+  .float32Operands = {{0, {17.0f, -1.0f, 16.0f, -2.0f, 15.0f, -3.0f, 14.0f, -4.0f, 1.0f, -17.0f, 17.0f, -1.0f, 16.0f, -2.0f, 15.0f, -3.0f, 14.0f, -4.0f, 1.0f, -17.0f}}},
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
@@ -3339,7 +3339,7 @@
   // int -> Dimensions map
   .operandDimensions = {{0, {2, 5, 2}}},
   // int -> FLOAT32 map
-  .float32Operands = {{0, {0.011656231f, 0.636408647f, 0.031684921f, 0.234121657f, 0.086128544f, 0.086128544f, 0.234121657f, 0.031684921f, 0.636408647f, 0.011656231f, 0.011656231f, 0.636408647f, 0.031684921f, 0.234121657f, 0.086128544f, 0.086128544f, 0.234121657f, 0.031684921f, 0.636408647f, 0.011656231f}}},
+  .float32Operands = {{0, {0.643914213228014f, 0.643914213228014f, 0.236882800924671f, 0.236882800924671f, 0.087144312427294f, 0.087144312427294f, 0.032058600957022f, 0.032058600957022f, 7.246299848982885e-08f, 7.246299848982885e-08f, 0.643914213228014f, 0.643914213228014f, 0.236882800924671f, 0.236882800924671f, 0.087144312427294f, 0.087144312427294f, 0.032058600957022f, 0.032058600957022f, 7.246299848982885e-08f, 7.246299848982885e-08f}}},
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
@@ -3373,7 +3373,7 @@
   // int -> Dimensions map
   .operandDimensions = {{0, {2, 5, 2}}},
   // int -> FLOAT32 map
-  .float32Operands = {{0, {1.0f, -1.0f, 2.0f, -2.0f, 3.0f, -3.0f, 4.0f, -4.0f, 5.0f, -5.0f, 1.0f, -1.0f, 2.0f, -2.0f, 3.0f, -3.0f, 4.0f, -4.0f, 5.0f, -5.0f}}},
+  .float32Operands = {{0, {17.0f, -1.0f, 16.0f, -2.0f, 15.0f, -3.0f, 14.0f, -4.0f, 1.0f, -17.0f, 17.0f, -1.0f, 16.0f, -2.0f, 15.0f, -3.0f, 14.0f, -4.0f, 1.0f, -17.0f}}},
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
@@ -3396,7 +3396,7 @@
   // int -> Dimensions map
   .operandDimensions = {{0, {2, 5, 2}}},
   // int -> FLOAT32 map
-  .float32Operands = {{0, {0.011656231f, 0.636408647f, 0.031684921f, 0.234121657f, 0.086128544f, 0.086128544f, 0.234121657f, 0.031684921f, 0.636408647f, 0.011656231f, 0.011656231f, 0.636408647f, 0.031684921f, 0.234121657f, 0.086128544f, 0.086128544f, 0.234121657f, 0.031684921f, 0.636408647f, 0.011656231f}}},
+  .float32Operands = {{0, {0.643914213228014f, 0.643914213228014f, 0.236882800924671f, 0.236882800924671f, 0.087144312427294f, 0.087144312427294f, 0.032058600957022f, 0.032058600957022f, 7.246299848982885e-08f, 7.246299848982885e-08f, 0.643914213228014f, 0.643914213228014f, 0.236882800924671f, 0.236882800924671f, 0.087144312427294f, 0.087144312427294f, 0.032058600957022f, 0.032058600957022f, 7.246299848982885e-08f, 7.246299848982885e-08f}}},
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
@@ -3430,7 +3430,7 @@
   // int -> Dimensions map
   .operandDimensions = {{0, {2, 2, 5}}},
   // int -> FLOAT32 map
-  .float32Operands = {{0, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, -1.0f, -2.0f, -3.0f, -4.0f, -5.0f, 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, -1.0f, -2.0f, -3.0f, -4.0f, -5.0f}}},
+  .float32Operands = {{0, {17.0f, 16.0f, 15.0f, 14.0f, 1.0f, -1.0f, -2.0f, -3.0f, -4.0f, -17.0f, 17.0f, 16.0f, 15.0f, 14.0f, 1.0f, -1.0f, -2.0f, -3.0f, -4.0f, -17.0f}}},
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
@@ -3453,7 +3453,7 @@
   // int -> Dimensions map
   .operandDimensions = {{0, {2, 2, 5}}},
   // int -> FLOAT32 map
-  .float32Operands = {{0, {0.011656231f, 0.031684921f, 0.086128544f, 0.234121657f, 0.636408647f, 0.636408647f, 0.234121657f, 0.086128544f, 0.031684921f, 0.011656231f, 0.011656231f, 0.031684921f, 0.086128544f, 0.234121657f, 0.636408647f, 0.636408647f, 0.234121657f, 0.086128544f, 0.031684921f, 0.011656231f}}},
+  .float32Operands = {{0, {0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f, 7.246299848982885e-08f, 0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f, 7.246299848982885e-08f, 0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f, 7.246299848982885e-08f, 0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f, 7.246299848982885e-08f}}},
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
@@ -3487,7 +3487,7 @@
   // int -> Dimensions map
   .operandDimensions = {{0, {2, 2, 5}}},
   // int -> FLOAT32 map
-  .float32Operands = {{0, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, -1.0f, -2.0f, -3.0f, -4.0f, -5.0f, 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, -1.0f, -2.0f, -3.0f, -4.0f, -5.0f}}},
+  .float32Operands = {{0, {17.0f, 16.0f, 15.0f, 14.0f, 1.0f, -1.0f, -2.0f, -3.0f, -4.0f, -17.0f, 17.0f, 16.0f, 15.0f, 14.0f, 1.0f, -1.0f, -2.0f, -3.0f, -4.0f, -17.0f}}},
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
@@ -3510,7 +3510,7 @@
   // int -> Dimensions map
   .operandDimensions = {{0, {2, 2, 5}}},
   // int -> FLOAT32 map
-  .float32Operands = {{0, {0.011656231f, 0.031684921f, 0.086128544f, 0.234121657f, 0.636408647f, 0.636408647f, 0.234121657f, 0.086128544f, 0.031684921f, 0.011656231f, 0.011656231f, 0.031684921f, 0.086128544f, 0.234121657f, 0.636408647f, 0.636408647f, 0.234121657f, 0.086128544f, 0.031684921f, 0.011656231f}}},
+  .float32Operands = {{0, {0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f, 7.246299848982885e-08f, 0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f, 7.246299848982885e-08f, 0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f, 7.246299848982885e-08f, 0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f, 7.246299848982885e-08f}}},
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
@@ -3544,7 +3544,7 @@
   // int -> Dimensions map
   .operandDimensions = {{0, {5, 2}}},
   // int -> FLOAT32 map
-  .float32Operands = {{0, {1.0f, -1.0f, 2.0f, -2.0f, 3.0f, -3.0f, 4.0f, -4.0f, 5.0f, -5.0f}}},
+  .float32Operands = {{0, {17.0f, -1.0f, 16.0f, -2.0f, 15.0f, -3.0f, 14.0f, -4.0f, 1.0f, -17.0f}}},
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
@@ -3567,7 +3567,7 @@
   // int -> Dimensions map
   .operandDimensions = {{0, {5, 2}}},
   // int -> FLOAT32 map
-  .float32Operands = {{0, {0.011656231f, 0.636408647f, 0.031684921f, 0.234121657f, 0.086128544f, 0.086128544f, 0.234121657f, 0.031684921f, 0.636408647f, 0.011656231f}}},
+  .float32Operands = {{0, {0.643914213228014f, 0.643914213228014f, 0.236882800924671f, 0.236882800924671f, 0.087144312427294f, 0.087144312427294f, 0.032058600957022f, 0.032058600957022f, 7.246299848982885e-08f, 7.246299848982885e-08f}}},
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
@@ -3601,7 +3601,7 @@
   // int -> Dimensions map
   .operandDimensions = {{0, {5, 2}}},
   // int -> FLOAT32 map
-  .float32Operands = {{0, {1.0f, -1.0f, 2.0f, -2.0f, 3.0f, -3.0f, 4.0f, -4.0f, 5.0f, -5.0f}}},
+  .float32Operands = {{0, {17.0f, -1.0f, 16.0f, -2.0f, 15.0f, -3.0f, 14.0f, -4.0f, 1.0f, -17.0f}}},
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
@@ -3624,7 +3624,7 @@
   // int -> Dimensions map
   .operandDimensions = {{0, {5, 2}}},
   // int -> FLOAT32 map
-  .float32Operands = {{0, {0.011656231f, 0.636408647f, 0.031684921f, 0.234121657f, 0.086128544f, 0.086128544f, 0.234121657f, 0.031684921f, 0.636408647f, 0.011656231f}}},
+  .float32Operands = {{0, {0.643914213228014f, 0.643914213228014f, 0.236882800924671f, 0.236882800924671f, 0.087144312427294f, 0.087144312427294f, 0.032058600957022f, 0.032058600957022f, 7.246299848982885e-08f, 7.246299848982885e-08f}}},
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
@@ -3658,7 +3658,7 @@
   // int -> Dimensions map
   .operandDimensions = {{0, {2, 5}}},
   // int -> FLOAT32 map
-  .float32Operands = {{0, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, -1.0f, -2.0f, -3.0f, -4.0f, -5.0f}}},
+  .float32Operands = {{0, {17.0f, 16.0f, 15.0f, 14.0f, 1.0f, -1.0f, -2.0f, -3.0f, -4.0f, -17.0f}}},
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
@@ -3681,7 +3681,7 @@
   // int -> Dimensions map
   .operandDimensions = {{0, {2, 5}}},
   // int -> FLOAT32 map
-  .float32Operands = {{0, {0.011656231f, 0.031684921f, 0.086128544f, 0.234121657f, 0.636408647f, 0.636408647f, 0.234121657f, 0.086128544f, 0.031684921f, 0.011656231f}}},
+  .float32Operands = {{0, {0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f, 7.246299848982885e-08f, 0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f, 7.246299848982885e-08f}}},
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
@@ -3715,7 +3715,7 @@
   // int -> Dimensions map
   .operandDimensions = {{0, {2, 5}}},
   // int -> FLOAT32 map
-  .float32Operands = {{0, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, -1.0f, -2.0f, -3.0f, -4.0f, -5.0f}}},
+  .float32Operands = {{0, {17.0f, 16.0f, 15.0f, 14.0f, 1.0f, -1.0f, -2.0f, -3.0f, -4.0f, -17.0f}}},
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
@@ -3738,7 +3738,7 @@
   // int -> Dimensions map
   .operandDimensions = {{0, {2, 5}}},
   // int -> FLOAT32 map
-  .float32Operands = {{0, {0.011656231f, 0.031684921f, 0.086128544f, 0.234121657f, 0.636408647f, 0.636408647f, 0.234121657f, 0.086128544f, 0.031684921f, 0.011656231f}}},
+  .float32Operands = {{0, {0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f, 7.246299848982885e-08f, 0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f, 7.246299848982885e-08f}}},
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
@@ -3772,7 +3772,7 @@
   // int -> Dimensions map
   .operandDimensions = {{0, {5}}},
   // int -> FLOAT32 map
-  .float32Operands = {{0, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f}}},
+  .float32Operands = {{0, {17.0f, 16.0f, 15.0f, 14.0f, 1.0f}}},
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
@@ -3795,7 +3795,7 @@
   // int -> Dimensions map
   .operandDimensions = {{0, {5}}},
   // int -> FLOAT32 map
-  .float32Operands = {{0, {0.011656231f, 0.031684921f, 0.086128544f, 0.234121657f, 0.636408647f}}},
+  .float32Operands = {{0, {0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f, 7.246299848982885e-08f}}},
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
@@ -3829,7 +3829,7 @@
   // int -> Dimensions map
   .operandDimensions = {{0, {5}}},
   // int -> FLOAT32 map
-  .float32Operands = {{0, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f}}},
+  .float32Operands = {{0, {17.0f, 16.0f, 15.0f, 14.0f, 1.0f}}},
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
@@ -3852,7 +3852,7 @@
   // int -> Dimensions map
   .operandDimensions = {{0, {5}}},
   // int -> FLOAT32 map
-  .float32Operands = {{0, {0.011656231f, 0.031684921f, 0.086128544f, 0.234121657f, 0.636408647f}}},
+  .float32Operands = {{0, {0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f, 7.246299848982885e-08f}}},
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
@@ -3886,7 +3886,7 @@
   // int -> Dimensions map
   .operandDimensions = {{0, {5, 2, 2, 2}}},
   // int -> FLOAT32 map
-  .float32Operands = {{0, {1.0f, -1.0f, 1.0f, -1.0f, 1.0f, -1.0f, 1.0f, -1.0f, 2.0f, -2.0f, 2.0f, -2.0f, 2.0f, -2.0f, 2.0f, -2.0f, 3.0f, -3.0f, 3.0f, -3.0f, 3.0f, -3.0f, 3.0f, -3.0f, 4.0f, -4.0f, 4.0f, -4.0f, 4.0f, -4.0f, 4.0f, -4.0f, 5.0f, -5.0f, 5.0f, -5.0f, 5.0f, -5.0f, 5.0f, -5.0f}}},
+  .float32Operands = {{0, {17.0f, -1.0f, 17.0f, -1.0f, 17.0f, -1.0f, 17.0f, -1.0f, 16.0f, -2.0f, 16.0f, -2.0f, 16.0f, -2.0f, 16.0f, -2.0f, 15.0f, -3.0f, 15.0f, -3.0f, 15.0f, -3.0f, 15.0f, -3.0f, 14.0f, -4.0f, 14.0f, -4.0f, 14.0f, -4.0f, 14.0f, -4.0f, 1.0f, -17.0f, 1.0f, -17.0f, 1.0f, -17.0f, 1.0f, -17.0f}}},
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
@@ -3909,7 +3909,7 @@
   // int -> Dimensions map
   .operandDimensions = {{0, {5, 2, 2, 2}}},
   // int -> FLOAT32 map
-  .float32Operands = {{0, {0.011656231f, 0.636408647f, 0.011656231f, 0.636408647f, 0.011656231f, 0.636408647f, 0.011656231f, 0.636408647f, 0.031684921f, 0.234121657f, 0.031684921f, 0.234121657f, 0.031684921f, 0.234121657f, 0.031684921f, 0.234121657f, 0.086128544f, 0.086128544f, 0.086128544f, 0.086128544f, 0.086128544f, 0.086128544f, 0.086128544f, 0.086128544f, 0.234121657f, 0.031684921f, 0.234121657f, 0.031684921f, 0.234121657f, 0.031684921f, 0.234121657f, 0.031684921f, 0.636408647f, 0.011656231f, 0.636408647f, 0.011656231f, 0.636408647f, 0.011656231f, 0.636408647f, 0.011656231f}}},
+  .float32Operands = {{0, {0.643914213228014f, 0.643914213228014f, 0.643914213228014f, 0.643914213228014f, 0.643914213228014f, 0.643914213228014f, 0.643914213228014f, 0.643914213228014f, 0.236882800924671f, 0.236882800924671f, 0.236882800924671f, 0.236882800924671f, 0.236882800924671f, 0.236882800924671f, 0.236882800924671f, 0.236882800924671f, 0.087144312427294f, 0.087144312427294f, 0.087144312427294f, 0.087144312427294f, 0.087144312427294f, 0.087144312427294f, 0.087144312427294f, 0.087144312427294f, 0.032058600957022f, 0.032058600957022f, 0.032058600957022f, 0.032058600957022f, 0.032058600957022f, 0.032058600957022f, 0.032058600957022f, 0.032058600957022f, 7.246299848982885e-08f, 7.246299848982885e-08f, 7.246299848982885e-08f, 7.246299848982885e-08f, 7.246299848982885e-08f, 7.246299848982885e-08f, 7.246299848982885e-08f, 7.246299848982885e-08f}}},
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
@@ -3943,7 +3943,7 @@
   // int -> Dimensions map
   .operandDimensions = {{0, {5, 2, 2, 2}}},
   // int -> FLOAT32 map
-  .float32Operands = {{0, {1.0f, -1.0f, 1.0f, -1.0f, 1.0f, -1.0f, 1.0f, -1.0f, 2.0f, -2.0f, 2.0f, -2.0f, 2.0f, -2.0f, 2.0f, -2.0f, 3.0f, -3.0f, 3.0f, -3.0f, 3.0f, -3.0f, 3.0f, -3.0f, 4.0f, -4.0f, 4.0f, -4.0f, 4.0f, -4.0f, 4.0f, -4.0f, 5.0f, -5.0f, 5.0f, -5.0f, 5.0f, -5.0f, 5.0f, -5.0f}}},
+  .float32Operands = {{0, {17.0f, -1.0f, 17.0f, -1.0f, 17.0f, -1.0f, 17.0f, -1.0f, 16.0f, -2.0f, 16.0f, -2.0f, 16.0f, -2.0f, 16.0f, -2.0f, 15.0f, -3.0f, 15.0f, -3.0f, 15.0f, -3.0f, 15.0f, -3.0f, 14.0f, -4.0f, 14.0f, -4.0f, 14.0f, -4.0f, 14.0f, -4.0f, 1.0f, -17.0f, 1.0f, -17.0f, 1.0f, -17.0f, 1.0f, -17.0f}}},
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
@@ -3966,7 +3966,7 @@
   // int -> Dimensions map
   .operandDimensions = {{0, {5, 2, 2, 2}}},
   // int -> FLOAT32 map
-  .float32Operands = {{0, {0.011656231f, 0.636408647f, 0.011656231f, 0.636408647f, 0.011656231f, 0.636408647f, 0.011656231f, 0.636408647f, 0.031684921f, 0.234121657f, 0.031684921f, 0.234121657f, 0.031684921f, 0.234121657f, 0.031684921f, 0.234121657f, 0.086128544f, 0.086128544f, 0.086128544f, 0.086128544f, 0.086128544f, 0.086128544f, 0.086128544f, 0.086128544f, 0.234121657f, 0.031684921f, 0.234121657f, 0.031684921f, 0.234121657f, 0.031684921f, 0.234121657f, 0.031684921f, 0.636408647f, 0.011656231f, 0.636408647f, 0.011656231f, 0.636408647f, 0.011656231f, 0.636408647f, 0.011656231f}}},
+  .float32Operands = {{0, {0.643914213228014f, 0.643914213228014f, 0.643914213228014f, 0.643914213228014f, 0.643914213228014f, 0.643914213228014f, 0.643914213228014f, 0.643914213228014f, 0.236882800924671f, 0.236882800924671f, 0.236882800924671f, 0.236882800924671f, 0.236882800924671f, 0.236882800924671f, 0.236882800924671f, 0.236882800924671f, 0.087144312427294f, 0.087144312427294f, 0.087144312427294f, 0.087144312427294f, 0.087144312427294f, 0.087144312427294f, 0.087144312427294f, 0.087144312427294f, 0.032058600957022f, 0.032058600957022f, 0.032058600957022f, 0.032058600957022f, 0.032058600957022f, 0.032058600957022f, 0.032058600957022f, 0.032058600957022f, 7.246299848982885e-08f, 7.246299848982885e-08f, 7.246299848982885e-08f, 7.246299848982885e-08f, 7.246299848982885e-08f, 7.246299848982885e-08f, 7.246299848982885e-08f, 7.246299848982885e-08f}}},
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
@@ -4000,7 +4000,7 @@
   // int -> Dimensions map
   .operandDimensions = {{0, {2, 5, 2, 2}}},
   // int -> FLOAT32 map
-  .float32Operands = {{0, {1.0f, -1.0f, 1.0f, -1.0f, 2.0f, -2.0f, 2.0f, -2.0f, 3.0f, -3.0f, 3.0f, -3.0f, 4.0f, -4.0f, 4.0f, -4.0f, 5.0f, -5.0f, 5.0f, -5.0f, 1.0f, -1.0f, 1.0f, -1.0f, 2.0f, -2.0f, 2.0f, -2.0f, 3.0f, -3.0f, 3.0f, -3.0f, 4.0f, -4.0f, 4.0f, -4.0f, 5.0f, -5.0f, 5.0f, -5.0f}}},
+  .float32Operands = {{0, {17.0f, -1.0f, 17.0f, -1.0f, 16.0f, -2.0f, 16.0f, -2.0f, 15.0f, -3.0f, 15.0f, -3.0f, 14.0f, -4.0f, 14.0f, -4.0f, 1.0f, -17.0f, 1.0f, -17.0f, 17.0f, -1.0f, 17.0f, -1.0f, 16.0f, -2.0f, 16.0f, -2.0f, 15.0f, -3.0f, 15.0f, -3.0f, 14.0f, -4.0f, 14.0f, -4.0f, 1.0f, -17.0f, 1.0f, -17.0f}}},
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
@@ -4023,7 +4023,7 @@
   // int -> Dimensions map
   .operandDimensions = {{0, {2, 5, 2, 2}}},
   // int -> FLOAT32 map
-  .float32Operands = {{0, {0.011656231f, 0.636408647f, 0.011656231f, 0.636408647f, 0.031684921f, 0.234121657f, 0.031684921f, 0.234121657f, 0.086128544f, 0.086128544f, 0.086128544f, 0.086128544f, 0.234121657f, 0.031684921f, 0.234121657f, 0.031684921f, 0.636408647f, 0.011656231f, 0.636408647f, 0.011656231f, 0.011656231f, 0.636408647f, 0.011656231f, 0.636408647f, 0.031684921f, 0.234121657f, 0.031684921f, 0.234121657f, 0.086128544f, 0.086128544f, 0.086128544f, 0.086128544f, 0.234121657f, 0.031684921f, 0.234121657f, 0.031684921f, 0.636408647f, 0.011656231f, 0.636408647f, 0.011656231f}}},
+  .float32Operands = {{0, {0.643914213228014f, 0.643914213228014f, 0.643914213228014f, 0.643914213228014f, 0.236882800924671f, 0.236882800924671f, 0.236882800924671f, 0.236882800924671f, 0.087144312427294f, 0.087144312427294f, 0.087144312427294f, 0.087144312427294f, 0.032058600957022f, 0.032058600957022f, 0.032058600957022f, 0.032058600957022f, 7.246299848982885e-08f, 7.246299848982885e-08f, 7.246299848982885e-08f, 7.246299848982885e-08f, 0.643914213228014f, 0.643914213228014f, 0.643914213228014f, 0.643914213228014f, 0.236882800924671f, 0.236882800924671f, 0.236882800924671f, 0.236882800924671f, 0.087144312427294f, 0.087144312427294f, 0.087144312427294f, 0.087144312427294f, 0.032058600957022f, 0.032058600957022f, 0.032058600957022f, 0.032058600957022f, 7.246299848982885e-08f, 7.246299848982885e-08f, 7.246299848982885e-08f, 7.246299848982885e-08f}}},
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
@@ -4057,7 +4057,7 @@
   // int -> Dimensions map
   .operandDimensions = {{0, {2, 5, 2, 2}}},
   // int -> FLOAT32 map
-  .float32Operands = {{0, {1.0f, -1.0f, 1.0f, -1.0f, 2.0f, -2.0f, 2.0f, -2.0f, 3.0f, -3.0f, 3.0f, -3.0f, 4.0f, -4.0f, 4.0f, -4.0f, 5.0f, -5.0f, 5.0f, -5.0f, 1.0f, -1.0f, 1.0f, -1.0f, 2.0f, -2.0f, 2.0f, -2.0f, 3.0f, -3.0f, 3.0f, -3.0f, 4.0f, -4.0f, 4.0f, -4.0f, 5.0f, -5.0f, 5.0f, -5.0f}}},
+  .float32Operands = {{0, {17.0f, -1.0f, 17.0f, -1.0f, 16.0f, -2.0f, 16.0f, -2.0f, 15.0f, -3.0f, 15.0f, -3.0f, 14.0f, -4.0f, 14.0f, -4.0f, 1.0f, -17.0f, 1.0f, -17.0f, 17.0f, -1.0f, 17.0f, -1.0f, 16.0f, -2.0f, 16.0f, -2.0f, 15.0f, -3.0f, 15.0f, -3.0f, 14.0f, -4.0f, 14.0f, -4.0f, 1.0f, -17.0f, 1.0f, -17.0f}}},
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
@@ -4080,7 +4080,7 @@
   // int -> Dimensions map
   .operandDimensions = {{0, {2, 5, 2, 2}}},
   // int -> FLOAT32 map
-  .float32Operands = {{0, {0.011656231f, 0.636408647f, 0.011656231f, 0.636408647f, 0.031684921f, 0.234121657f, 0.031684921f, 0.234121657f, 0.086128544f, 0.086128544f, 0.086128544f, 0.086128544f, 0.234121657f, 0.031684921f, 0.234121657f, 0.031684921f, 0.636408647f, 0.011656231f, 0.636408647f, 0.011656231f, 0.011656231f, 0.636408647f, 0.011656231f, 0.636408647f, 0.031684921f, 0.234121657f, 0.031684921f, 0.234121657f, 0.086128544f, 0.086128544f, 0.086128544f, 0.086128544f, 0.234121657f, 0.031684921f, 0.234121657f, 0.031684921f, 0.636408647f, 0.011656231f, 0.636408647f, 0.011656231f}}},
+  .float32Operands = {{0, {0.643914213228014f, 0.643914213228014f, 0.643914213228014f, 0.643914213228014f, 0.236882800924671f, 0.236882800924671f, 0.236882800924671f, 0.236882800924671f, 0.087144312427294f, 0.087144312427294f, 0.087144312427294f, 0.087144312427294f, 0.032058600957022f, 0.032058600957022f, 0.032058600957022f, 0.032058600957022f, 7.246299848982885e-08f, 7.246299848982885e-08f, 7.246299848982885e-08f, 7.246299848982885e-08f, 0.643914213228014f, 0.643914213228014f, 0.643914213228014f, 0.643914213228014f, 0.236882800924671f, 0.236882800924671f, 0.236882800924671f, 0.236882800924671f, 0.087144312427294f, 0.087144312427294f, 0.087144312427294f, 0.087144312427294f, 0.032058600957022f, 0.032058600957022f, 0.032058600957022f, 0.032058600957022f, 7.246299848982885e-08f, 7.246299848982885e-08f, 7.246299848982885e-08f, 7.246299848982885e-08f}}},
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
@@ -4114,7 +4114,7 @@
   // int -> Dimensions map
   .operandDimensions = {{0, {2, 2, 5, 2}}},
   // int -> FLOAT32 map
-  .float32Operands = {{0, {1.0f, -1.0f, 2.0f, -2.0f, 3.0f, -3.0f, 4.0f, -4.0f, 5.0f, -5.0f, 1.0f, -1.0f, 2.0f, -2.0f, 3.0f, -3.0f, 4.0f, -4.0f, 5.0f, -5.0f, 1.0f, -1.0f, 2.0f, -2.0f, 3.0f, -3.0f, 4.0f, -4.0f, 5.0f, -5.0f, 1.0f, -1.0f, 2.0f, -2.0f, 3.0f, -3.0f, 4.0f, -4.0f, 5.0f, -5.0f}}},
+  .float32Operands = {{0, {17.0f, -1.0f, 16.0f, -2.0f, 15.0f, -3.0f, 14.0f, -4.0f, 1.0f, -17.0f, 17.0f, -1.0f, 16.0f, -2.0f, 15.0f, -3.0f, 14.0f, -4.0f, 1.0f, -17.0f, 17.0f, -1.0f, 16.0f, -2.0f, 15.0f, -3.0f, 14.0f, -4.0f, 1.0f, -17.0f, 17.0f, -1.0f, 16.0f, -2.0f, 15.0f, -3.0f, 14.0f, -4.0f, 1.0f, -17.0f}}},
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
@@ -4137,7 +4137,7 @@
   // int -> Dimensions map
   .operandDimensions = {{0, {2, 2, 5, 2}}},
   // int -> FLOAT32 map
-  .float32Operands = {{0, {0.011656231f, 0.636408647f, 0.031684921f, 0.234121657f, 0.086128544f, 0.086128544f, 0.234121657f, 0.031684921f, 0.636408647f, 0.011656231f, 0.011656231f, 0.636408647f, 0.031684921f, 0.234121657f, 0.086128544f, 0.086128544f, 0.234121657f, 0.031684921f, 0.636408647f, 0.011656231f, 0.011656231f, 0.636408647f, 0.031684921f, 0.234121657f, 0.086128544f, 0.086128544f, 0.234121657f, 0.031684921f, 0.636408647f, 0.011656231f, 0.011656231f, 0.636408647f, 0.031684921f, 0.234121657f, 0.086128544f, 0.086128544f, 0.234121657f, 0.031684921f, 0.636408647f, 0.011656231f}}},
+  .float32Operands = {{0, {0.643914213228014f, 0.643914213228014f, 0.236882800924671f, 0.236882800924671f, 0.087144312427294f, 0.087144312427294f, 0.032058600957022f, 0.032058600957022f, 7.246299848982885e-08f, 7.246299848982885e-08f, 0.643914213228014f, 0.643914213228014f, 0.236882800924671f, 0.236882800924671f, 0.087144312427294f, 0.087144312427294f, 0.032058600957022f, 0.032058600957022f, 7.246299848982885e-08f, 7.246299848982885e-08f, 0.643914213228014f, 0.643914213228014f, 0.236882800924671f, 0.236882800924671f, 0.087144312427294f, 0.087144312427294f, 0.032058600957022f, 0.032058600957022f, 7.246299848982885e-08f, 7.246299848982885e-08f, 0.643914213228014f, 0.643914213228014f, 0.236882800924671f, 0.236882800924671f, 0.087144312427294f, 0.087144312427294f, 0.032058600957022f, 0.032058600957022f, 7.246299848982885e-08f, 7.246299848982885e-08f}}},
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
@@ -4171,7 +4171,7 @@
   // int -> Dimensions map
   .operandDimensions = {{0, {2, 2, 5, 2}}},
   // int -> FLOAT32 map
-  .float32Operands = {{0, {1.0f, -1.0f, 2.0f, -2.0f, 3.0f, -3.0f, 4.0f, -4.0f, 5.0f, -5.0f, 1.0f, -1.0f, 2.0f, -2.0f, 3.0f, -3.0f, 4.0f, -4.0f, 5.0f, -5.0f, 1.0f, -1.0f, 2.0f, -2.0f, 3.0f, -3.0f, 4.0f, -4.0f, 5.0f, -5.0f, 1.0f, -1.0f, 2.0f, -2.0f, 3.0f, -3.0f, 4.0f, -4.0f, 5.0f, -5.0f}}},
+  .float32Operands = {{0, {17.0f, -1.0f, 16.0f, -2.0f, 15.0f, -3.0f, 14.0f, -4.0f, 1.0f, -17.0f, 17.0f, -1.0f, 16.0f, -2.0f, 15.0f, -3.0f, 14.0f, -4.0f, 1.0f, -17.0f, 17.0f, -1.0f, 16.0f, -2.0f, 15.0f, -3.0f, 14.0f, -4.0f, 1.0f, -17.0f, 17.0f, -1.0f, 16.0f, -2.0f, 15.0f, -3.0f, 14.0f, -4.0f, 1.0f, -17.0f}}},
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
@@ -4194,7 +4194,7 @@
   // int -> Dimensions map
   .operandDimensions = {{0, {2, 2, 5, 2}}},
   // int -> FLOAT32 map
-  .float32Operands = {{0, {0.011656231f, 0.636408647f, 0.031684921f, 0.234121657f, 0.086128544f, 0.086128544f, 0.234121657f, 0.031684921f, 0.636408647f, 0.011656231f, 0.011656231f, 0.636408647f, 0.031684921f, 0.234121657f, 0.086128544f, 0.086128544f, 0.234121657f, 0.031684921f, 0.636408647f, 0.011656231f, 0.011656231f, 0.636408647f, 0.031684921f, 0.234121657f, 0.086128544f, 0.086128544f, 0.234121657f, 0.031684921f, 0.636408647f, 0.011656231f, 0.011656231f, 0.636408647f, 0.031684921f, 0.234121657f, 0.086128544f, 0.086128544f, 0.234121657f, 0.031684921f, 0.636408647f, 0.011656231f}}},
+  .float32Operands = {{0, {0.643914213228014f, 0.643914213228014f, 0.236882800924671f, 0.236882800924671f, 0.087144312427294f, 0.087144312427294f, 0.032058600957022f, 0.032058600957022f, 7.246299848982885e-08f, 7.246299848982885e-08f, 0.643914213228014f, 0.643914213228014f, 0.236882800924671f, 0.236882800924671f, 0.087144312427294f, 0.087144312427294f, 0.032058600957022f, 0.032058600957022f, 7.246299848982885e-08f, 7.246299848982885e-08f, 0.643914213228014f, 0.643914213228014f, 0.236882800924671f, 0.236882800924671f, 0.087144312427294f, 0.087144312427294f, 0.032058600957022f, 0.032058600957022f, 7.246299848982885e-08f, 7.246299848982885e-08f, 0.643914213228014f, 0.643914213228014f, 0.236882800924671f, 0.236882800924671f, 0.087144312427294f, 0.087144312427294f, 0.032058600957022f, 0.032058600957022f, 7.246299848982885e-08f, 7.246299848982885e-08f}}},
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
@@ -4228,7 +4228,7 @@
   // int -> Dimensions map
   .operandDimensions = {{0, {2, 2, 2, 5}}},
   // int -> FLOAT32 map
-  .float32Operands = {{0, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, -1.0f, -2.0f, -3.0f, -4.0f, -5.0f, 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, -1.0f, -2.0f, -3.0f, -4.0f, -5.0f, 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, -1.0f, -2.0f, -3.0f, -4.0f, -5.0f, 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, -1.0f, -2.0f, -3.0f, -4.0f, -5.0f}}},
+  .float32Operands = {{0, {17.0f, 16.0f, 15.0f, 14.0f, 1.0f, -1.0f, -2.0f, -3.0f, -4.0f, -17.0f, 17.0f, 16.0f, 15.0f, 14.0f, 1.0f, -1.0f, -2.0f, -3.0f, -4.0f, -17.0f, 17.0f, 16.0f, 15.0f, 14.0f, 1.0f, -1.0f, -2.0f, -3.0f, -4.0f, -17.0f, 17.0f, 16.0f, 15.0f, 14.0f, 1.0f, -1.0f, -2.0f, -3.0f, -4.0f, -17.0f}}},
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
@@ -4251,7 +4251,7 @@
   // int -> Dimensions map
   .operandDimensions = {{0, {2, 2, 2, 5}}},
   // int -> FLOAT32 map
-  .float32Operands = {{0, {0.011656231f, 0.031684921f, 0.086128544f, 0.234121657f, 0.636408647f, 0.636408647f, 0.234121657f, 0.086128544f, 0.031684921f, 0.011656231f, 0.011656231f, 0.031684921f, 0.086128544f, 0.234121657f, 0.636408647f, 0.636408647f, 0.234121657f, 0.086128544f, 0.031684921f, 0.011656231f, 0.011656231f, 0.031684921f, 0.086128544f, 0.234121657f, 0.636408647f, 0.636408647f, 0.234121657f, 0.086128544f, 0.031684921f, 0.011656231f, 0.011656231f, 0.031684921f, 0.086128544f, 0.234121657f, 0.636408647f, 0.636408647f, 0.234121657f, 0.086128544f, 0.031684921f, 0.011656231f}}},
+  .float32Operands = {{0, {0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f, 7.246299848982885e-08f, 0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f, 7.246299848982885e-08f, 0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f, 7.246299848982885e-08f, 0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f, 7.246299848982885e-08f, 0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f, 7.246299848982885e-08f, 0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f, 7.246299848982885e-08f, 0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f, 7.246299848982885e-08f, 0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f, 7.246299848982885e-08f}}},
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
@@ -4285,7 +4285,7 @@
   // int -> Dimensions map
   .operandDimensions = {{0, {2, 2, 2, 5}}},
   // int -> FLOAT32 map
-  .float32Operands = {{0, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, -1.0f, -2.0f, -3.0f, -4.0f, -5.0f, 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, -1.0f, -2.0f, -3.0f, -4.0f, -5.0f, 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, -1.0f, -2.0f, -3.0f, -4.0f, -5.0f, 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, -1.0f, -2.0f, -3.0f, -4.0f, -5.0f}}},
+  .float32Operands = {{0, {17.0f, 16.0f, 15.0f, 14.0f, 1.0f, -1.0f, -2.0f, -3.0f, -4.0f, -17.0f, 17.0f, 16.0f, 15.0f, 14.0f, 1.0f, -1.0f, -2.0f, -3.0f, -4.0f, -17.0f, 17.0f, 16.0f, 15.0f, 14.0f, 1.0f, -1.0f, -2.0f, -3.0f, -4.0f, -17.0f, 17.0f, 16.0f, 15.0f, 14.0f, 1.0f, -1.0f, -2.0f, -3.0f, -4.0f, -17.0f}}},
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
@@ -4308,7 +4308,7 @@
   // int -> Dimensions map
   .operandDimensions = {{0, {2, 2, 2, 5}}},
   // int -> FLOAT32 map
-  .float32Operands = {{0, {0.011656231f, 0.031684921f, 0.086128544f, 0.234121657f, 0.636408647f, 0.636408647f, 0.234121657f, 0.086128544f, 0.031684921f, 0.011656231f, 0.011656231f, 0.031684921f, 0.086128544f, 0.234121657f, 0.636408647f, 0.636408647f, 0.234121657f, 0.086128544f, 0.031684921f, 0.011656231f, 0.011656231f, 0.031684921f, 0.086128544f, 0.234121657f, 0.636408647f, 0.636408647f, 0.234121657f, 0.086128544f, 0.031684921f, 0.011656231f, 0.011656231f, 0.031684921f, 0.086128544f, 0.234121657f, 0.636408647f, 0.636408647f, 0.234121657f, 0.086128544f, 0.031684921f, 0.011656231f}}},
+  .float32Operands = {{0, {0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f, 7.246299848982885e-08f, 0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f, 7.246299848982885e-08f, 0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f, 7.246299848982885e-08f, 0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f, 7.246299848982885e-08f, 0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f, 7.246299848982885e-08f, 0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f, 7.246299848982885e-08f, 0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f, 7.246299848982885e-08f, 0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f, 7.246299848982885e-08f}}},
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
@@ -4342,7 +4342,7 @@
   // int -> Dimensions map
   .operandDimensions = {{0, {5, 2, 2}}},
   // int -> FLOAT32 map
-  .float32Operands = {{0, {1.0f, -1.0f, 1.0f, -1.0f, 2.0f, -2.0f, 2.0f, -2.0f, 3.0f, -3.0f, 3.0f, -3.0f, 4.0f, -4.0f, 4.0f, -4.0f, 5.0f, -5.0f, 5.0f, -5.0f}}},
+  .float32Operands = {{0, {17.0f, -1.0f, 17.0f, -1.0f, 16.0f, -2.0f, 16.0f, -2.0f, 15.0f, -3.0f, 15.0f, -3.0f, 14.0f, -4.0f, 14.0f, -4.0f, 1.0f, -17.0f, 1.0f, -17.0f}}},
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
@@ -4365,7 +4365,7 @@
   // int -> Dimensions map
   .operandDimensions = {{0, {5, 2, 2}}},
   // int -> FLOAT32 map
-  .float32Operands = {{0, {0.011656231f, 0.636408647f, 0.011656231f, 0.636408647f, 0.031684921f, 0.234121657f, 0.031684921f, 0.234121657f, 0.086128544f, 0.086128544f, 0.086128544f, 0.086128544f, 0.234121657f, 0.031684921f, 0.234121657f, 0.031684921f, 0.636408647f, 0.011656231f, 0.636408647f, 0.011656231f}}},
+  .float32Operands = {{0, {0.643914213228014f, 0.643914213228014f, 0.643914213228014f, 0.643914213228014f, 0.236882800924671f, 0.236882800924671f, 0.236882800924671f, 0.236882800924671f, 0.087144312427294f, 0.087144312427294f, 0.087144312427294f, 0.087144312427294f, 0.032058600957022f, 0.032058600957022f, 0.032058600957022f, 0.032058600957022f, 7.246299848982885e-08f, 7.246299848982885e-08f, 7.246299848982885e-08f, 7.246299848982885e-08f}}},
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
@@ -4399,7 +4399,7 @@
   // int -> Dimensions map
   .operandDimensions = {{0, {5, 2, 2}}},
   // int -> FLOAT32 map
-  .float32Operands = {{0, {1.0f, -1.0f, 1.0f, -1.0f, 2.0f, -2.0f, 2.0f, -2.0f, 3.0f, -3.0f, 3.0f, -3.0f, 4.0f, -4.0f, 4.0f, -4.0f, 5.0f, -5.0f, 5.0f, -5.0f}}},
+  .float32Operands = {{0, {17.0f, -1.0f, 17.0f, -1.0f, 16.0f, -2.0f, 16.0f, -2.0f, 15.0f, -3.0f, 15.0f, -3.0f, 14.0f, -4.0f, 14.0f, -4.0f, 1.0f, -17.0f, 1.0f, -17.0f}}},
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
@@ -4422,7 +4422,7 @@
   // int -> Dimensions map
   .operandDimensions = {{0, {5, 2, 2}}},
   // int -> FLOAT32 map
-  .float32Operands = {{0, {0.011656231f, 0.636408647f, 0.011656231f, 0.636408647f, 0.031684921f, 0.234121657f, 0.031684921f, 0.234121657f, 0.086128544f, 0.086128544f, 0.086128544f, 0.086128544f, 0.234121657f, 0.031684921f, 0.234121657f, 0.031684921f, 0.636408647f, 0.011656231f, 0.636408647f, 0.011656231f}}},
+  .float32Operands = {{0, {0.643914213228014f, 0.643914213228014f, 0.643914213228014f, 0.643914213228014f, 0.236882800924671f, 0.236882800924671f, 0.236882800924671f, 0.236882800924671f, 0.087144312427294f, 0.087144312427294f, 0.087144312427294f, 0.087144312427294f, 0.032058600957022f, 0.032058600957022f, 0.032058600957022f, 0.032058600957022f, 7.246299848982885e-08f, 7.246299848982885e-08f, 7.246299848982885e-08f, 7.246299848982885e-08f}}},
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
@@ -4456,7 +4456,7 @@
   // int -> Dimensions map
   .operandDimensions = {{0, {2, 5, 2}}},
   // int -> FLOAT32 map
-  .float32Operands = {{0, {1.0f, -1.0f, 2.0f, -2.0f, 3.0f, -3.0f, 4.0f, -4.0f, 5.0f, -5.0f, 1.0f, -1.0f, 2.0f, -2.0f, 3.0f, -3.0f, 4.0f, -4.0f, 5.0f, -5.0f}}},
+  .float32Operands = {{0, {17.0f, -1.0f, 16.0f, -2.0f, 15.0f, -3.0f, 14.0f, -4.0f, 1.0f, -17.0f, 17.0f, -1.0f, 16.0f, -2.0f, 15.0f, -3.0f, 14.0f, -4.0f, 1.0f, -17.0f}}},
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
@@ -4479,7 +4479,7 @@
   // int -> Dimensions map
   .operandDimensions = {{0, {2, 5, 2}}},
   // int -> FLOAT32 map
-  .float32Operands = {{0, {0.011656231f, 0.636408647f, 0.031684921f, 0.234121657f, 0.086128544f, 0.086128544f, 0.234121657f, 0.031684921f, 0.636408647f, 0.011656231f, 0.011656231f, 0.636408647f, 0.031684921f, 0.234121657f, 0.086128544f, 0.086128544f, 0.234121657f, 0.031684921f, 0.636408647f, 0.011656231f}}},
+  .float32Operands = {{0, {0.643914213228014f, 0.643914213228014f, 0.236882800924671f, 0.236882800924671f, 0.087144312427294f, 0.087144312427294f, 0.032058600957022f, 0.032058600957022f, 7.246299848982885e-08f, 7.246299848982885e-08f, 0.643914213228014f, 0.643914213228014f, 0.236882800924671f, 0.236882800924671f, 0.087144312427294f, 0.087144312427294f, 0.032058600957022f, 0.032058600957022f, 7.246299848982885e-08f, 7.246299848982885e-08f}}},
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
@@ -4513,7 +4513,7 @@
   // int -> Dimensions map
   .operandDimensions = {{0, {2, 5, 2}}},
   // int -> FLOAT32 map
-  .float32Operands = {{0, {1.0f, -1.0f, 2.0f, -2.0f, 3.0f, -3.0f, 4.0f, -4.0f, 5.0f, -5.0f, 1.0f, -1.0f, 2.0f, -2.0f, 3.0f, -3.0f, 4.0f, -4.0f, 5.0f, -5.0f}}},
+  .float32Operands = {{0, {17.0f, -1.0f, 16.0f, -2.0f, 15.0f, -3.0f, 14.0f, -4.0f, 1.0f, -17.0f, 17.0f, -1.0f, 16.0f, -2.0f, 15.0f, -3.0f, 14.0f, -4.0f, 1.0f, -17.0f}}},
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
@@ -4536,7 +4536,7 @@
   // int -> Dimensions map
   .operandDimensions = {{0, {2, 5, 2}}},
   // int -> FLOAT32 map
-  .float32Operands = {{0, {0.011656231f, 0.636408647f, 0.031684921f, 0.234121657f, 0.086128544f, 0.086128544f, 0.234121657f, 0.031684921f, 0.636408647f, 0.011656231f, 0.011656231f, 0.636408647f, 0.031684921f, 0.234121657f, 0.086128544f, 0.086128544f, 0.234121657f, 0.031684921f, 0.636408647f, 0.011656231f}}},
+  .float32Operands = {{0, {0.643914213228014f, 0.643914213228014f, 0.236882800924671f, 0.236882800924671f, 0.087144312427294f, 0.087144312427294f, 0.032058600957022f, 0.032058600957022f, 7.246299848982885e-08f, 7.246299848982885e-08f, 0.643914213228014f, 0.643914213228014f, 0.236882800924671f, 0.236882800924671f, 0.087144312427294f, 0.087144312427294f, 0.032058600957022f, 0.032058600957022f, 7.246299848982885e-08f, 7.246299848982885e-08f}}},
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
@@ -4570,7 +4570,7 @@
   // int -> Dimensions map
   .operandDimensions = {{0, {2, 2, 5}}},
   // int -> FLOAT32 map
-  .float32Operands = {{0, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, -1.0f, -2.0f, -3.0f, -4.0f, -5.0f, 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, -1.0f, -2.0f, -3.0f, -4.0f, -5.0f}}},
+  .float32Operands = {{0, {17.0f, 16.0f, 15.0f, 14.0f, 1.0f, -1.0f, -2.0f, -3.0f, -4.0f, -17.0f, 17.0f, 16.0f, 15.0f, 14.0f, 1.0f, -1.0f, -2.0f, -3.0f, -4.0f, -17.0f}}},
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
@@ -4593,7 +4593,7 @@
   // int -> Dimensions map
   .operandDimensions = {{0, {2, 2, 5}}},
   // int -> FLOAT32 map
-  .float32Operands = {{0, {0.011656231f, 0.031684921f, 0.086128544f, 0.234121657f, 0.636408647f, 0.636408647f, 0.234121657f, 0.086128544f, 0.031684921f, 0.011656231f, 0.011656231f, 0.031684921f, 0.086128544f, 0.234121657f, 0.636408647f, 0.636408647f, 0.234121657f, 0.086128544f, 0.031684921f, 0.011656231f}}},
+  .float32Operands = {{0, {0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f, 7.246299848982885e-08f, 0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f, 7.246299848982885e-08f, 0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f, 7.246299848982885e-08f, 0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f, 7.246299848982885e-08f}}},
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
@@ -4627,7 +4627,7 @@
   // int -> Dimensions map
   .operandDimensions = {{0, {2, 2, 5}}},
   // int -> FLOAT32 map
-  .float32Operands = {{0, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, -1.0f, -2.0f, -3.0f, -4.0f, -5.0f, 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, -1.0f, -2.0f, -3.0f, -4.0f, -5.0f}}},
+  .float32Operands = {{0, {17.0f, 16.0f, 15.0f, 14.0f, 1.0f, -1.0f, -2.0f, -3.0f, -4.0f, -17.0f, 17.0f, 16.0f, 15.0f, 14.0f, 1.0f, -1.0f, -2.0f, -3.0f, -4.0f, -17.0f}}},
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
@@ -4650,7 +4650,7 @@
   // int -> Dimensions map
   .operandDimensions = {{0, {2, 2, 5}}},
   // int -> FLOAT32 map
-  .float32Operands = {{0, {0.011656231f, 0.031684921f, 0.086128544f, 0.234121657f, 0.636408647f, 0.636408647f, 0.234121657f, 0.086128544f, 0.031684921f, 0.011656231f, 0.011656231f, 0.031684921f, 0.086128544f, 0.234121657f, 0.636408647f, 0.636408647f, 0.234121657f, 0.086128544f, 0.031684921f, 0.011656231f}}},
+  .float32Operands = {{0, {0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f, 7.246299848982885e-08f, 0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f, 7.246299848982885e-08f, 0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f, 7.246299848982885e-08f, 0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f, 7.246299848982885e-08f}}},
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
@@ -4684,7 +4684,7 @@
   // int -> Dimensions map
   .operandDimensions = {{0, {5, 2}}},
   // int -> FLOAT32 map
-  .float32Operands = {{0, {1.0f, -1.0f, 2.0f, -2.0f, 3.0f, -3.0f, 4.0f, -4.0f, 5.0f, -5.0f}}},
+  .float32Operands = {{0, {17.0f, -1.0f, 16.0f, -2.0f, 15.0f, -3.0f, 14.0f, -4.0f, 1.0f, -17.0f}}},
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
@@ -4707,7 +4707,7 @@
   // int -> Dimensions map
   .operandDimensions = {{0, {5, 2}}},
   // int -> FLOAT32 map
-  .float32Operands = {{0, {0.011656231f, 0.636408647f, 0.031684921f, 0.234121657f, 0.086128544f, 0.086128544f, 0.234121657f, 0.031684921f, 0.636408647f, 0.011656231f}}},
+  .float32Operands = {{0, {0.643914213228014f, 0.643914213228014f, 0.236882800924671f, 0.236882800924671f, 0.087144312427294f, 0.087144312427294f, 0.032058600957022f, 0.032058600957022f, 7.246299848982885e-08f, 7.246299848982885e-08f}}},
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
@@ -4741,7 +4741,7 @@
   // int -> Dimensions map
   .operandDimensions = {{0, {5, 2}}},
   // int -> FLOAT32 map
-  .float32Operands = {{0, {1.0f, -1.0f, 2.0f, -2.0f, 3.0f, -3.0f, 4.0f, -4.0f, 5.0f, -5.0f}}},
+  .float32Operands = {{0, {17.0f, -1.0f, 16.0f, -2.0f, 15.0f, -3.0f, 14.0f, -4.0f, 1.0f, -17.0f}}},
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
@@ -4764,7 +4764,7 @@
   // int -> Dimensions map
   .operandDimensions = {{0, {5, 2}}},
   // int -> FLOAT32 map
-  .float32Operands = {{0, {0.011656231f, 0.636408647f, 0.031684921f, 0.234121657f, 0.086128544f, 0.086128544f, 0.234121657f, 0.031684921f, 0.636408647f, 0.011656231f}}},
+  .float32Operands = {{0, {0.643914213228014f, 0.643914213228014f, 0.236882800924671f, 0.236882800924671f, 0.087144312427294f, 0.087144312427294f, 0.032058600957022f, 0.032058600957022f, 7.246299848982885e-08f, 7.246299848982885e-08f}}},
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
@@ -4798,7 +4798,7 @@
   // int -> Dimensions map
   .operandDimensions = {{0, {2, 5}}},
   // int -> FLOAT32 map
-  .float32Operands = {{0, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, -1.0f, -2.0f, -3.0f, -4.0f, -5.0f}}},
+  .float32Operands = {{0, {17.0f, 16.0f, 15.0f, 14.0f, 1.0f, -1.0f, -2.0f, -3.0f, -4.0f, -17.0f}}},
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
@@ -4821,7 +4821,7 @@
   // int -> Dimensions map
   .operandDimensions = {{0, {2, 5}}},
   // int -> FLOAT32 map
-  .float32Operands = {{0, {0.011656231f, 0.031684921f, 0.086128544f, 0.234121657f, 0.636408647f, 0.636408647f, 0.234121657f, 0.086128544f, 0.031684921f, 0.011656231f}}},
+  .float32Operands = {{0, {0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f, 7.246299848982885e-08f, 0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f, 7.246299848982885e-08f}}},
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
@@ -4855,7 +4855,7 @@
   // int -> Dimensions map
   .operandDimensions = {{0, {2, 5}}},
   // int -> FLOAT32 map
-  .float32Operands = {{0, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, -1.0f, -2.0f, -3.0f, -4.0f, -5.0f}}},
+  .float32Operands = {{0, {17.0f, 16.0f, 15.0f, 14.0f, 1.0f, -1.0f, -2.0f, -3.0f, -4.0f, -17.0f}}},
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
@@ -4878,7 +4878,7 @@
   // int -> Dimensions map
   .operandDimensions = {{0, {2, 5}}},
   // int -> FLOAT32 map
-  .float32Operands = {{0, {0.011656231f, 0.031684921f, 0.086128544f, 0.234121657f, 0.636408647f, 0.636408647f, 0.234121657f, 0.086128544f, 0.031684921f, 0.011656231f}}},
+  .float32Operands = {{0, {0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f, 7.246299848982885e-08f, 0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f, 7.246299848982885e-08f}}},
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
@@ -4912,7 +4912,7 @@
   // int -> Dimensions map
   .operandDimensions = {{0, {5}}},
   // int -> FLOAT32 map
-  .float32Operands = {{0, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f}}},
+  .float32Operands = {{0, {17.0f, 16.0f, 15.0f, 14.0f, 1.0f}}},
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
@@ -4935,7 +4935,7 @@
   // int -> Dimensions map
   .operandDimensions = {{0, {5}}},
   // int -> FLOAT32 map
-  .float32Operands = {{0, {0.011656231f, 0.031684921f, 0.086128544f, 0.234121657f, 0.636408647f}}},
+  .float32Operands = {{0, {0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f, 7.246299848982885e-08f}}},
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
@@ -4969,7 +4969,7 @@
   // int -> Dimensions map
   .operandDimensions = {{0, {5}}},
   // int -> FLOAT32 map
-  .float32Operands = {{0, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f}}},
+  .float32Operands = {{0, {17.0f, 16.0f, 15.0f, 14.0f, 1.0f}}},
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
@@ -4992,7 +4992,7 @@
   // int -> Dimensions map
   .operandDimensions = {{0, {5}}},
   // int -> FLOAT32 map
-  .float32Operands = {{0, {0.011656231f, 0.031684921f, 0.086128544f, 0.234121657f, 0.636408647f}}},
+  .float32Operands = {{0, {0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f, 7.246299848982885e-08f}}},
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
@@ -5034,7 +5034,7 @@
   // int -> QUANT16_SYMM map
   .quant16SymmOperands = {},
   // int -> FLOAT16 map
-  .float16Operands = {{0, {1.0f, -1.0f, 1.0f, -1.0f, 1.0f, -1.0f, 1.0f, -1.0f, 2.0f, -2.0f, 2.0f, -2.0f, 2.0f, -2.0f, 2.0f, -2.0f, 3.0f, -3.0f, 3.0f, -3.0f, 3.0f, -3.0f, 3.0f, -3.0f, 4.0f, -4.0f, 4.0f, -4.0f, 4.0f, -4.0f, 4.0f, -4.0f, 5.0f, -5.0f, 5.0f, -5.0f, 5.0f, -5.0f, 5.0f, -5.0f}}},
+  .float16Operands = {{0, {17.0f, -1.0f, 17.0f, -1.0f, 17.0f, -1.0f, 17.0f, -1.0f, 16.0f, -2.0f, 16.0f, -2.0f, 16.0f, -2.0f, 16.0f, -2.0f, 15.0f, -3.0f, 15.0f, -3.0f, 15.0f, -3.0f, 15.0f, -3.0f, 14.0f, -4.0f, 14.0f, -4.0f, 14.0f, -4.0f, 14.0f, -4.0f, 1.0f, -17.0f, 1.0f, -17.0f, 1.0f, -17.0f, 1.0f, -17.0f}}},
   // int -> BOOL8 map
   .bool8Operands = {},
   // int -> QUANT8_SYMM_PER_CHANNEL map
@@ -5057,7 +5057,7 @@
   // int -> QUANT16_SYMM map
   .quant16SymmOperands = {},
   // int -> FLOAT16 map
-  .float16Operands = {{0, {0.011656231246888638f, 0.6364086270332336f, 0.011656231246888638f, 0.6364086270332336f, 0.011656231246888638f, 0.6364086270332336f, 0.011656231246888638f, 0.6364086270332336f, 0.03168492019176483f, 0.2341216504573822f, 0.03168492019176483f, 0.2341216504573822f, 0.03168492019176483f, 0.2341216504573822f, 0.03168492019176483f, 0.2341216504573822f, 0.08612854033708572f, 0.08612854033708572f, 0.08612854033708572f, 0.08612854033708572f, 0.08612854033708572f, 0.08612854033708572f, 0.08612854033708572f, 0.08612854033708572f, 0.2341216504573822f, 0.03168492019176483f, 0.2341216504573822f, 0.03168492019176483f, 0.2341216504573822f, 0.03168492019176483f, 0.2341216504573822f, 0.03168492019176483f, 0.6364086270332336f, 0.011656231246888638f, 0.6364086270332336f, 0.011656231246888638f, 0.6364086270332336f, 0.011656231246888638f, 0.6364086270332336f, 0.011656231246888638f}}},
+  .float16Operands = {{0, {0.6439142227172852f, 0.6439142227172852f, 0.6439142227172852f, 0.6439142227172852f, 0.6439142227172852f, 0.6439142227172852f, 0.6439142227172852f, 0.6439142227172852f, 0.23688280582427979f, 0.23688280582427979f, 0.23688280582427979f, 0.23688280582427979f, 0.23688280582427979f, 0.23688280582427979f, 0.23688280582427979f, 0.23688280582427979f, 0.08714431524276733f, 0.08714431524276733f, 0.08714431524276733f, 0.08714431524276733f, 0.08714431524276733f, 0.08714431524276733f, 0.08714431524276733f, 0.08714431524276733f, 0.03205860033631325f, 0.03205860033631325f, 0.03205860033631325f, 0.03205860033631325f, 0.03205860033631325f, 0.03205860033631325f, 0.03205860033631325f, 0.03205860033631325f, 7.246299560392799e-08f, 7.246299560392799e-08f, 7.246299560392799e-08f, 7.246299560392799e-08f, 7.246299560392799e-08f, 7.246299560392799e-08f, 7.246299560392799e-08f, 7.246299560392799e-08f}}},
   // int -> BOOL8 map
   .bool8Operands = {},
   // int -> QUANT8_SYMM_PER_CHANNEL map
@@ -5091,7 +5091,7 @@
   // int -> QUANT16_SYMM map
   .quant16SymmOperands = {},
   // int -> FLOAT16 map
-  .float16Operands = {{0, {1.0f, -1.0f, 1.0f, -1.0f, 1.0f, -1.0f, 1.0f, -1.0f, 2.0f, -2.0f, 2.0f, -2.0f, 2.0f, -2.0f, 2.0f, -2.0f, 3.0f, -3.0f, 3.0f, -3.0f, 3.0f, -3.0f, 3.0f, -3.0f, 4.0f, -4.0f, 4.0f, -4.0f, 4.0f, -4.0f, 4.0f, -4.0f, 5.0f, -5.0f, 5.0f, -5.0f, 5.0f, -5.0f, 5.0f, -5.0f}}},
+  .float16Operands = {{0, {17.0f, -1.0f, 17.0f, -1.0f, 17.0f, -1.0f, 17.0f, -1.0f, 16.0f, -2.0f, 16.0f, -2.0f, 16.0f, -2.0f, 16.0f, -2.0f, 15.0f, -3.0f, 15.0f, -3.0f, 15.0f, -3.0f, 15.0f, -3.0f, 14.0f, -4.0f, 14.0f, -4.0f, 14.0f, -4.0f, 14.0f, -4.0f, 1.0f, -17.0f, 1.0f, -17.0f, 1.0f, -17.0f, 1.0f, -17.0f}}},
   // int -> BOOL8 map
   .bool8Operands = {},
   // int -> QUANT8_SYMM_PER_CHANNEL map
@@ -5114,7 +5114,7 @@
   // int -> QUANT16_SYMM map
   .quant16SymmOperands = {},
   // int -> FLOAT16 map
-  .float16Operands = {{0, {0.011656231246888638f, 0.6364086270332336f, 0.011656231246888638f, 0.6364086270332336f, 0.011656231246888638f, 0.6364086270332336f, 0.011656231246888638f, 0.6364086270332336f, 0.03168492019176483f, 0.2341216504573822f, 0.03168492019176483f, 0.2341216504573822f, 0.03168492019176483f, 0.2341216504573822f, 0.03168492019176483f, 0.2341216504573822f, 0.08612854033708572f, 0.08612854033708572f, 0.08612854033708572f, 0.08612854033708572f, 0.08612854033708572f, 0.08612854033708572f, 0.08612854033708572f, 0.08612854033708572f, 0.2341216504573822f, 0.03168492019176483f, 0.2341216504573822f, 0.03168492019176483f, 0.2341216504573822f, 0.03168492019176483f, 0.2341216504573822f, 0.03168492019176483f, 0.6364086270332336f, 0.011656231246888638f, 0.6364086270332336f, 0.011656231246888638f, 0.6364086270332336f, 0.011656231246888638f, 0.6364086270332336f, 0.011656231246888638f}}},
+  .float16Operands = {{0, {0.6439142227172852f, 0.6439142227172852f, 0.6439142227172852f, 0.6439142227172852f, 0.6439142227172852f, 0.6439142227172852f, 0.6439142227172852f, 0.6439142227172852f, 0.23688280582427979f, 0.23688280582427979f, 0.23688280582427979f, 0.23688280582427979f, 0.23688280582427979f, 0.23688280582427979f, 0.23688280582427979f, 0.23688280582427979f, 0.08714431524276733f, 0.08714431524276733f, 0.08714431524276733f, 0.08714431524276733f, 0.08714431524276733f, 0.08714431524276733f, 0.08714431524276733f, 0.08714431524276733f, 0.03205860033631325f, 0.03205860033631325f, 0.03205860033631325f, 0.03205860033631325f, 0.03205860033631325f, 0.03205860033631325f, 0.03205860033631325f, 0.03205860033631325f, 7.246299560392799e-08f, 7.246299560392799e-08f, 7.246299560392799e-08f, 7.246299560392799e-08f, 7.246299560392799e-08f, 7.246299560392799e-08f, 7.246299560392799e-08f, 7.246299560392799e-08f}}},
   // int -> BOOL8 map
   .bool8Operands = {},
   // int -> QUANT8_SYMM_PER_CHANNEL map
@@ -5148,7 +5148,7 @@
   // int -> QUANT16_SYMM map
   .quant16SymmOperands = {},
   // int -> FLOAT16 map
-  .float16Operands = {{0, {1.0f, -1.0f, 1.0f, -1.0f, 2.0f, -2.0f, 2.0f, -2.0f, 3.0f, -3.0f, 3.0f, -3.0f, 4.0f, -4.0f, 4.0f, -4.0f, 5.0f, -5.0f, 5.0f, -5.0f, 1.0f, -1.0f, 1.0f, -1.0f, 2.0f, -2.0f, 2.0f, -2.0f, 3.0f, -3.0f, 3.0f, -3.0f, 4.0f, -4.0f, 4.0f, -4.0f, 5.0f, -5.0f, 5.0f, -5.0f}}},
+  .float16Operands = {{0, {17.0f, -1.0f, 17.0f, -1.0f, 16.0f, -2.0f, 16.0f, -2.0f, 15.0f, -3.0f, 15.0f, -3.0f, 14.0f, -4.0f, 14.0f, -4.0f, 1.0f, -17.0f, 1.0f, -17.0f, 17.0f, -1.0f, 17.0f, -1.0f, 16.0f, -2.0f, 16.0f, -2.0f, 15.0f, -3.0f, 15.0f, -3.0f, 14.0f, -4.0f, 14.0f, -4.0f, 1.0f, -17.0f, 1.0f, -17.0f}}},
   // int -> BOOL8 map
   .bool8Operands = {},
   // int -> QUANT8_SYMM_PER_CHANNEL map
@@ -5171,7 +5171,7 @@
   // int -> QUANT16_SYMM map
   .quant16SymmOperands = {},
   // int -> FLOAT16 map
-  .float16Operands = {{0, {0.011656231246888638f, 0.6364086270332336f, 0.011656231246888638f, 0.6364086270332336f, 0.03168492019176483f, 0.2341216504573822f, 0.03168492019176483f, 0.2341216504573822f, 0.08612854033708572f, 0.08612854033708572f, 0.08612854033708572f, 0.08612854033708572f, 0.2341216504573822f, 0.03168492019176483f, 0.2341216504573822f, 0.03168492019176483f, 0.6364086270332336f, 0.011656231246888638f, 0.6364086270332336f, 0.011656231246888638f, 0.011656231246888638f, 0.6364086270332336f, 0.011656231246888638f, 0.6364086270332336f, 0.03168492019176483f, 0.2341216504573822f, 0.03168492019176483f, 0.2341216504573822f, 0.08612854033708572f, 0.08612854033708572f, 0.08612854033708572f, 0.08612854033708572f, 0.2341216504573822f, 0.03168492019176483f, 0.2341216504573822f, 0.03168492019176483f, 0.6364086270332336f, 0.011656231246888638f, 0.6364086270332336f, 0.011656231246888638f}}},
+  .float16Operands = {{0, {0.6439142227172852f, 0.6439142227172852f, 0.6439142227172852f, 0.6439142227172852f, 0.23688280582427979f, 0.23688280582427979f, 0.23688280582427979f, 0.23688280582427979f, 0.08714431524276733f, 0.08714431524276733f, 0.08714431524276733f, 0.08714431524276733f, 0.03205860033631325f, 0.03205860033631325f, 0.03205860033631325f, 0.03205860033631325f, 7.246299560392799e-08f, 7.246299560392799e-08f, 7.246299560392799e-08f, 7.246299560392799e-08f, 0.6439142227172852f, 0.6439142227172852f, 0.6439142227172852f, 0.6439142227172852f, 0.23688280582427979f, 0.23688280582427979f, 0.23688280582427979f, 0.23688280582427979f, 0.08714431524276733f, 0.08714431524276733f, 0.08714431524276733f, 0.08714431524276733f, 0.03205860033631325f, 0.03205860033631325f, 0.03205860033631325f, 0.03205860033631325f, 7.246299560392799e-08f, 7.246299560392799e-08f, 7.246299560392799e-08f, 7.246299560392799e-08f}}},
   // int -> BOOL8 map
   .bool8Operands = {},
   // int -> QUANT8_SYMM_PER_CHANNEL map
@@ -5205,7 +5205,7 @@
   // int -> QUANT16_SYMM map
   .quant16SymmOperands = {},
   // int -> FLOAT16 map
-  .float16Operands = {{0, {1.0f, -1.0f, 1.0f, -1.0f, 2.0f, -2.0f, 2.0f, -2.0f, 3.0f, -3.0f, 3.0f, -3.0f, 4.0f, -4.0f, 4.0f, -4.0f, 5.0f, -5.0f, 5.0f, -5.0f, 1.0f, -1.0f, 1.0f, -1.0f, 2.0f, -2.0f, 2.0f, -2.0f, 3.0f, -3.0f, 3.0f, -3.0f, 4.0f, -4.0f, 4.0f, -4.0f, 5.0f, -5.0f, 5.0f, -5.0f}}},
+  .float16Operands = {{0, {17.0f, -1.0f, 17.0f, -1.0f, 16.0f, -2.0f, 16.0f, -2.0f, 15.0f, -3.0f, 15.0f, -3.0f, 14.0f, -4.0f, 14.0f, -4.0f, 1.0f, -17.0f, 1.0f, -17.0f, 17.0f, -1.0f, 17.0f, -1.0f, 16.0f, -2.0f, 16.0f, -2.0f, 15.0f, -3.0f, 15.0f, -3.0f, 14.0f, -4.0f, 14.0f, -4.0f, 1.0f, -17.0f, 1.0f, -17.0f}}},
   // int -> BOOL8 map
   .bool8Operands = {},
   // int -> QUANT8_SYMM_PER_CHANNEL map
@@ -5228,7 +5228,7 @@
   // int -> QUANT16_SYMM map
   .quant16SymmOperands = {},
   // int -> FLOAT16 map
-  .float16Operands = {{0, {0.011656231246888638f, 0.6364086270332336f, 0.011656231246888638f, 0.6364086270332336f, 0.03168492019176483f, 0.2341216504573822f, 0.03168492019176483f, 0.2341216504573822f, 0.08612854033708572f, 0.08612854033708572f, 0.08612854033708572f, 0.08612854033708572f, 0.2341216504573822f, 0.03168492019176483f, 0.2341216504573822f, 0.03168492019176483f, 0.6364086270332336f, 0.011656231246888638f, 0.6364086270332336f, 0.011656231246888638f, 0.011656231246888638f, 0.6364086270332336f, 0.011656231246888638f, 0.6364086270332336f, 0.03168492019176483f, 0.2341216504573822f, 0.03168492019176483f, 0.2341216504573822f, 0.08612854033708572f, 0.08612854033708572f, 0.08612854033708572f, 0.08612854033708572f, 0.2341216504573822f, 0.03168492019176483f, 0.2341216504573822f, 0.03168492019176483f, 0.6364086270332336f, 0.011656231246888638f, 0.6364086270332336f, 0.011656231246888638f}}},
+  .float16Operands = {{0, {0.6439142227172852f, 0.6439142227172852f, 0.6439142227172852f, 0.6439142227172852f, 0.23688280582427979f, 0.23688280582427979f, 0.23688280582427979f, 0.23688280582427979f, 0.08714431524276733f, 0.08714431524276733f, 0.08714431524276733f, 0.08714431524276733f, 0.03205860033631325f, 0.03205860033631325f, 0.03205860033631325f, 0.03205860033631325f, 7.246299560392799e-08f, 7.246299560392799e-08f, 7.246299560392799e-08f, 7.246299560392799e-08f, 0.6439142227172852f, 0.6439142227172852f, 0.6439142227172852f, 0.6439142227172852f, 0.23688280582427979f, 0.23688280582427979f, 0.23688280582427979f, 0.23688280582427979f, 0.08714431524276733f, 0.08714431524276733f, 0.08714431524276733f, 0.08714431524276733f, 0.03205860033631325f, 0.03205860033631325f, 0.03205860033631325f, 0.03205860033631325f, 7.246299560392799e-08f, 7.246299560392799e-08f, 7.246299560392799e-08f, 7.246299560392799e-08f}}},
   // int -> BOOL8 map
   .bool8Operands = {},
   // int -> QUANT8_SYMM_PER_CHANNEL map
@@ -5262,7 +5262,7 @@
   // int -> QUANT16_SYMM map
   .quant16SymmOperands = {},
   // int -> FLOAT16 map
-  .float16Operands = {{0, {1.0f, -1.0f, 2.0f, -2.0f, 3.0f, -3.0f, 4.0f, -4.0f, 5.0f, -5.0f, 1.0f, -1.0f, 2.0f, -2.0f, 3.0f, -3.0f, 4.0f, -4.0f, 5.0f, -5.0f, 1.0f, -1.0f, 2.0f, -2.0f, 3.0f, -3.0f, 4.0f, -4.0f, 5.0f, -5.0f, 1.0f, -1.0f, 2.0f, -2.0f, 3.0f, -3.0f, 4.0f, -4.0f, 5.0f, -5.0f}}},
+  .float16Operands = {{0, {17.0f, -1.0f, 16.0f, -2.0f, 15.0f, -3.0f, 14.0f, -4.0f, 1.0f, -17.0f, 17.0f, -1.0f, 16.0f, -2.0f, 15.0f, -3.0f, 14.0f, -4.0f, 1.0f, -17.0f, 17.0f, -1.0f, 16.0f, -2.0f, 15.0f, -3.0f, 14.0f, -4.0f, 1.0f, -17.0f, 17.0f, -1.0f, 16.0f, -2.0f, 15.0f, -3.0f, 14.0f, -4.0f, 1.0f, -17.0f}}},
   // int -> BOOL8 map
   .bool8Operands = {},
   // int -> QUANT8_SYMM_PER_CHANNEL map
@@ -5285,7 +5285,7 @@
   // int -> QUANT16_SYMM map
   .quant16SymmOperands = {},
   // int -> FLOAT16 map
-  .float16Operands = {{0, {0.011656231246888638f, 0.6364086270332336f, 0.03168492019176483f, 0.2341216504573822f, 0.08612854033708572f, 0.08612854033708572f, 0.2341216504573822f, 0.03168492019176483f, 0.6364086270332336f, 0.011656231246888638f, 0.011656231246888638f, 0.6364086270332336f, 0.03168492019176483f, 0.2341216504573822f, 0.08612854033708572f, 0.08612854033708572f, 0.2341216504573822f, 0.03168492019176483f, 0.6364086270332336f, 0.011656231246888638f, 0.011656231246888638f, 0.6364086270332336f, 0.03168492019176483f, 0.2341216504573822f, 0.08612854033708572f, 0.08612854033708572f, 0.2341216504573822f, 0.03168492019176483f, 0.6364086270332336f, 0.011656231246888638f, 0.011656231246888638f, 0.6364086270332336f, 0.03168492019176483f, 0.2341216504573822f, 0.08612854033708572f, 0.08612854033708572f, 0.2341216504573822f, 0.03168492019176483f, 0.6364086270332336f, 0.011656231246888638f}}},
+  .float16Operands = {{0, {0.6439142227172852f, 0.6439142227172852f, 0.23688280582427979f, 0.23688280582427979f, 0.08714431524276733f, 0.08714431524276733f, 0.03205860033631325f, 0.03205860033631325f, 7.246299560392799e-08f, 7.246299560392799e-08f, 0.6439142227172852f, 0.6439142227172852f, 0.23688280582427979f, 0.23688280582427979f, 0.08714431524276733f, 0.08714431524276733f, 0.03205860033631325f, 0.03205860033631325f, 7.246299560392799e-08f, 7.246299560392799e-08f, 0.6439142227172852f, 0.6439142227172852f, 0.23688280582427979f, 0.23688280582427979f, 0.08714431524276733f, 0.08714431524276733f, 0.03205860033631325f, 0.03205860033631325f, 7.246299560392799e-08f, 7.246299560392799e-08f, 0.6439142227172852f, 0.6439142227172852f, 0.23688280582427979f, 0.23688280582427979f, 0.08714431524276733f, 0.08714431524276733f, 0.03205860033631325f, 0.03205860033631325f, 7.246299560392799e-08f, 7.246299560392799e-08f}}},
   // int -> BOOL8 map
   .bool8Operands = {},
   // int -> QUANT8_SYMM_PER_CHANNEL map
@@ -5319,7 +5319,7 @@
   // int -> QUANT16_SYMM map
   .quant16SymmOperands = {},
   // int -> FLOAT16 map
-  .float16Operands = {{0, {1.0f, -1.0f, 2.0f, -2.0f, 3.0f, -3.0f, 4.0f, -4.0f, 5.0f, -5.0f, 1.0f, -1.0f, 2.0f, -2.0f, 3.0f, -3.0f, 4.0f, -4.0f, 5.0f, -5.0f, 1.0f, -1.0f, 2.0f, -2.0f, 3.0f, -3.0f, 4.0f, -4.0f, 5.0f, -5.0f, 1.0f, -1.0f, 2.0f, -2.0f, 3.0f, -3.0f, 4.0f, -4.0f, 5.0f, -5.0f}}},
+  .float16Operands = {{0, {17.0f, -1.0f, 16.0f, -2.0f, 15.0f, -3.0f, 14.0f, -4.0f, 1.0f, -17.0f, 17.0f, -1.0f, 16.0f, -2.0f, 15.0f, -3.0f, 14.0f, -4.0f, 1.0f, -17.0f, 17.0f, -1.0f, 16.0f, -2.0f, 15.0f, -3.0f, 14.0f, -4.0f, 1.0f, -17.0f, 17.0f, -1.0f, 16.0f, -2.0f, 15.0f, -3.0f, 14.0f, -4.0f, 1.0f, -17.0f}}},
   // int -> BOOL8 map
   .bool8Operands = {},
   // int -> QUANT8_SYMM_PER_CHANNEL map
@@ -5342,7 +5342,7 @@
   // int -> QUANT16_SYMM map
   .quant16SymmOperands = {},
   // int -> FLOAT16 map
-  .float16Operands = {{0, {0.011656231246888638f, 0.6364086270332336f, 0.03168492019176483f, 0.2341216504573822f, 0.08612854033708572f, 0.08612854033708572f, 0.2341216504573822f, 0.03168492019176483f, 0.6364086270332336f, 0.011656231246888638f, 0.011656231246888638f, 0.6364086270332336f, 0.03168492019176483f, 0.2341216504573822f, 0.08612854033708572f, 0.08612854033708572f, 0.2341216504573822f, 0.03168492019176483f, 0.6364086270332336f, 0.011656231246888638f, 0.011656231246888638f, 0.6364086270332336f, 0.03168492019176483f, 0.2341216504573822f, 0.08612854033708572f, 0.08612854033708572f, 0.2341216504573822f, 0.03168492019176483f, 0.6364086270332336f, 0.011656231246888638f, 0.011656231246888638f, 0.6364086270332336f, 0.03168492019176483f, 0.2341216504573822f, 0.08612854033708572f, 0.08612854033708572f, 0.2341216504573822f, 0.03168492019176483f, 0.6364086270332336f, 0.011656231246888638f}}},
+  .float16Operands = {{0, {0.6439142227172852f, 0.6439142227172852f, 0.23688280582427979f, 0.23688280582427979f, 0.08714431524276733f, 0.08714431524276733f, 0.03205860033631325f, 0.03205860033631325f, 7.246299560392799e-08f, 7.246299560392799e-08f, 0.6439142227172852f, 0.6439142227172852f, 0.23688280582427979f, 0.23688280582427979f, 0.08714431524276733f, 0.08714431524276733f, 0.03205860033631325f, 0.03205860033631325f, 7.246299560392799e-08f, 7.246299560392799e-08f, 0.6439142227172852f, 0.6439142227172852f, 0.23688280582427979f, 0.23688280582427979f, 0.08714431524276733f, 0.08714431524276733f, 0.03205860033631325f, 0.03205860033631325f, 7.246299560392799e-08f, 7.246299560392799e-08f, 0.6439142227172852f, 0.6439142227172852f, 0.23688280582427979f, 0.23688280582427979f, 0.08714431524276733f, 0.08714431524276733f, 0.03205860033631325f, 0.03205860033631325f, 7.246299560392799e-08f, 7.246299560392799e-08f}}},
   // int -> BOOL8 map
   .bool8Operands = {},
   // int -> QUANT8_SYMM_PER_CHANNEL map
@@ -5376,7 +5376,7 @@
   // int -> QUANT16_SYMM map
   .quant16SymmOperands = {},
   // int -> FLOAT16 map
-  .float16Operands = {{0, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, -1.0f, -2.0f, -3.0f, -4.0f, -5.0f, 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, -1.0f, -2.0f, -3.0f, -4.0f, -5.0f, 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, -1.0f, -2.0f, -3.0f, -4.0f, -5.0f, 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, -1.0f, -2.0f, -3.0f, -4.0f, -5.0f}}},
+  .float16Operands = {{0, {17.0f, 16.0f, 15.0f, 14.0f, 1.0f, -1.0f, -2.0f, -3.0f, -4.0f, -17.0f, 17.0f, 16.0f, 15.0f, 14.0f, 1.0f, -1.0f, -2.0f, -3.0f, -4.0f, -17.0f, 17.0f, 16.0f, 15.0f, 14.0f, 1.0f, -1.0f, -2.0f, -3.0f, -4.0f, -17.0f, 17.0f, 16.0f, 15.0f, 14.0f, 1.0f, -1.0f, -2.0f, -3.0f, -4.0f, -17.0f}}},
   // int -> BOOL8 map
   .bool8Operands = {},
   // int -> QUANT8_SYMM_PER_CHANNEL map
@@ -5399,7 +5399,7 @@
   // int -> QUANT16_SYMM map
   .quant16SymmOperands = {},
   // int -> FLOAT16 map
-  .float16Operands = {{0, {0.011656231246888638f, 0.03168492019176483f, 0.08612854033708572f, 0.2341216504573822f, 0.6364086270332336f, 0.6364086270332336f, 0.2341216504573822f, 0.08612854033708572f, 0.03168492019176483f, 0.011656231246888638f, 0.011656231246888638f, 0.03168492019176483f, 0.08612854033708572f, 0.2341216504573822f, 0.6364086270332336f, 0.6364086270332336f, 0.2341216504573822f, 0.08612854033708572f, 0.03168492019176483f, 0.011656231246888638f, 0.011656231246888638f, 0.03168492019176483f, 0.08612854033708572f, 0.2341216504573822f, 0.6364086270332336f, 0.6364086270332336f, 0.2341216504573822f, 0.08612854033708572f, 0.03168492019176483f, 0.011656231246888638f, 0.011656231246888638f, 0.03168492019176483f, 0.08612854033708572f, 0.2341216504573822f, 0.6364086270332336f, 0.6364086270332336f, 0.2341216504573822f, 0.08612854033708572f, 0.03168492019176483f, 0.011656231246888638f}}},
+  .float16Operands = {{0, {0.6439142227172852f, 0.23688280582427979f, 0.08714431524276733f, 0.03205860033631325f, 7.246299560392799e-08f, 0.6439142227172852f, 0.23688280582427979f, 0.08714431524276733f, 0.03205860033631325f, 7.246299560392799e-08f, 0.6439142227172852f, 0.23688280582427979f, 0.08714431524276733f, 0.03205860033631325f, 7.246299560392799e-08f, 0.6439142227172852f, 0.23688280582427979f, 0.08714431524276733f, 0.03205860033631325f, 7.246299560392799e-08f, 0.6439142227172852f, 0.23688280582427979f, 0.08714431524276733f, 0.03205860033631325f, 7.246299560392799e-08f, 0.6439142227172852f, 0.23688280582427979f, 0.08714431524276733f, 0.03205860033631325f, 7.246299560392799e-08f, 0.6439142227172852f, 0.23688280582427979f, 0.08714431524276733f, 0.03205860033631325f, 7.246299560392799e-08f, 0.6439142227172852f, 0.23688280582427979f, 0.08714431524276733f, 0.03205860033631325f, 7.246299560392799e-08f}}},
   // int -> BOOL8 map
   .bool8Operands = {},
   // int -> QUANT8_SYMM_PER_CHANNEL map
@@ -5433,7 +5433,7 @@
   // int -> QUANT16_SYMM map
   .quant16SymmOperands = {},
   // int -> FLOAT16 map
-  .float16Operands = {{0, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, -1.0f, -2.0f, -3.0f, -4.0f, -5.0f, 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, -1.0f, -2.0f, -3.0f, -4.0f, -5.0f, 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, -1.0f, -2.0f, -3.0f, -4.0f, -5.0f, 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, -1.0f, -2.0f, -3.0f, -4.0f, -5.0f}}},
+  .float16Operands = {{0, {17.0f, 16.0f, 15.0f, 14.0f, 1.0f, -1.0f, -2.0f, -3.0f, -4.0f, -17.0f, 17.0f, 16.0f, 15.0f, 14.0f, 1.0f, -1.0f, -2.0f, -3.0f, -4.0f, -17.0f, 17.0f, 16.0f, 15.0f, 14.0f, 1.0f, -1.0f, -2.0f, -3.0f, -4.0f, -17.0f, 17.0f, 16.0f, 15.0f, 14.0f, 1.0f, -1.0f, -2.0f, -3.0f, -4.0f, -17.0f}}},
   // int -> BOOL8 map
   .bool8Operands = {},
   // int -> QUANT8_SYMM_PER_CHANNEL map
@@ -5456,7 +5456,7 @@
   // int -> QUANT16_SYMM map
   .quant16SymmOperands = {},
   // int -> FLOAT16 map
-  .float16Operands = {{0, {0.011656231246888638f, 0.03168492019176483f, 0.08612854033708572f, 0.2341216504573822f, 0.6364086270332336f, 0.6364086270332336f, 0.2341216504573822f, 0.08612854033708572f, 0.03168492019176483f, 0.011656231246888638f, 0.011656231246888638f, 0.03168492019176483f, 0.08612854033708572f, 0.2341216504573822f, 0.6364086270332336f, 0.6364086270332336f, 0.2341216504573822f, 0.08612854033708572f, 0.03168492019176483f, 0.011656231246888638f, 0.011656231246888638f, 0.03168492019176483f, 0.08612854033708572f, 0.2341216504573822f, 0.6364086270332336f, 0.6364086270332336f, 0.2341216504573822f, 0.08612854033708572f, 0.03168492019176483f, 0.011656231246888638f, 0.011656231246888638f, 0.03168492019176483f, 0.08612854033708572f, 0.2341216504573822f, 0.6364086270332336f, 0.6364086270332336f, 0.2341216504573822f, 0.08612854033708572f, 0.03168492019176483f, 0.011656231246888638f}}},
+  .float16Operands = {{0, {0.6439142227172852f, 0.23688280582427979f, 0.08714431524276733f, 0.03205860033631325f, 7.246299560392799e-08f, 0.6439142227172852f, 0.23688280582427979f, 0.08714431524276733f, 0.03205860033631325f, 7.246299560392799e-08f, 0.6439142227172852f, 0.23688280582427979f, 0.08714431524276733f, 0.03205860033631325f, 7.246299560392799e-08f, 0.6439142227172852f, 0.23688280582427979f, 0.08714431524276733f, 0.03205860033631325f, 7.246299560392799e-08f, 0.6439142227172852f, 0.23688280582427979f, 0.08714431524276733f, 0.03205860033631325f, 7.246299560392799e-08f, 0.6439142227172852f, 0.23688280582427979f, 0.08714431524276733f, 0.03205860033631325f, 7.246299560392799e-08f, 0.6439142227172852f, 0.23688280582427979f, 0.08714431524276733f, 0.03205860033631325f, 7.246299560392799e-08f, 0.6439142227172852f, 0.23688280582427979f, 0.08714431524276733f, 0.03205860033631325f, 7.246299560392799e-08f}}},
   // int -> BOOL8 map
   .bool8Operands = {},
   // int -> QUANT8_SYMM_PER_CHANNEL map
@@ -5490,7 +5490,7 @@
   // int -> QUANT16_SYMM map
   .quant16SymmOperands = {},
   // int -> FLOAT16 map
-  .float16Operands = {{0, {1.0f, -1.0f, 1.0f, -1.0f, 2.0f, -2.0f, 2.0f, -2.0f, 3.0f, -3.0f, 3.0f, -3.0f, 4.0f, -4.0f, 4.0f, -4.0f, 5.0f, -5.0f, 5.0f, -5.0f}}},
+  .float16Operands = {{0, {17.0f, -1.0f, 17.0f, -1.0f, 16.0f, -2.0f, 16.0f, -2.0f, 15.0f, -3.0f, 15.0f, -3.0f, 14.0f, -4.0f, 14.0f, -4.0f, 1.0f, -17.0f, 1.0f, -17.0f}}},
   // int -> BOOL8 map
   .bool8Operands = {},
   // int -> QUANT8_SYMM_PER_CHANNEL map
@@ -5513,7 +5513,7 @@
   // int -> QUANT16_SYMM map
   .quant16SymmOperands = {},
   // int -> FLOAT16 map
-  .float16Operands = {{0, {0.011656231246888638f, 0.6364086270332336f, 0.011656231246888638f, 0.6364086270332336f, 0.03168492019176483f, 0.2341216504573822f, 0.03168492019176483f, 0.2341216504573822f, 0.08612854033708572f, 0.08612854033708572f, 0.08612854033708572f, 0.08612854033708572f, 0.2341216504573822f, 0.03168492019176483f, 0.2341216504573822f, 0.03168492019176483f, 0.6364086270332336f, 0.011656231246888638f, 0.6364086270332336f, 0.011656231246888638f}}},
+  .float16Operands = {{0, {0.6439142227172852f, 0.6439142227172852f, 0.6439142227172852f, 0.6439142227172852f, 0.23688280582427979f, 0.23688280582427979f, 0.23688280582427979f, 0.23688280582427979f, 0.08714431524276733f, 0.08714431524276733f, 0.08714431524276733f, 0.08714431524276733f, 0.03205860033631325f, 0.03205860033631325f, 0.03205860033631325f, 0.03205860033631325f, 7.246299560392799e-08f, 7.246299560392799e-08f, 7.246299560392799e-08f, 7.246299560392799e-08f}}},
   // int -> BOOL8 map
   .bool8Operands = {},
   // int -> QUANT8_SYMM_PER_CHANNEL map
@@ -5547,7 +5547,7 @@
   // int -> QUANT16_SYMM map
   .quant16SymmOperands = {},
   // int -> FLOAT16 map
-  .float16Operands = {{0, {1.0f, -1.0f, 1.0f, -1.0f, 2.0f, -2.0f, 2.0f, -2.0f, 3.0f, -3.0f, 3.0f, -3.0f, 4.0f, -4.0f, 4.0f, -4.0f, 5.0f, -5.0f, 5.0f, -5.0f}}},
+  .float16Operands = {{0, {17.0f, -1.0f, 17.0f, -1.0f, 16.0f, -2.0f, 16.0f, -2.0f, 15.0f, -3.0f, 15.0f, -3.0f, 14.0f, -4.0f, 14.0f, -4.0f, 1.0f, -17.0f, 1.0f, -17.0f}}},
   // int -> BOOL8 map
   .bool8Operands = {},
   // int -> QUANT8_SYMM_PER_CHANNEL map
@@ -5570,7 +5570,7 @@
   // int -> QUANT16_SYMM map
   .quant16SymmOperands = {},
   // int -> FLOAT16 map
-  .float16Operands = {{0, {0.011656231246888638f, 0.6364086270332336f, 0.011656231246888638f, 0.6364086270332336f, 0.03168492019176483f, 0.2341216504573822f, 0.03168492019176483f, 0.2341216504573822f, 0.08612854033708572f, 0.08612854033708572f, 0.08612854033708572f, 0.08612854033708572f, 0.2341216504573822f, 0.03168492019176483f, 0.2341216504573822f, 0.03168492019176483f, 0.6364086270332336f, 0.011656231246888638f, 0.6364086270332336f, 0.011656231246888638f}}},
+  .float16Operands = {{0, {0.6439142227172852f, 0.6439142227172852f, 0.6439142227172852f, 0.6439142227172852f, 0.23688280582427979f, 0.23688280582427979f, 0.23688280582427979f, 0.23688280582427979f, 0.08714431524276733f, 0.08714431524276733f, 0.08714431524276733f, 0.08714431524276733f, 0.03205860033631325f, 0.03205860033631325f, 0.03205860033631325f, 0.03205860033631325f, 7.246299560392799e-08f, 7.246299560392799e-08f, 7.246299560392799e-08f, 7.246299560392799e-08f}}},
   // int -> BOOL8 map
   .bool8Operands = {},
   // int -> QUANT8_SYMM_PER_CHANNEL map
@@ -5604,7 +5604,7 @@
   // int -> QUANT16_SYMM map
   .quant16SymmOperands = {},
   // int -> FLOAT16 map
-  .float16Operands = {{0, {1.0f, -1.0f, 2.0f, -2.0f, 3.0f, -3.0f, 4.0f, -4.0f, 5.0f, -5.0f, 1.0f, -1.0f, 2.0f, -2.0f, 3.0f, -3.0f, 4.0f, -4.0f, 5.0f, -5.0f}}},
+  .float16Operands = {{0, {17.0f, -1.0f, 16.0f, -2.0f, 15.0f, -3.0f, 14.0f, -4.0f, 1.0f, -17.0f, 17.0f, -1.0f, 16.0f, -2.0f, 15.0f, -3.0f, 14.0f, -4.0f, 1.0f, -17.0f}}},
   // int -> BOOL8 map
   .bool8Operands = {},
   // int -> QUANT8_SYMM_PER_CHANNEL map
@@ -5627,7 +5627,7 @@
   // int -> QUANT16_SYMM map
   .quant16SymmOperands = {},
   // int -> FLOAT16 map
-  .float16Operands = {{0, {0.011656231246888638f, 0.6364086270332336f, 0.03168492019176483f, 0.2341216504573822f, 0.08612854033708572f, 0.08612854033708572f, 0.2341216504573822f, 0.03168492019176483f, 0.6364086270332336f, 0.011656231246888638f, 0.011656231246888638f, 0.6364086270332336f, 0.03168492019176483f, 0.2341216504573822f, 0.08612854033708572f, 0.08612854033708572f, 0.2341216504573822f, 0.03168492019176483f, 0.6364086270332336f, 0.011656231246888638f}}},
+  .float16Operands = {{0, {0.6439142227172852f, 0.6439142227172852f, 0.23688280582427979f, 0.23688280582427979f, 0.08714431524276733f, 0.08714431524276733f, 0.03205860033631325f, 0.03205860033631325f, 7.246299560392799e-08f, 7.246299560392799e-08f, 0.6439142227172852f, 0.6439142227172852f, 0.23688280582427979f, 0.23688280582427979f, 0.08714431524276733f, 0.08714431524276733f, 0.03205860033631325f, 0.03205860033631325f, 7.246299560392799e-08f, 7.246299560392799e-08f}}},
   // int -> BOOL8 map
   .bool8Operands = {},
   // int -> QUANT8_SYMM_PER_CHANNEL map
@@ -5661,7 +5661,7 @@
   // int -> QUANT16_SYMM map
   .quant16SymmOperands = {},
   // int -> FLOAT16 map
-  .float16Operands = {{0, {1.0f, -1.0f, 2.0f, -2.0f, 3.0f, -3.0f, 4.0f, -4.0f, 5.0f, -5.0f, 1.0f, -1.0f, 2.0f, -2.0f, 3.0f, -3.0f, 4.0f, -4.0f, 5.0f, -5.0f}}},
+  .float16Operands = {{0, {17.0f, -1.0f, 16.0f, -2.0f, 15.0f, -3.0f, 14.0f, -4.0f, 1.0f, -17.0f, 17.0f, -1.0f, 16.0f, -2.0f, 15.0f, -3.0f, 14.0f, -4.0f, 1.0f, -17.0f}}},
   // int -> BOOL8 map
   .bool8Operands = {},
   // int -> QUANT8_SYMM_PER_CHANNEL map
@@ -5684,7 +5684,7 @@
   // int -> QUANT16_SYMM map
   .quant16SymmOperands = {},
   // int -> FLOAT16 map
-  .float16Operands = {{0, {0.011656231246888638f, 0.6364086270332336f, 0.03168492019176483f, 0.2341216504573822f, 0.08612854033708572f, 0.08612854033708572f, 0.2341216504573822f, 0.03168492019176483f, 0.6364086270332336f, 0.011656231246888638f, 0.011656231246888638f, 0.6364086270332336f, 0.03168492019176483f, 0.2341216504573822f, 0.08612854033708572f, 0.08612854033708572f, 0.2341216504573822f, 0.03168492019176483f, 0.6364086270332336f, 0.011656231246888638f}}},
+  .float16Operands = {{0, {0.6439142227172852f, 0.6439142227172852f, 0.23688280582427979f, 0.23688280582427979f, 0.08714431524276733f, 0.08714431524276733f, 0.03205860033631325f, 0.03205860033631325f, 7.246299560392799e-08f, 7.246299560392799e-08f, 0.6439142227172852f, 0.6439142227172852f, 0.23688280582427979f, 0.23688280582427979f, 0.08714431524276733f, 0.08714431524276733f, 0.03205860033631325f, 0.03205860033631325f, 7.246299560392799e-08f, 7.246299560392799e-08f}}},
   // int -> BOOL8 map
   .bool8Operands = {},
   // int -> QUANT8_SYMM_PER_CHANNEL map
@@ -5718,7 +5718,7 @@
   // int -> QUANT16_SYMM map
   .quant16SymmOperands = {},
   // int -> FLOAT16 map
-  .float16Operands = {{0, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, -1.0f, -2.0f, -3.0f, -4.0f, -5.0f, 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, -1.0f, -2.0f, -3.0f, -4.0f, -5.0f}}},
+  .float16Operands = {{0, {17.0f, 16.0f, 15.0f, 14.0f, 1.0f, -1.0f, -2.0f, -3.0f, -4.0f, -17.0f, 17.0f, 16.0f, 15.0f, 14.0f, 1.0f, -1.0f, -2.0f, -3.0f, -4.0f, -17.0f}}},
   // int -> BOOL8 map
   .bool8Operands = {},
   // int -> QUANT8_SYMM_PER_CHANNEL map
@@ -5741,7 +5741,7 @@
   // int -> QUANT16_SYMM map
   .quant16SymmOperands = {},
   // int -> FLOAT16 map
-  .float16Operands = {{0, {0.011656231246888638f, 0.03168492019176483f, 0.08612854033708572f, 0.2341216504573822f, 0.6364086270332336f, 0.6364086270332336f, 0.2341216504573822f, 0.08612854033708572f, 0.03168492019176483f, 0.011656231246888638f, 0.011656231246888638f, 0.03168492019176483f, 0.08612854033708572f, 0.2341216504573822f, 0.6364086270332336f, 0.6364086270332336f, 0.2341216504573822f, 0.08612854033708572f, 0.03168492019176483f, 0.011656231246888638f}}},
+  .float16Operands = {{0, {0.6439142227172852f, 0.23688280582427979f, 0.08714431524276733f, 0.03205860033631325f, 7.246299560392799e-08f, 0.6439142227172852f, 0.23688280582427979f, 0.08714431524276733f, 0.03205860033631325f, 7.246299560392799e-08f, 0.6439142227172852f, 0.23688280582427979f, 0.08714431524276733f, 0.03205860033631325f, 7.246299560392799e-08f, 0.6439142227172852f, 0.23688280582427979f, 0.08714431524276733f, 0.03205860033631325f, 7.246299560392799e-08f}}},
   // int -> BOOL8 map
   .bool8Operands = {},
   // int -> QUANT8_SYMM_PER_CHANNEL map
@@ -5775,7 +5775,7 @@
   // int -> QUANT16_SYMM map
   .quant16SymmOperands = {},
   // int -> FLOAT16 map
-  .float16Operands = {{0, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, -1.0f, -2.0f, -3.0f, -4.0f, -5.0f, 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, -1.0f, -2.0f, -3.0f, -4.0f, -5.0f}}},
+  .float16Operands = {{0, {17.0f, 16.0f, 15.0f, 14.0f, 1.0f, -1.0f, -2.0f, -3.0f, -4.0f, -17.0f, 17.0f, 16.0f, 15.0f, 14.0f, 1.0f, -1.0f, -2.0f, -3.0f, -4.0f, -17.0f}}},
   // int -> BOOL8 map
   .bool8Operands = {},
   // int -> QUANT8_SYMM_PER_CHANNEL map
@@ -5798,7 +5798,7 @@
   // int -> QUANT16_SYMM map
   .quant16SymmOperands = {},
   // int -> FLOAT16 map
-  .float16Operands = {{0, {0.011656231246888638f, 0.03168492019176483f, 0.08612854033708572f, 0.2341216504573822f, 0.6364086270332336f, 0.6364086270332336f, 0.2341216504573822f, 0.08612854033708572f, 0.03168492019176483f, 0.011656231246888638f, 0.011656231246888638f, 0.03168492019176483f, 0.08612854033708572f, 0.2341216504573822f, 0.6364086270332336f, 0.6364086270332336f, 0.2341216504573822f, 0.08612854033708572f, 0.03168492019176483f, 0.011656231246888638f}}},
+  .float16Operands = {{0, {0.6439142227172852f, 0.23688280582427979f, 0.08714431524276733f, 0.03205860033631325f, 7.246299560392799e-08f, 0.6439142227172852f, 0.23688280582427979f, 0.08714431524276733f, 0.03205860033631325f, 7.246299560392799e-08f, 0.6439142227172852f, 0.23688280582427979f, 0.08714431524276733f, 0.03205860033631325f, 7.246299560392799e-08f, 0.6439142227172852f, 0.23688280582427979f, 0.08714431524276733f, 0.03205860033631325f, 7.246299560392799e-08f}}},
   // int -> BOOL8 map
   .bool8Operands = {},
   // int -> QUANT8_SYMM_PER_CHANNEL map
@@ -5832,7 +5832,7 @@
   // int -> QUANT16_SYMM map
   .quant16SymmOperands = {},
   // int -> FLOAT16 map
-  .float16Operands = {{0, {1.0f, -1.0f, 2.0f, -2.0f, 3.0f, -3.0f, 4.0f, -4.0f, 5.0f, -5.0f}}},
+  .float16Operands = {{0, {17.0f, -1.0f, 16.0f, -2.0f, 15.0f, -3.0f, 14.0f, -4.0f, 1.0f, -17.0f}}},
   // int -> BOOL8 map
   .bool8Operands = {},
   // int -> QUANT8_SYMM_PER_CHANNEL map
@@ -5855,7 +5855,7 @@
   // int -> QUANT16_SYMM map
   .quant16SymmOperands = {},
   // int -> FLOAT16 map
-  .float16Operands = {{0, {0.011656231246888638f, 0.6364086270332336f, 0.03168492019176483f, 0.2341216504573822f, 0.08612854033708572f, 0.08612854033708572f, 0.2341216504573822f, 0.03168492019176483f, 0.6364086270332336f, 0.011656231246888638f}}},
+  .float16Operands = {{0, {0.6439142227172852f, 0.6439142227172852f, 0.23688280582427979f, 0.23688280582427979f, 0.08714431524276733f, 0.08714431524276733f, 0.03205860033631325f, 0.03205860033631325f, 7.246299560392799e-08f, 7.246299560392799e-08f}}},
   // int -> BOOL8 map
   .bool8Operands = {},
   // int -> QUANT8_SYMM_PER_CHANNEL map
@@ -5889,7 +5889,7 @@
   // int -> QUANT16_SYMM map
   .quant16SymmOperands = {},
   // int -> FLOAT16 map
-  .float16Operands = {{0, {1.0f, -1.0f, 2.0f, -2.0f, 3.0f, -3.0f, 4.0f, -4.0f, 5.0f, -5.0f}}},
+  .float16Operands = {{0, {17.0f, -1.0f, 16.0f, -2.0f, 15.0f, -3.0f, 14.0f, -4.0f, 1.0f, -17.0f}}},
   // int -> BOOL8 map
   .bool8Operands = {},
   // int -> QUANT8_SYMM_PER_CHANNEL map
@@ -5912,7 +5912,7 @@
   // int -> QUANT16_SYMM map
   .quant16SymmOperands = {},
   // int -> FLOAT16 map
-  .float16Operands = {{0, {0.011656231246888638f, 0.6364086270332336f, 0.03168492019176483f, 0.2341216504573822f, 0.08612854033708572f, 0.08612854033708572f, 0.2341216504573822f, 0.03168492019176483f, 0.6364086270332336f, 0.011656231246888638f}}},
+  .float16Operands = {{0, {0.6439142227172852f, 0.6439142227172852f, 0.23688280582427979f, 0.23688280582427979f, 0.08714431524276733f, 0.08714431524276733f, 0.03205860033631325f, 0.03205860033631325f, 7.246299560392799e-08f, 7.246299560392799e-08f}}},
   // int -> BOOL8 map
   .bool8Operands = {},
   // int -> QUANT8_SYMM_PER_CHANNEL map
@@ -5946,7 +5946,7 @@
   // int -> QUANT16_SYMM map
   .quant16SymmOperands = {},
   // int -> FLOAT16 map
-  .float16Operands = {{0, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, -1.0f, -2.0f, -3.0f, -4.0f, -5.0f}}},
+  .float16Operands = {{0, {17.0f, 16.0f, 15.0f, 14.0f, 1.0f, -1.0f, -2.0f, -3.0f, -4.0f, -17.0f}}},
   // int -> BOOL8 map
   .bool8Operands = {},
   // int -> QUANT8_SYMM_PER_CHANNEL map
@@ -5969,7 +5969,7 @@
   // int -> QUANT16_SYMM map
   .quant16SymmOperands = {},
   // int -> FLOAT16 map
-  .float16Operands = {{0, {0.011656231246888638f, 0.03168492019176483f, 0.08612854033708572f, 0.2341216504573822f, 0.6364086270332336f, 0.6364086270332336f, 0.2341216504573822f, 0.08612854033708572f, 0.03168492019176483f, 0.011656231246888638f}}},
+  .float16Operands = {{0, {0.6439142227172852f, 0.23688280582427979f, 0.08714431524276733f, 0.03205860033631325f, 7.246299560392799e-08f, 0.6439142227172852f, 0.23688280582427979f, 0.08714431524276733f, 0.03205860033631325f, 7.246299560392799e-08f}}},
   // int -> BOOL8 map
   .bool8Operands = {},
   // int -> QUANT8_SYMM_PER_CHANNEL map
@@ -6003,7 +6003,7 @@
   // int -> QUANT16_SYMM map
   .quant16SymmOperands = {},
   // int -> FLOAT16 map
-  .float16Operands = {{0, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, -1.0f, -2.0f, -3.0f, -4.0f, -5.0f}}},
+  .float16Operands = {{0, {17.0f, 16.0f, 15.0f, 14.0f, 1.0f, -1.0f, -2.0f, -3.0f, -4.0f, -17.0f}}},
   // int -> BOOL8 map
   .bool8Operands = {},
   // int -> QUANT8_SYMM_PER_CHANNEL map
@@ -6026,7 +6026,7 @@
   // int -> QUANT16_SYMM map
   .quant16SymmOperands = {},
   // int -> FLOAT16 map
-  .float16Operands = {{0, {0.011656231246888638f, 0.03168492019176483f, 0.08612854033708572f, 0.2341216504573822f, 0.6364086270332336f, 0.6364086270332336f, 0.2341216504573822f, 0.08612854033708572f, 0.03168492019176483f, 0.011656231246888638f}}},
+  .float16Operands = {{0, {0.6439142227172852f, 0.23688280582427979f, 0.08714431524276733f, 0.03205860033631325f, 7.246299560392799e-08f, 0.6439142227172852f, 0.23688280582427979f, 0.08714431524276733f, 0.03205860033631325f, 7.246299560392799e-08f}}},
   // int -> BOOL8 map
   .bool8Operands = {},
   // int -> QUANT8_SYMM_PER_CHANNEL map
@@ -6060,7 +6060,7 @@
   // int -> QUANT16_SYMM map
   .quant16SymmOperands = {},
   // int -> FLOAT16 map
-  .float16Operands = {{0, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f}}},
+  .float16Operands = {{0, {17.0f, 16.0f, 15.0f, 14.0f, 1.0f}}},
   // int -> BOOL8 map
   .bool8Operands = {},
   // int -> QUANT8_SYMM_PER_CHANNEL map
@@ -6083,7 +6083,7 @@
   // int -> QUANT16_SYMM map
   .quant16SymmOperands = {},
   // int -> FLOAT16 map
-  .float16Operands = {{0, {0.011656231246888638f, 0.03168492019176483f, 0.08612854033708572f, 0.2341216504573822f, 0.6364086270332336f}}},
+  .float16Operands = {{0, {0.6439142227172852f, 0.23688280582427979f, 0.08714431524276733f, 0.03205860033631325f, 7.246299560392799e-08f}}},
   // int -> BOOL8 map
   .bool8Operands = {},
   // int -> QUANT8_SYMM_PER_CHANNEL map
@@ -6117,7 +6117,7 @@
   // int -> QUANT16_SYMM map
   .quant16SymmOperands = {},
   // int -> FLOAT16 map
-  .float16Operands = {{0, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f}}},
+  .float16Operands = {{0, {17.0f, 16.0f, 15.0f, 14.0f, 1.0f}}},
   // int -> BOOL8 map
   .bool8Operands = {},
   // int -> QUANT8_SYMM_PER_CHANNEL map
@@ -6140,7 +6140,7 @@
   // int -> QUANT16_SYMM map
   .quant16SymmOperands = {},
   // int -> FLOAT16 map
-  .float16Operands = {{0, {0.011656231246888638f, 0.03168492019176483f, 0.08612854033708572f, 0.2341216504573822f, 0.6364086270332336f}}},
+  .float16Operands = {{0, {0.6439142227172852f, 0.23688280582427979f, 0.08714431524276733f, 0.03205860033631325f, 7.246299560392799e-08f}}},
   // int -> BOOL8 map
   .bool8Operands = {},
   // int -> QUANT8_SYMM_PER_CHANNEL map
@@ -6170,7 +6170,7 @@
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
-  .quant8AsymmOperands = {{0, {132, 124, 132, 124, 132, 124, 132, 124, 136, 120, 136, 120, 136, 120, 136, 120, 140, 116, 140, 116, 140, 116, 140, 116, 144, 112, 144, 112, 144, 112, 144, 112, 148, 108, 148, 108, 148, 108, 148, 108}}},
+  .quant8AsymmOperands = {{0, {196, 124, 196, 124, 196, 124, 196, 124, 192, 120, 192, 120, 192, 120, 192, 120, 188, 116, 188, 116, 188, 116, 188, 116, 184, 112, 184, 112, 184, 112, 184, 112, 132, 60, 132, 60, 132, 60, 132, 60}}},
   // int -> QUANT16_SYMM map
   .quant16SymmOperands = {},
   // int -> FLOAT16 map
@@ -6193,7 +6193,7 @@
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
-  .quant8AsymmOperands = {{0, {3, 163, 3, 163, 3, 163, 3, 163, 8, 60, 8, 60, 8, 60, 8, 60, 22, 22, 22, 22, 22, 22, 22, 22, 60, 8, 60, 8, 60, 8, 60, 8, 163, 3, 163, 3, 163, 3, 163, 3}}},
+  .quant8AsymmOperands = {{0, {165, 165, 165, 165, 165, 165, 165, 165, 61, 61, 61, 61, 61, 61, 61, 61, 22, 22, 22, 22, 22, 22, 22, 22, 8, 8, 8, 8, 8, 8, 8, 8, 0, 0, 0, 0, 0, 0, 0, 0}}},
   // int -> QUANT16_SYMM map
   .quant16SymmOperands = {},
   // int -> FLOAT16 map
@@ -6227,7 +6227,7 @@
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
-  .quant8AsymmOperands = {{0, {132, 124, 132, 124, 132, 124, 132, 124, 136, 120, 136, 120, 136, 120, 136, 120, 140, 116, 140, 116, 140, 116, 140, 116, 144, 112, 144, 112, 144, 112, 144, 112, 148, 108, 148, 108, 148, 108, 148, 108}}},
+  .quant8AsymmOperands = {{0, {196, 124, 196, 124, 196, 124, 196, 124, 192, 120, 192, 120, 192, 120, 192, 120, 188, 116, 188, 116, 188, 116, 188, 116, 184, 112, 184, 112, 184, 112, 184, 112, 132, 60, 132, 60, 132, 60, 132, 60}}},
   // int -> QUANT16_SYMM map
   .quant16SymmOperands = {},
   // int -> FLOAT16 map
@@ -6250,7 +6250,7 @@
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
-  .quant8AsymmOperands = {{0, {3, 163, 3, 163, 3, 163, 3, 163, 8, 60, 8, 60, 8, 60, 8, 60, 22, 22, 22, 22, 22, 22, 22, 22, 60, 8, 60, 8, 60, 8, 60, 8, 163, 3, 163, 3, 163, 3, 163, 3}}},
+  .quant8AsymmOperands = {{0, {165, 165, 165, 165, 165, 165, 165, 165, 61, 61, 61, 61, 61, 61, 61, 61, 22, 22, 22, 22, 22, 22, 22, 22, 8, 8, 8, 8, 8, 8, 8, 8, 0, 0, 0, 0, 0, 0, 0, 0}}},
   // int -> QUANT16_SYMM map
   .quant16SymmOperands = {},
   // int -> FLOAT16 map
@@ -6284,7 +6284,7 @@
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
-  .quant8AsymmOperands = {{0, {132, 124, 132, 124, 136, 120, 136, 120, 140, 116, 140, 116, 144, 112, 144, 112, 148, 108, 148, 108, 132, 124, 132, 124, 136, 120, 136, 120, 140, 116, 140, 116, 144, 112, 144, 112, 148, 108, 148, 108}}},
+  .quant8AsymmOperands = {{0, {196, 124, 196, 124, 192, 120, 192, 120, 188, 116, 188, 116, 184, 112, 184, 112, 132, 60, 132, 60, 196, 124, 196, 124, 192, 120, 192, 120, 188, 116, 188, 116, 184, 112, 184, 112, 132, 60, 132, 60}}},
   // int -> QUANT16_SYMM map
   .quant16SymmOperands = {},
   // int -> FLOAT16 map
@@ -6307,7 +6307,7 @@
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
-  .quant8AsymmOperands = {{0, {3, 163, 3, 163, 8, 60, 8, 60, 22, 22, 22, 22, 60, 8, 60, 8, 163, 3, 163, 3, 3, 163, 3, 163, 8, 60, 8, 60, 22, 22, 22, 22, 60, 8, 60, 8, 163, 3, 163, 3}}},
+  .quant8AsymmOperands = {{0, {165, 165, 165, 165, 61, 61, 61, 61, 22, 22, 22, 22, 8, 8, 8, 8, 0, 0, 0, 0, 165, 165, 165, 165, 61, 61, 61, 61, 22, 22, 22, 22, 8, 8, 8, 8, 0, 0, 0, 0}}},
   // int -> QUANT16_SYMM map
   .quant16SymmOperands = {},
   // int -> FLOAT16 map
@@ -6341,7 +6341,7 @@
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
-  .quant8AsymmOperands = {{0, {132, 124, 132, 124, 136, 120, 136, 120, 140, 116, 140, 116, 144, 112, 144, 112, 148, 108, 148, 108, 132, 124, 132, 124, 136, 120, 136, 120, 140, 116, 140, 116, 144, 112, 144, 112, 148, 108, 148, 108}}},
+  .quant8AsymmOperands = {{0, {196, 124, 196, 124, 192, 120, 192, 120, 188, 116, 188, 116, 184, 112, 184, 112, 132, 60, 132, 60, 196, 124, 196, 124, 192, 120, 192, 120, 188, 116, 188, 116, 184, 112, 184, 112, 132, 60, 132, 60}}},
   // int -> QUANT16_SYMM map
   .quant16SymmOperands = {},
   // int -> FLOAT16 map
@@ -6364,7 +6364,7 @@
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
-  .quant8AsymmOperands = {{0, {3, 163, 3, 163, 8, 60, 8, 60, 22, 22, 22, 22, 60, 8, 60, 8, 163, 3, 163, 3, 3, 163, 3, 163, 8, 60, 8, 60, 22, 22, 22, 22, 60, 8, 60, 8, 163, 3, 163, 3}}},
+  .quant8AsymmOperands = {{0, {165, 165, 165, 165, 61, 61, 61, 61, 22, 22, 22, 22, 8, 8, 8, 8, 0, 0, 0, 0, 165, 165, 165, 165, 61, 61, 61, 61, 22, 22, 22, 22, 8, 8, 8, 8, 0, 0, 0, 0}}},
   // int -> QUANT16_SYMM map
   .quant16SymmOperands = {},
   // int -> FLOAT16 map
@@ -6398,7 +6398,7 @@
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
-  .quant8AsymmOperands = {{0, {132, 124, 136, 120, 140, 116, 144, 112, 148, 108, 132, 124, 136, 120, 140, 116, 144, 112, 148, 108, 132, 124, 136, 120, 140, 116, 144, 112, 148, 108, 132, 124, 136, 120, 140, 116, 144, 112, 148, 108}}},
+  .quant8AsymmOperands = {{0, {196, 124, 192, 120, 188, 116, 184, 112, 132, 60, 196, 124, 192, 120, 188, 116, 184, 112, 132, 60, 196, 124, 192, 120, 188, 116, 184, 112, 132, 60, 196, 124, 192, 120, 188, 116, 184, 112, 132, 60}}},
   // int -> QUANT16_SYMM map
   .quant16SymmOperands = {},
   // int -> FLOAT16 map
@@ -6421,7 +6421,7 @@
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
-  .quant8AsymmOperands = {{0, {3, 163, 8, 60, 22, 22, 60, 8, 163, 3, 3, 163, 8, 60, 22, 22, 60, 8, 163, 3, 3, 163, 8, 60, 22, 22, 60, 8, 163, 3, 3, 163, 8, 60, 22, 22, 60, 8, 163, 3}}},
+  .quant8AsymmOperands = {{0, {165, 165, 61, 61, 22, 22, 8, 8, 0, 0, 165, 165, 61, 61, 22, 22, 8, 8, 0, 0, 165, 165, 61, 61, 22, 22, 8, 8, 0, 0, 165, 165, 61, 61, 22, 22, 8, 8, 0, 0}}},
   // int -> QUANT16_SYMM map
   .quant16SymmOperands = {},
   // int -> FLOAT16 map
@@ -6455,7 +6455,7 @@
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
-  .quant8AsymmOperands = {{0, {132, 124, 136, 120, 140, 116, 144, 112, 148, 108, 132, 124, 136, 120, 140, 116, 144, 112, 148, 108, 132, 124, 136, 120, 140, 116, 144, 112, 148, 108, 132, 124, 136, 120, 140, 116, 144, 112, 148, 108}}},
+  .quant8AsymmOperands = {{0, {196, 124, 192, 120, 188, 116, 184, 112, 132, 60, 196, 124, 192, 120, 188, 116, 184, 112, 132, 60, 196, 124, 192, 120, 188, 116, 184, 112, 132, 60, 196, 124, 192, 120, 188, 116, 184, 112, 132, 60}}},
   // int -> QUANT16_SYMM map
   .quant16SymmOperands = {},
   // int -> FLOAT16 map
@@ -6478,7 +6478,7 @@
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
-  .quant8AsymmOperands = {{0, {3, 163, 8, 60, 22, 22, 60, 8, 163, 3, 3, 163, 8, 60, 22, 22, 60, 8, 163, 3, 3, 163, 8, 60, 22, 22, 60, 8, 163, 3, 3, 163, 8, 60, 22, 22, 60, 8, 163, 3}}},
+  .quant8AsymmOperands = {{0, {165, 165, 61, 61, 22, 22, 8, 8, 0, 0, 165, 165, 61, 61, 22, 22, 8, 8, 0, 0, 165, 165, 61, 61, 22, 22, 8, 8, 0, 0, 165, 165, 61, 61, 22, 22, 8, 8, 0, 0}}},
   // int -> QUANT16_SYMM map
   .quant16SymmOperands = {},
   // int -> FLOAT16 map
@@ -6512,7 +6512,7 @@
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
-  .quant8AsymmOperands = {{0, {132, 136, 140, 144, 148, 124, 120, 116, 112, 108, 132, 136, 140, 144, 148, 124, 120, 116, 112, 108, 132, 136, 140, 144, 148, 124, 120, 116, 112, 108, 132, 136, 140, 144, 148, 124, 120, 116, 112, 108}}},
+  .quant8AsymmOperands = {{0, {196, 192, 188, 184, 132, 124, 120, 116, 112, 60, 196, 192, 188, 184, 132, 124, 120, 116, 112, 60, 196, 192, 188, 184, 132, 124, 120, 116, 112, 60, 196, 192, 188, 184, 132, 124, 120, 116, 112, 60}}},
   // int -> QUANT16_SYMM map
   .quant16SymmOperands = {},
   // int -> FLOAT16 map
@@ -6535,7 +6535,7 @@
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
-  .quant8AsymmOperands = {{0, {3, 8, 22, 60, 163, 163, 60, 22, 8, 3, 3, 8, 22, 60, 163, 163, 60, 22, 8, 3, 3, 8, 22, 60, 163, 163, 60, 22, 8, 3, 3, 8, 22, 60, 163, 163, 60, 22, 8, 3}}},
+  .quant8AsymmOperands = {{0, {165, 61, 22, 8, 0, 165, 61, 22, 8, 0, 165, 61, 22, 8, 0, 165, 61, 22, 8, 0, 165, 61, 22, 8, 0, 165, 61, 22, 8, 0, 165, 61, 22, 8, 0, 165, 61, 22, 8, 0}}},
   // int -> QUANT16_SYMM map
   .quant16SymmOperands = {},
   // int -> FLOAT16 map
@@ -6569,7 +6569,7 @@
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
-  .quant8AsymmOperands = {{0, {132, 136, 140, 144, 148, 124, 120, 116, 112, 108, 132, 136, 140, 144, 148, 124, 120, 116, 112, 108, 132, 136, 140, 144, 148, 124, 120, 116, 112, 108, 132, 136, 140, 144, 148, 124, 120, 116, 112, 108}}},
+  .quant8AsymmOperands = {{0, {196, 192, 188, 184, 132, 124, 120, 116, 112, 60, 196, 192, 188, 184, 132, 124, 120, 116, 112, 60, 196, 192, 188, 184, 132, 124, 120, 116, 112, 60, 196, 192, 188, 184, 132, 124, 120, 116, 112, 60}}},
   // int -> QUANT16_SYMM map
   .quant16SymmOperands = {},
   // int -> FLOAT16 map
@@ -6592,7 +6592,7 @@
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
-  .quant8AsymmOperands = {{0, {3, 8, 22, 60, 163, 163, 60, 22, 8, 3, 3, 8, 22, 60, 163, 163, 60, 22, 8, 3, 3, 8, 22, 60, 163, 163, 60, 22, 8, 3, 3, 8, 22, 60, 163, 163, 60, 22, 8, 3}}},
+  .quant8AsymmOperands = {{0, {165, 61, 22, 8, 0, 165, 61, 22, 8, 0, 165, 61, 22, 8, 0, 165, 61, 22, 8, 0, 165, 61, 22, 8, 0, 165, 61, 22, 8, 0, 165, 61, 22, 8, 0, 165, 61, 22, 8, 0}}},
   // int -> QUANT16_SYMM map
   .quant16SymmOperands = {},
   // int -> FLOAT16 map
@@ -6626,7 +6626,7 @@
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
-  .quant8AsymmOperands = {{0, {132, 124, 132, 124, 136, 120, 136, 120, 140, 116, 140, 116, 144, 112, 144, 112, 148, 108, 148, 108}}},
+  .quant8AsymmOperands = {{0, {196, 124, 196, 124, 192, 120, 192, 120, 188, 116, 188, 116, 184, 112, 184, 112, 132, 60, 132, 60}}},
   // int -> QUANT16_SYMM map
   .quant16SymmOperands = {},
   // int -> FLOAT16 map
@@ -6649,7 +6649,7 @@
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
-  .quant8AsymmOperands = {{0, {3, 163, 3, 163, 8, 60, 8, 60, 22, 22, 22, 22, 60, 8, 60, 8, 163, 3, 163, 3}}},
+  .quant8AsymmOperands = {{0, {165, 165, 165, 165, 61, 61, 61, 61, 22, 22, 22, 22, 8, 8, 8, 8, 0, 0, 0, 0}}},
   // int -> QUANT16_SYMM map
   .quant16SymmOperands = {},
   // int -> FLOAT16 map
@@ -6683,7 +6683,7 @@
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
-  .quant8AsymmOperands = {{0, {132, 124, 132, 124, 136, 120, 136, 120, 140, 116, 140, 116, 144, 112, 144, 112, 148, 108, 148, 108}}},
+  .quant8AsymmOperands = {{0, {196, 124, 196, 124, 192, 120, 192, 120, 188, 116, 188, 116, 184, 112, 184, 112, 132, 60, 132, 60}}},
   // int -> QUANT16_SYMM map
   .quant16SymmOperands = {},
   // int -> FLOAT16 map
@@ -6706,7 +6706,7 @@
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
-  .quant8AsymmOperands = {{0, {3, 163, 3, 163, 8, 60, 8, 60, 22, 22, 22, 22, 60, 8, 60, 8, 163, 3, 163, 3}}},
+  .quant8AsymmOperands = {{0, {165, 165, 165, 165, 61, 61, 61, 61, 22, 22, 22, 22, 8, 8, 8, 8, 0, 0, 0, 0}}},
   // int -> QUANT16_SYMM map
   .quant16SymmOperands = {},
   // int -> FLOAT16 map
@@ -6740,7 +6740,7 @@
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
-  .quant8AsymmOperands = {{0, {132, 124, 136, 120, 140, 116, 144, 112, 148, 108, 132, 124, 136, 120, 140, 116, 144, 112, 148, 108}}},
+  .quant8AsymmOperands = {{0, {196, 124, 192, 120, 188, 116, 184, 112, 132, 60, 196, 124, 192, 120, 188, 116, 184, 112, 132, 60}}},
   // int -> QUANT16_SYMM map
   .quant16SymmOperands = {},
   // int -> FLOAT16 map
@@ -6763,7 +6763,7 @@
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
-  .quant8AsymmOperands = {{0, {3, 163, 8, 60, 22, 22, 60, 8, 163, 3, 3, 163, 8, 60, 22, 22, 60, 8, 163, 3}}},
+  .quant8AsymmOperands = {{0, {165, 165, 61, 61, 22, 22, 8, 8, 0, 0, 165, 165, 61, 61, 22, 22, 8, 8, 0, 0}}},
   // int -> QUANT16_SYMM map
   .quant16SymmOperands = {},
   // int -> FLOAT16 map
@@ -6797,7 +6797,7 @@
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
-  .quant8AsymmOperands = {{0, {132, 124, 136, 120, 140, 116, 144, 112, 148, 108, 132, 124, 136, 120, 140, 116, 144, 112, 148, 108}}},
+  .quant8AsymmOperands = {{0, {196, 124, 192, 120, 188, 116, 184, 112, 132, 60, 196, 124, 192, 120, 188, 116, 184, 112, 132, 60}}},
   // int -> QUANT16_SYMM map
   .quant16SymmOperands = {},
   // int -> FLOAT16 map
@@ -6820,7 +6820,7 @@
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
-  .quant8AsymmOperands = {{0, {3, 163, 8, 60, 22, 22, 60, 8, 163, 3, 3, 163, 8, 60, 22, 22, 60, 8, 163, 3}}},
+  .quant8AsymmOperands = {{0, {165, 165, 61, 61, 22, 22, 8, 8, 0, 0, 165, 165, 61, 61, 22, 22, 8, 8, 0, 0}}},
   // int -> QUANT16_SYMM map
   .quant16SymmOperands = {},
   // int -> FLOAT16 map
@@ -6854,7 +6854,7 @@
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
-  .quant8AsymmOperands = {{0, {132, 136, 140, 144, 148, 124, 120, 116, 112, 108, 132, 136, 140, 144, 148, 124, 120, 116, 112, 108}}},
+  .quant8AsymmOperands = {{0, {196, 192, 188, 184, 132, 124, 120, 116, 112, 60, 196, 192, 188, 184, 132, 124, 120, 116, 112, 60}}},
   // int -> QUANT16_SYMM map
   .quant16SymmOperands = {},
   // int -> FLOAT16 map
@@ -6877,7 +6877,7 @@
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
-  .quant8AsymmOperands = {{0, {3, 8, 22, 60, 163, 163, 60, 22, 8, 3, 3, 8, 22, 60, 163, 163, 60, 22, 8, 3}}},
+  .quant8AsymmOperands = {{0, {165, 61, 22, 8, 0, 165, 61, 22, 8, 0, 165, 61, 22, 8, 0, 165, 61, 22, 8, 0}}},
   // int -> QUANT16_SYMM map
   .quant16SymmOperands = {},
   // int -> FLOAT16 map
@@ -6911,7 +6911,7 @@
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
-  .quant8AsymmOperands = {{0, {132, 136, 140, 144, 148, 124, 120, 116, 112, 108, 132, 136, 140, 144, 148, 124, 120, 116, 112, 108}}},
+  .quant8AsymmOperands = {{0, {196, 192, 188, 184, 132, 124, 120, 116, 112, 60, 196, 192, 188, 184, 132, 124, 120, 116, 112, 60}}},
   // int -> QUANT16_SYMM map
   .quant16SymmOperands = {},
   // int -> FLOAT16 map
@@ -6934,7 +6934,7 @@
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
-  .quant8AsymmOperands = {{0, {3, 8, 22, 60, 163, 163, 60, 22, 8, 3, 3, 8, 22, 60, 163, 163, 60, 22, 8, 3}}},
+  .quant8AsymmOperands = {{0, {165, 61, 22, 8, 0, 165, 61, 22, 8, 0, 165, 61, 22, 8, 0, 165, 61, 22, 8, 0}}},
   // int -> QUANT16_SYMM map
   .quant16SymmOperands = {},
   // int -> FLOAT16 map
@@ -6968,7 +6968,7 @@
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
-  .quant8AsymmOperands = {{0, {132, 124, 136, 120, 140, 116, 144, 112, 148, 108}}},
+  .quant8AsymmOperands = {{0, {196, 124, 192, 120, 188, 116, 184, 112, 132, 60}}},
   // int -> QUANT16_SYMM map
   .quant16SymmOperands = {},
   // int -> FLOAT16 map
@@ -6991,7 +6991,7 @@
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
-  .quant8AsymmOperands = {{0, {3, 163, 8, 60, 22, 22, 60, 8, 163, 3}}},
+  .quant8AsymmOperands = {{0, {165, 165, 61, 61, 22, 22, 8, 8, 0, 0}}},
   // int -> QUANT16_SYMM map
   .quant16SymmOperands = {},
   // int -> FLOAT16 map
@@ -7025,7 +7025,7 @@
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
-  .quant8AsymmOperands = {{0, {132, 124, 136, 120, 140, 116, 144, 112, 148, 108}}},
+  .quant8AsymmOperands = {{0, {196, 124, 192, 120, 188, 116, 184, 112, 132, 60}}},
   // int -> QUANT16_SYMM map
   .quant16SymmOperands = {},
   // int -> FLOAT16 map
@@ -7048,7 +7048,7 @@
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
-  .quant8AsymmOperands = {{0, {3, 163, 8, 60, 22, 22, 60, 8, 163, 3}}},
+  .quant8AsymmOperands = {{0, {165, 165, 61, 61, 22, 22, 8, 8, 0, 0}}},
   // int -> QUANT16_SYMM map
   .quant16SymmOperands = {},
   // int -> FLOAT16 map
@@ -7082,7 +7082,7 @@
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
-  .quant8AsymmOperands = {{0, {132, 136, 140, 144, 148, 124, 120, 116, 112, 108}}},
+  .quant8AsymmOperands = {{0, {196, 192, 188, 184, 132, 124, 120, 116, 112, 60}}},
   // int -> QUANT16_SYMM map
   .quant16SymmOperands = {},
   // int -> FLOAT16 map
@@ -7105,7 +7105,7 @@
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
-  .quant8AsymmOperands = {{0, {3, 8, 22, 60, 163, 163, 60, 22, 8, 3}}},
+  .quant8AsymmOperands = {{0, {165, 61, 22, 8, 0, 165, 61, 22, 8, 0}}},
   // int -> QUANT16_SYMM map
   .quant16SymmOperands = {},
   // int -> FLOAT16 map
@@ -7139,7 +7139,7 @@
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
-  .quant8AsymmOperands = {{0, {132, 136, 140, 144, 148, 124, 120, 116, 112, 108}}},
+  .quant8AsymmOperands = {{0, {196, 192, 188, 184, 132, 124, 120, 116, 112, 60}}},
   // int -> QUANT16_SYMM map
   .quant16SymmOperands = {},
   // int -> FLOAT16 map
@@ -7162,7 +7162,7 @@
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
-  .quant8AsymmOperands = {{0, {3, 8, 22, 60, 163, 163, 60, 22, 8, 3}}},
+  .quant8AsymmOperands = {{0, {165, 61, 22, 8, 0, 165, 61, 22, 8, 0}}},
   // int -> QUANT16_SYMM map
   .quant16SymmOperands = {},
   // int -> FLOAT16 map
@@ -7196,7 +7196,7 @@
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
-  .quant8AsymmOperands = {{0, {132, 136, 140, 144, 148}}},
+  .quant8AsymmOperands = {{0, {196, 192, 188, 184, 132}}},
   // int -> QUANT16_SYMM map
   .quant16SymmOperands = {},
   // int -> FLOAT16 map
@@ -7219,7 +7219,7 @@
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
-  .quant8AsymmOperands = {{0, {3, 8, 22, 60, 163}}},
+  .quant8AsymmOperands = {{0, {165, 61, 22, 8, 0}}},
   // int -> QUANT16_SYMM map
   .quant16SymmOperands = {},
   // int -> FLOAT16 map
@@ -7253,7 +7253,7 @@
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
-  .quant8AsymmOperands = {{0, {132, 136, 140, 144, 148}}},
+  .quant8AsymmOperands = {{0, {196, 192, 188, 184, 132}}},
   // int -> QUANT16_SYMM map
   .quant16SymmOperands = {},
   // int -> FLOAT16 map
@@ -7276,7 +7276,7 @@
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
-  .quant8AsymmOperands = {{0, {3, 8, 22, 60, 163}}},
+  .quant8AsymmOperands = {{0, {165, 61, 22, 8, 0}}},
   // int -> QUANT16_SYMM map
   .quant16SymmOperands = {},
   // int -> FLOAT16 map
@@ -7306,7 +7306,7 @@
   // int -> Dimensions map
   .operandDimensions = {{0, {5, 2, 2, 2}}},
   // int -> FLOAT32 map
-  .float32Operands = {{0, {1.0f, -1.0f, 1.0f, -1.0f, 1.0f, -1.0f, 1.0f, -1.0f, 2.0f, -2.0f, 2.0f, -2.0f, 2.0f, -2.0f, 2.0f, -2.0f, 3.0f, -3.0f, 3.0f, -3.0f, 3.0f, -3.0f, 3.0f, -3.0f, 4.0f, -4.0f, 4.0f, -4.0f, 4.0f, -4.0f, 4.0f, -4.0f, 5.0f, -5.0f, 5.0f, -5.0f, 5.0f, -5.0f, 5.0f, -5.0f}}},
+  .float32Operands = {{0, {17.0f, -1.0f, 17.0f, -1.0f, 17.0f, -1.0f, 17.0f, -1.0f, 16.0f, -2.0f, 16.0f, -2.0f, 16.0f, -2.0f, 16.0f, -2.0f, 15.0f, -3.0f, 15.0f, -3.0f, 15.0f, -3.0f, 15.0f, -3.0f, 14.0f, -4.0f, 14.0f, -4.0f, 14.0f, -4.0f, 14.0f, -4.0f, 1.0f, -17.0f, 1.0f, -17.0f, 1.0f, -17.0f, 1.0f, -17.0f}}},
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
@@ -7329,7 +7329,7 @@
   // int -> Dimensions map
   .operandDimensions = {{0, {5, 2, 2, 2}}},
   // int -> FLOAT32 map
-  .float32Operands = {{0, {0.011656231f, 0.636408647f, 0.011656231f, 0.636408647f, 0.011656231f, 0.636408647f, 0.011656231f, 0.636408647f, 0.031684921f, 0.234121657f, 0.031684921f, 0.234121657f, 0.031684921f, 0.234121657f, 0.031684921f, 0.234121657f, 0.086128544f, 0.086128544f, 0.086128544f, 0.086128544f, 0.086128544f, 0.086128544f, 0.086128544f, 0.086128544f, 0.234121657f, 0.031684921f, 0.234121657f, 0.031684921f, 0.234121657f, 0.031684921f, 0.234121657f, 0.031684921f, 0.636408647f, 0.011656231f, 0.636408647f, 0.011656231f, 0.636408647f, 0.011656231f, 0.636408647f, 0.011656231f}}},
+  .float32Operands = {{0, {0.643914213228014f, 0.643914213228014f, 0.643914213228014f, 0.643914213228014f, 0.643914213228014f, 0.643914213228014f, 0.643914213228014f, 0.643914213228014f, 0.236882800924671f, 0.236882800924671f, 0.236882800924671f, 0.236882800924671f, 0.236882800924671f, 0.236882800924671f, 0.236882800924671f, 0.236882800924671f, 0.087144312427294f, 0.087144312427294f, 0.087144312427294f, 0.087144312427294f, 0.087144312427294f, 0.087144312427294f, 0.087144312427294f, 0.087144312427294f, 0.032058600957022f, 0.032058600957022f, 0.032058600957022f, 0.032058600957022f, 0.032058600957022f, 0.032058600957022f, 0.032058600957022f, 0.032058600957022f, 7.246299848982885e-08f, 7.246299848982885e-08f, 7.246299848982885e-08f, 7.246299848982885e-08f, 7.246299848982885e-08f, 7.246299848982885e-08f, 7.246299848982885e-08f, 7.246299848982885e-08f}}},
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
@@ -7363,7 +7363,7 @@
   // int -> Dimensions map
   .operandDimensions = {{0, {5, 2, 2, 2}}},
   // int -> FLOAT32 map
-  .float32Operands = {{0, {1.0f, -1.0f, 1.0f, -1.0f, 1.0f, -1.0f, 1.0f, -1.0f, 2.0f, -2.0f, 2.0f, -2.0f, 2.0f, -2.0f, 2.0f, -2.0f, 3.0f, -3.0f, 3.0f, -3.0f, 3.0f, -3.0f, 3.0f, -3.0f, 4.0f, -4.0f, 4.0f, -4.0f, 4.0f, -4.0f, 4.0f, -4.0f, 5.0f, -5.0f, 5.0f, -5.0f, 5.0f, -5.0f, 5.0f, -5.0f}}},
+  .float32Operands = {{0, {17.0f, -1.0f, 17.0f, -1.0f, 17.0f, -1.0f, 17.0f, -1.0f, 16.0f, -2.0f, 16.0f, -2.0f, 16.0f, -2.0f, 16.0f, -2.0f, 15.0f, -3.0f, 15.0f, -3.0f, 15.0f, -3.0f, 15.0f, -3.0f, 14.0f, -4.0f, 14.0f, -4.0f, 14.0f, -4.0f, 14.0f, -4.0f, 1.0f, -17.0f, 1.0f, -17.0f, 1.0f, -17.0f, 1.0f, -17.0f}}},
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
@@ -7386,7 +7386,7 @@
   // int -> Dimensions map
   .operandDimensions = {{0, {5, 2, 2, 2}}},
   // int -> FLOAT32 map
-  .float32Operands = {{0, {0.011656231f, 0.636408647f, 0.011656231f, 0.636408647f, 0.011656231f, 0.636408647f, 0.011656231f, 0.636408647f, 0.031684921f, 0.234121657f, 0.031684921f, 0.234121657f, 0.031684921f, 0.234121657f, 0.031684921f, 0.234121657f, 0.086128544f, 0.086128544f, 0.086128544f, 0.086128544f, 0.086128544f, 0.086128544f, 0.086128544f, 0.086128544f, 0.234121657f, 0.031684921f, 0.234121657f, 0.031684921f, 0.234121657f, 0.031684921f, 0.234121657f, 0.031684921f, 0.636408647f, 0.011656231f, 0.636408647f, 0.011656231f, 0.636408647f, 0.011656231f, 0.636408647f, 0.011656231f}}},
+  .float32Operands = {{0, {0.643914213228014f, 0.643914213228014f, 0.643914213228014f, 0.643914213228014f, 0.643914213228014f, 0.643914213228014f, 0.643914213228014f, 0.643914213228014f, 0.236882800924671f, 0.236882800924671f, 0.236882800924671f, 0.236882800924671f, 0.236882800924671f, 0.236882800924671f, 0.236882800924671f, 0.236882800924671f, 0.087144312427294f, 0.087144312427294f, 0.087144312427294f, 0.087144312427294f, 0.087144312427294f, 0.087144312427294f, 0.087144312427294f, 0.087144312427294f, 0.032058600957022f, 0.032058600957022f, 0.032058600957022f, 0.032058600957022f, 0.032058600957022f, 0.032058600957022f, 0.032058600957022f, 0.032058600957022f, 7.246299848982885e-08f, 7.246299848982885e-08f, 7.246299848982885e-08f, 7.246299848982885e-08f, 7.246299848982885e-08f, 7.246299848982885e-08f, 7.246299848982885e-08f, 7.246299848982885e-08f}}},
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
@@ -7420,7 +7420,7 @@
   // int -> Dimensions map
   .operandDimensions = {{0, {2, 5, 2, 2}}},
   // int -> FLOAT32 map
-  .float32Operands = {{0, {1.0f, -1.0f, 1.0f, -1.0f, 2.0f, -2.0f, 2.0f, -2.0f, 3.0f, -3.0f, 3.0f, -3.0f, 4.0f, -4.0f, 4.0f, -4.0f, 5.0f, -5.0f, 5.0f, -5.0f, 1.0f, -1.0f, 1.0f, -1.0f, 2.0f, -2.0f, 2.0f, -2.0f, 3.0f, -3.0f, 3.0f, -3.0f, 4.0f, -4.0f, 4.0f, -4.0f, 5.0f, -5.0f, 5.0f, -5.0f}}},
+  .float32Operands = {{0, {17.0f, -1.0f, 17.0f, -1.0f, 16.0f, -2.0f, 16.0f, -2.0f, 15.0f, -3.0f, 15.0f, -3.0f, 14.0f, -4.0f, 14.0f, -4.0f, 1.0f, -17.0f, 1.0f, -17.0f, 17.0f, -1.0f, 17.0f, -1.0f, 16.0f, -2.0f, 16.0f, -2.0f, 15.0f, -3.0f, 15.0f, -3.0f, 14.0f, -4.0f, 14.0f, -4.0f, 1.0f, -17.0f, 1.0f, -17.0f}}},
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
@@ -7443,7 +7443,7 @@
   // int -> Dimensions map
   .operandDimensions = {{0, {2, 5, 2, 2}}},
   // int -> FLOAT32 map
-  .float32Operands = {{0, {0.011656231f, 0.636408647f, 0.011656231f, 0.636408647f, 0.031684921f, 0.234121657f, 0.031684921f, 0.234121657f, 0.086128544f, 0.086128544f, 0.086128544f, 0.086128544f, 0.234121657f, 0.031684921f, 0.234121657f, 0.031684921f, 0.636408647f, 0.011656231f, 0.636408647f, 0.011656231f, 0.011656231f, 0.636408647f, 0.011656231f, 0.636408647f, 0.031684921f, 0.234121657f, 0.031684921f, 0.234121657f, 0.086128544f, 0.086128544f, 0.086128544f, 0.086128544f, 0.234121657f, 0.031684921f, 0.234121657f, 0.031684921f, 0.636408647f, 0.011656231f, 0.636408647f, 0.011656231f}}},
+  .float32Operands = {{0, {0.643914213228014f, 0.643914213228014f, 0.643914213228014f, 0.643914213228014f, 0.236882800924671f, 0.236882800924671f, 0.236882800924671f, 0.236882800924671f, 0.087144312427294f, 0.087144312427294f, 0.087144312427294f, 0.087144312427294f, 0.032058600957022f, 0.032058600957022f, 0.032058600957022f, 0.032058600957022f, 7.246299848982885e-08f, 7.246299848982885e-08f, 7.246299848982885e-08f, 7.246299848982885e-08f, 0.643914213228014f, 0.643914213228014f, 0.643914213228014f, 0.643914213228014f, 0.236882800924671f, 0.236882800924671f, 0.236882800924671f, 0.236882800924671f, 0.087144312427294f, 0.087144312427294f, 0.087144312427294f, 0.087144312427294f, 0.032058600957022f, 0.032058600957022f, 0.032058600957022f, 0.032058600957022f, 7.246299848982885e-08f, 7.246299848982885e-08f, 7.246299848982885e-08f, 7.246299848982885e-08f}}},
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
@@ -7477,7 +7477,7 @@
   // int -> Dimensions map
   .operandDimensions = {{0, {2, 5, 2, 2}}},
   // int -> FLOAT32 map
-  .float32Operands = {{0, {1.0f, -1.0f, 1.0f, -1.0f, 2.0f, -2.0f, 2.0f, -2.0f, 3.0f, -3.0f, 3.0f, -3.0f, 4.0f, -4.0f, 4.0f, -4.0f, 5.0f, -5.0f, 5.0f, -5.0f, 1.0f, -1.0f, 1.0f, -1.0f, 2.0f, -2.0f, 2.0f, -2.0f, 3.0f, -3.0f, 3.0f, -3.0f, 4.0f, -4.0f, 4.0f, -4.0f, 5.0f, -5.0f, 5.0f, -5.0f}}},
+  .float32Operands = {{0, {17.0f, -1.0f, 17.0f, -1.0f, 16.0f, -2.0f, 16.0f, -2.0f, 15.0f, -3.0f, 15.0f, -3.0f, 14.0f, -4.0f, 14.0f, -4.0f, 1.0f, -17.0f, 1.0f, -17.0f, 17.0f, -1.0f, 17.0f, -1.0f, 16.0f, -2.0f, 16.0f, -2.0f, 15.0f, -3.0f, 15.0f, -3.0f, 14.0f, -4.0f, 14.0f, -4.0f, 1.0f, -17.0f, 1.0f, -17.0f}}},
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
@@ -7500,7 +7500,7 @@
   // int -> Dimensions map
   .operandDimensions = {{0, {2, 5, 2, 2}}},
   // int -> FLOAT32 map
-  .float32Operands = {{0, {0.011656231f, 0.636408647f, 0.011656231f, 0.636408647f, 0.031684921f, 0.234121657f, 0.031684921f, 0.234121657f, 0.086128544f, 0.086128544f, 0.086128544f, 0.086128544f, 0.234121657f, 0.031684921f, 0.234121657f, 0.031684921f, 0.636408647f, 0.011656231f, 0.636408647f, 0.011656231f, 0.011656231f, 0.636408647f, 0.011656231f, 0.636408647f, 0.031684921f, 0.234121657f, 0.031684921f, 0.234121657f, 0.086128544f, 0.086128544f, 0.086128544f, 0.086128544f, 0.234121657f, 0.031684921f, 0.234121657f, 0.031684921f, 0.636408647f, 0.011656231f, 0.636408647f, 0.011656231f}}},
+  .float32Operands = {{0, {0.643914213228014f, 0.643914213228014f, 0.643914213228014f, 0.643914213228014f, 0.236882800924671f, 0.236882800924671f, 0.236882800924671f, 0.236882800924671f, 0.087144312427294f, 0.087144312427294f, 0.087144312427294f, 0.087144312427294f, 0.032058600957022f, 0.032058600957022f, 0.032058600957022f, 0.032058600957022f, 7.246299848982885e-08f, 7.246299848982885e-08f, 7.246299848982885e-08f, 7.246299848982885e-08f, 0.643914213228014f, 0.643914213228014f, 0.643914213228014f, 0.643914213228014f, 0.236882800924671f, 0.236882800924671f, 0.236882800924671f, 0.236882800924671f, 0.087144312427294f, 0.087144312427294f, 0.087144312427294f, 0.087144312427294f, 0.032058600957022f, 0.032058600957022f, 0.032058600957022f, 0.032058600957022f, 7.246299848982885e-08f, 7.246299848982885e-08f, 7.246299848982885e-08f, 7.246299848982885e-08f}}},
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
@@ -7534,7 +7534,7 @@
   // int -> Dimensions map
   .operandDimensions = {{0, {2, 2, 5, 2}}},
   // int -> FLOAT32 map
-  .float32Operands = {{0, {1.0f, -1.0f, 2.0f, -2.0f, 3.0f, -3.0f, 4.0f, -4.0f, 5.0f, -5.0f, 1.0f, -1.0f, 2.0f, -2.0f, 3.0f, -3.0f, 4.0f, -4.0f, 5.0f, -5.0f, 1.0f, -1.0f, 2.0f, -2.0f, 3.0f, -3.0f, 4.0f, -4.0f, 5.0f, -5.0f, 1.0f, -1.0f, 2.0f, -2.0f, 3.0f, -3.0f, 4.0f, -4.0f, 5.0f, -5.0f}}},
+  .float32Operands = {{0, {17.0f, -1.0f, 16.0f, -2.0f, 15.0f, -3.0f, 14.0f, -4.0f, 1.0f, -17.0f, 17.0f, -1.0f, 16.0f, -2.0f, 15.0f, -3.0f, 14.0f, -4.0f, 1.0f, -17.0f, 17.0f, -1.0f, 16.0f, -2.0f, 15.0f, -3.0f, 14.0f, -4.0f, 1.0f, -17.0f, 17.0f, -1.0f, 16.0f, -2.0f, 15.0f, -3.0f, 14.0f, -4.0f, 1.0f, -17.0f}}},
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
@@ -7557,7 +7557,7 @@
   // int -> Dimensions map
   .operandDimensions = {{0, {2, 2, 5, 2}}},
   // int -> FLOAT32 map
-  .float32Operands = {{0, {0.011656231f, 0.636408647f, 0.031684921f, 0.234121657f, 0.086128544f, 0.086128544f, 0.234121657f, 0.031684921f, 0.636408647f, 0.011656231f, 0.011656231f, 0.636408647f, 0.031684921f, 0.234121657f, 0.086128544f, 0.086128544f, 0.234121657f, 0.031684921f, 0.636408647f, 0.011656231f, 0.011656231f, 0.636408647f, 0.031684921f, 0.234121657f, 0.086128544f, 0.086128544f, 0.234121657f, 0.031684921f, 0.636408647f, 0.011656231f, 0.011656231f, 0.636408647f, 0.031684921f, 0.234121657f, 0.086128544f, 0.086128544f, 0.234121657f, 0.031684921f, 0.636408647f, 0.011656231f}}},
+  .float32Operands = {{0, {0.643914213228014f, 0.643914213228014f, 0.236882800924671f, 0.236882800924671f, 0.087144312427294f, 0.087144312427294f, 0.032058600957022f, 0.032058600957022f, 7.246299848982885e-08f, 7.246299848982885e-08f, 0.643914213228014f, 0.643914213228014f, 0.236882800924671f, 0.236882800924671f, 0.087144312427294f, 0.087144312427294f, 0.032058600957022f, 0.032058600957022f, 7.246299848982885e-08f, 7.246299848982885e-08f, 0.643914213228014f, 0.643914213228014f, 0.236882800924671f, 0.236882800924671f, 0.087144312427294f, 0.087144312427294f, 0.032058600957022f, 0.032058600957022f, 7.246299848982885e-08f, 7.246299848982885e-08f, 0.643914213228014f, 0.643914213228014f, 0.236882800924671f, 0.236882800924671f, 0.087144312427294f, 0.087144312427294f, 0.032058600957022f, 0.032058600957022f, 7.246299848982885e-08f, 7.246299848982885e-08f}}},
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
@@ -7591,7 +7591,7 @@
   // int -> Dimensions map
   .operandDimensions = {{0, {2, 2, 5, 2}}},
   // int -> FLOAT32 map
-  .float32Operands = {{0, {1.0f, -1.0f, 2.0f, -2.0f, 3.0f, -3.0f, 4.0f, -4.0f, 5.0f, -5.0f, 1.0f, -1.0f, 2.0f, -2.0f, 3.0f, -3.0f, 4.0f, -4.0f, 5.0f, -5.0f, 1.0f, -1.0f, 2.0f, -2.0f, 3.0f, -3.0f, 4.0f, -4.0f, 5.0f, -5.0f, 1.0f, -1.0f, 2.0f, -2.0f, 3.0f, -3.0f, 4.0f, -4.0f, 5.0f, -5.0f}}},
+  .float32Operands = {{0, {17.0f, -1.0f, 16.0f, -2.0f, 15.0f, -3.0f, 14.0f, -4.0f, 1.0f, -17.0f, 17.0f, -1.0f, 16.0f, -2.0f, 15.0f, -3.0f, 14.0f, -4.0f, 1.0f, -17.0f, 17.0f, -1.0f, 16.0f, -2.0f, 15.0f, -3.0f, 14.0f, -4.0f, 1.0f, -17.0f, 17.0f, -1.0f, 16.0f, -2.0f, 15.0f, -3.0f, 14.0f, -4.0f, 1.0f, -17.0f}}},
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
@@ -7614,7 +7614,7 @@
   // int -> Dimensions map
   .operandDimensions = {{0, {2, 2, 5, 2}}},
   // int -> FLOAT32 map
-  .float32Operands = {{0, {0.011656231f, 0.636408647f, 0.031684921f, 0.234121657f, 0.086128544f, 0.086128544f, 0.234121657f, 0.031684921f, 0.636408647f, 0.011656231f, 0.011656231f, 0.636408647f, 0.031684921f, 0.234121657f, 0.086128544f, 0.086128544f, 0.234121657f, 0.031684921f, 0.636408647f, 0.011656231f, 0.011656231f, 0.636408647f, 0.031684921f, 0.234121657f, 0.086128544f, 0.086128544f, 0.234121657f, 0.031684921f, 0.636408647f, 0.011656231f, 0.011656231f, 0.636408647f, 0.031684921f, 0.234121657f, 0.086128544f, 0.086128544f, 0.234121657f, 0.031684921f, 0.636408647f, 0.011656231f}}},
+  .float32Operands = {{0, {0.643914213228014f, 0.643914213228014f, 0.236882800924671f, 0.236882800924671f, 0.087144312427294f, 0.087144312427294f, 0.032058600957022f, 0.032058600957022f, 7.246299848982885e-08f, 7.246299848982885e-08f, 0.643914213228014f, 0.643914213228014f, 0.236882800924671f, 0.236882800924671f, 0.087144312427294f, 0.087144312427294f, 0.032058600957022f, 0.032058600957022f, 7.246299848982885e-08f, 7.246299848982885e-08f, 0.643914213228014f, 0.643914213228014f, 0.236882800924671f, 0.236882800924671f, 0.087144312427294f, 0.087144312427294f, 0.032058600957022f, 0.032058600957022f, 7.246299848982885e-08f, 7.246299848982885e-08f, 0.643914213228014f, 0.643914213228014f, 0.236882800924671f, 0.236882800924671f, 0.087144312427294f, 0.087144312427294f, 0.032058600957022f, 0.032058600957022f, 7.246299848982885e-08f, 7.246299848982885e-08f}}},
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
@@ -7648,7 +7648,7 @@
   // int -> Dimensions map
   .operandDimensions = {{0, {2, 2, 2, 5}}},
   // int -> FLOAT32 map
-  .float32Operands = {{0, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, -1.0f, -2.0f, -3.0f, -4.0f, -5.0f, 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, -1.0f, -2.0f, -3.0f, -4.0f, -5.0f, 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, -1.0f, -2.0f, -3.0f, -4.0f, -5.0f, 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, -1.0f, -2.0f, -3.0f, -4.0f, -5.0f}}},
+  .float32Operands = {{0, {17.0f, 16.0f, 15.0f, 14.0f, 1.0f, -1.0f, -2.0f, -3.0f, -4.0f, -17.0f, 17.0f, 16.0f, 15.0f, 14.0f, 1.0f, -1.0f, -2.0f, -3.0f, -4.0f, -17.0f, 17.0f, 16.0f, 15.0f, 14.0f, 1.0f, -1.0f, -2.0f, -3.0f, -4.0f, -17.0f, 17.0f, 16.0f, 15.0f, 14.0f, 1.0f, -1.0f, -2.0f, -3.0f, -4.0f, -17.0f}}},
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
@@ -7671,7 +7671,7 @@
   // int -> Dimensions map
   .operandDimensions = {{0, {2, 2, 2, 5}}},
   // int -> FLOAT32 map
-  .float32Operands = {{0, {0.011656231f, 0.031684921f, 0.086128544f, 0.234121657f, 0.636408647f, 0.636408647f, 0.234121657f, 0.086128544f, 0.031684921f, 0.011656231f, 0.011656231f, 0.031684921f, 0.086128544f, 0.234121657f, 0.636408647f, 0.636408647f, 0.234121657f, 0.086128544f, 0.031684921f, 0.011656231f, 0.011656231f, 0.031684921f, 0.086128544f, 0.234121657f, 0.636408647f, 0.636408647f, 0.234121657f, 0.086128544f, 0.031684921f, 0.011656231f, 0.011656231f, 0.031684921f, 0.086128544f, 0.234121657f, 0.636408647f, 0.636408647f, 0.234121657f, 0.086128544f, 0.031684921f, 0.011656231f}}},
+  .float32Operands = {{0, {0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f, 7.246299848982885e-08f, 0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f, 7.246299848982885e-08f, 0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f, 7.246299848982885e-08f, 0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f, 7.246299848982885e-08f, 0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f, 7.246299848982885e-08f, 0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f, 7.246299848982885e-08f, 0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f, 7.246299848982885e-08f, 0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f, 7.246299848982885e-08f}}},
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
@@ -7705,7 +7705,7 @@
   // int -> Dimensions map
   .operandDimensions = {{0, {2, 2, 2, 5}}},
   // int -> FLOAT32 map
-  .float32Operands = {{0, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, -1.0f, -2.0f, -3.0f, -4.0f, -5.0f, 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, -1.0f, -2.0f, -3.0f, -4.0f, -5.0f, 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, -1.0f, -2.0f, -3.0f, -4.0f, -5.0f, 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, -1.0f, -2.0f, -3.0f, -4.0f, -5.0f}}},
+  .float32Operands = {{0, {17.0f, 16.0f, 15.0f, 14.0f, 1.0f, -1.0f, -2.0f, -3.0f, -4.0f, -17.0f, 17.0f, 16.0f, 15.0f, 14.0f, 1.0f, -1.0f, -2.0f, -3.0f, -4.0f, -17.0f, 17.0f, 16.0f, 15.0f, 14.0f, 1.0f, -1.0f, -2.0f, -3.0f, -4.0f, -17.0f, 17.0f, 16.0f, 15.0f, 14.0f, 1.0f, -1.0f, -2.0f, -3.0f, -4.0f, -17.0f}}},
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
@@ -7728,7 +7728,7 @@
   // int -> Dimensions map
   .operandDimensions = {{0, {2, 2, 2, 5}}},
   // int -> FLOAT32 map
-  .float32Operands = {{0, {0.011656231f, 0.031684921f, 0.086128544f, 0.234121657f, 0.636408647f, 0.636408647f, 0.234121657f, 0.086128544f, 0.031684921f, 0.011656231f, 0.011656231f, 0.031684921f, 0.086128544f, 0.234121657f, 0.636408647f, 0.636408647f, 0.234121657f, 0.086128544f, 0.031684921f, 0.011656231f, 0.011656231f, 0.031684921f, 0.086128544f, 0.234121657f, 0.636408647f, 0.636408647f, 0.234121657f, 0.086128544f, 0.031684921f, 0.011656231f, 0.011656231f, 0.031684921f, 0.086128544f, 0.234121657f, 0.636408647f, 0.636408647f, 0.234121657f, 0.086128544f, 0.031684921f, 0.011656231f}}},
+  .float32Operands = {{0, {0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f, 7.246299848982885e-08f, 0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f, 7.246299848982885e-08f, 0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f, 7.246299848982885e-08f, 0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f, 7.246299848982885e-08f, 0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f, 7.246299848982885e-08f, 0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f, 7.246299848982885e-08f, 0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f, 7.246299848982885e-08f, 0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f, 7.246299848982885e-08f}}},
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
@@ -7762,7 +7762,7 @@
   // int -> Dimensions map
   .operandDimensions = {{0, {5, 2, 2}}},
   // int -> FLOAT32 map
-  .float32Operands = {{0, {1.0f, -1.0f, 1.0f, -1.0f, 2.0f, -2.0f, 2.0f, -2.0f, 3.0f, -3.0f, 3.0f, -3.0f, 4.0f, -4.0f, 4.0f, -4.0f, 5.0f, -5.0f, 5.0f, -5.0f}}},
+  .float32Operands = {{0, {17.0f, -1.0f, 17.0f, -1.0f, 16.0f, -2.0f, 16.0f, -2.0f, 15.0f, -3.0f, 15.0f, -3.0f, 14.0f, -4.0f, 14.0f, -4.0f, 1.0f, -17.0f, 1.0f, -17.0f}}},
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
@@ -7785,7 +7785,7 @@
   // int -> Dimensions map
   .operandDimensions = {{0, {5, 2, 2}}},
   // int -> FLOAT32 map
-  .float32Operands = {{0, {0.011656231f, 0.636408647f, 0.011656231f, 0.636408647f, 0.031684921f, 0.234121657f, 0.031684921f, 0.234121657f, 0.086128544f, 0.086128544f, 0.086128544f, 0.086128544f, 0.234121657f, 0.031684921f, 0.234121657f, 0.031684921f, 0.636408647f, 0.011656231f, 0.636408647f, 0.011656231f}}},
+  .float32Operands = {{0, {0.643914213228014f, 0.643914213228014f, 0.643914213228014f, 0.643914213228014f, 0.236882800924671f, 0.236882800924671f, 0.236882800924671f, 0.236882800924671f, 0.087144312427294f, 0.087144312427294f, 0.087144312427294f, 0.087144312427294f, 0.032058600957022f, 0.032058600957022f, 0.032058600957022f, 0.032058600957022f, 7.246299848982885e-08f, 7.246299848982885e-08f, 7.246299848982885e-08f, 7.246299848982885e-08f}}},
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
@@ -7819,7 +7819,7 @@
   // int -> Dimensions map
   .operandDimensions = {{0, {5, 2, 2}}},
   // int -> FLOAT32 map
-  .float32Operands = {{0, {1.0f, -1.0f, 1.0f, -1.0f, 2.0f, -2.0f, 2.0f, -2.0f, 3.0f, -3.0f, 3.0f, -3.0f, 4.0f, -4.0f, 4.0f, -4.0f, 5.0f, -5.0f, 5.0f, -5.0f}}},
+  .float32Operands = {{0, {17.0f, -1.0f, 17.0f, -1.0f, 16.0f, -2.0f, 16.0f, -2.0f, 15.0f, -3.0f, 15.0f, -3.0f, 14.0f, -4.0f, 14.0f, -4.0f, 1.0f, -17.0f, 1.0f, -17.0f}}},
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
@@ -7842,7 +7842,7 @@
   // int -> Dimensions map
   .operandDimensions = {{0, {5, 2, 2}}},
   // int -> FLOAT32 map
-  .float32Operands = {{0, {0.011656231f, 0.636408647f, 0.011656231f, 0.636408647f, 0.031684921f, 0.234121657f, 0.031684921f, 0.234121657f, 0.086128544f, 0.086128544f, 0.086128544f, 0.086128544f, 0.234121657f, 0.031684921f, 0.234121657f, 0.031684921f, 0.636408647f, 0.011656231f, 0.636408647f, 0.011656231f}}},
+  .float32Operands = {{0, {0.643914213228014f, 0.643914213228014f, 0.643914213228014f, 0.643914213228014f, 0.236882800924671f, 0.236882800924671f, 0.236882800924671f, 0.236882800924671f, 0.087144312427294f, 0.087144312427294f, 0.087144312427294f, 0.087144312427294f, 0.032058600957022f, 0.032058600957022f, 0.032058600957022f, 0.032058600957022f, 7.246299848982885e-08f, 7.246299848982885e-08f, 7.246299848982885e-08f, 7.246299848982885e-08f}}},
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
@@ -7876,7 +7876,7 @@
   // int -> Dimensions map
   .operandDimensions = {{0, {2, 5, 2}}},
   // int -> FLOAT32 map
-  .float32Operands = {{0, {1.0f, -1.0f, 2.0f, -2.0f, 3.0f, -3.0f, 4.0f, -4.0f, 5.0f, -5.0f, 1.0f, -1.0f, 2.0f, -2.0f, 3.0f, -3.0f, 4.0f, -4.0f, 5.0f, -5.0f}}},
+  .float32Operands = {{0, {17.0f, -1.0f, 16.0f, -2.0f, 15.0f, -3.0f, 14.0f, -4.0f, 1.0f, -17.0f, 17.0f, -1.0f, 16.0f, -2.0f, 15.0f, -3.0f, 14.0f, -4.0f, 1.0f, -17.0f}}},
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
@@ -7899,7 +7899,7 @@
   // int -> Dimensions map
   .operandDimensions = {{0, {2, 5, 2}}},
   // int -> FLOAT32 map
-  .float32Operands = {{0, {0.011656231f, 0.636408647f, 0.031684921f, 0.234121657f, 0.086128544f, 0.086128544f, 0.234121657f, 0.031684921f, 0.636408647f, 0.011656231f, 0.011656231f, 0.636408647f, 0.031684921f, 0.234121657f, 0.086128544f, 0.086128544f, 0.234121657f, 0.031684921f, 0.636408647f, 0.011656231f}}},
+  .float32Operands = {{0, {0.643914213228014f, 0.643914213228014f, 0.236882800924671f, 0.236882800924671f, 0.087144312427294f, 0.087144312427294f, 0.032058600957022f, 0.032058600957022f, 7.246299848982885e-08f, 7.246299848982885e-08f, 0.643914213228014f, 0.643914213228014f, 0.236882800924671f, 0.236882800924671f, 0.087144312427294f, 0.087144312427294f, 0.032058600957022f, 0.032058600957022f, 7.246299848982885e-08f, 7.246299848982885e-08f}}},
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
@@ -7933,7 +7933,7 @@
   // int -> Dimensions map
   .operandDimensions = {{0, {2, 5, 2}}},
   // int -> FLOAT32 map
-  .float32Operands = {{0, {1.0f, -1.0f, 2.0f, -2.0f, 3.0f, -3.0f, 4.0f, -4.0f, 5.0f, -5.0f, 1.0f, -1.0f, 2.0f, -2.0f, 3.0f, -3.0f, 4.0f, -4.0f, 5.0f, -5.0f}}},
+  .float32Operands = {{0, {17.0f, -1.0f, 16.0f, -2.0f, 15.0f, -3.0f, 14.0f, -4.0f, 1.0f, -17.0f, 17.0f, -1.0f, 16.0f, -2.0f, 15.0f, -3.0f, 14.0f, -4.0f, 1.0f, -17.0f}}},
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
@@ -7956,7 +7956,7 @@
   // int -> Dimensions map
   .operandDimensions = {{0, {2, 5, 2}}},
   // int -> FLOAT32 map
-  .float32Operands = {{0, {0.011656231f, 0.636408647f, 0.031684921f, 0.234121657f, 0.086128544f, 0.086128544f, 0.234121657f, 0.031684921f, 0.636408647f, 0.011656231f, 0.011656231f, 0.636408647f, 0.031684921f, 0.234121657f, 0.086128544f, 0.086128544f, 0.234121657f, 0.031684921f, 0.636408647f, 0.011656231f}}},
+  .float32Operands = {{0, {0.643914213228014f, 0.643914213228014f, 0.236882800924671f, 0.236882800924671f, 0.087144312427294f, 0.087144312427294f, 0.032058600957022f, 0.032058600957022f, 7.246299848982885e-08f, 7.246299848982885e-08f, 0.643914213228014f, 0.643914213228014f, 0.236882800924671f, 0.236882800924671f, 0.087144312427294f, 0.087144312427294f, 0.032058600957022f, 0.032058600957022f, 7.246299848982885e-08f, 7.246299848982885e-08f}}},
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
@@ -7990,7 +7990,7 @@
   // int -> Dimensions map
   .operandDimensions = {{0, {2, 2, 5}}},
   // int -> FLOAT32 map
-  .float32Operands = {{0, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, -1.0f, -2.0f, -3.0f, -4.0f, -5.0f, 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, -1.0f, -2.0f, -3.0f, -4.0f, -5.0f}}},
+  .float32Operands = {{0, {17.0f, 16.0f, 15.0f, 14.0f, 1.0f, -1.0f, -2.0f, -3.0f, -4.0f, -17.0f, 17.0f, 16.0f, 15.0f, 14.0f, 1.0f, -1.0f, -2.0f, -3.0f, -4.0f, -17.0f}}},
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
@@ -8013,7 +8013,7 @@
   // int -> Dimensions map
   .operandDimensions = {{0, {2, 2, 5}}},
   // int -> FLOAT32 map
-  .float32Operands = {{0, {0.011656231f, 0.031684921f, 0.086128544f, 0.234121657f, 0.636408647f, 0.636408647f, 0.234121657f, 0.086128544f, 0.031684921f, 0.011656231f, 0.011656231f, 0.031684921f, 0.086128544f, 0.234121657f, 0.636408647f, 0.636408647f, 0.234121657f, 0.086128544f, 0.031684921f, 0.011656231f}}},
+  .float32Operands = {{0, {0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f, 7.246299848982885e-08f, 0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f, 7.246299848982885e-08f, 0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f, 7.246299848982885e-08f, 0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f, 7.246299848982885e-08f}}},
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
@@ -8047,7 +8047,7 @@
   // int -> Dimensions map
   .operandDimensions = {{0, {2, 2, 5}}},
   // int -> FLOAT32 map
-  .float32Operands = {{0, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, -1.0f, -2.0f, -3.0f, -4.0f, -5.0f, 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, -1.0f, -2.0f, -3.0f, -4.0f, -5.0f}}},
+  .float32Operands = {{0, {17.0f, 16.0f, 15.0f, 14.0f, 1.0f, -1.0f, -2.0f, -3.0f, -4.0f, -17.0f, 17.0f, 16.0f, 15.0f, 14.0f, 1.0f, -1.0f, -2.0f, -3.0f, -4.0f, -17.0f}}},
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
@@ -8070,7 +8070,7 @@
   // int -> Dimensions map
   .operandDimensions = {{0, {2, 2, 5}}},
   // int -> FLOAT32 map
-  .float32Operands = {{0, {0.011656231f, 0.031684921f, 0.086128544f, 0.234121657f, 0.636408647f, 0.636408647f, 0.234121657f, 0.086128544f, 0.031684921f, 0.011656231f, 0.011656231f, 0.031684921f, 0.086128544f, 0.234121657f, 0.636408647f, 0.636408647f, 0.234121657f, 0.086128544f, 0.031684921f, 0.011656231f}}},
+  .float32Operands = {{0, {0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f, 7.246299848982885e-08f, 0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f, 7.246299848982885e-08f, 0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f, 7.246299848982885e-08f, 0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f, 7.246299848982885e-08f}}},
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
@@ -8104,7 +8104,7 @@
   // int -> Dimensions map
   .operandDimensions = {{0, {5, 2}}},
   // int -> FLOAT32 map
-  .float32Operands = {{0, {1.0f, -1.0f, 2.0f, -2.0f, 3.0f, -3.0f, 4.0f, -4.0f, 5.0f, -5.0f}}},
+  .float32Operands = {{0, {17.0f, -1.0f, 16.0f, -2.0f, 15.0f, -3.0f, 14.0f, -4.0f, 1.0f, -17.0f}}},
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
@@ -8127,7 +8127,7 @@
   // int -> Dimensions map
   .operandDimensions = {{0, {5, 2}}},
   // int -> FLOAT32 map
-  .float32Operands = {{0, {0.011656231f, 0.636408647f, 0.031684921f, 0.234121657f, 0.086128544f, 0.086128544f, 0.234121657f, 0.031684921f, 0.636408647f, 0.011656231f}}},
+  .float32Operands = {{0, {0.643914213228014f, 0.643914213228014f, 0.236882800924671f, 0.236882800924671f, 0.087144312427294f, 0.087144312427294f, 0.032058600957022f, 0.032058600957022f, 7.246299848982885e-08f, 7.246299848982885e-08f}}},
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
@@ -8161,7 +8161,7 @@
   // int -> Dimensions map
   .operandDimensions = {{0, {5, 2}}},
   // int -> FLOAT32 map
-  .float32Operands = {{0, {1.0f, -1.0f, 2.0f, -2.0f, 3.0f, -3.0f, 4.0f, -4.0f, 5.0f, -5.0f}}},
+  .float32Operands = {{0, {17.0f, -1.0f, 16.0f, -2.0f, 15.0f, -3.0f, 14.0f, -4.0f, 1.0f, -17.0f}}},
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
@@ -8184,7 +8184,7 @@
   // int -> Dimensions map
   .operandDimensions = {{0, {5, 2}}},
   // int -> FLOAT32 map
-  .float32Operands = {{0, {0.011656231f, 0.636408647f, 0.031684921f, 0.234121657f, 0.086128544f, 0.086128544f, 0.234121657f, 0.031684921f, 0.636408647f, 0.011656231f}}},
+  .float32Operands = {{0, {0.643914213228014f, 0.643914213228014f, 0.236882800924671f, 0.236882800924671f, 0.087144312427294f, 0.087144312427294f, 0.032058600957022f, 0.032058600957022f, 7.246299848982885e-08f, 7.246299848982885e-08f}}},
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
@@ -8218,7 +8218,7 @@
   // int -> Dimensions map
   .operandDimensions = {{0, {2, 5}}},
   // int -> FLOAT32 map
-  .float32Operands = {{0, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, -1.0f, -2.0f, -3.0f, -4.0f, -5.0f}}},
+  .float32Operands = {{0, {17.0f, 16.0f, 15.0f, 14.0f, 1.0f, -1.0f, -2.0f, -3.0f, -4.0f, -17.0f}}},
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
@@ -8241,7 +8241,7 @@
   // int -> Dimensions map
   .operandDimensions = {{0, {2, 5}}},
   // int -> FLOAT32 map
-  .float32Operands = {{0, {0.011656231f, 0.031684921f, 0.086128544f, 0.234121657f, 0.636408647f, 0.636408647f, 0.234121657f, 0.086128544f, 0.031684921f, 0.011656231f}}},
+  .float32Operands = {{0, {0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f, 7.246299848982885e-08f, 0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f, 7.246299848982885e-08f}}},
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
@@ -8275,7 +8275,7 @@
   // int -> Dimensions map
   .operandDimensions = {{0, {2, 5}}},
   // int -> FLOAT32 map
-  .float32Operands = {{0, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, -1.0f, -2.0f, -3.0f, -4.0f, -5.0f}}},
+  .float32Operands = {{0, {17.0f, 16.0f, 15.0f, 14.0f, 1.0f, -1.0f, -2.0f, -3.0f, -4.0f, -17.0f}}},
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
@@ -8298,7 +8298,7 @@
   // int -> Dimensions map
   .operandDimensions = {{0, {2, 5}}},
   // int -> FLOAT32 map
-  .float32Operands = {{0, {0.011656231f, 0.031684921f, 0.086128544f, 0.234121657f, 0.636408647f, 0.636408647f, 0.234121657f, 0.086128544f, 0.031684921f, 0.011656231f}}},
+  .float32Operands = {{0, {0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f, 7.246299848982885e-08f, 0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f, 7.246299848982885e-08f}}},
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
@@ -8332,7 +8332,7 @@
   // int -> Dimensions map
   .operandDimensions = {{0, {5}}},
   // int -> FLOAT32 map
-  .float32Operands = {{0, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f}}},
+  .float32Operands = {{0, {17.0f, 16.0f, 15.0f, 14.0f, 1.0f}}},
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
@@ -8355,7 +8355,7 @@
   // int -> Dimensions map
   .operandDimensions = {{0, {5}}},
   // int -> FLOAT32 map
-  .float32Operands = {{0, {0.011656231f, 0.031684921f, 0.086128544f, 0.234121657f, 0.636408647f}}},
+  .float32Operands = {{0, {0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f, 7.246299848982885e-08f}}},
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
@@ -8389,7 +8389,7 @@
   // int -> Dimensions map
   .operandDimensions = {{0, {5}}},
   // int -> FLOAT32 map
-  .float32Operands = {{0, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f}}},
+  .float32Operands = {{0, {17.0f, 16.0f, 15.0f, 14.0f, 1.0f}}},
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
@@ -8412,7 +8412,7 @@
   // int -> Dimensions map
   .operandDimensions = {{0, {5}}},
   // int -> FLOAT32 map
-  .float32Operands = {{0, {0.011656231f, 0.031684921f, 0.086128544f, 0.234121657f, 0.636408647f}}},
+  .float32Operands = {{0, {0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f, 7.246299848982885e-08f}}},
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
@@ -8446,7 +8446,7 @@
   // int -> Dimensions map
   .operandDimensions = {{0, {5, 2, 2, 2}}},
   // int -> FLOAT32 map
-  .float32Operands = {{0, {1.0f, -1.0f, 1.0f, -1.0f, 1.0f, -1.0f, 1.0f, -1.0f, 2.0f, -2.0f, 2.0f, -2.0f, 2.0f, -2.0f, 2.0f, -2.0f, 3.0f, -3.0f, 3.0f, -3.0f, 3.0f, -3.0f, 3.0f, -3.0f, 4.0f, -4.0f, 4.0f, -4.0f, 4.0f, -4.0f, 4.0f, -4.0f, 5.0f, -5.0f, 5.0f, -5.0f, 5.0f, -5.0f, 5.0f, -5.0f}}},
+  .float32Operands = {{0, {17.0f, -1.0f, 17.0f, -1.0f, 17.0f, -1.0f, 17.0f, -1.0f, 16.0f, -2.0f, 16.0f, -2.0f, 16.0f, -2.0f, 16.0f, -2.0f, 15.0f, -3.0f, 15.0f, -3.0f, 15.0f, -3.0f, 15.0f, -3.0f, 14.0f, -4.0f, 14.0f, -4.0f, 14.0f, -4.0f, 14.0f, -4.0f, 1.0f, -17.0f, 1.0f, -17.0f, 1.0f, -17.0f, 1.0f, -17.0f}}},
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
@@ -8469,7 +8469,7 @@
   // int -> Dimensions map
   .operandDimensions = {{0, {5, 2, 2, 2}}},
   // int -> FLOAT32 map
-  .float32Operands = {{0, {0.011656231f, 0.636408647f, 0.011656231f, 0.636408647f, 0.011656231f, 0.636408647f, 0.011656231f, 0.636408647f, 0.031684921f, 0.234121657f, 0.031684921f, 0.234121657f, 0.031684921f, 0.234121657f, 0.031684921f, 0.234121657f, 0.086128544f, 0.086128544f, 0.086128544f, 0.086128544f, 0.086128544f, 0.086128544f, 0.086128544f, 0.086128544f, 0.234121657f, 0.031684921f, 0.234121657f, 0.031684921f, 0.234121657f, 0.031684921f, 0.234121657f, 0.031684921f, 0.636408647f, 0.011656231f, 0.636408647f, 0.011656231f, 0.636408647f, 0.011656231f, 0.636408647f, 0.011656231f}}},
+  .float32Operands = {{0, {0.643914213228014f, 0.643914213228014f, 0.643914213228014f, 0.643914213228014f, 0.643914213228014f, 0.643914213228014f, 0.643914213228014f, 0.643914213228014f, 0.236882800924671f, 0.236882800924671f, 0.236882800924671f, 0.236882800924671f, 0.236882800924671f, 0.236882800924671f, 0.236882800924671f, 0.236882800924671f, 0.087144312427294f, 0.087144312427294f, 0.087144312427294f, 0.087144312427294f, 0.087144312427294f, 0.087144312427294f, 0.087144312427294f, 0.087144312427294f, 0.032058600957022f, 0.032058600957022f, 0.032058600957022f, 0.032058600957022f, 0.032058600957022f, 0.032058600957022f, 0.032058600957022f, 0.032058600957022f, 7.246299848982885e-08f, 7.246299848982885e-08f, 7.246299848982885e-08f, 7.246299848982885e-08f, 7.246299848982885e-08f, 7.246299848982885e-08f, 7.246299848982885e-08f, 7.246299848982885e-08f}}},
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
@@ -8503,7 +8503,7 @@
   // int -> Dimensions map
   .operandDimensions = {{0, {5, 2, 2, 2}}},
   // int -> FLOAT32 map
-  .float32Operands = {{0, {1.0f, -1.0f, 1.0f, -1.0f, 1.0f, -1.0f, 1.0f, -1.0f, 2.0f, -2.0f, 2.0f, -2.0f, 2.0f, -2.0f, 2.0f, -2.0f, 3.0f, -3.0f, 3.0f, -3.0f, 3.0f, -3.0f, 3.0f, -3.0f, 4.0f, -4.0f, 4.0f, -4.0f, 4.0f, -4.0f, 4.0f, -4.0f, 5.0f, -5.0f, 5.0f, -5.0f, 5.0f, -5.0f, 5.0f, -5.0f}}},
+  .float32Operands = {{0, {17.0f, -1.0f, 17.0f, -1.0f, 17.0f, -1.0f, 17.0f, -1.0f, 16.0f, -2.0f, 16.0f, -2.0f, 16.0f, -2.0f, 16.0f, -2.0f, 15.0f, -3.0f, 15.0f, -3.0f, 15.0f, -3.0f, 15.0f, -3.0f, 14.0f, -4.0f, 14.0f, -4.0f, 14.0f, -4.0f, 14.0f, -4.0f, 1.0f, -17.0f, 1.0f, -17.0f, 1.0f, -17.0f, 1.0f, -17.0f}}},
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
@@ -8526,7 +8526,7 @@
   // int -> Dimensions map
   .operandDimensions = {{0, {5, 2, 2, 2}}},
   // int -> FLOAT32 map
-  .float32Operands = {{0, {0.011656231f, 0.636408647f, 0.011656231f, 0.636408647f, 0.011656231f, 0.636408647f, 0.011656231f, 0.636408647f, 0.031684921f, 0.234121657f, 0.031684921f, 0.234121657f, 0.031684921f, 0.234121657f, 0.031684921f, 0.234121657f, 0.086128544f, 0.086128544f, 0.086128544f, 0.086128544f, 0.086128544f, 0.086128544f, 0.086128544f, 0.086128544f, 0.234121657f, 0.031684921f, 0.234121657f, 0.031684921f, 0.234121657f, 0.031684921f, 0.234121657f, 0.031684921f, 0.636408647f, 0.011656231f, 0.636408647f, 0.011656231f, 0.636408647f, 0.011656231f, 0.636408647f, 0.011656231f}}},
+  .float32Operands = {{0, {0.643914213228014f, 0.643914213228014f, 0.643914213228014f, 0.643914213228014f, 0.643914213228014f, 0.643914213228014f, 0.643914213228014f, 0.643914213228014f, 0.236882800924671f, 0.236882800924671f, 0.236882800924671f, 0.236882800924671f, 0.236882800924671f, 0.236882800924671f, 0.236882800924671f, 0.236882800924671f, 0.087144312427294f, 0.087144312427294f, 0.087144312427294f, 0.087144312427294f, 0.087144312427294f, 0.087144312427294f, 0.087144312427294f, 0.087144312427294f, 0.032058600957022f, 0.032058600957022f, 0.032058600957022f, 0.032058600957022f, 0.032058600957022f, 0.032058600957022f, 0.032058600957022f, 0.032058600957022f, 7.246299848982885e-08f, 7.246299848982885e-08f, 7.246299848982885e-08f, 7.246299848982885e-08f, 7.246299848982885e-08f, 7.246299848982885e-08f, 7.246299848982885e-08f, 7.246299848982885e-08f}}},
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
@@ -8560,7 +8560,7 @@
   // int -> Dimensions map
   .operandDimensions = {{0, {2, 5, 2, 2}}},
   // int -> FLOAT32 map
-  .float32Operands = {{0, {1.0f, -1.0f, 1.0f, -1.0f, 2.0f, -2.0f, 2.0f, -2.0f, 3.0f, -3.0f, 3.0f, -3.0f, 4.0f, -4.0f, 4.0f, -4.0f, 5.0f, -5.0f, 5.0f, -5.0f, 1.0f, -1.0f, 1.0f, -1.0f, 2.0f, -2.0f, 2.0f, -2.0f, 3.0f, -3.0f, 3.0f, -3.0f, 4.0f, -4.0f, 4.0f, -4.0f, 5.0f, -5.0f, 5.0f, -5.0f}}},
+  .float32Operands = {{0, {17.0f, -1.0f, 17.0f, -1.0f, 16.0f, -2.0f, 16.0f, -2.0f, 15.0f, -3.0f, 15.0f, -3.0f, 14.0f, -4.0f, 14.0f, -4.0f, 1.0f, -17.0f, 1.0f, -17.0f, 17.0f, -1.0f, 17.0f, -1.0f, 16.0f, -2.0f, 16.0f, -2.0f, 15.0f, -3.0f, 15.0f, -3.0f, 14.0f, -4.0f, 14.0f, -4.0f, 1.0f, -17.0f, 1.0f, -17.0f}}},
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
@@ -8583,7 +8583,7 @@
   // int -> Dimensions map
   .operandDimensions = {{0, {2, 5, 2, 2}}},
   // int -> FLOAT32 map
-  .float32Operands = {{0, {0.011656231f, 0.636408647f, 0.011656231f, 0.636408647f, 0.031684921f, 0.234121657f, 0.031684921f, 0.234121657f, 0.086128544f, 0.086128544f, 0.086128544f, 0.086128544f, 0.234121657f, 0.031684921f, 0.234121657f, 0.031684921f, 0.636408647f, 0.011656231f, 0.636408647f, 0.011656231f, 0.011656231f, 0.636408647f, 0.011656231f, 0.636408647f, 0.031684921f, 0.234121657f, 0.031684921f, 0.234121657f, 0.086128544f, 0.086128544f, 0.086128544f, 0.086128544f, 0.234121657f, 0.031684921f, 0.234121657f, 0.031684921f, 0.636408647f, 0.011656231f, 0.636408647f, 0.011656231f}}},
+  .float32Operands = {{0, {0.643914213228014f, 0.643914213228014f, 0.643914213228014f, 0.643914213228014f, 0.236882800924671f, 0.236882800924671f, 0.236882800924671f, 0.236882800924671f, 0.087144312427294f, 0.087144312427294f, 0.087144312427294f, 0.087144312427294f, 0.032058600957022f, 0.032058600957022f, 0.032058600957022f, 0.032058600957022f, 7.246299848982885e-08f, 7.246299848982885e-08f, 7.246299848982885e-08f, 7.246299848982885e-08f, 0.643914213228014f, 0.643914213228014f, 0.643914213228014f, 0.643914213228014f, 0.236882800924671f, 0.236882800924671f, 0.236882800924671f, 0.236882800924671f, 0.087144312427294f, 0.087144312427294f, 0.087144312427294f, 0.087144312427294f, 0.032058600957022f, 0.032058600957022f, 0.032058600957022f, 0.032058600957022f, 7.246299848982885e-08f, 7.246299848982885e-08f, 7.246299848982885e-08f, 7.246299848982885e-08f}}},
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
@@ -8617,7 +8617,7 @@
   // int -> Dimensions map
   .operandDimensions = {{0, {2, 5, 2, 2}}},
   // int -> FLOAT32 map
-  .float32Operands = {{0, {1.0f, -1.0f, 1.0f, -1.0f, 2.0f, -2.0f, 2.0f, -2.0f, 3.0f, -3.0f, 3.0f, -3.0f, 4.0f, -4.0f, 4.0f, -4.0f, 5.0f, -5.0f, 5.0f, -5.0f, 1.0f, -1.0f, 1.0f, -1.0f, 2.0f, -2.0f, 2.0f, -2.0f, 3.0f, -3.0f, 3.0f, -3.0f, 4.0f, -4.0f, 4.0f, -4.0f, 5.0f, -5.0f, 5.0f, -5.0f}}},
+  .float32Operands = {{0, {17.0f, -1.0f, 17.0f, -1.0f, 16.0f, -2.0f, 16.0f, -2.0f, 15.0f, -3.0f, 15.0f, -3.0f, 14.0f, -4.0f, 14.0f, -4.0f, 1.0f, -17.0f, 1.0f, -17.0f, 17.0f, -1.0f, 17.0f, -1.0f, 16.0f, -2.0f, 16.0f, -2.0f, 15.0f, -3.0f, 15.0f, -3.0f, 14.0f, -4.0f, 14.0f, -4.0f, 1.0f, -17.0f, 1.0f, -17.0f}}},
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
@@ -8640,7 +8640,7 @@
   // int -> Dimensions map
   .operandDimensions = {{0, {2, 5, 2, 2}}},
   // int -> FLOAT32 map
-  .float32Operands = {{0, {0.011656231f, 0.636408647f, 0.011656231f, 0.636408647f, 0.031684921f, 0.234121657f, 0.031684921f, 0.234121657f, 0.086128544f, 0.086128544f, 0.086128544f, 0.086128544f, 0.234121657f, 0.031684921f, 0.234121657f, 0.031684921f, 0.636408647f, 0.011656231f, 0.636408647f, 0.011656231f, 0.011656231f, 0.636408647f, 0.011656231f, 0.636408647f, 0.031684921f, 0.234121657f, 0.031684921f, 0.234121657f, 0.086128544f, 0.086128544f, 0.086128544f, 0.086128544f, 0.234121657f, 0.031684921f, 0.234121657f, 0.031684921f, 0.636408647f, 0.011656231f, 0.636408647f, 0.011656231f}}},
+  .float32Operands = {{0, {0.643914213228014f, 0.643914213228014f, 0.643914213228014f, 0.643914213228014f, 0.236882800924671f, 0.236882800924671f, 0.236882800924671f, 0.236882800924671f, 0.087144312427294f, 0.087144312427294f, 0.087144312427294f, 0.087144312427294f, 0.032058600957022f, 0.032058600957022f, 0.032058600957022f, 0.032058600957022f, 7.246299848982885e-08f, 7.246299848982885e-08f, 7.246299848982885e-08f, 7.246299848982885e-08f, 0.643914213228014f, 0.643914213228014f, 0.643914213228014f, 0.643914213228014f, 0.236882800924671f, 0.236882800924671f, 0.236882800924671f, 0.236882800924671f, 0.087144312427294f, 0.087144312427294f, 0.087144312427294f, 0.087144312427294f, 0.032058600957022f, 0.032058600957022f, 0.032058600957022f, 0.032058600957022f, 7.246299848982885e-08f, 7.246299848982885e-08f, 7.246299848982885e-08f, 7.246299848982885e-08f}}},
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
@@ -8674,7 +8674,7 @@
   // int -> Dimensions map
   .operandDimensions = {{0, {2, 2, 5, 2}}},
   // int -> FLOAT32 map
-  .float32Operands = {{0, {1.0f, -1.0f, 2.0f, -2.0f, 3.0f, -3.0f, 4.0f, -4.0f, 5.0f, -5.0f, 1.0f, -1.0f, 2.0f, -2.0f, 3.0f, -3.0f, 4.0f, -4.0f, 5.0f, -5.0f, 1.0f, -1.0f, 2.0f, -2.0f, 3.0f, -3.0f, 4.0f, -4.0f, 5.0f, -5.0f, 1.0f, -1.0f, 2.0f, -2.0f, 3.0f, -3.0f, 4.0f, -4.0f, 5.0f, -5.0f}}},
+  .float32Operands = {{0, {17.0f, -1.0f, 16.0f, -2.0f, 15.0f, -3.0f, 14.0f, -4.0f, 1.0f, -17.0f, 17.0f, -1.0f, 16.0f, -2.0f, 15.0f, -3.0f, 14.0f, -4.0f, 1.0f, -17.0f, 17.0f, -1.0f, 16.0f, -2.0f, 15.0f, -3.0f, 14.0f, -4.0f, 1.0f, -17.0f, 17.0f, -1.0f, 16.0f, -2.0f, 15.0f, -3.0f, 14.0f, -4.0f, 1.0f, -17.0f}}},
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
@@ -8697,7 +8697,7 @@
   // int -> Dimensions map
   .operandDimensions = {{0, {2, 2, 5, 2}}},
   // int -> FLOAT32 map
-  .float32Operands = {{0, {0.011656231f, 0.636408647f, 0.031684921f, 0.234121657f, 0.086128544f, 0.086128544f, 0.234121657f, 0.031684921f, 0.636408647f, 0.011656231f, 0.011656231f, 0.636408647f, 0.031684921f, 0.234121657f, 0.086128544f, 0.086128544f, 0.234121657f, 0.031684921f, 0.636408647f, 0.011656231f, 0.011656231f, 0.636408647f, 0.031684921f, 0.234121657f, 0.086128544f, 0.086128544f, 0.234121657f, 0.031684921f, 0.636408647f, 0.011656231f, 0.011656231f, 0.636408647f, 0.031684921f, 0.234121657f, 0.086128544f, 0.086128544f, 0.234121657f, 0.031684921f, 0.636408647f, 0.011656231f}}},
+  .float32Operands = {{0, {0.643914213228014f, 0.643914213228014f, 0.236882800924671f, 0.236882800924671f, 0.087144312427294f, 0.087144312427294f, 0.032058600957022f, 0.032058600957022f, 7.246299848982885e-08f, 7.246299848982885e-08f, 0.643914213228014f, 0.643914213228014f, 0.236882800924671f, 0.236882800924671f, 0.087144312427294f, 0.087144312427294f, 0.032058600957022f, 0.032058600957022f, 7.246299848982885e-08f, 7.246299848982885e-08f, 0.643914213228014f, 0.643914213228014f, 0.236882800924671f, 0.236882800924671f, 0.087144312427294f, 0.087144312427294f, 0.032058600957022f, 0.032058600957022f, 7.246299848982885e-08f, 7.246299848982885e-08f, 0.643914213228014f, 0.643914213228014f, 0.236882800924671f, 0.236882800924671f, 0.087144312427294f, 0.087144312427294f, 0.032058600957022f, 0.032058600957022f, 7.246299848982885e-08f, 7.246299848982885e-08f}}},
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
@@ -8731,7 +8731,7 @@
   // int -> Dimensions map
   .operandDimensions = {{0, {2, 2, 5, 2}}},
   // int -> FLOAT32 map
-  .float32Operands = {{0, {1.0f, -1.0f, 2.0f, -2.0f, 3.0f, -3.0f, 4.0f, -4.0f, 5.0f, -5.0f, 1.0f, -1.0f, 2.0f, -2.0f, 3.0f, -3.0f, 4.0f, -4.0f, 5.0f, -5.0f, 1.0f, -1.0f, 2.0f, -2.0f, 3.0f, -3.0f, 4.0f, -4.0f, 5.0f, -5.0f, 1.0f, -1.0f, 2.0f, -2.0f, 3.0f, -3.0f, 4.0f, -4.0f, 5.0f, -5.0f}}},
+  .float32Operands = {{0, {17.0f, -1.0f, 16.0f, -2.0f, 15.0f, -3.0f, 14.0f, -4.0f, 1.0f, -17.0f, 17.0f, -1.0f, 16.0f, -2.0f, 15.0f, -3.0f, 14.0f, -4.0f, 1.0f, -17.0f, 17.0f, -1.0f, 16.0f, -2.0f, 15.0f, -3.0f, 14.0f, -4.0f, 1.0f, -17.0f, 17.0f, -1.0f, 16.0f, -2.0f, 15.0f, -3.0f, 14.0f, -4.0f, 1.0f, -17.0f}}},
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
@@ -8754,7 +8754,7 @@
   // int -> Dimensions map
   .operandDimensions = {{0, {2, 2, 5, 2}}},
   // int -> FLOAT32 map
-  .float32Operands = {{0, {0.011656231f, 0.636408647f, 0.031684921f, 0.234121657f, 0.086128544f, 0.086128544f, 0.234121657f, 0.031684921f, 0.636408647f, 0.011656231f, 0.011656231f, 0.636408647f, 0.031684921f, 0.234121657f, 0.086128544f, 0.086128544f, 0.234121657f, 0.031684921f, 0.636408647f, 0.011656231f, 0.011656231f, 0.636408647f, 0.031684921f, 0.234121657f, 0.086128544f, 0.086128544f, 0.234121657f, 0.031684921f, 0.636408647f, 0.011656231f, 0.011656231f, 0.636408647f, 0.031684921f, 0.234121657f, 0.086128544f, 0.086128544f, 0.234121657f, 0.031684921f, 0.636408647f, 0.011656231f}}},
+  .float32Operands = {{0, {0.643914213228014f, 0.643914213228014f, 0.236882800924671f, 0.236882800924671f, 0.087144312427294f, 0.087144312427294f, 0.032058600957022f, 0.032058600957022f, 7.246299848982885e-08f, 7.246299848982885e-08f, 0.643914213228014f, 0.643914213228014f, 0.236882800924671f, 0.236882800924671f, 0.087144312427294f, 0.087144312427294f, 0.032058600957022f, 0.032058600957022f, 7.246299848982885e-08f, 7.246299848982885e-08f, 0.643914213228014f, 0.643914213228014f, 0.236882800924671f, 0.236882800924671f, 0.087144312427294f, 0.087144312427294f, 0.032058600957022f, 0.032058600957022f, 7.246299848982885e-08f, 7.246299848982885e-08f, 0.643914213228014f, 0.643914213228014f, 0.236882800924671f, 0.236882800924671f, 0.087144312427294f, 0.087144312427294f, 0.032058600957022f, 0.032058600957022f, 7.246299848982885e-08f, 7.246299848982885e-08f}}},
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
@@ -8788,7 +8788,7 @@
   // int -> Dimensions map
   .operandDimensions = {{0, {2, 2, 2, 5}}},
   // int -> FLOAT32 map
-  .float32Operands = {{0, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, -1.0f, -2.0f, -3.0f, -4.0f, -5.0f, 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, -1.0f, -2.0f, -3.0f, -4.0f, -5.0f, 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, -1.0f, -2.0f, -3.0f, -4.0f, -5.0f, 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, -1.0f, -2.0f, -3.0f, -4.0f, -5.0f}}},
+  .float32Operands = {{0, {17.0f, 16.0f, 15.0f, 14.0f, 1.0f, -1.0f, -2.0f, -3.0f, -4.0f, -17.0f, 17.0f, 16.0f, 15.0f, 14.0f, 1.0f, -1.0f, -2.0f, -3.0f, -4.0f, -17.0f, 17.0f, 16.0f, 15.0f, 14.0f, 1.0f, -1.0f, -2.0f, -3.0f, -4.0f, -17.0f, 17.0f, 16.0f, 15.0f, 14.0f, 1.0f, -1.0f, -2.0f, -3.0f, -4.0f, -17.0f}}},
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
@@ -8811,7 +8811,7 @@
   // int -> Dimensions map
   .operandDimensions = {{0, {2, 2, 2, 5}}},
   // int -> FLOAT32 map
-  .float32Operands = {{0, {0.011656231f, 0.031684921f, 0.086128544f, 0.234121657f, 0.636408647f, 0.636408647f, 0.234121657f, 0.086128544f, 0.031684921f, 0.011656231f, 0.011656231f, 0.031684921f, 0.086128544f, 0.234121657f, 0.636408647f, 0.636408647f, 0.234121657f, 0.086128544f, 0.031684921f, 0.011656231f, 0.011656231f, 0.031684921f, 0.086128544f, 0.234121657f, 0.636408647f, 0.636408647f, 0.234121657f, 0.086128544f, 0.031684921f, 0.011656231f, 0.011656231f, 0.031684921f, 0.086128544f, 0.234121657f, 0.636408647f, 0.636408647f, 0.234121657f, 0.086128544f, 0.031684921f, 0.011656231f}}},
+  .float32Operands = {{0, {0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f, 7.246299848982885e-08f, 0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f, 7.246299848982885e-08f, 0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f, 7.246299848982885e-08f, 0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f, 7.246299848982885e-08f, 0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f, 7.246299848982885e-08f, 0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f, 7.246299848982885e-08f, 0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f, 7.246299848982885e-08f, 0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f, 7.246299848982885e-08f}}},
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
@@ -8845,7 +8845,7 @@
   // int -> Dimensions map
   .operandDimensions = {{0, {2, 2, 2, 5}}},
   // int -> FLOAT32 map
-  .float32Operands = {{0, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, -1.0f, -2.0f, -3.0f, -4.0f, -5.0f, 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, -1.0f, -2.0f, -3.0f, -4.0f, -5.0f, 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, -1.0f, -2.0f, -3.0f, -4.0f, -5.0f, 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, -1.0f, -2.0f, -3.0f, -4.0f, -5.0f}}},
+  .float32Operands = {{0, {17.0f, 16.0f, 15.0f, 14.0f, 1.0f, -1.0f, -2.0f, -3.0f, -4.0f, -17.0f, 17.0f, 16.0f, 15.0f, 14.0f, 1.0f, -1.0f, -2.0f, -3.0f, -4.0f, -17.0f, 17.0f, 16.0f, 15.0f, 14.0f, 1.0f, -1.0f, -2.0f, -3.0f, -4.0f, -17.0f, 17.0f, 16.0f, 15.0f, 14.0f, 1.0f, -1.0f, -2.0f, -3.0f, -4.0f, -17.0f}}},
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
@@ -8868,7 +8868,7 @@
   // int -> Dimensions map
   .operandDimensions = {{0, {2, 2, 2, 5}}},
   // int -> FLOAT32 map
-  .float32Operands = {{0, {0.011656231f, 0.031684921f, 0.086128544f, 0.234121657f, 0.636408647f, 0.636408647f, 0.234121657f, 0.086128544f, 0.031684921f, 0.011656231f, 0.011656231f, 0.031684921f, 0.086128544f, 0.234121657f, 0.636408647f, 0.636408647f, 0.234121657f, 0.086128544f, 0.031684921f, 0.011656231f, 0.011656231f, 0.031684921f, 0.086128544f, 0.234121657f, 0.636408647f, 0.636408647f, 0.234121657f, 0.086128544f, 0.031684921f, 0.011656231f, 0.011656231f, 0.031684921f, 0.086128544f, 0.234121657f, 0.636408647f, 0.636408647f, 0.234121657f, 0.086128544f, 0.031684921f, 0.011656231f}}},
+  .float32Operands = {{0, {0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f, 7.246299848982885e-08f, 0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f, 7.246299848982885e-08f, 0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f, 7.246299848982885e-08f, 0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f, 7.246299848982885e-08f, 0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f, 7.246299848982885e-08f, 0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f, 7.246299848982885e-08f, 0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f, 7.246299848982885e-08f, 0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f, 7.246299848982885e-08f}}},
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
@@ -8902,7 +8902,7 @@
   // int -> Dimensions map
   .operandDimensions = {{0, {5, 2, 2}}},
   // int -> FLOAT32 map
-  .float32Operands = {{0, {1.0f, -1.0f, 1.0f, -1.0f, 2.0f, -2.0f, 2.0f, -2.0f, 3.0f, -3.0f, 3.0f, -3.0f, 4.0f, -4.0f, 4.0f, -4.0f, 5.0f, -5.0f, 5.0f, -5.0f}}},
+  .float32Operands = {{0, {17.0f, -1.0f, 17.0f, -1.0f, 16.0f, -2.0f, 16.0f, -2.0f, 15.0f, -3.0f, 15.0f, -3.0f, 14.0f, -4.0f, 14.0f, -4.0f, 1.0f, -17.0f, 1.0f, -17.0f}}},
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
@@ -8925,7 +8925,7 @@
   // int -> Dimensions map
   .operandDimensions = {{0, {5, 2, 2}}},
   // int -> FLOAT32 map
-  .float32Operands = {{0, {0.011656231f, 0.636408647f, 0.011656231f, 0.636408647f, 0.031684921f, 0.234121657f, 0.031684921f, 0.234121657f, 0.086128544f, 0.086128544f, 0.086128544f, 0.086128544f, 0.234121657f, 0.031684921f, 0.234121657f, 0.031684921f, 0.636408647f, 0.011656231f, 0.636408647f, 0.011656231f}}},
+  .float32Operands = {{0, {0.643914213228014f, 0.643914213228014f, 0.643914213228014f, 0.643914213228014f, 0.236882800924671f, 0.236882800924671f, 0.236882800924671f, 0.236882800924671f, 0.087144312427294f, 0.087144312427294f, 0.087144312427294f, 0.087144312427294f, 0.032058600957022f, 0.032058600957022f, 0.032058600957022f, 0.032058600957022f, 7.246299848982885e-08f, 7.246299848982885e-08f, 7.246299848982885e-08f, 7.246299848982885e-08f}}},
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
@@ -8959,7 +8959,7 @@
   // int -> Dimensions map
   .operandDimensions = {{0, {5, 2, 2}}},
   // int -> FLOAT32 map
-  .float32Operands = {{0, {1.0f, -1.0f, 1.0f, -1.0f, 2.0f, -2.0f, 2.0f, -2.0f, 3.0f, -3.0f, 3.0f, -3.0f, 4.0f, -4.0f, 4.0f, -4.0f, 5.0f, -5.0f, 5.0f, -5.0f}}},
+  .float32Operands = {{0, {17.0f, -1.0f, 17.0f, -1.0f, 16.0f, -2.0f, 16.0f, -2.0f, 15.0f, -3.0f, 15.0f, -3.0f, 14.0f, -4.0f, 14.0f, -4.0f, 1.0f, -17.0f, 1.0f, -17.0f}}},
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
@@ -8982,7 +8982,7 @@
   // int -> Dimensions map
   .operandDimensions = {{0, {5, 2, 2}}},
   // int -> FLOAT32 map
-  .float32Operands = {{0, {0.011656231f, 0.636408647f, 0.011656231f, 0.636408647f, 0.031684921f, 0.234121657f, 0.031684921f, 0.234121657f, 0.086128544f, 0.086128544f, 0.086128544f, 0.086128544f, 0.234121657f, 0.031684921f, 0.234121657f, 0.031684921f, 0.636408647f, 0.011656231f, 0.636408647f, 0.011656231f}}},
+  .float32Operands = {{0, {0.643914213228014f, 0.643914213228014f, 0.643914213228014f, 0.643914213228014f, 0.236882800924671f, 0.236882800924671f, 0.236882800924671f, 0.236882800924671f, 0.087144312427294f, 0.087144312427294f, 0.087144312427294f, 0.087144312427294f, 0.032058600957022f, 0.032058600957022f, 0.032058600957022f, 0.032058600957022f, 7.246299848982885e-08f, 7.246299848982885e-08f, 7.246299848982885e-08f, 7.246299848982885e-08f}}},
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
@@ -9016,7 +9016,7 @@
   // int -> Dimensions map
   .operandDimensions = {{0, {2, 5, 2}}},
   // int -> FLOAT32 map
-  .float32Operands = {{0, {1.0f, -1.0f, 2.0f, -2.0f, 3.0f, -3.0f, 4.0f, -4.0f, 5.0f, -5.0f, 1.0f, -1.0f, 2.0f, -2.0f, 3.0f, -3.0f, 4.0f, -4.0f, 5.0f, -5.0f}}},
+  .float32Operands = {{0, {17.0f, -1.0f, 16.0f, -2.0f, 15.0f, -3.0f, 14.0f, -4.0f, 1.0f, -17.0f, 17.0f, -1.0f, 16.0f, -2.0f, 15.0f, -3.0f, 14.0f, -4.0f, 1.0f, -17.0f}}},
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
@@ -9039,7 +9039,7 @@
   // int -> Dimensions map
   .operandDimensions = {{0, {2, 5, 2}}},
   // int -> FLOAT32 map
-  .float32Operands = {{0, {0.011656231f, 0.636408647f, 0.031684921f, 0.234121657f, 0.086128544f, 0.086128544f, 0.234121657f, 0.031684921f, 0.636408647f, 0.011656231f, 0.011656231f, 0.636408647f, 0.031684921f, 0.234121657f, 0.086128544f, 0.086128544f, 0.234121657f, 0.031684921f, 0.636408647f, 0.011656231f}}},
+  .float32Operands = {{0, {0.643914213228014f, 0.643914213228014f, 0.236882800924671f, 0.236882800924671f, 0.087144312427294f, 0.087144312427294f, 0.032058600957022f, 0.032058600957022f, 7.246299848982885e-08f, 7.246299848982885e-08f, 0.643914213228014f, 0.643914213228014f, 0.236882800924671f, 0.236882800924671f, 0.087144312427294f, 0.087144312427294f, 0.032058600957022f, 0.032058600957022f, 7.246299848982885e-08f, 7.246299848982885e-08f}}},
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
@@ -9073,7 +9073,7 @@
   // int -> Dimensions map
   .operandDimensions = {{0, {2, 5, 2}}},
   // int -> FLOAT32 map
-  .float32Operands = {{0, {1.0f, -1.0f, 2.0f, -2.0f, 3.0f, -3.0f, 4.0f, -4.0f, 5.0f, -5.0f, 1.0f, -1.0f, 2.0f, -2.0f, 3.0f, -3.0f, 4.0f, -4.0f, 5.0f, -5.0f}}},
+  .float32Operands = {{0, {17.0f, -1.0f, 16.0f, -2.0f, 15.0f, -3.0f, 14.0f, -4.0f, 1.0f, -17.0f, 17.0f, -1.0f, 16.0f, -2.0f, 15.0f, -3.0f, 14.0f, -4.0f, 1.0f, -17.0f}}},
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
@@ -9096,7 +9096,7 @@
   // int -> Dimensions map
   .operandDimensions = {{0, {2, 5, 2}}},
   // int -> FLOAT32 map
-  .float32Operands = {{0, {0.011656231f, 0.636408647f, 0.031684921f, 0.234121657f, 0.086128544f, 0.086128544f, 0.234121657f, 0.031684921f, 0.636408647f, 0.011656231f, 0.011656231f, 0.636408647f, 0.031684921f, 0.234121657f, 0.086128544f, 0.086128544f, 0.234121657f, 0.031684921f, 0.636408647f, 0.011656231f}}},
+  .float32Operands = {{0, {0.643914213228014f, 0.643914213228014f, 0.236882800924671f, 0.236882800924671f, 0.087144312427294f, 0.087144312427294f, 0.032058600957022f, 0.032058600957022f, 7.246299848982885e-08f, 7.246299848982885e-08f, 0.643914213228014f, 0.643914213228014f, 0.236882800924671f, 0.236882800924671f, 0.087144312427294f, 0.087144312427294f, 0.032058600957022f, 0.032058600957022f, 7.246299848982885e-08f, 7.246299848982885e-08f}}},
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
@@ -9130,7 +9130,7 @@
   // int -> Dimensions map
   .operandDimensions = {{0, {2, 2, 5}}},
   // int -> FLOAT32 map
-  .float32Operands = {{0, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, -1.0f, -2.0f, -3.0f, -4.0f, -5.0f, 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, -1.0f, -2.0f, -3.0f, -4.0f, -5.0f}}},
+  .float32Operands = {{0, {17.0f, 16.0f, 15.0f, 14.0f, 1.0f, -1.0f, -2.0f, -3.0f, -4.0f, -17.0f, 17.0f, 16.0f, 15.0f, 14.0f, 1.0f, -1.0f, -2.0f, -3.0f, -4.0f, -17.0f}}},
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
@@ -9153,7 +9153,7 @@
   // int -> Dimensions map
   .operandDimensions = {{0, {2, 2, 5}}},
   // int -> FLOAT32 map
-  .float32Operands = {{0, {0.011656231f, 0.031684921f, 0.086128544f, 0.234121657f, 0.636408647f, 0.636408647f, 0.234121657f, 0.086128544f, 0.031684921f, 0.011656231f, 0.011656231f, 0.031684921f, 0.086128544f, 0.234121657f, 0.636408647f, 0.636408647f, 0.234121657f, 0.086128544f, 0.031684921f, 0.011656231f}}},
+  .float32Operands = {{0, {0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f, 7.246299848982885e-08f, 0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f, 7.246299848982885e-08f, 0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f, 7.246299848982885e-08f, 0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f, 7.246299848982885e-08f}}},
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
@@ -9187,7 +9187,7 @@
   // int -> Dimensions map
   .operandDimensions = {{0, {2, 2, 5}}},
   // int -> FLOAT32 map
-  .float32Operands = {{0, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, -1.0f, -2.0f, -3.0f, -4.0f, -5.0f, 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, -1.0f, -2.0f, -3.0f, -4.0f, -5.0f}}},
+  .float32Operands = {{0, {17.0f, 16.0f, 15.0f, 14.0f, 1.0f, -1.0f, -2.0f, -3.0f, -4.0f, -17.0f, 17.0f, 16.0f, 15.0f, 14.0f, 1.0f, -1.0f, -2.0f, -3.0f, -4.0f, -17.0f}}},
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
@@ -9210,7 +9210,7 @@
   // int -> Dimensions map
   .operandDimensions = {{0, {2, 2, 5}}},
   // int -> FLOAT32 map
-  .float32Operands = {{0, {0.011656231f, 0.031684921f, 0.086128544f, 0.234121657f, 0.636408647f, 0.636408647f, 0.234121657f, 0.086128544f, 0.031684921f, 0.011656231f, 0.011656231f, 0.031684921f, 0.086128544f, 0.234121657f, 0.636408647f, 0.636408647f, 0.234121657f, 0.086128544f, 0.031684921f, 0.011656231f}}},
+  .float32Operands = {{0, {0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f, 7.246299848982885e-08f, 0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f, 7.246299848982885e-08f, 0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f, 7.246299848982885e-08f, 0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f, 7.246299848982885e-08f}}},
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
@@ -9244,7 +9244,7 @@
   // int -> Dimensions map
   .operandDimensions = {{0, {5, 2}}},
   // int -> FLOAT32 map
-  .float32Operands = {{0, {1.0f, -1.0f, 2.0f, -2.0f, 3.0f, -3.0f, 4.0f, -4.0f, 5.0f, -5.0f}}},
+  .float32Operands = {{0, {17.0f, -1.0f, 16.0f, -2.0f, 15.0f, -3.0f, 14.0f, -4.0f, 1.0f, -17.0f}}},
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
@@ -9267,7 +9267,7 @@
   // int -> Dimensions map
   .operandDimensions = {{0, {5, 2}}},
   // int -> FLOAT32 map
-  .float32Operands = {{0, {0.011656231f, 0.636408647f, 0.031684921f, 0.234121657f, 0.086128544f, 0.086128544f, 0.234121657f, 0.031684921f, 0.636408647f, 0.011656231f}}},
+  .float32Operands = {{0, {0.643914213228014f, 0.643914213228014f, 0.236882800924671f, 0.236882800924671f, 0.087144312427294f, 0.087144312427294f, 0.032058600957022f, 0.032058600957022f, 7.246299848982885e-08f, 7.246299848982885e-08f}}},
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
@@ -9301,7 +9301,7 @@
   // int -> Dimensions map
   .operandDimensions = {{0, {5, 2}}},
   // int -> FLOAT32 map
-  .float32Operands = {{0, {1.0f, -1.0f, 2.0f, -2.0f, 3.0f, -3.0f, 4.0f, -4.0f, 5.0f, -5.0f}}},
+  .float32Operands = {{0, {17.0f, -1.0f, 16.0f, -2.0f, 15.0f, -3.0f, 14.0f, -4.0f, 1.0f, -17.0f}}},
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
@@ -9324,7 +9324,7 @@
   // int -> Dimensions map
   .operandDimensions = {{0, {5, 2}}},
   // int -> FLOAT32 map
-  .float32Operands = {{0, {0.011656231f, 0.636408647f, 0.031684921f, 0.234121657f, 0.086128544f, 0.086128544f, 0.234121657f, 0.031684921f, 0.636408647f, 0.011656231f}}},
+  .float32Operands = {{0, {0.643914213228014f, 0.643914213228014f, 0.236882800924671f, 0.236882800924671f, 0.087144312427294f, 0.087144312427294f, 0.032058600957022f, 0.032058600957022f, 7.246299848982885e-08f, 7.246299848982885e-08f}}},
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
@@ -9358,7 +9358,7 @@
   // int -> Dimensions map
   .operandDimensions = {{0, {2, 5}}},
   // int -> FLOAT32 map
-  .float32Operands = {{0, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, -1.0f, -2.0f, -3.0f, -4.0f, -5.0f}}},
+  .float32Operands = {{0, {17.0f, 16.0f, 15.0f, 14.0f, 1.0f, -1.0f, -2.0f, -3.0f, -4.0f, -17.0f}}},
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
@@ -9381,7 +9381,7 @@
   // int -> Dimensions map
   .operandDimensions = {{0, {2, 5}}},
   // int -> FLOAT32 map
-  .float32Operands = {{0, {0.011656231f, 0.031684921f, 0.086128544f, 0.234121657f, 0.636408647f, 0.636408647f, 0.234121657f, 0.086128544f, 0.031684921f, 0.011656231f}}},
+  .float32Operands = {{0, {0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f, 7.246299848982885e-08f, 0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f, 7.246299848982885e-08f}}},
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
@@ -9415,7 +9415,7 @@
   // int -> Dimensions map
   .operandDimensions = {{0, {2, 5}}},
   // int -> FLOAT32 map
-  .float32Operands = {{0, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, -1.0f, -2.0f, -3.0f, -4.0f, -5.0f}}},
+  .float32Operands = {{0, {17.0f, 16.0f, 15.0f, 14.0f, 1.0f, -1.0f, -2.0f, -3.0f, -4.0f, -17.0f}}},
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
@@ -9438,7 +9438,7 @@
   // int -> Dimensions map
   .operandDimensions = {{0, {2, 5}}},
   // int -> FLOAT32 map
-  .float32Operands = {{0, {0.011656231f, 0.031684921f, 0.086128544f, 0.234121657f, 0.636408647f, 0.636408647f, 0.234121657f, 0.086128544f, 0.031684921f, 0.011656231f}}},
+  .float32Operands = {{0, {0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f, 7.246299848982885e-08f, 0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f, 7.246299848982885e-08f}}},
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
@@ -9472,7 +9472,7 @@
   // int -> Dimensions map
   .operandDimensions = {{0, {5}}},
   // int -> FLOAT32 map
-  .float32Operands = {{0, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f}}},
+  .float32Operands = {{0, {17.0f, 16.0f, 15.0f, 14.0f, 1.0f}}},
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
@@ -9495,7 +9495,7 @@
   // int -> Dimensions map
   .operandDimensions = {{0, {5}}},
   // int -> FLOAT32 map
-  .float32Operands = {{0, {0.011656231f, 0.031684921f, 0.086128544f, 0.234121657f, 0.636408647f}}},
+  .float32Operands = {{0, {0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f, 7.246299848982885e-08f}}},
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
@@ -9529,7 +9529,7 @@
   // int -> Dimensions map
   .operandDimensions = {{0, {5}}},
   // int -> FLOAT32 map
-  .float32Operands = {{0, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f}}},
+  .float32Operands = {{0, {17.0f, 16.0f, 15.0f, 14.0f, 1.0f}}},
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
@@ -9552,7 +9552,7 @@
   // int -> Dimensions map
   .operandDimensions = {{0, {5}}},
   // int -> FLOAT32 map
-  .float32Operands = {{0, {0.011656231f, 0.031684921f, 0.086128544f, 0.234121657f, 0.636408647f}}},
+  .float32Operands = {{0, {0.643914213228014f, 0.236882800924671f, 0.087144312427294f, 0.032058600957022f, 7.246299848982885e-08f}}},
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
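(Illustrative note, not part of the generated data above: the updated expected float32 values are consistent with a softmax taken with beta = 1 over the new input row {17, 16, 15, 14, 1} — and equivalently over the negated row {-1, -2, -3, -4, -17} — replacing the previous expectations, which correspond to softmax over {1, 2, 3, 4, 5}. The following minimal standalone sketch recomputes those values; the choice of beta = 1 is an assumption inferred from the numbers, not stated in this diff.)

// Minimal sanity check: recompute softmax(beta = 1) over the new input row
// {17, 16, 15, 14, 1} and compare against the updated expected values above.
#include <cmath>
#include <cstdio>
#include <vector>

int main() {
    const std::vector<float> input = {17.0f, 16.0f, 15.0f, 14.0f, 1.0f};
    const double beta = 1.0;  // assumed beta for these examples

    // softmax(x_i) = exp(beta * x_i) / sum_j exp(beta * x_j)
    double sum = 0.0;
    for (float x : input) sum += std::exp(beta * x);

    for (float x : input) {
        std::printf("%.15g\n", std::exp(beta * x) / sum);
    }
    // Prints approximately:
    //   0.643914213228...
    //   0.236882800924...
    //   0.087144312427...
    //   0.032058600957...
    //   7.2463...e-08
    return 0;
}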
@@ -9594,7 +9594,7 @@
   // int -> QUANT16_SYMM map
   .quant16SymmOperands = {},
   // int -> FLOAT16 map
-  .float16Operands = {{0, {1.0f, -1.0f, 1.0f, -1.0f, 1.0f, -1.0f, 1.0f, -1.0f, 2.0f, -2.0f, 2.0f, -2.0f, 2.0f, -2.0f, 2.0f, -2.0f, 3.0f, -3.0f, 3.0f, -3.0f, 3.0f, -3.0f, 3.0f, -3.0f, 4.0f, -4.0f, 4.0f, -4.0f, 4.0f, -4.0f, 4.0f, -4.0f, 5.0f, -5.0f, 5.0f, -5.0f, 5.0f, -5.0f, 5.0f, -5.0f}}},
+  .float16Operands = {{0, {17.0f, -1.0f, 17.0f, -1.0f, 17.0f, -1.0f, 17.0f, -1.0f, 16.0f, -2.0f, 16.0f, -2.0f, 16.0f, -2.0f, 16.0f, -2.0f, 15.0f, -3.0f, 15.0f, -3.0f, 15.0f, -3.0f, 15.0f, -3.0f, 14.0f, -4.0f, 14.0f, -4.0f, 14.0f, -4.0f, 14.0f, -4.0f, 1.0f, -17.0f, 1.0f, -17.0f, 1.0f, -17.0f, 1.0f, -17.0f}}},
   // int -> BOOL8 map
   .bool8Operands = {},
   // int -> QUANT8_SYMM_PER_CHANNEL map
@@ -9617,7 +9617,7 @@
   // int -> QUANT16_SYMM map
   .quant16SymmOperands = {},
   // int -> FLOAT16 map
-  .float16Operands = {{0, {0.011656231246888638f, 0.6364086270332336f, 0.011656231246888638f, 0.6364086270332336f, 0.011656231246888638f, 0.6364086270332336f, 0.011656231246888638f, 0.6364086270332336f, 0.03168492019176483f, 0.2341216504573822f, 0.03168492019176483f, 0.2341216504573822f, 0.03168492019176483f, 0.2341216504573822f, 0.03168492019176483f, 0.2341216504573822f, 0.08612854033708572f, 0.08612854033708572f, 0.08612854033708572f, 0.08612854033708572f, 0.08612854033708572f, 0.08612854033708572f, 0.08612854033708572f, 0.08612854033708572f, 0.2341216504573822f, 0.03168492019176483f, 0.2341216504573822f, 0.03168492019176483f, 0.2341216504573822f, 0.03168492019176483f, 0.2341216504573822f, 0.03168492019176483f, 0.6364086270332336f, 0.011656231246888638f, 0.6364086270332336f, 0.011656231246888638f, 0.6364086270332336f, 0.011656231246888638f, 0.6364086270332336f, 0.011656231246888638f}}},
+  .float16Operands = {{0, {0.6439142227172852f, 0.6439142227172852f, 0.6439142227172852f, 0.6439142227172852f, 0.6439142227172852f, 0.6439142227172852f, 0.6439142227172852f, 0.6439142227172852f, 0.23688280582427979f, 0.23688280582427979f, 0.23688280582427979f, 0.23688280582427979f, 0.23688280582427979f, 0.23688280582427979f, 0.23688280582427979f, 0.23688280582427979f, 0.08714431524276733f, 0.08714431524276733f, 0.08714431524276733f, 0.08714431524276733f, 0.08714431524276733f, 0.08714431524276733f, 0.08714431524276733f, 0.08714431524276733f, 0.03205860033631325f, 0.03205860033631325f, 0.03205860033631325f, 0.03205860033631325f, 0.03205860033631325f, 0.03205860033631325f, 0.03205860033631325f, 0.03205860033631325f, 7.246299560392799e-08f, 7.246299560392799e-08f, 7.246299560392799e-08f, 7.246299560392799e-08f, 7.246299560392799e-08f, 7.246299560392799e-08f, 7.246299560392799e-08f, 7.246299560392799e-08f}}},
   // int -> BOOL8 map
   .bool8Operands = {},
   // int -> QUANT8_SYMM_PER_CHANNEL map
@@ -9651,7 +9651,7 @@
   // int -> QUANT16_SYMM map
   .quant16SymmOperands = {},
   // int -> FLOAT16 map
-  .float16Operands = {{0, {1.0f, -1.0f, 1.0f, -1.0f, 1.0f, -1.0f, 1.0f, -1.0f, 2.0f, -2.0f, 2.0f, -2.0f, 2.0f, -2.0f, 2.0f, -2.0f, 3.0f, -3.0f, 3.0f, -3.0f, 3.0f, -3.0f, 3.0f, -3.0f, 4.0f, -4.0f, 4.0f, -4.0f, 4.0f, -4.0f, 4.0f, -4.0f, 5.0f, -5.0f, 5.0f, -5.0f, 5.0f, -5.0f, 5.0f, -5.0f}}},
+  .float16Operands = {{0, {17.0f, -1.0f, 17.0f, -1.0f, 17.0f, -1.0f, 17.0f, -1.0f, 16.0f, -2.0f, 16.0f, -2.0f, 16.0f, -2.0f, 16.0f, -2.0f, 15.0f, -3.0f, 15.0f, -3.0f, 15.0f, -3.0f, 15.0f, -3.0f, 14.0f, -4.0f, 14.0f, -4.0f, 14.0f, -4.0f, 14.0f, -4.0f, 1.0f, -17.0f, 1.0f, -17.0f, 1.0f, -17.0f, 1.0f, -17.0f}}},
   // int -> BOOL8 map
   .bool8Operands = {},
   // int -> QUANT8_SYMM_PER_CHANNEL map
@@ -9674,7 +9674,7 @@
   // int -> QUANT16_SYMM map
   .quant16SymmOperands = {},
   // int -> FLOAT16 map
-  .float16Operands = {{0, {0.011656231246888638f, 0.6364086270332336f, 0.011656231246888638f, 0.6364086270332336f, 0.011656231246888638f, 0.6364086270332336f, 0.011656231246888638f, 0.6364086270332336f, 0.03168492019176483f, 0.2341216504573822f, 0.03168492019176483f, 0.2341216504573822f, 0.03168492019176483f, 0.2341216504573822f, 0.03168492019176483f, 0.2341216504573822f, 0.08612854033708572f, 0.08612854033708572f, 0.08612854033708572f, 0.08612854033708572f, 0.08612854033708572f, 0.08612854033708572f, 0.08612854033708572f, 0.08612854033708572f, 0.2341216504573822f, 0.03168492019176483f, 0.2341216504573822f, 0.03168492019176483f, 0.2341216504573822f, 0.03168492019176483f, 0.2341216504573822f, 0.03168492019176483f, 0.6364086270332336f, 0.011656231246888638f, 0.6364086270332336f, 0.011656231246888638f, 0.6364086270332336f, 0.011656231246888638f, 0.6364086270332336f, 0.011656231246888638f}}},
+  .float16Operands = {{0, {0.6439142227172852f, 0.6439142227172852f, 0.6439142227172852f, 0.6439142227172852f, 0.6439142227172852f, 0.6439142227172852f, 0.6439142227172852f, 0.6439142227172852f, 0.23688280582427979f, 0.23688280582427979f, 0.23688280582427979f, 0.23688280582427979f, 0.23688280582427979f, 0.23688280582427979f, 0.23688280582427979f, 0.23688280582427979f, 0.08714431524276733f, 0.08714431524276733f, 0.08714431524276733f, 0.08714431524276733f, 0.08714431524276733f, 0.08714431524276733f, 0.08714431524276733f, 0.08714431524276733f, 0.03205860033631325f, 0.03205860033631325f, 0.03205860033631325f, 0.03205860033631325f, 0.03205860033631325f, 0.03205860033631325f, 0.03205860033631325f, 0.03205860033631325f, 7.246299560392799e-08f, 7.246299560392799e-08f, 7.246299560392799e-08f, 7.246299560392799e-08f, 7.246299560392799e-08f, 7.246299560392799e-08f, 7.246299560392799e-08f, 7.246299560392799e-08f}}},
   // int -> BOOL8 map
   .bool8Operands = {},
   // int -> QUANT8_SYMM_PER_CHANNEL map
@@ -9708,7 +9708,7 @@
   // int -> QUANT16_SYMM map
   .quant16SymmOperands = {},
   // int -> FLOAT16 map
-  .float16Operands = {{0, {1.0f, -1.0f, 1.0f, -1.0f, 2.0f, -2.0f, 2.0f, -2.0f, 3.0f, -3.0f, 3.0f, -3.0f, 4.0f, -4.0f, 4.0f, -4.0f, 5.0f, -5.0f, 5.0f, -5.0f, 1.0f, -1.0f, 1.0f, -1.0f, 2.0f, -2.0f, 2.0f, -2.0f, 3.0f, -3.0f, 3.0f, -3.0f, 4.0f, -4.0f, 4.0f, -4.0f, 5.0f, -5.0f, 5.0f, -5.0f}}},
+  .float16Operands = {{0, {17.0f, -1.0f, 17.0f, -1.0f, 16.0f, -2.0f, 16.0f, -2.0f, 15.0f, -3.0f, 15.0f, -3.0f, 14.0f, -4.0f, 14.0f, -4.0f, 1.0f, -17.0f, 1.0f, -17.0f, 17.0f, -1.0f, 17.0f, -1.0f, 16.0f, -2.0f, 16.0f, -2.0f, 15.0f, -3.0f, 15.0f, -3.0f, 14.0f, -4.0f, 14.0f, -4.0f, 1.0f, -17.0f, 1.0f, -17.0f}}},
   // int -> BOOL8 map
   .bool8Operands = {},
   // int -> QUANT8_SYMM_PER_CHANNEL map
@@ -9731,7 +9731,7 @@
   // int -> QUANT16_SYMM map
   .quant16SymmOperands = {},
   // int -> FLOAT16 map
-  .float16Operands = {{0, {0.011656231246888638f, 0.6364086270332336f, 0.011656231246888638f, 0.6364086270332336f, 0.03168492019176483f, 0.2341216504573822f, 0.03168492019176483f, 0.2341216504573822f, 0.08612854033708572f, 0.08612854033708572f, 0.08612854033708572f, 0.08612854033708572f, 0.2341216504573822f, 0.03168492019176483f, 0.2341216504573822f, 0.03168492019176483f, 0.6364086270332336f, 0.011656231246888638f, 0.6364086270332336f, 0.011656231246888638f, 0.011656231246888638f, 0.6364086270332336f, 0.011656231246888638f, 0.6364086270332336f, 0.03168492019176483f, 0.2341216504573822f, 0.03168492019176483f, 0.2341216504573822f, 0.08612854033708572f, 0.08612854033708572f, 0.08612854033708572f, 0.08612854033708572f, 0.2341216504573822f, 0.03168492019176483f, 0.2341216504573822f, 0.03168492019176483f, 0.6364086270332336f, 0.011656231246888638f, 0.6364086270332336f, 0.011656231246888638f}}},
+  .float16Operands = {{0, {0.6439142227172852f, 0.6439142227172852f, 0.6439142227172852f, 0.6439142227172852f, 0.23688280582427979f, 0.23688280582427979f, 0.23688280582427979f, 0.23688280582427979f, 0.08714431524276733f, 0.08714431524276733f, 0.08714431524276733f, 0.08714431524276733f, 0.03205860033631325f, 0.03205860033631325f, 0.03205860033631325f, 0.03205860033631325f, 7.246299560392799e-08f, 7.246299560392799e-08f, 7.246299560392799e-08f, 7.246299560392799e-08f, 0.6439142227172852f, 0.6439142227172852f, 0.6439142227172852f, 0.6439142227172852f, 0.23688280582427979f, 0.23688280582427979f, 0.23688280582427979f, 0.23688280582427979f, 0.08714431524276733f, 0.08714431524276733f, 0.08714431524276733f, 0.08714431524276733f, 0.03205860033631325f, 0.03205860033631325f, 0.03205860033631325f, 0.03205860033631325f, 7.246299560392799e-08f, 7.246299560392799e-08f, 7.246299560392799e-08f, 7.246299560392799e-08f}}},
   // int -> BOOL8 map
   .bool8Operands = {},
   // int -> QUANT8_SYMM_PER_CHANNEL map
@@ -9765,7 +9765,7 @@
   // int -> QUANT16_SYMM map
   .quant16SymmOperands = {},
   // int -> FLOAT16 map
-  .float16Operands = {{0, {1.0f, -1.0f, 1.0f, -1.0f, 2.0f, -2.0f, 2.0f, -2.0f, 3.0f, -3.0f, 3.0f, -3.0f, 4.0f, -4.0f, 4.0f, -4.0f, 5.0f, -5.0f, 5.0f, -5.0f, 1.0f, -1.0f, 1.0f, -1.0f, 2.0f, -2.0f, 2.0f, -2.0f, 3.0f, -3.0f, 3.0f, -3.0f, 4.0f, -4.0f, 4.0f, -4.0f, 5.0f, -5.0f, 5.0f, -5.0f}}},
+  .float16Operands = {{0, {17.0f, -1.0f, 17.0f, -1.0f, 16.0f, -2.0f, 16.0f, -2.0f, 15.0f, -3.0f, 15.0f, -3.0f, 14.0f, -4.0f, 14.0f, -4.0f, 1.0f, -17.0f, 1.0f, -17.0f, 17.0f, -1.0f, 17.0f, -1.0f, 16.0f, -2.0f, 16.0f, -2.0f, 15.0f, -3.0f, 15.0f, -3.0f, 14.0f, -4.0f, 14.0f, -4.0f, 1.0f, -17.0f, 1.0f, -17.0f}}},
   // int -> BOOL8 map
   .bool8Operands = {},
   // int -> QUANT8_SYMM_PER_CHANNEL map
@@ -9788,7 +9788,7 @@
   // int -> QUANT16_SYMM map
   .quant16SymmOperands = {},
   // int -> FLOAT16 map
-  .float16Operands = {{0, {0.011656231246888638f, 0.6364086270332336f, 0.011656231246888638f, 0.6364086270332336f, 0.03168492019176483f, 0.2341216504573822f, 0.03168492019176483f, 0.2341216504573822f, 0.08612854033708572f, 0.08612854033708572f, 0.08612854033708572f, 0.08612854033708572f, 0.2341216504573822f, 0.03168492019176483f, 0.2341216504573822f, 0.03168492019176483f, 0.6364086270332336f, 0.011656231246888638f, 0.6364086270332336f, 0.011656231246888638f, 0.011656231246888638f, 0.6364086270332336f, 0.011656231246888638f, 0.6364086270332336f, 0.03168492019176483f, 0.2341216504573822f, 0.03168492019176483f, 0.2341216504573822f, 0.08612854033708572f, 0.08612854033708572f, 0.08612854033708572f, 0.08612854033708572f, 0.2341216504573822f, 0.03168492019176483f, 0.2341216504573822f, 0.03168492019176483f, 0.6364086270332336f, 0.011656231246888638f, 0.6364086270332336f, 0.011656231246888638f}}},
+  .float16Operands = {{0, {0.6439142227172852f, 0.6439142227172852f, 0.6439142227172852f, 0.6439142227172852f, 0.23688280582427979f, 0.23688280582427979f, 0.23688280582427979f, 0.23688280582427979f, 0.08714431524276733f, 0.08714431524276733f, 0.08714431524276733f, 0.08714431524276733f, 0.03205860033631325f, 0.03205860033631325f, 0.03205860033631325f, 0.03205860033631325f, 7.246299560392799e-08f, 7.246299560392799e-08f, 7.246299560392799e-08f, 7.246299560392799e-08f, 0.6439142227172852f, 0.6439142227172852f, 0.6439142227172852f, 0.6439142227172852f, 0.23688280582427979f, 0.23688280582427979f, 0.23688280582427979f, 0.23688280582427979f, 0.08714431524276733f, 0.08714431524276733f, 0.08714431524276733f, 0.08714431524276733f, 0.03205860033631325f, 0.03205860033631325f, 0.03205860033631325f, 0.03205860033631325f, 7.246299560392799e-08f, 7.246299560392799e-08f, 7.246299560392799e-08f, 7.246299560392799e-08f}}},
   // int -> BOOL8 map
   .bool8Operands = {},
   // int -> QUANT8_SYMM_PER_CHANNEL map
@@ -9822,7 +9822,7 @@
   // int -> QUANT16_SYMM map
   .quant16SymmOperands = {},
   // int -> FLOAT16 map
-  .float16Operands = {{0, {1.0f, -1.0f, 2.0f, -2.0f, 3.0f, -3.0f, 4.0f, -4.0f, 5.0f, -5.0f, 1.0f, -1.0f, 2.0f, -2.0f, 3.0f, -3.0f, 4.0f, -4.0f, 5.0f, -5.0f, 1.0f, -1.0f, 2.0f, -2.0f, 3.0f, -3.0f, 4.0f, -4.0f, 5.0f, -5.0f, 1.0f, -1.0f, 2.0f, -2.0f, 3.0f, -3.0f, 4.0f, -4.0f, 5.0f, -5.0f}}},
+  .float16Operands = {{0, {17.0f, -1.0f, 16.0f, -2.0f, 15.0f, -3.0f, 14.0f, -4.0f, 1.0f, -17.0f, 17.0f, -1.0f, 16.0f, -2.0f, 15.0f, -3.0f, 14.0f, -4.0f, 1.0f, -17.0f, 17.0f, -1.0f, 16.0f, -2.0f, 15.0f, -3.0f, 14.0f, -4.0f, 1.0f, -17.0f, 17.0f, -1.0f, 16.0f, -2.0f, 15.0f, -3.0f, 14.0f, -4.0f, 1.0f, -17.0f}}},
   // int -> BOOL8 map
   .bool8Operands = {},
   // int -> QUANT8_SYMM_PER_CHANNEL map
@@ -9845,7 +9845,7 @@
   // int -> QUANT16_SYMM map
   .quant16SymmOperands = {},
   // int -> FLOAT16 map
-  .float16Operands = {{0, {0.011656231246888638f, 0.6364086270332336f, 0.03168492019176483f, 0.2341216504573822f, 0.08612854033708572f, 0.08612854033708572f, 0.2341216504573822f, 0.03168492019176483f, 0.6364086270332336f, 0.011656231246888638f, 0.011656231246888638f, 0.6364086270332336f, 0.03168492019176483f, 0.2341216504573822f, 0.08612854033708572f, 0.08612854033708572f, 0.2341216504573822f, 0.03168492019176483f, 0.6364086270332336f, 0.011656231246888638f, 0.011656231246888638f, 0.6364086270332336f, 0.03168492019176483f, 0.2341216504573822f, 0.08612854033708572f, 0.08612854033708572f, 0.2341216504573822f, 0.03168492019176483f, 0.6364086270332336f, 0.011656231246888638f, 0.011656231246888638f, 0.6364086270332336f, 0.03168492019176483f, 0.2341216504573822f, 0.08612854033708572f, 0.08612854033708572f, 0.2341216504573822f, 0.03168492019176483f, 0.6364086270332336f, 0.011656231246888638f}}},
+  .float16Operands = {{0, {0.6439142227172852f, 0.6439142227172852f, 0.23688280582427979f, 0.23688280582427979f, 0.08714431524276733f, 0.08714431524276733f, 0.03205860033631325f, 0.03205860033631325f, 7.246299560392799e-08f, 7.246299560392799e-08f, 0.6439142227172852f, 0.6439142227172852f, 0.23688280582427979f, 0.23688280582427979f, 0.08714431524276733f, 0.08714431524276733f, 0.03205860033631325f, 0.03205860033631325f, 7.246299560392799e-08f, 7.246299560392799e-08f, 0.6439142227172852f, 0.6439142227172852f, 0.23688280582427979f, 0.23688280582427979f, 0.08714431524276733f, 0.08714431524276733f, 0.03205860033631325f, 0.03205860033631325f, 7.246299560392799e-08f, 7.246299560392799e-08f, 0.6439142227172852f, 0.6439142227172852f, 0.23688280582427979f, 0.23688280582427979f, 0.08714431524276733f, 0.08714431524276733f, 0.03205860033631325f, 0.03205860033631325f, 7.246299560392799e-08f, 7.246299560392799e-08f}}},
   // int -> BOOL8 map
   .bool8Operands = {},
   // int -> QUANT8_SYMM_PER_CHANNEL map
@@ -9879,7 +9879,7 @@
   // int -> QUANT16_SYMM map
   .quant16SymmOperands = {},
   // int -> FLOAT16 map
-  .float16Operands = {{0, {1.0f, -1.0f, 2.0f, -2.0f, 3.0f, -3.0f, 4.0f, -4.0f, 5.0f, -5.0f, 1.0f, -1.0f, 2.0f, -2.0f, 3.0f, -3.0f, 4.0f, -4.0f, 5.0f, -5.0f, 1.0f, -1.0f, 2.0f, -2.0f, 3.0f, -3.0f, 4.0f, -4.0f, 5.0f, -5.0f, 1.0f, -1.0f, 2.0f, -2.0f, 3.0f, -3.0f, 4.0f, -4.0f, 5.0f, -5.0f}}},
+  .float16Operands = {{0, {17.0f, -1.0f, 16.0f, -2.0f, 15.0f, -3.0f, 14.0f, -4.0f, 1.0f, -17.0f, 17.0f, -1.0f, 16.0f, -2.0f, 15.0f, -3.0f, 14.0f, -4.0f, 1.0f, -17.0f, 17.0f, -1.0f, 16.0f, -2.0f, 15.0f, -3.0f, 14.0f, -4.0f, 1.0f, -17.0f, 17.0f, -1.0f, 16.0f, -2.0f, 15.0f, -3.0f, 14.0f, -4.0f, 1.0f, -17.0f}}},
   // int -> BOOL8 map
   .bool8Operands = {},
   // int -> QUANT8_SYMM_PER_CHANNEL map
@@ -9902,7 +9902,7 @@
   // int -> QUANT16_SYMM map
   .quant16SymmOperands = {},
   // int -> FLOAT16 map
-  .float16Operands = {{0, {0.011656231246888638f, 0.6364086270332336f, 0.03168492019176483f, 0.2341216504573822f, 0.08612854033708572f, 0.08612854033708572f, 0.2341216504573822f, 0.03168492019176483f, 0.6364086270332336f, 0.011656231246888638f, 0.011656231246888638f, 0.6364086270332336f, 0.03168492019176483f, 0.2341216504573822f, 0.08612854033708572f, 0.08612854033708572f, 0.2341216504573822f, 0.03168492019176483f, 0.6364086270332336f, 0.011656231246888638f, 0.011656231246888638f, 0.6364086270332336f, 0.03168492019176483f, 0.2341216504573822f, 0.08612854033708572f, 0.08612854033708572f, 0.2341216504573822f, 0.03168492019176483f, 0.6364086270332336f, 0.011656231246888638f, 0.011656231246888638f, 0.6364086270332336f, 0.03168492019176483f, 0.2341216504573822f, 0.08612854033708572f, 0.08612854033708572f, 0.2341216504573822f, 0.03168492019176483f, 0.6364086270332336f, 0.011656231246888638f}}},
+  .float16Operands = {{0, {0.6439142227172852f, 0.6439142227172852f, 0.23688280582427979f, 0.23688280582427979f, 0.08714431524276733f, 0.08714431524276733f, 0.03205860033631325f, 0.03205860033631325f, 7.246299560392799e-08f, 7.246299560392799e-08f, 0.6439142227172852f, 0.6439142227172852f, 0.23688280582427979f, 0.23688280582427979f, 0.08714431524276733f, 0.08714431524276733f, 0.03205860033631325f, 0.03205860033631325f, 7.246299560392799e-08f, 7.246299560392799e-08f, 0.6439142227172852f, 0.6439142227172852f, 0.23688280582427979f, 0.23688280582427979f, 0.08714431524276733f, 0.08714431524276733f, 0.03205860033631325f, 0.03205860033631325f, 7.246299560392799e-08f, 7.246299560392799e-08f, 0.6439142227172852f, 0.6439142227172852f, 0.23688280582427979f, 0.23688280582427979f, 0.08714431524276733f, 0.08714431524276733f, 0.03205860033631325f, 0.03205860033631325f, 7.246299560392799e-08f, 7.246299560392799e-08f}}},
   // int -> BOOL8 map
   .bool8Operands = {},
   // int -> QUANT8_SYMM_PER_CHANNEL map
@@ -9936,7 +9936,7 @@
   // int -> QUANT16_SYMM map
   .quant16SymmOperands = {},
   // int -> FLOAT16 map
-  .float16Operands = {{0, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, -1.0f, -2.0f, -3.0f, -4.0f, -5.0f, 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, -1.0f, -2.0f, -3.0f, -4.0f, -5.0f, 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, -1.0f, -2.0f, -3.0f, -4.0f, -5.0f, 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, -1.0f, -2.0f, -3.0f, -4.0f, -5.0f}}},
+  .float16Operands = {{0, {17.0f, 16.0f, 15.0f, 14.0f, 1.0f, -1.0f, -2.0f, -3.0f, -4.0f, -17.0f, 17.0f, 16.0f, 15.0f, 14.0f, 1.0f, -1.0f, -2.0f, -3.0f, -4.0f, -17.0f, 17.0f, 16.0f, 15.0f, 14.0f, 1.0f, -1.0f, -2.0f, -3.0f, -4.0f, -17.0f, 17.0f, 16.0f, 15.0f, 14.0f, 1.0f, -1.0f, -2.0f, -3.0f, -4.0f, -17.0f}}},
   // int -> BOOL8 map
   .bool8Operands = {},
   // int -> QUANT8_SYMM_PER_CHANNEL map
@@ -9959,7 +9959,7 @@
   // int -> QUANT16_SYMM map
   .quant16SymmOperands = {},
   // int -> FLOAT16 map
-  .float16Operands = {{0, {0.011656231246888638f, 0.03168492019176483f, 0.08612854033708572f, 0.2341216504573822f, 0.6364086270332336f, 0.6364086270332336f, 0.2341216504573822f, 0.08612854033708572f, 0.03168492019176483f, 0.011656231246888638f, 0.011656231246888638f, 0.03168492019176483f, 0.08612854033708572f, 0.2341216504573822f, 0.6364086270332336f, 0.6364086270332336f, 0.2341216504573822f, 0.08612854033708572f, 0.03168492019176483f, 0.011656231246888638f, 0.011656231246888638f, 0.03168492019176483f, 0.08612854033708572f, 0.2341216504573822f, 0.6364086270332336f, 0.6364086270332336f, 0.2341216504573822f, 0.08612854033708572f, 0.03168492019176483f, 0.011656231246888638f, 0.011656231246888638f, 0.03168492019176483f, 0.08612854033708572f, 0.2341216504573822f, 0.6364086270332336f, 0.6364086270332336f, 0.2341216504573822f, 0.08612854033708572f, 0.03168492019176483f, 0.011656231246888638f}}},
+  .float16Operands = {{0, {0.6439142227172852f, 0.23688280582427979f, 0.08714431524276733f, 0.03205860033631325f, 7.246299560392799e-08f, 0.6439142227172852f, 0.23688280582427979f, 0.08714431524276733f, 0.03205860033631325f, 7.246299560392799e-08f, 0.6439142227172852f, 0.23688280582427979f, 0.08714431524276733f, 0.03205860033631325f, 7.246299560392799e-08f, 0.6439142227172852f, 0.23688280582427979f, 0.08714431524276733f, 0.03205860033631325f, 7.246299560392799e-08f, 0.6439142227172852f, 0.23688280582427979f, 0.08714431524276733f, 0.03205860033631325f, 7.246299560392799e-08f, 0.6439142227172852f, 0.23688280582427979f, 0.08714431524276733f, 0.03205860033631325f, 7.246299560392799e-08f, 0.6439142227172852f, 0.23688280582427979f, 0.08714431524276733f, 0.03205860033631325f, 7.246299560392799e-08f, 0.6439142227172852f, 0.23688280582427979f, 0.08714431524276733f, 0.03205860033631325f, 7.246299560392799e-08f}}},
   // int -> BOOL8 map
   .bool8Operands = {},
   // int -> QUANT8_SYMM_PER_CHANNEL map
@@ -9993,7 +9993,7 @@
   // int -> QUANT16_SYMM map
   .quant16SymmOperands = {},
   // int -> FLOAT16 map
-  .float16Operands = {{0, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, -1.0f, -2.0f, -3.0f, -4.0f, -5.0f, 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, -1.0f, -2.0f, -3.0f, -4.0f, -5.0f, 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, -1.0f, -2.0f, -3.0f, -4.0f, -5.0f, 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, -1.0f, -2.0f, -3.0f, -4.0f, -5.0f}}},
+  .float16Operands = {{0, {17.0f, 16.0f, 15.0f, 14.0f, 1.0f, -1.0f, -2.0f, -3.0f, -4.0f, -17.0f, 17.0f, 16.0f, 15.0f, 14.0f, 1.0f, -1.0f, -2.0f, -3.0f, -4.0f, -17.0f, 17.0f, 16.0f, 15.0f, 14.0f, 1.0f, -1.0f, -2.0f, -3.0f, -4.0f, -17.0f, 17.0f, 16.0f, 15.0f, 14.0f, 1.0f, -1.0f, -2.0f, -3.0f, -4.0f, -17.0f}}},
   // int -> BOOL8 map
   .bool8Operands = {},
   // int -> QUANT8_SYMM_PER_CHANNEL map
@@ -10016,7 +10016,7 @@
   // int -> QUANT16_SYMM map
   .quant16SymmOperands = {},
   // int -> FLOAT16 map
-  .float16Operands = {{0, {0.011656231246888638f, 0.03168492019176483f, 0.08612854033708572f, 0.2341216504573822f, 0.6364086270332336f, 0.6364086270332336f, 0.2341216504573822f, 0.08612854033708572f, 0.03168492019176483f, 0.011656231246888638f, 0.011656231246888638f, 0.03168492019176483f, 0.08612854033708572f, 0.2341216504573822f, 0.6364086270332336f, 0.6364086270332336f, 0.2341216504573822f, 0.08612854033708572f, 0.03168492019176483f, 0.011656231246888638f, 0.011656231246888638f, 0.03168492019176483f, 0.08612854033708572f, 0.2341216504573822f, 0.6364086270332336f, 0.6364086270332336f, 0.2341216504573822f, 0.08612854033708572f, 0.03168492019176483f, 0.011656231246888638f, 0.011656231246888638f, 0.03168492019176483f, 0.08612854033708572f, 0.2341216504573822f, 0.6364086270332336f, 0.6364086270332336f, 0.2341216504573822f, 0.08612854033708572f, 0.03168492019176483f, 0.011656231246888638f}}},
+  .float16Operands = {{0, {0.6439142227172852f, 0.23688280582427979f, 0.08714431524276733f, 0.03205860033631325f, 7.246299560392799e-08f, 0.6439142227172852f, 0.23688280582427979f, 0.08714431524276733f, 0.03205860033631325f, 7.246299560392799e-08f, 0.6439142227172852f, 0.23688280582427979f, 0.08714431524276733f, 0.03205860033631325f, 7.246299560392799e-08f, 0.6439142227172852f, 0.23688280582427979f, 0.08714431524276733f, 0.03205860033631325f, 7.246299560392799e-08f, 0.6439142227172852f, 0.23688280582427979f, 0.08714431524276733f, 0.03205860033631325f, 7.246299560392799e-08f, 0.6439142227172852f, 0.23688280582427979f, 0.08714431524276733f, 0.03205860033631325f, 7.246299560392799e-08f, 0.6439142227172852f, 0.23688280582427979f, 0.08714431524276733f, 0.03205860033631325f, 7.246299560392799e-08f, 0.6439142227172852f, 0.23688280582427979f, 0.08714431524276733f, 0.03205860033631325f, 7.246299560392799e-08f}}},
   // int -> BOOL8 map
   .bool8Operands = {},
   // int -> QUANT8_SYMM_PER_CHANNEL map
@@ -10050,7 +10050,7 @@
   // int -> QUANT16_SYMM map
   .quant16SymmOperands = {},
   // int -> FLOAT16 map
-  .float16Operands = {{0, {1.0f, -1.0f, 1.0f, -1.0f, 2.0f, -2.0f, 2.0f, -2.0f, 3.0f, -3.0f, 3.0f, -3.0f, 4.0f, -4.0f, 4.0f, -4.0f, 5.0f, -5.0f, 5.0f, -5.0f}}},
+  .float16Operands = {{0, {17.0f, -1.0f, 17.0f, -1.0f, 16.0f, -2.0f, 16.0f, -2.0f, 15.0f, -3.0f, 15.0f, -3.0f, 14.0f, -4.0f, 14.0f, -4.0f, 1.0f, -17.0f, 1.0f, -17.0f}}},
   // int -> BOOL8 map
   .bool8Operands = {},
   // int -> QUANT8_SYMM_PER_CHANNEL map
@@ -10073,7 +10073,7 @@
   // int -> QUANT16_SYMM map
   .quant16SymmOperands = {},
   // int -> FLOAT16 map
-  .float16Operands = {{0, {0.011656231246888638f, 0.6364086270332336f, 0.011656231246888638f, 0.6364086270332336f, 0.03168492019176483f, 0.2341216504573822f, 0.03168492019176483f, 0.2341216504573822f, 0.08612854033708572f, 0.08612854033708572f, 0.08612854033708572f, 0.08612854033708572f, 0.2341216504573822f, 0.03168492019176483f, 0.2341216504573822f, 0.03168492019176483f, 0.6364086270332336f, 0.011656231246888638f, 0.6364086270332336f, 0.011656231246888638f}}},
+  .float16Operands = {{0, {0.6439142227172852f, 0.6439142227172852f, 0.6439142227172852f, 0.6439142227172852f, 0.23688280582427979f, 0.23688280582427979f, 0.23688280582427979f, 0.23688280582427979f, 0.08714431524276733f, 0.08714431524276733f, 0.08714431524276733f, 0.08714431524276733f, 0.03205860033631325f, 0.03205860033631325f, 0.03205860033631325f, 0.03205860033631325f, 7.246299560392799e-08f, 7.246299560392799e-08f, 7.246299560392799e-08f, 7.246299560392799e-08f}}},
   // int -> BOOL8 map
   .bool8Operands = {},
   // int -> QUANT8_SYMM_PER_CHANNEL map
@@ -10107,7 +10107,7 @@
   // int -> QUANT16_SYMM map
   .quant16SymmOperands = {},
   // int -> FLOAT16 map
-  .float16Operands = {{0, {1.0f, -1.0f, 1.0f, -1.0f, 2.0f, -2.0f, 2.0f, -2.0f, 3.0f, -3.0f, 3.0f, -3.0f, 4.0f, -4.0f, 4.0f, -4.0f, 5.0f, -5.0f, 5.0f, -5.0f}}},
+  .float16Operands = {{0, {17.0f, -1.0f, 17.0f, -1.0f, 16.0f, -2.0f, 16.0f, -2.0f, 15.0f, -3.0f, 15.0f, -3.0f, 14.0f, -4.0f, 14.0f, -4.0f, 1.0f, -17.0f, 1.0f, -17.0f}}},
   // int -> BOOL8 map
   .bool8Operands = {},
   // int -> QUANT8_SYMM_PER_CHANNEL map
@@ -10130,7 +10130,7 @@
   // int -> QUANT16_SYMM map
   .quant16SymmOperands = {},
   // int -> FLOAT16 map
-  .float16Operands = {{0, {0.011656231246888638f, 0.6364086270332336f, 0.011656231246888638f, 0.6364086270332336f, 0.03168492019176483f, 0.2341216504573822f, 0.03168492019176483f, 0.2341216504573822f, 0.08612854033708572f, 0.08612854033708572f, 0.08612854033708572f, 0.08612854033708572f, 0.2341216504573822f, 0.03168492019176483f, 0.2341216504573822f, 0.03168492019176483f, 0.6364086270332336f, 0.011656231246888638f, 0.6364086270332336f, 0.011656231246888638f}}},
+  .float16Operands = {{0, {0.6439142227172852f, 0.6439142227172852f, 0.6439142227172852f, 0.6439142227172852f, 0.23688280582427979f, 0.23688280582427979f, 0.23688280582427979f, 0.23688280582427979f, 0.08714431524276733f, 0.08714431524276733f, 0.08714431524276733f, 0.08714431524276733f, 0.03205860033631325f, 0.03205860033631325f, 0.03205860033631325f, 0.03205860033631325f, 7.246299560392799e-08f, 7.246299560392799e-08f, 7.246299560392799e-08f, 7.246299560392799e-08f}}},
   // int -> BOOL8 map
   .bool8Operands = {},
   // int -> QUANT8_SYMM_PER_CHANNEL map
@@ -10164,7 +10164,7 @@
   // int -> QUANT16_SYMM map
   .quant16SymmOperands = {},
   // int -> FLOAT16 map
-  .float16Operands = {{0, {1.0f, -1.0f, 2.0f, -2.0f, 3.0f, -3.0f, 4.0f, -4.0f, 5.0f, -5.0f, 1.0f, -1.0f, 2.0f, -2.0f, 3.0f, -3.0f, 4.0f, -4.0f, 5.0f, -5.0f}}},
+  .float16Operands = {{0, {17.0f, -1.0f, 16.0f, -2.0f, 15.0f, -3.0f, 14.0f, -4.0f, 1.0f, -17.0f, 17.0f, -1.0f, 16.0f, -2.0f, 15.0f, -3.0f, 14.0f, -4.0f, 1.0f, -17.0f}}},
   // int -> BOOL8 map
   .bool8Operands = {},
   // int -> QUANT8_SYMM_PER_CHANNEL map
@@ -10187,7 +10187,7 @@
   // int -> QUANT16_SYMM map
   .quant16SymmOperands = {},
   // int -> FLOAT16 map
-  .float16Operands = {{0, {0.011656231246888638f, 0.6364086270332336f, 0.03168492019176483f, 0.2341216504573822f, 0.08612854033708572f, 0.08612854033708572f, 0.2341216504573822f, 0.03168492019176483f, 0.6364086270332336f, 0.011656231246888638f, 0.011656231246888638f, 0.6364086270332336f, 0.03168492019176483f, 0.2341216504573822f, 0.08612854033708572f, 0.08612854033708572f, 0.2341216504573822f, 0.03168492019176483f, 0.6364086270332336f, 0.011656231246888638f}}},
+  .float16Operands = {{0, {0.6439142227172852f, 0.6439142227172852f, 0.23688280582427979f, 0.23688280582427979f, 0.08714431524276733f, 0.08714431524276733f, 0.03205860033631325f, 0.03205860033631325f, 7.246299560392799e-08f, 7.246299560392799e-08f, 0.6439142227172852f, 0.6439142227172852f, 0.23688280582427979f, 0.23688280582427979f, 0.08714431524276733f, 0.08714431524276733f, 0.03205860033631325f, 0.03205860033631325f, 7.246299560392799e-08f, 7.246299560392799e-08f}}},
   // int -> BOOL8 map
   .bool8Operands = {},
   // int -> QUANT8_SYMM_PER_CHANNEL map
@@ -10221,7 +10221,7 @@
   // int -> QUANT16_SYMM map
   .quant16SymmOperands = {},
   // int -> FLOAT16 map
-  .float16Operands = {{0, {1.0f, -1.0f, 2.0f, -2.0f, 3.0f, -3.0f, 4.0f, -4.0f, 5.0f, -5.0f, 1.0f, -1.0f, 2.0f, -2.0f, 3.0f, -3.0f, 4.0f, -4.0f, 5.0f, -5.0f}}},
+  .float16Operands = {{0, {17.0f, -1.0f, 16.0f, -2.0f, 15.0f, -3.0f, 14.0f, -4.0f, 1.0f, -17.0f, 17.0f, -1.0f, 16.0f, -2.0f, 15.0f, -3.0f, 14.0f, -4.0f, 1.0f, -17.0f}}},
   // int -> BOOL8 map
   .bool8Operands = {},
   // int -> QUANT8_SYMM_PER_CHANNEL map
@@ -10244,7 +10244,7 @@
   // int -> QUANT16_SYMM map
   .quant16SymmOperands = {},
   // int -> FLOAT16 map
-  .float16Operands = {{0, {0.011656231246888638f, 0.6364086270332336f, 0.03168492019176483f, 0.2341216504573822f, 0.08612854033708572f, 0.08612854033708572f, 0.2341216504573822f, 0.03168492019176483f, 0.6364086270332336f, 0.011656231246888638f, 0.011656231246888638f, 0.6364086270332336f, 0.03168492019176483f, 0.2341216504573822f, 0.08612854033708572f, 0.08612854033708572f, 0.2341216504573822f, 0.03168492019176483f, 0.6364086270332336f, 0.011656231246888638f}}},
+  .float16Operands = {{0, {0.6439142227172852f, 0.6439142227172852f, 0.23688280582427979f, 0.23688280582427979f, 0.08714431524276733f, 0.08714431524276733f, 0.03205860033631325f, 0.03205860033631325f, 7.246299560392799e-08f, 7.246299560392799e-08f, 0.6439142227172852f, 0.6439142227172852f, 0.23688280582427979f, 0.23688280582427979f, 0.08714431524276733f, 0.08714431524276733f, 0.03205860033631325f, 0.03205860033631325f, 7.246299560392799e-08f, 7.246299560392799e-08f}}},
   // int -> BOOL8 map
   .bool8Operands = {},
   // int -> QUANT8_SYMM_PER_CHANNEL map
@@ -10278,7 +10278,7 @@
   // int -> QUANT16_SYMM map
   .quant16SymmOperands = {},
   // int -> FLOAT16 map
-  .float16Operands = {{0, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, -1.0f, -2.0f, -3.0f, -4.0f, -5.0f, 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, -1.0f, -2.0f, -3.0f, -4.0f, -5.0f}}},
+  .float16Operands = {{0, {17.0f, 16.0f, 15.0f, 14.0f, 1.0f, -1.0f, -2.0f, -3.0f, -4.0f, -17.0f, 17.0f, 16.0f, 15.0f, 14.0f, 1.0f, -1.0f, -2.0f, -3.0f, -4.0f, -17.0f}}},
   // int -> BOOL8 map
   .bool8Operands = {},
   // int -> QUANT8_SYMM_PER_CHANNEL map
@@ -10301,7 +10301,7 @@
   // int -> QUANT16_SYMM map
   .quant16SymmOperands = {},
   // int -> FLOAT16 map
-  .float16Operands = {{0, {0.011656231246888638f, 0.03168492019176483f, 0.08612854033708572f, 0.2341216504573822f, 0.6364086270332336f, 0.6364086270332336f, 0.2341216504573822f, 0.08612854033708572f, 0.03168492019176483f, 0.011656231246888638f, 0.011656231246888638f, 0.03168492019176483f, 0.08612854033708572f, 0.2341216504573822f, 0.6364086270332336f, 0.6364086270332336f, 0.2341216504573822f, 0.08612854033708572f, 0.03168492019176483f, 0.011656231246888638f}}},
+  .float16Operands = {{0, {0.6439142227172852f, 0.23688280582427979f, 0.08714431524276733f, 0.03205860033631325f, 7.246299560392799e-08f, 0.6439142227172852f, 0.23688280582427979f, 0.08714431524276733f, 0.03205860033631325f, 7.246299560392799e-08f, 0.6439142227172852f, 0.23688280582427979f, 0.08714431524276733f, 0.03205860033631325f, 7.246299560392799e-08f, 0.6439142227172852f, 0.23688280582427979f, 0.08714431524276733f, 0.03205860033631325f, 7.246299560392799e-08f}}},
   // int -> BOOL8 map
   .bool8Operands = {},
   // int -> QUANT8_SYMM_PER_CHANNEL map
@@ -10335,7 +10335,7 @@
   // int -> QUANT16_SYMM map
   .quant16SymmOperands = {},
   // int -> FLOAT16 map
-  .float16Operands = {{0, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, -1.0f, -2.0f, -3.0f, -4.0f, -5.0f, 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, -1.0f, -2.0f, -3.0f, -4.0f, -5.0f}}},
+  .float16Operands = {{0, {17.0f, 16.0f, 15.0f, 14.0f, 1.0f, -1.0f, -2.0f, -3.0f, -4.0f, -17.0f, 17.0f, 16.0f, 15.0f, 14.0f, 1.0f, -1.0f, -2.0f, -3.0f, -4.0f, -17.0f}}},
   // int -> BOOL8 map
   .bool8Operands = {},
   // int -> QUANT8_SYMM_PER_CHANNEL map
@@ -10358,7 +10358,7 @@
   // int -> QUANT16_SYMM map
   .quant16SymmOperands = {},
   // int -> FLOAT16 map
-  .float16Operands = {{0, {0.011656231246888638f, 0.03168492019176483f, 0.08612854033708572f, 0.2341216504573822f, 0.6364086270332336f, 0.6364086270332336f, 0.2341216504573822f, 0.08612854033708572f, 0.03168492019176483f, 0.011656231246888638f, 0.011656231246888638f, 0.03168492019176483f, 0.08612854033708572f, 0.2341216504573822f, 0.6364086270332336f, 0.6364086270332336f, 0.2341216504573822f, 0.08612854033708572f, 0.03168492019176483f, 0.011656231246888638f}}},
+  .float16Operands = {{0, {0.6439142227172852f, 0.23688280582427979f, 0.08714431524276733f, 0.03205860033631325f, 7.246299560392799e-08f, 0.6439142227172852f, 0.23688280582427979f, 0.08714431524276733f, 0.03205860033631325f, 7.246299560392799e-08f, 0.6439142227172852f, 0.23688280582427979f, 0.08714431524276733f, 0.03205860033631325f, 7.246299560392799e-08f, 0.6439142227172852f, 0.23688280582427979f, 0.08714431524276733f, 0.03205860033631325f, 7.246299560392799e-08f}}},
   // int -> BOOL8 map
   .bool8Operands = {},
   // int -> QUANT8_SYMM_PER_CHANNEL map
@@ -10392,7 +10392,7 @@
   // int -> QUANT16_SYMM map
   .quant16SymmOperands = {},
   // int -> FLOAT16 map
-  .float16Operands = {{0, {1.0f, -1.0f, 2.0f, -2.0f, 3.0f, -3.0f, 4.0f, -4.0f, 5.0f, -5.0f}}},
+  .float16Operands = {{0, {17.0f, -1.0f, 16.0f, -2.0f, 15.0f, -3.0f, 14.0f, -4.0f, 1.0f, -17.0f}}},
   // int -> BOOL8 map
   .bool8Operands = {},
   // int -> QUANT8_SYMM_PER_CHANNEL map
@@ -10415,7 +10415,7 @@
   // int -> QUANT16_SYMM map
   .quant16SymmOperands = {},
   // int -> FLOAT16 map
-  .float16Operands = {{0, {0.011656231246888638f, 0.6364086270332336f, 0.03168492019176483f, 0.2341216504573822f, 0.08612854033708572f, 0.08612854033708572f, 0.2341216504573822f, 0.03168492019176483f, 0.6364086270332336f, 0.011656231246888638f}}},
+  .float16Operands = {{0, {0.6439142227172852f, 0.6439142227172852f, 0.23688280582427979f, 0.23688280582427979f, 0.08714431524276733f, 0.08714431524276733f, 0.03205860033631325f, 0.03205860033631325f, 7.246299560392799e-08f, 7.246299560392799e-08f}}},
   // int -> BOOL8 map
   .bool8Operands = {},
   // int -> QUANT8_SYMM_PER_CHANNEL map
@@ -10449,7 +10449,7 @@
   // int -> QUANT16_SYMM map
   .quant16SymmOperands = {},
   // int -> FLOAT16 map
-  .float16Operands = {{0, {1.0f, -1.0f, 2.0f, -2.0f, 3.0f, -3.0f, 4.0f, -4.0f, 5.0f, -5.0f}}},
+  .float16Operands = {{0, {17.0f, -1.0f, 16.0f, -2.0f, 15.0f, -3.0f, 14.0f, -4.0f, 1.0f, -17.0f}}},
   // int -> BOOL8 map
   .bool8Operands = {},
   // int -> QUANT8_SYMM_PER_CHANNEL map
@@ -10472,7 +10472,7 @@
   // int -> QUANT16_SYMM map
   .quant16SymmOperands = {},
   // int -> FLOAT16 map
-  .float16Operands = {{0, {0.011656231246888638f, 0.6364086270332336f, 0.03168492019176483f, 0.2341216504573822f, 0.08612854033708572f, 0.08612854033708572f, 0.2341216504573822f, 0.03168492019176483f, 0.6364086270332336f, 0.011656231246888638f}}},
+  .float16Operands = {{0, {0.6439142227172852f, 0.6439142227172852f, 0.23688280582427979f, 0.23688280582427979f, 0.08714431524276733f, 0.08714431524276733f, 0.03205860033631325f, 0.03205860033631325f, 7.246299560392799e-08f, 7.246299560392799e-08f}}},
   // int -> BOOL8 map
   .bool8Operands = {},
   // int -> QUANT8_SYMM_PER_CHANNEL map
@@ -10506,7 +10506,7 @@
   // int -> QUANT16_SYMM map
   .quant16SymmOperands = {},
   // int -> FLOAT16 map
-  .float16Operands = {{0, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, -1.0f, -2.0f, -3.0f, -4.0f, -5.0f}}},
+  .float16Operands = {{0, {17.0f, 16.0f, 15.0f, 14.0f, 1.0f, -1.0f, -2.0f, -3.0f, -4.0f, -17.0f}}},
   // int -> BOOL8 map
   .bool8Operands = {},
   // int -> QUANT8_SYMM_PER_CHANNEL map
@@ -10529,7 +10529,7 @@
   // int -> QUANT16_SYMM map
   .quant16SymmOperands = {},
   // int -> FLOAT16 map
-  .float16Operands = {{0, {0.011656231246888638f, 0.03168492019176483f, 0.08612854033708572f, 0.2341216504573822f, 0.6364086270332336f, 0.6364086270332336f, 0.2341216504573822f, 0.08612854033708572f, 0.03168492019176483f, 0.011656231246888638f}}},
+  .float16Operands = {{0, {0.6439142227172852f, 0.23688280582427979f, 0.08714431524276733f, 0.03205860033631325f, 7.246299560392799e-08f, 0.6439142227172852f, 0.23688280582427979f, 0.08714431524276733f, 0.03205860033631325f, 7.246299560392799e-08f}}},
   // int -> BOOL8 map
   .bool8Operands = {},
   // int -> QUANT8_SYMM_PER_CHANNEL map
@@ -10563,7 +10563,7 @@
   // int -> QUANT16_SYMM map
   .quant16SymmOperands = {},
   // int -> FLOAT16 map
-  .float16Operands = {{0, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, -1.0f, -2.0f, -3.0f, -4.0f, -5.0f}}},
+  .float16Operands = {{0, {17.0f, 16.0f, 15.0f, 14.0f, 1.0f, -1.0f, -2.0f, -3.0f, -4.0f, -17.0f}}},
   // int -> BOOL8 map
   .bool8Operands = {},
   // int -> QUANT8_SYMM_PER_CHANNEL map
@@ -10586,7 +10586,7 @@
   // int -> QUANT16_SYMM map
   .quant16SymmOperands = {},
   // int -> FLOAT16 map
-  .float16Operands = {{0, {0.011656231246888638f, 0.03168492019176483f, 0.08612854033708572f, 0.2341216504573822f, 0.6364086270332336f, 0.6364086270332336f, 0.2341216504573822f, 0.08612854033708572f, 0.03168492019176483f, 0.011656231246888638f}}},
+  .float16Operands = {{0, {0.6439142227172852f, 0.23688280582427979f, 0.08714431524276733f, 0.03205860033631325f, 7.246299560392799e-08f, 0.6439142227172852f, 0.23688280582427979f, 0.08714431524276733f, 0.03205860033631325f, 7.246299560392799e-08f}}},
   // int -> BOOL8 map
   .bool8Operands = {},
   // int -> QUANT8_SYMM_PER_CHANNEL map
@@ -10620,7 +10620,7 @@
   // int -> QUANT16_SYMM map
   .quant16SymmOperands = {},
   // int -> FLOAT16 map
-  .float16Operands = {{0, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f}}},
+  .float16Operands = {{0, {17.0f, 16.0f, 15.0f, 14.0f, 1.0f}}},
   // int -> BOOL8 map
   .bool8Operands = {},
   // int -> QUANT8_SYMM_PER_CHANNEL map
@@ -10643,7 +10643,7 @@
   // int -> QUANT16_SYMM map
   .quant16SymmOperands = {},
   // int -> FLOAT16 map
-  .float16Operands = {{0, {0.011656231246888638f, 0.03168492019176483f, 0.08612854033708572f, 0.2341216504573822f, 0.6364086270332336f}}},
+  .float16Operands = {{0, {0.6439142227172852f, 0.23688280582427979f, 0.08714431524276733f, 0.03205860033631325f, 7.246299560392799e-08f}}},
   // int -> BOOL8 map
   .bool8Operands = {},
   // int -> QUANT8_SYMM_PER_CHANNEL map
@@ -10677,7 +10677,7 @@
   // int -> QUANT16_SYMM map
   .quant16SymmOperands = {},
   // int -> FLOAT16 map
-  .float16Operands = {{0, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f}}},
+  .float16Operands = {{0, {17.0f, 16.0f, 15.0f, 14.0f, 1.0f}}},
   // int -> BOOL8 map
   .bool8Operands = {},
   // int -> QUANT8_SYMM_PER_CHANNEL map
@@ -10700,7 +10700,7 @@
   // int -> QUANT16_SYMM map
   .quant16SymmOperands = {},
   // int -> FLOAT16 map
-  .float16Operands = {{0, {0.011656231246888638f, 0.03168492019176483f, 0.08612854033708572f, 0.2341216504573822f, 0.6364086270332336f}}},
+  .float16Operands = {{0, {0.6439142227172852f, 0.23688280582427979f, 0.08714431524276733f, 0.03205860033631325f, 7.246299560392799e-08f}}},
   // int -> BOOL8 map
   .bool8Operands = {},
   // int -> QUANT8_SYMM_PER_CHANNEL map
@@ -10730,7 +10730,7 @@
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
-  .quant8AsymmOperands = {{0, {132, 124, 132, 124, 132, 124, 132, 124, 136, 120, 136, 120, 136, 120, 136, 120, 140, 116, 140, 116, 140, 116, 140, 116, 144, 112, 144, 112, 144, 112, 144, 112, 148, 108, 148, 108, 148, 108, 148, 108}}},
+  .quant8AsymmOperands = {{0, {196, 124, 196, 124, 196, 124, 196, 124, 192, 120, 192, 120, 192, 120, 192, 120, 188, 116, 188, 116, 188, 116, 188, 116, 184, 112, 184, 112, 184, 112, 184, 112, 132, 60, 132, 60, 132, 60, 132, 60}}},
   // int -> QUANT16_SYMM map
   .quant16SymmOperands = {},
   // int -> FLOAT16 map
@@ -10753,7 +10753,7 @@
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
-  .quant8AsymmOperands = {{0, {3, 163, 3, 163, 3, 163, 3, 163, 8, 60, 8, 60, 8, 60, 8, 60, 22, 22, 22, 22, 22, 22, 22, 22, 60, 8, 60, 8, 60, 8, 60, 8, 163, 3, 163, 3, 163, 3, 163, 3}}},
+  .quant8AsymmOperands = {{0, {165, 165, 165, 165, 165, 165, 165, 165, 61, 61, 61, 61, 61, 61, 61, 61, 22, 22, 22, 22, 22, 22, 22, 22, 8, 8, 8, 8, 8, 8, 8, 8, 0, 0, 0, 0, 0, 0, 0, 0}}},
   // int -> QUANT16_SYMM map
   .quant16SymmOperands = {},
   // int -> FLOAT16 map
@@ -10787,7 +10787,7 @@
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
-  .quant8AsymmOperands = {{0, {132, 124, 132, 124, 132, 124, 132, 124, 136, 120, 136, 120, 136, 120, 136, 120, 140, 116, 140, 116, 140, 116, 140, 116, 144, 112, 144, 112, 144, 112, 144, 112, 148, 108, 148, 108, 148, 108, 148, 108}}},
+  .quant8AsymmOperands = {{0, {196, 124, 196, 124, 196, 124, 196, 124, 192, 120, 192, 120, 192, 120, 192, 120, 188, 116, 188, 116, 188, 116, 188, 116, 184, 112, 184, 112, 184, 112, 184, 112, 132, 60, 132, 60, 132, 60, 132, 60}}},
   // int -> QUANT16_SYMM map
   .quant16SymmOperands = {},
   // int -> FLOAT16 map
@@ -10810,7 +10810,7 @@
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
-  .quant8AsymmOperands = {{0, {3, 163, 3, 163, 3, 163, 3, 163, 8, 60, 8, 60, 8, 60, 8, 60, 22, 22, 22, 22, 22, 22, 22, 22, 60, 8, 60, 8, 60, 8, 60, 8, 163, 3, 163, 3, 163, 3, 163, 3}}},
+  .quant8AsymmOperands = {{0, {165, 165, 165, 165, 165, 165, 165, 165, 61, 61, 61, 61, 61, 61, 61, 61, 22, 22, 22, 22, 22, 22, 22, 22, 8, 8, 8, 8, 8, 8, 8, 8, 0, 0, 0, 0, 0, 0, 0, 0}}},
   // int -> QUANT16_SYMM map
   .quant16SymmOperands = {},
   // int -> FLOAT16 map
@@ -10844,7 +10844,7 @@
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
-  .quant8AsymmOperands = {{0, {132, 124, 132, 124, 136, 120, 136, 120, 140, 116, 140, 116, 144, 112, 144, 112, 148, 108, 148, 108, 132, 124, 132, 124, 136, 120, 136, 120, 140, 116, 140, 116, 144, 112, 144, 112, 148, 108, 148, 108}}},
+  .quant8AsymmOperands = {{0, {196, 124, 196, 124, 192, 120, 192, 120, 188, 116, 188, 116, 184, 112, 184, 112, 132, 60, 132, 60, 196, 124, 196, 124, 192, 120, 192, 120, 188, 116, 188, 116, 184, 112, 184, 112, 132, 60, 132, 60}}},
   // int -> QUANT16_SYMM map
   .quant16SymmOperands = {},
   // int -> FLOAT16 map
@@ -10867,7 +10867,7 @@
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
-  .quant8AsymmOperands = {{0, {3, 163, 3, 163, 8, 60, 8, 60, 22, 22, 22, 22, 60, 8, 60, 8, 163, 3, 163, 3, 3, 163, 3, 163, 8, 60, 8, 60, 22, 22, 22, 22, 60, 8, 60, 8, 163, 3, 163, 3}}},
+  .quant8AsymmOperands = {{0, {165, 165, 165, 165, 61, 61, 61, 61, 22, 22, 22, 22, 8, 8, 8, 8, 0, 0, 0, 0, 165, 165, 165, 165, 61, 61, 61, 61, 22, 22, 22, 22, 8, 8, 8, 8, 0, 0, 0, 0}}},
   // int -> QUANT16_SYMM map
   .quant16SymmOperands = {},
   // int -> FLOAT16 map
@@ -10901,7 +10901,7 @@
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
-  .quant8AsymmOperands = {{0, {132, 124, 132, 124, 136, 120, 136, 120, 140, 116, 140, 116, 144, 112, 144, 112, 148, 108, 148, 108, 132, 124, 132, 124, 136, 120, 136, 120, 140, 116, 140, 116, 144, 112, 144, 112, 148, 108, 148, 108}}},
+  .quant8AsymmOperands = {{0, {196, 124, 196, 124, 192, 120, 192, 120, 188, 116, 188, 116, 184, 112, 184, 112, 132, 60, 132, 60, 196, 124, 196, 124, 192, 120, 192, 120, 188, 116, 188, 116, 184, 112, 184, 112, 132, 60, 132, 60}}},
   // int -> QUANT16_SYMM map
   .quant16SymmOperands = {},
   // int -> FLOAT16 map
@@ -10924,7 +10924,7 @@
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
-  .quant8AsymmOperands = {{0, {3, 163, 3, 163, 8, 60, 8, 60, 22, 22, 22, 22, 60, 8, 60, 8, 163, 3, 163, 3, 3, 163, 3, 163, 8, 60, 8, 60, 22, 22, 22, 22, 60, 8, 60, 8, 163, 3, 163, 3}}},
+  .quant8AsymmOperands = {{0, {165, 165, 165, 165, 61, 61, 61, 61, 22, 22, 22, 22, 8, 8, 8, 8, 0, 0, 0, 0, 165, 165, 165, 165, 61, 61, 61, 61, 22, 22, 22, 22, 8, 8, 8, 8, 0, 0, 0, 0}}},
   // int -> QUANT16_SYMM map
   .quant16SymmOperands = {},
   // int -> FLOAT16 map
@@ -10958,7 +10958,7 @@
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
-  .quant8AsymmOperands = {{0, {132, 124, 136, 120, 140, 116, 144, 112, 148, 108, 132, 124, 136, 120, 140, 116, 144, 112, 148, 108, 132, 124, 136, 120, 140, 116, 144, 112, 148, 108, 132, 124, 136, 120, 140, 116, 144, 112, 148, 108}}},
+  .quant8AsymmOperands = {{0, {196, 124, 192, 120, 188, 116, 184, 112, 132, 60, 196, 124, 192, 120, 188, 116, 184, 112, 132, 60, 196, 124, 192, 120, 188, 116, 184, 112, 132, 60, 196, 124, 192, 120, 188, 116, 184, 112, 132, 60}}},
   // int -> QUANT16_SYMM map
   .quant16SymmOperands = {},
   // int -> FLOAT16 map
@@ -10981,7 +10981,7 @@
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
-  .quant8AsymmOperands = {{0, {3, 163, 8, 60, 22, 22, 60, 8, 163, 3, 3, 163, 8, 60, 22, 22, 60, 8, 163, 3, 3, 163, 8, 60, 22, 22, 60, 8, 163, 3, 3, 163, 8, 60, 22, 22, 60, 8, 163, 3}}},
+  .quant8AsymmOperands = {{0, {165, 165, 61, 61, 22, 22, 8, 8, 0, 0, 165, 165, 61, 61, 22, 22, 8, 8, 0, 0, 165, 165, 61, 61, 22, 22, 8, 8, 0, 0, 165, 165, 61, 61, 22, 22, 8, 8, 0, 0}}},
   // int -> QUANT16_SYMM map
   .quant16SymmOperands = {},
   // int -> FLOAT16 map
@@ -11015,7 +11015,7 @@
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
-  .quant8AsymmOperands = {{0, {132, 124, 136, 120, 140, 116, 144, 112, 148, 108, 132, 124, 136, 120, 140, 116, 144, 112, 148, 108, 132, 124, 136, 120, 140, 116, 144, 112, 148, 108, 132, 124, 136, 120, 140, 116, 144, 112, 148, 108}}},
+  .quant8AsymmOperands = {{0, {196, 124, 192, 120, 188, 116, 184, 112, 132, 60, 196, 124, 192, 120, 188, 116, 184, 112, 132, 60, 196, 124, 192, 120, 188, 116, 184, 112, 132, 60, 196, 124, 192, 120, 188, 116, 184, 112, 132, 60}}},
   // int -> QUANT16_SYMM map
   .quant16SymmOperands = {},
   // int -> FLOAT16 map
@@ -11038,7 +11038,7 @@
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
-  .quant8AsymmOperands = {{0, {3, 163, 8, 60, 22, 22, 60, 8, 163, 3, 3, 163, 8, 60, 22, 22, 60, 8, 163, 3, 3, 163, 8, 60, 22, 22, 60, 8, 163, 3, 3, 163, 8, 60, 22, 22, 60, 8, 163, 3}}},
+  .quant8AsymmOperands = {{0, {165, 165, 61, 61, 22, 22, 8, 8, 0, 0, 165, 165, 61, 61, 22, 22, 8, 8, 0, 0, 165, 165, 61, 61, 22, 22, 8, 8, 0, 0, 165, 165, 61, 61, 22, 22, 8, 8, 0, 0}}},
   // int -> QUANT16_SYMM map
   .quant16SymmOperands = {},
   // int -> FLOAT16 map
@@ -11072,7 +11072,7 @@
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
-  .quant8AsymmOperands = {{0, {132, 136, 140, 144, 148, 124, 120, 116, 112, 108, 132, 136, 140, 144, 148, 124, 120, 116, 112, 108, 132, 136, 140, 144, 148, 124, 120, 116, 112, 108, 132, 136, 140, 144, 148, 124, 120, 116, 112, 108}}},
+  .quant8AsymmOperands = {{0, {196, 192, 188, 184, 132, 124, 120, 116, 112, 60, 196, 192, 188, 184, 132, 124, 120, 116, 112, 60, 196, 192, 188, 184, 132, 124, 120, 116, 112, 60, 196, 192, 188, 184, 132, 124, 120, 116, 112, 60}}},
   // int -> QUANT16_SYMM map
   .quant16SymmOperands = {},
   // int -> FLOAT16 map
@@ -11095,7 +11095,7 @@
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
-  .quant8AsymmOperands = {{0, {3, 8, 22, 60, 163, 163, 60, 22, 8, 3, 3, 8, 22, 60, 163, 163, 60, 22, 8, 3, 3, 8, 22, 60, 163, 163, 60, 22, 8, 3, 3, 8, 22, 60, 163, 163, 60, 22, 8, 3}}},
+  .quant8AsymmOperands = {{0, {165, 61, 22, 8, 0, 165, 61, 22, 8, 0, 165, 61, 22, 8, 0, 165, 61, 22, 8, 0, 165, 61, 22, 8, 0, 165, 61, 22, 8, 0, 165, 61, 22, 8, 0, 165, 61, 22, 8, 0}}},
   // int -> QUANT16_SYMM map
   .quant16SymmOperands = {},
   // int -> FLOAT16 map
@@ -11129,7 +11129,7 @@
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
-  .quant8AsymmOperands = {{0, {132, 136, 140, 144, 148, 124, 120, 116, 112, 108, 132, 136, 140, 144, 148, 124, 120, 116, 112, 108, 132, 136, 140, 144, 148, 124, 120, 116, 112, 108, 132, 136, 140, 144, 148, 124, 120, 116, 112, 108}}},
+  .quant8AsymmOperands = {{0, {196, 192, 188, 184, 132, 124, 120, 116, 112, 60, 196, 192, 188, 184, 132, 124, 120, 116, 112, 60, 196, 192, 188, 184, 132, 124, 120, 116, 112, 60, 196, 192, 188, 184, 132, 124, 120, 116, 112, 60}}},
   // int -> QUANT16_SYMM map
   .quant16SymmOperands = {},
   // int -> FLOAT16 map
@@ -11152,7 +11152,7 @@
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
-  .quant8AsymmOperands = {{0, {3, 8, 22, 60, 163, 163, 60, 22, 8, 3, 3, 8, 22, 60, 163, 163, 60, 22, 8, 3, 3, 8, 22, 60, 163, 163, 60, 22, 8, 3, 3, 8, 22, 60, 163, 163, 60, 22, 8, 3}}},
+  .quant8AsymmOperands = {{0, {165, 61, 22, 8, 0, 165, 61, 22, 8, 0, 165, 61, 22, 8, 0, 165, 61, 22, 8, 0, 165, 61, 22, 8, 0, 165, 61, 22, 8, 0, 165, 61, 22, 8, 0, 165, 61, 22, 8, 0}}},
   // int -> QUANT16_SYMM map
   .quant16SymmOperands = {},
   // int -> FLOAT16 map
@@ -11186,7 +11186,7 @@
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
-  .quant8AsymmOperands = {{0, {132, 124, 132, 124, 136, 120, 136, 120, 140, 116, 140, 116, 144, 112, 144, 112, 148, 108, 148, 108}}},
+  .quant8AsymmOperands = {{0, {196, 124, 196, 124, 192, 120, 192, 120, 188, 116, 188, 116, 184, 112, 184, 112, 132, 60, 132, 60}}},
   // int -> QUANT16_SYMM map
   .quant16SymmOperands = {},
   // int -> FLOAT16 map
@@ -11209,7 +11209,7 @@
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
-  .quant8AsymmOperands = {{0, {3, 163, 3, 163, 8, 60, 8, 60, 22, 22, 22, 22, 60, 8, 60, 8, 163, 3, 163, 3}}},
+  .quant8AsymmOperands = {{0, {165, 165, 165, 165, 61, 61, 61, 61, 22, 22, 22, 22, 8, 8, 8, 8, 0, 0, 0, 0}}},
   // int -> QUANT16_SYMM map
   .quant16SymmOperands = {},
   // int -> FLOAT16 map
@@ -11243,7 +11243,7 @@
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
-  .quant8AsymmOperands = {{0, {132, 124, 132, 124, 136, 120, 136, 120, 140, 116, 140, 116, 144, 112, 144, 112, 148, 108, 148, 108}}},
+  .quant8AsymmOperands = {{0, {196, 124, 196, 124, 192, 120, 192, 120, 188, 116, 188, 116, 184, 112, 184, 112, 132, 60, 132, 60}}},
   // int -> QUANT16_SYMM map
   .quant16SymmOperands = {},
   // int -> FLOAT16 map
@@ -11266,7 +11266,7 @@
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
-  .quant8AsymmOperands = {{0, {3, 163, 3, 163, 8, 60, 8, 60, 22, 22, 22, 22, 60, 8, 60, 8, 163, 3, 163, 3}}},
+  .quant8AsymmOperands = {{0, {165, 165, 165, 165, 61, 61, 61, 61, 22, 22, 22, 22, 8, 8, 8, 8, 0, 0, 0, 0}}},
   // int -> QUANT16_SYMM map
   .quant16SymmOperands = {},
   // int -> FLOAT16 map
@@ -11300,7 +11300,7 @@
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
-  .quant8AsymmOperands = {{0, {132, 124, 136, 120, 140, 116, 144, 112, 148, 108, 132, 124, 136, 120, 140, 116, 144, 112, 148, 108}}},
+  .quant8AsymmOperands = {{0, {196, 124, 192, 120, 188, 116, 184, 112, 132, 60, 196, 124, 192, 120, 188, 116, 184, 112, 132, 60}}},
   // int -> QUANT16_SYMM map
   .quant16SymmOperands = {},
   // int -> FLOAT16 map
@@ -11323,7 +11323,7 @@
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
-  .quant8AsymmOperands = {{0, {3, 163, 8, 60, 22, 22, 60, 8, 163, 3, 3, 163, 8, 60, 22, 22, 60, 8, 163, 3}}},
+  .quant8AsymmOperands = {{0, {165, 165, 61, 61, 22, 22, 8, 8, 0, 0, 165, 165, 61, 61, 22, 22, 8, 8, 0, 0}}},
   // int -> QUANT16_SYMM map
   .quant16SymmOperands = {},
   // int -> FLOAT16 map
@@ -11357,7 +11357,7 @@
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
-  .quant8AsymmOperands = {{0, {132, 124, 136, 120, 140, 116, 144, 112, 148, 108, 132, 124, 136, 120, 140, 116, 144, 112, 148, 108}}},
+  .quant8AsymmOperands = {{0, {196, 124, 192, 120, 188, 116, 184, 112, 132, 60, 196, 124, 192, 120, 188, 116, 184, 112, 132, 60}}},
   // int -> QUANT16_SYMM map
   .quant16SymmOperands = {},
   // int -> FLOAT16 map
@@ -11380,7 +11380,7 @@
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
-  .quant8AsymmOperands = {{0, {3, 163, 8, 60, 22, 22, 60, 8, 163, 3, 3, 163, 8, 60, 22, 22, 60, 8, 163, 3}}},
+  .quant8AsymmOperands = {{0, {165, 165, 61, 61, 22, 22, 8, 8, 0, 0, 165, 165, 61, 61, 22, 22, 8, 8, 0, 0}}},
   // int -> QUANT16_SYMM map
   .quant16SymmOperands = {},
   // int -> FLOAT16 map
@@ -11414,7 +11414,7 @@
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
-  .quant8AsymmOperands = {{0, {132, 136, 140, 144, 148, 124, 120, 116, 112, 108, 132, 136, 140, 144, 148, 124, 120, 116, 112, 108}}},
+  .quant8AsymmOperands = {{0, {196, 192, 188, 184, 132, 124, 120, 116, 112, 60, 196, 192, 188, 184, 132, 124, 120, 116, 112, 60}}},
   // int -> QUANT16_SYMM map
   .quant16SymmOperands = {},
   // int -> FLOAT16 map
@@ -11437,7 +11437,7 @@
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
-  .quant8AsymmOperands = {{0, {3, 8, 22, 60, 163, 163, 60, 22, 8, 3, 3, 8, 22, 60, 163, 163, 60, 22, 8, 3}}},
+  .quant8AsymmOperands = {{0, {165, 61, 22, 8, 0, 165, 61, 22, 8, 0, 165, 61, 22, 8, 0, 165, 61, 22, 8, 0}}},
   // int -> QUANT16_SYMM map
   .quant16SymmOperands = {},
   // int -> FLOAT16 map
@@ -11471,7 +11471,7 @@
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
-  .quant8AsymmOperands = {{0, {132, 136, 140, 144, 148, 124, 120, 116, 112, 108, 132, 136, 140, 144, 148, 124, 120, 116, 112, 108}}},
+  .quant8AsymmOperands = {{0, {196, 192, 188, 184, 132, 124, 120, 116, 112, 60, 196, 192, 188, 184, 132, 124, 120, 116, 112, 60}}},
   // int -> QUANT16_SYMM map
   .quant16SymmOperands = {},
   // int -> FLOAT16 map
@@ -11494,7 +11494,7 @@
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
-  .quant8AsymmOperands = {{0, {3, 8, 22, 60, 163, 163, 60, 22, 8, 3, 3, 8, 22, 60, 163, 163, 60, 22, 8, 3}}},
+  .quant8AsymmOperands = {{0, {165, 61, 22, 8, 0, 165, 61, 22, 8, 0, 165, 61, 22, 8, 0, 165, 61, 22, 8, 0}}},
   // int -> QUANT16_SYMM map
   .quant16SymmOperands = {},
   // int -> FLOAT16 map
@@ -11528,7 +11528,7 @@
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
-  .quant8AsymmOperands = {{0, {132, 124, 136, 120, 140, 116, 144, 112, 148, 108}}},
+  .quant8AsymmOperands = {{0, {196, 124, 192, 120, 188, 116, 184, 112, 132, 60}}},
   // int -> QUANT16_SYMM map
   .quant16SymmOperands = {},
   // int -> FLOAT16 map
@@ -11551,7 +11551,7 @@
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
-  .quant8AsymmOperands = {{0, {3, 163, 8, 60, 22, 22, 60, 8, 163, 3}}},
+  .quant8AsymmOperands = {{0, {165, 165, 61, 61, 22, 22, 8, 8, 0, 0}}},
   // int -> QUANT16_SYMM map
   .quant16SymmOperands = {},
   // int -> FLOAT16 map
@@ -11585,7 +11585,7 @@
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
-  .quant8AsymmOperands = {{0, {132, 124, 136, 120, 140, 116, 144, 112, 148, 108}}},
+  .quant8AsymmOperands = {{0, {196, 124, 192, 120, 188, 116, 184, 112, 132, 60}}},
   // int -> QUANT16_SYMM map
   .quant16SymmOperands = {},
   // int -> FLOAT16 map
@@ -11608,7 +11608,7 @@
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
-  .quant8AsymmOperands = {{0, {3, 163, 8, 60, 22, 22, 60, 8, 163, 3}}},
+  .quant8AsymmOperands = {{0, {165, 165, 61, 61, 22, 22, 8, 8, 0, 0}}},
   // int -> QUANT16_SYMM map
   .quant16SymmOperands = {},
   // int -> FLOAT16 map
@@ -11642,7 +11642,7 @@
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
-  .quant8AsymmOperands = {{0, {132, 136, 140, 144, 148, 124, 120, 116, 112, 108}}},
+  .quant8AsymmOperands = {{0, {196, 192, 188, 184, 132, 124, 120, 116, 112, 60}}},
   // int -> QUANT16_SYMM map
   .quant16SymmOperands = {},
   // int -> FLOAT16 map
@@ -11665,7 +11665,7 @@
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
-  .quant8AsymmOperands = {{0, {3, 8, 22, 60, 163, 163, 60, 22, 8, 3}}},
+  .quant8AsymmOperands = {{0, {165, 61, 22, 8, 0, 165, 61, 22, 8, 0}}},
   // int -> QUANT16_SYMM map
   .quant16SymmOperands = {},
   // int -> FLOAT16 map
@@ -11699,7 +11699,7 @@
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
-  .quant8AsymmOperands = {{0, {132, 136, 140, 144, 148, 124, 120, 116, 112, 108}}},
+  .quant8AsymmOperands = {{0, {196, 192, 188, 184, 132, 124, 120, 116, 112, 60}}},
   // int -> QUANT16_SYMM map
   .quant16SymmOperands = {},
   // int -> FLOAT16 map
@@ -11722,7 +11722,7 @@
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
-  .quant8AsymmOperands = {{0, {3, 8, 22, 60, 163, 163, 60, 22, 8, 3}}},
+  .quant8AsymmOperands = {{0, {165, 61, 22, 8, 0, 165, 61, 22, 8, 0}}},
   // int -> QUANT16_SYMM map
   .quant16SymmOperands = {},
   // int -> FLOAT16 map
@@ -11756,7 +11756,7 @@
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
-  .quant8AsymmOperands = {{0, {132, 136, 140, 144, 148}}},
+  .quant8AsymmOperands = {{0, {196, 192, 188, 184, 132}}},
   // int -> QUANT16_SYMM map
   .quant16SymmOperands = {},
   // int -> FLOAT16 map
@@ -11779,7 +11779,7 @@
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
-  .quant8AsymmOperands = {{0, {3, 8, 22, 60, 163}}},
+  .quant8AsymmOperands = {{0, {165, 61, 22, 8, 0}}},
   // int -> QUANT16_SYMM map
   .quant16SymmOperands = {},
   // int -> FLOAT16 map
@@ -11813,7 +11813,7 @@
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
-  .quant8AsymmOperands = {{0, {132, 136, 140, 144, 148}}},
+  .quant8AsymmOperands = {{0, {196, 192, 188, 184, 132}}},
   // int -> QUANT16_SYMM map
   .quant16SymmOperands = {},
   // int -> FLOAT16 map
@@ -11836,7 +11836,7 @@
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
-  .quant8AsymmOperands = {{0, {3, 8, 22, 60, 163}}},
+  .quant8AsymmOperands = {{0, {165, 61, 22, 8, 0}}},
   // int -> QUANT16_SYMM map
   .quant16SymmOperands = {},
   // int -> FLOAT16 map
diff --git a/nn/runtime/test/generated/examples/space_to_batch_v1_2.example.cpp b/nn/runtime/test/generated/examples/space_to_batch_v1_2.example.cpp
index 92eac15..e8d8cd2 100644
--- a/nn/runtime/test/generated/examples/space_to_batch_v1_2.example.cpp
+++ b/nn/runtime/test/generated/examples/space_to_batch_v1_2.example.cpp
@@ -2009,7 +2009,7 @@
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
-  .quant8AsymmOperands = {{0, {2, 4, 6, 8, 10, 12, 14, 16, 18, 20}}},
+  .quant8AsymmOperands = {{0, {130, 132, 134, 136, 138, 140, 142, 144, 146, 148}}},
   // int -> QUANT16_SYMM map
   .quant16SymmOperands = {},
   // int -> FLOAT16 map
@@ -2032,7 +2032,7 @@
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
-  .quant8AsymmOperands = {{0, {0, 0, 0, 10, 0, 0, 0, 12, 0, 2, 0, 14, 0, 4, 0, 16, 0, 6, 0, 18, 0, 8, 0, 20}}},
+  .quant8AsymmOperands = {{0, {128, 128, 128, 138, 128, 128, 128, 140, 128, 130, 128, 142, 128, 132, 128, 144, 128, 134, 128, 146, 128, 136, 128, 148}}},
   // int -> QUANT16_SYMM map
   .quant16SymmOperands = {},
   // int -> FLOAT16 map
@@ -2237,7 +2237,7 @@
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
-  .quant8AsymmOperands = {{0, {2, 4, 6, 8, 10, 12, 14, 16, 18, 20}}},
+  .quant8AsymmOperands = {{0, {130, 132, 134, 136, 138, 140, 142, 144, 146, 148}}},
   // int -> QUANT16_SYMM map
   .quant16SymmOperands = {},
   // int -> FLOAT16 map
@@ -2260,7 +2260,7 @@
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
-  .quant8AsymmOperands = {{0, {0, 0, 0, 10, 0, 0, 0, 12, 0, 2, 0, 14, 0, 4, 0, 16, 0, 6, 0, 18, 0, 8, 0, 20}}},
+  .quant8AsymmOperands = {{0, {128, 128, 128, 138, 128, 128, 128, 140, 128, 130, 128, 142, 128, 132, 128, 144, 128, 134, 128, 146, 128, 136, 128, 148}}},
   // int -> QUANT16_SYMM map
   .quant16SymmOperands = {},
   // int -> FLOAT16 map
@@ -2465,7 +2465,7 @@
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
-  .quant8AsymmOperands = {{0, {2, 4, 6, 8, 10, 12, 14, 16, 18, 20}}},
+  .quant8AsymmOperands = {{0, {130, 132, 134, 136, 138, 140, 142, 144, 146, 148}}},
   // int -> QUANT16_SYMM map
   .quant16SymmOperands = {},
   // int -> FLOAT16 map
@@ -2488,7 +2488,7 @@
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
-  .quant8AsymmOperands = {{0, {0, 0, 0, 10, 0, 0, 0, 12, 0, 2, 0, 14, 0, 4, 0, 16, 0, 6, 0, 18, 0, 8, 0, 20}}},
+  .quant8AsymmOperands = {{0, {128, 128, 128, 138, 128, 128, 128, 140, 128, 130, 128, 142, 128, 132, 128, 144, 128, 134, 128, 146, 128, 136, 128, 148}}},
   // int -> QUANT16_SYMM map
   .quant16SymmOperands = {},
   // int -> FLOAT16 map
@@ -2693,7 +2693,7 @@
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
-  .quant8AsymmOperands = {{0, {2, 4, 6, 8, 10, 12, 14, 16, 18, 20}}},
+  .quant8AsymmOperands = {{0, {130, 132, 134, 136, 138, 140, 142, 144, 146, 148}}},
   // int -> QUANT16_SYMM map
   .quant16SymmOperands = {},
   // int -> FLOAT16 map
@@ -2716,7 +2716,7 @@
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
-  .quant8AsymmOperands = {{0, {0, 0, 0, 10, 0, 0, 0, 12, 0, 2, 0, 14, 0, 4, 0, 16, 0, 6, 0, 18, 0, 8, 0, 20}}},
+  .quant8AsymmOperands = {{0, {128, 128, 128, 138, 128, 128, 128, 140, 128, 130, 128, 142, 128, 132, 128, 144, 128, 134, 128, 146, 128, 136, 128, 148}}},
   // int -> QUANT16_SYMM map
   .quant16SymmOperands = {},
   // int -> FLOAT16 map
@@ -2921,7 +2921,7 @@
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
-  .quant8AsymmOperands = {{0, {4, 8, 12, 16, 20, 24, 28, 32}}},
+  .quant8AsymmOperands = {{0, {132, 136, 140, 144, 148, 152, 156, 160}}},
   // int -> QUANT16_SYMM map
   .quant16SymmOperands = {},
   // int -> FLOAT16 map
@@ -2944,7 +2944,7 @@
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
-  .quant8AsymmOperands = {{0, {0, 0, 0, 0, 0, 20, 0, 0, 0, 0, 0, 0, 0, 24, 0, 0, 0, 4, 0, 0, 0, 28, 0, 0, 0, 8, 0, 0, 0, 32, 0, 0, 0, 12, 0, 0, 0, 0, 0, 0, 0, 16, 0, 0, 0, 0, 0, 0}}},
+  .quant8AsymmOperands = {{0, {128, 128, 128, 128, 128, 148, 128, 128, 128, 128, 128, 128, 128, 152, 128, 128, 128, 132, 128, 128, 128, 156, 128, 128, 128, 136, 128, 128, 128, 160, 128, 128, 128, 140, 128, 128, 128, 128, 128, 128, 128, 144, 128, 128, 128, 128, 128, 128}}},
   // int -> QUANT16_SYMM map
   .quant16SymmOperands = {},
   // int -> FLOAT16 map
@@ -3149,7 +3149,7 @@
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
-  .quant8AsymmOperands = {{0, {4, 8, 12, 16, 20, 24, 28, 32}}},
+  .quant8AsymmOperands = {{0, {132, 136, 140, 144, 148, 152, 156, 160}}},
   // int -> QUANT16_SYMM map
   .quant16SymmOperands = {},
   // int -> FLOAT16 map
@@ -3172,7 +3172,7 @@
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
-  .quant8AsymmOperands = {{0, {0, 0, 0, 0, 0, 20, 0, 0, 0, 0, 0, 0, 0, 24, 0, 0, 0, 4, 0, 0, 0, 28, 0, 0, 0, 8, 0, 0, 0, 32, 0, 0, 0, 12, 0, 0, 0, 0, 0, 0, 0, 16, 0, 0, 0, 0, 0, 0}}},
+  .quant8AsymmOperands = {{0, {128, 128, 128, 128, 128, 148, 128, 128, 128, 128, 128, 128, 128, 152, 128, 128, 128, 132, 128, 128, 128, 156, 128, 128, 128, 136, 128, 128, 128, 160, 128, 128, 128, 140, 128, 128, 128, 128, 128, 128, 128, 144, 128, 128, 128, 128, 128, 128}}},
   // int -> QUANT16_SYMM map
   .quant16SymmOperands = {},
   // int -> FLOAT16 map
@@ -3377,7 +3377,7 @@
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
-  .quant8AsymmOperands = {{0, {4, 8, 12, 16, 20, 24, 28, 32}}},
+  .quant8AsymmOperands = {{0, {132, 136, 140, 144, 148, 152, 156, 160}}},
   // int -> QUANT16_SYMM map
   .quant16SymmOperands = {},
   // int -> FLOAT16 map
@@ -3400,7 +3400,7 @@
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
-  .quant8AsymmOperands = {{0, {0, 0, 0, 0, 0, 20, 0, 0, 0, 0, 0, 0, 0, 24, 0, 0, 0, 4, 0, 0, 0, 28, 0, 0, 0, 8, 0, 0, 0, 32, 0, 0, 0, 12, 0, 0, 0, 0, 0, 0, 0, 16, 0, 0, 0, 0, 0, 0}}},
+  .quant8AsymmOperands = {{0, {128, 128, 128, 128, 128, 148, 128, 128, 128, 128, 128, 128, 128, 152, 128, 128, 128, 132, 128, 128, 128, 156, 128, 128, 128, 136, 128, 128, 128, 160, 128, 128, 128, 140, 128, 128, 128, 128, 128, 128, 128, 144, 128, 128, 128, 128, 128, 128}}},
   // int -> QUANT16_SYMM map
   .quant16SymmOperands = {},
   // int -> FLOAT16 map
@@ -3605,7 +3605,7 @@
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
-  .quant8AsymmOperands = {{0, {4, 8, 12, 16, 20, 24, 28, 32}}},
+  .quant8AsymmOperands = {{0, {132, 136, 140, 144, 148, 152, 156, 160}}},
   // int -> QUANT16_SYMM map
   .quant16SymmOperands = {},
   // int -> FLOAT16 map
@@ -3628,7 +3628,7 @@
   // int -> INT32 map
   .int32Operands = {},
   // int -> QUANT8_ASYMM map
-  .quant8AsymmOperands = {{0, {0, 0, 0, 0, 0, 20, 0, 0, 0, 0, 0, 0, 0, 24, 0, 0, 0, 4, 0, 0, 0, 28, 0, 0, 0, 8, 0, 0, 0, 32, 0, 0, 0, 12, 0, 0, 0, 0, 0, 0, 0, 16, 0, 0, 0, 0, 0, 0}}},
+  .quant8AsymmOperands = {{0, {128, 128, 128, 128, 128, 148, 128, 128, 128, 128, 128, 128, 128, 152, 128, 128, 128, 132, 128, 128, 128, 156, 128, 128, 128, 136, 128, 128, 128, 160, 128, 128, 128, 140, 128, 128, 128, 128, 128, 128, 128, 144, 128, 128, 128, 128, 128, 128}}},
   // int -> QUANT16_SYMM map
   .quant16SymmOperands = {},
   // int -> FLOAT16 map
diff --git a/nn/runtime/test/generated/models/avg_pool_float_2_relaxed.model.cpp b/nn/runtime/test/generated/models/avg_pool_float_2_relaxed.model.cpp
index 1582980..2261b6b 100644
--- a/nn/runtime/test/generated/models/avg_pool_float_2_relaxed.model.cpp
+++ b/nn/runtime/test/generated/models/avg_pool_float_2_relaxed.model.cpp
@@ -3,7 +3,7 @@
 void CreateModel(Model *model) {
   OperandType type0(Type::TENSOR_FLOAT32, {5, 52, 60, 3});
   OperandType type1(Type::INT32, {});
-  OperandType type2(Type::TENSOR_FLOAT32, {5, 11, 13, 3});
+  OperandType type2(Type::TENSOR_FLOAT32, {5, 16, 18, 3});
   // Phase 1, operands
   auto i0 = model->addOperand(&type0);
   auto padding = model->addOperand(&type1);
@@ -12,11 +12,11 @@
   auto activation = model->addOperand(&type1);
   auto output = model->addOperand(&type2);
   // Phase 2, operations
-  static int32_t padding_init[] = {50};
+  static int32_t padding_init[] = {30};
   model->setOperandValue(padding, padding_init, sizeof(int32_t) * 1);
   static int32_t stride_init[] = {5};
   model->setOperandValue(stride, stride_init, sizeof(int32_t) * 1);
-  static int32_t filter_init[] = {100};
+  static int32_t filter_init[] = {35};
   model->setOperandValue(filter, filter_init, sizeof(int32_t) * 1);
   static int32_t activation_init[] = {0};
   model->setOperandValue(activation, activation_init, sizeof(int32_t) * 1);
@@ -47,11 +47,11 @@
   auto activation = model->addOperand(&type1);
   auto output = model->addOperand(&type3);
   // Phase 2, operations
-  static int32_t padding_init[] = {50};
+  static int32_t padding_init[] = {30};
   model->setOperandValue(padding, padding_init, sizeof(int32_t) * 1);
   static int32_t stride_init[] = {5};
   model->setOperandValue(stride, stride_init, sizeof(int32_t) * 1);
-  static int32_t filter_init[] = {100};
+  static int32_t filter_init[] = {35};
   model->setOperandValue(filter, filter_init, sizeof(int32_t) * 1);
   static int32_t activation_init[] = {0};
   model->setOperandValue(activation, activation_init, sizeof(int32_t) * 1);
diff --git a/nn/runtime/test/generated/models/depthwise_conv2d_v1_2.model.cpp b/nn/runtime/test/generated/models/depthwise_conv2d_v1_2.model.cpp
index 55a22fe..c5e627f 100644
--- a/nn/runtime/test/generated/models/depthwise_conv2d_v1_2.model.cpp
+++ b/nn/runtime/test/generated/models/depthwise_conv2d_v1_2.model.cpp
@@ -8902,10 +8902,10 @@
   return ignore.find(i) != ignore.end();
 }
 
-void CreateModel(Model *model) {
-  OperandType type11(Type::TENSOR_QUANT8_ASYMM, {1, 3, 2, 2}, 0.5f, 127);
+void CreateModel_quant_output_multiplier_gt_1(Model *model) {
+  OperandType type11(Type::TENSOR_QUANT8_ASYMM, {1, 3, 2, 2}, 1.0058823529411764f, 127);
   OperandType type12(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 4}, 1.0058823529411764f, 128);
-  OperandType type13(Type::TENSOR_INT32, {4}, 0.5029411764705882f, 0);
+  OperandType type13(Type::TENSOR_INT32, {4}, 1.0117993079584775f, 0);
   OperandType type14(Type::TENSOR_QUANT8_ASYMM, {1, 2, 1, 4}, 1.0f, 127);
   OperandType type4(Type::INT32, {});
   // Phase 1, operands
@@ -8941,15 +8941,15 @@
   assert(model->isValid());
 }
 
-inline bool is_ignored(int i) {
+inline bool is_ignored_quant_output_multiplier_gt_1(int i) {
   static std::set<int> ignore = {};
   return ignore.find(i) != ignore.end();
 }
 
-void CreateModel_relaxed(Model *model) {
-  OperandType type11(Type::TENSOR_QUANT8_ASYMM, {1, 3, 2, 2}, 0.5f, 127);
+void CreateModel_quant_output_multiplier_gt_1_relaxed(Model *model) {
+  OperandType type11(Type::TENSOR_QUANT8_ASYMM, {1, 3, 2, 2}, 1.0058823529411764f, 127);
   OperandType type12(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 4}, 1.0058823529411764f, 128);
-  OperandType type13(Type::TENSOR_INT32, {4}, 0.5029411764705882f, 0);
+  OperandType type13(Type::TENSOR_INT32, {4}, 1.0117993079584775f, 0);
   OperandType type14(Type::TENSOR_QUANT8_ASYMM, {1, 2, 1, 4}, 1.0f, 127);
   OperandType type4(Type::INT32, {});
   // Phase 1, operands
@@ -8987,15 +8987,15 @@
   assert(model->isValid());
 }
 
-inline bool is_ignored_relaxed(int i) {
+inline bool is_ignored_quant_output_multiplier_gt_1_relaxed(int i) {
   static std::set<int> ignore = {};
   return ignore.find(i) != ignore.end();
 }
 
-void CreateModel_weight_as_input(Model *model) {
-  OperandType type11(Type::TENSOR_QUANT8_ASYMM, {1, 3, 2, 2}, 0.5f, 127);
+void CreateModel_quant_output_multiplier_gt_1_weight_as_input(Model *model) {
+  OperandType type11(Type::TENSOR_QUANT8_ASYMM, {1, 3, 2, 2}, 1.0058823529411764f, 127);
   OperandType type12(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 4}, 1.0058823529411764f, 128);
-  OperandType type13(Type::TENSOR_INT32, {4}, 0.5029411764705882f, 0);
+  OperandType type13(Type::TENSOR_INT32, {4}, 1.0117993079584775f, 0);
   OperandType type14(Type::TENSOR_QUANT8_ASYMM, {1, 2, 1, 4}, 1.0f, 127);
   OperandType type4(Type::INT32, {});
   // Phase 1, operands
@@ -9027,15 +9027,15 @@
   assert(model->isValid());
 }
 
-inline bool is_ignored_weight_as_input(int i) {
+inline bool is_ignored_quant_output_multiplier_gt_1_weight_as_input(int i) {
   static std::set<int> ignore = {};
   return ignore.find(i) != ignore.end();
 }
 
-void CreateModel_weight_as_input_relaxed(Model *model) {
-  OperandType type11(Type::TENSOR_QUANT8_ASYMM, {1, 3, 2, 2}, 0.5f, 127);
+void CreateModel_quant_output_multiplier_gt_1_weight_as_input_relaxed(Model *model) {
+  OperandType type11(Type::TENSOR_QUANT8_ASYMM, {1, 3, 2, 2}, 1.0058823529411764f, 127);
   OperandType type12(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 4}, 1.0058823529411764f, 128);
-  OperandType type13(Type::TENSOR_INT32, {4}, 0.5029411764705882f, 0);
+  OperandType type13(Type::TENSOR_INT32, {4}, 1.0117993079584775f, 0);
   OperandType type14(Type::TENSOR_QUANT8_ASYMM, {1, 2, 1, 4}, 1.0f, 127);
   OperandType type4(Type::INT32, {});
   // Phase 1, operands
@@ -9069,15 +9069,15 @@
   assert(model->isValid());
 }
 
-inline bool is_ignored_weight_as_input_relaxed(int i) {
+inline bool is_ignored_quant_output_multiplier_gt_1_weight_as_input_relaxed(int i) {
   static std::set<int> ignore = {};
   return ignore.find(i) != ignore.end();
 }
 
-void CreateModel_dynamic_output_shape(Model *model) {
-  OperandType type11(Type::TENSOR_QUANT8_ASYMM, {1, 3, 2, 2}, 0.5f, 127);
+void CreateModel_quant_output_multiplier_gt_1_dynamic_output_shape(Model *model) {
+  OperandType type11(Type::TENSOR_QUANT8_ASYMM, {1, 3, 2, 2}, 1.0058823529411764f, 127);
   OperandType type12(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 4}, 1.0058823529411764f, 128);
-  OperandType type13(Type::TENSOR_INT32, {4}, 0.5029411764705882f, 0);
+  OperandType type13(Type::TENSOR_INT32, {4}, 1.0117993079584775f, 0);
   OperandType type4(Type::INT32, {});
   OperandType type78(Type::TENSOR_QUANT8_ASYMM, {0, 0, 0, 0}, 1.0f, 127);
   // Phase 1, operands
@@ -9113,15 +9113,15 @@
   assert(model->isValid());
 }
 
-inline bool is_ignored_dynamic_output_shape(int i) {
+inline bool is_ignored_quant_output_multiplier_gt_1_dynamic_output_shape(int i) {
   static std::set<int> ignore = {};
   return ignore.find(i) != ignore.end();
 }
 
-void CreateModel_dynamic_output_shape_relaxed(Model *model) {
-  OperandType type11(Type::TENSOR_QUANT8_ASYMM, {1, 3, 2, 2}, 0.5f, 127);
+void CreateModel_quant_output_multiplier_gt_1_dynamic_output_shape_relaxed(Model *model) {
+  OperandType type11(Type::TENSOR_QUANT8_ASYMM, {1, 3, 2, 2}, 1.0058823529411764f, 127);
   OperandType type12(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 4}, 1.0058823529411764f, 128);
-  OperandType type13(Type::TENSOR_INT32, {4}, 0.5029411764705882f, 0);
+  OperandType type13(Type::TENSOR_INT32, {4}, 1.0117993079584775f, 0);
   OperandType type4(Type::INT32, {});
   OperandType type78(Type::TENSOR_QUANT8_ASYMM, {0, 0, 0, 0}, 1.0f, 127);
   // Phase 1, operands
@@ -9159,15 +9159,15 @@
   assert(model->isValid());
 }
 
-inline bool is_ignored_dynamic_output_shape_relaxed(int i) {
+inline bool is_ignored_quant_output_multiplier_gt_1_dynamic_output_shape_relaxed(int i) {
   static std::set<int> ignore = {};
   return ignore.find(i) != ignore.end();
 }
 
-void CreateModel_dynamic_output_shape_weight_as_input(Model *model) {
-  OperandType type11(Type::TENSOR_QUANT8_ASYMM, {1, 3, 2, 2}, 0.5f, 127);
+void CreateModel_quant_output_multiplier_gt_1_dynamic_output_shape_weight_as_input(Model *model) {
+  OperandType type11(Type::TENSOR_QUANT8_ASYMM, {1, 3, 2, 2}, 1.0058823529411764f, 127);
   OperandType type12(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 4}, 1.0058823529411764f, 128);
-  OperandType type13(Type::TENSOR_INT32, {4}, 0.5029411764705882f, 0);
+  OperandType type13(Type::TENSOR_INT32, {4}, 1.0117993079584775f, 0);
   OperandType type4(Type::INT32, {});
   OperandType type78(Type::TENSOR_QUANT8_ASYMM, {0, 0, 0, 0}, 1.0f, 127);
   // Phase 1, operands
@@ -9199,15 +9199,15 @@
   assert(model->isValid());
 }
 
-inline bool is_ignored_dynamic_output_shape_weight_as_input(int i) {
+inline bool is_ignored_quant_output_multiplier_gt_1_dynamic_output_shape_weight_as_input(int i) {
   static std::set<int> ignore = {};
   return ignore.find(i) != ignore.end();
 }
 
-void CreateModel_dynamic_output_shape_weight_as_input_relaxed(Model *model) {
-  OperandType type11(Type::TENSOR_QUANT8_ASYMM, {1, 3, 2, 2}, 0.5f, 127);
+void CreateModel_quant_output_multiplier_gt_1_dynamic_output_shape_weight_as_input_relaxed(Model *model) {
+  OperandType type11(Type::TENSOR_QUANT8_ASYMM, {1, 3, 2, 2}, 1.0058823529411764f, 127);
   OperandType type12(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 4}, 1.0058823529411764f, 128);
-  OperandType type13(Type::TENSOR_INT32, {4}, 0.5029411764705882f, 0);
+  OperandType type13(Type::TENSOR_INT32, {4}, 1.0117993079584775f, 0);
   OperandType type4(Type::INT32, {});
   OperandType type78(Type::TENSOR_QUANT8_ASYMM, {0, 0, 0, 0}, 1.0f, 127);
   // Phase 1, operands
@@ -9241,7 +9241,7 @@
   assert(model->isValid());
 }
 
-inline bool is_ignored_dynamic_output_shape_weight_as_input_relaxed(int i) {
+inline bool is_ignored_quant_output_multiplier_gt_1_dynamic_output_shape_weight_as_input_relaxed(int i) {
   static std::set<int> ignore = {};
   return ignore.find(i) != ignore.end();
 }
diff --git a/nn/runtime/test/generated/models/space_to_batch_v1_2.model.cpp b/nn/runtime/test/generated/models/space_to_batch_v1_2.model.cpp
index ccbbbe2..e24a621 100644
--- a/nn/runtime/test/generated/models/space_to_batch_v1_2.model.cpp
+++ b/nn/runtime/test/generated/models/space_to_batch_v1_2.model.cpp
@@ -1141,8 +1141,8 @@
 void CreateModel_nhwc_quant8_3(Model *model) {
   OperandType type0(Type::BOOL, {});
   OperandType type2(Type::TENSOR_INT32, {2, 2});
-  OperandType type34(Type::TENSOR_QUANT8_ASYMM, {1, 5, 2, 1}, 0.5f, 0);
-  OperandType type35(Type::TENSOR_QUANT8_ASYMM, {6, 2, 2, 1}, 0.5f, 0);
+  OperandType type34(Type::TENSOR_QUANT8_ASYMM, {1, 5, 2, 1}, 0.5f, 128);
+  OperandType type35(Type::TENSOR_QUANT8_ASYMM, {6, 2, 2, 1}, 0.5f, 128);
   OperandType type4(Type::TENSOR_INT32, {2});
   // Phase 1, operands
   auto op12 = model->addOperand(&type34);
@@ -1272,8 +1272,8 @@
   OperandType type0(Type::BOOL, {});
   OperandType type2(Type::TENSOR_INT32, {2, 2});
   OperandType type4(Type::TENSOR_INT32, {2});
-  OperandType type40(Type::TENSOR_QUANT8_ASYMM, {1, 1, 5, 2}, 0.5f, 0);
-  OperandType type41(Type::TENSOR_QUANT8_ASYMM, {6, 1, 2, 2}, 0.5f, 0);
+  OperandType type40(Type::TENSOR_QUANT8_ASYMM, {1, 1, 5, 2}, 0.5f, 128);
+  OperandType type41(Type::TENSOR_QUANT8_ASYMM, {6, 1, 2, 2}, 0.5f, 128);
   // Phase 1, operands
   auto op12 = model->addOperand(&type40);
   auto param2 = model->addOperand(&type4);
@@ -1401,15 +1401,15 @@
 void CreateModel_dynamic_output_shape_nhwc_quant8_3(Model *model) {
   OperandType type0(Type::BOOL, {});
   OperandType type2(Type::TENSOR_INT32, {2, 2});
-  OperandType type31(Type::TENSOR_QUANT8_ASYMM, {0, 0, 0, 0}, 0.5f, 0);
-  OperandType type34(Type::TENSOR_QUANT8_ASYMM, {1, 5, 2, 1}, 0.5f, 0);
+  OperandType type34(Type::TENSOR_QUANT8_ASYMM, {1, 5, 2, 1}, 0.5f, 128);
   OperandType type4(Type::TENSOR_INT32, {2});
+  OperandType type42(Type::TENSOR_QUANT8_ASYMM, {0, 0, 0, 0}, 0.5f, 128);
   // Phase 1, operands
   auto op12 = model->addOperand(&type34);
   auto param2 = model->addOperand(&type4);
   auto paddings1 = model->addOperand(&type2);
   auto layout = model->addOperand(&type0);
-  auto op42 = model->addOperand(&type31);
+  auto op42 = model->addOperand(&type42);
   // Phase 2, operations
   static int32_t param2_init[] = {3, 2};
   model->setOperandValue(param2, param2_init, sizeof(int32_t) * 2);
@@ -1531,15 +1531,15 @@
 void CreateModel_dynamic_output_shape_nchw_quant8_3(Model *model) {
   OperandType type0(Type::BOOL, {});
   OperandType type2(Type::TENSOR_INT32, {2, 2});
-  OperandType type31(Type::TENSOR_QUANT8_ASYMM, {0, 0, 0, 0}, 0.5f, 0);
   OperandType type4(Type::TENSOR_INT32, {2});
-  OperandType type40(Type::TENSOR_QUANT8_ASYMM, {1, 1, 5, 2}, 0.5f, 0);
+  OperandType type40(Type::TENSOR_QUANT8_ASYMM, {1, 1, 5, 2}, 0.5f, 128);
+  OperandType type42(Type::TENSOR_QUANT8_ASYMM, {0, 0, 0, 0}, 0.5f, 128);
   // Phase 1, operands
   auto op12 = model->addOperand(&type40);
   auto param2 = model->addOperand(&type4);
   auto paddings1 = model->addOperand(&type2);
   auto layout = model->addOperand(&type0);
-  auto op42 = model->addOperand(&type31);
+  auto op42 = model->addOperand(&type42);
   // Phase 2, operations
   static int32_t param2_init[] = {3, 2};
   model->setOperandValue(param2, param2_init, sizeof(int32_t) * 2);
@@ -1630,14 +1630,14 @@
   OperandType type0(Type::BOOL, {});
   OperandType type2(Type::TENSOR_INT32, {2, 2});
   OperandType type4(Type::TENSOR_INT32, {2});
-  OperandType type42(Type::TENSOR_FLOAT16, {1, 4, 2, 1});
-  OperandType type43(Type::TENSOR_FLOAT16, {6, 2, 4, 1});
+  OperandType type43(Type::TENSOR_FLOAT16, {1, 4, 2, 1});
+  OperandType type44(Type::TENSOR_FLOAT16, {6, 2, 4, 1});
   // Phase 1, operands
-  auto op13 = model->addOperand(&type42);
+  auto op13 = model->addOperand(&type43);
   auto param3 = model->addOperand(&type4);
   auto paddings2 = model->addOperand(&type2);
   auto layout = model->addOperand(&type0);
-  auto op43 = model->addOperand(&type43);
+  auto op43 = model->addOperand(&type44);
   // Phase 2, operations
   static int32_t param3_init[] = {3, 2};
   model->setOperandValue(param3, param3_init, sizeof(int32_t) * 2);
@@ -1662,14 +1662,14 @@
   OperandType type0(Type::BOOL, {});
   OperandType type2(Type::TENSOR_INT32, {2, 2});
   OperandType type4(Type::TENSOR_INT32, {2});
-  OperandType type44(Type::TENSOR_QUANT8_ASYMM, {1, 4, 2, 1}, 0.25f, 0);
-  OperandType type45(Type::TENSOR_QUANT8_ASYMM, {6, 2, 4, 1}, 0.25f, 0);
+  OperandType type45(Type::TENSOR_QUANT8_ASYMM, {1, 4, 2, 1}, 0.25f, 128);
+  OperandType type46(Type::TENSOR_QUANT8_ASYMM, {6, 2, 4, 1}, 0.25f, 128);
   // Phase 1, operands
-  auto op13 = model->addOperand(&type44);
+  auto op13 = model->addOperand(&type45);
   auto param3 = model->addOperand(&type4);
   auto paddings2 = model->addOperand(&type2);
   auto layout = model->addOperand(&type0);
-  auto op43 = model->addOperand(&type45);
+  auto op43 = model->addOperand(&type46);
   // Phase 2, operations
   static int32_t param3_init[] = {3, 2};
   model->setOperandValue(param3, param3_init, sizeof(int32_t) * 2);
@@ -1694,14 +1694,14 @@
   OperandType type0(Type::BOOL, {});
   OperandType type2(Type::TENSOR_INT32, {2, 2});
   OperandType type4(Type::TENSOR_INT32, {2});
-  OperandType type46(Type::TENSOR_FLOAT32, {1, 1, 4, 2});
-  OperandType type47(Type::TENSOR_FLOAT32, {6, 1, 2, 4});
+  OperandType type47(Type::TENSOR_FLOAT32, {1, 1, 4, 2});
+  OperandType type48(Type::TENSOR_FLOAT32, {6, 1, 2, 4});
   // Phase 1, operands
-  auto op13 = model->addOperand(&type46);
+  auto op13 = model->addOperand(&type47);
   auto param3 = model->addOperand(&type4);
   auto paddings2 = model->addOperand(&type2);
   auto layout = model->addOperand(&type0);
-  auto op43 = model->addOperand(&type47);
+  auto op43 = model->addOperand(&type48);
   // Phase 2, operations
   static int32_t param3_init[] = {3, 2};
   model->setOperandValue(param3, param3_init, sizeof(int32_t) * 2);
@@ -1726,14 +1726,14 @@
   OperandType type0(Type::BOOL, {});
   OperandType type2(Type::TENSOR_INT32, {2, 2});
   OperandType type4(Type::TENSOR_INT32, {2});
-  OperandType type46(Type::TENSOR_FLOAT32, {1, 1, 4, 2});
-  OperandType type47(Type::TENSOR_FLOAT32, {6, 1, 2, 4});
+  OperandType type47(Type::TENSOR_FLOAT32, {1, 1, 4, 2});
+  OperandType type48(Type::TENSOR_FLOAT32, {6, 1, 2, 4});
   // Phase 1, operands
-  auto op13 = model->addOperand(&type46);
+  auto op13 = model->addOperand(&type47);
   auto param3 = model->addOperand(&type4);
   auto paddings2 = model->addOperand(&type2);
   auto layout = model->addOperand(&type0);
-  auto op43 = model->addOperand(&type47);
+  auto op43 = model->addOperand(&type48);
   // Phase 2, operations
   static int32_t param3_init[] = {3, 2};
   model->setOperandValue(param3, param3_init, sizeof(int32_t) * 2);
@@ -1760,14 +1760,14 @@
   OperandType type0(Type::BOOL, {});
   OperandType type2(Type::TENSOR_INT32, {2, 2});
   OperandType type4(Type::TENSOR_INT32, {2});
-  OperandType type48(Type::TENSOR_FLOAT16, {1, 1, 4, 2});
-  OperandType type49(Type::TENSOR_FLOAT16, {6, 1, 2, 4});
+  OperandType type49(Type::TENSOR_FLOAT16, {1, 1, 4, 2});
+  OperandType type50(Type::TENSOR_FLOAT16, {6, 1, 2, 4});
   // Phase 1, operands
-  auto op13 = model->addOperand(&type48);
+  auto op13 = model->addOperand(&type49);
   auto param3 = model->addOperand(&type4);
   auto paddings2 = model->addOperand(&type2);
   auto layout = model->addOperand(&type0);
-  auto op43 = model->addOperand(&type49);
+  auto op43 = model->addOperand(&type50);
   // Phase 2, operations
   static int32_t param3_init[] = {3, 2};
   model->setOperandValue(param3, param3_init, sizeof(int32_t) * 2);
@@ -1792,14 +1792,14 @@
   OperandType type0(Type::BOOL, {});
   OperandType type2(Type::TENSOR_INT32, {2, 2});
   OperandType type4(Type::TENSOR_INT32, {2});
-  OperandType type50(Type::TENSOR_QUANT8_ASYMM, {1, 1, 4, 2}, 0.25f, 0);
-  OperandType type51(Type::TENSOR_QUANT8_ASYMM, {6, 1, 2, 4}, 0.25f, 0);
+  OperandType type51(Type::TENSOR_QUANT8_ASYMM, {1, 1, 4, 2}, 0.25f, 128);
+  OperandType type52(Type::TENSOR_QUANT8_ASYMM, {6, 1, 2, 4}, 0.25f, 128);
   // Phase 1, operands
-  auto op13 = model->addOperand(&type50);
+  auto op13 = model->addOperand(&type51);
   auto param3 = model->addOperand(&type4);
   auto paddings2 = model->addOperand(&type2);
   auto layout = model->addOperand(&type0);
-  auto op43 = model->addOperand(&type51);
+  auto op43 = model->addOperand(&type52);
   // Phase 2, operations
   static int32_t param3_init[] = {3, 2};
   model->setOperandValue(param3, param3_init, sizeof(int32_t) * 2);
@@ -1891,9 +1891,9 @@
   OperandType type19(Type::TENSOR_FLOAT16, {0, 0, 0, 0});
   OperandType type2(Type::TENSOR_INT32, {2, 2});
   OperandType type4(Type::TENSOR_INT32, {2});
-  OperandType type42(Type::TENSOR_FLOAT16, {1, 4, 2, 1});
+  OperandType type43(Type::TENSOR_FLOAT16, {1, 4, 2, 1});
   // Phase 1, operands
-  auto op13 = model->addOperand(&type42);
+  auto op13 = model->addOperand(&type43);
   auto param3 = model->addOperand(&type4);
   auto paddings2 = model->addOperand(&type2);
   auto layout = model->addOperand(&type0);
@@ -1922,14 +1922,14 @@
   OperandType type0(Type::BOOL, {});
   OperandType type2(Type::TENSOR_INT32, {2, 2});
   OperandType type4(Type::TENSOR_INT32, {2});
-  OperandType type44(Type::TENSOR_QUANT8_ASYMM, {1, 4, 2, 1}, 0.25f, 0);
-  OperandType type52(Type::TENSOR_QUANT8_ASYMM, {0, 0, 0, 0}, 0.25f, 0);
+  OperandType type45(Type::TENSOR_QUANT8_ASYMM, {1, 4, 2, 1}, 0.25f, 128);
+  OperandType type53(Type::TENSOR_QUANT8_ASYMM, {0, 0, 0, 0}, 0.25f, 128);
   // Phase 1, operands
-  auto op13 = model->addOperand(&type44);
+  auto op13 = model->addOperand(&type45);
   auto param3 = model->addOperand(&type4);
   auto paddings2 = model->addOperand(&type2);
   auto layout = model->addOperand(&type0);
-  auto op43 = model->addOperand(&type52);
+  auto op43 = model->addOperand(&type53);
   // Phase 2, operations
   static int32_t param3_init[] = {3, 2};
   model->setOperandValue(param3, param3_init, sizeof(int32_t) * 2);
@@ -1955,9 +1955,9 @@
   OperandType type18(Type::TENSOR_FLOAT32, {0, 0, 0, 0});
   OperandType type2(Type::TENSOR_INT32, {2, 2});
   OperandType type4(Type::TENSOR_INT32, {2});
-  OperandType type46(Type::TENSOR_FLOAT32, {1, 1, 4, 2});
+  OperandType type47(Type::TENSOR_FLOAT32, {1, 1, 4, 2});
   // Phase 1, operands
-  auto op13 = model->addOperand(&type46);
+  auto op13 = model->addOperand(&type47);
   auto param3 = model->addOperand(&type4);
   auto paddings2 = model->addOperand(&type2);
   auto layout = model->addOperand(&type0);
@@ -1987,9 +1987,9 @@
   OperandType type18(Type::TENSOR_FLOAT32, {0, 0, 0, 0});
   OperandType type2(Type::TENSOR_INT32, {2, 2});
   OperandType type4(Type::TENSOR_INT32, {2});
-  OperandType type46(Type::TENSOR_FLOAT32, {1, 1, 4, 2});
+  OperandType type47(Type::TENSOR_FLOAT32, {1, 1, 4, 2});
   // Phase 1, operands
-  auto op13 = model->addOperand(&type46);
+  auto op13 = model->addOperand(&type47);
   auto param3 = model->addOperand(&type4);
   auto paddings2 = model->addOperand(&type2);
   auto layout = model->addOperand(&type0);
@@ -2021,9 +2021,9 @@
   OperandType type19(Type::TENSOR_FLOAT16, {0, 0, 0, 0});
   OperandType type2(Type::TENSOR_INT32, {2, 2});
   OperandType type4(Type::TENSOR_INT32, {2});
-  OperandType type48(Type::TENSOR_FLOAT16, {1, 1, 4, 2});
+  OperandType type49(Type::TENSOR_FLOAT16, {1, 1, 4, 2});
   // Phase 1, operands
-  auto op13 = model->addOperand(&type48);
+  auto op13 = model->addOperand(&type49);
   auto param3 = model->addOperand(&type4);
   auto paddings2 = model->addOperand(&type2);
   auto layout = model->addOperand(&type0);
@@ -2052,14 +2052,14 @@
   OperandType type0(Type::BOOL, {});
   OperandType type2(Type::TENSOR_INT32, {2, 2});
   OperandType type4(Type::TENSOR_INT32, {2});
-  OperandType type50(Type::TENSOR_QUANT8_ASYMM, {1, 1, 4, 2}, 0.25f, 0);
-  OperandType type52(Type::TENSOR_QUANT8_ASYMM, {0, 0, 0, 0}, 0.25f, 0);
+  OperandType type51(Type::TENSOR_QUANT8_ASYMM, {1, 1, 4, 2}, 0.25f, 128);
+  OperandType type53(Type::TENSOR_QUANT8_ASYMM, {0, 0, 0, 0}, 0.25f, 128);
   // Phase 1, operands
-  auto op13 = model->addOperand(&type50);
+  auto op13 = model->addOperand(&type51);
   auto param3 = model->addOperand(&type4);
   auto paddings2 = model->addOperand(&type2);
   auto layout = model->addOperand(&type0);
-  auto op43 = model->addOperand(&type52);
+  auto op43 = model->addOperand(&type53);
   // Phase 2, operations
   static int32_t param3_init[] = {3, 2};
   model->setOperandValue(param3, param3_init, sizeof(int32_t) * 2);
diff --git a/nn/runtime/test/generated/tests/depthwise_conv2d_v1_2.mod.py.cpp b/nn/runtime/test/generated/tests/depthwise_conv2d_v1_2.mod.py.cpp
index d2ae001..87cba0e 100644
--- a/nn/runtime/test/generated/tests/depthwise_conv2d_v1_2.mod.py.cpp
+++ b/nn/runtime/test/generated/tests/depthwise_conv2d_v1_2.mod.py.cpp
@@ -1018,50 +1018,50 @@
 }
 
 TEST_F(GeneratedTests, depthwise_conv2d_v1_2_quant_output_multiplier_gt_1) {
-    execute(depthwise_conv2d_v1_2::CreateModel,
-            depthwise_conv2d_v1_2::is_ignored,
+    execute(depthwise_conv2d_v1_2::CreateModel_quant_output_multiplier_gt_1,
+            depthwise_conv2d_v1_2::is_ignored_quant_output_multiplier_gt_1,
             depthwise_conv2d_v1_2::get_examples_quant_output_multiplier_gt_1());
 }
 
 TEST_F(GeneratedTests, depthwise_conv2d_v1_2_quant_output_multiplier_gt_1_relaxed) {
-    execute(depthwise_conv2d_v1_2::CreateModel_relaxed,
-            depthwise_conv2d_v1_2::is_ignored_relaxed,
+    execute(depthwise_conv2d_v1_2::CreateModel_quant_output_multiplier_gt_1_relaxed,
+            depthwise_conv2d_v1_2::is_ignored_quant_output_multiplier_gt_1_relaxed,
             depthwise_conv2d_v1_2::get_examples_quant_output_multiplier_gt_1_relaxed());
 }
 
 TEST_F(GeneratedTests, depthwise_conv2d_v1_2_quant_output_multiplier_gt_1_weight_as_input) {
-    execute(depthwise_conv2d_v1_2::CreateModel_weight_as_input,
-            depthwise_conv2d_v1_2::is_ignored_weight_as_input,
+    execute(depthwise_conv2d_v1_2::CreateModel_quant_output_multiplier_gt_1_weight_as_input,
+            depthwise_conv2d_v1_2::is_ignored_quant_output_multiplier_gt_1_weight_as_input,
             depthwise_conv2d_v1_2::get_examples_quant_output_multiplier_gt_1_weight_as_input());
 }
 
 TEST_F(GeneratedTests, depthwise_conv2d_v1_2_quant_output_multiplier_gt_1_weight_as_input_relaxed) {
-    execute(depthwise_conv2d_v1_2::CreateModel_weight_as_input_relaxed,
-            depthwise_conv2d_v1_2::is_ignored_weight_as_input_relaxed,
+    execute(depthwise_conv2d_v1_2::CreateModel_quant_output_multiplier_gt_1_weight_as_input_relaxed,
+            depthwise_conv2d_v1_2::is_ignored_quant_output_multiplier_gt_1_weight_as_input_relaxed,
             depthwise_conv2d_v1_2::get_examples_quant_output_multiplier_gt_1_weight_as_input_relaxed());
 }
 
 TEST_F(DynamicOutputShapeTest, depthwise_conv2d_v1_2_quant_output_multiplier_gt_1_dynamic_output_shape) {
-    execute(depthwise_conv2d_v1_2::CreateModel_dynamic_output_shape,
-            depthwise_conv2d_v1_2::is_ignored_dynamic_output_shape,
+    execute(depthwise_conv2d_v1_2::CreateModel_quant_output_multiplier_gt_1_dynamic_output_shape,
+            depthwise_conv2d_v1_2::is_ignored_quant_output_multiplier_gt_1_dynamic_output_shape,
             depthwise_conv2d_v1_2::get_examples_quant_output_multiplier_gt_1_dynamic_output_shape());
 }
 
 TEST_F(DynamicOutputShapeTest, depthwise_conv2d_v1_2_quant_output_multiplier_gt_1_dynamic_output_shape_relaxed) {
-    execute(depthwise_conv2d_v1_2::CreateModel_dynamic_output_shape_relaxed,
-            depthwise_conv2d_v1_2::is_ignored_dynamic_output_shape_relaxed,
+    execute(depthwise_conv2d_v1_2::CreateModel_quant_output_multiplier_gt_1_dynamic_output_shape_relaxed,
+            depthwise_conv2d_v1_2::is_ignored_quant_output_multiplier_gt_1_dynamic_output_shape_relaxed,
             depthwise_conv2d_v1_2::get_examples_quant_output_multiplier_gt_1_dynamic_output_shape_relaxed());
 }
 
 TEST_F(DynamicOutputShapeTest, depthwise_conv2d_v1_2_quant_output_multiplier_gt_1_dynamic_output_shape_weight_as_input) {
-    execute(depthwise_conv2d_v1_2::CreateModel_dynamic_output_shape_weight_as_input,
-            depthwise_conv2d_v1_2::is_ignored_dynamic_output_shape_weight_as_input,
+    execute(depthwise_conv2d_v1_2::CreateModel_quant_output_multiplier_gt_1_dynamic_output_shape_weight_as_input,
+            depthwise_conv2d_v1_2::is_ignored_quant_output_multiplier_gt_1_dynamic_output_shape_weight_as_input,
             depthwise_conv2d_v1_2::get_examples_quant_output_multiplier_gt_1_dynamic_output_shape_weight_as_input());
 }
 
 TEST_F(DynamicOutputShapeTest, depthwise_conv2d_v1_2_quant_output_multiplier_gt_1_dynamic_output_shape_weight_as_input_relaxed) {
-    execute(depthwise_conv2d_v1_2::CreateModel_dynamic_output_shape_weight_as_input_relaxed,
-            depthwise_conv2d_v1_2::is_ignored_dynamic_output_shape_weight_as_input_relaxed,
+    execute(depthwise_conv2d_v1_2::CreateModel_quant_output_multiplier_gt_1_dynamic_output_shape_weight_as_input_relaxed,
+            depthwise_conv2d_v1_2::is_ignored_quant_output_multiplier_gt_1_dynamic_output_shape_weight_as_input_relaxed,
             depthwise_conv2d_v1_2::get_examples_quant_output_multiplier_gt_1_dynamic_output_shape_weight_as_input_relaxed());
 }
 
diff --git a/nn/runtime/test/generated/vts_models/avg_pool_float_2_relaxed.model.cpp b/nn/runtime/test/generated/vts_models/avg_pool_float_2_relaxed.model.cpp
index 6741304..317a146 100644
--- a/nn/runtime/test/generated/vts_models/avg_pool_float_2_relaxed.model.cpp
+++ b/nn/runtime/test/generated/vts_models/avg_pool_float_2_relaxed.model.cpp
@@ -50,7 +50,7 @@
         },
         {
             .type = OperandType::TENSOR_FLOAT32,
-            .dimensions = {5, 11, 13, 3},
+            .dimensions = {5, 16, 18, 3},
             .numberOfConsumers = 0,
             .scale = 0.0f,
             .zeroPoint = 0,
@@ -70,7 +70,7 @@
     const std::vector<uint32_t> inputIndexes = {0};
     const std::vector<uint32_t> outputIndexes = {5};
     std::vector<uint8_t> operandValues = {
-      50, 0, 0, 0, 5, 0, 0, 0, 100, 0, 0, 0, 0, 0, 0, 0
+      30, 0, 0, 0, 5, 0, 0, 0, 35, 0, 0, 0, 0, 0, 0, 0
     };
     const std::vector<hidl_memory> pools = {};
 
@@ -160,7 +160,7 @@
     const std::vector<uint32_t> inputIndexes = {0};
     const std::vector<uint32_t> outputIndexes = {5};
     std::vector<uint8_t> operandValues = {
-      50, 0, 0, 0, 5, 0, 0, 0, 100, 0, 0, 0, 0, 0, 0, 0
+      30, 0, 0, 0, 5, 0, 0, 0, 35, 0, 0, 0, 0, 0, 0, 0
     };
     const std::vector<hidl_memory> pools = {};
 
diff --git a/nn/runtime/test/generated/vts_models/depthwise_conv2d_v1_2.model.cpp b/nn/runtime/test/generated/vts_models/depthwise_conv2d_v1_2.model.cpp
index e28f898..dd2002b 100644
--- a/nn/runtime/test/generated/vts_models/depthwise_conv2d_v1_2.model.cpp
+++ b/nn/runtime/test/generated/vts_models/depthwise_conv2d_v1_2.model.cpp
@@ -24609,13 +24609,13 @@
 }
 
 // Create the model
-Model createTestModel() {
+Model createTestModel_quant_output_multiplier_gt_1() {
     const std::vector<Operand> operands = {
         {
             .type = OperandType::TENSOR_QUANT8_ASYMM,
             .dimensions = {1, 3, 2, 2},
             .numberOfConsumers = 1,
-            .scale = 0.5f,
+            .scale = 1.0058823529411764f,
             .zeroPoint = 127,
             .lifetime = OperandLifeTime::MODEL_INPUT,
             .location = {.poolIndex = 0, .offset = 0, .length = 0},
@@ -24633,7 +24633,7 @@
             .type = OperandType::TENSOR_INT32,
             .dimensions = {4},
             .numberOfConsumers = 1,
-            .scale = 0.5029411764705882f,
+            .scale = 1.0117993079584775f,
             .zeroPoint = 0,
             .lifetime = OperandLifeTime::CONSTANT_COPY,
             .location = {.poolIndex = 0, .offset = 16, .length = 16},
@@ -24719,19 +24719,19 @@
     };
 }
 
-inline bool is_ignored(int i) {
+inline bool is_ignored_quant_output_multiplier_gt_1(int i) {
   static std::set<int> ignore = {};
   return ignore.find(i) != ignore.end();
 }
 
 // Create the model
-Model createTestModel_relaxed() {
+Model createTestModel_quant_output_multiplier_gt_1_relaxed() {
     const std::vector<Operand> operands = {
         {
             .type = OperandType::TENSOR_QUANT8_ASYMM,
             .dimensions = {1, 3, 2, 2},
             .numberOfConsumers = 1,
-            .scale = 0.5f,
+            .scale = 1.0058823529411764f,
             .zeroPoint = 127,
             .lifetime = OperandLifeTime::MODEL_INPUT,
             .location = {.poolIndex = 0, .offset = 0, .length = 0},
@@ -24749,7 +24749,7 @@
             .type = OperandType::TENSOR_INT32,
             .dimensions = {4},
             .numberOfConsumers = 1,
-            .scale = 0.5029411764705882f,
+            .scale = 1.0117993079584775f,
             .zeroPoint = 0,
             .lifetime = OperandLifeTime::CONSTANT_COPY,
             .location = {.poolIndex = 0, .offset = 16, .length = 16},
@@ -24836,19 +24836,19 @@
     };
 }
 
-inline bool is_ignored_relaxed(int i) {
+inline bool is_ignored_quant_output_multiplier_gt_1_relaxed(int i) {
   static std::set<int> ignore = {};
   return ignore.find(i) != ignore.end();
 }
 
 // Create the model
-Model createTestModel_weight_as_input() {
+Model createTestModel_quant_output_multiplier_gt_1_weight_as_input() {
     const std::vector<Operand> operands = {
         {
             .type = OperandType::TENSOR_QUANT8_ASYMM,
             .dimensions = {1, 3, 2, 2},
             .numberOfConsumers = 1,
-            .scale = 0.5f,
+            .scale = 1.0058823529411764f,
             .zeroPoint = 127,
             .lifetime = OperandLifeTime::MODEL_INPUT,
             .location = {.poolIndex = 0, .offset = 0, .length = 0},
@@ -24866,7 +24866,7 @@
             .type = OperandType::TENSOR_INT32,
             .dimensions = {4},
             .numberOfConsumers = 1,
-            .scale = 0.5029411764705882f,
+            .scale = 1.0117993079584775f,
             .zeroPoint = 0,
             .lifetime = OperandLifeTime::MODEL_INPUT,
             .location = {.poolIndex = 0, .offset = 0, .length = 0},
@@ -24952,19 +24952,19 @@
     };
 }
 
-inline bool is_ignored_weight_as_input(int i) {
+inline bool is_ignored_quant_output_multiplier_gt_1_weight_as_input(int i) {
   static std::set<int> ignore = {};
   return ignore.find(i) != ignore.end();
 }
 
 // Create the model
-Model createTestModel_weight_as_input_relaxed() {
+Model createTestModel_quant_output_multiplier_gt_1_weight_as_input_relaxed() {
     const std::vector<Operand> operands = {
         {
             .type = OperandType::TENSOR_QUANT8_ASYMM,
             .dimensions = {1, 3, 2, 2},
             .numberOfConsumers = 1,
-            .scale = 0.5f,
+            .scale = 1.0058823529411764f,
             .zeroPoint = 127,
             .lifetime = OperandLifeTime::MODEL_INPUT,
             .location = {.poolIndex = 0, .offset = 0, .length = 0},
@@ -24982,7 +24982,7 @@
             .type = OperandType::TENSOR_INT32,
             .dimensions = {4},
             .numberOfConsumers = 1,
-            .scale = 0.5029411764705882f,
+            .scale = 1.0117993079584775f,
             .zeroPoint = 0,
             .lifetime = OperandLifeTime::MODEL_INPUT,
             .location = {.poolIndex = 0, .offset = 0, .length = 0},
@@ -25069,19 +25069,19 @@
     };
 }
 
-inline bool is_ignored_weight_as_input_relaxed(int i) {
+inline bool is_ignored_quant_output_multiplier_gt_1_weight_as_input_relaxed(int i) {
   static std::set<int> ignore = {};
   return ignore.find(i) != ignore.end();
 }
 
 // Create the model
-Model createTestModel_dynamic_output_shape() {
+Model createTestModel_quant_output_multiplier_gt_1_dynamic_output_shape() {
     const std::vector<Operand> operands = {
         {
             .type = OperandType::TENSOR_QUANT8_ASYMM,
             .dimensions = {1, 3, 2, 2},
             .numberOfConsumers = 1,
-            .scale = 0.5f,
+            .scale = 1.0058823529411764f,
             .zeroPoint = 127,
             .lifetime = OperandLifeTime::MODEL_INPUT,
             .location = {.poolIndex = 0, .offset = 0, .length = 0},
@@ -25099,7 +25099,7 @@
             .type = OperandType::TENSOR_INT32,
             .dimensions = {4},
             .numberOfConsumers = 1,
-            .scale = 0.5029411764705882f,
+            .scale = 1.0117993079584775f,
             .zeroPoint = 0,
             .lifetime = OperandLifeTime::CONSTANT_COPY,
             .location = {.poolIndex = 0, .offset = 16, .length = 16},
@@ -25185,19 +25185,19 @@
     };
 }
 
-inline bool is_ignored_dynamic_output_shape(int i) {
+inline bool is_ignored_quant_output_multiplier_gt_1_dynamic_output_shape(int i) {
   static std::set<int> ignore = {};
   return ignore.find(i) != ignore.end();
 }
 
 // Create the model
-Model createTestModel_dynamic_output_shape_relaxed() {
+Model createTestModel_quant_output_multiplier_gt_1_dynamic_output_shape_relaxed() {
     const std::vector<Operand> operands = {
         {
             .type = OperandType::TENSOR_QUANT8_ASYMM,
             .dimensions = {1, 3, 2, 2},
             .numberOfConsumers = 1,
-            .scale = 0.5f,
+            .scale = 1.0058823529411764f,
             .zeroPoint = 127,
             .lifetime = OperandLifeTime::MODEL_INPUT,
             .location = {.poolIndex = 0, .offset = 0, .length = 0},
@@ -25215,7 +25215,7 @@
             .type = OperandType::TENSOR_INT32,
             .dimensions = {4},
             .numberOfConsumers = 1,
-            .scale = 0.5029411764705882f,
+            .scale = 1.0117993079584775f,
             .zeroPoint = 0,
             .lifetime = OperandLifeTime::CONSTANT_COPY,
             .location = {.poolIndex = 0, .offset = 16, .length = 16},
@@ -25302,19 +25302,19 @@
     };
 }
 
-inline bool is_ignored_dynamic_output_shape_relaxed(int i) {
+inline bool is_ignored_quant_output_multiplier_gt_1_dynamic_output_shape_relaxed(int i) {
   static std::set<int> ignore = {};
   return ignore.find(i) != ignore.end();
 }
 
 // Create the model
-Model createTestModel_dynamic_output_shape_weight_as_input() {
+Model createTestModel_quant_output_multiplier_gt_1_dynamic_output_shape_weight_as_input() {
     const std::vector<Operand> operands = {
         {
             .type = OperandType::TENSOR_QUANT8_ASYMM,
             .dimensions = {1, 3, 2, 2},
             .numberOfConsumers = 1,
-            .scale = 0.5f,
+            .scale = 1.0058823529411764f,
             .zeroPoint = 127,
             .lifetime = OperandLifeTime::MODEL_INPUT,
             .location = {.poolIndex = 0, .offset = 0, .length = 0},
@@ -25332,7 +25332,7 @@
             .type = OperandType::TENSOR_INT32,
             .dimensions = {4},
             .numberOfConsumers = 1,
-            .scale = 0.5029411764705882f,
+            .scale = 1.0117993079584775f,
             .zeroPoint = 0,
             .lifetime = OperandLifeTime::MODEL_INPUT,
             .location = {.poolIndex = 0, .offset = 0, .length = 0},
@@ -25418,19 +25418,19 @@
     };
 }
 
-inline bool is_ignored_dynamic_output_shape_weight_as_input(int i) {
+inline bool is_ignored_quant_output_multiplier_gt_1_dynamic_output_shape_weight_as_input(int i) {
   static std::set<int> ignore = {};
   return ignore.find(i) != ignore.end();
 }
 
 // Create the model
-Model createTestModel_dynamic_output_shape_weight_as_input_relaxed() {
+Model createTestModel_quant_output_multiplier_gt_1_dynamic_output_shape_weight_as_input_relaxed() {
     const std::vector<Operand> operands = {
         {
             .type = OperandType::TENSOR_QUANT8_ASYMM,
             .dimensions = {1, 3, 2, 2},
             .numberOfConsumers = 1,
-            .scale = 0.5f,
+            .scale = 1.0058823529411764f,
             .zeroPoint = 127,
             .lifetime = OperandLifeTime::MODEL_INPUT,
             .location = {.poolIndex = 0, .offset = 0, .length = 0},
@@ -25448,7 +25448,7 @@
             .type = OperandType::TENSOR_INT32,
             .dimensions = {4},
             .numberOfConsumers = 1,
-            .scale = 0.5029411764705882f,
+            .scale = 1.0117993079584775f,
             .zeroPoint = 0,
             .lifetime = OperandLifeTime::MODEL_INPUT,
             .location = {.poolIndex = 0, .offset = 0, .length = 0},
@@ -25535,7 +25535,7 @@
     };
 }
 
-inline bool is_ignored_dynamic_output_shape_weight_as_input_relaxed(int i) {
+inline bool is_ignored_quant_output_multiplier_gt_1_dynamic_output_shape_weight_as_input_relaxed(int i) {
   static std::set<int> ignore = {};
   return ignore.find(i) != ignore.end();
 }
diff --git a/nn/runtime/test/generated/vts_models/space_to_batch_v1_2.model.cpp b/nn/runtime/test/generated/vts_models/space_to_batch_v1_2.model.cpp
index d5cb3ae..5d65c0d 100644
--- a/nn/runtime/test/generated/vts_models/space_to_batch_v1_2.model.cpp
+++ b/nn/runtime/test/generated/vts_models/space_to_batch_v1_2.model.cpp
@@ -2817,7 +2817,7 @@
             .dimensions = {1, 5, 2, 1},
             .numberOfConsumers = 1,
             .scale = 0.5f,
-            .zeroPoint = 0,
+            .zeroPoint = 128,
             .lifetime = OperandLifeTime::MODEL_INPUT,
             .location = {.poolIndex = 0, .offset = 0, .length = 0},
         },
@@ -2853,7 +2853,7 @@
             .dimensions = {6, 2, 2, 1},
             .numberOfConsumers = 0,
             .scale = 0.5f,
-            .zeroPoint = 0,
+            .zeroPoint = 128,
             .lifetime = OperandLifeTime::MODEL_OUTPUT,
             .location = {.poolIndex = 0, .offset = 0, .length = 0},
         }
@@ -3138,7 +3138,7 @@
             .dimensions = {1, 1, 5, 2},
             .numberOfConsumers = 1,
             .scale = 0.5f,
-            .zeroPoint = 0,
+            .zeroPoint = 128,
             .lifetime = OperandLifeTime::MODEL_INPUT,
             .location = {.poolIndex = 0, .offset = 0, .length = 0},
         },
@@ -3174,7 +3174,7 @@
             .dimensions = {6, 1, 2, 2},
             .numberOfConsumers = 0,
             .scale = 0.5f,
-            .zeroPoint = 0,
+            .zeroPoint = 128,
             .lifetime = OperandLifeTime::MODEL_OUTPUT,
             .location = {.poolIndex = 0, .offset = 0, .length = 0},
         }
@@ -3459,7 +3459,7 @@
             .dimensions = {1, 5, 2, 1},
             .numberOfConsumers = 1,
             .scale = 0.5f,
-            .zeroPoint = 0,
+            .zeroPoint = 128,
             .lifetime = OperandLifeTime::MODEL_INPUT,
             .location = {.poolIndex = 0, .offset = 0, .length = 0},
         },
@@ -3495,7 +3495,7 @@
             .dimensions = {0, 0, 0, 0},
             .numberOfConsumers = 0,
             .scale = 0.5f,
-            .zeroPoint = 0,
+            .zeroPoint = 128,
             .lifetime = OperandLifeTime::MODEL_OUTPUT,
             .location = {.poolIndex = 0, .offset = 0, .length = 0},
         }
@@ -3780,7 +3780,7 @@
             .dimensions = {1, 1, 5, 2},
             .numberOfConsumers = 1,
             .scale = 0.5f,
-            .zeroPoint = 0,
+            .zeroPoint = 128,
             .lifetime = OperandLifeTime::MODEL_INPUT,
             .location = {.poolIndex = 0, .offset = 0, .length = 0},
         },
@@ -3816,7 +3816,7 @@
             .dimensions = {0, 0, 0, 0},
             .numberOfConsumers = 0,
             .scale = 0.5f,
-            .zeroPoint = 0,
+            .zeroPoint = 128,
             .lifetime = OperandLifeTime::MODEL_OUTPUT,
             .location = {.poolIndex = 0, .offset = 0, .length = 0},
         }
@@ -4101,7 +4101,7 @@
             .dimensions = {1, 4, 2, 1},
             .numberOfConsumers = 1,
             .scale = 0.25f,
-            .zeroPoint = 0,
+            .zeroPoint = 128,
             .lifetime = OperandLifeTime::MODEL_INPUT,
             .location = {.poolIndex = 0, .offset = 0, .length = 0},
         },
@@ -4137,7 +4137,7 @@
             .dimensions = {6, 2, 4, 1},
             .numberOfConsumers = 0,
             .scale = 0.25f,
-            .zeroPoint = 0,
+            .zeroPoint = 128,
             .lifetime = OperandLifeTime::MODEL_OUTPUT,
             .location = {.poolIndex = 0, .offset = 0, .length = 0},
         }
@@ -4422,7 +4422,7 @@
             .dimensions = {1, 1, 4, 2},
             .numberOfConsumers = 1,
             .scale = 0.25f,
-            .zeroPoint = 0,
+            .zeroPoint = 128,
             .lifetime = OperandLifeTime::MODEL_INPUT,
             .location = {.poolIndex = 0, .offset = 0, .length = 0},
         },
@@ -4458,7 +4458,7 @@
             .dimensions = {6, 1, 2, 4},
             .numberOfConsumers = 0,
             .scale = 0.25f,
-            .zeroPoint = 0,
+            .zeroPoint = 128,
             .lifetime = OperandLifeTime::MODEL_OUTPUT,
             .location = {.poolIndex = 0, .offset = 0, .length = 0},
         }
@@ -4743,7 +4743,7 @@
             .dimensions = {1, 4, 2, 1},
             .numberOfConsumers = 1,
             .scale = 0.25f,
-            .zeroPoint = 0,
+            .zeroPoint = 128,
             .lifetime = OperandLifeTime::MODEL_INPUT,
             .location = {.poolIndex = 0, .offset = 0, .length = 0},
         },
@@ -4779,7 +4779,7 @@
             .dimensions = {0, 0, 0, 0},
             .numberOfConsumers = 0,
             .scale = 0.25f,
-            .zeroPoint = 0,
+            .zeroPoint = 128,
             .lifetime = OperandLifeTime::MODEL_OUTPUT,
             .location = {.poolIndex = 0, .offset = 0, .length = 0},
         }
@@ -5064,7 +5064,7 @@
             .dimensions = {1, 1, 4, 2},
             .numberOfConsumers = 1,
             .scale = 0.25f,
-            .zeroPoint = 0,
+            .zeroPoint = 128,
             .lifetime = OperandLifeTime::MODEL_INPUT,
             .location = {.poolIndex = 0, .offset = 0, .length = 0},
         },
@@ -5100,7 +5100,7 @@
             .dimensions = {0, 0, 0, 0},
             .numberOfConsumers = 0,
             .scale = 0.25f,
-            .zeroPoint = 0,
+            .zeroPoint = 128,
             .lifetime = OperandLifeTime::MODEL_OUTPUT,
             .location = {.poolIndex = 0, .offset = 0, .length = 0},
         }
diff --git a/nn/runtime/test/specs/V1_1/avg_pool_float_2_relaxed.mod.py b/nn/runtime/test/specs/V1_1/avg_pool_float_2_relaxed.mod.py
index 8ffa775..b94c37e 100644
--- a/nn/runtime/test/specs/V1_1/avg_pool_float_2_relaxed.mod.py
+++ b/nn/runtime/test/specs/V1_1/avg_pool_float_2_relaxed.mod.py
@@ -24,9 +24,11 @@
 
 i0 = Input("i0", "TENSOR_FLOAT32", "{%d, %d, %d, %d}" % (bat, row, col, chn))
 
+# These values were lowered from those in avg_pool_float_2, as it wasn't
+# possible to meet the expected accuracy requirements with an fp16 accumulator.
 std = 5
-flt = 100
-pad = 50
+flt = 35
+pad = 30
 
 stride = Int32Scalar("stride", std)
 filt = Int32Scalar("filter", flt)
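
The lowered parameters above (pad 30, filter 35, stride 5 on a {5, 52, 60, 3} input) are what produce the new {5, 16, 18, 3} output shape seen in the regenerated avg_pool_float_2_relaxed model files. A minimal sanity check, assuming the same pad value is applied on both sides of each spatial dimension; this is illustrative Python and not part of the generated sources, and the helper name is made up:

# Output spatial size with explicit padding: floor((in + 2*pad - filter) / stride) + 1
def pooled_dim(in_dim, pad, flt, std):
    return (in_dim + 2 * pad - flt) // std + 1

rows = pooled_dim(52, 30, 35, 5)  # 16 (was 11 with pad 50, filter 100)
cols = pooled_dim(60, 30, 35, 5)  # 18 (was 13 with pad 50, filter 100)
assert (rows, cols) == (16, 18)   # matches the updated {5, 16, 18, 3} output operand
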
diff --git a/nn/runtime/test/specs/V1_2/depthwise_conv2d_v1_2.mod.py b/nn/runtime/test/specs/V1_2/depthwise_conv2d_v1_2.mod.py
index c069dc0..1ceb9c4 100644
--- a/nn/runtime/test/specs/V1_2/depthwise_conv2d_v1_2.mod.py
+++ b/nn/runtime/test/specs/V1_2/depthwise_conv2d_v1_2.mod.py
@@ -142,7 +142,7 @@
 }).AddNchw(i4, o4, layout).AddInput(f4, b4).AddVariations("relaxed", "float16", quant8, channelQuant8)
 
 # TEST 9: quantized with scale product greater than output scale
-input_scale = 127.5 / 255
+input_scale = 256.5 / 255
 input_zero_point = 127
 filter_scale = 256.5 / 255
 filter_zero_point = 128
@@ -157,11 +157,11 @@
 b9 = Parameter("op3", ("TENSOR_INT32", [4], input_scale * filter_scale, 0),
                [2, 4, 6, 8])
 o9 = Output("op4", ("TENSOR_QUANT8_ASYMM", [1, 2, 1, 4], 1.0, 127))
-model9 = Model().Operation("DEPTHWISE_CONV_2D", i9, f9, b9, 2, 1, 1, 2,
+model9 = Model("quant_output_multiplier_gt_1").Operation("DEPTHWISE_CONV_2D", i9, f9, b9, 2, 1, 1, 2,
                            0).To(o9)
 
 # Instantiate an example
 example = Example({
     i9: [129, 131, 141, 143, 133, 135, 145, 147, 137, 139, 149, 151],
-    o9: [198, 93, 227, 107, 219, 101, 255, 123]
-}, model=model9, name="quant_output_multiplier_gt_1").AddInput(f9, b9).AddVariations("relaxed")
+    o9: [255, 58, 255, 87, 255, 74, 255, 119]
+}, model=model9).AddInput(f9, b9).AddVariations("relaxed")
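
With the raised input_scale, the product input_scale * filter_scale exceeds the output scale of 1.0, so the quantized output multiplier is greater than 1, which is what the renamed quant_output_multiplier_gt_1 variants exercise. A rough check of the scales that show up in the regenerated model files (illustrative Python, not part of the spec):

input_scale = 256.5 / 255                # ~1.0058823529411764 (was 127.5 / 255 = 0.5)
filter_scale = 256.5 / 255               # ~1.0058823529411764
bias_scale = input_scale * filter_scale  # ~1.0117993079584775, the new type13 scale
output_scale = 1.0                       # o9 keeps scale 1.0, zeroPoint 127

assert input_scale * filter_scale / output_scale > 1.0  # hence "output multiplier > 1"
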
diff --git a/nn/runtime/test/specs/V1_2/softmax_v1_2.mod.py b/nn/runtime/test/specs/V1_2/softmax_v1_2.mod.py
index c04a0d3..f01c8de 100644
--- a/nn/runtime/test/specs/V1_2/softmax_v1_2.mod.py
+++ b/nn/runtime/test/specs/V1_2/softmax_v1_2.mod.py
@@ -25,9 +25,13 @@
 })
 
 example1 = {
-    i: [1., 2., 3., 4., 5., -1., -2., -3., -4., -5.] * 4,
-    o: [0.011656231, 0.031684921, 0.086128544, 0.234121657, 0.636408647,
-         0.636408647, 0.234121657, 0.086128544, 0.031684921, 0.011656231] * 4
+    i: [17., 16., 15., 14.,  1.,
+        -1., -2., -3., -4., -17.] * 4,
+    o: [0.643914213228014,
+        0.236882800924671,
+        0.087144312427294,
+        0.032058600957022,
+        7.246299848982885e-08] * 8
 }
 example2 = {
     i: [1., 2., 3., 4., 5., -1., -2., -3., -4., -5.] * 4,
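
The new example1 values can be reproduced with a plain softmax, assuming beta = 1 and per-group application over each five-element run, both of which are consistent with the old and new expected outputs. Because softmax is shift-invariant, the second group is just the first shifted by -18 and yields identical outputs, so five expected values repeated eight times cover all forty inputs. An illustrative check:

import math

def softmax(xs):
    # Numerically stable softmax with beta = 1 (assumption; matches the listed outputs).
    m = max(xs)
    exps = [math.exp(x - m) for x in xs]
    s = sum(exps)
    return [e / s for e in exps]

a = softmax([17., 16., 15., 14., 1.])
b = softmax([-1., -2., -3., -4., -17.])  # the first group shifted by -18
assert all(abs(x - y) < 1e-12 for x, y in zip(a, b))
# a is approximately [0.643914213, 0.236882801, 0.087144312, 0.032058601, 7.2463e-08]
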
diff --git a/nn/runtime/test/specs/V1_2/space_to_batch_v1_2.mod.py b/nn/runtime/test/specs/V1_2/space_to_batch_v1_2.mod.py
index dfe234f..356fae5 100644
--- a/nn/runtime/test/specs/V1_2/space_to_batch_v1_2.mod.py
+++ b/nn/runtime/test/specs/V1_2/space_to_batch_v1_2.mod.py
@@ -61,8 +61,8 @@
 
 # Additional data type
 quant8 = DataTypeConverter().Identify({
-    i3: ("TENSOR_QUANT8_ASYMM", 0.5, 0),
-    o3: ("TENSOR_QUANT8_ASYMM", 0.5, 0)
+    i3: ("TENSOR_QUANT8_ASYMM", 0.5, 128),
+    o3: ("TENSOR_QUANT8_ASYMM", 0.5, 128)
 })
 
 # Instantiate an example
@@ -81,8 +81,8 @@
 
 # Additional data type
 quant8 = DataTypeConverter().Identify({
-    i4: ("TENSOR_QUANT8_ASYMM", 0.25, 0),
-    o4: ("TENSOR_QUANT8_ASYMM", 0.25, 0)
+    i4: ("TENSOR_QUANT8_ASYMM", 0.25, 128),
+    o4: ("TENSOR_QUANT8_ASYMM", 0.25, 128)
 })
 
 # Instantiate an example